diff --git a/.gitattributes b/.gitattributes index 1ef325f1b111266a6b26e0196871bd78baa8c2f3..f8705120b387f5c53396dbbf2ea8caac377cf879 100644 --- a/.gitattributes +++ b/.gitattributes @@ -57,3 +57,20 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text # Video files - compressed *.mp4 filter=lfs diff=lfs merge=lfs -text *.webm filter=lfs diff=lfs merge=lfs -text +SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d_cuda.cpython-312-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d.o filter=lfs diff=lfs merge=lfs -text +SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_bwd.o filter=lfs diff=lfs merge=lfs -text +SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_fwd.o filter=lfs diff=lfs merge=lfs -text +SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_update.o filter=lfs diff=lfs merge=lfs -text +SegMamba/mamba/build/lib.linux-x86_64-cpython-312/selective_scan_cuda.cpython-312-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +SegMamba/mamba/build/temp.linux-x86_64-cpython-312/.ninja_deps filter=lfs diff=lfs merge=lfs -text +SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan.o filter=lfs diff=lfs merge=lfs -text +SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_complex.o filter=lfs diff=lfs merge=lfs -text +SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_real.o filter=lfs diff=lfs merge=lfs -text +SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_complex.o filter=lfs diff=lfs merge=lfs -text +SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_real.o filter=lfs diff=lfs merge=lfs -text +SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_complex.o filter=lfs diff=lfs merge=lfs -text +SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_real.o filter=lfs diff=lfs merge=lfs -text +SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_bf16.o filter=lfs diff=lfs merge=lfs -text +SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp16.o filter=lfs diff=lfs merge=lfs -text +SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp32.o filter=lfs diff=lfs merge=lfs -text diff --git a/SegMamba/.DS_Store b/SegMamba/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..012e2d62b0baf86b81f29caf4c5324e021192325 Binary files /dev/null and b/SegMamba/.DS_Store differ diff --git a/SegMamba/.gitignore b/SegMamba/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..68bc17f9ff2104a9d7b6777058bb4c343ca72609 --- /dev/null +++ b/SegMamba/.gitignore @@ -0,0 +1,160 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/ diff --git a/SegMamba/0_inference.py b/SegMamba/0_inference.py new file mode 100644 index 0000000000000000000000000000000000000000..e7f2b4530214000a644360a860527648dce98546 --- /dev/null +++ b/SegMamba/0_inference.py @@ -0,0 +1,20 @@ + + +import torch +from model_segmamba.segmamba import SegMamba + +t1 = torch.rand(1, 4, 128, 128, 128).cuda() + + +model = SegMamba(in_chans=4, + out_chans=4, + depths=[2,2,2,2], + feat_size=[48, 96, 192, 384]).cuda() + +out = model(t1) + +print(out.shape) + + + + diff --git a/SegMamba/1_rename_mri_data.py b/SegMamba/1_rename_mri_data.py new file mode 100644 index 0000000000000000000000000000000000000000..50c3f409e8a0de972e0a9e9c40f85ffbe4fe9978 --- /dev/null +++ b/SegMamba/1_rename_mri_data.py @@ -0,0 +1,26 @@ + + + +import os + +data_dir = "./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/" + +all_cases = os.listdir(data_dir) + +for case_name in all_cases: + case_dir = os.path.join(data_dir, case_name) + + for data_name in os.listdir(case_dir): + + if "-" not in data_name: + continue + new_name = data_name.split("-")[-1] + + new_path = os.path.join(case_dir, new_name) + + old_path = os.path.join(case_dir, data_name) + + os.rename(old_path, new_path) + + print(f"{new_path} 命名成功") + diff --git a/SegMamba/2_preprocessing_mri.py b/SegMamba/2_preprocessing_mri.py new file mode 100644 index 0000000000000000000000000000000000000000..7765a91d50fab108c93335e5e333a1994f1b4d8c --- /dev/null +++ b/SegMamba/2_preprocessing_mri.py @@ -0,0 +1,45 @@ + +from light_training.preprocessing.preprocessors.preprocessor_mri import MultiModalityPreprocessor +import numpy as np +import pickle +import json + +data_filename = ["t2w.nii.gz", + "t2f.nii.gz", + "t1n.nii.gz", + "t1c.nii.gz"] +seg_filename = "seg.nii.gz" + +base_dir = "./data/raw_data/BraTS2023/" +image_dir = "ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData" + +def process_train(): + preprocessor = MultiModalityPreprocessor(base_dir=base_dir, + image_dir=image_dir, + data_filenames=data_filename, + seg_filename=seg_filename + ) + + out_spacing = [1.0, 1.0, 1.0] + output_dir = "./data/fullres/train/" + + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, 2, 3], + ) + +def plan(): + preprocessor = MultiModalityPreprocessor(base_dir=base_dir, + image_dir=image_dir, + data_filenames=data_filename, + seg_filename=seg_filename + ) + + preprocessor.run_plan() + + +if __name__ == "__main__": + + plan() + process_train() + diff --git a/SegMamba/3_train.py b/SegMamba/3_train.py new file mode 100644 index 0000000000000000000000000000000000000000..60533ed9809a9253b019d7296b708c8aec3d13e6 --- /dev/null +++ b/SegMamba/3_train.py @@ -0,0 +1,168 @@ +import numpy as np +from light_training.dataloading.dataset import get_train_val_test_loader_from_train +import torch +import torch.nn as nn +from monai.inferers import SlidingWindowInferer +from light_training.evaluation.metric import dice +from light_training.trainer import Trainer +from monai.utils import set_determinism +from light_training.utils.files_helper import save_new_model_and_delete_last +from monai.losses.dice import DiceLoss +set_determinism(123) +import os + +data_dir = "./data/fullres/train" +logdir = f"./logs/segmamba" + +model_save_path = os.path.join(logdir, "model") +# augmentation = "nomirror" +augmentation = True + +env = "pytorch" +max_epoch = 1000 +batch_size = 2 +val_every = 2 +num_gpus = 1 +device = "cuda:0" +roi_size = [128, 128, 128] + +def func(m, epochs): + return np.exp(-10*(1- m / 
epochs)**2) + +class BraTSTrainer(Trainer): + def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"): + super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script) + self.window_infer = SlidingWindowInferer(roi_size=roi_size, + sw_batch_size=1, + overlap=0.5) + self.augmentation = augmentation + from model_segmamba.segmamba import SegMamba + + self.model = SegMamba(in_chans=4, + out_chans=4, + depths=[2,2,2,2], + feat_size=[48, 96, 192, 384]) + + self.patch_size = roi_size + self.best_mean_dice = 0.0 + self.ce = nn.CrossEntropyLoss() + self.mse = nn.MSELoss() + self.train_process = 18 + self.optimizer = torch.optim.SGD(self.model.parameters(), lr=1e-2, weight_decay=3e-5, + momentum=0.99, nesterov=True) + + self.scheduler_type = "poly" + self.cross = nn.CrossEntropyLoss() + + def training_step(self, batch): + image, label = self.get_input(batch) + + pred = self.model(image) + + loss = self.cross(pred, label) + + self.log("training_loss", loss, step=self.global_step) + + return loss + + def convert_labels(self, labels): + ## TC, WT and ET + result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3] + + return torch.cat(result, dim=1).float() + + + def get_input(self, batch): + image = batch["data"] + label = batch["seg"] + + label = label[:, 0].long() + return image, label + + def cal_metric(self, gt, pred, voxel_spacing=[1.0, 1.0, 1.0]): + if pred.sum() > 0 and gt.sum() > 0: + d = dice(pred, gt) + return np.array([d, 50]) + + elif gt.sum() == 0 and pred.sum() == 0: + return np.array([1.0, 50]) + + else: + return np.array([0.0, 50]) + + def validation_step(self, batch): + image, label = self.get_input(batch) + + output = self.model(image) + + output = output.argmax(dim=1) + + output = output[:, None] + output = self.convert_labels(output) + + label = label[:, None] + label = self.convert_labels(label) + + output = output.cpu().numpy() + target = label.cpu().numpy() + + dices = [] + + c = 3 + for i in range(0, c): + pred_c = output[:, i] + target_c = target[:, i] + + cal_dice, _ = self.cal_metric(target_c, pred_c) + dices.append(cal_dice) + + return dices + + def validation_end(self, val_outputs): + dices = val_outputs + + tc, wt, et = dices[0].mean(), dices[1].mean(), dices[2].mean() + + print(f"dices is {tc, wt, et}") + + mean_dice = (tc + wt + et) / 3 + + self.log("tc", tc, step=self.epoch) + self.log("wt", wt, step=self.epoch) + self.log("et", et, step=self.epoch) + + self.log("mean_dice", mean_dice, step=self.epoch) + + if mean_dice > self.best_mean_dice: + self.best_mean_dice = mean_dice + save_new_model_and_delete_last(self.model, + os.path.join(model_save_path, + f"best_model_{mean_dice:.4f}.pt"), + delete_symbol="best_model") + + save_new_model_and_delete_last(self.model, + os.path.join(model_save_path, + f"final_model_{mean_dice:.4f}.pt"), + delete_symbol="final_model") + + + if (self.epoch + 1) % 100 == 0: + torch.save(self.model.state_dict(), os.path.join(model_save_path, f"tmp_model_ep{self.epoch}_{mean_dice:.4f}.pt")) + + print(f"mean_dice is {mean_dice}") + +if __name__ == "__main__": + + trainer = BraTSTrainer(env_type=env, + max_epochs=max_epoch, + batch_size=batch_size, + device=device, + logdir=logdir, + val_every=val_every, + num_gpus=num_gpus, + master_port=17759, + training_script=__file__) + + train_ds, val_ds, test_ds = 
get_train_val_test_loader_from_train(data_dir) + + trainer.train(train_dataset=train_ds, val_dataset=val_ds) diff --git a/SegMamba/4_predict.py b/SegMamba/4_predict.py new file mode 100644 index 0000000000000000000000000000000000000000..a2b5d48642b1ae5c5c89170bb05115a19dc124af --- /dev/null +++ b/SegMamba/4_predict.py @@ -0,0 +1,139 @@ +import numpy as np +from light_training.dataloading.dataset import get_train_val_test_loader_from_train +import torch +import torch.nn as nn +from monai.inferers import SlidingWindowInferer +from light_training.evaluation.metric import dice +from light_training.trainer import Trainer +from monai.utils import set_determinism +from light_training.evaluation.metric import dice +set_determinism(123) +import os +from light_training.prediction import Predictor + +data_dir = "./data/fullres/train" +env = "pytorch" +max_epoch = 1000 +batch_size = 2 +val_every = 2 +num_gpus = 1 +device = "cuda:0" +patch_size = [128, 128, 128] + +class BraTSTrainer(Trainer): + def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"): + super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script) + + self.patch_size = patch_size + self.augmentation = False + + def convert_labels(self, labels): + ## TC, WT and ET + result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3] + + return torch.cat(result, dim=1).float() + + def get_input(self, batch): + image = batch["data"] + label = batch["seg"] + properties = batch["properties"] + label = self.convert_labels(label) + + return image, label, properties + + def define_model_segmamba(self): + from model_segmamba.segmamba import SegMamba + model = SegMamba(in_chans=4, + out_chans=4, + depths=[2,2,2,2], + feat_size=[48, 96, 192, 384]) + + model_path = "/home/xingzhaohu/dev/jiuding_code/brats23/logs/segmamba/model/final_model_0.9038.pt" + new_sd = self.filte_state_dict(torch.load(model_path, map_location="cpu")) + model.load_state_dict(new_sd) + model.eval() + window_infer = SlidingWindowInferer(roi_size=patch_size, + sw_batch_size=2, + overlap=0.5, + progress=True, + mode="gaussian") + + predictor = Predictor(window_infer=window_infer, + mirror_axes=[0,1,2]) + + save_path = "./prediction_results/segmamba" + os.makedirs(save_path, exist_ok=True) + + return model, predictor, save_path + + def validation_step(self, batch): + image, label, properties = self.get_input(batch) + ddim = False + + model, predictor, save_path = self.define_model_segmamba() + + model_output = predictor.maybe_mirror_and_predict(image, model, device=device) + + model_output = predictor.predict_raw_probability(model_output, + properties=properties) + + + model_output = model_output.argmax(dim=0)[None] + model_output = self.convert_labels_dim0(model_output) + + label = label[0] + c = 3 + dices = [] + for i in range(0, c): + output_i = model_output[i].cpu().numpy() + label_i = label[i].cpu().numpy() + d = dice(output_i, label_i) + dices.append(d) + + print(dices) + + model_output = predictor.predict_noncrop_probability(model_output, properties) + predictor.save_to_nii(model_output, + raw_spacing=[1,1,1], + case_name = properties['name'][0], + save_dir=save_path) + + return 0 + + def convert_labels_dim0(self, labels): + ## TC, WT and ET + result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3] + + 
return torch.cat(result, dim=0).float() + + + def filte_state_dict(self, sd): + if "module" in sd : + sd = sd["module"] + new_sd = {} + for k, v in sd.items(): + k = str(k) + new_k = k[7:] if k.startswith("module") else k + new_sd[new_k] = v + del sd + return new_sd + +if __name__ == "__main__": + + trainer = BraTSTrainer(env_type=env, + max_epochs=max_epoch, + batch_size=batch_size, + device=device, + logdir="", + val_every=val_every, + num_gpus=num_gpus, + master_port=17751, + training_script=__file__) + + train_ds, val_ds, test_ds = get_train_val_test_loader_from_train(data_dir) + + trainer.validation_single_gpu(test_ds) + + # print(f"result is {v_mean}") + + diff --git a/SegMamba/5_compute_metrics.py b/SegMamba/5_compute_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..947252eaa9d5b00483dcde2600d4f5871eccd8a6 --- /dev/null +++ b/SegMamba/5_compute_metrics.py @@ -0,0 +1,84 @@ +from light_training.dataloading.dataset import get_train_val_test_loader_from_train +from monai.utils import set_determinism +import torch +import os +import numpy as np +import SimpleITK as sitk +from medpy import metric +import argparse +from tqdm import tqdm + +import numpy as np + +set_determinism(123) + +parser = argparse.ArgumentParser() + +parser.add_argument("--pred_name", required=True, type=str) + +results_root = "prediction_results" +args = parser.parse_args() + +pred_name = args.pred_name + +def cal_metric(gt, pred, voxel_spacing): + if pred.sum() > 0 and gt.sum() > 0: + dice = metric.binary.dc(pred, gt) + hd95 = metric.binary.hd95(pred, gt, voxelspacing=voxel_spacing) + return np.array([dice, hd95]) + else: + return np.array([0.0, 50]) + +def each_cases_metric(gt, pred, voxel_spacing): + classes_num = 3 + class_wise_metric = np.zeros((classes_num, 2)) + for cls in range(0, classes_num): + class_wise_metric[cls, ...] = cal_metric(pred[cls], gt[cls], voxel_spacing) + print(class_wise_metric) + return class_wise_metric + +def convert_labels(labels): + ## TC, WT and ET + labels = labels.unsqueeze(dim=0) + + result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3] + + return torch.cat(result, dim=0).float() + + +if __name__ == "__main__": + data_dir = "./data/fullres/train" + raw_data_dir = "./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/" + train_ds, val_ds, test_ds = get_train_val_test_loader_from_train(data_dir) + print(len(test_ds)) + all_results = np.zeros((250,3,2)) + + ind = 0 + for batch in tqdm(test_ds, total=len(test_ds)): + properties = batch["properties"] + case_name = properties["name"] + gt_itk = os.path.join(raw_data_dir, case_name, f"seg.nii.gz") + voxel_spacing = [1, 1, 1] + gt_itk = sitk.ReadImage(gt_itk) + gt_array = sitk.GetArrayFromImage(gt_itk).astype(np.int32) + gt_array = torch.from_numpy(gt_array) + gt_array = convert_labels(gt_array).numpy() + pred_itk = sitk.ReadImage(f"./{results_root}/{pred_name}/{case_name}.nii.gz") + pred_array = sitk.GetArrayFromImage(pred_itk) + + m = each_cases_metric(gt_array, pred_array, voxel_spacing) + + all_results[ind, ...] 
= m
+
+        ind += 1
+
+    os.makedirs(f"./{results_root}/result_metrics/", exist_ok=True)
+    np.save(f"./{results_root}/result_metrics/{pred_name}.npy", all_results)
+
+    result = np.load(f"./{results_root}/result_metrics/{pred_name}.npy")
+    print(result.shape)
+    print(result.mean(axis=0))
+    print(result.std(axis=0))
+
+
diff --git a/SegMamba/README.md b/SegMamba/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7d6f555a49e21b9d8ab942bb58545787229d64aa
--- /dev/null
+++ b/SegMamba/README.md
@@ -0,0 +1,132 @@
+# SegMamba
+
+**Recent news: If you are interested in research on vision-language models, please refer to the latest work: https://github.com/MrGiovanni/RadGPT (ICCV2025)**
+
+**We have now open-sourced the pre-processing, training, inference, and metrics computation code.**
+
+SegMamba: Long-range Sequential Modeling Mamba For 3D Medical Image Segmentation
+
+[https://arxiv.org/abs/2401.13560](https://arxiv.org/abs/2401.13560)
+
+![](images/method_figure.jpg)
+
+![](images/modules.jpg)
+
+Our advantages in speed and memory:
+![](images/segmamba_ablation.jpg)
+
+## Contact
+If you have any questions about our project, please feel free to contact us by email at zxing565@connect.hkust-gz.edu.cn or via WeChat at 18340097191. Furthermore, the data underlying this article will be shared upon reasonable request to gaof57@mail.sysu.edu.cn.
+
+## Environment installation
+Clone this repository and navigate to the root directory of the project.
+
+```bash
+git clone https://github.com/ge-xing/SegMamba.git
+
+cd SegMamba
+```
+### Install causal-conv1d
+
+```bash
+cd causal-conv1d
+
+python setup.py install
+```
+
+### Install mamba
+
+```bash
+cd mamba
+
+python setup.py install
+```
+
+### Install monai
+
+```bash
+pip install monai
+```
+
+## Simple test
+
+```bash
+python 0_inference.py
+```
+
+## Preprocessing, training, testing, inference, and metrics computation
+
+### Data downloading
+
+Data is from [https://arxiv.org/abs/2305.17033](https://arxiv.org/abs/2305.17033)
+
+Download from Baidu Disk [https://pan.baidu.com/s/1C0FUHdDtWNaYWLtDDP9TnA?pwd=ty22](https://pan.baidu.com/s/1C0FUHdDtWNaYWLtDDP9TnA?pwd=ty22) (extraction code: ty22)
+
+Download from OneDrive [https://hkustgz-my.sharepoint.com/:f:/g/personal/zxing565_connect_hkust-gz_edu_cn/EqqaINbHRxREuIj0XGicY2EBv8hjwEFKgFOhF_Ub0mvENw?e=yTpE9B](https://hkustgz-my.sharepoint.com/:f:/g/personal/zxing565_connect_hkust-gz_edu_cn/EqqaINbHRxREuIj0XGicY2EBv8hjwEFKgFOhF_Ub0mvENw?e=yTpE9B)
+
+### Preprocessing
+In my setting, the data directory of BraTS2023 is: "./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/"
+
+First, we need to run the renaming step:
+
+```bash
+python 1_rename_mri_data.py
+```
+
+Then, we need to run the pre-processing code to perform resampling, normalization, and cropping:
+
+```bash
+python 2_preprocessing_mri.py
+```
+
+After pre-processing, the data structure will be in this format:
+
+![](images/data_structure.jpg)
+### Training
+
+When pre-processing is done, we can train our model.
+
+We mainly use the pre-processed data from the last step: **data_dir = "./data/fullres/train"**
+
+
+```bash
+python 3_train.py
+```
+
+The training logs and checkpoints are saved in:
+**logdir = f"./logs/segmamba"**
+
+
+
+
+### Inference
+
+When we have trained our model, we can run inference on all the data in the testing set.
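+
+As a rough illustration only (this is not one of the repository's numbered scripts), loading a trained checkpoint and segmenting a single preprocessed volume could look like the sketch below. The checkpoint filename and the random input tensor are placeholders, and the "module." prefix handling mirrors what 4_predict.py does.
+
+```python
+# Hedged sketch: single-volume inference with a trained SegMamba checkpoint.
+import torch
+from monai.inferers import SlidingWindowInferer
+from model_segmamba.segmamba import SegMamba
+
+model = SegMamba(in_chans=4, out_chans=4,
+                 depths=[2, 2, 2, 2],
+                 feat_size=[48, 96, 192, 384]).cuda().eval()
+
+# Placeholder path; use a checkpoint saved by 3_train.py.
+state_dict = torch.load("./logs/segmamba/model/best_model_xxxx.pt", map_location="cpu")
+state_dict = {k[7:] if k.startswith("module") else k: v for k, v in state_dict.items()}
+model.load_state_dict(state_dict)
+
+inferer = SlidingWindowInferer(roi_size=[128, 128, 128], sw_batch_size=1, overlap=0.5)
+volume = torch.rand(1, 4, 240, 240, 155).cuda()   # stand-in for one preprocessed 4-modality MRI
+with torch.no_grad():
+    logits = inferer(volume, model)               # (1, 4, D, H, W)
+    prediction = logits.argmax(dim=1)             # per-voxel class indices
+```
+
+To run inference over the whole testing set with the provided script: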
+ +```bash +python 4_predict.py +``` + +When this process is done, the prediction cases will be put in this path: +**save_path = "./prediction_results/segmamba"** + +### Metrics computation +We can obtain the Dice score and HD95 on each segmentation target (WT, TC, ET for BraTS2023 dataset) using this code: + +```bash +python 5_compute_metrics.py --pred_name="segmamba" +``` + + + +## Acknowledgement +Many thanks for these repos for their great contribution! + +[https://github.com/MIC-DKFZ/nnUNet](https://github.com/MIC-DKFZ/nnUNet) + +[https://github.com/Project-MONAI/MONAI](https://github.com/Project-MONAI/MONAI) + +[https://github.com/hustvl/Vim](https://github.com/hustvl/Vim) + +[https://github.com/bowang-lab/U-Mamba](https://github.com/bowang-lab/U-Mamba) + diff --git a/SegMamba/causal-conv1d/.DS_Store b/SegMamba/causal-conv1d/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..c6665734a44cf14bd7d2d71492646e87eae6cfa1 Binary files /dev/null and b/SegMamba/causal-conv1d/.DS_Store differ diff --git a/SegMamba/causal-conv1d/AUTHORS b/SegMamba/causal-conv1d/AUTHORS new file mode 100644 index 0000000000000000000000000000000000000000..88193855314bb723ced1860384e417954f559700 --- /dev/null +++ b/SegMamba/causal-conv1d/AUTHORS @@ -0,0 +1 @@ +Tri Dao, tri@tridao.me diff --git a/SegMamba/causal-conv1d/LICENSE b/SegMamba/causal-conv1d/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..5860e4b33f3d9d85fc636137c559331d51783a5b --- /dev/null +++ b/SegMamba/causal-conv1d/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2022, the respective contributors, as shown by the AUTHORS file. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/SegMamba/causal-conv1d/README.md b/SegMamba/causal-conv1d/README.md new file mode 100644 index 0000000000000000000000000000000000000000..4e905425a650d77c5c4854e4c4a261778c4d2690 --- /dev/null +++ b/SegMamba/causal-conv1d/README.md @@ -0,0 +1 @@ +# Causal depthwise conv1d in CUDA with a PyTorch interface diff --git a/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/__init__.py b/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cc4d610a1e557cabd723fb6e33438f03c5c4bf66 --- /dev/null +++ b/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/__init__.py @@ -0,0 +1,3 @@ +__version__ = "1.0.0" + +from causal_conv1d.causal_conv1d_interface import causal_conv1d_fn, causal_conv1d_update diff --git a/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/causal_conv1d_interface.py b/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/causal_conv1d_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..f66143c39e767572ca12112811a384239b8beb63 --- /dev/null +++ b/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/causal_conv1d_interface.py @@ -0,0 +1,104 @@ +# Copyright (c) 2023, Tri Dao. + +import torch +import torch.nn.functional as F + + +import causal_conv1d_cuda + + +class CausalConv1dFn(torch.autograd.Function): + @staticmethod + def forward(ctx, x, weight, bias=None, activation=None): + if activation not in [None, "silu", "swish"]: + raise NotImplementedError("activation must be None, silu, or swish") + if x.stride(2) != 1 and x.stride(1) != 1: + x = x.contiguous() + bias = bias.contiguous() if bias is not None else None + ctx.save_for_backward(x, weight, bias) + ctx.activation = activation in ["silu", "swish"] + out = causal_conv1d_cuda.causal_conv1d_fwd(x, weight, bias, ctx.activation) + return out + + @staticmethod + def backward(ctx, dout): + x, weight, bias = ctx.saved_tensors + if dout.stride(2) != 1 and dout.stride(1) != 1: + dout = dout.contiguous() + # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the + # backward of conv1d with the backward of chunk). + # Here we just pass in None and dx will be allocated in the C++ code. 
+ dx, dweight, dbias = causal_conv1d_cuda.causal_conv1d_bwd( + x, weight, bias, dout, None, ctx.activation + ) + return dx, dweight, dbias if bias is not None else None, None + + +def causal_conv1d_fn(x, weight, bias=None, activation=None): + """ + x: (batch, dim, seqlen) + weight: (dim, width) + bias: (dim,) + activation: either None or "silu" or "swish" + + out: (batch, dim, seqlen) + """ + return CausalConv1dFn.apply(x, weight, bias, activation) + + +def causal_conv1d_ref(x, weight, bias=None, activation=None): + """ + x: (batch, dim, seqlen) + weight: (dim, width) + bias: (dim,) + + out: (batch, dim, seqlen) + """ + if activation not in [None, "silu", "swish"]: + raise NotImplementedError("activation must be None, silu, or swish") + dtype_in = x.dtype + x = x.to(weight.dtype) + seqlen = x.shape[-1] + dim, width = weight.shape + out = F.conv1d(x, weight.unsqueeze(1), bias, padding=width - 1, groups=dim) + out = out[..., :seqlen] + return (out if activation is None else F.silu(out)).to(dtype=dtype_in) + + +def causal_conv1d_update(x, conv_state, weight, bias=None, activation=None): + """ + x: (batch, dim) + conv_state: (batch, dim, width) + weight: (dim, width) + bias: (dim,) + + out: (batch, dim) + """ + if activation not in [None, "silu", "swish"]: + raise NotImplementedError("activation must be None, silu, or swish") + activation = activation in ["silu", "swish"] + return causal_conv1d_cuda.causal_conv1d_update(x, conv_state, weight, bias, activation) + + +def causal_conv1d_update_ref(x, conv_state, weight, bias=None, activation=None): + """ + x: (batch, dim) + conv_state: (batch, dim, width) + weight: (dim, width) + bias: (dim,) + + out: (batch, dim) + """ + if activation not in [None, "silu", "swish"]: + raise NotImplementedError("activation must be None, silu, or swish") + dtype_in = x.dtype + batch, dim = x.shape + width = weight.shape[1] + assert conv_state.shape == (batch, dim, width) + assert weight.shape == (dim, width) + conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1)) # Update state (B D W) + conv_state[:, :, -1] = x + out = torch.sum(conv_state * weight, dim=-1) # (B D) + if bias is not None: + out += bias + return (out if activation is None else F.silu(out)).to(dtype=dtype_in) diff --git a/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d_cuda.cpython-312-x86_64-linux-gnu.so b/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d_cuda.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..6eedd45e2c4084a3f4504b362dd30eace347bdf4 --- /dev/null +++ b/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d_cuda.cpython-312-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dedd9d99881bf7f043ac14c79ad2b71fea8e93f166482597bfe5a3a09849b627 +size 30227360 diff --git a/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d.o b/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d.o new file mode 100644 index 0000000000000000000000000000000000000000..1b3dc32e2af79fd55ef2f2123d3f204448f61d67 --- /dev/null +++ b/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d.o @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef366a1da5c5f81e3aa761f5cd37bc90c046b17068b504191043faaa162230e5 +size 377648 diff --git a/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_bwd.o 
b/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_bwd.o new file mode 100644 index 0000000000000000000000000000000000000000..1caa3061396365bf5e2cfbc5f146b0fca6ec6322 --- /dev/null +++ b/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_bwd.o @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e581d37f9a1a942c98bdb8b2986540fab333e42092bcdf19e211c12fcc347bdb +size 22535976 diff --git a/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_fwd.o b/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_fwd.o new file mode 100644 index 0000000000000000000000000000000000000000..ba054a6717e95ea43aeed852b5ee1ef1f9b0d629 --- /dev/null +++ b/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_fwd.o @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0f5650bf1e870dbbcaafaa14acbde28f03014ccaf73d8137ae5fa2967807af7 +size 6723096 diff --git a/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_update.o b/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_update.o new file mode 100644 index 0000000000000000000000000000000000000000..4f5bfe9e6efb77a5d50ce32cb78229cb47a5bf23 --- /dev/null +++ b/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_update.o @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1391cc2ab0159135d0887ac6d659fa2ec85466bb6c5978722d55868054b12726 +size 910152 diff --git a/SegMamba/causal-conv1d/causal_conv1d.egg-info/PKG-INFO b/SegMamba/causal-conv1d/causal_conv1d.egg-info/PKG-INFO new file mode 100644 index 0000000000000000000000000000000000000000..e933a3d307c4158492494dae393112800cfd6b36 --- /dev/null +++ b/SegMamba/causal-conv1d/causal_conv1d.egg-info/PKG-INFO @@ -0,0 +1,29 @@ +Metadata-Version: 2.4 +Name: causal_conv1d +Version: 1.0.0 +Summary: Causal depthwise conv1d in CUDA, with a PyTorch interface +Home-page: https://github.com/Dao-AILab/causal-conv1d +Author: Tri Dao +Author-email: tri@tridao.me +Classifier: Programming Language :: Python :: 3 +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: Unix +Requires-Python: >=3.7 +Description-Content-Type: text/markdown +License-File: LICENSE +License-File: AUTHORS +Requires-Dist: torch +Requires-Dist: packaging +Requires-Dist: ninja +Dynamic: author +Dynamic: author-email +Dynamic: classifier +Dynamic: description +Dynamic: description-content-type +Dynamic: home-page +Dynamic: license-file +Dynamic: requires-dist +Dynamic: requires-python +Dynamic: summary + +# Causal depthwise conv1d in CUDA with a PyTorch interface diff --git a/SegMamba/causal-conv1d/causal_conv1d.egg-info/SOURCES.txt b/SegMamba/causal-conv1d/causal_conv1d.egg-info/SOURCES.txt new file mode 100644 index 0000000000000000000000000000000000000000..085f8c1e16070f5d8c191ccba0365e90512ae180 --- /dev/null +++ b/SegMamba/causal-conv1d/causal_conv1d.egg-info/SOURCES.txt @@ -0,0 +1,16 @@ +AUTHORS +LICENSE +README.md +setup.py +causal_conv1d/__init__.py +causal_conv1d/causal_conv1d_interface.py +causal_conv1d.egg-info/PKG-INFO +causal_conv1d.egg-info/SOURCES.txt +causal_conv1d.egg-info/dependency_links.txt +causal_conv1d.egg-info/requires.txt +causal_conv1d.egg-info/top_level.txt +csrc/causal_conv1d.cpp +csrc/causal_conv1d_bwd.cu +csrc/causal_conv1d_fwd.cu +csrc/causal_conv1d_update.cu +tests/test_causal_conv1d.py \ No newline at end of file diff --git 
a/SegMamba/causal-conv1d/causal_conv1d.egg-info/dependency_links.txt b/SegMamba/causal-conv1d/causal_conv1d.egg-info/dependency_links.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/SegMamba/causal-conv1d/causal_conv1d.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/SegMamba/causal-conv1d/causal_conv1d.egg-info/requires.txt b/SegMamba/causal-conv1d/causal_conv1d.egg-info/requires.txt new file mode 100644 index 0000000000000000000000000000000000000000..4abdfa4e1eeb60695a9dc850226f9ca2cf8d3c94 --- /dev/null +++ b/SegMamba/causal-conv1d/causal_conv1d.egg-info/requires.txt @@ -0,0 +1,3 @@ +torch +packaging +ninja diff --git a/SegMamba/causal-conv1d/causal_conv1d.egg-info/top_level.txt b/SegMamba/causal-conv1d/causal_conv1d.egg-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..4e7fccded0b0646bbe9f67acaba866f9861f3333 --- /dev/null +++ b/SegMamba/causal-conv1d/causal_conv1d.egg-info/top_level.txt @@ -0,0 +1,2 @@ +causal_conv1d +causal_conv1d_cuda diff --git a/SegMamba/causal-conv1d/causal_conv1d/__init__.py b/SegMamba/causal-conv1d/causal_conv1d/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cc4d610a1e557cabd723fb6e33438f03c5c4bf66 --- /dev/null +++ b/SegMamba/causal-conv1d/causal_conv1d/__init__.py @@ -0,0 +1,3 @@ +__version__ = "1.0.0" + +from causal_conv1d.causal_conv1d_interface import causal_conv1d_fn, causal_conv1d_update diff --git a/SegMamba/causal-conv1d/causal_conv1d/causal_conv1d_interface.py b/SegMamba/causal-conv1d/causal_conv1d/causal_conv1d_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..f66143c39e767572ca12112811a384239b8beb63 --- /dev/null +++ b/SegMamba/causal-conv1d/causal_conv1d/causal_conv1d_interface.py @@ -0,0 +1,104 @@ +# Copyright (c) 2023, Tri Dao. + +import torch +import torch.nn.functional as F + + +import causal_conv1d_cuda + + +class CausalConv1dFn(torch.autograd.Function): + @staticmethod + def forward(ctx, x, weight, bias=None, activation=None): + if activation not in [None, "silu", "swish"]: + raise NotImplementedError("activation must be None, silu, or swish") + if x.stride(2) != 1 and x.stride(1) != 1: + x = x.contiguous() + bias = bias.contiguous() if bias is not None else None + ctx.save_for_backward(x, weight, bias) + ctx.activation = activation in ["silu", "swish"] + out = causal_conv1d_cuda.causal_conv1d_fwd(x, weight, bias, ctx.activation) + return out + + @staticmethod + def backward(ctx, dout): + x, weight, bias = ctx.saved_tensors + if dout.stride(2) != 1 and dout.stride(1) != 1: + dout = dout.contiguous() + # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the + # backward of conv1d with the backward of chunk). + # Here we just pass in None and dx will be allocated in the C++ code. 
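+        # The extension returns (dx, dweight, dbias); below they are mapped back to the
+        # forward arguments (x, weight, bias, activation), with no gradient for activation
+        # and dbias passed through only when a bias tensor was supplied.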
+ dx, dweight, dbias = causal_conv1d_cuda.causal_conv1d_bwd( + x, weight, bias, dout, None, ctx.activation + ) + return dx, dweight, dbias if bias is not None else None, None + + +def causal_conv1d_fn(x, weight, bias=None, activation=None): + """ + x: (batch, dim, seqlen) + weight: (dim, width) + bias: (dim,) + activation: either None or "silu" or "swish" + + out: (batch, dim, seqlen) + """ + return CausalConv1dFn.apply(x, weight, bias, activation) + + +def causal_conv1d_ref(x, weight, bias=None, activation=None): + """ + x: (batch, dim, seqlen) + weight: (dim, width) + bias: (dim,) + + out: (batch, dim, seqlen) + """ + if activation not in [None, "silu", "swish"]: + raise NotImplementedError("activation must be None, silu, or swish") + dtype_in = x.dtype + x = x.to(weight.dtype) + seqlen = x.shape[-1] + dim, width = weight.shape + out = F.conv1d(x, weight.unsqueeze(1), bias, padding=width - 1, groups=dim) + out = out[..., :seqlen] + return (out if activation is None else F.silu(out)).to(dtype=dtype_in) + + +def causal_conv1d_update(x, conv_state, weight, bias=None, activation=None): + """ + x: (batch, dim) + conv_state: (batch, dim, width) + weight: (dim, width) + bias: (dim,) + + out: (batch, dim) + """ + if activation not in [None, "silu", "swish"]: + raise NotImplementedError("activation must be None, silu, or swish") + activation = activation in ["silu", "swish"] + return causal_conv1d_cuda.causal_conv1d_update(x, conv_state, weight, bias, activation) + + +def causal_conv1d_update_ref(x, conv_state, weight, bias=None, activation=None): + """ + x: (batch, dim) + conv_state: (batch, dim, width) + weight: (dim, width) + bias: (dim,) + + out: (batch, dim) + """ + if activation not in [None, "silu", "swish"]: + raise NotImplementedError("activation must be None, silu, or swish") + dtype_in = x.dtype + batch, dim = x.shape + width = weight.shape[1] + assert conv_state.shape == (batch, dim, width) + assert weight.shape == (dim, width) + conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1)) # Update state (B D W) + conv_state[:, :, -1] = x + out = torch.sum(conv_state * weight, dim=-1) # (B D) + if bias is not None: + out += bias + return (out if activation is None else F.silu(out)).to(dtype=dtype_in) diff --git a/SegMamba/causal-conv1d/csrc/causal_conv1d.cpp b/SegMamba/causal-conv1d/csrc/causal_conv1d.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1c80516ac8599d4d80910a1d4d85c4c435cf1e4f --- /dev/null +++ b/SegMamba/causal-conv1d/csrc/causal_conv1d.cpp @@ -0,0 +1,333 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +#include +#include +#include +#include + +#include "causal_conv1d.h" + +#define CHECK_SHAPE(x, ...) TORCH_CHECK(x.sizes() == torch::IntArrayRef({__VA_ARGS__}), #x " must have shape (" #__VA_ARGS__ ")") + +#define DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(ITYPE, NAME, ...) \ + if (ITYPE == at::ScalarType::Half) { \ + using input_t = at::Half; \ + __VA_ARGS__(); \ + } else if (ITYPE == at::ScalarType::BFloat16) { \ + using input_t = at::BFloat16; \ + __VA_ARGS__(); \ + } else if (ITYPE == at::ScalarType::Float) { \ + using input_t = float; \ + __VA_ARGS__(); \ + } else { \ + AT_ERROR(#NAME, " not implemented for input type '", toString(ITYPE), "'"); \ + } + +#define DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(WTYPE, NAME, ...) 
\ + if (WTYPE == at::ScalarType::Half) { \ + using weight_t = at::Half; \ + __VA_ARGS__(); \ + } else if (WTYPE == at::ScalarType::BFloat16) { \ + using weight_t = at::BFloat16; \ + __VA_ARGS__(); \ + } else if (WTYPE == at::ScalarType::Float) { \ + using weight_t = float; \ + __VA_ARGS__(); \ + } else { \ + AT_ERROR(#NAME, " not implemented for weight type '", toString(WTYPE), "'"); \ + } + +template +void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template +void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); + +template +void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template +void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); + +template +void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream); + +void set_conv_params_fwd(ConvParamsBase ¶ms, + // sizes + const size_t batch, + const size_t dim, + const size_t seqlen, + const size_t width, + // device pointers + const at::Tensor x, + const at::Tensor weight, + const at::Tensor out, + void* bias_ptr, + bool silu_activation) { + + // Reset the parameters + memset(¶ms, 0, sizeof(params)); + + params.batch = batch; + params.dim = dim; + params.seqlen = seqlen; + params.width = width; + + params.silu_activation = silu_activation; + + // Set the pointers and strides. + params.x_ptr = x.data_ptr(); + params.weight_ptr = weight.data_ptr(); + params.bias_ptr = bias_ptr; + params.out_ptr = out.data_ptr(); + // All stride are in elements, not bytes. + params.x_batch_stride = x.stride(0); + params.x_c_stride = x.stride(1); + params.x_l_stride = x.stride(-1); + params.weight_c_stride = weight.stride(0); + params.weight_width_stride = weight.stride(1); + params.out_batch_stride = out.stride(0); + params.out_c_stride = out.stride(1); + params.out_l_stride = out.stride(-1); +} + + +void set_conv_params_bwd(ConvParamsBwd ¶ms, + // sizes + const size_t batch, + const size_t dim, + const size_t seqlen, + const size_t width, + // device pointers + const at::Tensor x, + const at::Tensor weight, + void* bias_ptr, + const at::Tensor dout, + const at::Tensor dx, + const at::Tensor dweight, + void* dbias_ptr, + bool silu_activation) { + // Pass in "dout" instead of "out", we're not gonna use "out" at all. + set_conv_params_fwd(params, batch, dim, seqlen, width, + x, weight, dout, bias_ptr, silu_activation); + + // Set the pointers and strides. + params.dout_ptr = dout.data_ptr(); + params.dx_ptr = dx.data_ptr(); + params.dweight_ptr = dweight.data_ptr(); + params.dbias_ptr = dbias_ptr; + // All stride are in elements, not bytes. 
+ params.dout_batch_stride = dout.stride(0); + params.dout_c_stride = dout.stride(1); + params.dout_l_stride = dout.stride(2); + params.dweight_c_stride = dweight.stride(0); + params.dweight_width_stride = dweight.stride(1); + params.dx_batch_stride = dx.stride(0); + params.dx_c_stride = dx.stride(1); + params.dx_l_stride = dx.stride(2); +} + +at::Tensor +causal_conv1d_fwd(const at::Tensor &x, const at::Tensor &weight, + const c10::optional &bias_, + bool silu_activation) { + auto input_type = x.scalar_type(); + auto weight_type = weight.scalar_type(); + TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16); + TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::Half || weight_type == at::ScalarType::BFloat16); + + TORCH_CHECK(x.is_cuda()); + TORCH_CHECK(weight.is_cuda()); + + const auto sizes = x.sizes(); + const int batch_size = sizes[0]; + const int dim = sizes[1]; + const int seqlen = sizes[2]; + const int width = weight.size(-1); + + CHECK_SHAPE(x, batch_size, dim, seqlen); + CHECK_SHAPE(weight, dim, width); + + TORCH_CHECK(x.stride(2) == 1 || x.stride(1) == 1); + const bool is_channel_last = x.stride(1) == 1 && x.stride(2) > 1; + + if (is_channel_last) { + TORCH_CHECK(dim % 8 == 0, "causal_conv1d only supports channel dimension divisible by 8 for now"); + } + TORCH_CHECK(width >= 2 && width <= 4, "causal_conv1d only supports width between 2 and 4"); + + + if (bias_.has_value()) { + auto bias = bias_.value(); + TORCH_CHECK(bias.scalar_type() == weight_type); + TORCH_CHECK(bias.is_cuda()); + TORCH_CHECK(bias.stride(-1) == 1); + CHECK_SHAPE(bias, dim); + } + + at::Tensor out = torch::empty_like(x); + + ConvParamsBase params; + set_conv_params_fwd(params, batch_size, dim, seqlen, width, x, weight, out, + bias_.has_value() ? 
bias_.value().data_ptr() : nullptr, + silu_activation); + + // Otherwise the kernel will be launched from cuda:0 device + // Cast to char to avoid compiler warning about narrowing + at::cuda::CUDAGuard device_guard{(char)x.get_device()}; + auto stream = at::cuda::getCurrentCUDAStream().stream(); + DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(x.scalar_type(), "causal_conv1d_fwd", [&] { + DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(weight.scalar_type(), "causal_conv1d_fwd", [&] { + if (!is_channel_last) { + causal_conv1d_fwd_cuda(params, stream); + } else { + causal_conv1d_channellast_fwd_cuda(params, stream); + } + }); + }); + return out; +} + +std::vector +causal_conv1d_bwd(const at::Tensor &x, const at::Tensor &weight, + const c10::optional &bias_, + at::Tensor &dout, + c10::optional &dx_, + bool silu_activation) { + auto input_type = x.scalar_type(); + auto weight_type = weight.scalar_type(); + TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16); + TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::Half || weight_type == at::ScalarType::BFloat16); + + TORCH_CHECK(x.is_cuda()); + TORCH_CHECK(weight.is_cuda()); + TORCH_CHECK(dout.is_cuda()); + + const auto sizes = x.sizes(); + const int batch_size = sizes[0]; + const int dim = sizes[1]; + const int seqlen = sizes[2]; + const int width = weight.size(-1); + + TORCH_CHECK(width >= 2 && width <= 4, "causal_conv1d only supports width between 2 and 4"); + + CHECK_SHAPE(x, batch_size, dim, seqlen); + CHECK_SHAPE(weight, dim, width); + CHECK_SHAPE(dout, batch_size, dim, seqlen); + + TORCH_CHECK(x.stride(2) == 1 || x.stride(1) == 1); + const bool is_channel_last = x.stride(1) == 1 && x.stride(2) > 1; + if (!is_channel_last && dout.stride(2) != 1) { dout = dout.contiguous(); } + if (is_channel_last && dout.stride(1) != 1) { dout = dout.transpose(-1, -2).contiguous().transpose(-1, -2); } + + if (bias_.has_value()) { + auto bias = bias_.value(); + TORCH_CHECK(bias.scalar_type() == weight_type); + TORCH_CHECK(bias.is_cuda()); + TORCH_CHECK(bias.stride(-1) == 1); + CHECK_SHAPE(bias, dim); + } + + at::Tensor dx; + if (dx_.has_value()) { + dx = dx_.value(); + TORCH_CHECK(dx.scalar_type() == input_type); + TORCH_CHECK(dx.is_cuda()); + CHECK_SHAPE(dx, batch_size, dim, seqlen); + if (!is_channel_last) { TORCH_CHECK(dx.stride(2) == 1); } + if (is_channel_last) { TORCH_CHECK(dx.stride(1) == 1); } + } else { + dx = torch::empty_like(x); + } + + // Otherwise the kernel will be launched from cuda:0 device + // Cast to char to avoid compiler warning about narrowing + at::cuda::CUDAGuard device_guard{(char)x.get_device()}; + + at::Tensor dweight = torch::zeros_like(weight, weight.options().dtype(at::kFloat)); + at::Tensor dbias; + if (bias_.has_value()) { dbias = torch::zeros_like(bias_.value(), bias_.value().options().dtype(at::kFloat)); } + + ConvParamsBwd params; + set_conv_params_bwd(params, batch_size, dim, seqlen, width, + x, weight, bias_.has_value() ? bias_.value().data_ptr() : nullptr, + dout, dx, dweight, bias_.has_value() ? 
dbias.data_ptr() : nullptr, + silu_activation); + + auto stream = at::cuda::getCurrentCUDAStream().stream(); + DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(x.scalar_type(), "causal_conv1d_bwd", [&] { + DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(weight.scalar_type(), "causal_conv1d_bwd", [&] { + if (!is_channel_last) { + causal_conv1d_bwd_cuda(params, stream); + } else { + causal_conv1d_channellast_bwd_cuda(params, stream); + } + }); + }); + return {dx, dweight.to(weight.dtype()), bias_.has_value() ? dbias.to(bias_.value().dtype()) : dbias}; +} + +at::Tensor +causal_conv1d_update(const at::Tensor &x, + const at::Tensor &conv_state, + const at::Tensor &weight, + const c10::optional &bias_, + bool silu_activation) { + auto input_type = x.scalar_type(); + auto weight_type = weight.scalar_type(); + TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16); + TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::Half || weight_type == at::ScalarType::BFloat16); + TORCH_CHECK(conv_state.scalar_type() == input_type); + + TORCH_CHECK(x.is_cuda()); + TORCH_CHECK(conv_state.is_cuda()); + TORCH_CHECK(weight.is_cuda()); + + const auto sizes = x.sizes(); + const int batch_size = sizes[0]; + const int dim = sizes[1]; + const int width = weight.size(-1); + + CHECK_SHAPE(x, batch_size, dim); + CHECK_SHAPE(conv_state, batch_size, dim, width); + CHECK_SHAPE(weight, dim, width); + + TORCH_CHECK(width >= 2 && width <= 4, "causal_conv1d only supports width between 2 and 4"); + + if (bias_.has_value()) { + auto bias = bias_.value(); + TORCH_CHECK(bias.scalar_type() == weight_type); + TORCH_CHECK(bias.is_cuda()); + TORCH_CHECK(bias.stride(-1) == 1); + CHECK_SHAPE(bias, dim); + } + + at::Tensor out = torch::empty_like(x); + + ConvParamsBase params; + set_conv_params_fwd(params, batch_size, dim, /*seqlen=*/1, width, x, weight, out, + bias_.has_value() ? bias_.value().data_ptr() : nullptr, + silu_activation); + params.conv_state_ptr = conv_state.data_ptr(); + // All stride are in elements, not bytes. + params.conv_state_batch_stride = conv_state.stride(0); + params.conv_state_c_stride = conv_state.stride(1); + params.conv_state_l_stride = conv_state.stride(2); + + // Otherwise the kernel will be launched from cuda:0 device + // Cast to char to avoid compiler warning about narrowing + at::cuda::CUDAGuard device_guard{(char)x.get_device()}; + auto stream = at::cuda::getCurrentCUDAStream().stream(); + DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(x.scalar_type(), "causal_conv1d_update", [&] { + DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(weight.scalar_type(), "causal_conv1d_update", [&] { + causal_conv1d_update_cuda(params, stream); + }); + }); + return out; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("causal_conv1d_fwd", &causal_conv1d_fwd, "Causal conv1d forward"); + m.def("causal_conv1d_bwd", &causal_conv1d_bwd, "Causal conv1d backward"); + m.def("causal_conv1d_update", &causal_conv1d_update, "Causal conv1d update"); +} diff --git a/SegMamba/causal-conv1d/csrc/causal_conv1d.h b/SegMamba/causal-conv1d/csrc/causal_conv1d.h new file mode 100644 index 0000000000000000000000000000000000000000..844ed92cfc91a881e58fccfca001a13ebcc434cc --- /dev/null +++ b/SegMamba/causal-conv1d/csrc/causal_conv1d.h @@ -0,0 +1,53 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. 
+ ******************************************************************************/ + +#pragma once + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +struct ConvParamsBase { + using index_t = uint32_t; + + int batch, dim, seqlen, width; + bool silu_activation; + + index_t x_batch_stride; + index_t x_c_stride; + index_t x_l_stride; + index_t weight_c_stride; + index_t weight_width_stride; + index_t out_batch_stride; + index_t out_c_stride; + index_t out_l_stride; + + index_t conv_state_batch_stride; + index_t conv_state_c_stride; + index_t conv_state_l_stride; + + // Common data pointers. + void *__restrict__ x_ptr; + void *__restrict__ weight_ptr; + void *__restrict__ bias_ptr; + void *__restrict__ out_ptr; + + void *__restrict__ conv_state_ptr; +}; + +struct ConvParamsBwd: public ConvParamsBase { + index_t dx_batch_stride; + index_t dx_c_stride; + index_t dx_l_stride; + index_t dweight_c_stride; + index_t dweight_width_stride; + index_t dout_batch_stride; + index_t dout_c_stride; + index_t dout_l_stride; + + // Common data pointers. + void *__restrict__ dx_ptr; + void *__restrict__ dweight_ptr; + void *__restrict__ dbias_ptr; + void *__restrict__ dout_ptr; +}; + diff --git a/SegMamba/causal-conv1d/csrc/causal_conv1d_bwd.cu b/SegMamba/causal-conv1d/csrc/causal_conv1d_bwd.cu new file mode 100644 index 0000000000000000000000000000000000000000..66609750a30a86a284451871ca163d79a0529047 --- /dev/null +++ b/SegMamba/causal-conv1d/csrc/causal_conv1d_bwd.cu @@ -0,0 +1,525 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +#include +#include +#include // For C10_CUDA_CHECK and C10_CUDA_KERNEL_LAUNCH_CHECK + +#include +#include +#include + +#include "causal_conv1d.h" +#include "causal_conv1d_common.h" +#include "static_switch.h" + +template +struct Causal_conv1d_bwd_kernel_traits { + using input_t = input_t_; + using weight_t = weight_t_; + static constexpr int kNThreads = kNThreads_; + static constexpr int kWidth = kWidth_; + static constexpr bool kSiluAct = kSiluAct_; + static constexpr int kNBytes = sizeof(input_t); + static_assert(kNBytes == 2 || kNBytes == 4); + static constexpr int kNElts = kNBytes == 4 ? 4 : 8; + static_assert(kWidth <= kNElts); + // It's possible that we need to do 2 rounds of exchange if input_t is 16 bits + // (since then we'd have 8 values of float, and each round we can exchange 4 floats). + static constexpr int kNExchangeRounds = sizeof(float) / sizeof(input_t); + static constexpr bool kIsVecLoad = kIsVecLoad_; + using vec_t = typename BytesToType::Type; + using BlockLoadT = cub::BlockLoad; + using BlockLoadVecT = cub::BlockLoad; + using BlockStoreT = cub::BlockStore; + using BlockStoreVecT = cub::BlockStore; + using BlockReduceFloatT = cub::BlockReduce; + static constexpr int kSmemIOSize = kIsVecLoad + ? 0 + : std::max({sizeof(typename BlockLoadT::TempStorage), sizeof(typename BlockStoreT::TempStorage)}); + static constexpr int kSmemExchangeSize = kNThreads * kNBytes * kNElts * (!kSiluAct ? 1 : kNExchangeRounds + 1); + static constexpr int kSmemSize = std::max({kSmemExchangeSize, + int(sizeof(typename BlockReduceFloatT::TempStorage))}) + (kIsVecLoad ? 
0 : kSmemIOSize); +}; + +template +__global__ __launch_bounds__(Ktraits::kNThreads) +void causal_conv1d_bwd_kernel(ConvParamsBwd params) { + constexpr int kWidth = Ktraits::kWidth; + constexpr int kNThreads = Ktraits::kNThreads; + constexpr bool kSiluAct = Ktraits::kSiluAct; + constexpr int kNElts = Ktraits::kNElts; + constexpr int kNExchangeRounds = Ktraits::kNExchangeRounds; + constexpr bool kIsVecLoad = Ktraits::kIsVecLoad; + using input_t = typename Ktraits::input_t; + using vec_t = typename Ktraits::vec_t; + using weight_t = typename Ktraits::weight_t; + + // Shared memory. + extern __shared__ char smem_[]; + auto& smem_load = reinterpret_cast(smem_); + auto& smem_load_vec = reinterpret_cast(smem_); + auto& smem_store = reinterpret_cast(smem_); + auto& smem_store_vec = reinterpret_cast(smem_); + vec_t *smem_exchange = reinterpret_cast(smem_ + Ktraits::kSmemIOSize); + vec_t *smem_exchange_x = reinterpret_cast(smem_ + Ktraits::kSmemIOSize) + kNThreads * kNExchangeRounds; + auto& smem_reduce_float = *reinterpret_cast(smem_ + Ktraits::kSmemIOSize); + + const int tidx = threadIdx.x; + const int batch_id = blockIdx.x; + const int dim_id = blockIdx.y; + input_t *x = reinterpret_cast(params.x_ptr) + batch_id * params.x_batch_stride + + dim_id * params.x_c_stride; + weight_t *weight = reinterpret_cast(params.weight_ptr) + dim_id * params.weight_c_stride; + input_t *dout = reinterpret_cast(params.dout_ptr) + batch_id * params.dout_batch_stride + + dim_id * params.dout_c_stride; + input_t *dx = reinterpret_cast(params.dx_ptr) + batch_id * params.dx_batch_stride + + dim_id * params.dx_c_stride; + float *dweight = reinterpret_cast(params.dweight_ptr) + dim_id * params.dweight_c_stride; + float bias_val = params.bias_ptr == nullptr ? 0.f : float(reinterpret_cast(params.bias_ptr)[dim_id]); + + // Thread kNThreads - 1 will load the first elements of the next chunk so we initialize those to 0. 
+ if (tidx == 0) { + if constexpr (!kSiluAct) { + input_t zeros[kNElts] = {0}; + smem_exchange[0] = reinterpret_cast(zeros)[0]; + } else { + float zeros[kNElts] = {0}; + #pragma unroll + for (int r = 0; r < kNExchangeRounds; ++r) { + smem_exchange[r * kNThreads] = reinterpret_cast(zeros)[r]; + } + } + } + + float weight_vals[kWidth]; + #pragma unroll + for (int i = 0; i < kWidth; ++i) { weight_vals[i] = weight[i * params.weight_width_stride]; } + + float dweight_vals[kWidth] = {0}; + float dbias_val = 0; + + constexpr int kChunkSize = kNThreads * kNElts; + const int n_chunks = (params.seqlen + kChunkSize - 1) / kChunkSize; + x += (n_chunks - 1) * kChunkSize; + dout += (n_chunks - 1) * kChunkSize; + dx += (n_chunks - 1) * kChunkSize; + for (int chunk = n_chunks - 1; chunk >= 0; --chunk) { + input_t x_vals_load[2 * kNElts] = {0}; + input_t dout_vals_load[2 * kNElts] = {0}; + if constexpr(kIsVecLoad) { + Ktraits::BlockLoadVecT(smem_load_vec).Load(reinterpret_cast(x), *reinterpret_cast(&x_vals_load[kNElts]), (params.seqlen - chunk * kChunkSize) / kNElts); + Ktraits::BlockLoadVecT(smem_load_vec).Load(reinterpret_cast(dout), *reinterpret_cast(&dout_vals_load[0]), (params.seqlen - chunk * kChunkSize) / kNElts); + } else { + __syncthreads(); + Ktraits::BlockLoadT(smem_load).Load(x, *reinterpret_cast(&x_vals_load[kNElts]), params.seqlen - chunk * kChunkSize); + __syncthreads(); + Ktraits::BlockLoadT(smem_load).Load(dout, *reinterpret_cast(&dout_vals_load[0]), params.seqlen - chunk * kChunkSize); + } + float dout_vals[2 * kNElts], x_vals[2 * kNElts]; + if constexpr (!kSiluAct) { + __syncthreads(); + // Thread 0 don't write yet, so that thread kNThreads - 1 can read + // the first elements of the next chunk. + if (tidx > 0) { smem_exchange[tidx] = reinterpret_cast(dout_vals_load)[0]; } + __syncthreads(); + reinterpret_cast(dout_vals_load)[1] = smem_exchange[tidx < kNThreads - 1 ? tidx + 1 : 0]; + __syncthreads(); + // Now thread 0 can write the first elements of the current chunk. + if (tidx == 0) { smem_exchange[tidx] = reinterpret_cast(dout_vals_load)[0]; } + #pragma unroll + for (int i = 0; i < 2 * kNElts; ++i) { + dout_vals[i] = float(dout_vals_load[i]); + x_vals[i] = float(x_vals_load[i]); + } + } else { + if (tidx == 0 && chunk > 0) { + if constexpr(kIsVecLoad) { + reinterpret_cast(x_vals_load)[0] = reinterpret_cast(x)[-1]; + } else { + #pragma unroll + for (int i = 0; i < kNElts; ++i) { + if (chunk * kChunkSize + i < params.seqlen) { x_vals_load[i] = x[-kNElts + i]; } + } + } + } + __syncthreads(); + smem_exchange_x[tidx] = reinterpret_cast(x_vals_load)[1]; + __syncthreads(); + if (tidx > 0) { reinterpret_cast(x_vals_load)[0] = smem_exchange_x[tidx - 1]; } + #pragma unroll + for (int i = 0; i < 2 * kNElts; ++i) { x_vals[i] = float(x_vals_load[i]); } + // Recompute the output + #pragma unroll + for (int i = 0; i < kNElts; ++i) { + float out_val = bias_val; + #pragma unroll + for (int w = 0; w < kWidth; ++w) { + out_val += weight_vals[w] * x_vals[kNElts + i - (kWidth - w - 1)]; + } + float out_sigmoid_val = 1.0f / (1.0f + expf(-out_val)); + dout_vals[i] = float(dout_vals_load[i]) * out_sigmoid_val + * (1.0f + out_val * (1.0f - out_sigmoid_val)); + } + // Exchange the dout_vals. It's possible that we need to do 2 rounds of exchange + // if input_t is 16 bits (since then we'd have 8 values of float) + __syncthreads(); + // Thread 0 don't write yet, so that thread kNThreads - 1 can read + // the first elements of the next chunk. 
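For orientation, the exchange-round count worked out with concrete element sizes; this only restates kNExchangeRounds = sizeof(float) / sizeof(input_t) from the traits above.
// kNExchangeRounds = sizeof(float) / sizeof(input_t)
//   fp32 input : 4 / 4 = 1 round  (kNElts = 4 floats = 16 B = one vec_t)
//   fp16/bf16  : 4 / 2 = 2 rounds (kNElts = 8 floats = 32 B = two vec_t)
// Each round moves one 16-byte vec_t per thread through smem_exchange.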
+ if (tidx > 0) { + #pragma unroll + for (int r = 0; r < kNExchangeRounds; ++r) { + smem_exchange[r * kNThreads + tidx] = reinterpret_cast(dout_vals)[r]; + } + } + __syncthreads(); + #pragma unroll + for (int r = 0; r < kNExchangeRounds; ++r) { + reinterpret_cast(dout_vals)[kNExchangeRounds + r] + = smem_exchange[r * kNThreads + (tidx < kNThreads - 1 ? tidx + 1 : 0)]; + } + __syncthreads(); + // Now thread 0 can write the first elements of the current chunk. + if (tidx == 0) { + #pragma unroll + for (int r = 0; r < kNExchangeRounds; ++r) { + smem_exchange[r * kNThreads + tidx] = reinterpret_cast(dout_vals)[r]; + } + } + } + dout -= kChunkSize; + x -= kChunkSize; + + #pragma unroll + for (int i = 0; i < kNElts; ++i) { dbias_val += dout_vals[i]; } + + float dx_vals[kNElts] = {0}; + #pragma unroll + for (int i = 0; i < kNElts; ++i) { + #pragma unroll + for (int w = 0; w < kWidth; ++w) { + dx_vals[i] += weight_vals[w] * dout_vals[i + kWidth - w - 1]; + } + } + + input_t dx_vals_store[kNElts]; + #pragma unroll + for (int i = 0; i < kNElts; ++i) { dx_vals_store[i] = dx_vals[i]; } + if constexpr(kIsVecLoad) { + Ktraits::BlockStoreVecT(smem_store_vec).Store(reinterpret_cast(dx), reinterpret_cast(dx_vals_store), (params.seqlen - chunk * kChunkSize) / kNElts); + } else { + Ktraits::BlockStoreT(smem_store).Store(dx, dx_vals_store, params.seqlen - chunk * kChunkSize); + } + dx -= kChunkSize; + + #pragma unroll + for (int w = 0; w < kWidth; ++w) { + #pragma unroll + for (int i = 0; i < kNElts; ++i) { + dweight_vals[w] += x_vals[kNElts + i] * dout_vals[i + kWidth - w - 1]; + } + } + } + + #pragma unroll + for (int w = 0; w < kWidth; ++w) { + __syncthreads(); + dweight_vals[w] = Ktraits::BlockReduceFloatT(smem_reduce_float).Sum(dweight_vals[w]); + if (tidx == 0) { + atomicAdd(&reinterpret_cast(dweight)[w * params.dweight_width_stride], dweight_vals[w]); + } + } + if (params.bias_ptr != nullptr) { + __syncthreads(); + dbias_val = Ktraits::BlockReduceFloatT(smem_reduce_float).Sum(dbias_val); + if (tidx == 0) { + atomicAdd(&reinterpret_cast(params.dbias_ptr)[dim_id], dbias_val); + } + } +} + +template +void causal_conv1d_bwd_launch(ConvParamsBwd ¶ms, cudaStream_t stream) { + static constexpr int kNElts = sizeof(input_t) == 4 ? 4 : 8; + BOOL_SWITCH(params.seqlen % kNElts == 0, kIsVecLoad, [&] { + BOOL_SWITCH(params.silu_activation, kSiluAct, [&] { + using Ktraits = Causal_conv1d_bwd_kernel_traits; + constexpr int kSmemSize = Ktraits::kSmemSize; + dim3 grid(params.batch, params.dim); + auto kernel = &causal_conv1d_bwd_kernel; + if (kSmemSize >= 48 * 1024) { + C10_CUDA_CHECK(cudaFuncSetAttribute( + kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize)); + } + kernel<<>>(params); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); + }); +} + +template +void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream) { + if (params.width == 2) { + causal_conv1d_bwd_launch<128, 2, input_t, weight_t>(params, stream); + } else if (params.width == 3) { + causal_conv1d_bwd_launch<128, 3, input_t, weight_t>(params, stream); + } else if (params.width == 4) { + causal_conv1d_bwd_launch<128, 4, input_t, weight_t>(params, stream); + } +} + +template +struct Causal_conv1d_channellast_bwd_kernel_traits { + // The cache line is 128 bytes, and we try to read 16 bytes per thread. + // So we have 8 threads per "row", so 32 or 64 elements in the channel dimension. + // That leaves 4 columns per warp, and so 16 columns per block (assuming each block has 128 + // threads). 
Each each load is 16 x 32|64 elements in the L x C dimensions. + using input_t = input_t_; + using weight_t = weight_t_; + static constexpr bool kSiluAct = kSiluAct_; + static constexpr int kNThreads = kNThreads_; + static_assert(kNThreads % 32 == 0); + static constexpr int kNWarps = kNThreads / 32; + static constexpr int kWidth = kWidth_; + static constexpr int kChunkSizeL = kChunkSizeL_; + static constexpr int kNBytes = sizeof(input_t); + static_assert(kNBytes == 2 || kNBytes == 4); + static constexpr int kNElts = kNBytes == 4 ? 4 : 8; + static constexpr int kNEltsPerRow = 128 / kNBytes; + static constexpr int kNThreadsPerRow = kNEltsPerRow / kNElts; // Always 8 for now + static_assert(kNThreadsPerRow * kNBytes * kNElts == 128); + static constexpr int kNColsPerWarp = 32 / kNThreadsPerRow; // Always 4 for now + static_assert(kNColsPerWarp * kNThreadsPerRow == 32); + static constexpr int kNColsPerLoad = kNColsPerWarp * kNWarps; + static constexpr int kNLoads = kChunkSizeL / kNColsPerLoad; + static_assert(kNLoads * kNColsPerLoad == kChunkSizeL); + static constexpr bool kIsVecLoad = kIsVecLoad_; + using vec_t = typename BytesToType::Type; + // using BlockLoadT = cub::BlockLoad; + // using BlockStoreT = cub::BlockStore; + // static constexpr int kSmemSize = std::max({sizeof(typename BlockLoadT::TempStorage), + // sizeof(typename BlockStoreT::TempStorage)}); + // static constexpr int kSmemSize = kChunkSizeL * kNEltsPerRow * kNBytes; +}; + +template +__global__ __launch_bounds__(Ktraits::kNThreads) +void causal_conv1d_channellast_bwd_kernel(ConvParamsBwd params) { + constexpr int kWidth = Ktraits::kWidth; + constexpr int kNThreads = Ktraits::kNThreads; + constexpr bool kSiluAct = Ktraits::kSiluAct; + constexpr int kNElts = Ktraits::kNElts; + constexpr int kNWarp = Ktraits::kNWarps; + constexpr int kNThreadsPerC = Ktraits::kNThreadsPerRow; + constexpr int kLPerLoad = Ktraits::kNColsPerLoad; + constexpr int kChunkSizeL = Ktraits::kChunkSizeL; + constexpr int kChunkSizeC = Ktraits::kNEltsPerRow; + using input_t = typename Ktraits::input_t; + using vec_t = typename Ktraits::vec_t; + using weight_t = typename Ktraits::weight_t; + + // Shared memory. 
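A rough sketch of the tile geometry implied by the traits above; the numbers assume the fp16 case with kNThreads = 128 and are illustrative only.
// kNEltsPerRow    = 128 / 2      = 64 channels per 128-byte smem row
// kNThreadsPerRow = 64 / 8       = 8 threads cooperating on one row
// kNColsPerWarp   = 32 / 8       = 4 L-positions per warp per load
// kNColsPerLoad   = 4 * kNWarps  = 16 L-positions per load iteration
// kNLoads         = kChunkSizeL / 16 iterations to fill one L x C tile
// The extra +kNElts columns in the smem tiles declared just below are
// presumably padding so that the later column-wise (per-channel) accesses
// avoid shared-memory bank conflicts.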
+ __shared__ input_t dout_smem[kChunkSizeL + kWidth - 1][kChunkSizeC + kNElts]; + __shared__ input_t x_smem[kWidth - 1 + kChunkSizeL + kWidth - 1][kChunkSizeC + kNElts]; + + const int tid = threadIdx.x; + const int l_idx = tid / kNThreadsPerC; + const int c_idx = tid % kNThreadsPerC; + const int batch_id = blockIdx.x; + const int chunk_l_id = blockIdx.y; + const int chunk_c_id = blockIdx.z; + input_t *x = reinterpret_cast(params.x_ptr) + batch_id * params.x_batch_stride + + (chunk_l_id * kChunkSizeL + l_idx) * params.x_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts; + weight_t *weight = reinterpret_cast(params.weight_ptr) + + chunk_c_id * kChunkSizeC * params.weight_c_stride; + input_t *dout = reinterpret_cast(params.dout_ptr) + batch_id * params.dout_batch_stride + + (chunk_l_id * kChunkSizeL + l_idx) * params.dout_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts; + input_t *dx = reinterpret_cast(params.dx_ptr) + batch_id * params.dx_batch_stride + + (chunk_l_id * kChunkSizeL + l_idx) * params.dx_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts; + float *dweight = reinterpret_cast(params.dweight_ptr) + + chunk_c_id * kChunkSizeC * params.dweight_c_stride; + + #pragma unroll + for (int l = 0; l < Ktraits::kNLoads; ++l) { + input_t dout_vals_load[kNElts] = {0}; + input_t x_vals_load[kNElts] = {0}; + if (chunk_l_id * kChunkSizeL + l * kLPerLoad + l_idx < params.seqlen + && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) { + reinterpret_cast(dout_vals_load)[0] = *reinterpret_cast(dout + l * kLPerLoad * params.dout_l_stride); + reinterpret_cast(x_vals_load)[0] = *reinterpret_cast(x + l * kLPerLoad * params.x_l_stride); + } + reinterpret_cast(dout_smem[l * kLPerLoad + l_idx])[c_idx] = reinterpret_cast(dout_vals_load)[0]; + reinterpret_cast(x_smem[kWidth - 1 + l * kLPerLoad + l_idx])[c_idx] = reinterpret_cast(x_vals_load)[0]; + } + // Load the elements from the previous chunk or next chunk that are needed for convolution. 
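For readability, the row layout along L of the shared-memory tiles that the halo loads below fill in, as read off the declarations above.
// x_smem rows:
//   [0 .. kWidth-2]                        left halo: tail of the previous chunk
//   [kWidth-1 .. kWidth-2 + kChunkSizeL]   body: this chunk's x values
//   [.. + (kWidth-1) more rows]            right halo: head of the next chunk,
//                                          used only when recomputing SiLU outputs
// dout_smem rows: the kChunkSizeL body plus a (kWidth-1) right halo of dout,
// since dx at position t needs dout at positions t .. t+kWidth-1.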
+ if (l_idx < kWidth - 1) { + input_t dout_vals_load[kNElts] = {0}; + input_t x_vals_load[kNElts] = {0}; + if ((chunk_l_id + 1) * kChunkSizeL + l_idx < params.seqlen + && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) { + reinterpret_cast(dout_vals_load)[0] = *reinterpret_cast(dout + kChunkSizeL * params.dout_l_stride); + } + if (chunk_l_id * kChunkSizeL + l_idx - (kWidth - 1) >= 0 + && chunk_l_id * kChunkSizeL + l_idx - (kWidth - 1) < params.seqlen + && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) { + reinterpret_cast(x_vals_load)[0] = *reinterpret_cast(x - (kWidth - 1) * params.x_l_stride); + } + reinterpret_cast(dout_smem[kChunkSizeL + l_idx])[c_idx] = reinterpret_cast(dout_vals_load)[0]; + reinterpret_cast(x_smem[l_idx])[c_idx] = reinterpret_cast(x_vals_load)[0]; + } + // Need to load (kWdith - 1) extra x's on the right to recompute the (kChunkSizeL + kWidth - 1) outputs + if constexpr (kSiluAct) { + if (l_idx < kWidth - 1) { + input_t x_vals_load[kNElts] = {0}; + if ((chunk_l_id + 1) * kChunkSizeL + l_idx < params.seqlen + && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) { + reinterpret_cast(x_vals_load)[0] = *reinterpret_cast(x + kChunkSizeL * params.x_l_stride); + } + reinterpret_cast(x_smem[kWidth - 1 + kChunkSizeL + l_idx])[c_idx] = reinterpret_cast(x_vals_load)[0]; + } + } + + __syncthreads(); + + constexpr int kLPerThread = std::min(kChunkSizeL * kChunkSizeC / kNThreads, kChunkSizeL); + static_assert(kLPerThread * kNThreads == kChunkSizeL * kChunkSizeC); + constexpr int kNThreadsPerRow = kChunkSizeL / kLPerThread; + static_assert(kNThreadsPerRow * kLPerThread == kChunkSizeL); + // kChunkSizeL, kLPerThread, kNThreadsPerRow should be powers of 2 for simplicity + static_assert((kChunkSizeL & (kChunkSizeL - 1)) == 0); + static_assert((kLPerThread & (kLPerThread - 1)) == 0); + static_assert((kNThreadsPerRow & (kNThreadsPerRow - 1)) == 0); + static_assert(kNThreadsPerRow <= 32); + + const int row_idx = tid / kNThreadsPerRow; + const int col_idx = tid % kNThreadsPerRow; + + float bias_val = params.bias_ptr == nullptr || chunk_c_id * kChunkSizeC + row_idx >= params.dim ? 
0.f : float(reinterpret_cast(params.bias_ptr)[chunk_c_id * kChunkSizeC + row_idx]); + float weight_vals[kWidth] = {0}; + if (chunk_c_id * kChunkSizeC + row_idx < params.dim) { + #pragma unroll + for (int w = 0; w < kWidth; ++w) { + weight_vals[w] = weight[row_idx * params.weight_c_stride + w * params.weight_width_stride]; + } + } + float dout_vals[kLPerThread + kWidth - 1]; + float x_vals[kWidth - 1 + kLPerThread + kWidth - 1]; + #pragma unroll + for (int i = 0; i < kWidth - 1 + kLPerThread; ++i) { + dout_vals[i] = float(dout_smem[col_idx * kLPerThread + i][row_idx]); + x_vals[i] = float(x_smem[col_idx * kLPerThread + i][row_idx]); + } + + if constexpr (kSiluAct) { // Recompute the output + #pragma unroll + for (int i = kWidth - 1 + kLPerThread; i < kWidth - 1 + kLPerThread + kWidth - 1; ++i) { + x_vals[i] = float(x_smem[col_idx * kLPerThread + i][row_idx]); + } + #pragma unroll + for (int i = 0; i < kLPerThread + kWidth - 1; ++i) { + float out_val = bias_val; + #pragma unroll + for (int w = 0; w < kWidth; ++w) { out_val += weight_vals[w] * x_vals[i + w]; } + float out_val_sigmoid = 1.f / (1.f + expf(-out_val)); + dout_vals[i] *= out_val_sigmoid * (1 + out_val * (1 - out_val_sigmoid)); + } + } + + float dweight_vals[kWidth] = {0}; + SumOp sum_op; + #pragma unroll + for (int w = 0; w < kWidth; ++w) { + #pragma unroll + for (int i = 0; i < kLPerThread; ++i) { dweight_vals[w] += x_vals[i + w] * dout_vals[i]; } + dweight_vals[w] = Allreduce::run(dweight_vals[w], sum_op); + if (col_idx == 0 && chunk_c_id * kChunkSizeC + row_idx < params.dim) { + atomicAdd(&reinterpret_cast(dweight)[row_idx * params.dweight_c_stride + w * params.dweight_width_stride], dweight_vals[w]); + } + } + + if (params.bias_ptr != nullptr) { + float dbias_val = 0.f; + for (int i = 0; i < kLPerThread; ++i) { dbias_val += dout_vals[i]; } + dbias_val = Allreduce::run(dbias_val, sum_op); + if (col_idx == 0 && chunk_c_id * kChunkSizeC + row_idx < params.dim) { + atomicAdd(&reinterpret_cast(params.dbias_ptr)[chunk_c_id * kChunkSizeC + row_idx], dbias_val); + } + } + + float dx_vals[kLPerThread] = {0}; + #pragma unroll + for (int i = 0; i < kLPerThread; ++i) { + #pragma unroll + for (int w = 0; w < kWidth; ++w) { dx_vals[i] += weight_vals[kWidth - 1 - w] * dout_vals[i + w]; } + } + // Since kNThreadsPerRow is a power of 2 and <= 32, we only need syncwarp and not syncthreads. 
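One extra step of the argument behind the warp-level sync, spelled out; this is a reading of the indexing above rather than a statement from the original comments.
// Every element x_smem[l][c] is read (into x_vals) and later overwritten (with
// dx_vals) only by threads whose row_idx == c, i.e. by the kNThreadsPerRow
// consecutive thread ids c*kNThreadsPerRow .. c*kNThreadsPerRow + kNThreadsPerRow - 1.
// Because kNThreadsPerRow is a power of two and <= 32, that group never
// straddles a warp boundary, so __syncwarp() already orders the reads before
// the writes among all threads that could conflict.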
+ __syncwarp(); + #pragma unroll + for (int i = 0; i < kLPerThread; ++i) { x_smem[col_idx * kLPerThread + i][row_idx] = dx_vals[i]; } + __syncthreads(); + + #pragma unroll + for (int l = 0; l < Ktraits::kNLoads; ++l) { + input_t dx_vals_store[kNElts]; + reinterpret_cast(dx_vals_store)[0] = reinterpret_cast(x_smem[l * kLPerLoad + l_idx])[c_idx]; + if (chunk_l_id * kChunkSizeL + l * kLPerLoad + l_idx < params.seqlen + && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) { + *reinterpret_cast(dx + l * kLPerLoad * params.dx_l_stride) = reinterpret_cast(dx_vals_store)[0]; + } + } + +} + +template +void causal_conv1d_channellast_bwd_launch(ConvParamsBwd ¶ms, cudaStream_t stream) { + BOOL_SWITCH(params.silu_activation, kSiluAct, [&] { + using Ktraits = Causal_conv1d_channellast_bwd_kernel_traits; + // constexpr int kSmemSize = Ktraits::kSmemSize; + constexpr int kChunkSizeL = Ktraits::kChunkSizeL; + constexpr int kChunkSizeC = Ktraits::kNEltsPerRow; + const int n_chunks_L = (params.seqlen + kChunkSizeL - 1) / kChunkSizeL; + const int n_chunks_C = (params.dim + kChunkSizeC - 1) / kChunkSizeC; + dim3 grid(params.batch, n_chunks_L, n_chunks_C); + dim3 block(Ktraits::kNThreads); + auto kernel = &causal_conv1d_channellast_bwd_kernel; + // if (kSmemSize >= 48 * 1024) { + // C10_CUDA_CHECK(cudaFuncSetAttribute( + // kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize)); + // } + // kernel<<>>(params); + kernel<<>>(params); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); +} + +template +void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream) { + if (params.width == 2) { + causal_conv1d_channellast_bwd_launch<128, 2, input_t, weight_t>(params, stream); + } else if (params.width == 3) { + causal_conv1d_channellast_bwd_launch<128, 3, input_t, weight_t>(params, stream); + } else if (params.width == 4) { + causal_conv1d_channellast_bwd_launch<128, 4, input_t, weight_t>(params, stream); + } +} + +template void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); + +template void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); \ No newline at end of file diff --git 
a/SegMamba/causal-conv1d/csrc/causal_conv1d_common.h b/SegMamba/causal-conv1d/csrc/causal_conv1d_common.h new file mode 100644 index 0000000000000000000000000000000000000000..8dd6a333b52163986c085f71475709706ce8f9c3 --- /dev/null +++ b/SegMamba/causal-conv1d/csrc/causal_conv1d_common.h @@ -0,0 +1,64 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +#pragma once + +#include +#include + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +template struct BytesToType {}; + +template<> struct BytesToType<16> { + using Type = uint4; + static_assert(sizeof(Type) == 16); +}; + +template<> struct BytesToType<8> { + using Type = uint64_t; + static_assert(sizeof(Type) == 8); +}; + +template<> struct BytesToType<4> { + using Type = uint32_t; + static_assert(sizeof(Type) == 4); +}; + +template<> struct BytesToType<2> { + using Type = uint16_t; + static_assert(sizeof(Type) == 2); +}; + +template<> struct BytesToType<1> { + using Type = uint8_t; + static_assert(sizeof(Type) == 1); +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct SumOp { +__device__ inline T operator()(T const & x, T const & y) { return x + y; } +}; + +template +struct Allreduce { + static_assert(THREADS == 32 || THREADS == 16 || THREADS == 8 || THREADS == 4); + template + static __device__ inline T run(T x, Operator &op) { + constexpr int OFFSET = THREADS / 2; + x = op(x, __shfl_xor_sync(uint32_t(-1), x, OFFSET)); + return Allreduce::run(x, op); + } +}; + +template<> +struct Allreduce<2> { +template +static __device__ inline T run(T x, Operator &op) { + x = op(x, __shfl_xor_sync(uint32_t(-1), x, 1)); + return x; +} +}; diff --git a/SegMamba/causal-conv1d/csrc/causal_conv1d_fwd.cu b/SegMamba/causal-conv1d/csrc/causal_conv1d_fwd.cu new file mode 100644 index 0000000000000000000000000000000000000000..74a1459f88a87ef427075a25e5081899e382efc0 --- /dev/null +++ b/SegMamba/causal-conv1d/csrc/causal_conv1d_fwd.cu @@ -0,0 +1,350 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +#include +#include +#include // For C10_CUDA_CHECK and C10_CUDA_KERNEL_LAUNCH_CHECK + +#include +#include + +#include "causal_conv1d.h" +#include "causal_conv1d_common.h" +#include "static_switch.h" + +template +struct Causal_conv1d_fwd_kernel_traits { + using input_t = input_t_; + using weight_t = weight_t_; + static constexpr int kNThreads = kNThreads_; + static constexpr int kWidth = kWidth_; + static constexpr int kNBytes = sizeof(input_t); + static_assert(kNBytes == 2 || kNBytes == 4); + static constexpr int kNElts = kNBytes == 4 ? 4 : 8; + static_assert(kWidth <= kNElts); + static constexpr bool kIsVecLoad = kIsVecLoad_; + using vec_t = typename BytesToType::Type; + using BlockLoadT = cub::BlockLoad; + using BlockLoadVecT = cub::BlockLoad; + using BlockStoreT = cub::BlockStore; + using BlockStoreVecT = cub::BlockStore; + static constexpr int kSmemIOSize = kIsVecLoad + ? 
0 + : std::max({sizeof(typename BlockLoadT::TempStorage), sizeof(typename BlockStoreT::TempStorage)}); + static constexpr int kSmemExchangeSize = kNThreads * kNBytes * kNElts; + static constexpr int kSmemSize = kSmemIOSize + kSmemExchangeSize; +}; + +template +__global__ __launch_bounds__(Ktraits::kNThreads) +void causal_conv1d_fwd_kernel(ConvParamsBase params) { + constexpr int kWidth = Ktraits::kWidth; + constexpr int kNThreads = Ktraits::kNThreads; + constexpr int kNElts = Ktraits::kNElts; + constexpr bool kIsVecLoad = Ktraits::kIsVecLoad; + using input_t = typename Ktraits::input_t; + using vec_t = typename Ktraits::vec_t; + using weight_t = typename Ktraits::weight_t; + + // Shared memory. + extern __shared__ char smem_[]; + auto& smem_load = reinterpret_cast(smem_); + auto& smem_load_vec = reinterpret_cast(smem_); + auto& smem_store = reinterpret_cast(smem_); + auto& smem_store_vec = reinterpret_cast(smem_); + vec_t *smem_exchange = reinterpret_cast(smem_ + Ktraits::kSmemIOSize); + + const int tidx = threadIdx.x; + const int batch_id = blockIdx.x; + const int channel_id = blockIdx.y; + input_t *x = reinterpret_cast(params.x_ptr) + batch_id * params.x_batch_stride + + channel_id * params.x_c_stride; + weight_t *weight = reinterpret_cast(params.weight_ptr) + channel_id * params.weight_c_stride; + input_t *out = reinterpret_cast(params.out_ptr) + batch_id * params.out_batch_stride + + channel_id * params.out_c_stride; + float bias_val = params.bias_ptr == nullptr ? 0.f : float(reinterpret_cast(params.bias_ptr)[channel_id]); + + // Thread 0 will load the last elements of the previous chunk, so we initialize those to 0. + if (tidx == 0) { + input_t zeros[kNElts] = {0}; + smem_exchange[kNThreads - 1] = reinterpret_cast(zeros)[0]; + } + + float weight_vals[kWidth]; + #pragma unroll + for (int i = 0; i < kWidth; ++i) { weight_vals[i] = float(weight[i * params.weight_width_stride]); } + + constexpr int kChunkSize = kNThreads * kNElts; + const int n_chunks = (params.seqlen + kChunkSize - 1) / kChunkSize; + for (int chunk = 0; chunk < n_chunks; ++chunk) { + input_t x_vals_load[2 * kNElts] = {0}; + if constexpr(kIsVecLoad) { + Ktraits::BlockLoadVecT(smem_load_vec).Load(reinterpret_cast(x), *reinterpret_cast(&x_vals_load[kNElts]), (params.seqlen - chunk * kChunkSize) / kNElts); + } else { + __syncthreads(); + Ktraits::BlockLoadT(smem_load).Load(x, *reinterpret_cast(&x_vals_load[kNElts]), params.seqlen - chunk * kChunkSize); + } + x += kChunkSize; + __syncthreads(); + // Thread kNThreads - 1 don't write yet, so that thread 0 can read + // the last elements of the previous chunk. + if (tidx < kNThreads - 1) { smem_exchange[tidx] = reinterpret_cast(x_vals_load)[1]; } + __syncthreads(); + reinterpret_cast(x_vals_load)[0] = smem_exchange[tidx > 0 ? tidx - 1 : kNThreads - 1]; + __syncthreads(); + // Now thread kNThreads - 1 can write the last elements of the current chunk. 
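A sketch of the per-thread register layout produced by this exchange, derived from the indexing used below; shown for orientation only.
// After the exchange each thread holds, in x_vals_load:
//   [0 .. kNElts-1]         the kNElts elements owned by the thread to its left
//                           (thread 0 instead receives the previous chunk's tail,
//                            or zeros on the very first chunk)
//   [kNElts .. 2*kNElts-1]  the thread's own kNElts elements
// Output position i then reads the causal window
//   x_vals[kNElts + i - (kWidth-1)] .. x_vals[kNElts + i],
// which stays in range because the traits assert kWidth <= kNElts.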
+ if (tidx == kNThreads - 1) { smem_exchange[tidx] = reinterpret_cast(x_vals_load)[1]; } + + float x_vals[2 * kNElts]; + #pragma unroll + for (int i = 0; i < 2 * kNElts; ++i) { x_vals[i] = float(x_vals_load[i]); } + + float out_vals[kNElts]; + #pragma unroll + for (int i = 0; i < kNElts; ++i) { + out_vals[i] = bias_val; + #pragma unroll + for (int w = 0; w < kWidth; ++w) { + out_vals[i] += weight_vals[w] * x_vals[kNElts + i - (kWidth - w - 1)]; + } + } + + if (params.silu_activation) { + #pragma unroll + for (int i = 0; i < kNElts; ++i) { + out_vals[i] = out_vals[i] / (1 + expf(-out_vals[i])); + } + } + + input_t out_vals_store[kNElts]; + #pragma unroll + for (int i = 0; i < kNElts; ++i) { out_vals_store[i] = out_vals[i]; } + if constexpr(kIsVecLoad) { + Ktraits::BlockStoreVecT(smem_store_vec).Store(reinterpret_cast(out), reinterpret_cast(out_vals_store), (params.seqlen - chunk * kChunkSize) / kNElts); + } else { + Ktraits::BlockStoreT(smem_store).Store(out, out_vals_store, params.seqlen - chunk * kChunkSize); + } + out += kChunkSize; + } +} + +template +void causal_conv1d_fwd_launch(ConvParamsBase ¶ms, cudaStream_t stream) { + static constexpr int kNElts = sizeof(input_t) == 4 ? 4 : 8; + BOOL_SWITCH(params.seqlen % kNElts == 0, kIsVecLoad, [&] { + using Ktraits = Causal_conv1d_fwd_kernel_traits; + constexpr int kSmemSize = Ktraits::kSmemSize; + dim3 grid(params.batch, params.dim); + auto kernel = &causal_conv1d_fwd_kernel; + if (kSmemSize >= 48 * 1024) { + C10_CUDA_CHECK(cudaFuncSetAttribute( + kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize)); + } + kernel<<>>(params); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); +} + +template +void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream) { + if (params.width == 2) { + causal_conv1d_fwd_launch<128, 2, input_t, weight_t>(params, stream); + } else if (params.width == 3) { + causal_conv1d_fwd_launch<128, 3, input_t, weight_t>(params, stream); + } else if (params.width == 4) { + causal_conv1d_fwd_launch<128, 4, input_t, weight_t>(params, stream); + } +} + +template +struct Causal_conv1d_channellast_fwd_kernel_traits { + // The cache line is 128 bytes, and we try to read 16 bytes per thread. + // So we have 8 threads per "row", so 32 or 64 elements in the channel dimension. + // That leaves 4 columns per warp, and so 16 columns per block (assuming each block has 128 + // threads). Each each load is 16 x 32|64 elements in the L x C dimensions. + using input_t = input_t_; + using weight_t = weight_t_; + static constexpr int kNThreads = kNThreads_; + static_assert(kNThreads % 32 == 0); + static constexpr int kNWarps = kNThreads / 32; + static constexpr int kWidth = kWidth_; + static constexpr int kChunkSizeL = kChunkSizeL_; + static constexpr int kNBytes = sizeof(input_t); + static_assert(kNBytes == 2 || kNBytes == 4); + static constexpr int kNElts = kNBytes == 4 ? 
4 : 8; + static constexpr int kNEltsPerRow = 128 / kNBytes; + static constexpr int kNThreadsPerRow = kNEltsPerRow / kNElts; // Always 8 for now + static_assert(kNThreadsPerRow * kNBytes * kNElts == 128); + static constexpr int kNColsPerWarp = 32 / kNThreadsPerRow; // Always 4 for now + static_assert(kNColsPerWarp * kNThreadsPerRow == 32); + static constexpr int kNColsPerLoad = kNColsPerWarp * kNWarps; + static constexpr int kNLoads = kChunkSizeL / kNColsPerLoad; + static_assert(kNLoads * kNColsPerLoad == kChunkSizeL); + static constexpr bool kIsVecLoad = kIsVecLoad_; + using vec_t = typename BytesToType::Type; + // using BlockLoadT = cub::BlockLoad; + // using BlockStoreT = cub::BlockStore; + // static constexpr int kSmemSize = std::max({sizeof(typename BlockLoadT::TempStorage), + // sizeof(typename BlockStoreT::TempStorage)}); + // static constexpr int kSmemSize = kChunkSizeL * kNEltsPerRow * kNBytes; +}; + +template +__global__ __launch_bounds__(Ktraits::kNThreads) +void causal_conv1d_channellast_fwd_kernel(ConvParamsBase params) { + constexpr int kWidth = Ktraits::kWidth; + constexpr int kNThreads = Ktraits::kNThreads; + constexpr int kNElts = Ktraits::kNElts; + constexpr int kNWarp = Ktraits::kNWarps; + constexpr int kNThreadsPerC = Ktraits::kNThreadsPerRow; + constexpr int kLPerLoad = Ktraits::kNColsPerLoad; + constexpr int kChunkSizeL = Ktraits::kChunkSizeL; + constexpr int kChunkSizeC = Ktraits::kNEltsPerRow; + using input_t = typename Ktraits::input_t; + using vec_t = typename Ktraits::vec_t; + using weight_t = typename Ktraits::weight_t; + + // Shared memory. + __shared__ input_t x_smem[kWidth - 1 + kChunkSizeL][kChunkSizeC + kNElts]; + + const int tid = threadIdx.x; + const int l_idx = tid / kNThreadsPerC; + const int c_idx = tid % kNThreadsPerC; + const int batch_id = blockIdx.x; + const int chunk_l_id = blockIdx.y; + const int chunk_c_id = blockIdx.z; + input_t *x = reinterpret_cast(params.x_ptr) + batch_id * params.x_batch_stride + + (chunk_l_id * kChunkSizeL + l_idx) * params.x_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts; + weight_t *weight = reinterpret_cast(params.weight_ptr) + + chunk_c_id * kChunkSizeC * params.weight_c_stride; + input_t *out = reinterpret_cast(params.out_ptr) + batch_id * params.out_batch_stride + + (chunk_l_id * kChunkSizeL + l_idx) * params.out_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts; + + #pragma unroll + for (int l = 0; l < Ktraits::kNLoads; ++l) { + input_t x_vals_load[kNElts] = {0}; + if (chunk_l_id * kChunkSizeL + l * kLPerLoad + l_idx < params.seqlen + && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) { + reinterpret_cast(x_vals_load)[0] = *reinterpret_cast(x + l * kLPerLoad * params.x_l_stride); + } + reinterpret_cast(x_smem[kWidth - 1 + l * kLPerLoad + l_idx])[c_idx] = reinterpret_cast(x_vals_load)[0]; + } + // Load the elements from the previous chunk that are needed for convolution. 
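A brief note on how this forward tile differs from the channel-last backward tile above; this is an observation from the declarations, not an original comment.
// The forward kernel only produces outputs and never recomputes them, so x_smem
// here needs just the (kWidth - 1)-row left halo from the previous chunk; the
// extra right halos kept by the channel-last backward kernel are unnecessary.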
+ if (l_idx < kWidth - 1) { + input_t x_vals_load[kNElts] = {0}; + if (chunk_l_id * kChunkSizeL + l_idx - (kWidth - 1) >= 0 + && chunk_l_id * kChunkSizeL + l_idx - (kWidth - 1) < params.seqlen + && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) { + reinterpret_cast(x_vals_load)[0] = *reinterpret_cast(x - (kWidth - 1) * params.x_l_stride); + } + reinterpret_cast(x_smem[l_idx])[c_idx] = reinterpret_cast(x_vals_load)[0]; + } + + __syncthreads(); + + constexpr int kLPerThread = std::min(kChunkSizeL * kChunkSizeC / kNThreads, kChunkSizeL); + static_assert(kLPerThread * kNThreads == kChunkSizeL * kChunkSizeC); + constexpr int kNThreadsPerRow = kChunkSizeL / kLPerThread; + static_assert(kNThreadsPerRow * kLPerThread == kChunkSizeL); + // kChunkSizeL, kLPerThread, kNThreadsPerRow should be powers of 2 for simplicity + static_assert((kChunkSizeL & (kChunkSizeL - 1)) == 0); + static_assert((kLPerThread & (kLPerThread - 1)) == 0); + static_assert((kNThreadsPerRow & (kNThreadsPerRow - 1)) == 0); + static_assert(kNThreadsPerRow <= 32); + + const int row_idx = tid / kNThreadsPerRow; + const int col_idx = tid % kNThreadsPerRow; + + float bias_val = params.bias_ptr == nullptr || chunk_c_id * kChunkSizeC + row_idx >= params.dim ? 0.f : float(reinterpret_cast(params.bias_ptr)[chunk_c_id * kChunkSizeC + row_idx]); + float weight_vals[kWidth] = {0}; + if (chunk_c_id + kChunkSizeC + row_idx < params.dim) { + #pragma unroll + for (int w = 0; w < kWidth; ++w) { + weight_vals[w] = weight[row_idx * params.weight_c_stride + w * params.weight_width_stride]; + } + } + float x_vals[kWidth - 1 + kLPerThread]; + #pragma unroll + for (int i = 0; i < kWidth - 1 + kLPerThread; ++i) { + x_vals[i] = float(x_smem[col_idx * kLPerThread + i][row_idx]); + } + + float out_vals[kLPerThread]; + #pragma unroll + for (int i = 0; i < kLPerThread; ++i) { + out_vals[i] = bias_val; + #pragma unroll + for (int w = 0; w < kWidth; ++w) { out_vals[i] += weight_vals[w] * x_vals[i + w]; } + if (params.silu_activation) {out_vals[i] = out_vals[i] / (1 + expf(-out_vals[i])); } + } + + // Since kNThreadsPerRow is a power of 2 and <= 32, we only need syncwarp and not syncthreads. 
+ __syncwarp(); + #pragma unroll + for (int i = 0; i < kLPerThread; ++i) { x_smem[col_idx * kLPerThread + i][row_idx] = out_vals[i]; } + __syncthreads(); + + #pragma unroll + for (int l = 0; l < Ktraits::kNLoads; ++l) { + input_t out_vals_store[kNElts]; + reinterpret_cast(out_vals_store)[0] = reinterpret_cast(x_smem[l * kLPerLoad + l_idx])[c_idx]; + if (chunk_l_id * kChunkSizeL + l * kLPerLoad + l_idx < params.seqlen + && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) { + *reinterpret_cast(out + l * kLPerLoad * params.out_l_stride) = reinterpret_cast(out_vals_store)[0]; + } + } + +} + +template +void causal_conv1d_channellast_fwd_launch(ConvParamsBase ¶ms, cudaStream_t stream) { + using Ktraits = Causal_conv1d_channellast_fwd_kernel_traits; + // constexpr int kSmemSize = Ktraits::kSmemSize; + constexpr int kChunkSizeL = Ktraits::kChunkSizeL; + constexpr int kChunkSizeC = Ktraits::kNEltsPerRow; + const int n_chunks_L = (params.seqlen + kChunkSizeL - 1) / kChunkSizeL; + const int n_chunks_C = (params.dim + kChunkSizeC - 1) / kChunkSizeC; + // printf("n_chunks_L: %d, n_chunks_C: %d\n", n_chunks_L, n_chunks_C); + dim3 grid(params.batch, n_chunks_L, n_chunks_C); + dim3 block(Ktraits::kNThreads); + auto kernel = &causal_conv1d_channellast_fwd_kernel; + // if (kSmemSize >= 48 * 1024) { + // C10_CUDA_CHECK(cudaFuncSetAttribute( + // kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize)); + // } + // kernel<<>>(params); + kernel<<>>(params); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +template +void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream) { + if (params.width == 2) { + causal_conv1d_channellast_fwd_launch<128, 2, input_t, weight_t>(params, stream); + } else if (params.width == 3) { + causal_conv1d_channellast_fwd_launch<128, 3, input_t, weight_t>(params, stream); + } else if (params.width == 4) { + causal_conv1d_channellast_fwd_launch<128, 4, input_t, weight_t>(params, stream); + } +} + +template void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); + +template void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); \ No newline at end of file diff --git 
a/SegMamba/causal-conv1d/csrc/causal_conv1d_update.cu b/SegMamba/causal-conv1d/csrc/causal_conv1d_update.cu new file mode 100644 index 0000000000000000000000000000000000000000..713e0ac883853491f9bdb0015b578657c228c1e7 --- /dev/null +++ b/SegMamba/causal-conv1d/csrc/causal_conv1d_update.cu @@ -0,0 +1,96 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +#include +#include +#include // For C10_CUDA_CHECK and C10_CUDA_KERNEL_LAUNCH_CHECK + +#include +#include + +#include "causal_conv1d.h" +#include "causal_conv1d_common.h" +#include "static_switch.h" + +template +struct Causal_conv1d_update_kernel_traits { + using input_t = input_t_; + using weight_t = weight_t_; + static constexpr int kNThreads = kNThreads_; + static constexpr int kWidth = kWidth_; + static constexpr int kNBytes = sizeof(input_t); + static_assert(kNBytes == 2 || kNBytes == 4); +}; + +template +__global__ __launch_bounds__(Ktraits::kNThreads) +void causal_conv1d_update_kernel(ConvParamsBase params) { + constexpr int kWidth = Ktraits::kWidth; + constexpr int kNThreads = Ktraits::kNThreads; + using input_t = typename Ktraits::input_t; + using weight_t = typename Ktraits::weight_t; + + const int tidx = threadIdx.x; + const int batch_id = blockIdx.x; + const int channel_id = blockIdx.y * kNThreads + tidx; + input_t *x = reinterpret_cast(params.x_ptr) + batch_id * params.x_batch_stride + + channel_id * params.x_c_stride; + input_t *conv_state = reinterpret_cast(params.conv_state_ptr) + batch_id * params.conv_state_batch_stride + + channel_id * params.conv_state_c_stride; + weight_t *weight = reinterpret_cast(params.weight_ptr) + channel_id * params.weight_c_stride; + input_t *out = reinterpret_cast(params.out_ptr) + batch_id * params.out_batch_stride + + channel_id * params.out_c_stride; + float bias_val = params.bias_ptr == nullptr || channel_id >= params.dim ? 
0.f : float(reinterpret_cast(params.bias_ptr)[channel_id]); + + float weight_vals[kWidth] = {0}; + if (channel_id < params.dim) { + #pragma unroll + for (int i = 0; i < kWidth; ++i) { weight_vals[i] = float(weight[i * params.weight_width_stride]); } + } + + float x_vals[kWidth] = {0}; + if (channel_id < params.dim) { + #pragma unroll + for (int i = 0; i < kWidth - 1; ++i) { x_vals[i] = float(conv_state[(i + 1) * params.conv_state_l_stride]); } + x_vals[kWidth - 1] = float(x[0]); + #pragma unroll + for (int i = 0; i < kWidth; ++i) { conv_state[i * params.conv_state_l_stride] = input_t(x_vals[i]); } + } + + float out_val = bias_val; + #pragma unroll + for (int i = 0; i < kWidth; ++i) { out_val += weight_vals[i] * x_vals[i]; } + if (params.silu_activation) { out_val = out_val / (1 + expf(-out_val)); } + if (channel_id < params.dim) { out[0] = input_t(out_val); } +} + +template +void causal_conv1d_update_launch(ConvParamsBase ¶ms, cudaStream_t stream) { + using Ktraits = Causal_conv1d_update_kernel_traits; + dim3 grid(params.batch, (params.dim + kNThreads - 1) / kNThreads); + auto kernel = &causal_conv1d_update_kernel; + kernel<<>>(params); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +template +void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream) { + if (params.width == 2) { + causal_conv1d_update_launch<64, 2, input_t, weight_t>(params, stream); + } else if (params.width == 3) { + causal_conv1d_update_launch<64, 3, input_t, weight_t>(params, stream); + } else if (params.width == 4) { + causal_conv1d_update_launch<64, 4, input_t, weight_t>(params, stream); + } +} + +template void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/SegMamba/causal-conv1d/csrc/static_switch.h b/SegMamba/causal-conv1d/csrc/static_switch.h new file mode 100644 index 0000000000000000000000000000000000000000..0f4ad3eb62235443d15c454b6691c2ec63645219 --- /dev/null +++ b/SegMamba/causal-conv1d/csrc/static_switch.h @@ -0,0 +1,25 @@ +// Inspired by https://github.com/NVIDIA/DALI/blob/main/include/dali/core/static_switch.h +// and https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Dispatch.h + +#pragma once + +/// @param COND - a boolean expression to switch by +/// @param CONST_NAME - a name given for the constexpr bool variable. +/// @param ... - code to execute for true and false +/// +/// Usage: +/// ``` +/// BOOL_SWITCH(flag, BoolConst, [&] { +/// some_function(...); +/// }); +/// ``` +#define BOOL_SWITCH(COND, CONST_NAME, ...) 
\ + [&] { \ + if (COND) { \ + static constexpr bool CONST_NAME = true; \ + return __VA_ARGS__(); \ + } else { \ + static constexpr bool CONST_NAME = false; \ + return __VA_ARGS__(); \ + } \ + }() diff --git a/SegMamba/causal-conv1d/setup.py b/SegMamba/causal-conv1d/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..12e36bf988215a4c536278026e6f4401e66534da --- /dev/null +++ b/SegMamba/causal-conv1d/setup.py @@ -0,0 +1,264 @@ +# Copyright (c) 2023, Tri Dao. +import sys +import warnings +import os +import re +import ast +from pathlib import Path +from packaging.version import parse, Version +import platform + +from setuptools import setup, find_packages +import subprocess + +import urllib.request +import urllib.error +from wheel.bdist_wheel import bdist_wheel as _bdist_wheel + +import torch +from torch.utils.cpp_extension import ( + BuildExtension, + CppExtension, + CUDAExtension, + CUDA_HOME, +) + + +with open("README.md", "r", encoding="utf-8") as fh: + long_description = fh.read() + + +# ninja build does not work unless include_dirs are abs path +this_dir = os.path.dirname(os.path.abspath(__file__)) + +PACKAGE_NAME = "causal_conv1d" + +BASE_WHEEL_URL = "https://github.com/Dao-AILab/causal-conv1d/releases/download/{tag_name}/{wheel_name}" + +# FORCE_BUILD: Force a fresh build locally, instead of attempting to find prebuilt wheels +# SKIP_CUDA_BUILD: Intended to allow CI to use a simple `python setup.py sdist` run to copy over raw files, without any cuda compilation +FORCE_BUILD = os.getenv("CAUSAL_CONV1D_FORCE_BUILD", "FALSE") == "TRUE" +SKIP_CUDA_BUILD = os.getenv("CAUSAL_CONV1D_SKIP_CUDA_BUILD", "FALSE") == "TRUE" +# For CI, we want the option to build with C++11 ABI since the nvcr images use C++11 ABI +FORCE_CXX11_ABI = os.getenv("CAUSAL_CONV1D_FORCE_CXX11_ABI", "FALSE") == "TRUE" + + +def get_platform(): + """ + Returns the platform name as used in wheel filenames. + """ + if sys.platform.startswith("linux"): + return "linux_x86_64" + elif sys.platform == "darwin": + mac_version = ".".join(platform.mac_ver()[0].split(".")[:2]) + return f"macosx_{mac_version}_x86_64" + elif sys.platform == "win32": + return "win_amd64" + else: + raise ValueError("Unsupported platform: {}".format(sys.platform)) + + +def get_cuda_bare_metal_version(cuda_dir): + raw_output = subprocess.check_output( + [cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True + ) + output = raw_output.split() + release_idx = output.index("release") + 1 + bare_metal_version = parse(output[release_idx].split(",")[0]) + + return raw_output, bare_metal_version + + +def check_if_cuda_home_none(global_option: str) -> None: + if CUDA_HOME is not None: + return + # warn instead of error because user could be downloading prebuilt wheels, so nvcc won't be necessary + # in that case. + warnings.warn( + f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? " + "If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, " + "only images whose names contain 'devel' will provide nvcc." 
+ ) + + +def append_nvcc_threads(nvcc_extra_args): + return nvcc_extra_args + ["--threads", "4"] + + +cmdclass = {} +ext_modules = [] + +if not SKIP_CUDA_BUILD: + print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__)) + TORCH_MAJOR = int(torch.__version__.split(".")[0]) + TORCH_MINOR = int(torch.__version__.split(".")[1]) + + check_if_cuda_home_none("causal_conv1d") + # Check, if CUDA11 is installed for compute capability 8.0 + cc_flag = [] + if CUDA_HOME is not None: + _, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME) + if bare_metal_version < Version("11.6"): + raise RuntimeError( + "causal_conv1d is only supported on CUDA 11.6 and above. " + "Note: make sure nvcc has a supported version by running nvcc -V." + ) + + cc_flag.append("-gencode") + cc_flag.append("arch=compute_70,code=sm_70") + cc_flag.append("-gencode") + cc_flag.append("arch=compute_80,code=sm_80") + if bare_metal_version >= Version("11.8"): + cc_flag.append("-gencode") + cc_flag.append("arch=compute_90,code=sm_90") + + # HACK: The compiler flag -D_GLIBCXX_USE_CXX11_ABI is set to be the same as + # torch._C._GLIBCXX_USE_CXX11_ABI + # https://github.com/pytorch/pytorch/blob/8472c24e3b5b60150096486616d98b7bea01500b/torch/utils/cpp_extension.py#L920 + if FORCE_CXX11_ABI: + torch._C._GLIBCXX_USE_CXX11_ABI = True + + ext_modules.append( + CUDAExtension( + name="causal_conv1d_cuda", + sources=[ + "csrc/causal_conv1d.cpp", + "csrc/causal_conv1d_fwd.cu", + "csrc/causal_conv1d_bwd.cu", + "csrc/causal_conv1d_update.cu", + ], + extra_compile_args={ + "cxx": ["-O3"], + "nvcc": append_nvcc_threads( + [ + "-O3", + "-U__CUDA_NO_HALF_OPERATORS__", + "-U__CUDA_NO_HALF_CONVERSIONS__", + "-U__CUDA_NO_BFLOAT16_OPERATORS__", + "-U__CUDA_NO_BFLOAT16_CONVERSIONS__", + "-U__CUDA_NO_BFLOAT162_OPERATORS__", + "-U__CUDA_NO_BFLOAT162_CONVERSIONS__", + "--expt-relaxed-constexpr", + "--expt-extended-lambda", + "--use_fast_math", + "--ptxas-options=-v", + "-lineinfo", + ] + + cc_flag + ), + }, + include_dirs=[this_dir], + ) + ) + + +def get_package_version(): + with open(Path(this_dir) / "causal_conv1d" / "__init__.py", "r") as f: + version_match = re.search(r"^__version__\s*=\s*(.*)$", f.read(), re.MULTILINE) + public_version = ast.literal_eval(version_match.group(1)) + local_version = os.environ.get("CAUSAL_CONV1D_LOCAL_VERSION") + if local_version: + return f"{public_version}+{local_version}" + else: + return str(public_version) + + +def get_wheel_url(): + # Determine the version numbers that will be used to determine the correct wheel + # We're using the CUDA version used to build torch, not the one currently installed + # _, cuda_version_raw = get_cuda_bare_metal_version(CUDA_HOME) + torch_cuda_version = parse(torch.version.cuda) + torch_version_raw = parse(torch.__version__) + # For CUDA 11, we only compile for CUDA 11.8, and for CUDA 12 we only compile for CUDA 12.2 + # to save CI time. Minor versions should be compatible. 
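A concrete illustration of the version mapping described in the comment above and of the wheel filename it feeds into; the version numbers are hypothetical, only the filename shape follows from the f-string below.
    # e.g. torch built against CUDA 11.7 -> look for a cu118 wheel,
    #      torch built against CUDA 12.1 -> look for a cu122 wheel.
    # A resulting filename then looks like:
    #   causal_conv1d-1.1.0+cu118torch2.1cxx11abiFALSE-cp310-cp310-linux_x86_64.whl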
+ torch_cuda_version = parse("11.8") if torch_cuda_version.major == 11 else parse("12.2") + python_version = f"cp{sys.version_info.major}{sys.version_info.minor}" + platform_name = get_platform() + causal_conv1d_version = get_package_version() + # cuda_version = f"{cuda_version_raw.major}{cuda_version_raw.minor}" + cuda_version = f"{torch_cuda_version.major}{torch_cuda_version.minor}" + torch_version = f"{torch_version_raw.major}.{torch_version_raw.minor}" + cxx11_abi = str(torch._C._GLIBCXX_USE_CXX11_ABI).upper() + + # Determine wheel URL based on CUDA version, torch version, python version and OS + wheel_filename = f"{PACKAGE_NAME}-{causal_conv1d_version}+cu{cuda_version}torch{torch_version}cxx11abi{cxx11_abi}-{python_version}-{python_version}-{platform_name}.whl" + wheel_url = BASE_WHEEL_URL.format( + tag_name=f"v{causal_conv1d_version}", wheel_name=wheel_filename + ) + return wheel_url, wheel_filename + + +class CachedWheelsCommand(_bdist_wheel): + """ + The CachedWheelsCommand plugs into the default bdist wheel, which is ran by pip when it cannot + find an existing wheel (which is currently the case for all installs). We use + the environment parameters to detect whether there is already a pre-built version of a compatible + wheel available and short-circuits the standard full build pipeline. + """ + + def run(self): + if FORCE_BUILD: + return super().run() + + wheel_url, wheel_filename = get_wheel_url() + print("Guessing wheel URL: ", wheel_url) + try: + urllib.request.urlretrieve(wheel_url, wheel_filename) + + # Make the archive + # Lifted from the root wheel processing command + # https://github.com/pypa/wheel/blob/cf71108ff9f6ffc36978069acb28824b44ae028e/src/wheel/bdist_wheel.py#LL381C9-L381C85 + if not os.path.exists(self.dist_dir): + os.makedirs(self.dist_dir) + + impl_tag, abi_tag, plat_tag = self.get_tag() + archive_basename = f"{self.wheel_dist_name}-{impl_tag}-{abi_tag}-{plat_tag}" + + wheel_path = os.path.join(self.dist_dir, archive_basename + ".whl") + print("Raw wheel path", wheel_path) + os.rename(wheel_filename, wheel_path) + except urllib.error.HTTPError: + print("Precompiled wheel not found. Building from source...") + # If the wheel could not be downloaded, build from source + super().run() + + +setup( + name=PACKAGE_NAME, + version=get_package_version(), + packages=find_packages( + exclude=( + "build", + "csrc", + "include", + "tests", + "dist", + "docs", + "benchmarks", + "causal_conv1d.egg-info", + ) + ), + author="Tri Dao", + author_email="tri@tridao.me", + description="Causal depthwise conv1d in CUDA, with a PyTorch interface", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/Dao-AILab/causal-conv1d", + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: BSD License", + "Operating System :: Unix", + ], + ext_modules=ext_modules, + cmdclass={"bdist_wheel": CachedWheelsCommand, "build_ext": BuildExtension} + if ext_modules + else { + "bdist_wheel": CachedWheelsCommand, + }, + python_requires=">=3.7", + install_requires=[ + "torch", + "packaging", + "ninja", + ], +) diff --git a/SegMamba/causal-conv1d/tests/test_causal_conv1d.py b/SegMamba/causal-conv1d/tests/test_causal_conv1d.py new file mode 100644 index 0000000000000000000000000000000000000000..6e5985cfb0582e6656afb1d8b5c1de78f24f4276 --- /dev/null +++ b/SegMamba/causal-conv1d/tests/test_causal_conv1d.py @@ -0,0 +1,173 @@ +# Copyright (C) 2023, Tri Dao. 
+ +import math + +import torch +import pytest + +from einops import rearrange + +from causal_conv1d.causal_conv1d_interface import causal_conv1d_fn, causal_conv1d_ref +from causal_conv1d.causal_conv1d_interface import causal_conv1d_update, causal_conv1d_update_ref + + +@pytest.mark.parametrize("channel_last", [False, True]) +# @pytest.mark.parametrize('channel_last', [True]) +@pytest.mark.parametrize("itype", [torch.float32, torch.float16, torch.bfloat16]) +# @pytest.mark.parametrize('itype', [torch.float16]) +@pytest.mark.parametrize("silu_activation", [False, True]) +# @pytest.mark.parametrize('silu_activation', [True]) +@pytest.mark.parametrize("has_bias", [False, True]) +# @pytest.mark.parametrize('has_bias', [True]) +@pytest.mark.parametrize("width", [2, 3, 4]) +# @pytest.mark.parametrize('width', [2]) +@pytest.mark.parametrize( + "seqlen", [8, 16, 32, 64, 128, 151, 256, 372, 512, 784, 1024, 1134, 2048, 4096] +) +# @pytest.mark.parametrize('seqlen', [8, 16, 32, 64, 128, 256, 512, 784, 1024, 2048, 4096]) +# @pytest.mark.parametrize('seqlen', [128]) +def test_causal_conv1d(seqlen, width, has_bias, silu_activation, itype, channel_last): + device = "cuda" + rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (3e-3, 5e-3) + if itype == torch.bfloat16: + rtol, atol = 1e-2, 5e-2 + rtolw, atolw = (1e-3, 1e-3) + # set seed + torch.random.manual_seed(0) + batch_size = 2 + # batch_size = 1 + dim = 4096 + 32 # Try dim not divisible by 64 + # dim = 64 + if not channel_last: + x = torch.randn(batch_size, 4096 + dim + 64, seqlen, device=device, dtype=itype)[:, 4096:4096 + dim, :].requires_grad_() + else: + x = rearrange( + torch.randn(batch_size, seqlen, 4096 + dim + 64, device=device, dtype=itype)[:, :, 4096:4096 + dim], "b s d -> b d s" + ).requires_grad_() + weight = torch.randn(dim, width, device=device, dtype=torch.float32, requires_grad=True) + if has_bias: + bias = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True) + else: + bias = None + x_ref = x.detach().clone().requires_grad_() + weight_ref = weight.detach().clone().requires_grad_() + bias_ref = bias.detach().clone().requires_grad_() if bias is not None else None + activation = None if not silu_activation else "silu" + out = causal_conv1d_fn(x, weight, bias, activation=activation) + out_ref = causal_conv1d_ref(x_ref, weight_ref, bias_ref, activation=activation) + + print(f"Output max diff: {(out - out_ref).abs().max().item()}") + print(f"Output mean diff: {(out - out_ref).abs().mean().item()}") + assert torch.allclose(out, out_ref, rtol=rtol, atol=atol) + + g = torch.randn_like(out) + out_ref.backward(g) + out.backward(g) + + print(f"dx max diff: {(x.grad - x_ref.grad).abs().max().item()}") + print(f"dweight max diff: {(weight.grad - weight_ref.grad).abs().max().item()}") + if has_bias: + print(f"dbias max diff: {(bias.grad - bias_ref.grad).abs().max().item()}") + + assert torch.allclose(x.grad, x_ref.grad.to(dtype=itype), rtol=rtol, atol=atol) + assert torch.allclose(weight.grad, weight_ref.grad, rtol=rtolw, atol=atolw) + if has_bias: + assert torch.allclose(bias.grad, bias_ref.grad, rtol=rtolw, atol=atolw) + + +@pytest.mark.parametrize("itype", [torch.float32, torch.float16, torch.bfloat16]) +# @pytest.mark.parametrize('itype', [torch.float16]) +@pytest.mark.parametrize("silu_activation", [False, True]) +# @pytest.mark.parametrize('silu_activation', [False]) +@pytest.mark.parametrize("has_bias", [False, True]) +# @pytest.mark.parametrize('has_bias', [True]) +@pytest.mark.parametrize("width", [2, 3, 4]) +# 
@pytest.mark.parametrize('width', [2]) +@pytest.mark.parametrize("dim", [2048, 2048 + 16, 4096]) +# @pytest.mark.parametrize("dim", [2048]) +def test_causal_conv1d_update(dim, width, has_bias, silu_activation, itype): + device = "cuda" + rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (3e-3, 5e-3) + if itype == torch.bfloat16: + rtol, atol = 1e-2, 5e-2 + rtolw, atolw = (1e-3, 1e-3) + # set seed + torch.random.manual_seed(0) + batch_size = 2 + # batch_size = 1 + # dim = 64 + x = torch.randn(batch_size, dim, device=device, dtype=itype) + conv_state = torch.randn(batch_size, dim, width, device=device, dtype=itype) + weight = torch.randn(dim, width, device=device, dtype=torch.float32, requires_grad=True) + if has_bias: + bias = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True) + else: + bias = None + conv_state_ref = conv_state.detach().clone() + activation = None if not silu_activation else "silu" + out = causal_conv1d_update(x, conv_state, weight, bias, activation=activation) + out_ref = causal_conv1d_update_ref(x, conv_state_ref, weight, bias, activation=activation) + + print(f"Output max diff: {(out - out_ref).abs().max().item()}") + print(f"Output mean diff: {(out - out_ref).abs().mean().item()}") + assert torch.equal(conv_state, conv_state_ref) + assert torch.allclose(out, out_ref, rtol=rtol, atol=atol) + + +# @pytest.mark.parametrize("channel_last", [False, True]) +@pytest.mark.parametrize('channel_last', [True]) +# @pytest.mark.parametrize("itype", [torch.float32, torch.float16, torch.bfloat16]) +@pytest.mark.parametrize('itype', [torch.bfloat16]) +# @pytest.mark.parametrize("silu_activation", [False, True]) +@pytest.mark.parametrize('silu_activation', [True]) +# @pytest.mark.parametrize("has_bias", [False, True]) +@pytest.mark.parametrize('has_bias', [True]) +# @pytest.mark.parametrize("width", [2, 3, 4]) +@pytest.mark.parametrize('width', [4]) +@pytest.mark.parametrize( + # "seqlen", [8, 16, 32, 64, 128, 151, 256, 372, 512, 784, 1024, 1134, 2048, 4096] + "seqlen", [2048] +) +# @pytest.mark.parametrize('seqlen', [8, 16, 32, 64, 128, 256, 512, 784, 1024, 2048, 4096]) +# @pytest.mark.parametrize('seqlen', [128]) +def test_causal_conv1d_race_condition(seqlen, width, has_bias, silu_activation, itype, channel_last): + device = "cuda" + # set seed + torch.random.manual_seed(0) + batch_size = 2 + # batch_size = 1 + dim = 4096 + 32 # Try dim not divisible by 64 + # dim = 64 + if not channel_last: + x = torch.randn(batch_size, 4096 + dim + 64, seqlen, device=device, dtype=itype)[:, 4096:4096 + dim, :].requires_grad_() + else: + x = rearrange( + torch.randn(batch_size, seqlen, 4096 + dim + 64, device=device, dtype=itype)[:, :, 4096:4096 + dim], "b s d -> b d s" + ).requires_grad_() + weight = torch.randn(dim, width, device=device, dtype=torch.float32, requires_grad=True) + if has_bias: + bias = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True) + else: + bias = None + activation = None if not silu_activation else "silu" + out0 = causal_conv1d_fn(x, weight, bias, activation=activation) + g = torch.randn_like(out0) + dx0, dw0, db0 = torch.autograd.grad(out0, (x, weight, bias), g) + dw_atol = 1e-4 + db_atol = 1e-4 + + for i in range(10000): + out = causal_conv1d_fn(x, weight, bias, activation=activation) + dx, dw, db = torch.autograd.grad(out, (x, weight, bias), g) + dw_equal = torch.allclose(dw, dw0, atol=dw_atol) + # if not dw_equal: + # breakpoint() + if has_bias: + db_equal = torch.allclose(db, db0, atol=db_atol) + # if not db_equal: + # 
breakpoint() + assert torch.equal(out, out0) + assert torch.equal(dx, dx0) + assert dw_equal + if has_bias: + assert db_equal diff --git a/SegMamba/images/data_structure.jpg b/SegMamba/images/data_structure.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2b5f2715d2295080924d8fdf64d3c849639538e0 --- /dev/null +++ b/SegMamba/images/data_structure.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:811073efa09d8196f0b0dd9a37418025d5969f204f226b055be9349dec8117db +size 45010 diff --git a/SegMamba/images/method_figure.jpg b/SegMamba/images/method_figure.jpg new file mode 100644 index 0000000000000000000000000000000000000000..682d984ff8c8f6e15104ea4bcf6cf88baf5b60fa --- /dev/null +++ b/SegMamba/images/method_figure.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91fc1b545acec5d4f48af3a5d4498e25bc5c973639b7527076eebab46e78e2ba +size 215324 diff --git a/SegMamba/images/modules.jpg b/SegMamba/images/modules.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d2dc0fcdcc8a392473ee01dfb739941c28eceeb3 --- /dev/null +++ b/SegMamba/images/modules.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e511d5a09ef804053ea85d19075f7a8d922ea59aba475f8b223f2c9a6a15c23c +size 152643 diff --git a/SegMamba/images/segmamba_ablation.jpg b/SegMamba/images/segmamba_ablation.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe655bb596358f285f12aeced37ba79c161cce71 --- /dev/null +++ b/SegMamba/images/segmamba_ablation.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7bbaaa25995dd1f3b7a3b1c326dd939dce40c80722c74f4104ae787ed6d936db +size 99994 diff --git a/SegMamba/light_training/.DS_Store b/SegMamba/light_training/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..bbf3ff2ee2fe133356efb34f5891a28e41a58550 Binary files /dev/null and b/SegMamba/light_training/.DS_Store differ diff --git a/SegMamba/light_training/augment/multi_processor.py b/SegMamba/light_training/augment/multi_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..84efb9bd7d8b65f8f05020cf2bfef3db396eaa2c --- /dev/null +++ b/SegMamba/light_training/augment/multi_processor.py @@ -0,0 +1,10 @@ +from batchgenerators.dataloading.nondet_multi_threaded_augmenter import NonDetMultiThreadedAugmenter + + +class LimitedLenWrapper(NonDetMultiThreadedAugmenter): + def __init__(self, my_imaginary_length, *args, **kwargs): + super().__init__(*args, **kwargs) + self.len = my_imaginary_length + + def __len__(self): + return self.len \ No newline at end of file diff --git a/SegMamba/light_training/augment/train_augment.py b/SegMamba/light_training/augment/train_augment.py new file mode 100644 index 0000000000000000000000000000000000000000..086f133487cc8e4920531b0284edc166b3b20c79 --- /dev/null +++ b/SegMamba/light_training/augment/train_augment.py @@ -0,0 +1,279 @@ +import inspect +import multiprocessing +import os +import shutil +import sys +import warnings +from copy import deepcopy +from datetime import datetime +from time import time, sleep +from typing import Union, Tuple, List +import numpy as np +import torch +from batchgenerators.dataloading.single_threaded_augmenter import SingleThreadedAugmenter +from batchgenerators.transforms.abstract_transforms import AbstractTransform, Compose +from batchgenerators.transforms.color_transforms import BrightnessMultiplicativeTransform, \ + ContrastAugmentationTransform, GammaTransform +from 
batchgenerators.transforms.noise_transforms import GaussianNoiseTransform, GaussianBlurTransform +from batchgenerators.transforms.resample_transforms import SimulateLowResolutionTransform +from batchgenerators.transforms.spatial_transforms import SpatialTransform, MirrorTransform +from batchgenerators.transforms.utility_transforms import RemoveLabelTransform, RenameTransform, NumpyToTensor + + +def get_train_transforms(patch_size, mirror_axes=None): + tr_transforms = [] + patch_size_spatial = patch_size + ignore_axes = None + angle = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi) + + tr_transforms.append(SpatialTransform( + patch_size_spatial, patch_center_dist_from_border=None, + do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0), + do_rotation=True, angle_x=angle, angle_y=angle, angle_z=angle, + p_rot_per_axis=1, # todo experiment with this + do_scale=True, scale=(0.7, 1.4), + border_mode_data="constant", border_cval_data=0, order_data=3, + border_mode_seg="constant", border_cval_seg=-1, order_seg=1, + random_crop=False, # random cropping is part of our dataloaders + p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2, + independent_scale_for_each_axis=False # todo experiment with this + )) + + tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1)) + tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2, + p_per_channel=0.5)) + tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15)) + tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15)) + tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True, + p_per_channel=0.5, + order_downsample=0, order_upsample=3, p_per_sample=0.25, + ignore_axes=ignore_axes)) + tr_transforms.append(GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1)) + tr_transforms.append(GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3)) + + if mirror_axes is not None and len(mirror_axes) > 0: + tr_transforms.append(MirrorTransform(mirror_axes)) + + tr_transforms.append(RemoveLabelTransform(-1, 0)) + tr_transforms.append(NumpyToTensor(['data', 'seg'], 'float')) + + tr_transforms = Compose(tr_transforms) + + return tr_transforms + +def get_train_transforms_nomirror(patch_size, mirror_axes=None): + tr_transforms = [] + patch_size_spatial = patch_size + ignore_axes = None + angle = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. 
* np.pi) + + tr_transforms.append(SpatialTransform( + patch_size_spatial, patch_center_dist_from_border=None, + do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0), + do_rotation=True, angle_x=angle, angle_y=angle, angle_z=angle, + p_rot_per_axis=1, # todo experiment with this + do_scale=True, scale=(0.7, 1.4), + border_mode_data="constant", border_cval_data=0, order_data=3, + border_mode_seg="constant", border_cval_seg=-1, order_seg=1, + random_crop=False, # random cropping is part of our dataloaders + p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2, + independent_scale_for_each_axis=False # todo experiment with this + )) + + tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1)) + tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2, + p_per_channel=0.5)) + tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15)) + tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15)) + tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True, + p_per_channel=0.5, + order_downsample=0, order_upsample=3, p_per_sample=0.25, + ignore_axes=ignore_axes)) + tr_transforms.append(GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1)) + tr_transforms.append(GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3)) + + # if mirror_axes is not None and len(mirror_axes) > 0: + # tr_transforms.append(MirrorTransform(mirror_axes)) + + tr_transforms.append(RemoveLabelTransform(-1, 0)) + tr_transforms.append(NumpyToTensor(['data', 'seg'], 'float')) + + tr_transforms = Compose(tr_transforms) + + return tr_transforms + +def get_train_transforms_onlymirror(patch_size, mirror_axes=None): + tr_transforms = [] + patch_size_spatial = patch_size + ignore_axes = None + angle = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. 
* np.pi) + + # tr_transforms.append(SpatialTransform( + # patch_size_spatial, patch_center_dist_from_border=None, + # do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0), + # do_rotation=True, angle_x=angle, angle_y=angle, angle_z=angle, + # p_rot_per_axis=1, # todo experiment with this + # do_scale=True, scale=(0.7, 1.4), + # border_mode_data="constant", border_cval_data=0, order_data=3, + # border_mode_seg="constant", border_cval_seg=-1, order_seg=1, + # random_crop=False, # random cropping is part of our dataloaders + # p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2, + # independent_scale_for_each_axis=False # todo experiment with this + # )) + + tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1)) + tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2, + p_per_channel=0.5)) + tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15)) + tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15)) + tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True, + p_per_channel=0.5, + order_downsample=0, order_upsample=3, p_per_sample=0.25, + ignore_axes=ignore_axes)) + tr_transforms.append(GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1)) + tr_transforms.append(GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3)) + + if mirror_axes is not None and len(mirror_axes) > 0: + tr_transforms.append(MirrorTransform(mirror_axes)) + + tr_transforms.append(RemoveLabelTransform(-1, 0)) + tr_transforms.append(NumpyToTensor(['data', 'seg'], 'float')) + + tr_transforms = Compose(tr_transforms) + + return tr_transforms + +def get_train_transforms_onlyspatial(patch_size, mirror_axes=None): + tr_transforms = [] + patch_size_spatial = patch_size + ignore_axes = None + angle = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. 
* np.pi) + + tr_transforms.append(SpatialTransform( + patch_size_spatial, patch_center_dist_from_border=None, + do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0), + do_rotation=True, angle_x=angle, angle_y=angle, angle_z=angle, + p_rot_per_axis=1, # todo experiment with this + do_scale=True, scale=(0.7, 1.4), + border_mode_data="constant", border_cval_data=0, order_data=3, + border_mode_seg="constant", border_cval_seg=-1, order_seg=1, + random_crop=False, # random cropping is part of our dataloaders + p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2, + independent_scale_for_each_axis=False # todo experiment with this + )) + + # tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1)) + # tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2, + # p_per_channel=0.5)) + # tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15)) + # tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15)) + # tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True, + # p_per_channel=0.5, + # order_downsample=0, order_upsample=3, p_per_sample=0.25, + # ignore_axes=ignore_axes)) + # tr_transforms.append(GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1)) + # tr_transforms.append(GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3)) + + if mirror_axes is not None and len(mirror_axes) > 0: + tr_transforms.append(MirrorTransform(mirror_axes)) + + tr_transforms.append(RemoveLabelTransform(-1, 0)) + tr_transforms.append(NumpyToTensor(['data', 'seg'], 'float')) + + tr_transforms = Compose(tr_transforms) + + return tr_transforms + +def get_train_transforms_noaug(patch_size, mirror_axes=None): + tr_transforms = [] + # patch_size_spatial = patch_size + # ignore_axes = None + # angle = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. 
* np.pi) + + # tr_transforms.append(SpatialTransform( + # patch_size_spatial, patch_center_dist_from_border=None, + # do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0), + # do_rotation=True, angle_x=angle, angle_y=angle, angle_z=angle, + # p_rot_per_axis=1, # todo experiment with this + # do_scale=True, scale=(0.7, 1.4), + # border_mode_data="constant", border_cval_data=0, order_data=3, + # border_mode_seg="constant", border_cval_seg=-1, order_seg=1, + # random_crop=False, # random cropping is part of our dataloaders + # p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2, + # independent_scale_for_each_axis=False # todo experiment with this + # )) + + # tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1)) + # tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2, + # p_per_channel=0.5)) + # tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15)) + # tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15)) + # tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True, + # p_per_channel=0.5, + # order_downsample=0, order_upsample=3, p_per_sample=0.25, + # ignore_axes=ignore_axes)) + # tr_transforms.append(GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1)) + # tr_transforms.append(GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3)) + + # if mirror_axes is not None and len(mirror_axes) > 0: + # tr_transforms.append(MirrorTransform(mirror_axes)) + + tr_transforms.append(RemoveLabelTransform(-1, 0)) + tr_transforms.append(NumpyToTensor(['data', 'seg'], 'float')) + + tr_transforms = Compose(tr_transforms) + + return tr_transforms + +def get_validation_transforms() -> AbstractTransform: + val_transforms = [] + val_transforms.append(RemoveLabelTransform(-1, 0)) + + # val_transforms.append(RenameTransform('seg', 'target', True)) + + val_transforms.append(NumpyToTensor(['data', 'seg'], 'float')) + val_transforms = Compose(val_transforms) + return val_transforms + +# import SimpleITK as sitk +# import matplotlib.pyplot as plt + +# image = sitk.ReadImage("/Users/xingzhaohu/Documents/工作/code/medical_image_processing/SSL/BraTS20_Training_365/BraTS20_Training_365_flair.nii.gz") +# label = sitk.ReadImage("/Users/xingzhaohu/Documents/工作/code/medical_image_processing/SSL/BraTS20_Training_365/BraTS20_Training_365_seg.nii.gz") + +# # image = sitk.ReadImage("./AIIB/image/AIIB23_171.nii.gz") +# # label = sitk.ReadImage("./AIIB/gt/AIIB23_171.nii.gz") + +# image_arr = sitk.GetArrayFromImage(image) +# label_arr = sitk.GetArrayFromImage(label) +# intensityproperties = {} + +# norm = RescaleTo01Normalization(intensityproperties=intensityproperties) +# image_arr = image_arr[0:128, 0:128, 0:128][None, None] +# label_arr = label_arr[0:128, 0:128, 0:128][None, None] + + +# image_arr = norm.run(image_arr, label_arr) + +# print(image_arr.shape, label_arr.shape) + +# tr_transforms = Compose(tr_transforms) + +# trans_out = tr_transforms(data=image_arr, seg=label_arr) + +# image_arr_aug = trans_out["data"] +# label_arr_aug = trans_out["seg"] + +# print(image_arr_aug.shape, label_arr_aug.shape) + + +# for i in range(40, 128): +# plt.subplot(1, 4, 1) +# plt.imshow(image_arr[0, 0, i], cmap="gray") +# plt.subplot(1, 4, 2) +# plt.imshow(label_arr[0, 0, i], cmap="gray") +# plt.subplot(1, 4, 3) +# plt.imshow(image_arr_aug[0, 0, i], cmap="gray") +# plt.subplot(1, 4, 4) +# plt.imshow(label_arr_aug[0, 0, i], 
cmap="gray") +# plt.show() \ No newline at end of file diff --git a/SegMamba/light_training/dataloading/__init__.py b/SegMamba/light_training/dataloading/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SegMamba/light_training/dataloading/base_data_loader.py b/SegMamba/light_training/dataloading/base_data_loader.py new file mode 100644 index 0000000000000000000000000000000000000000..e22a4438c6f0f2e2ac6715caff55eefcf2855a1a --- /dev/null +++ b/SegMamba/light_training/dataloading/base_data_loader.py @@ -0,0 +1,213 @@ +import numpy as np +from typing import Union, Tuple +import time + +class DataLoaderMultiProcess: + def __init__(self, dataset, + patch_size, + batch_size=2, + oversample_foreground_percent=0.33, + probabilistic_oversampling=False, + print_time=False): + pass + self.dataset = dataset + self.patch_size = patch_size + # self.annotated_classes_key = annotated_classes_key ## (1, 2, 3 ..) + self.batch_size = batch_size + self.keys = [i for i in range(len(dataset))] + self.thread_id = 0 + self.oversample_foreground_percent = oversample_foreground_percent + self.need_to_pad = (np.array([0, 0, 0])).astype(int) + + self.get_do_oversample = self._oversample_last_XX_percent if not probabilistic_oversampling \ + else self._probabilistic_oversampling + self.data_shape = None + self.seg_shape = None + self.print_time = print_time + + def determine_shapes(self): + # load one case + item = self.dataset.__getitem__(0) + data, seg, properties = item["data"], item["seg"], item["properties"] + num_color_channels = data.shape[0] + num_output_channels = seg.shape[0] + patch_size = self.patch_size + data_shape = (self.batch_size, num_color_channels, patch_size[0], patch_size[1], patch_size[2]) + seg_shape = (self.batch_size, num_output_channels, patch_size[0], patch_size[1], patch_size[2]) + return data_shape, seg_shape + + def generate_train_batch(self): + + selected_keys = np.random.choice(self.keys, self.batch_size, True, None) + if self.data_shape is None: + self.data_shape, self.seg_shape = self.determine_shapes() + + data_all = np.zeros(self.data_shape, dtype=np.float32) + data_all_global = np.zeros(self.data_shape, dtype=np.float32) + seg_all_global = np.zeros(self.seg_shape, dtype=np.float32) + data_global = None + seg_global = None + seg_all = np.zeros(self.seg_shape, dtype=np.float32) + + case_properties = [] + + index = 0 + for j, key in enumerate(selected_keys): + + force_fg = self.get_do_oversample(j) + s = time.time() + item = self.dataset.__getitem__(key) + e = time.time() + if self.print_time: + print(f"read single data time is {e - s}") + # print(f"read data time is {e - s}") + data, seg, properties = item["data"], item["seg"], item["properties"] + + if "data_global" in item: + data_global = item["data_global"] + + if "seg_global" in item: + seg_global = item["seg_global"] + + case_properties.append(properties) + # If we are doing the cascade then the segmentation from the previous stage will already have been loaded by + # self._data.load_case(i) (see nnUNetDataset.load_case) + shape = data.shape[1:] + dim = len(shape) + + s = time.time() + bbox_lbs, bbox_ubs = self.get_bbox(shape, force_fg, properties['class_locations']) + e = time.time() + if self.print_time: + print(f"get bbox time is {e - s}") + # whoever wrote this knew what he was doing (hint: it was me). We first crop the data to the region of the + # bbox that actually lies within the data. 
This will result in a smaller array which is then faster to pad. + # valid_bbox is just the coord that lied within the data cube. It will be padded to match the patch size + # later + valid_bbox_lbs = [max(0, bbox_lbs[i]) for i in range(dim)] + valid_bbox_ubs = [min(shape[i], bbox_ubs[i]) for i in range(dim)] + + # At this point you might ask yourself why we would treat seg differently from seg_from_previous_stage. + # Why not just concatenate them here and forget about the if statements? Well that's because segneeds to + # be padded with -1 constant whereas seg_from_previous_stage needs to be padded with 0s (we could also + # remove label -1 in the data augmentation but this way it is less error prone) + this_slice = tuple([slice(0, data.shape[0])] + [slice(i, j) for i, j in zip(valid_bbox_lbs, valid_bbox_ubs)]) + data = data[this_slice] + + this_slice = tuple([slice(0, seg.shape[0])] + [slice(i, j) for i, j in zip(valid_bbox_lbs, valid_bbox_ubs)]) + seg = seg[this_slice] + + + s = time.time() + padding = [(-min(0, bbox_lbs[i]), max(bbox_ubs[i] - shape[i], 0)) for i in range(dim)] + # print(f"box is {bbox_lbs, bbox_ubs}, padding is {padding}") + data_all[j] = np.pad(data, ((0, 0), *padding), 'constant', constant_values=0) + seg_all[j] = np.pad(seg, ((0, 0), *padding), 'constant', constant_values=0) + + if data_global is not None : + data_all_global[j] = data_global + + if seg_global is not None : + seg_all_global[j] = seg_global + + + e = time.time() + if self.print_time: + print(f"box is {bbox_lbs, bbox_ubs}, padding is {padding}") + print(f"setting data value time is {e - s}") + + + if data_global is None: + return {'data': data_all, + 'seg': seg_all, 'properties': case_properties, + 'keys': selected_keys} + + return {'data': data_all, "data_global": data_all_global, + "seg_global": seg_all_global, + 'seg': seg_all, 'properties': case_properties, + 'keys': selected_keys} + + def __next__(self): + + return self.generate_train_batch() + + def set_thread_id(self, thread_id): + self.thread_id = thread_id + + def _oversample_last_XX_percent(self, sample_idx: int) -> bool: + """ + determines whether sample sample_idx in a minibatch needs to be guaranteed foreground + """ + return not sample_idx < round(self.batch_size * (1 - self.oversample_foreground_percent)) + + def _probabilistic_oversampling(self, sample_idx: int) -> bool: + # print('YEAH BOIIIIII') + return np.random.uniform() < self.oversample_foreground_percent + + def get_bbox(self, data_shape: np.ndarray, force_fg: bool, class_locations: Union[dict, None], + overwrite_class: Union[int, Tuple[int, ...]] = None, verbose: bool = False): + # in dataloader 2d we need to select the slice prior to this and also modify the class_locations to only have + # locations for the given slice + need_to_pad = self.need_to_pad.copy() + dim = len(data_shape) + + for d in range(dim): + # if case_all_data.shape + need_to_pad is still < patch size we need to pad more! We pad on both sides + # always + if need_to_pad[d] + data_shape[d] < self.patch_size[d]: + need_to_pad[d] = self.patch_size[d] - data_shape[d] + + # we can now choose the bbox from -need_to_pad // 2 to shape - patch_size + need_to_pad // 2. Here we + # define what the upper and lower bound can be to then sample form them with np.random.randint + lbs = [- need_to_pad[i] // 2 for i in range(dim)] + ubs = [data_shape[i] + need_to_pad[i] // 2 + need_to_pad[i] % 2 - self.patch_size[i] for i in range(dim)] + + # if not force_fg then we can just sample the bbox randomly from lb and ub. 
Else we need to make sure we get + # at least one of the foreground classes in the patch + if not force_fg: + bbox_lbs = [np.random.randint(lbs[i], ubs[i] + 1) for i in range(dim)] + # print('I want a random location') + else: + assert class_locations is not None, 'if force_fg is set class_locations cannot be None' + if overwrite_class is not None: + assert overwrite_class in class_locations.keys(), 'desired class ("overwrite_class") does not ' \ + 'have class_locations (missing key)' + # this saves us a np.unique. Preprocessing already did that for all cases. Neat. + # class_locations keys can also be tuple + eligible_classes_or_regions = [i for i in class_locations.keys() if len(class_locations[i]) > 0] + + # if we have annotated_classes_key locations and other classes are present, remove the annotated_classes_key from the list + # strange formulation needed to circumvent + # ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all() + # tmp = [i == self.annotated_classes_key if isinstance(i, tuple) else False for i in eligible_classes_or_regions] + # if any(tmp): + # if len(eligible_classes_or_regions) > 1: + # eligible_classes_or_regions.pop(np.where(tmp)[0][0]) + + if len(eligible_classes_or_regions) == 0: + # this only happens if some image does not contain foreground voxels at all + selected_class = None + if verbose: + print('case does not contain any foreground classes') + else: + # I hate myself. Future me aint gonna be happy to read this + # 2022_11_25: had to read it today. Wasn't too bad + selected_class = eligible_classes_or_regions[np.random.choice(len(eligible_classes_or_regions))] if \ + (overwrite_class is None or (overwrite_class not in eligible_classes_or_regions)) else overwrite_class + # print(f'I want to have foreground, selected class: {selected_class}') + + voxels_of_that_class = class_locations[selected_class] if selected_class is not None else None + + if voxels_of_that_class is not None and len(voxels_of_that_class) > 0: + selected_voxel = voxels_of_that_class[np.random.choice(len(voxels_of_that_class))] + # selected voxel is center voxel. Subtract half the patch size to get lower bbox voxel. + # Make sure it is within the bounds of lb and ub + # i + 1 because we have first dimension 0! + bbox_lbs = [max(lbs[i], selected_voxel[i + 1] - self.patch_size[i] // 2) for i in range(dim)] + else: + # If the image does not contain any foreground classes, we fall back to random cropping + bbox_lbs = [np.random.randint(lbs[i], ubs[i] + 1) for i in range(dim)] + + bbox_ubs = [bbox_lbs[i] + self.patch_size[i] for i in range(dim)] + + return bbox_lbs, bbox_ubs \ No newline at end of file diff --git a/SegMamba/light_training/dataloading/dataset.py b/SegMamba/light_training/dataloading/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..51a9d9631ff7eb35651eb4acf140c684f83b44fc --- /dev/null +++ b/SegMamba/light_training/dataloading/dataset.py @@ -0,0 +1,319 @@ + +# Copyright 2020 - 2022 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from sklearn.model_selection import KFold ## K折交叉验证 +import pickle +import os +import json +import math +import numpy as np +import torch +from monai import transforms +import SimpleITK as sitk +from tqdm import tqdm +from torch.utils.data import Dataset +import glob +from light_training.dataloading.utils import unpack_dataset +import random + +class MedicalDataset(Dataset): + def __init__(self, datalist, test=False) -> None: + super().__init__() + + self.datalist = datalist + self.test = test + + self.data_cached = [] + for p in tqdm(self.datalist, total=len(self.datalist)): + info = self.load_pkl(p) + + self.data_cached.append(info) + + ## unpacking + print(f"unpacking data ....") + # for + folder = [] + for p in self.datalist: + f = os.path.dirname(p) + if f not in folder: + folder.append(f) + for f in folder: + unpack_dataset(f, + unpack_segmentation=True, + overwrite_existing=False, + num_processes=8) + + + print(f"data length is {len(self.datalist)}") + + def load_pkl(self, data_path): + pass + properties_path = f"{data_path[:-4]}.pkl" + df = open(properties_path, "rb") + info = pickle.load(df) + + return info + + def post(self, batch_data): + return batch_data + + def read_data(self, data_path): + + image_path = data_path.replace(".npz", ".npy") + seg_path = data_path.replace(".npz", "_seg.npy") + image_data = np.load(image_path, "r+") + + seg_data = None + if not self.test: + seg_data = np.load(seg_path, "r+") + return image_data, seg_data + + def __getitem__(self, i): + + image, seg = self.read_data(self.datalist[i]) + + properties = self.data_cached[i] + + if seg is None: + return { + "data": image, + "properties": properties + } + else : + return { + "data": image, + "seg": seg, + "properties": properties + } + + def __len__(self): + return len(self.datalist) + +def get_train_test_loader_from_test_list(data_dir, test_list): + all_paths = glob.glob(f"{data_dir}/*.npz") + + test_datalist = [] + train_datalist = [] + + test_list_1 = [] + for t in test_list: + test_list_1.append(t.replace(".nii.gz", "")) + + test_list = test_list_1 + for p in all_paths: + p2 = p.split("/")[-1].split(".")[0] + if p2 in test_list: + test_datalist.append(p) + else : + train_datalist.append(p) + + print(f"training data is {len(train_datalist)}") + print(f"test data is {len(test_datalist)}", test_datalist) + + train_ds = MedicalDataset(train_datalist) + test_ds = MedicalDataset(test_datalist) + + loader = [train_ds, test_ds] + + return loader + +def get_kfold_data(data_paths, n_splits, shuffle=False): + X = np.arange(len(data_paths)) + kfold = KFold(n_splits=n_splits, shuffle=shuffle) ## kfold为KFolf类的一个对象 + return_res = [] + for a, b in kfold.split(X): + fold_train = [] + fold_val = [] + for i in a: + fold_train.append(data_paths[i]) + for j in b: + fold_val.append(data_paths[j]) + return_res.append({"train_data": fold_train, "val_data": fold_val}) + + return return_res + +def get_kfold_loader(data_dir, fold=0, test_dir=None): + + all_paths = glob.glob(f"{data_dir}/*.npz") + fold_data = get_kfold_data(all_paths, 5)[fold] + + train_datalist = fold_data["train_data"] + val_datalist = fold_data["val_data"] + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + train_ds = MedicalDataset(train_datalist) + + val_ds = MedicalDataset(val_datalist) + + if test_dir is not None: + test_paths = glob.glob(f"{test_dir}/*.npz") + test_ds = 
MedicalDataset(test_paths, test=True) + else: + test_ds = None + + loader = [train_ds, val_ds, test_ds] + + return loader + +def get_all_training_loader(data_dir, fold=0, test_dir=None): + ## train all labeled data + ## fold denote the validation data in training data + all_paths = glob.glob(f"{data_dir}/*.npz") + fold_data = get_kfold_data(all_paths, 5)[fold] + + train_datalist = all_paths + val_datalist = fold_data["val_data"] + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + train_ds = MedicalDataset(train_datalist) + + val_ds = MedicalDataset(val_datalist) + + if test_dir is not None: + test_paths = glob.glob(f"{test_dir}/*.npz") + test_ds = MedicalDataset(test_paths, test=True) + else: + test_ds = None + + loader = [train_ds, val_ds, test_ds] + + return loader + +def get_train_val_test_loader_seperate(train_dir, val_dir, test_dir=None): + train_datalist = glob.glob(f"{train_dir}/*.npz") + val_datalist = glob.glob(f"{val_dir}/*.npz") + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + + if test_dir is not None: + test_datalist = glob.glob(f"{test_dir}/*.npz") + print(f"test data is {len(test_datalist)}") + test_ds = MedicalDataset(test_datalist, test=True) + else : + test_ds = None + + train_ds = MedicalDataset(train_datalist) + val_ds = MedicalDataset(val_datalist) + + loader = [train_ds, val_ds, test_ds] + + return loader + +def get_train_val_test_loader_from_split_json(data_dir, split_json_file): + import json + + with open(split_json_file, "r") as f: + + datalist = json.loads(f.read()) + + train_datalist = datalist["train"] + val_datalist = datalist["validation"] + test_datalist = datalist["test"] + + def add_pre(datalist): + for i in range(len(datalist)): + datalist[i] = os.path.join(data_dir, datalist[i]) + + add_pre(train_datalist) + add_pre(val_datalist) + add_pre(test_datalist) + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + print(f"test data is {len(test_datalist)}", sorted(test_datalist)) + + train_ds = MedicalDataset(train_datalist) + val_ds = MedicalDataset(val_datalist) + test_ds = MedicalDataset(test_datalist) + + loader = [train_ds, val_ds, test_ds] + + return loader + + +def get_train_val_test_loader_from_train(data_dir, train_rate=0.7, val_rate=0.1, test_rate=0.2, seed=42): + ## train all labeled data + ## fold denote the validation data in training data + all_paths = glob.glob(f"{data_dir}/*.npz") + # fold_data = get_kfold_data(all_paths, 5)[fold] + + train_number = int(len(all_paths) * train_rate) + val_number = int(len(all_paths) * val_rate) + test_number = int(len(all_paths) * test_rate) + random.seed(seed) + # random_state = random.random + random.shuffle(all_paths) + + train_datalist = all_paths[:train_number] + val_datalist = all_paths[train_number: train_number + val_number] + test_datalist = all_paths[-test_number:] + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + print(f"test data is {len(test_datalist)}", sorted(test_datalist)) + + train_ds = MedicalDataset(train_datalist) + val_ds = MedicalDataset(val_datalist) + test_ds = MedicalDataset(test_datalist) + + loader = [train_ds, val_ds, test_ds] + + return loader + +def get_train_loader_from_train(data_dir): + ## train all labeled data + ## fold denote the validation data in training data + all_paths = glob.glob(f"{data_dir}/*.npz") + # fold_data = get_kfold_data(all_paths, 5)[fold] + + 
train_ds = MedicalDataset(all_paths) + + return train_ds + +def get_test_loader_from_test(data_dir): + all_paths = glob.glob(f"{data_dir}/*.npz") + + test_ds = MedicalDataset(all_paths) + + return test_ds + +def get_multi_dir_training_loader(data_dir, fold=0, test_dir=None): + ## train all labeled data + ## fold denote the validation data in training data + all_paths = [] + for p in data_dir: + paths = glob.glob(f"{p}/*.npz") + for pp in paths: + all_paths.append(pp) + + # print(all_paths) + fold_data = get_kfold_data(all_paths, 5)[fold] + + train_datalist = all_paths + val_datalist = fold_data["val_data"] + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + train_ds = MedicalDataset(train_datalist) + + val_ds = MedicalDataset(val_datalist) + + if test_dir is not None: + test_paths = glob.glob(f"{test_dir}/*.npz") + test_ds = MedicalDataset(test_paths, test=True) + else: + test_ds = None + + loader = [train_ds, val_ds, test_ds] + + return loader \ No newline at end of file diff --git a/SegMamba/light_training/dataloading/dataset_sdm_edge.py b/SegMamba/light_training/dataloading/dataset_sdm_edge.py new file mode 100644 index 0000000000000000000000000000000000000000..496d906b6b50b5fc2dde0b265ce4684b9ebc2394 --- /dev/null +++ b/SegMamba/light_training/dataloading/dataset_sdm_edge.py @@ -0,0 +1,331 @@ + +# Copyright 2020 - 2022 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
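+
+# Note: __getitem__ in this module expects a signed distance map that was
+# precomputed offline to ./data/fullres/train_sdm/{case_name}_seg_sdm.npy; the
+# script that writes those files is not part of this diff. A rough sketch of
+# that step, mirroring the commented-out post() further below (case_name and
+# seg are placeholder names, seg being the raw label volume of one case):
+#
+#     onehot = convert_labels(torch.from_numpy(seg)).numpy()    # (1, 3, D, H, W): TC, WT, ET
+#     sdm = 1 - compute_sdf(onehot, onehot.shape)                # inside voxels get the larger values
+#     sdm = sdm + edge_3d(onehot)                                # emphasize region boundaries
+#     np.save(f"./data/fullres/train_sdm/{case_name}_seg_sdm.npy", sdm)
+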
+from sklearn.model_selection import KFold ## K折交叉验证 +import pickle +import os +import json +import math +import numpy as np +import torch +from monai import transforms +import SimpleITK as sitk +from tqdm import tqdm +from torch.utils.data import Dataset +import glob +from light_training.dataloading.utils import unpack_dataset +import random +import torch +import numpy as np +from scipy.ndimage import distance_transform_edt as distance +from skimage import segmentation as skimage_seg +from skimage.morphology import dilation, disk +import scipy.ndimage as ndimage + +def get_edge_points(img): + """ + get edge points of a binary segmentation result + """ + dim = len(img.shape) + if (dim == 2): + strt = ndimage.generate_binary_structure(2, 1) + else: + strt = ndimage.generate_binary_structure(3, 1) + ero = ndimage.binary_erosion(img, strt) + edge = np.asarray(img, np.uint8) - np.asarray(ero, np.uint8) + return edge + +def edge_3d(image_3d): + # image_3d = torch.from_numpy(image_3d) + return_edge = np.zeros_like(image_3d) + + for i in range(image_3d.shape[0]): + for j in range(image_3d.shape[1]): + return_edge[i, j] = get_edge_points(image_3d[i, j]) + + return return_edge + +def compute_sdf(img_gt, out_shape): + """ + compute the signed distance map of binary mask + input: segmentation, shape = (batch_size,c, x, y, z) + output: the Signed Distance Map (SDM) + sdf(x) = 0; x in segmentation boundary + -inf|x-y|; x in segmentation + +inf|x-y|; x out of segmentation + normalize sdf to [-1,1] + + """ + + img_gt = img_gt.astype(np.uint8) + normalized_sdf = np.zeros(out_shape) + + for b in range(out_shape[0]): # batch size + for c in range(out_shape[1]): + posmask = img_gt[b, c].astype(np.bool_) + if posmask.any(): + negmask = ~posmask + posdis = distance(posmask) + negdis = distance(negmask) + boundary = skimage_seg.find_boundaries(posmask, mode='inner').astype(np.uint8) + sdf = (negdis-np.min(negdis))/(np.max(negdis)-np.min(negdis)) - (posdis-np.min(posdis))/(np.max(posdis)-np.min(posdis)) + sdf[boundary==1] = 0 + normalized_sdf[b][c] = sdf + assert np.min(sdf) == -1.0, print(np.min(posdis), np.max(posdis), np.min(negdis), np.max(negdis)) + assert np.max(sdf) == 1.0, print(np.min(posdis), np.min(negdis), np.max(posdis), np.max(negdis)) + + return normalized_sdf + +def convert_labels(labels): + ## TC, WT and ET + labels = labels[None, None] + result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3] + + return torch.cat(result, dim=1).float() + +class MedicalDataset(Dataset): + def __init__(self, datalist, test=False) -> None: + super().__init__() + + self.datalist = datalist + self.test = test + + self.data_cached = [] + for p in tqdm(self.datalist, total=len(self.datalist)): + info = self.load_pkl(p) + + self.data_cached.append(info) + + ## unpacking + print(f"unpacking data ....") + # for + folder = [] + for p in self.datalist: + f = os.path.dirname(p) + if f not in folder: + folder.append(f) + for f in folder: + unpack_dataset(f, + unpack_segmentation=True, + overwrite_existing=False, + num_processes=8) + + + print(f"data length is {len(self.datalist)}") + + def load_pkl(self, data_path): + pass + properties_path = f"{data_path[:-4]}.pkl" + df = open(properties_path, "rb") + info = pickle.load(df) + + return info + + def read_data(self, data_path): + + image_path = data_path.replace(".npz", ".npy") + seg_path = data_path.replace(".npz", "_seg.npy") + image_data = np.load(image_path, "r") + + seg_data = None + if not self.test: + seg_data = 
np.load(seg_path, "r") + return image_data, seg_data + + # def post(self, batch_data): + # seg = convert_labels(batch_data["seg"]).numpy() + # seg_shape = seg.shape + # seg_edge = edge_3d(seg) + # seg_sdm = 1 - compute_sdf(seg, out_shape=seg_shape) + # seg_sdm = seg_sdm + seg_edge + + # seg_edge = torch.from_numpy(seg_edge) + # seg_sdm = torch.from_numpy(seg_sdm) + + # batch_data["seg_edge"] = seg_edge + # batch_data["seg_sdm"] = seg_sdm + + # print(f"post!!!!!!!!!") + # return batch_data + + def __getitem__(self, i): + + image, seg = self.read_data(self.datalist[i]) + + properties = self.data_cached[i] + case_name = properties["name"] + + if seg is not None: + sdm = np.load(os.path.join("./data/fullres/train_sdm/", f"{case_name}_seg_sdm.npy"), "r") + + # print(seg.shape, sdm.shape) + sdm = sdm[0] + seg = np.concatenate([seg, sdm], axis=0) + + # print(f"sdm sum is {sdm.sum()}") + if seg is None: + return { + "data": image, + "properties": properties + } + else : + return { + "data": image, + "seg": seg, + "properties": properties + } + + def __len__(self): + return len(self.datalist) + +def get_kfold_data(data_paths, n_splits, shuffle=False): + X = np.arange(len(data_paths)) + kfold = KFold(n_splits=n_splits, shuffle=shuffle) ## kfold为KFolf类的一个对象 + return_res = [] + for a, b in kfold.split(X): + fold_train = [] + fold_val = [] + for i in a: + fold_train.append(data_paths[i]) + for j in b: + fold_val.append(data_paths[j]) + return_res.append({"train_data": fold_train, "val_data": fold_val}) + + return return_res + +def get_kfold_loader(data_dir, fold=0, test_dir=None): + + all_paths = glob.glob(f"{data_dir}/*.npz") + fold_data = get_kfold_data(all_paths, 5)[fold] + + train_datalist = fold_data["train_data"] + val_datalist = fold_data["val_data"] + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + train_ds = MedicalDataset(train_datalist) + + val_ds = MedicalDataset(val_datalist) + + if test_dir is not None: + test_paths = glob.glob(f"{test_dir}/*.npz") + test_ds = MedicalDataset(test_paths, test=True) + else: + test_ds = None + + loader = [train_ds, val_ds, test_ds] + + return loader + +def get_all_training_loader(data_dir, fold=0, test_dir=None): + ## train all labeled data + ## fold denote the validation data in training data + all_paths = glob.glob(f"{data_dir}/*.npz") + fold_data = get_kfold_data(all_paths, 5)[fold] + + train_datalist = all_paths + val_datalist = fold_data["val_data"] + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + train_ds = MedicalDataset(train_datalist) + + val_ds = MedicalDataset(val_datalist) + + if test_dir is not None: + test_paths = glob.glob(f"{test_dir}/*.npz") + test_ds = MedicalDataset(test_paths, test=True) + else: + test_ds = None + + loader = [train_ds, val_ds, test_ds] + + return loader + +def get_train_val_test_loader_seperate(train_dir, val_dir, test_dir=None): + train_datalist = glob.glob(f"{train_dir}/*.npz") + val_datalist = glob.glob(f"{val_dir}/*.npz") + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + + if test_dir is not None: + test_datalist = glob.glob(f"{test_dir}/*.npz") + print(f"test data is {len(test_datalist)}") + test_ds = MedicalDataset(test_datalist, test=True) + else : + test_ds = None + + train_ds = MedicalDataset(train_datalist) + val_ds = MedicalDataset(val_datalist) + + loader = [train_ds, val_ds, test_ds] + + return loader + +def 
get_train_val_test_loader_from_train(data_dir, train_rate=0.7, val_rate=0.1, test_rate=0.2): + ## train all labeled data + ## fold denote the validation data in training data + all_paths = glob.glob(f"{data_dir}/*.npz") + # fold_data = get_kfold_data(all_paths, 5)[fold] + + train_number = int(len(all_paths) * train_rate) + val_number = int(len(all_paths) * val_rate) + test_number = int(len(all_paths) * test_rate) + + random.shuffle(all_paths) + + train_datalist = all_paths[:train_number] + val_datalist = all_paths[train_number: train_number + val_number] + test_datalist = all_paths[-test_number:] + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + print(f"test data is {len(test_datalist)}") + + train_ds = MedicalDataset(train_datalist) + val_ds = MedicalDataset(val_datalist) + test_ds = MedicalDataset(test_datalist) + + loader = [train_ds, val_ds, test_ds] + + return loader + +def get_multi_dir_training_loader(data_dir, fold=0, test_dir=None): + ## train all labeled data + ## fold denote the validation data in training data + all_paths = [] + for p in data_dir: + paths = glob.glob(f"{p}/*.npz") + for pp in paths: + all_paths.append(pp) + + # print(all_paths) + fold_data = get_kfold_data(all_paths, 5)[fold] + + train_datalist = all_paths + val_datalist = fold_data["val_data"] + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + train_ds = MedicalDataset(train_datalist) + + val_ds = MedicalDataset(val_datalist) + + if test_dir is not None: + test_paths = glob.glob(f"{test_dir}/*.npz") + test_ds = MedicalDataset(test_paths, test=True) + else: + test_ds = None + + loader = [train_ds, val_ds, test_ds] + + return loader \ No newline at end of file diff --git a/SegMamba/light_training/dataloading/get_train_val_test_datalist.py b/SegMamba/light_training/dataloading/get_train_val_test_datalist.py new file mode 100644 index 0000000000000000000000000000000000000000..22edcd46c83c6347fc8dbcc59c4cd5bb0789515a --- /dev/null +++ b/SegMamba/light_training/dataloading/get_train_val_test_datalist.py @@ -0,0 +1,36 @@ + +import glob +import random +import json + +def get_train_val_test_list_from_fulldata(data_dir, train_rate=0.7, val_rate=0.1, test_rate=0.2, seed=42): + all_paths = glob.glob(f"{data_dir}/*.npz") + + ## eliminate the pre + all_paths_save = [] + for p in all_paths: + all_paths_save.append(p.split("/")[-1]) + all_paths = all_paths_save + train_number = int(len(all_paths) * train_rate) + val_number = int(len(all_paths) * val_rate) + test_number = int(len(all_paths) * test_rate) + random.seed(seed) + random.shuffle(all_paths) + train_datalist = all_paths[:train_number] + val_datalist = all_paths[train_number: train_number + val_number] + test_datalist = all_paths[-test_number:] + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + print(f"test data is {len(test_datalist)}", sorted(test_datalist)) + + datalist = { + "train": train_datalist, + "validation": val_datalist, + "test": test_datalist + } + + datalist = json.dumps(datalist) + + with open("./data_split.json", "w") as f: + f.write(datalist) diff --git a/SegMamba/light_training/dataloading/utils.py b/SegMamba/light_training/dataloading/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3cc60e7b992fb915c01bb7a7a5f6857bb1fb4dc8 --- /dev/null +++ b/SegMamba/light_training/dataloading/utils.py @@ -0,0 +1,25 @@ +import numpy as np +import os +from 
batchgenerators.utilities.file_and_folder_operations import isfile, subfiles +import multiprocessing + +def _convert_to_npy(npz_file: str, unpack_segmentation: bool = True, overwrite_existing: bool = False) -> None: + # try: + a = np.load(npz_file) # inexpensive, no compression is done here. This just reads metadata + if overwrite_existing or not isfile(npz_file[:-3] + "npy"): + np.save(npz_file[:-3] + "npy", a['data']) + + if unpack_segmentation and (overwrite_existing or not isfile(npz_file[:-4] + "_seg.npy")): + np.save(npz_file[:-4] + "_seg.npy", a['seg']) + +def unpack_dataset(folder: str, unpack_segmentation: bool = True, overwrite_existing: bool = False, + num_processes: int = 8): + """ + all npz files in this folder belong to the dataset, unpack them all + """ + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + npz_files = subfiles(folder, True, None, ".npz", True) + p.starmap(_convert_to_npy, zip(npz_files, + [unpack_segmentation] * len(npz_files), + [overwrite_existing] * len(npz_files)) + ) diff --git a/SegMamba/light_training/dataloading_global/__init__.py b/SegMamba/light_training/dataloading_global/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SegMamba/light_training/dataloading_global/dataset.py b/SegMamba/light_training/dataloading_global/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..2fbf3646921db9d0a390fd250099ab76e2296d1b --- /dev/null +++ b/SegMamba/light_training/dataloading_global/dataset.py @@ -0,0 +1,329 @@ + +# Copyright 2020 - 2022 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
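+
+# "Global" variant of light_training/dataloading/dataset.py: read_data() below
+# additionally memory-maps the whole-volume arrays {case}_global.npy and
+# {case}_global_seg.npy written by dataloading_global.utils.unpack_dataset, and
+# __getitem__ returns them under the extra keys "data_global" / "seg_global",
+# which DataLoaderMultiProcess.generate_train_batch copies into the batch.
+# Minimal usage sketch (the data directory here is a placeholder path):
+#
+#     train_ds, val_ds, test_ds = get_kfold_loader("./data/fullres/train", fold=0)
+#     sample = train_ds[0]
+#     print(sample["data"].shape, sample["data_global"].shape)
+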
+from sklearn.model_selection import KFold ## K折交叉验证 +import pickle +import os +import json +import math +import numpy as np +import torch +from monai import transforms +import SimpleITK as sitk +from tqdm import tqdm +from torch.utils.data import Dataset +import glob +from light_training.dataloading_global.utils import unpack_dataset +import random + +class MedicalDataset(Dataset): + def __init__(self, datalist, test=False) -> None: + super().__init__() + + self.datalist = datalist + self.test = test + + self.data_cached = [] + for p in tqdm(self.datalist, total=len(self.datalist)): + info = self.load_pkl(p) + + self.data_cached.append(info) + + ## unpacking + print(f"unpacking data ....") + # for + folder = [] + for p in self.datalist: + f = os.path.dirname(p) + if f not in folder: + folder.append(f) + for f in folder: + unpack_dataset(f, + unpack_segmentation=True, + overwrite_existing=False, + num_processes=8) + + + print(f"data length is {len(self.datalist)}") + + def load_pkl(self, data_path): + pass + properties_path = f"{data_path[:-4]}.pkl" + df = open(properties_path, "rb") + info = pickle.load(df) + + return info + + def post(self, batch_data): + return batch_data + + def read_data(self, data_path): + + image_path = data_path.replace(".npz", ".npy") + seg_path = data_path.replace(".npz", "_seg.npy") + image_global_path = data_path.replace(".npz", "_global.npy") + seg_global_path = data_path.replace(".npz", "_global_seg.npy") + + image_data = np.load(image_path, "r+") + image_data_global = np.load(image_global_path, "r+") + + seg_data = None + if not self.test: + seg_data = np.load(seg_path, "r+") + seg_global_data = np.load(seg_global_path, "r+") + + return image_data, image_data_global, seg_data, seg_global_data + + + def __getitem__(self, i): + + image, image_data_global, seg, seg_global = self.read_data(self.datalist[i]) + + # print(image_data_global.shape) + properties = self.data_cached[i] + + if seg is None: + return { + "data": image, + "data_global": image_data_global, + "properties": properties + } + else : + return { + "data": image, + "data_global": image_data_global, + "seg": seg, + "seg_global": seg_global, + "properties": properties + } + + def __len__(self): + return len(self.datalist) + +def get_train_test_loader_from_test_list(data_dir, test_list): + all_paths = glob.glob(f"{data_dir}/*.npz") + + test_datalist = [] + train_datalist = [] + + test_list_1 = [] + for t in test_list: + test_list_1.append(t.replace(".nii.gz", "")) + + test_list = test_list_1 + for p in all_paths: + p2 = p.split("/")[-1].split(".")[0] + if p2 in test_list: + test_datalist.append(p) + else : + train_datalist.append(p) + + print(f"training data is {len(train_datalist)}") + print(f"test data is {len(test_datalist)}", test_datalist) + + train_ds = MedicalDataset(train_datalist) + test_ds = MedicalDataset(test_datalist) + + loader = [train_ds, test_ds] + + return loader + +def get_kfold_data(data_paths, n_splits, shuffle=False): + X = np.arange(len(data_paths)) + kfold = KFold(n_splits=n_splits, shuffle=shuffle) ## kfold为KFolf类的一个对象 + return_res = [] + for a, b in kfold.split(X): + fold_train = [] + fold_val = [] + for i in a: + fold_train.append(data_paths[i]) + for j in b: + fold_val.append(data_paths[j]) + return_res.append({"train_data": fold_train, "val_data": fold_val}) + + return return_res + +def get_kfold_loader(data_dir, fold=0, test_dir=None): + + all_paths = glob.glob(f"{data_dir}/*.npz") + fold_data = get_kfold_data(all_paths, 5)[fold] + + train_datalist = 
fold_data["train_data"] + val_datalist = fold_data["val_data"] + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + train_ds = MedicalDataset(train_datalist) + + val_ds = MedicalDataset(val_datalist) + + if test_dir is not None: + test_paths = glob.glob(f"{test_dir}/*.npz") + test_ds = MedicalDataset(test_paths, test=True) + else: + test_ds = None + + loader = [train_ds, val_ds, test_ds] + + return loader + +def get_all_training_loader(data_dir, fold=0, test_dir=None): + ## train all labeled data + ## fold denote the validation data in training data + all_paths = glob.glob(f"{data_dir}/*.npz") + fold_data = get_kfold_data(all_paths, 5)[fold] + + train_datalist = all_paths + val_datalist = fold_data["val_data"] + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + train_ds = MedicalDataset(train_datalist) + + val_ds = MedicalDataset(val_datalist) + + if test_dir is not None: + test_paths = glob.glob(f"{test_dir}/*.npz") + test_ds = MedicalDataset(test_paths, test=True) + else: + test_ds = None + + loader = [train_ds, val_ds, test_ds] + + return loader + +def get_train_val_test_loader_seperate(train_dir, val_dir, test_dir=None): + train_datalist = glob.glob(f"{train_dir}/*.npz") + val_datalist = glob.glob(f"{val_dir}/*.npz") + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + + if test_dir is not None: + test_datalist = glob.glob(f"{test_dir}/*.npz") + print(f"test data is {len(test_datalist)}") + test_ds = MedicalDataset(test_datalist, test=True) + else : + test_ds = None + + train_ds = MedicalDataset(train_datalist) + val_ds = MedicalDataset(val_datalist) + + loader = [train_ds, val_ds, test_ds] + + return loader + +def get_train_val_test_loader_from_train(data_dir, train_rate=0.7, val_rate=0.1, test_rate=0.2, seed=42): + ## train all labeled data + ## fold denote the validation data in training data + all_paths = glob.glob(f"{data_dir}/*.npz") + # fold_data = get_kfold_data(all_paths, 5)[fold] + + train_number = int(len(all_paths) * train_rate) + val_number = int(len(all_paths) * val_rate) + test_number = int(len(all_paths) * test_rate) + random.seed(seed) + # random_state = random.random + random.shuffle(all_paths) + train_datalist = all_paths[:train_number] + val_datalist = all_paths[train_number: train_number + val_number] + test_datalist = all_paths[-test_number:] + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + print(f"test data is {len(test_datalist)}", sorted(test_datalist)) + + train_ds = MedicalDataset(train_datalist) + val_ds = MedicalDataset(val_datalist) + test_ds = MedicalDataset(test_datalist) + + loader = [train_ds, val_ds, test_ds] + + return loader + +def get_train_val_test_loader_from_split_json(data_dir, split_json_file): + import json + + with open(split_json_file, "r") as f: + + datalist = json.loads(f.read()) + + train_datalist = datalist["train"] + val_datalist = datalist["validation"] + test_datalist = datalist["test"] + + def add_pre(datalist): + for i in range(len(datalist)): + datalist[i] = os.path.join(data_dir, datalist[i]) + + add_pre(train_datalist) + add_pre(val_datalist) + add_pre(test_datalist) + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + print(f"test data is {len(test_datalist)}", sorted(test_datalist)) + + train_ds = MedicalDataset(train_datalist) + val_ds = 
MedicalDataset(val_datalist) + test_ds = MedicalDataset(test_datalist) + + loader = [train_ds, val_ds, test_ds] + + return loader + +def get_train_loader_from_train(data_dir): + ## train all labeled data + ## fold denote the validation data in training data + all_paths = glob.glob(f"{data_dir}/*.npz") + # fold_data = get_kfold_data(all_paths, 5)[fold] + + train_ds = MedicalDataset(all_paths) + + return train_ds + +def get_test_loader_from_test(data_dir): + all_paths = glob.glob(f"{data_dir}/*.npz") + + test_ds = MedicalDataset(all_paths) + + return test_ds + +def get_multi_dir_training_loader(data_dir, fold=0, test_dir=None): + ## train all labeled data + ## fold denote the validation data in training data + all_paths = [] + for p in data_dir: + paths = glob.glob(f"{p}/*.npz") + for pp in paths: + all_paths.append(pp) + + # print(all_paths) + fold_data = get_kfold_data(all_paths, 5)[fold] + + train_datalist = all_paths + val_datalist = fold_data["val_data"] + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + train_ds = MedicalDataset(train_datalist) + + val_ds = MedicalDataset(val_datalist) + + if test_dir is not None: + test_paths = glob.glob(f"{test_dir}/*.npz") + test_ds = MedicalDataset(test_paths, test=True) + else: + test_ds = None + + loader = [train_ds, val_ds, test_ds] + + return loader \ No newline at end of file diff --git a/SegMamba/light_training/dataloading_global/utils.py b/SegMamba/light_training/dataloading_global/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..cf25e09c6e722316c21b4fefddaf146de1cd4358 --- /dev/null +++ b/SegMamba/light_training/dataloading_global/utils.py @@ -0,0 +1,27 @@ +import numpy as np +import os +from batchgenerators.utilities.file_and_folder_operations import isfile, subfiles +import multiprocessing + +def _convert_to_npy(npz_file: str, unpack_segmentation: bool = True, overwrite_existing: bool = False) -> None: + # try: + a = np.load(npz_file) # inexpensive, no compression is done here. This just reads metadata + if overwrite_existing or not isfile(npz_file[:-3] + "npy"): + np.save(npz_file[:-3] + "npy", a['data']) + np.save(npz_file[:-4] + "_global.npy", a['data_global']) + np.save(npz_file[:-4] + "_global_seg.npy", a['seg_global']) + + if unpack_segmentation and (overwrite_existing or not isfile(npz_file[:-4] + "_seg.npy")): + np.save(npz_file[:-4] + "_seg.npy", a['seg']) + +def unpack_dataset(folder: str, unpack_segmentation: bool = True, overwrite_existing: bool = False, + num_processes: int = 8): + """ + all npz files in this folder belong to the dataset, unpack them all + """ + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + npz_files = subfiles(folder, True, None, ".npz", True) + p.starmap(_convert_to_npy, zip(npz_files, + [unpack_segmentation] * len(npz_files), + [overwrite_existing] * len(npz_files)) + ) diff --git a/SegMamba/light_training/evaluation/metric.py b/SegMamba/light_training/evaluation/metric.py new file mode 100644 index 0000000000000000000000000000000000000000..eed0b2c9debaad190b3807ff14b113920789059c --- /dev/null +++ b/SegMamba/light_training/evaluation/metric.py @@ -0,0 +1,406 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +from medpy import metric + + +def assert_shape(test, reference): + + assert test.shape == reference.shape, "Shape mismatch: {} and {}".format( + test.shape, reference.shape) + + +class ConfusionMatrix: + + def __init__(self, test=None, reference=None): + + self.tp = None + self.fp = None + self.tn = None + self.fn = None + self.size = None + self.reference_empty = None + self.reference_full = None + self.test_empty = None + self.test_full = None + self.set_reference(reference) + self.set_test(test) + + def set_test(self, test): + + self.test = test + self.reset() + + def set_reference(self, reference): + + self.reference = reference + self.reset() + + def reset(self): + + self.tp = None + self.fp = None + self.tn = None + self.fn = None + self.size = None + self.test_empty = None + self.test_full = None + self.reference_empty = None + self.reference_full = None + + def compute(self): + + if self.test is None or self.reference is None: + raise ValueError("'test' and 'reference' must both be set to compute confusion matrix.") + + assert_shape(self.test, self.reference) + + self.tp = int(((self.test != 0) * (self.reference != 0)).sum()) + self.fp = int(((self.test != 0) * (self.reference == 0)).sum()) + self.tn = int(((self.test == 0) * (self.reference == 0)).sum()) + self.fn = int(((self.test == 0) * (self.reference != 0)).sum()) + self.size = int(np.prod(self.reference.shape, dtype=np.int64)) + self.test_empty = not np.any(self.test) + self.test_full = np.all(self.test) + self.reference_empty = not np.any(self.reference) + self.reference_full = np.all(self.reference) + + def get_matrix(self): + + for entry in (self.tp, self.fp, self.tn, self.fn): + if entry is None: + self.compute() + break + + return self.tp, self.fp, self.tn, self.fn + + def get_size(self): + + if self.size is None: + self.compute() + return self.size + + def get_existence(self): + + for case in (self.test_empty, self.test_full, self.reference_empty, self.reference_full): + if case is None: + self.compute() + break + + return self.test_empty, self.test_full, self.reference_empty, self.reference_full + + +def dice(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """2TP / (2TP + FP + FN)""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence() + + if test_empty and reference_empty: + if nan_for_nonexisting: + return float("NaN") + else: + return 0. + + return float(2. * tp / (2 * tp + fp + fn)) + + +def jaccard(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """TP / (TP + FP + FN)""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence() + + if test_empty and reference_empty: + if nan_for_nonexisting: + return float("NaN") + else: + return 0. 
+ + return float(tp / (tp + fp + fn)) + + +def precision(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """TP / (TP + FP)""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence() + + if test_empty: + if nan_for_nonexisting: + return float("NaN") + else: + return 0. + + return float(tp / (tp + fp)) + + +def sensitivity(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """TP / (TP + FN)""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence() + + if reference_empty: + if nan_for_nonexisting: + return float("NaN") + else: + return 0. + + return float(tp / (tp + fn)) + + +def recall(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """TP / (TP + FN)""" + + return sensitivity(test, reference, confusion_matrix, nan_for_nonexisting, **kwargs) + + +def specificity(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """TN / (TN + FP)""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence() + + if reference_full: + if nan_for_nonexisting: + return float("NaN") + else: + return 0. + + return float(tn / (tn + fp)) + + +def accuracy(test=None, reference=None, confusion_matrix=None, **kwargs): + """(TP + TN) / (TP + FP + FN + TN)""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + + return float((tp + tn) / (tp + fp + tn + fn)) + + +def fscore(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, beta=1., **kwargs): + """(1 + b^2) * TP / ((1 + b^2) * TP + b^2 * FN + FP)""" + + precision_ = precision(test, reference, confusion_matrix, nan_for_nonexisting) + recall_ = recall(test, reference, confusion_matrix, nan_for_nonexisting) + + return (1 + beta*beta) * precision_ * recall_ /\ + ((beta*beta * precision_) + recall_) + + +def false_positive_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """FP / (FP + TN)""" + + return 1 - specificity(test, reference, confusion_matrix, nan_for_nonexisting) + + +def false_omission_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """FN / (TN + FN)""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence() + + if test_full: + if nan_for_nonexisting: + return float("NaN") + else: + return 0. 
+ + return float(fn / (fn + tn)) + + +def false_negative_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """FN / (TP + FN)""" + + return 1 - sensitivity(test, reference, confusion_matrix, nan_for_nonexisting) + + +def true_negative_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """TN / (TN + FP)""" + + return specificity(test, reference, confusion_matrix, nan_for_nonexisting) + + +def false_discovery_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """FP / (TP + FP)""" + + return 1 - precision(test, reference, confusion_matrix, nan_for_nonexisting) + + +def negative_predictive_value(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """TN / (TN + FN)""" + + return 1 - false_omission_rate(test, reference, confusion_matrix, nan_for_nonexisting) + + +def total_positives_test(test=None, reference=None, confusion_matrix=None, **kwargs): + """TP + FP""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + + return tp + fp + + +def total_negatives_test(test=None, reference=None, confusion_matrix=None, **kwargs): + """TN + FN""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + + return tn + fn + + +def total_positives_reference(test=None, reference=None, confusion_matrix=None, **kwargs): + """TP + FN""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + + return tp + fn + + +def total_negatives_reference(test=None, reference=None, confusion_matrix=None, **kwargs): + """TN + FP""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + + return tn + fp + + +def hausdorff_distance(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, voxel_spacing=None, connectivity=1, **kwargs): + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence() + + if test_empty or test_full or reference_empty or reference_full: + if nan_for_nonexisting: + return float("NaN") + else: + return 0 + + test, reference = confusion_matrix.test, confusion_matrix.reference + + return metric.hd(test, reference, voxel_spacing, connectivity) + + +def hausdorff_distance_95(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, voxel_spacing=None, connectivity=1, **kwargs): + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence() + + if test_empty or test_full or reference_empty or reference_full: + if nan_for_nonexisting: + return float("NaN") + else: + return 0 + + test, reference = confusion_matrix.test, confusion_matrix.reference + + return metric.hd95(test, reference, voxel_spacing, connectivity) + + +def avg_surface_distance(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, voxel_spacing=None, connectivity=1, **kwargs): + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence() + + if 
test_empty or test_full or reference_empty or reference_full: + if nan_for_nonexisting: + return float("NaN") + else: + return 0 + + test, reference = confusion_matrix.test, confusion_matrix.reference + + return metric.asd(test, reference, voxel_spacing, connectivity) + + +def avg_surface_distance_symmetric(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, voxel_spacing=None, connectivity=1, **kwargs): + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence() + + if test_empty or test_full or reference_empty or reference_full: + if nan_for_nonexisting: + return float("NaN") + else: + return 0 + + test, reference = confusion_matrix.test, confusion_matrix.reference + + return metric.assd(test, reference, voxel_spacing, connectivity) + + +ALL_METRICS = { + "False Positive Rate": false_positive_rate, + "Dice": dice, + "Jaccard": jaccard, + "Hausdorff Distance": hausdorff_distance, + "Hausdorff Distance 95": hausdorff_distance_95, + "Precision": precision, + "Recall": recall, + "Avg. Symmetric Surface Distance": avg_surface_distance_symmetric, + "Avg. Surface Distance": avg_surface_distance, + "Accuracy": accuracy, + "False Omission Rate": false_omission_rate, + "Negative Predictive Value": negative_predictive_value, + "False Negative Rate": false_negative_rate, + "True Negative Rate": true_negative_rate, + "False Discovery Rate": false_discovery_rate, + "Total Positives Test": total_positives_test, + "Total Negatives Test": total_negatives_test, + "Total Positives Reference": total_positives_reference, + "total Negatives Reference": total_negatives_reference +} \ No newline at end of file diff --git a/SegMamba/light_training/examples/1_rename_mri_data_BraTS2023.py b/SegMamba/light_training/examples/1_rename_mri_data_BraTS2023.py new file mode 100644 index 0000000000000000000000000000000000000000..223733edcf5f4c52c832b52df8c1a9d29513182d --- /dev/null +++ b/SegMamba/light_training/examples/1_rename_mri_data_BraTS2023.py @@ -0,0 +1,27 @@ + + + +import os + +# data_dir = "./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/" +data_dir = "./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-ValidationData/" + +all_cases = os.listdir(data_dir) + +for case_name in all_cases: + case_dir = os.path.join(data_dir, case_name) + + for data_name in os.listdir(case_dir): + + if "-" not in data_name: + continue + new_name = data_name.split("-")[-1] + + new_path = os.path.join(case_dir, new_name) + + old_path = os.path.join(case_dir, data_name) + + os.rename(old_path, new_path) + + print(f"{new_path} renamed successfully") + diff --git a/SegMamba/light_training/examples/2_preprocessing_AIIB23.py b/SegMamba/light_training/examples/2_preprocessing_AIIB23.py new file mode 100644 index 0000000000000000000000000000000000000000..77b52c2050af19bdbfe8a2998d9cf35d918cef95 --- /dev/null +++ b/SegMamba/light_training/examples/2_preprocessing_AIIB23.py @@ -0,0 +1,130 @@ + +from light_training.preprocessing.preprocessors.default_preprocessor import DefaultPreprocessor +import numpy as np +import pickle +import json + + +def process_train(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # median_shape is [602.5 516.5 516.5] + base_dir = "./data/raw_data/AIIB23_Train_T1" + image_dir = "img" + label_dir = "gt" + preprocessor = DefaultPreprocessor(base_dir=base_dir, + image_dir=image_dir, + label_dir=label_dir, + ) + + out_spacing = [0.5, 0.70410156,
0.70410156] + output_dir = "./data/fullres/train/" + + with open("./data_analysis_result.txt", "r") as f: + content = f.read().strip("\n") + print(content) + content = eval(content) + foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"] + + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, ], + num_processes=16, + foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel) + +def process_val(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # median_shape is [602.5 516.5 516.5] + base_dir = "./data/raw_data/Val" + image_dir = "img" + preprocessor = DefaultPreprocessor(base_dir=base_dir, + image_dir=image_dir, + label_dir=None, + ) + + out_spacing = [0.5, 0.70410156, 0.70410156] + + with open("./data_analysis_result.txt", "r") as f: + content = f.read().strip("\n") + print(content) + content = eval(content) + foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"] + + output_dir = "./data/fullres/val_test/" + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, ], + foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel, + num_processes=16) + +def process_val_semi(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # median_shape is [602.5 516.5 516.5] + base_dir = "./data/raw_data/Val_semi_postprocess" + image_dir = "img" + preprocessor = DefaultPreprocessor(base_dir=base_dir, + image_dir=image_dir, + label_dir="gt", + ) + + out_spacing = [0.5, 0.70410156, 0.70410156] + + with open("./data_analysis_result.txt", "r") as f: + content = f.read().strip("\n") + print(content) + content = eval(content) + foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"] + + output_dir = "./data/fullres/val_semi_postprocess/" + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, ], + foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel) + + +def plan(): + base_dir = "./data/raw_data/AIIB23_Train_T1" + image_dir = "img" + label_dir = "gt" + + preprocessor = DefaultPreprocessor(base_dir=base_dir, + image_dir=image_dir, + label_dir=label_dir, + ) + + preprocessor.run_plan() + +if __name__ == "__main__": + + # plan() + + process_train() + # import time + # s = time.time() + # process_val() + # e = time.time() + + # print(f"preprocessing time is {e - s}") + + # process_val_semi() + + +# + # preprocessor.run(output_spacing=[3, 0.9765625, 0.9765625], output_dir=output_dir) + + # data = np.load("/home/xingzhaohu/sharefs/datasets/AIIB23_nnunet/train/AIIB23_96.npz") + + # image = data["data"] + # label = data["seg"] + # print(image.shape) + # print(label.shape) + + # import matplotlib.pyplot as plt + + # for i in range(20): + # plt.imshow(image[0, i], cmap="gray") + # plt.show() + + # df = open("/home/xingzhaohu/sharefs/datasets/AIIB23_nnunet/train/AIIB23_96.pkl", "rb") + + # info = pickle.load(df) + # print(info) \ No newline at end of file diff --git a/SegMamba/light_training/examples/2_preprocessing_BraTS2023.py b/SegMamba/light_training/examples/2_preprocessing_BraTS2023.py new file mode 100644 index 0000000000000000000000000000000000000000..ac97dbdeea89eab4c281db066b23452599b638cd --- /dev/null +++ b/SegMamba/light_training/examples/2_preprocessing_BraTS2023.py @@ -0,0 +1,94 @@ + +from light_training.preprocessing.preprocessors.preprocessor_mri import MultiModalityPreprocessor +import numpy as np 
+import pickle +import json + +data_filename = ["t2w.nii.gz", + "t2f.nii.gz", + "t1n.nii.gz", + "t1c.nii.gz"] +seg_filename = "seg.nii.gz" + +def process_train(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # median_shape is [602.5 516.5 516.5] + base_dir = "./data/raw_data/BraTS2023/" + image_dir = "ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData" + preprocessor = MultiModalityPreprocessor(base_dir=base_dir, + image_dir=image_dir, + data_filenames=data_filename, + seg_filename=seg_filename + ) + + out_spacing = [1.0, 1.0, 1.0] + output_dir = "./data/fullres/train/" + + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, 2, 3], + ) + +def process_val(): + base_dir = "./data/raw_data/BraTS2023/" + image_dir = "ASNR-MICCAI-BraTS2023-GLI-Challenge-ValidationData" + preprocessor = MultiModalityPreprocessor(base_dir=base_dir, + image_dir=image_dir, + data_filenames=data_filename, + seg_filename="" + ) + + out_spacing = [1.0, 1.0, 1.0] + output_dir = "./data/fullres/val/" + + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, 2, 3], + ) + +def process_test(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # median_shape is [602.5 516.5 516.5] + base_dir = "/home/xingzhaohu/sharefs/datasets/WORD-V0.1.0/" + image_dir = "imagesTs" + label_dir = "labelsTs" + preprocessor = DefaultPreprocessor(base_dir=base_dir, + image_dir=image_dir, + label_dir=label_dir, + ) + + out_spacing = [3.0, 0.9765625, 0.9765625] + + output_dir = "./data/fullres/test/" + with open("./data_analysis_result.txt", "r") as f: + content = f.read().strip("\n") + print(content) + content = json.loads(content) + foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"] + + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel) + + +def plan(): + base_dir = "./data/raw_data/BraTS2023/" + image_dir = "ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData" + preprocessor = MultiModalityPreprocessor(base_dir=base_dir, + image_dir=image_dir, + data_filenames=data_filename, + seg_filename=seg_filename + ) + + preprocessor.run_plan() + + +if __name__ == "__main__": +# + # plan() + + process_train() + # process_val() + # process_test() + diff --git a/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/2_preprocessing_AbdomenAtlas1_0Mini.py b/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/2_preprocessing_AbdomenAtlas1_0Mini.py new file mode 100644 index 0000000000000000000000000000000000000000..00d5a587a8ee9b197990ee1f6538fce386062963 --- /dev/null +++ b/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/2_preprocessing_AbdomenAtlas1_0Mini.py @@ -0,0 +1,122 @@ + +from light_training.preprocessing.preprocessors.default_preprocessor_AbdomenAtlas1_0Mini import DefaultPreprocessor +import numpy as np +import pickle +import json + + +def process_train(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # median_shape is [602.5 516.5 516.5] + base_dir = "/home/xingzhaohu/data/AbdomenAtlas1.0Mini" + + preprocessor = DefaultPreprocessor(base_dir=base_dir) + + out_spacing = [2.0, 0.8134765, 0.83007812] + output_dir = "./data/fullres/train/" + + with open("./data_analysis_result.txt", "r") as f: + content = f.read().strip("\n") + print(content) + content = eval(content) + foreground_intensity_properties_per_channel = 
content["intensity_statistics_per_channel"] + + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, 2, 3, 4, 5, 6, 7, 8, 9], + num_processes=16, + foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel) + +def process_val(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # median_shape is [602.5 516.5 516.5] + base_dir = "./data/raw_data/Val" + image_dir = "img" + preprocessor = DefaultPreprocessor(base_dir=base_dir, + image_dir=image_dir, + label_dir=None, + ) + + out_spacing = [0.5, 0.70410156, 0.70410156] + + with open("./data_analysis_result.txt", "r") as f: + content = f.read().strip("\n") + print(content) + content = eval(content) + foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"] + + output_dir = "./data/fullres/val_test/" + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, ], + foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel, + num_processes=16) + +def process_test(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # median_shape is [602.5 516.5 516.5] + base_dir = "/home/xingzhaohu/data/AbdomenAtlas1.0Mini_test" + preprocessor = DefaultPreprocessor(base_dir=base_dir) + + out_spacing = [2.0, 0.8134765, 0.83007812] + output_dir = "./data/fullres/test/" + + with open("./data_analysis_result.txt", "r") as f: + content = f.read().strip("\n") + print(content) + content = eval(content) + foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"] + + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, 2, 3, 4, 5, 6, 7, 8, 9], + num_processes=16, + foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel) + + +def plan(): + base_dir = "/home/xingzhaohu/data/AbdomenAtlas1.0Mini" + + preprocessor = DefaultPreprocessor(base_dir=base_dir, + + ) + + preprocessor.run_plan() + +if __name__ == "__main__": + + # plan() + + # process_train() + + process_test() + # import time + # s = time.time() + # process_val() + # e = time.time() + + # print(f"preprocessing time is {e - s}") + + # process_val_semi() + + +# + # preprocessor.run(output_spacing=[3, 0.9765625, 0.9765625], output_dir=output_dir) + + # data = np.load("/home/xingzhaohu/sharefs/datasets/AIIB23_nnunet/train/AIIB23_96.npz") + + # image = data["data"] + # label = data["seg"] + # print(image.shape) + # print(label.shape) + + # import matplotlib.pyplot as plt + + # for i in range(20): + # plt.imshow(image[0, i], cmap="gray") + # plt.show() + + # df = open("/home/xingzhaohu/sharefs/datasets/AIIB23_nnunet/train/AIIB23_96.pkl", "rb") + + # info = pickle.load(df) + # print(info) \ No newline at end of file diff --git a/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/3_train_diffunet.py b/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/3_train_diffunet.py new file mode 100644 index 0000000000000000000000000000000000000000..eebd98d634e7dbd2efc4e8fc31c9745ee84b46da --- /dev/null +++ b/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/3_train_diffunet.py @@ -0,0 +1,215 @@ +import numpy as np +from light_training.dataloading.dataset import get_train_val_test_loader_from_train +# from dataset.brats_data_utils_resample128 import get_loader_brats +import torch +import torch.nn as nn +# from ddim_seg.basic_unet import BasicUNet +from monai.networks.nets.unetr import UNETR +from monai.networks.nets.swin_unetr import SwinUNETR +from monai.inferers import 
SlidingWindowInferer +from light_training.evaluation.metric import dice +from light_training.trainer import Trainer +from monai.utils import set_determinism +from light_training.utils.lr_scheduler import LinearWarmupCosineAnnealingLR +from light_training.utils.files_helper import save_new_model_and_delete_last +from models.uent2d import UNet2D +from models.uent3d import UNet3D +from monai.networks.nets.segresnet import SegResNet +# from ddim_seg.unet3d import DiffusionUNet +# from ddim_seg.ddim import DDIM +# from ddim_seg.nnunet3d_raw import Generic_UNet +# from ddim_seg.basic_unet_denose import BasicUNetDe +# from ddim_seg.basic_unet import BasicUNetEncoder +from models.transbts.TransBTS_downsample8x_skipconnection import TransBTS +import argparse +from monai.losses.dice import DiceLoss +# from light_training.model.bit_diffusion import decimal_to_bits, bits_to_decimal + +# from guided_diffusion.gaussian_diffusion import get_named_beta_schedule, ModelMeanType, ModelVarType,LossType +# from guided_diffusion.respace import SpacedDiffusion, space_timesteps +# from guided_diffusion.resample import UniformSampler +set_determinism(123) +import os +from scipy import ndimage + + +os.environ["CUDA_VISIBLE_DEVICES"] = "6,7" +data_dir = "./data/fullres/train" + +logdir = f"./logs_gpu4/diffunet_ep2000" + +model_save_path = os.path.join(logdir, "model") +# augmentation = "nomirror" +augmentation = True + +env = "pytorch" +max_epoch = 2000 +batch_size = 2 +val_every = 2 +num_gpus = 1 +device = "cuda:0" +roi_size = [128, 128, 128] + +def get_edge_points(img): + """ + get edge points of a binary segmentation result + """ + dim = len(img.shape) + if (dim == 2): + strt = ndimage.generate_binary_structure(2, 1) + else: + strt = ndimage.generate_binary_structure(3, 1) + ero = ndimage.binary_erosion(img, strt) + edge = np.asarray(img, np.uint8) - np.asarray(ero, np.uint8) + return edge + +def edge_3d(image_3d): + # image_3d = torch.from_numpy(image_3d) + b, c, d, h, w = image_3d.shape + + image_3d = image_3d[:, 0] > 0 + + return_edge = [] + + for i in range(image_3d.shape[0]): + return_edge.append(get_edge_points(image_3d[i])[None,]) + + return_edge = np.concatenate(return_edge, axis=0) + + return return_edge + +class BraTSTrainer(Trainer): + def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"): + super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script) + self.window_infer = SlidingWindowInferer(roi_size=roi_size, + sw_batch_size=1, + overlap=0.5) + self.augmentation = augmentation + + from models.nnunet_denoise_ddp_infer.get_unet3d_denoise_uncer_edge import DiffUNet + self.model = DiffUNet(1, 10, 3, 1, bta=True) + + self.patch_size = roi_size + self.best_mean_dice = 0.0 + self.ce = nn.CrossEntropyLoss() + self.mse = nn.MSELoss() + self.train_process = 20 + self.optimizer = torch.optim.SGD(self.model.parameters(), lr=1e-2, weight_decay=3e-5, + momentum=0.99, nesterov=True) + + self.scheduler_type = "poly" + self.bce = nn.BCEWithLogitsLoss() + self.dice_loss = DiceLoss(sigmoid=True) + self.cross = nn.CrossEntropyLoss() + + def training_step(self, batch): + image, label = self.get_input(batch) + + pred, pred_edge = self.model(image, label) + + loss_edge = self.cross(pred_edge, label) + loss_seg = self.cross(pred, label) + + self.log("loss_seg", loss_seg, step=self.global_step) + self.log("loss_edge", loss_edge, 
step=self.global_step) + + loss = loss_edge + loss_seg + return loss + + + def get_input(self, batch): + image = batch["data"] + label = batch["seg"] + # label = self.convert_labels(label) + + # label = label.float() + label = label[:, 0].long() + return image, label + + def cal_metric(self, gt, pred, voxel_spacing=[1.0, 1.0, 1.0]): + if pred.sum() > 0 and gt.sum() > 0: + d = dice(pred, gt) + # hd95 = metric.binary.hd95(pred, gt) + return np.array([d, 50]) + + elif gt.sum() == 0 and pred.sum() == 0: + return np.array([1.0, 50]) + + else: + return np.array([0.0, 50]) + + def validation_step(self, batch): + image, label = self.get_input(batch) + + output = self.model(image, ddim=True) + + # output = output > 0 + output = output.argmax(dim=1) + + output = output.cpu().numpy() + target = label.cpu().numpy() + + dices = [] + + c = 10 + for i in range(1, c): + pred_c = output == i + target_c = target == i + + cal_dice, _ = self.cal_metric(target_c, pred_c) + dices.append(cal_dice) + + return dices + + def validation_end(self, val_outputs): + dices = val_outputs + + dices_mean = [] + c = 9 + for i in range(0, c): + dices_mean.append(dices[i].mean()) + + mean_dice = sum(dices_mean) / len(dices_mean) + + self.log("0", dices_mean[0], step=self.epoch) + self.log("1", dices_mean[1], step=self.epoch) + self.log("2", dices_mean[2], step=self.epoch) + self.log("3", dices_mean[3], step=self.epoch) + self.log("4", dices_mean[4], step=self.epoch) + self.log("5", dices_mean[5], step=self.epoch) + self.log("6", dices_mean[6], step=self.epoch) + self.log("7", dices_mean[7], step=self.epoch) + self.log("8", dices_mean[8], step=self.epoch) + + self.log("mean_dice", mean_dice, step=self.epoch) + + if mean_dice > self.best_mean_dice: + self.best_mean_dice = mean_dice + save_new_model_and_delete_last(self.model, + os.path.join(model_save_path, + f"best_model_{mean_dice:.4f}.pt"), + delete_symbol="best_model") + + save_new_model_and_delete_last(self.model, + os.path.join(model_save_path, + f"final_model_{mean_dice:.4f}.pt"), + delete_symbol="final_model") + + + print(f"mean_dice is {mean_dice}") + +if __name__ == "__main__": + + trainer = BraTSTrainer(env_type=env, + max_epochs=max_epoch, + batch_size=batch_size, + device=device, + logdir=logdir, + val_every=val_every, + num_gpus=num_gpus, + master_port=17759, + training_script=__file__) + + train_ds, val_ds, test_ds = get_train_val_test_loader_from_train(data_dir) + + trainer.train(train_dataset=train_ds, val_dataset=val_ds) diff --git a/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/3_train_diffunet_train_all.py b/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/3_train_diffunet_train_all.py new file mode 100644 index 0000000000000000000000000000000000000000..de03789c5e55196b542fdfe678e2e79e6466caa5 --- /dev/null +++ b/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/3_train_diffunet_train_all.py @@ -0,0 +1,215 @@ +import numpy as np +from light_training.dataloading.dataset import get_train_loader_from_train +# from dataset.brats_data_utils_resample128 import get_loader_brats +import torch +import torch.nn as nn +# from ddim_seg.basic_unet import BasicUNet +from monai.networks.nets.unetr import UNETR +from monai.networks.nets.swin_unetr import SwinUNETR +from monai.inferers import SlidingWindowInferer +from light_training.evaluation.metric import dice +from light_training.trainer import Trainer +from monai.utils import set_determinism +from light_training.utils.lr_scheduler import LinearWarmupCosineAnnealingLR +from light_training.utils.files_helper 
import save_new_model_and_delete_last +from models.uent2d import UNet2D +from models.uent3d import UNet3D +from monai.networks.nets.segresnet import SegResNet +# from ddim_seg.unet3d import DiffusionUNet +# from ddim_seg.ddim import DDIM +# from ddim_seg.nnunet3d_raw import Generic_UNet +# from ddim_seg.basic_unet_denose import BasicUNetDe +# from ddim_seg.basic_unet import BasicUNetEncoder +from models.transbts.TransBTS_downsample8x_skipconnection import TransBTS +import argparse +from monai.losses.dice import DiceLoss +# from light_training.model.bit_diffusion import decimal_to_bits, bits_to_decimal + +# from guided_diffusion.gaussian_diffusion import get_named_beta_schedule, ModelMeanType, ModelVarType,LossType +# from guided_diffusion.respace import SpacedDiffusion, space_timesteps +# from guided_diffusion.resample import UniformSampler +set_determinism(123) +import os +from scipy import ndimage + + +os.environ["CUDA_VISIBLE_DEVICES"] = "4,5,6,7" +data_dir = "./data/fullres/train" + +logdir = f"./logs_gpu4/diffunet_ep2000_train_all_data" + +model_save_path = os.path.join(logdir, "model") +# augmentation = "nomirror" +augmentation = True + +env = "pytorch" +max_epoch = 2000 +batch_size = 2 +val_every = 2 +num_gpus = 1 +device = "cuda:0" +roi_size = [128, 128, 128] + +def get_edge_points(img): + """ + get edge points of a binary segmentation result + """ + dim = len(img.shape) + if (dim == 2): + strt = ndimage.generate_binary_structure(2, 1) + else: + strt = ndimage.generate_binary_structure(3, 1) + ero = ndimage.binary_erosion(img, strt) + edge = np.asarray(img, np.uint8) - np.asarray(ero, np.uint8) + return edge + +def edge_3d(image_3d): + # image_3d = torch.from_numpy(image_3d) + b, c, d, h, w = image_3d.shape + + image_3d = image_3d[:, 0] > 0 + + return_edge = [] + + for i in range(image_3d.shape[0]): + return_edge.append(get_edge_points(image_3d[i])[None,]) + + return_edge = np.concatenate(return_edge, axis=0) + + return return_edge + +class BraTSTrainer(Trainer): + def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"): + super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script) + self.window_infer = SlidingWindowInferer(roi_size=roi_size, + sw_batch_size=1, + overlap=0.5) + self.augmentation = augmentation + + from models.nnunet_denoise_ddp_infer.get_unet3d_denoise_uncer_edge import DiffUNet + self.model = DiffUNet(1, 10, 3, 1, bta=True) + + self.patch_size = roi_size + self.best_mean_dice = 0.0 + self.ce = nn.CrossEntropyLoss() + self.mse = nn.MSELoss() + self.train_process = 24 + self.optimizer = torch.optim.SGD(self.model.parameters(), lr=1e-2, weight_decay=3e-5, + momentum=0.99, nesterov=True) + + self.scheduler_type = "poly" + self.bce = nn.BCEWithLogitsLoss() + self.dice_loss = DiceLoss(sigmoid=True) + self.cross = nn.CrossEntropyLoss() + + def training_step(self, batch): + image, label = self.get_input(batch) + + pred, pred_edge = self.model(image, label) + + loss_edge = self.cross(pred_edge, label) + loss_seg = self.cross(pred, label) + + self.log("loss_seg", loss_seg, step=self.global_step) + self.log("loss_edge", loss_edge, step=self.global_step) + + loss = loss_edge + loss_seg + return loss + + + def get_input(self, batch): + image = batch["data"] + label = batch["seg"] + # label = self.convert_labels(label) + + # label = label.float() + label = label[:, 0].long() + return 
image, label + + def cal_metric(self, gt, pred, voxel_spacing=[1.0, 1.0, 1.0]): + if pred.sum() > 0 and gt.sum() > 0: + d = dice(pred, gt) + # hd95 = metric.binary.hd95(pred, gt) + return np.array([d, 50]) + + elif gt.sum() == 0 and pred.sum() == 0: + return np.array([1.0, 50]) + + else: + return np.array([0.0, 50]) + + def validation_step(self, batch): + image, label = self.get_input(batch) + + output = self.model(image, ddim=True) + + # output = output > 0 + output = output.argmax(dim=1) + + output = output.cpu().numpy() + target = label.cpu().numpy() + + dices = [] + + c = 10 + for i in range(1, c): + pred_c = output == i + target_c = target == i + + cal_dice, _ = self.cal_metric(target_c, pred_c) + dices.append(cal_dice) + + return dices + + def validation_end(self, val_outputs): + dices = val_outputs + + dices_mean = [] + c = 9 + for i in range(0, c): + dices_mean.append(dices[i].mean()) + + mean_dice = sum(dices_mean) / len(dices_mean) + + self.log("0", dices_mean[0], step=self.epoch) + self.log("1", dices_mean[1], step=self.epoch) + self.log("2", dices_mean[2], step=self.epoch) + self.log("3", dices_mean[3], step=self.epoch) + self.log("4", dices_mean[4], step=self.epoch) + self.log("5", dices_mean[5], step=self.epoch) + self.log("6", dices_mean[6], step=self.epoch) + self.log("7", dices_mean[7], step=self.epoch) + self.log("8", dices_mean[8], step=self.epoch) + + self.log("mean_dice", mean_dice, step=self.epoch) + + if mean_dice > self.best_mean_dice: + self.best_mean_dice = mean_dice + save_new_model_and_delete_last(self.model, + os.path.join(model_save_path, + f"best_model_{mean_dice:.4f}.pt"), + delete_symbol="best_model") + + save_new_model_and_delete_last(self.model, + os.path.join(model_save_path, + f"final_model_{mean_dice:.4f}.pt"), + delete_symbol="final_model") + + + print(f"mean_dice is {mean_dice}") + +if __name__ == "__main__": + + trainer = BraTSTrainer(env_type=env, + max_epochs=max_epoch, + batch_size=batch_size, + device=device, + logdir=logdir, + val_every=val_every, + num_gpus=num_gpus, + master_port=17759, + training_script=__file__) + + train_ds = get_train_loader_from_train(data_dir) + + trainer.train(train_dataset=train_ds, val_dataset=train_ds) diff --git a/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/4_predict_diffunet.py b/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/4_predict_diffunet.py new file mode 100644 index 0000000000000000000000000000000000000000..470b04e65af3d1032ee7f2fcb83dca8d8d441d37 --- /dev/null +++ b/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/4_predict_diffunet.py @@ -0,0 +1,141 @@ +import numpy as np +from light_training.dataloading.dataset import get_test_loader_from_test +import torch +import torch.nn as nn +from monai.networks.nets.basic_unet import BasicUNet +from monai.networks.nets.swin_unetr import SwinUNETR +from monai.inferers import SlidingWindowInferer +from light_training.evaluation.metric import dice +from light_training.trainer import Trainer +from monai.utils import set_determinism +from light_training.utils.files_helper import save_new_model_and_delete_last +from models.uent3d import UNet3D +from monai.networks.nets.segresnet import SegResNet +from models.transbts.TransBTS_downsample8x_skipconnection import TransBTS +from einops import rearrange +from models.modelgenesis.unet3d import UNet3DModelGen +from models.transvw.models.ynet3d import UNet3DTransVW +from monai.networks.nets.basic_unet import BasicUNet +from monai.networks.nets.attentionunet import AttentionUnet +from 
light_training.loss.compound_losses import DC_and_CE_loss +from light_training.loss.dice import MemoryEfficientSoftDiceLoss +from light_training.evaluation.metric import dice +set_determinism(123) +from light_training.loss.compound_losses import DC_and_CE_loss +import os +from medpy import metric +from light_training.prediction import Predictor + + +data_dir = "./data/fullres/test" +env = "pytorch" +max_epoch = 1000 +batch_size = 2 +val_every = 2 +num_gpus = 1 +device = "cuda:2" +patch_size = [128, 128, 128] + +class BraTSTrainer(Trainer): + def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"): + super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script) + + self.patch_size = patch_size + + def get_input(self, batch): + image = batch["data"] + label = batch["seg"] + properties = batch["properties"] + # label = self.convert_labels(label) + del batch + return image, label, properties + + def define_model_diffunet(self): + from models.nnunet_denoise_ddp_infer.get_unet3d_denoise_uncer_edge import DiffUNet + model = DiffUNet(1, 10, 3, 1, bta=True) + + model_path = "/home/xingzhaohu/zongweizhou/logs_gpu4/diffunet/model/final_model_0.8384.pt" + new_sd = self.filte_state_dict(torch.load(model_path, map_location="cpu")) + model.load_state_dict(new_sd, strict=False) + model.eval() + window_infer = SlidingWindowInferer(roi_size=patch_size, + sw_batch_size=2, + overlap=0.3, + progress=True, + mode="gaussian") + + predictor = Predictor(window_infer=window_infer, + mirror_axes=[0,1,2]) + save_path = "./prediction_results/diffunet_ep1000_test" + + os.makedirs(save_path, exist_ok=True) + + return model, predictor, save_path + + def validation_step(self, batch): + image, label, properties = self.get_input(batch) + print(properties['spacing']) + + ddim = True + model, predictor, save_path = self.define_model_diffunet() + + if ddim: + model_output = predictor.maybe_mirror_and_predict(image, model, device=device, ddim=True) + else : + model_output = predictor.maybe_mirror_and_predict(image, model, device=device) + + model_output = predictor.predict_raw_probability(model_output, + properties=properties).cpu() + + + model_output = model_output.argmax(dim=0) + + model_output = predictor.predict_noncrop_probability(model_output, properties) + print(f"save shape is {model_output.shape}") + + + seg_list = ["aorta", "gall_bladder", "kidney_left", + "kidney_right", "liver", "pancreas", + "postcava", "spleen", "stomach"] + + save_path = os.path.join(save_path, properties['name'][0], "predictions") + # print(f"save_path is {save_path}") + os.makedirs(save_path, exist_ok=True) + for i in range(1, len(seg_list) + 1): + model_output_c = model_output == i + predictor.save_to_nii(model_output_c, + raw_spacing=properties['spacing'], + case_name=seg_list[i-1], + save_dir=save_path) + + return 0 + + + def filte_state_dict(self, sd): + if "module" in sd : + sd = sd["module"] + new_sd = {} + for k, v in sd.items(): + k = str(k) + new_k = k[7:] if k.startswith("module") else k + new_sd[new_k] = v + del sd + return new_sd + +if __name__ == "__main__": + + trainer = BraTSTrainer(env_type=env, + max_epochs=max_epoch, + batch_size=batch_size, + device=device, + logdir="", + val_every=val_every, + num_gpus=num_gpus, + master_port=17751, + training_script=__file__) + + test_ds = get_test_loader_from_test(data_dir=data_dir) + + 
trainer.validation_single_gpu(test_ds) + + diff --git a/SegMamba/light_training/examples/liver2017/2_preprocessing_liver2017.py b/SegMamba/light_training/examples/liver2017/2_preprocessing_liver2017.py new file mode 100644 index 0000000000000000000000000000000000000000..dcb78601a53ae6141d80fff7ba791e4cab922d5d --- /dev/null +++ b/SegMamba/light_training/examples/liver2017/2_preprocessing_liver2017.py @@ -0,0 +1,123 @@ + +from light_training.preprocessing.preprocessors.default_preprocessor_liver_2017 import DefaultPreprocessor +import numpy as np +import pickle +import json + + +def process_train(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # median_shape is [602.5 516.5 516.5] + base_dir = "/home/xingzhaohu/data/Liver_2017" + + preprocessor = DefaultPreprocessor(base_dir=base_dir) + + out_spacing = [1.0, 0.76757812, 0.76757812] + output_dir = "./data/fullres/train/" + + with open("./data_analysis_result.txt", "r") as f: + content = f.read().strip("\n") + print(content) + content = eval(content) + foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"] + + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, 2], + num_processes=16, + foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel) + +def process_val(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # median_shape is [602.5 516.5 516.5] + base_dir = "./data/raw_data/Val" + image_dir = "img" + preprocessor = DefaultPreprocessor(base_dir=base_dir, + image_dir=image_dir, + label_dir=None, + ) + + out_spacing = [0.5, 0.70410156, 0.70410156] + + with open("./data_analysis_result.txt", "r") as f: + content = f.read().strip("\n") + print(content) + content = eval(content) + foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"] + + output_dir = "./data/fullres/val_test/" + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, ], + foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel, + num_processes=16) + +def process_val_semi(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # median_shape is [602.5 516.5 516.5] + base_dir = "./data/raw_data/Val_semi_postprocess" + image_dir = "img" + preprocessor = DefaultPreprocessor(base_dir=base_dir, + image_dir=image_dir, + label_dir="gt", + ) + + out_spacing = [0.5, 0.70410156, 0.70410156] + + with open("./data_analysis_result.txt", "r") as f: + content = f.read().strip("\n") + print(content) + content = eval(content) + foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"] + + output_dir = "./data/fullres/val_semi_postprocess/" + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, ], + foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel) + + +def plan(): + base_dir = "/home/xingzhaohu/data/Liver_2017" + + preprocessor = DefaultPreprocessor(base_dir=base_dir, + + ) + + preprocessor.run_plan() + +if __name__ == "__main__": + + # plan() + + process_train() + # import time + # s = time.time() + # process_val() + # e = time.time() + + # print(f"preprocessing time is {e - s}") + + # process_val_semi() + + +# + # preprocessor.run(output_spacing=[3, 0.9765625, 0.9765625], output_dir=output_dir) + + # data = np.load("/home/xingzhaohu/sharefs/datasets/AIIB23_nnunet/train/AIIB23_96.npz") + + # image = data["data"] + # label = data["seg"] + # print(image.shape) + # print(label.shape) 
+ + # import matplotlib.pyplot as plt + + # for i in range(20): + # plt.imshow(image[0, i], cmap="gray") + # plt.show() + + # df = open("/home/xingzhaohu/sharefs/datasets/AIIB23_nnunet/train/AIIB23_96.pkl", "rb") + + # info = pickle.load(df) + # print(info) \ No newline at end of file diff --git a/SegMamba/light_training/examples/read_pickle.py b/SegMamba/light_training/examples/read_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..3833df033e6aff5fcc2b505140f7f6d15ecd584e --- /dev/null +++ b/SegMamba/light_training/examples/read_pickle.py @@ -0,0 +1,8 @@ +import pickle + +f = "/home/xingzhaohu/jiuding_code/SegRap2023/data/fullres/train/segrap_0000.pkl" + +with open(f, "rb") as ff: + s = pickle.load(ff) + + print(s) \ No newline at end of file diff --git a/SegMamba/light_training/launch.py b/SegMamba/light_training/launch.py new file mode 100644 index 0000000000000000000000000000000000000000..35bdc0017c419750c68fc99f9319f093a85eb6f1 --- /dev/null +++ b/SegMamba/light_training/launch.py @@ -0,0 +1,117 @@ +# Copyright 2020 The Microsoft DeepSpeed Team +""" +sailing runner is the main front-end to launching multi-worker +training jobs with DeepSpeed. By default this uses pdsh to parallel +ssh into multiple worker nodes and launch all the necessary processes +per rank for training. +""" + +import os +import sys +import json +import subprocess +import collections +import socket +import signal +import logging + +import torch.distributed as dist + + +def fetch_hostfile(hostfile_path): + if not os.path.isfile(hostfile_path): + print("Unable to find hostfile, will proceed with training " + "with local resources only.") + return None + # e.g., worker-0 slots=16 + with open(hostfile_path, 'r') as fd: + resource_pool = collections.OrderedDict() + for line in fd.readlines(): + line = line.strip() + if line == '': + # skip empty lines + continue + try: + hostname, slots = line.split() + _, slot_count = slots.split("=") + slot_count = int(slot_count) + except ValueError as err: + raise err + if hostname in resource_pool: + raise ValueError(f"host {hostname} is already defined") + resource_pool[hostname] = slot_count + + return resource_pool + + +def cmd_load_hyperparam(config_path=None, format="json", encoding="utf-8"): + """ + load arguments from argparse and a config file + """ + # config_path='config/config_block_large_chinese.json' + format = config_path.rsplit('.')[-1] + with open(config_path, 'r', encoding=encoding) as f: + if format == "json": + config_dict = json.load(f) + else: + raise NameError("current format %s for hyperparam file is invalid" % + format) + config_cmd = [] + for key in config_dict: + if len(str(config_dict[key])) == 0: + config_cmd.append('--' + key) + else: + config_cmd.append('--' + key) + config_cmd.append(str(config_dict[key])) + return config_cmd + + +def launch_dist( + env_type="DDP", + num_nodes=1, + gpus_per_node=1, + master_addr='localhost', + master_port=17500, + training_script='train.py', + ): + + if num_nodes != 1: + print("Multi-node multi-GPU training has not been tested yet and is not supported.") + os._exit(0) + if env_type == "DDP": + cmd_launch = [] + cmd_launch.extend([ + # 'export NUM_NODES=' + str(num_nodes) + ';', + # 'export GPUS_PER_NODE=' + str(gpus_per_node) + ';', + # sys.executable, + # "python", + # '-m', + "torchrun" + # 'torch.distributed.launch' + ]) + torch_distributed_args = [ + '--nproc_per_node', + str(gpus_per_node), + '--nnodes', + str(num_nodes), + '--node_rank', + str(0), + '--master_addr', + master_addr, + '--master_port', + str(master_port), + ] + 
cmd_launch.extend(torch_distributed_args) + cmd_launch.append(training_script) + cmd_launch.append('--not_call_launch') + run_cmd = ' '.join(cmd_launch) + p = subprocess.Popen(run_cmd, shell=True, preexec_fn=os.setsid) + def signal_handler(signal, frame): + os.killpg(os.getpgid(p.pid), 9) + signal.signal(signal.SIGINT, signal_handler) + p.wait() + print ('finish') + + else : + print("Unsupported env_type") + os._exit(0) diff --git a/SegMamba/light_training/loss/__init__.py b/SegMamba/light_training/loss/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SegMamba/light_training/loss/compound_losses.py b/SegMamba/light_training/loss/compound_losses.py new file mode 100644 index 0000000000000000000000000000000000000000..c7b1912a83e70cd8083fc6f2aafc915fae20e9e6 --- /dev/null +++ b/SegMamba/light_training/loss/compound_losses.py @@ -0,0 +1,151 @@ +import torch +from .dice import SoftDiceLoss, MemoryEfficientSoftDiceLoss +from .robust_ce_loss import RobustCrossEntropyLoss, TopKLoss +from .helpers import softmax_helper_dim1 +from torch import nn + + +class DC_and_CE_loss(nn.Module): + def __init__(self, soft_dice_kwargs, ce_kwargs, weight_ce=1, weight_dice=1, ignore_label=None, + dice_class=SoftDiceLoss): + """ + Weights for CE and Dice do not need to sum to one. You can set whatever you want. + :param soft_dice_kwargs: + :param ce_kwargs: + :param aggregate: + :param square_dice: + :param weight_ce: + :param weight_dice: + """ + super(DC_and_CE_loss, self).__init__() + if ignore_label is not None: + ce_kwargs['ignore_index'] = ignore_label + + self.weight_dice = weight_dice + self.weight_ce = weight_ce + self.ignore_label = ignore_label + + self.ce = RobustCrossEntropyLoss(**ce_kwargs) + self.dc = dice_class(apply_nonlin=softmax_helper_dim1, **soft_dice_kwargs) + + def forward(self, net_output: torch.Tensor, target: torch.Tensor): + """ + target must be b, c, x, y(, z) with c=1 + :param net_output: + :param target: + :return: + """ + if self.ignore_label is not None: + assert target.shape[1] == 1, 'ignore label is not implemented for one hot encoded target variables ' \ + '(DC_and_CE_loss)' + mask = (target != self.ignore_label).bool() + # remove ignore label from target, replace with one of the known labels. It doesn't matter because we + # ignore gradients in those areas anyway + target_dice = torch.clone(target) + target_dice[target == self.ignore_label] = 0 + num_fg = mask.sum() + else: + target_dice = target + mask = None + + dc_loss = self.dc(net_output, target_dice, loss_mask=mask) \ + if self.weight_dice != 0 else 0 + ce_loss = self.ce(net_output, target[:, 0].long()) \ + if self.weight_ce != 0 and (self.ignore_label is None or num_fg > 0) else 0 + + result = self.weight_ce * ce_loss + self.weight_dice * dc_loss + return result + + +class DC_and_BCE_loss(nn.Module): + def __init__(self, bce_kwargs, soft_dice_kwargs, weight_ce=1, weight_dice=1, use_ignore_label: bool = False, + dice_class=MemoryEfficientSoftDiceLoss): + """ + DO NOT APPLY NONLINEARITY IN YOUR NETWORK! + + target must be one hot encoded + IMPORTANT: We assume use_ignore_label is located in target[:, -1]!!!
+ + :param soft_dice_kwargs: + :param bce_kwargs: + :param aggregate: + """ + super(DC_and_BCE_loss, self).__init__() + if use_ignore_label: + bce_kwargs['reduction'] = 'none' + + self.weight_dice = weight_dice + self.weight_ce = weight_ce + self.use_ignore_label = use_ignore_label + + self.ce = nn.BCEWithLogitsLoss(**bce_kwargs) + self.dc = dice_class(apply_nonlin=torch.sigmoid, **soft_dice_kwargs) + + def forward(self, net_output: torch.Tensor, target: torch.Tensor): + if self.use_ignore_label: + # target is one hot encoded here. invert it so that it is True wherever we can compute the loss + mask = (1 - target[:, -1:]).bool() + # remove ignore channel now that we have the mask + target_regions = torch.clone(target[:, :-1]) + else: + target_regions = target + mask = None + + dc_loss = self.dc(net_output, target_regions, loss_mask=mask) + if mask is not None: + ce_loss = (self.ce(net_output, target_regions) * mask).sum() / torch.clip(mask.sum(), min=1e-8) + else: + ce_loss = self.ce(net_output, target_regions) + result = self.weight_ce * ce_loss + self.weight_dice * dc_loss + return result + + +class DC_and_topk_loss(nn.Module): + def __init__(self, soft_dice_kwargs, ce_kwargs, weight_ce=1, weight_dice=1, ignore_label=None): + """ + Weights for CE and Dice do not need to sum to one. You can set whatever you want. + :param soft_dice_kwargs: + :param ce_kwargs: + :param aggregate: + :param square_dice: + :param weight_ce: + :param weight_dice: + """ + super().__init__() + if ignore_label is not None: + ce_kwargs['ignore_index'] = ignore_label + + self.weight_dice = weight_dice + self.weight_ce = weight_ce + self.ignore_label = ignore_label + + self.ce = TopKLoss(**ce_kwargs) + self.dc = SoftDiceLoss(apply_nonlin=softmax_helper_dim1, **soft_dice_kwargs) + + def forward(self, net_output: torch.Tensor, target: torch.Tensor): + """ + target must be b, c, x, y(, z) with c=1 + :param net_output: + :param target: + :return: + """ + if self.ignore_label is not None: + assert target.shape[1] == 1, 'ignore label is not implemented for one hot encoded target variables ' \ + '(DC_and_CE_loss)' + mask = (target != self.ignore_label).bool() + # remove ignore label from target, replace with one of the known labels. It doesn't matter because we + # ignore gradients in those areas anyway + target_dice = torch.clone(target) + target_dice[target == self.ignore_label] = 0 + num_fg = mask.sum() + else: + target_dice = target + mask = None + + dc_loss = self.dc(net_output, target_dice, loss_mask=mask) \ + if self.weight_dice != 0 else 0 + ce_loss = self.ce(net_output, target) \ + if self.weight_ce != 0 and (self.ignore_label is None or num_fg > 0) else 0 + + result = self.weight_ce * ce_loss + self.weight_dice * dc_loss + return result diff --git a/SegMamba/light_training/loss/ddp_allgather.py b/SegMamba/light_training/loss/ddp_allgather.py new file mode 100644 index 0000000000000000000000000000000000000000..c42b3ef654f361904d5fe1868621b3f6f5cd29a6 --- /dev/null +++ b/SegMamba/light_training/loss/ddp_allgather.py @@ -0,0 +1,49 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Optional, Tuple + +import torch +from torch import distributed + + +def print_if_rank0(*args): + if distributed.get_rank() == 0: + print(*args) + + +class AllGatherGrad(torch.autograd.Function): + # stolen from pytorch lightning + @staticmethod + def forward( + ctx: Any, + tensor: torch.Tensor, + group: Optional["torch.distributed.ProcessGroup"] = None, + ) -> torch.Tensor: + ctx.group = group + + gathered_tensor = [torch.zeros_like(tensor) for _ in range(torch.distributed.get_world_size())] + + torch.distributed.all_gather(gathered_tensor, tensor, group=group) + gathered_tensor = torch.stack(gathered_tensor, dim=0) + + return gathered_tensor + + @staticmethod + def backward(ctx: Any, *grad_output: torch.Tensor) -> Tuple[torch.Tensor, None]: + grad_output = torch.cat(grad_output) + + torch.distributed.all_reduce(grad_output, op=torch.distributed.ReduceOp.SUM, async_op=False, group=ctx.group) + + return grad_output[torch.distributed.get_rank()], None + diff --git a/SegMamba/light_training/loss/deepsupervision.py b/SegMamba/light_training/loss/deepsupervision.py new file mode 100644 index 0000000000000000000000000000000000000000..e8a3cf639c7b961317859aadf55c93c9744de657 --- /dev/null +++ b/SegMamba/light_training/loss/deepsupervision.py @@ -0,0 +1,65 @@ +import torch +import torch.nn as nn +import numpy as np + +class DeepSupervisionWrapper(nn.Module): + def __init__(self, loss, weight_factors=None): + """ + Wraps a loss function so that it can be applied to multiple outputs. Forward accepts an arbitrary number of + inputs. Each input is expected to be a tuple/list. Each tuple/list must have the same length. The loss is then + applied to each entry like this: + l = w0 * loss(input0[0], input1[0], ...) + w1 * loss(input0[1], input1[1], ...) + ... + If weights are None, all w will be 1. + """ + super(DeepSupervisionWrapper, self).__init__() + self.weight_factors = weight_factors + self.loss = loss + + def forward(self, *args): + for i in args: + assert isinstance(i, (tuple, list)), "all args must be either tuple or list, got %s" % type(i) + # we could check for equal lengths here as well but we really shouldn't overdo it with checks because + # this code is executed a lot of times! + + if self.weight_factors is None: + weights = [1] * len(args[0]) + else: + weights = self.weight_factors + + # we initialize the loss like this instead of 0 to ensure it sits on the correct device, not sure if that's + # really necessary + l = weights[0] * self.loss(*[j[0] for j in args]) + for i, inputs in enumerate(zip(*args)): + if i == 0: + continue + l += weights[i] * self.loss(*inputs) + return l + + + +class AutoDeepSupervision(nn.Module): + def __init__(self, loss, label_scale) -> None: + super().__init__() + + weights = np.array([1 / (2 ** i) for i in range(len(label_scale))]) + weights[-1] = 0 + # we don't use the lowest 2 outputs. 
Normalize weights so that they sum to 1 + weights = weights / weights.sum() + print(f"loss weights is {weights}") + + self.warpper = DeepSupervisionWrapper(loss, weights) + self.label_scale = label_scale + + def forward(self, preds, label): + pred_len = len(preds) + assert pred_len == len(self.label_scale) + labels = [] + for scale in self.label_scale: + labels.append(torch.nn.functional.interpolate(label, scale_factor=scale, mode="nearest")) + # label_1_2 = torch.nn.functional.interpolate(label, scale_factor=self.label_scale[1], mode="nearest") + # label_1_4 = torch.nn.functional.interpolate(label, scale_factor=self.label_scale[2], mode="nearest") + # label_1_8 = torch.nn.functional.interpolate(label, scale_factor=self.label_scale[3], mode="nearest") + # label_1_16 = torch.nn.functional.interpolate(label, scale_factor=self.label_scale[4], mode="nearest") + # labels = [label, label_1_2, label_1_4, label_1_8, label_1_16] + + return self.warpper(preds, labels) \ No newline at end of file diff --git a/SegMamba/light_training/loss/dice.py b/SegMamba/light_training/loss/dice.py new file mode 100644 index 0000000000000000000000000000000000000000..7ae7d0dd75c9d8582279ea5dd28a9c13f9f533a8 --- /dev/null +++ b/SegMamba/light_training/loss/dice.py @@ -0,0 +1,192 @@ +from typing import Callable + +import torch +from .ddp_allgather import AllGatherGrad +from .tensor_utilities import sum_tensor +from torch import nn + + +class SoftDiceLoss(nn.Module): + def __init__(self, apply_nonlin: Callable = None, batch_dice: bool = False, do_bg: bool = True, smooth: float = 1., + ddp: bool = True, clip_tp: float = None): + """ + """ + super(SoftDiceLoss, self).__init__() + + self.do_bg = do_bg + self.batch_dice = batch_dice + self.apply_nonlin = apply_nonlin + self.smooth = smooth + self.clip_tp = clip_tp + self.ddp = ddp + + def forward(self, x, y, loss_mask=None): + shp_x = x.shape + + if self.batch_dice: + axes = [0] + list(range(2, len(shp_x))) + else: + axes = list(range(2, len(shp_x))) + + if self.apply_nonlin is not None: + x = self.apply_nonlin(x) + + tp, fp, fn, _ = get_tp_fp_fn_tn(x, y, axes, loss_mask, False) + + if self.ddp and self.batch_dice: + tp = AllGatherGrad.apply(tp).sum(0) + fp = AllGatherGrad.apply(fp).sum(0) + fn = AllGatherGrad.apply(fn).sum(0) + + if self.clip_tp is not None: + tp = torch.clip(tp, min=self.clip_tp , max=None) + + nominator = 2 * tp + denominator = 2 * tp + fp + fn + + dc = (nominator + self.smooth) / (torch.clip(denominator + self.smooth, 1e-8)) + + if not self.do_bg: + if self.batch_dice: + dc = dc[1:] + else: + dc = dc[:, 1:] + dc = dc.mean() + + return -dc + +class MemoryEfficientSoftDiceLoss(nn.Module): + def __init__(self, apply_nonlin: Callable = None, batch_dice: bool = False, do_bg: bool = True, smooth: float = 1., + ddp: bool = True): + """ + saves 1.6 GB on Dataset017 3d_lowres + """ + super(MemoryEfficientSoftDiceLoss, self).__init__() + + self.do_bg = do_bg + self.batch_dice = batch_dice + self.apply_nonlin = apply_nonlin + self.smooth = smooth + self.ddp = ddp + + def forward(self, x, y, loss_mask=None): + shp_x, shp_y = x.shape, y.shape + + if self.apply_nonlin is not None: + x = self.apply_nonlin(x) + + if not self.do_bg: + x = x[:, 1:] + + # make everything shape (b, c) + axes = list(range(2, len(shp_x))) + + with torch.no_grad(): + if len(shp_x) != len(shp_y): + y = y.view((shp_y[0], 1, *shp_y[1:])) + + if all([i == j for i, j in zip(shp_x, shp_y)]): + # if this is the case then gt is probably already a one hot encoding + y_onehot = y + else: + gt = 
y.long() + y_onehot = torch.zeros(shp_x, device=x.device, dtype=torch.bool) + y_onehot.scatter_(1, gt, 1) + + if not self.do_bg: + y_onehot = y_onehot[:, 1:] + sum_gt = y_onehot.sum(axes) if loss_mask is None else (y_onehot * loss_mask).sum(axes) + + intersect = (x * y_onehot).sum(axes) if loss_mask is None else (x * y_onehot * loss_mask).sum(axes) + sum_pred = x.sum(axes) if loss_mask is None else (x * loss_mask).sum(axes) + + if self.ddp and self.batch_dice: + intersect = AllGatherGrad.apply(intersect).sum(0) + sum_pred = AllGatherGrad.apply(sum_pred).sum(0) + sum_gt = AllGatherGrad.apply(sum_gt).sum(0) + + if self.batch_dice: + intersect = intersect.sum(0) + sum_pred = sum_pred.sum(0) + sum_gt = sum_gt.sum(0) + + dc = (2 * intersect + self.smooth) / (torch.clip(sum_gt + sum_pred + self.smooth, 1e-8)) + + dc = dc.mean() + return -dc + +def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False): + """ + net_output must be (b, c, x, y(, z))) + gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z)) + if mask is provided it must have shape (b, 1, x, y(, z))) + :param net_output: + :param gt: + :param axes: can be (, ) = no summation + :param mask: mask must be 1 for valid pixels and 0 for invalid pixels + :param square: if True then fp, tp and fn will be squared before summation + :return: + """ + if axes is None: + axes = tuple(range(2, len(net_output.size()))) + + shp_x = net_output.shape + shp_y = gt.shape + + with torch.no_grad(): + if len(shp_x) != len(shp_y): + gt = gt.view((shp_y[0], 1, *shp_y[1:])) + + if all([i == j for i, j in zip(net_output.shape, gt.shape)]): + # if this is the case then gt is probably already a one hot encoding + y_onehot = gt + else: + gt = gt.long() + y_onehot = torch.zeros(shp_x, device=net_output.device) + y_onehot.scatter_(1, gt, 1) + + tp = net_output * y_onehot + fp = net_output * (1 - y_onehot) + fn = (1 - net_output) * y_onehot + tn = (1 - net_output) * (1 - y_onehot) + + if mask is not None: + with torch.no_grad(): + mask_here = torch.tile(mask, (1, tp.shape[1], *[1 for i in range(2, len(tp.shape))])) + tp *= mask_here + fp *= mask_here + fn *= mask_here + tn *= mask_here + # benchmark whether tiling the mask would be faster (torch.tile). 
It probably is for large batch sizes + # OK it barely makes a difference but the implementation above is a tiny bit faster + uses less vram + # (using nnUNetv2_train 998 3d_fullres 0) + # tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1) + # fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1) + # fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1) + # tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn, dim=1)), dim=1) + + if square: + tp = tp ** 2 + fp = fp ** 2 + fn = fn ** 2 + tn = tn ** 2 + + if len(axes) > 0: + tp = sum_tensor(tp, axes, keepdim=False) + fp = sum_tensor(fp, axes, keepdim=False) + fn = sum_tensor(fn, axes, keepdim=False) + tn = sum_tensor(tn, axes, keepdim=False) + + return tp, fp, fn, tn + + +if __name__ == '__main__': + from nnunetv2.utilities.helpers import softmax_helper_dim1 + pred = torch.rand((2, 3, 32, 32, 32)) + ref = torch.randint(0, 3, (2, 32, 32, 32)) + + dl_old = SoftDiceLoss(apply_nonlin=softmax_helper_dim1, batch_dice=True, do_bg=False, smooth=0, ddp=False) + dl_new = MemoryEfficientSoftDiceLoss(apply_nonlin=softmax_helper_dim1, batch_dice=True, do_bg=False, smooth=0, ddp=False) + res_old = dl_old(pred, ref) + res_new = dl_new(pred, ref) + print(res_old, res_new) diff --git a/SegMamba/light_training/loss/helpers.py b/SegMamba/light_training/loss/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..42448e3f9c3de88ba13568ff7585797ee29607ab --- /dev/null +++ b/SegMamba/light_training/loss/helpers.py @@ -0,0 +1,27 @@ +import torch + + +def softmax_helper_dim0(x: torch.Tensor) -> torch.Tensor: + return torch.softmax(x, 0) + + +def softmax_helper_dim1(x: torch.Tensor) -> torch.Tensor: + return torch.softmax(x, 1) + + +def empty_cache(device: torch.device): + if device.type == 'cuda': + torch.cuda.empty_cache() + elif device.type == 'mps': + from torch import mps + mps.empty_cache() + else: + pass + + +class dummy_context(object): + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + pass diff --git a/SegMamba/light_training/loss/robust_ce_loss.py b/SegMamba/light_training/loss/robust_ce_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..ad46659193ce1dbbff8ee6829bbf5e4223b6ed8f --- /dev/null +++ b/SegMamba/light_training/loss/robust_ce_loss.py @@ -0,0 +1,33 @@ +import torch +from torch import nn, Tensor +import numpy as np + + +class RobustCrossEntropyLoss(nn.CrossEntropyLoss): + """ + this is just a compatibility layer because my target tensor is float and has an extra dimension + + input must be logits, not probabilities! + """ + def forward(self, input: Tensor, target: Tensor) -> Tensor: + if len(target.shape) == len(input.shape): + assert target.shape[1] == 1 + target = target[:, 0] + return super().forward(input, target.long()) + + +class TopKLoss(RobustCrossEntropyLoss): + """ + input must be logits, not probabilities! 
+ """ + def __init__(self, weight=None, ignore_index: int = -100, k: float = 10, label_smoothing: float = 0): + self.k = k + super(TopKLoss, self).__init__(weight, False, ignore_index, reduce=False, label_smoothing=label_smoothing) + + def forward(self, inp, target): + target = target[:, 0].long() + res = super(TopKLoss, self).forward(inp, target) + num_voxels = np.prod(res.shape, dtype=np.int64) + res, _ = torch.topk(res.view((-1, )), int(num_voxels * self.k / 100), sorted=False) + return res.mean() + diff --git a/SegMamba/light_training/loss/tensor_utilities.py b/SegMamba/light_training/loss/tensor_utilities.py new file mode 100644 index 0000000000000000000000000000000000000000..b16ffcac2e46d93c19522937098f0af5b208aca7 --- /dev/null +++ b/SegMamba/light_training/loss/tensor_utilities.py @@ -0,0 +1,15 @@ +from typing import Union, List, Tuple + +import numpy as np +import torch + + +def sum_tensor(inp: torch.Tensor, axes: Union[np.ndarray, Tuple, List], keepdim: bool = False) -> torch.Tensor: + axes = np.unique(axes).astype(int) + if keepdim: + for ax in axes: + inp = inp.sum(int(ax), keepdim=True) + else: + for ax in sorted(axes, reverse=True): + inp = inp.sum(int(ax)) + return inp diff --git a/SegMamba/light_training/prediction.py b/SegMamba/light_training/prediction.py new file mode 100644 index 0000000000000000000000000000000000000000..a90f5c943cab192b083a8d2d0b414593baa56430 --- /dev/null +++ b/SegMamba/light_training/prediction.py @@ -0,0 +1,227 @@ + +import torch +import numpy as np +import SimpleITK as sitk +import os +from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape +from scipy import ndimage +import skimage.measure as measure + +class dummy_context(object): + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + +def large_connected_domain(label): + cd, num = measure.label(label, return_num=True, connectivity=1) + volume = np.zeros([num]) + for k in range(num): + volume[k] = ((cd == (k + 1)).astype(np.uint8)).sum() + volume_sort = np.argsort(volume) + # print(volume_sort) + label = (cd == (volume_sort[-1] + 1)).astype(np.uint8) + label = ndimage.binary_fill_holes(label) + label = label.astype(np.uint8) + return label + +class Predictor: + def __init__(self, window_infer, mirror_axes=None) -> None: + self.window_infer = window_infer + self.mirror_axes = mirror_axes + + @staticmethod + def predict_raw_probability(model_output, properties): + if len(model_output.shape) == 5: + model_output = model_output[0] + + device = model_output.device + shape_after_cropping_before_resample = properties["shape_after_cropping_before_resample"] + d, w, h = shape_after_cropping_before_resample[0], shape_after_cropping_before_resample[1], shape_after_cropping_before_resample[2] + print(f"resample....") + channel = model_output.shape[0] + + try: + with torch.no_grad(): + resample_output = torch.zeros((channel, d, w, h), dtype=torch.half, device=device) + for c in range(channel): + resample_output[c] = torch.nn.functional.interpolate(model_output[c][None, None], mode="trilinear", size=(d, w, h))[0, 0] + + del model_output + + except RuntimeError: + with torch.no_grad(): + model_output = model_output.to("cpu") + resample_output = torch.zeros((channel, d, w, h)) + for c in range(channel): + resample_output[c] = torch.nn.functional.interpolate(model_output[c][None, None], mode="trilinear", size=(d, w, h))[0, 0] + del model_output + + torch.cuda.empty_cache() + + return resample_output + + @staticmethod + def 
predict_noncrop_probability(model_output, properties): + + print(f"restoring noncrop region......") + if isinstance(model_output, torch.Tensor): + model_output = model_output.cpu().numpy() + + torch.cuda.empty_cache() + + if len(model_output.shape) == 3: + shape_before_cropping = properties["shape_before_cropping"] + if isinstance(shape_before_cropping[0], torch.Tensor): + shape_before_cropping = [shape_before_cropping[0].item(), shape_before_cropping[1].item(), shape_before_cropping[2].item()] + + none_crop_pred = np.zeros([shape_before_cropping[0], shape_before_cropping[1], shape_before_cropping[2]], dtype=np.uint8) + bbox_used_for_cropping = properties["bbox_used_for_cropping"] + + none_crop_pred[ + bbox_used_for_cropping[0][0]: bbox_used_for_cropping[0][1], + bbox_used_for_cropping[1][0]: bbox_used_for_cropping[1][1], + bbox_used_for_cropping[2][0]: bbox_used_for_cropping[2][1]] = model_output + del model_output + return none_crop_pred + + elif len(model_output.shape) == 4: + shape_before_cropping = properties["shape_before_cropping"] + if isinstance(shape_before_cropping[0], torch.Tensor): + shape_before_cropping = [shape_before_cropping[0].item(), shape_before_cropping[1].item(), shape_before_cropping[2].item()] + + none_crop_pred = np.zeros([model_output.shape[0], shape_before_cropping[0], shape_before_cropping[1], shape_before_cropping[2]], dtype=np.uint8) + bbox_used_for_cropping = properties["bbox_used_for_cropping"] + + none_crop_pred[ + :, + bbox_used_for_cropping[0][0]: bbox_used_for_cropping[0][1], + bbox_used_for_cropping[1][0]: bbox_used_for_cropping[1][1], + bbox_used_for_cropping[2][0]: bbox_used_for_cropping[2][1]] = model_output + del model_output + + return none_crop_pred + + else: + print(f"restore crop error") + exit(0) + + def maybe_mirror_and_predict(self, x, model, device=torch.device("cpu"), **kwargs) -> torch.Tensor: + # mirror_axes = [0, 1, 2] + window_infer = self.window_infer + if type(device) is str: + device = torch.device(device) + + model.to(device) + # if type(x) is list: + # for i in range(len(x)): + # x[i] = x[i].to(device) + # else : + x = x.to(device) + with torch.no_grad(): + print(f"predicting....") + with torch.autocast("cuda", enabled=True) if device.type == "cuda" else dummy_context(): + prediction = window_infer(x, model, **kwargs).cpu() + mirror_axes = self.mirror_axes + + if mirror_axes is not None: + # check for invalid numbers in mirror_axes + # x should be 5d for 3d images and 4d for 2d. so the max value of mirror_axes cannot exceed len(x.shape) - 3 + assert max(mirror_axes) <= len(x.shape) - 3, 'mirror_axes does not match the dimension of the input!' 
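+ # Test-time mirroring: the unflipped pass is already stored in `prediction`; below, one extra
+ # pass is accumulated for every non-empty combination of the requested mirror axes
+ # (2 ** len(mirror_axes) passes in total), each flipped back before summation and averaged at the end.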
+ + num_predictons = 2 ** len(mirror_axes) + if 0 in mirror_axes: + prediction += torch.flip(window_infer(torch.flip(x, (2,)), model, **kwargs), (2,)).cpu() + torch.cuda.empty_cache() + if 1 in mirror_axes: + prediction += torch.flip(window_infer(torch.flip(x, (3,)), model, **kwargs), (3,)).cpu() + torch.cuda.empty_cache() + if 2 in mirror_axes: + prediction += torch.flip(window_infer(torch.flip(x, (4,)), model, **kwargs), (4,)).cpu() + torch.cuda.empty_cache() + if 0 in mirror_axes and 1 in mirror_axes: + prediction += torch.flip(window_infer(torch.flip(x, (2, 3)), model, **kwargs), (2, 3)).cpu() + torch.cuda.empty_cache() + if 0 in mirror_axes and 2 in mirror_axes: + prediction += torch.flip(window_infer(torch.flip(x, (2, 4)), model, **kwargs), (2, 4)).cpu() + torch.cuda.empty_cache() + if 1 in mirror_axes and 2 in mirror_axes: + prediction += torch.flip(window_infer(torch.flip(x, (3, 4)), model, **kwargs), (3, 4)).cpu() + torch.cuda.empty_cache() + if 0 in mirror_axes and 1 in mirror_axes and 2 in mirror_axes: + prediction += torch.flip(window_infer(torch.flip(x, (2, 3, 4)), model, **kwargs), (2, 3, 4)).cpu() + torch.cuda.empty_cache() + prediction /= num_predictons + + torch.cuda.empty_cache() + del x + return prediction + + def maybe_mirror_and_predict_cuda(self, x, model, device=torch.device("cpu"), **kwargs) -> torch.Tensor: + # mirror_axes = [0, 1, 2] + window_infer = self.window_infer + if type(device) is str: + device = torch.device(device) + + model.to(device) + x = x.to(device) + with torch.no_grad(): + print(f"predicting....") + with torch.autocast("cuda", enabled=True) if device.type == "cuda" else dummy_context(): + prediction = window_infer(x, model, **kwargs) + mirror_axes = self.mirror_axes + + if mirror_axes is not None: + # check for invalid numbers in mirror_axes + # x should be 5d for 3d images and 4d for 2d. so the max value of mirror_axes cannot exceed len(x.shape) - 3 + assert max(mirror_axes) <= len(x.shape) - 3, 'mirror_axes does not match the dimension of the input!' 
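+ # Same mirroring scheme as maybe_mirror_and_predict above, but the accumulated prediction is
+ # kept on the inference device rather than being moved to the CPU after each mirrored pass.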
+ + num_predictons = 2 ** len(mirror_axes) + if 0 in mirror_axes: + prediction += torch.flip(window_infer(torch.flip(x, (2,)), model, **kwargs), (2,)) + torch.cuda.empty_cache() + if 1 in mirror_axes: + prediction += torch.flip(window_infer(torch.flip(x, (3,)), model, **kwargs), (3,)) + torch.cuda.empty_cache() + if 2 in mirror_axes: + prediction += torch.flip(window_infer(torch.flip(x, (4,)), model, **kwargs), (4,)) + torch.cuda.empty_cache() + if 0 in mirror_axes and 1 in mirror_axes: + prediction += torch.flip(window_infer(torch.flip(x, (2, 3)), model, **kwargs), (2, 3)) + torch.cuda.empty_cache() + if 0 in mirror_axes and 2 in mirror_axes: + prediction += torch.flip(window_infer(torch.flip(x, (2, 4)), model, **kwargs), (2, 4)) + torch.cuda.empty_cache() + if 1 in mirror_axes and 2 in mirror_axes: + prediction += torch.flip(window_infer(torch.flip(x, (3, 4)), model, **kwargs), (3, 4)) + torch.cuda.empty_cache() + if 0 in mirror_axes and 1 in mirror_axes and 2 in mirror_axes: + prediction += torch.flip(window_infer(torch.flip(x, (2, 3, 4)), model, **kwargs), (2, 3, 4)).cpu() + torch.cuda.empty_cache() + prediction /= num_predictons + + torch.cuda.empty_cache() + del x + return prediction + + def save_to_nii(self, return_output, + raw_spacing, + save_dir, + case_name, + postprocess=False): + return_output = return_output.astype(np.uint8) + + # # postprocessing + if postprocess: + return_output = large_connected_domain(return_output) + + return_output = sitk.GetImageFromArray(return_output) + if isinstance(raw_spacing[0], torch.Tensor): + raw_spacing = [raw_spacing[0].item(), raw_spacing[1].item(), raw_spacing[2].item()] + + return_output.SetSpacing((raw_spacing[0], raw_spacing[1], raw_spacing[2])) + + sitk.WriteImage(return_output, os.path.join(save_dir, f"{case_name}.nii.gz")) + + print(f"{os.path.join(save_dir, f'{case_name}.nii.gz')} is saved successfully") \ No newline at end of file diff --git a/SegMamba/light_training/prediction_fp32.py b/SegMamba/light_training/prediction_fp32.py new file mode 100644 index 0000000000000000000000000000000000000000..be3776bbade56b98a1fb55143dd26b355607d491 --- /dev/null +++ b/SegMamba/light_training/prediction_fp32.py @@ -0,0 +1,142 @@ + +import torch +import numpy as np +import SimpleITK as sitk +import os +from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape +from scipy import ndimage +import skimage.measure as measure + +class dummy_context(object): + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + +def large_connected_domain(label): + cd, num = measure.label(label, return_num=True, connectivity=1) + volume = np.zeros([num]) + for k in range(num): + volume[k] = ((cd == (k + 1)).astype(np.uint8)).sum() + volume_sort = np.argsort(volume) + # print(volume_sort) + label = (cd == (volume_sort[-1] + 1)).astype(np.uint8) + label = ndimage.binary_fill_holes(label) + label = label.astype(np.uint8) + return label + +class Predictor: + def __init__(self, window_infer, mirror_axes=None) -> None: + self.window_infer = window_infer + self.mirror_axes = mirror_axes + + @staticmethod + def predict_raw_probability(model_output, properties): + if len(model_output.shape) == 5: + model_output = model_output[0] + + shape_before_resample = model_output.shape + if isinstance(model_output, torch.Tensor): + model_output = model_output.cpu().numpy() + + spacing = properties["spacing"] + new_spacing = [spacing[0].item(), spacing[1].item(), spacing[2].item()] + new_spacing_trans = 
new_spacing[::-1] + + print(f"current spacing is {[0.5, 0.70410156, 0.70410156]}, new_spacing is {new_spacing_trans}") + shape_after_cropping_before_resample = properties["shape_after_cropping_before_resample"] + d, w, h = shape_after_cropping_before_resample[0].item(), shape_after_cropping_before_resample[1].item(), shape_after_cropping_before_resample[2].item() + # model_output = torch.nn.functional.interpolate(model_output, mode="nearest", size=(d, w, h)) + model_output = resample_data_or_seg_to_shape(model_output, + new_shape=(d, w, h), + current_spacing=[0.5, 0.70410156, 0.70410156], + new_spacing=new_spacing_trans, + is_seg=False, + order=1, + order_z=0) + shape_after_resample = model_output.shape + print(f"before resample shape: {shape_before_resample}, after resample shape: {shape_after_resample}") + + return model_output + + @staticmethod + def apply_nonlinear(model_output, nonlinear_type="softmax"): + if isinstance(model_output, np.ndarray): + model_output = torch.from_numpy(model_output) + assert len(model_output.shape) == 4 + + assert nonlinear_type in ["softmax", "sigmoid"] + + if nonlinear_type == "softmax": + model_output = torch.softmax(model_output, dim=0) + model_output = model_output.argmax(dim=0) + else : + model_output = torch.sigmoid(model_output) + + return model_output.numpy() + + + @staticmethod + def predict_noncrop_probability(model_output, properties): + assert len(model_output.shape) == 3 + + shape_before_cropping = properties["shape_before_cropping"] + none_crop_pred = np.zeros([shape_before_cropping[0], shape_before_cropping[1], shape_before_cropping[2]], dtype=np.uint8) + bbox_used_for_cropping = properties["bbox_used_for_cropping"] + + none_crop_pred[ + bbox_used_for_cropping[0][0]: bbox_used_for_cropping[0][1], + bbox_used_for_cropping[1][0]: bbox_used_for_cropping[1][1], + bbox_used_for_cropping[2][0]: bbox_used_for_cropping[2][1]] = model_output + + return model_output + + def maybe_mirror_and_predict(self, x, model, **kwargs) -> torch.Tensor: + # mirror_axes = [0, 1, 2] + window_infer = self.window_infer + device = next(model.parameters()).device + + with torch.no_grad(): + prediction = window_infer(x, model, **kwargs) + mirror_axes = self.mirror_axes + + if mirror_axes is not None: + # check for invalid numbers in mirror_axes + # x should be 5d for 3d images and 4d for 2d. so the max value of mirror_axes cannot exceed len(x.shape) - 3 + assert max(mirror_axes) <= len(x.shape) - 3, 'mirror_axes does not match the dimension of the input!' 
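+ # Average over all combinations of flips along the requested mirror axes
+ # (2 ** len(mirror_axes) forward passes in total, including the unflipped one).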
+ + num_predictons = 2 ** len(mirror_axes) + if 0 in mirror_axes: + prediction += torch.flip(window_infer(torch.flip(x, (2,)), model, **kwargs), (2,)) + if 1 in mirror_axes: + prediction += torch.flip(window_infer(torch.flip(x, (3,)), model, **kwargs), (3,)) + if 2 in mirror_axes: + prediction += torch.flip(window_infer(torch.flip(x, (4,)), model, **kwargs), (4,)) + if 0 in mirror_axes and 1 in mirror_axes: + prediction += torch.flip(window_infer(torch.flip(x, (2, 3)), model, **kwargs), (2, 3)) + if 0 in mirror_axes and 2 in mirror_axes: + prediction += torch.flip(window_infer(torch.flip(x, (2, 4)), model, **kwargs), (2, 4)) + if 1 in mirror_axes and 2 in mirror_axes: + prediction += torch.flip(window_infer(torch.flip(x, (3, 4)), model, **kwargs), (3, 4)) + if 0 in mirror_axes and 1 in mirror_axes and 2 in mirror_axes: + prediction += torch.flip(window_infer(torch.flip(x, (2, 3, 4)), model, **kwargs), (2, 3, 4)) + prediction /= num_predictons + + return prediction + + def save_to_nii(self, return_output, + raw_spacing, + save_dir, + case_name, + postprocess=False): + return_output = return_output.astype(np.uint8) + + # # postprocessing + if postprocess: + return_output = large_connected_domain(return_output) + + return_output = sitk.GetImageFromArray(return_output) + return_output.SetSpacing((raw_spacing[0].item(), raw_spacing[1].item(), raw_spacing[2].item())) + + sitk.WriteImage(return_output, os.path.join(save_dir, f"{case_name}.nii.gz")) \ No newline at end of file diff --git a/SegMamba/light_training/preprocessing/__init__.py b/SegMamba/light_training/preprocessing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SegMamba/light_training/preprocessing/cropping/__init__.py b/SegMamba/light_training/preprocessing/cropping/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SegMamba/light_training/preprocessing/cropping/cropping.py b/SegMamba/light_training/preprocessing/cropping/cropping.py new file mode 100644 index 0000000000000000000000000000000000000000..cb6052c7adaf8322e94c1003e1a86a5396e4afe4 --- /dev/null +++ b/SegMamba/light_training/preprocessing/cropping/cropping.py @@ -0,0 +1,51 @@ +import numpy as np + + +# Hello! crop_to_nonzero is the function you are looking for. Ignore the rest. 
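+# A minimal, illustrative usage sketch (toy shapes assumed, not taken from this repository):
+#
+#   import numpy as np
+#   data = np.zeros((1, 64, 64, 64), dtype=np.float32)   # (C, X, Y, Z) image
+#   data[:, 20:40, 10:30, 15:35] = 1.0                    # small nonzero block
+#   cropped, seg, bbox = crop_to_nonzero(data)
+#   # 'cropped' is reduced to the bounding box of the nonzero region; since no segmentation was
+#   # passed in, 'seg' encodes that region (0 inside, nonzero_label outside) and 'bbox' allows
+#   # the crop to be undone later.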
+from acvl_utils.cropping_and_padding.bounding_boxes import get_bbox_from_mask, crop_to_bbox, bounding_box_to_slice + + +def create_nonzero_mask(data): + """ + + :param data: + :return: the mask is True where the data is nonzero + """ + from scipy.ndimage import binary_fill_holes + assert len(data.shape) == 4 or len(data.shape) == 3, "data must have shape (C, X, Y, Z) or shape (C, X, Y)" + nonzero_mask = np.zeros(data.shape[1:], dtype=bool) + for c in range(data.shape[0]): + this_mask = data[c] != 0 + nonzero_mask = nonzero_mask | this_mask + nonzero_mask = binary_fill_holes(nonzero_mask) + return nonzero_mask + + +def crop_to_nonzero(data, seg=None, nonzero_label=-1): + """ + + :param data: + :param seg: + :param nonzero_label: this will be written into the segmentation map + :return: + """ + nonzero_mask = create_nonzero_mask(data) + bbox = get_bbox_from_mask(nonzero_mask) + + slicer = bounding_box_to_slice(bbox) + data = data[tuple([slice(None), *slicer])] + + if seg is not None: + seg = seg[tuple([slice(None), *slicer])] + + nonzero_mask = nonzero_mask[slicer][None] + if seg is not None: + seg[(seg == 0) & (~nonzero_mask)] = nonzero_label + else: + nonzero_mask = nonzero_mask.astype(np.int8) + nonzero_mask[nonzero_mask == 0] = nonzero_label + nonzero_mask[nonzero_mask > 0] = 0 + seg = nonzero_mask + return data, seg, bbox + + diff --git a/SegMamba/light_training/preprocessing/normalization/__init__.py b/SegMamba/light_training/preprocessing/normalization/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SegMamba/light_training/preprocessing/normalization/default_normalization_schemes.py b/SegMamba/light_training/preprocessing/normalization/default_normalization_schemes.py new file mode 100644 index 0000000000000000000000000000000000000000..ef96408773b6f53247173b83999313aa8ad4ff8d --- /dev/null +++ b/SegMamba/light_training/preprocessing/normalization/default_normalization_schemes.py @@ -0,0 +1,126 @@ +from abc import ABC, abstractmethod +from typing import Type + +import numpy as np +from numpy import number +from monai.transforms.utils_pytorch_numpy_unification import clip + + +class ImageNormalization(ABC): + leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = None + + def __init__(self, use_mask_for_norm: bool = None, intensityproperties: dict = None, + target_dtype: Type[number] = np.float32): + assert use_mask_for_norm is None or isinstance(use_mask_for_norm, bool) + self.use_mask_for_norm = use_mask_for_norm + assert isinstance(intensityproperties, dict) + self.intensityproperties = intensityproperties + self.target_dtype = target_dtype + + @abstractmethod + def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray: + """ + Image and seg must have the same shape. Seg is not always used + """ + pass + + +class ZScoreNormalization(ImageNormalization): + leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = True + + def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray: + """ + here seg is used to store the zero valued region. The value for that region in the segmentation is -1 by + default. + """ + image = image.astype(self.target_dtype) + if self.use_mask_for_norm is not None and self.use_mask_for_norm: + # negative values in the segmentation encode the 'outside' region (think zero values around the brain as + # in BraTS). We want to run the normalization only in the brain region, so we need to mask the image. 
+ # The default nnU-net sets use_mask_for_norm to True if cropping to the nonzero region substantially + # reduced the image size. + mask = seg >= 0 + mean = image[mask].mean() + std = image[mask].std() + image[mask] = (image[mask] - mean) / (max(std, 1e-8)) + else: + mean = image.mean() + std = image.std() + image = (image - mean) / (max(std, 1e-8)) + return image + + +class CTNormStandard: + def __init__( + self, + a_min: float, + a_max: float, + b_min, + b_max, + clip=False, + dtype=np.float32, + ): + self.a_min = a_min + self.a_max = a_max + self.b_min = b_min + self.b_max = b_max + self.clip = clip + self.dtype = dtype + + def __call__(self, img): + """ + Apply the transform to `img`. + """ + + img = (img - self.a_min) / (self.a_max - self.a_min) + if (self.b_min is not None) and (self.b_max is not None): + img = img * (self.b_max - self.b_min) + self.b_min + if self.clip: + img = clip(img, self.b_min, self.b_max) + + return img + +class CTNormalization(ImageNormalization): + leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = False + + def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray: + assert self.intensityproperties is not None, "CTNormalization requires intensity properties" + image = image.astype(self.target_dtype) + mean_intensity = self.intensityproperties['mean'] + std_intensity = self.intensityproperties['std'] + lower_bound = self.intensityproperties['percentile_00_5'] + upper_bound = self.intensityproperties['percentile_99_5'] + image = np.clip(image, lower_bound, upper_bound) + image = (image - mean_intensity) / max(std_intensity, 1e-8) + return image + + +class NoNormalization(ImageNormalization): + leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = False + + def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray: + return image.astype(self.target_dtype) + + +class RescaleTo01Normalization(ImageNormalization): + leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = False + + def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray: + image = image.astype(self.target_dtype) + image = image - image.min() + image = image / np.clip(image.max(), a_min=1e-8, a_max=None) + return image + + +class RGBTo01Normalization(ImageNormalization): + leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = False + + def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray: + assert image.min() >= 0, "RGB images are uint 8, for whatever reason I found pixel values smaller than 0. " \ + "Your images do not seem to be RGB images" + assert image.max() <= 255, "RGB images are uint 8, for whatever reason I found pixel values greater than 255" \ + ". Your images do not seem to be RGB images" + image = image.astype(self.target_dtype) + image = image / 255. 
+ return image + diff --git a/SegMamba/light_training/preprocessing/normalization/map_channel_name_to_normalization.py b/SegMamba/light_training/preprocessing/normalization/map_channel_name_to_normalization.py new file mode 100644 index 0000000000000000000000000000000000000000..e82165069a078b1290e1ba96e2061e4d450cb12d --- /dev/null +++ b/SegMamba/light_training/preprocessing/normalization/map_channel_name_to_normalization.py @@ -0,0 +1,24 @@ +from typing import Type + +from nnunetv2.preprocessing.normalization.default_normalization_schemes import CTNormalization, NoNormalization, \ + ZScoreNormalization, RescaleTo01Normalization, RGBTo01Normalization, ImageNormalization + +channel_name_to_normalization_mapping = { + 'CT': CTNormalization, + 'noNorm': NoNormalization, + 'zscore': ZScoreNormalization, + 'rescale_0_1': RescaleTo01Normalization, + 'rgb_to_0_1': RGBTo01Normalization +} + + +def get_normalization_scheme(channel_name: str) -> Type[ImageNormalization]: + """ + If we find the channel_name in channel_name_to_normalization_mapping return the corresponding normalization. If it is + not found, use the default (ZScoreNormalization) + """ + norm_scheme = channel_name_to_normalization_mapping.get(channel_name) + if norm_scheme is None: + norm_scheme = ZScoreNormalization + # print('Using %s for image normalization' % norm_scheme.__name__) + return norm_scheme diff --git a/SegMamba/light_training/preprocessing/normalization/readme.md b/SegMamba/light_training/preprocessing/normalization/readme.md new file mode 100644 index 0000000000000000000000000000000000000000..7b5439612571240eba0926370bb1fed5044eecce --- /dev/null +++ b/SegMamba/light_training/preprocessing/normalization/readme.md @@ -0,0 +1,5 @@ +The channel_names entry in dataset.json only determines the normlaization scheme. So if you want to use something different +then you can just +- create a new subclass of ImageNormalization +- map your custom channel identifier to that subclass in channel_name_to_normalization_mapping +- run plan and preprocess again with your custom normlaization scheme \ No newline at end of file diff --git a/SegMamba/light_training/preprocessing/preprocessors/__init__.py b/SegMamba/light_training/preprocessing/preprocessors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor.py b/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..15b7f599733069c5542930588f8b5e070fea3c5e --- /dev/null +++ b/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor.py @@ -0,0 +1,528 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
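+# The normalization readme above sketches how to plug in a custom scheme; roughly (class and
+# channel names below are made up for illustration):
+#
+#   class MyWindowNorm(ImageNormalization):
+#       def run(self, image, seg=None):
+#           image = image.astype(self.target_dtype)
+#           return np.clip(image, -100., 300.) / 300.   # simple fixed intensity window
+#
+#   channel_name_to_normalization_mapping['myCT'] = MyWindowNorm
+#   # then reference "myCT" in dataset.json's channel_names and re-run plan & preprocess.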
+import multiprocessing +import shutil +from time import sleep +from typing import Union, Tuple +import glob +import numpy as np +from batchgenerators.utilities.file_and_folder_operations import * +from light_training.preprocessing.cropping.cropping import crop_to_nonzero +# from .default_resampling import resample_data_or_seg_to_spacing, resample_img +from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape, compute_new_shape +from tqdm import tqdm +from light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, ZScoreNormalization +import SimpleITK as sitk +from tqdm import tqdm +from copy import deepcopy +import json + +def create_image(image_arr, spacing): + image = sitk.GetImageFromArray(image_arr) + image.SetSpacing(spacing) + return image + +def get_shape_must_be_divisible_by(net_numpool_per_axis): + return 2 ** np.array(net_numpool_per_axis) + +def pad_shape(shape, must_be_divisible_by): + """ + pads shape so that it is divisible by must_be_divisible_by + :param shape: + :param must_be_divisible_by: + :return: + """ + if not isinstance(must_be_divisible_by, (tuple, list, np.ndarray)): + must_be_divisible_by = [must_be_divisible_by] * len(shape) + else: + assert len(must_be_divisible_by) == len(shape) + + new_shp = [shape[i] + must_be_divisible_by[i] - shape[i] % must_be_divisible_by[i] for i in range(len(shape))] + + for i in range(len(shape)): + if shape[i] % must_be_divisible_by[i] == 0: + new_shp[i] -= must_be_divisible_by[i] + new_shp = np.array(new_shp).astype(int) + return new_shp + +def get_pool_and_conv_props(spacing, patch_size, min_feature_map_size, max_numpool): + """ + this is the same as get_pool_and_conv_props_v2 from old nnunet + + :param spacing: + :param patch_size: + :param min_feature_map_size: min edge length of feature maps in bottleneck + :param max_numpool: + :return: + """ + # todo review this code + dim = len(spacing) + + current_spacing = deepcopy(list(spacing)) + current_size = deepcopy(list(patch_size)) + + pool_op_kernel_sizes = [[1] * len(spacing)] + conv_kernel_sizes = [] + + num_pool_per_axis = [0] * dim + kernel_size = [1] * dim + + while True: + # exclude axes that we cannot pool further because of min_feature_map_size constraint + valid_axes_for_pool = [i for i in range(dim) if current_size[i] >= 2*min_feature_map_size] + if len(valid_axes_for_pool) < 1: + break + + spacings_of_axes = [current_spacing[i] for i in valid_axes_for_pool] + + # find axis that are within factor of 2 within smallest spacing + min_spacing_of_valid = min(spacings_of_axes) + valid_axes_for_pool = [i for i in valid_axes_for_pool if current_spacing[i] / min_spacing_of_valid < 2] + + # max_numpool constraint + valid_axes_for_pool = [i for i in valid_axes_for_pool if num_pool_per_axis[i] < max_numpool] + + if len(valid_axes_for_pool) == 1: + if current_size[valid_axes_for_pool[0]] >= 3 * min_feature_map_size: + pass + else: + break + if len(valid_axes_for_pool) < 1: + break + + # now we need to find kernel sizes + # kernel sizes are initialized to 1. They are successively set to 3 when their associated axis becomes within + # factor 2 of min_spacing. 
Once they are 3 they remain 3 + for d in range(dim): + if kernel_size[d] == 3: + continue + else: + if spacings_of_axes[d] / min(current_spacing) < 2: + kernel_size[d] = 3 + + other_axes = [i for i in range(dim) if i not in valid_axes_for_pool] + + pool_kernel_sizes = [0] * dim + for v in valid_axes_for_pool: + pool_kernel_sizes[v] = 2 + num_pool_per_axis[v] += 1 + current_spacing[v] *= 2 + current_size[v] = np.ceil(current_size[v] / 2) + for nv in other_axes: + pool_kernel_sizes[nv] = 1 + + pool_op_kernel_sizes.append(pool_kernel_sizes) + conv_kernel_sizes.append(deepcopy(kernel_size)) + #print(conv_kernel_sizes) + + must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis) + patch_size = pad_shape(patch_size, must_be_divisible_by) + + # we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here + conv_kernel_sizes.append([3]*dim) + return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by + + +class DefaultPreprocessor(object): + def __init__(self, + base_dir, + image_dir, + # output_dir, + # out_spacing, + label_dir=None, + data_type="CT"): + """ + Everything we need is in the plans. Those are given when run() is called + """ + self.base_dir = base_dir + self.image_dir = image_dir + self.label_dir = label_dir + + self.data_type = data_type + + def run_case_npy(self, data: np.ndarray, seg, properties: dict): + # let's not mess up the inputs! + data = np.copy(data) + old_shape = data.shape + original_spacing = list(properties['spacing']) + ## 由于old spacing读出来是反的,因此这里需要转置一下 + + original_spacing_trans = original_spacing[::-1] + properties["original_spacing_trans"] = original_spacing_trans + properties["target_spacing_trans"] = self.out_spacing + + shape_before_cropping = data.shape[1:] + ## crop + properties['shape_before_cropping'] = shape_before_cropping + # this command will generate a segmentation. This is important because of the nonzero mask which we may need + data, seg, bbox = crop_to_nonzero(data, seg) + properties['bbox_used_for_cropping'] = bbox + + # crop, remember to store size before cropping! 
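+ # (this pre-resampling shape is stored so that Predictor.predict_raw_probability can resample
+ # the network output back to it at export time)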
+ shape_before_resample = data.shape[1:] + properties['shape_after_cropping_before_resample'] = shape_before_resample + + new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing) + + if seg is None : + seg_norm = np.zeros_like(data) + else : + seg_norm = seg + data = self._normalize(data, seg_norm, + self.foreground_intensity_properties_per_channel) + + assert len(data.shape) == 4 + + data = resample_data_or_seg_to_shape(data, new_shape, + original_spacing, + self.out_spacing, + order=3, + order_z=0) + properties['shape_after_resample'] = new_shape + + if seg is not None : + assert len(seg.shape) == 4 + seg = resample_data_or_seg_to_shape(seg, new_shape, + original_spacing, + self.out_spacing, + is_seg=True, + order=1, + order_z=0) + + properties['class_locations'] = self._sample_foreground_locations(seg, + self.all_labels, + ) + + if np.max(seg) > 127: + seg = seg.astype(np.int16) + else: + seg = seg.astype(np.int8) + + # assert len(seg.shape) == 4 + # seg = create_image(seg[0], original_spacing) + # seg = resample_img(seg, out_spacing=self.out_spacing, is_label=True) + # seg = sitk.GetArrayFromImage(seg)[None] + # print(f"all_labels is {np.unique(seg)}") + # if np.max(seg) > 127: + # seg = seg.astype(np.int16) + # else: + # seg = seg.astype(np.int8) + # seg = resample_data_or_seg_to_spacing(seg, current_spacing=original_spacing, + # new_spacing=self.out_spacing, is_seg=True) + + print(f'old shape: {old_shape}, new_shape after crop and resample: {new_shape}, old_spacing: {original_spacing}, ' + f'new_spacing: {self.out_spacing}, boxes is {bbox}') + + return data, seg + + # need to modify + def get_iterable_list(self): + all_cases = os.listdir(os.path.join(self.base_dir, self.image_dir)) + + return all_cases + + def _normalize(self, data: np.ndarray, seg: np.ndarray, + foreground_intensity_properties_per_channel: dict) -> np.ndarray: + for c in range(data.shape[0]): + normalizer_class = CTNormalization + normalizer = normalizer_class(use_mask_for_norm=False, + intensityproperties=foreground_intensity_properties_per_channel[str(c)]) + data[c] = normalizer.run(data[c], seg[0]) + return data + + # need to modify + def read_data(self, case_name): + ## only for CT dataset + data = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name)) + seg_arr = None + ## 一定要是float32!!!! + data_arr = sitk.GetArrayFromImage(data).astype(np.float32) + data_arr = data_arr[None] + + if self.label_dir is not None: + seg = sitk.ReadImage(os.path.join(self.base_dir, self.label_dir, case_name)) + ## 读出来以后一定转float32!!! 
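+ ## (i.e. always cast the segmentation to float32 immediately after reading it)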
+ seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32) + seg_arr = seg_arr[None] + intensities_per_channel, intensity_statistics_per_channel = self.collect_foreground_intensities(seg_arr, data_arr) + + else : + intensities_per_channel = [] + intensity_statistics_per_channel = [] + + properties = {"spacing": data.GetSpacing(), + "raw_size": data_arr.shape[1:], + "name": case_name.split(".")[0], + "intensities_per_channel": intensities_per_channel, + "intensity_statistics_per_channel": intensity_statistics_per_channel} + + return data_arr, seg_arr, properties + + def run_case(self, case_name): + """ + seg file can be none (test cases) + + order of operations is: transpose -> crop -> resample + so when we export we need to run the following order: resample -> crop -> transpose (we could also run + transpose at a different place, but reverting the order of operations done during preprocessing seems cleaner) + """ + data, seg, properties = self.read_data(case_name) + + data, seg = self.run_case_npy(data, seg, properties) + return data, seg, properties + + def run_case_save(self, case_name): + print(case_name + "~~~~~~~~" * 10) + data, seg, properties = self.run_case(case_name) + # print('dtypes', data.dtype, seg.dtype) + case_name = case_name.split(".")[0] + np.savez_compressed(os.path.join(self.output_dir, case_name) + '.npz', data=data, seg=seg) + write_pickle(properties, os.path.join(self.output_dir, case_name) + '.pkl') + print(f"data is saved at: {os.path.join(self.output_dir, case_name) + '.npz'}") + + def experiment_plan(self, case_name): + + data, seg, properties = self.read_data(case_name) + print(f"labels is {np.unique(seg)}") + spacing = properties["spacing"] + raw_size = properties["raw_size"] + intensities_per_channel = properties["intensities_per_channel"] + + return spacing, raw_size, intensities_per_channel + + def determine_fullres_target_spacing(self, spacings, sizes) -> np.ndarray: + # if self.overwrite_target_spacing is not None: + # return np.array(self.overwrite_target_spacing) + + # spacings = self.dataset_fingerprint['spacings'] + # sizes = self.dataset_fingerprint['shapes_after_crop'] + + target = np.percentile(np.vstack(spacings), 50, 0) + target_size = np.percentile(np.vstack(sizes), 50, 0) + # we need to identify datasets for which a different target spacing could be beneficial. 
These datasets have + # the following properties: + # - one axis which much lower resolution than the others + # - the lowres axis has much less voxels than the others + # - (the size in mm of the lowres axis is also reduced) + worst_spacing_axis = np.argmax(target) + other_axes = [i for i in range(len(target)) if i != worst_spacing_axis] + other_spacings = [target[i] for i in other_axes] + other_sizes = [target_size[i] for i in other_axes] + + has_aniso_spacing = target[worst_spacing_axis] > (3 * max(other_spacings)) + has_aniso_voxels = target_size[worst_spacing_axis] * 3 < min(other_sizes) + + if has_aniso_spacing and has_aniso_voxels: + spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis] + target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 10) + # don't let the spacing of that axis get higher than the other axes + if target_spacing_of_that_axis < max(other_spacings): + target_spacing_of_that_axis = max(max(other_spacings), target_spacing_of_that_axis) + 1e-5 + target[worst_spacing_axis] = target_spacing_of_that_axis + return target + + def compute_new_shape(self, old_shape: Union[Tuple[int, ...], List[int], np.ndarray], + old_spacing: Union[Tuple[float, ...], List[float], np.ndarray], + new_spacing: Union[Tuple[float, ...], List[float], np.ndarray]) -> np.ndarray: + ## spacing need to be transposed + old_spacing = list(old_spacing)[::-1] + new_spacing = list(new_spacing)[::-1] + + assert len(old_spacing) == len(old_shape) + assert len(old_shape) == len(new_spacing) + new_shape = np.array([int(round(i / j * k)) for i, j, k in zip(old_spacing, new_spacing, old_shape)]) + return new_shape + + def run_plan(self): + all_iter = self.get_iterable_list() + spacings = [] + sizes = [] + intensities_per_channels = [] + print(f"analysing data......") + for case in tqdm(all_iter, total=len(all_iter)): + spacing, size, intensities_per_channel = self.experiment_plan(case) + spacings.append(spacing) + sizes.append(size) + intensities_per_channels.append(intensities_per_channel) + + print(f"all spacing is {spacings}") + print(f"all sizes is {sizes}") + foreground_intensities_per_channel = [np.concatenate([r[i] for r in intensities_per_channels]) for i in + range(len(intensities_per_channels[0]))] + + num_channels = len(intensities_per_channels[0]) + + intensity_statistics_per_channel = {} + for i in range(num_channels): + intensity_statistics_per_channel[i] = { + 'mean': float(np.mean(foreground_intensities_per_channel[i])), + 'median': float(np.median(foreground_intensities_per_channel[i])), + 'std': float(np.std(foreground_intensities_per_channel[i])), + 'min': float(np.min(foreground_intensities_per_channel[i])), + 'max': float(np.max(foreground_intensities_per_channel[i])), + 'percentile_99_5': float(np.percentile(foreground_intensities_per_channel[i], 99.5)), + 'percentile_00_5': float(np.percentile(foreground_intensities_per_channel[i], 0.5)), + } + + print(f"intensity_statistics_per_channel is {intensity_statistics_per_channel}") + + fullres_spacing = self.determine_fullres_target_spacing(spacings, sizes) + print(f"fullres spacing is {fullres_spacing[::-1]}") + + # get transposed new median shape (what we would have after resampling) + new_shapes = [self.compute_new_shape(j, i, fullres_spacing) for i, j in + zip(spacings, sizes)] + new_median_shape = np.median(new_shapes, 0) + print(f"median_shape is {new_median_shape}") + + tmp = 1 / np.array(fullres_spacing) + initial_patch_size = [round(i) for i in tmp * (256 ** 3 / np.prod(tmp)) ** (1 / 3)] + + 
print(f"initial_patch_size is {initial_patch_size[::-1]}") + + network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, \ + shape_must_be_divisible_by = get_pool_and_conv_props(fullres_spacing, initial_patch_size, + 4, + 999999) + print(f"target medium patch size is {patch_size[::-1]}") + + analysis_path = "./data_analysis_result.txt" + with open(analysis_path, "w") as f: + + f.write(json.dumps({ + "intensity_statistics_per_channel": intensity_statistics_per_channel, + "fullres spacing": fullres_spacing.tolist(), + "median_shape": new_median_shape.tolist(), + "initial_patch_size": initial_patch_size, + "target medium patch size": patch_size[::-1].tolist() + })) + print(f"Analysis done, save to {analysis_path}") + + + def collect_foreground_intensities(self, segmentation: np.ndarray, images: np.ndarray, seed: int = 1234, + num_samples: int = 10000): + """ + images=image with multiple channels = shape (c, x, y(, z)) + """ + assert len(images.shape) == 4 + assert len(segmentation.shape) == 4 + + assert not np.any(np.isnan(segmentation)), "Segmentation contains NaN values. grrrr.... :-(" + assert not np.any(np.isnan(images)), "Images contains NaN values. grrrr.... :-(" + + rs = np.random.RandomState(seed) + + intensities_per_channel = [] + # we don't use the intensity_statistics_per_channel at all, it's just something that might be nice to have + intensity_statistics_per_channel = [] + + # segmentation is 4d: 1,x,y,z. We need to remove the empty dimension for the following code to work + foreground_mask = segmentation[0] > 0 + + for i in range(len(images)): + foreground_pixels = images[i][foreground_mask] + num_fg = len(foreground_pixels) + # sample with replacement so that we don't get issues with cases that have less than num_samples + # foreground_pixels. 
We could also just sample less in those cases but that would than cause these + # training cases to be underrepresented + intensities_per_channel.append( + rs.choice(foreground_pixels, num_samples, replace=True) if num_fg > 0 else []) + intensity_statistics_per_channel.append({ + 'mean': np.mean(foreground_pixels) if num_fg > 0 else np.nan, + 'median': np.median(foreground_pixels) if num_fg > 0 else np.nan, + 'min': np.min(foreground_pixels) if num_fg > 0 else np.nan, + 'max': np.max(foreground_pixels) if num_fg > 0 else np.nan, + 'percentile_99_5': np.percentile(foreground_pixels, 99.5) if num_fg > 0 else np.nan, + 'percentile_00_5': np.percentile(foreground_pixels, 0.5) if num_fg > 0 else np.nan, + + }) + + return intensities_per_channel, intensity_statistics_per_channel + + @staticmethod + def _sample_foreground_locations(seg: np.ndarray, classes_or_regions: Union[List[int], List[Tuple[int, ...]]], + seed: int = 1234, verbose: bool = False): + num_samples = 10000 + min_percent_coverage = 0.01 # at least 1% of the class voxels need to be selected, otherwise it may be too + # sparse + rndst = np.random.RandomState(seed) + class_locs = {} + for c in classes_or_regions: + k = c if not isinstance(c, list) else tuple(c) + if isinstance(c, (tuple, list)): + ## region + mask = seg == c[0] + for cc in c[1:]: + mask = mask | (seg == cc) + all_locs = np.argwhere(mask) + else: + all_locs = np.argwhere(seg == c) + if len(all_locs) == 0: + class_locs[k] = [] + continue + target_num_samples = min(num_samples, len(all_locs)) + target_num_samples = max(target_num_samples, int(np.ceil(len(all_locs) * min_percent_coverage))) + + selected = all_locs[rndst.choice(len(all_locs), target_num_samples, replace=False)] + class_locs[k] = selected + if verbose: + print(c, target_num_samples) + + return class_locs + + def run(self, output_spacing, + output_dir, + all_labels, + foreground_intensity_properties_per_channel=None, + num_processes=8): + self.out_spacing = output_spacing + self.all_labels = all_labels + self.output_dir = output_dir + self.foreground_intensity_properties_per_channel = foreground_intensity_properties_per_channel + + all_iter = self.get_iterable_list() + + maybe_mkdir_p(self.output_dir) + + # test_run + for case_name in all_iter: + self.run_case_save(case_name) + break + + # multiprocessing magic. + r = [] + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + for case_name in all_iter: + r.append(p.starmap_async(self.run_case_save, + ((case_name, ),))) + remaining = list(range(len(all_iter))) + # p is pretty nifti. If we kill workers they just respawn but don't do any work. + # So we need to store the original pool of workers. + workers = [j for j in p._pool] + with tqdm(desc=None, total=len(all_iter)) as pbar: + while len(remaining) > 0: + all_alive = all([j.is_alive() for j in workers]) + if not all_alive: + raise RuntimeError('Some background worker is 6 feet under. Yuck. \n' + 'OK jokes aside.\n' + 'One of your background processes is missing. This could be because of ' + 'an error (look for an error message) or because it was killed ' + 'by your OS due to running out of RAM. If you don\'t see ' + 'an error message, out of RAM is likely the problem. 
In that case ' + 'reducing the number of workers might help') + done = [i for i in remaining if r[i].ready()] + for _ in done: + pbar.update() + remaining = [i for i in remaining if i not in done] + sleep(0.1) \ No newline at end of file diff --git a/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor_AbdomenAtlas1_0Mini.py b/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor_AbdomenAtlas1_0Mini.py new file mode 100644 index 0000000000000000000000000000000000000000..05672b29a3e7546e9cfdbaa8354d184f1af72182 --- /dev/null +++ b/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor_AbdomenAtlas1_0Mini.py @@ -0,0 +1,540 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import multiprocessing +import shutil +from time import sleep +from typing import Union, Tuple +import glob +import numpy as np +from batchgenerators.utilities.file_and_folder_operations import * +from light_training.preprocessing.cropping.cropping import crop_to_nonzero +# from .default_resampling import resample_data_or_seg_to_spacing, resample_img +from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape, compute_new_shape +from tqdm import tqdm +from light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, ZScoreNormalization +import SimpleITK as sitk +from tqdm import tqdm +from copy import deepcopy +import json + +def create_image(image_arr, spacing): + image = sitk.GetImageFromArray(image_arr) + image.SetSpacing(spacing) + return image + +def get_shape_must_be_divisible_by(net_numpool_per_axis): + return 2 ** np.array(net_numpool_per_axis) + +def pad_shape(shape, must_be_divisible_by): + """ + pads shape so that it is divisible by must_be_divisible_by + :param shape: + :param must_be_divisible_by: + :return: + """ + if not isinstance(must_be_divisible_by, (tuple, list, np.ndarray)): + must_be_divisible_by = [must_be_divisible_by] * len(shape) + else: + assert len(must_be_divisible_by) == len(shape) + + new_shp = [shape[i] + must_be_divisible_by[i] - shape[i] % must_be_divisible_by[i] for i in range(len(shape))] + + for i in range(len(shape)): + if shape[i] % must_be_divisible_by[i] == 0: + new_shp[i] -= must_be_divisible_by[i] + new_shp = np.array(new_shp).astype(int) + return new_shp + +def get_pool_and_conv_props(spacing, patch_size, min_feature_map_size, max_numpool): + """ + this is the same as get_pool_and_conv_props_v2 from old nnunet + + :param spacing: + :param patch_size: + :param min_feature_map_size: min edge length of feature maps in bottleneck + :param max_numpool: + :return: + """ + # todo review this code + dim = len(spacing) + + current_spacing = deepcopy(list(spacing)) + current_size = deepcopy(list(patch_size)) + + pool_op_kernel_sizes = [[1] * len(spacing)] + conv_kernel_sizes = [] + + num_pool_per_axis = [0] * dim + kernel_size = [1] * dim + + while 
True: + # exclude axes that we cannot pool further because of min_feature_map_size constraint + valid_axes_for_pool = [i for i in range(dim) if current_size[i] >= 2*min_feature_map_size] + if len(valid_axes_for_pool) < 1: + break + + spacings_of_axes = [current_spacing[i] for i in valid_axes_for_pool] + + # find axis that are within factor of 2 within smallest spacing + min_spacing_of_valid = min(spacings_of_axes) + valid_axes_for_pool = [i for i in valid_axes_for_pool if current_spacing[i] / min_spacing_of_valid < 2] + + # max_numpool constraint + valid_axes_for_pool = [i for i in valid_axes_for_pool if num_pool_per_axis[i] < max_numpool] + + if len(valid_axes_for_pool) == 1: + if current_size[valid_axes_for_pool[0]] >= 3 * min_feature_map_size: + pass + else: + break + if len(valid_axes_for_pool) < 1: + break + + # now we need to find kernel sizes + # kernel sizes are initialized to 1. They are successively set to 3 when their associated axis becomes within + # factor 2 of min_spacing. Once they are 3 they remain 3 + for d in range(dim): + if kernel_size[d] == 3: + continue + else: + if spacings_of_axes[d] / min(current_spacing) < 2: + kernel_size[d] = 3 + + other_axes = [i for i in range(dim) if i not in valid_axes_for_pool] + + pool_kernel_sizes = [0] * dim + for v in valid_axes_for_pool: + pool_kernel_sizes[v] = 2 + num_pool_per_axis[v] += 1 + current_spacing[v] *= 2 + current_size[v] = np.ceil(current_size[v] / 2) + for nv in other_axes: + pool_kernel_sizes[nv] = 1 + + pool_op_kernel_sizes.append(pool_kernel_sizes) + conv_kernel_sizes.append(deepcopy(kernel_size)) + #print(conv_kernel_sizes) + + must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis) + patch_size = pad_shape(patch_size, must_be_divisible_by) + + # we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here + conv_kernel_sizes.append([3]*dim) + return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by + + +class DefaultPreprocessor(object): + def __init__(self, + base_dir, + ): + """ + Everything we need is in the plans. Those are given when run() is called + """ + self.base_dir = base_dir + self.image_name = "ct.nii.gz" + self.seg_dir = "segmentations" + self.seg_list = ["aorta.nii.gz", "gall_bladder.nii.gz", "kidney_left.nii.gz", + "kidney_right.nii.gz", "liver.nii.gz", "pancreas.nii.gz", + "postcava.nii.gz", "spleen.nii.gz", "stomach.nii.gz"] + + def run_case_npy(self, data: np.ndarray, seg, properties: dict): + # let's not mess up the inputs! + data = np.copy(data) + old_shape = data.shape + original_spacing = list(properties['spacing']) + ## 由于old spacing读出来是反的,因此这里需要转置一下 + + original_spacing_trans = original_spacing[::-1] + properties["original_spacing_trans"] = original_spacing_trans + properties["target_spacing_trans"] = self.out_spacing + + shape_before_cropping = data.shape[1:] + ## crop + properties['shape_before_cropping'] = shape_before_cropping + # this command will generate a segmentation. This is important because of the nonzero mask which we may need + data, seg, bbox = crop_to_nonzero(data, seg) + properties['bbox_used_for_cropping'] = bbox + + # crop, remember to store size before cropping! 
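For reference, a minimal sketch of how the two shape helpers defined above interact, assuming they are in scope; the pooling counts and patch shape are illustrative values, not taken from any plan file in this repository.

```python
# Illustrative only: made-up pooling counts and patch shape.
num_pool_per_axis = [5, 5, 5]                        # five 2x poolings per axis
must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis)
print(must_be_divisible_by)                          # [32 32 32]

# pad_shape rounds every axis up to the next multiple of 32 (155 -> 160, 240 -> 256).
print(pad_shape([155, 240, 240], must_be_divisible_by))   # [160 256 256]
```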
+ shape_before_resample = data.shape[1:] + properties['shape_after_cropping_before_resample'] = shape_before_resample + + new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing) + + if seg is None : + seg_norm = np.zeros_like(data) + else : + seg_norm = seg + data = self._normalize(data, seg_norm, + self.foreground_intensity_properties_per_channel) + + assert len(data.shape) == 4 + + data = resample_data_or_seg_to_shape(data, new_shape, + original_spacing, + self.out_spacing, + order=3, + order_z=0) + properties['shape_after_resample'] = new_shape + + if seg is not None : + assert len(seg.shape) == 4 + seg = resample_data_or_seg_to_shape(seg, new_shape, + original_spacing, + self.out_spacing, + is_seg=True, + order=1, + order_z=0) + + properties['class_locations'] = self._sample_foreground_locations(seg, + self.all_labels, + ) + + if np.max(seg) > 127: + seg = seg.astype(np.int16) + else: + seg = seg.astype(np.int8) + + print(f'old shape: {old_shape}, new_shape after crop and resample: {new_shape}, old_spacing: {original_spacing}, ' + f'new_spacing: {self.out_spacing}, boxes is {bbox}') + + return data, seg + + # need to modify + def get_iterable_list(self): + all_cases = os.listdir(os.path.join(self.base_dir)) + + all_cases_2 = [] + for c in all_cases: + if os.path.isdir(os.path.join(self.base_dir, c)): + all_cases_2.append(c) + + return all_cases_2 + + def _normalize(self, data: np.ndarray, seg: np.ndarray, + foreground_intensity_properties_per_channel: dict) -> np.ndarray: + for c in range(data.shape[0]): + normalizer_class = CTNormalization + normalizer = normalizer_class(use_mask_for_norm=False, + intensityproperties=foreground_intensity_properties_per_channel[str(c)]) + data[c] = normalizer.run(data[c], seg[0]) + return data + + # need to modify + def read_data(self, case_name): + ## only for CT dataset + try: + data = sitk.ReadImage(os.path.join(self.base_dir, case_name, self.image_name)) + except: + print(f"data read error: {self.base_dir, case_name}") + return None, None, None + seg_arr = None + ## 一定要是float32!!!! + data_arr = sitk.GetArrayFromImage(data).astype(np.float32) + data_arr = data_arr[None] + + if os.path.exists(os.path.join(self.base_dir, case_name, self.seg_dir)): + segs = None + index = 0 + for target in self.seg_list: + index += 1 + seg = sitk.ReadImage(os.path.join(self.base_dir, case_name, self.seg_dir, target)) + ## 读出来以后一定转float32!!! 
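_normalize above delegates to CTNormalization, which is imported from light_training and not shown in this diff. The snippet below is only a hedged sketch of the usual nnU-Net-style CT scheme it is assumed to implement (clip to the 0.5/99.5 foreground percentiles, then z-score with the foreground mean and std); the property keys mirror the ones written by run_plan further down, and the numeric values are hypothetical.

```python
import numpy as np

def ct_normalize_sketch(channel: np.ndarray, props: dict) -> np.ndarray:
    """Assumed behaviour of CTNormalization.run(): clip HU values, then z-score."""
    clipped = np.clip(channel, props['percentile_00_5'], props['percentile_99_5'])
    return (clipped - props['mean']) / max(props['std'], 1e-8)

# Hypothetical foreground statistics for one CT channel:
props = {'mean': 80.0, 'std': 140.0,
         'percentile_00_5': -960.0, 'percentile_99_5': 1200.0}
volume = np.random.uniform(-1024, 2000, size=(8, 16, 16)).astype(np.float32)
normalized = ct_normalize_sketch(volume, props)
print(normalized.mean(), normalized.std())
```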
+ seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32) + if segs is None: + segs = seg_arr + else : + segs[seg_arr == 1] = index + + segs = segs[None] + intensities_per_channel, intensity_statistics_per_channel = self.collect_foreground_intensities(segs, data_arr) + else : + intensities_per_channel = [] + intensity_statistics_per_channel = [] + + properties = {"spacing": data.GetSpacing(), + "raw_size": data_arr.shape[1:], + "name": case_name.split(".")[0], + "intensities_per_channel": intensities_per_channel, + "intensity_statistics_per_channel": intensity_statistics_per_channel} + + return data_arr, segs, properties + + def run_case(self, case_name): + """ + seg file can be none (test cases) + + order of operations is: transpose -> crop -> resample + so when we export we need to run the following order: resample -> crop -> transpose (we could also run + transpose at a different place, but reverting the order of operations done during preprocessing seems cleaner) + """ + data, seg, properties = self.read_data(case_name) + if data is not None: + data, seg = self.run_case_npy(data, seg, properties) + return data, seg, properties + else : + return None, None, None + + def run_case_save(self, case_name): + print(case_name + "~~~~~~~~" * 10) + data, seg, properties = self.run_case(case_name) + if data is not None: + # print('dtypes', data.dtype, seg.dtype) + case_name = case_name.split(".")[0] + np.savez_compressed(os.path.join(self.output_dir, case_name) + '.npz', data=data, seg=seg) + write_pickle(properties, os.path.join(self.output_dir, case_name) + '.pkl') + print(f"data is saved at: {os.path.join(self.output_dir, case_name) + '.npz'}") + + def experiment_plan(self, case_name): + + data, seg, properties = self.read_data(case_name) + if data is None: + return None, None, None + + print(f"labels is {np.unique(seg)}") + spacing = properties["spacing"] + raw_size = properties["raw_size"] + intensities_per_channel = properties["intensities_per_channel"] + + return spacing, raw_size, intensities_per_channel + + def determine_fullres_target_spacing(self, spacings, sizes) -> np.ndarray: + # if self.overwrite_target_spacing is not None: + # return np.array(self.overwrite_target_spacing) + + # spacings = self.dataset_fingerprint['spacings'] + # sizes = self.dataset_fingerprint['shapes_after_crop'] + + target = np.percentile(np.vstack(spacings), 50, 0) + target_size = np.percentile(np.vstack(sizes), 50, 0) + # we need to identify datasets for which a different target spacing could be beneficial. 
These datasets have + # the following properties: + # - one axis which much lower resolution than the others + # - the lowres axis has much less voxels than the others + # - (the size in mm of the lowres axis is also reduced) + worst_spacing_axis = np.argmax(target) + other_axes = [i for i in range(len(target)) if i != worst_spacing_axis] + other_spacings = [target[i] for i in other_axes] + other_sizes = [target_size[i] for i in other_axes] + + has_aniso_spacing = target[worst_spacing_axis] > (3 * max(other_spacings)) + has_aniso_voxels = target_size[worst_spacing_axis] * 3 < min(other_sizes) + + if has_aniso_spacing and has_aniso_voxels: + spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis] + target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 10) + # don't let the spacing of that axis get higher than the other axes + if target_spacing_of_that_axis < max(other_spacings): + target_spacing_of_that_axis = max(max(other_spacings), target_spacing_of_that_axis) + 1e-5 + target[worst_spacing_axis] = target_spacing_of_that_axis + return target + + def compute_new_shape(self, old_shape: Union[Tuple[int, ...], List[int], np.ndarray], + old_spacing: Union[Tuple[float, ...], List[float], np.ndarray], + new_spacing: Union[Tuple[float, ...], List[float], np.ndarray]) -> np.ndarray: + ## spacing need to be transposed + old_spacing = list(old_spacing)[::-1] + new_spacing = list(new_spacing)[::-1] + + assert len(old_spacing) == len(old_shape) + assert len(old_shape) == len(new_spacing) + new_shape = np.array([int(round(i / j * k)) for i, j, k in zip(old_spacing, new_spacing, old_shape)]) + return new_shape + + def run_plan(self): + all_iter = self.get_iterable_list() + spacings = [] + sizes = [] + intensities_per_channels = [] + print(f"analysing data......") + for case in tqdm(all_iter, total=len(all_iter)): + if os.path.isdir(os.path.join(self.base_dir, case)): + spacing, size, intensities_per_channel = self.experiment_plan(case) + if spacing is None: + continue + + spacings.append(spacing) + sizes.append(size) + intensities_per_channels.append(intensities_per_channel) + + print(f"all spacing is {spacings}") + print(f"all sizes is {sizes}") + foreground_intensities_per_channel = [np.concatenate([r[i] for r in intensities_per_channels]) for i in + range(len(intensities_per_channels[0]))] + + num_channels = len(intensities_per_channels[0]) + + intensity_statistics_per_channel = {} + for i in range(num_channels): + intensity_statistics_per_channel[i] = { + 'mean': float(np.mean(foreground_intensities_per_channel[i])), + 'median': float(np.median(foreground_intensities_per_channel[i])), + 'std': float(np.std(foreground_intensities_per_channel[i])), + 'min': float(np.min(foreground_intensities_per_channel[i])), + 'max': float(np.max(foreground_intensities_per_channel[i])), + 'percentile_99_5': float(np.percentile(foreground_intensities_per_channel[i], 99.5)), + 'percentile_00_5': float(np.percentile(foreground_intensities_per_channel[i], 0.5)), + } + + print(f"intensity_statistics_per_channel is {intensity_statistics_per_channel}") + + fullres_spacing = self.determine_fullres_target_spacing(spacings, sizes) + print(f"fullres spacing is {fullres_spacing[::-1]}") + + # get transposed new median shape (what we would have after resampling) + new_shapes = [self.compute_new_shape(j, i, fullres_spacing) for i, j in + zip(spacings, sizes)] + new_median_shape = np.median(new_shapes, 0) + print(f"median_shape is {new_median_shape}") + + tmp = 1 / np.array(fullres_spacing) + 
initial_patch_size = [round(i) for i in tmp * (256 ** 3 / np.prod(tmp)) ** (1 / 3)] + + print(f"initial_patch_size is {initial_patch_size[::-1]}") + + network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, \ + shape_must_be_divisible_by = get_pool_and_conv_props(fullres_spacing, initial_patch_size, + 4, + 999999) + print(f"target medium patch size is {patch_size[::-1]}") + + analysis_path = "./data_analysis_result.txt" + with open(analysis_path, "w") as f: + + f.write(json.dumps({ + "intensity_statistics_per_channel": intensity_statistics_per_channel, + "fullres spacing": fullres_spacing.tolist(), + "median_shape": new_median_shape.tolist(), + "initial_patch_size": initial_patch_size, + "target medium patch size": patch_size[::-1].tolist() + })) + print(f"Analysis done, save to {analysis_path}") + + + def collect_foreground_intensities(self, segmentation: np.ndarray, images: np.ndarray, seed: int = 1234, + num_samples: int = 10000): + """ + images=image with multiple channels = shape (c, x, y(, z)) + """ + assert len(images.shape) == 4 + assert len(segmentation.shape) == 4 + + assert not np.any(np.isnan(segmentation)), "Segmentation contains NaN values. grrrr.... :-(" + assert not np.any(np.isnan(images)), "Images contains NaN values. grrrr.... :-(" + + rs = np.random.RandomState(seed) + + intensities_per_channel = [] + # we don't use the intensity_statistics_per_channel at all, it's just something that might be nice to have + intensity_statistics_per_channel = [] + + # segmentation is 4d: 1,x,y,z. We need to remove the empty dimension for the following code to work + foreground_mask = segmentation[0] > 0 + + for i in range(len(images)): + foreground_pixels = images[i][foreground_mask] + num_fg = len(foreground_pixels) + # sample with replacement so that we don't get issues with cases that have less than num_samples + # foreground_pixels. 
We could also just sample less in those cases but that would than cause these + # training cases to be underrepresented + intensities_per_channel.append( + rs.choice(foreground_pixels, num_samples, replace=True) if num_fg > 0 else []) + intensity_statistics_per_channel.append({ + 'mean': np.mean(foreground_pixels) if num_fg > 0 else np.nan, + 'median': np.median(foreground_pixels) if num_fg > 0 else np.nan, + 'min': np.min(foreground_pixels) if num_fg > 0 else np.nan, + 'max': np.max(foreground_pixels) if num_fg > 0 else np.nan, + 'percentile_99_5': np.percentile(foreground_pixels, 99.5) if num_fg > 0 else np.nan, + 'percentile_00_5': np.percentile(foreground_pixels, 0.5) if num_fg > 0 else np.nan, + + }) + + return intensities_per_channel, intensity_statistics_per_channel + + @staticmethod + def _sample_foreground_locations(seg: np.ndarray, classes_or_regions: Union[List[int], List[Tuple[int, ...]]], + seed: int = 1234, verbose: bool = False): + num_samples = 10000 + min_percent_coverage = 0.01 # at least 1% of the class voxels need to be selected, otherwise it may be too + # sparse + rndst = np.random.RandomState(seed) + class_locs = {} + for c in classes_or_regions: + k = c if not isinstance(c, list) else tuple(c) + if isinstance(c, (tuple, list)): + ## region + mask = seg == c[0] + for cc in c[1:]: + mask = mask | (seg == cc) + all_locs = np.argwhere(mask) + else: + all_locs = np.argwhere(seg == c) + if len(all_locs) == 0: + class_locs[k] = [] + continue + target_num_samples = min(num_samples, len(all_locs)) + target_num_samples = max(target_num_samples, int(np.ceil(len(all_locs) * min_percent_coverage))) + + selected = all_locs[rndst.choice(len(all_locs), target_num_samples, replace=False)] + class_locs[k] = selected + if verbose: + print(c, target_num_samples) + + return class_locs + + def run(self, output_spacing, + output_dir, + all_labels, + foreground_intensity_properties_per_channel=None, + num_processes=8): + self.out_spacing = output_spacing + self.all_labels = all_labels + self.output_dir = output_dir + self.foreground_intensity_properties_per_channel = foreground_intensity_properties_per_channel + + all_iter = self.get_iterable_list() + + maybe_mkdir_p(self.output_dir) + + # test_run + for case_name in all_iter: + self.run_case_save(case_name) + break + + # multiprocessing magic. + r = [] + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + for case_name in all_iter: + r.append(p.starmap_async(self.run_case_save, + ((case_name, ),))) + remaining = list(range(len(all_iter))) + # p is pretty nifti. If we kill workers they just respawn but don't do any work. + # So we need to store the original pool of workers. + workers = [j for j in p._pool] + with tqdm(desc=None, total=len(all_iter)) as pbar: + while len(remaining) > 0: + all_alive = all([j.is_alive() for j in workers]) + if not all_alive: + raise RuntimeError('Some background worker is 6 feet under. Yuck. \n' + 'OK jokes aside.\n' + 'One of your background processes is missing. This could be because of ' + 'an error (look for an error message) or because it was killed ' + 'by your OS due to running out of RAM. If you don\'t see ' + 'an error message, out of RAM is likely the problem. 
In that case ' + 'reducing the number of workers might help') + done = [i for i in remaining if r[i].ready()] + for _ in done: + pbar.update() + remaining = [i for i in remaining if i not in done] + sleep(0.1) \ No newline at end of file diff --git a/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor_liver_2017.py b/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor_liver_2017.py new file mode 100644 index 0000000000000000000000000000000000000000..5fb40d91dd6644d76fad9e1a0efe4b610dc5659b --- /dev/null +++ b/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor_liver_2017.py @@ -0,0 +1,526 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import multiprocessing +import shutil +from time import sleep +from typing import Union, Tuple +import glob +import numpy as np +from batchgenerators.utilities.file_and_folder_operations import * +from light_training.preprocessing.cropping.cropping import crop_to_nonzero +# from .default_resampling import resample_data_or_seg_to_spacing, resample_img +from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape, compute_new_shape +from tqdm import tqdm +from light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, ZScoreNormalization +import SimpleITK as sitk +from tqdm import tqdm +from copy import deepcopy +import json + +def create_image(image_arr, spacing): + image = sitk.GetImageFromArray(image_arr) + image.SetSpacing(spacing) + return image + +def get_shape_must_be_divisible_by(net_numpool_per_axis): + return 2 ** np.array(net_numpool_per_axis) + +def pad_shape(shape, must_be_divisible_by): + """ + pads shape so that it is divisible by must_be_divisible_by + :param shape: + :param must_be_divisible_by: + :return: + """ + if not isinstance(must_be_divisible_by, (tuple, list, np.ndarray)): + must_be_divisible_by = [must_be_divisible_by] * len(shape) + else: + assert len(must_be_divisible_by) == len(shape) + + new_shp = [shape[i] + must_be_divisible_by[i] - shape[i] % must_be_divisible_by[i] for i in range(len(shape))] + + for i in range(len(shape)): + if shape[i] % must_be_divisible_by[i] == 0: + new_shp[i] -= must_be_divisible_by[i] + new_shp = np.array(new_shp).astype(int) + return new_shp + +def get_pool_and_conv_props(spacing, patch_size, min_feature_map_size, max_numpool): + """ + this is the same as get_pool_and_conv_props_v2 from old nnunet + + :param spacing: + :param patch_size: + :param min_feature_map_size: min edge length of feature maps in bottleneck + :param max_numpool: + :return: + """ + # todo review this code + dim = len(spacing) + + current_spacing = deepcopy(list(spacing)) + current_size = deepcopy(list(patch_size)) + + pool_op_kernel_sizes = [[1] * len(spacing)] + conv_kernel_sizes = [] + + num_pool_per_axis = [0] * dim + kernel_size = [1] * dim + + while True: + # exclude axes that 
we cannot pool further because of min_feature_map_size constraint + valid_axes_for_pool = [i for i in range(dim) if current_size[i] >= 2*min_feature_map_size] + if len(valid_axes_for_pool) < 1: + break + + spacings_of_axes = [current_spacing[i] for i in valid_axes_for_pool] + + # find axis that are within factor of 2 within smallest spacing + min_spacing_of_valid = min(spacings_of_axes) + valid_axes_for_pool = [i for i in valid_axes_for_pool if current_spacing[i] / min_spacing_of_valid < 2] + + # max_numpool constraint + valid_axes_for_pool = [i for i in valid_axes_for_pool if num_pool_per_axis[i] < max_numpool] + + if len(valid_axes_for_pool) == 1: + if current_size[valid_axes_for_pool[0]] >= 3 * min_feature_map_size: + pass + else: + break + if len(valid_axes_for_pool) < 1: + break + + # now we need to find kernel sizes + # kernel sizes are initialized to 1. They are successively set to 3 when their associated axis becomes within + # factor 2 of min_spacing. Once they are 3 they remain 3 + for d in range(dim): + if kernel_size[d] == 3: + continue + else: + if spacings_of_axes[d] / min(current_spacing) < 2: + kernel_size[d] = 3 + + other_axes = [i for i in range(dim) if i not in valid_axes_for_pool] + + pool_kernel_sizes = [0] * dim + for v in valid_axes_for_pool: + pool_kernel_sizes[v] = 2 + num_pool_per_axis[v] += 1 + current_spacing[v] *= 2 + current_size[v] = np.ceil(current_size[v] / 2) + for nv in other_axes: + pool_kernel_sizes[nv] = 1 + + pool_op_kernel_sizes.append(pool_kernel_sizes) + conv_kernel_sizes.append(deepcopy(kernel_size)) + #print(conv_kernel_sizes) + + must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis) + patch_size = pad_shape(patch_size, must_be_divisible_by) + + # we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here + conv_kernel_sizes.append([3]*dim) + return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by + + +class DefaultPreprocessor(object): + def __init__(self, + base_dir, + ): + """ + Everything we need is in the plans. Those are given when run() is called + """ + self.base_dir = base_dir + + def run_case_npy(self, data: np.ndarray, seg, properties: dict): + # let's not mess up the inputs! + data = np.copy(data) + old_shape = data.shape + original_spacing = list(properties['spacing']) + ## 由于old spacing读出来是反的,因此这里需要转置一下 + + original_spacing_trans = original_spacing[::-1] + properties["original_spacing_trans"] = original_spacing_trans + properties["target_spacing_trans"] = self.out_spacing + + shape_before_cropping = data.shape[1:] + ## crop + properties['shape_before_cropping'] = shape_before_cropping + # this command will generate a segmentation. This is important because of the nonzero mask which we may need + data, seg, bbox = crop_to_nonzero(data, seg) + properties['bbox_used_for_cropping'] = bbox + + # crop, remember to store size before cropping! 
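crop_to_nonzero is imported from light_training.preprocessing.cropping and its implementation is not part of this diff. Conceptually it crops the image (and segmentation, if present) to the bounding box of the nonzero region and returns that box so the crop can be undone at export time. Below is a simplified stand-in for the image-only case, illustrative rather than the library code.

```python
import numpy as np

def crop_to_nonzero_sketch(data: np.ndarray):
    """Crop a (c, z, y, x) array to the bounding box of its nonzero voxels."""
    mask = np.any(data != 0, axis=0)                 # collapse the channel axis
    coords = np.argwhere(mask)
    lo = coords.min(axis=0)
    hi = coords.max(axis=0) + 1                      # exclusive upper bound
    bbox = list(zip(lo.tolist(), hi.tolist()))       # [(z0, z1), (y0, y1), (x0, x1)]
    cropped = data[:, lo[0]:hi[0], lo[1]:hi[1], lo[2]:hi[2]]
    return cropped, bbox

vol = np.zeros((1, 10, 10, 10), dtype=np.float32)
vol[0, 2:7, 3:8, 1:9] = 1.0
cropped, bbox = crop_to_nonzero_sketch(vol)
print(cropped.shape, bbox)                           # (1, 5, 5, 8) [(2, 7), (3, 8), (1, 9)]
```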
+ shape_before_resample = data.shape[1:] + properties['shape_after_cropping_before_resample'] = shape_before_resample + + new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing) + + if seg is None : + seg_norm = np.zeros_like(data) + else : + seg_norm = seg + data = self._normalize(data, seg_norm, + self.foreground_intensity_properties_per_channel) + + assert len(data.shape) == 4 + + data = resample_data_or_seg_to_shape(data, new_shape, + original_spacing, + self.out_spacing, + order=3, + order_z=0) + properties['shape_after_resample'] = new_shape + + if seg is not None : + assert len(seg.shape) == 4 + seg = resample_data_or_seg_to_shape(seg, new_shape, + original_spacing, + self.out_spacing, + is_seg=True, + order=1, + order_z=0) + + properties['class_locations'] = self._sample_foreground_locations(seg, + self.all_labels, + ) + + if np.max(seg) > 127: + seg = seg.astype(np.int16) + else: + seg = seg.astype(np.int8) + + print(f'old shape: {old_shape}, new_shape after crop and resample: {new_shape}, old_spacing: {original_spacing}, ' + f'new_spacing: {self.out_spacing}, boxes is {bbox}') + + return data, seg + + # need to modify + def get_iterable_list(self): + all_cases = os.listdir(self.base_dir) + + all_cases_2 = [] + for c in all_cases: + if "volume" in c and ".nii" in c: + ## get data id + all_cases_2.append(c.split("-")[-1].split(".")[0]) + + return all_cases_2 + + def _normalize(self, data: np.ndarray, seg: np.ndarray, + foreground_intensity_properties_per_channel: dict) -> np.ndarray: + for c in range(data.shape[0]): + normalizer_class = CTNormalization + normalizer = normalizer_class(use_mask_for_norm=False, + intensityproperties=foreground_intensity_properties_per_channel[str(c)]) + data[c] = normalizer.run(data[c], seg[0]) + return data + + # need to modify + def read_data(self, case_name): + ## only for CT dataset + try: + data = sitk.ReadImage(os.path.join(self.base_dir, f"volume-{case_name}.nii")) + except: + print(f"data read error: {self.base_dir, case_name}") + return None, None, None + seg_arr = None + ## 一定要是float32!!!! + data_arr = sitk.GetArrayFromImage(data).astype(np.float32) + data_arr = data_arr[None] + + if os.path.exists(os.path.join(self.base_dir, f"segmentation-{case_name}.nii")): + seg = sitk.ReadImage(os.path.join(self.base_dir, f"segmentation-{case_name}.nii")) + ## 读出来以后一定转float32!!! 
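get_iterable_list above relies on the LiTS 2017 naming convention (volume-&lt;id&gt;.nii paired with segmentation-&lt;id&gt;.nii) and keeps only the numeric id. A small sketch of that parsing, with made-up filenames:

```python
filenames = ["volume-0.nii", "volume-12.nii", "segmentation-12.nii", "notes.txt"]

case_ids = [f.split("-")[-1].split(".")[0]
            for f in filenames
            if "volume" in f and ".nii" in f]
print(case_ids)   # ['0', '12'] -> later read back as volume-{id}.nii / segmentation-{id}.nii
```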
+ seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32)[None,] + + intensities_per_channel, intensity_statistics_per_channel = self.collect_foreground_intensities(seg_arr, data_arr) + else : + intensities_per_channel = [] + intensity_statistics_per_channel = [] + + properties = {"spacing": data.GetSpacing(), + "raw_size": data_arr.shape[1:], + "name": case_name.split(".")[0], + "intensities_per_channel": intensities_per_channel, + "intensity_statistics_per_channel": intensity_statistics_per_channel} + + return data_arr, seg_arr, properties + + def run_case(self, case_name): + """ + seg file can be none (test cases) + + order of operations is: transpose -> crop -> resample + so when we export we need to run the following order: resample -> crop -> transpose (we could also run + transpose at a different place, but reverting the order of operations done during preprocessing seems cleaner) + """ + data, seg, properties = self.read_data(case_name) + if data is not None: + data, seg = self.run_case_npy(data, seg, properties) + return data, seg, properties + else : + return None, None, None + + def run_case_save(self, case_name): + print(case_name + "~~~~~~~~" * 10) + data, seg, properties = self.run_case(case_name) + if data is not None: + # print('dtypes', data.dtype, seg.dtype) + case_name = case_name.split(".")[0] + np.savez_compressed(os.path.join(self.output_dir, case_name) + '.npz', data=data, seg=seg) + write_pickle(properties, os.path.join(self.output_dir, case_name) + '.pkl') + print(f"data is saved at: {os.path.join(self.output_dir, case_name) + '.npz'}") + + def experiment_plan(self, case_name): + + data, seg, properties = self.read_data(case_name) + if data is None: + return None, None, None + + print(f"labels is {np.unique(seg)}") + spacing = properties["spacing"] + raw_size = properties["raw_size"] + intensities_per_channel = properties["intensities_per_channel"] + + return spacing, raw_size, intensities_per_channel + + def determine_fullres_target_spacing(self, spacings, sizes) -> np.ndarray: + # if self.overwrite_target_spacing is not None: + # return np.array(self.overwrite_target_spacing) + + # spacings = self.dataset_fingerprint['spacings'] + # sizes = self.dataset_fingerprint['shapes_after_crop'] + + target = np.percentile(np.vstack(spacings), 50, 0) + target_size = np.percentile(np.vstack(sizes), 50, 0) + # we need to identify datasets for which a different target spacing could be beneficial. 
These datasets have + # the following properties: + # - one axis which much lower resolution than the others + # - the lowres axis has much less voxels than the others + # - (the size in mm of the lowres axis is also reduced) + worst_spacing_axis = np.argmax(target) + other_axes = [i for i in range(len(target)) if i != worst_spacing_axis] + other_spacings = [target[i] for i in other_axes] + other_sizes = [target_size[i] for i in other_axes] + + has_aniso_spacing = target[worst_spacing_axis] > (3 * max(other_spacings)) + has_aniso_voxels = target_size[worst_spacing_axis] * 3 < min(other_sizes) + + if has_aniso_spacing and has_aniso_voxels: + spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis] + target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 10) + # don't let the spacing of that axis get higher than the other axes + if target_spacing_of_that_axis < max(other_spacings): + target_spacing_of_that_axis = max(max(other_spacings), target_spacing_of_that_axis) + 1e-5 + target[worst_spacing_axis] = target_spacing_of_that_axis + return target + + def compute_new_shape(self, old_shape: Union[Tuple[int, ...], List[int], np.ndarray], + old_spacing: Union[Tuple[float, ...], List[float], np.ndarray], + new_spacing: Union[Tuple[float, ...], List[float], np.ndarray]) -> np.ndarray: + ## spacing need to be transposed + old_spacing = list(old_spacing)[::-1] + new_spacing = list(new_spacing)[::-1] + + assert len(old_spacing) == len(old_shape) + assert len(old_shape) == len(new_spacing) + new_shape = np.array([int(round(i / j * k)) for i, j, k in zip(old_spacing, new_spacing, old_shape)]) + return new_shape + + def run_plan(self): + all_iter = self.get_iterable_list() + spacings = [] + sizes = [] + intensities_per_channels = [] + print(f"analysing data......") + for case in tqdm(all_iter, total=len(all_iter)): + spacing, size, intensities_per_channel = self.experiment_plan(case) + if spacing is None: + continue + + spacings.append(spacing) + sizes.append(size) + intensities_per_channels.append(intensities_per_channel) + + print(f"all spacing is {spacings}") + print(f"all sizes is {sizes}") + foreground_intensities_per_channel = [np.concatenate([r[i] for r in intensities_per_channels]) for i in + range(len(intensities_per_channels[0]))] + + num_channels = len(intensities_per_channels[0]) + + intensity_statistics_per_channel = {} + for i in range(num_channels): + intensity_statistics_per_channel[i] = { + 'mean': float(np.mean(foreground_intensities_per_channel[i])), + 'median': float(np.median(foreground_intensities_per_channel[i])), + 'std': float(np.std(foreground_intensities_per_channel[i])), + 'min': float(np.min(foreground_intensities_per_channel[i])), + 'max': float(np.max(foreground_intensities_per_channel[i])), + 'percentile_99_5': float(np.percentile(foreground_intensities_per_channel[i], 99.5)), + 'percentile_00_5': float(np.percentile(foreground_intensities_per_channel[i], 0.5)), + } + + print(f"intensity_statistics_per_channel is {intensity_statistics_per_channel}") + + fullres_spacing = self.determine_fullres_target_spacing(spacings, sizes) + print(f"fullres spacing is {fullres_spacing[::-1]}") + + # get transposed new median shape (what we would have after resampling) + new_shapes = [self.compute_new_shape(j, i, fullres_spacing) for i, j in + zip(spacings, sizes)] + new_median_shape = np.median(new_shapes, 0) + print(f"median_shape is {new_median_shape}") + + tmp = 1 / np.array(fullres_spacing) + initial_patch_size = [round(i) for i in tmp * (256 ** 3 / 
np.prod(tmp)) ** (1 / 3)] + + print(f"initial_patch_size is {initial_patch_size[::-1]}") + + network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, \ + shape_must_be_divisible_by = get_pool_and_conv_props(fullres_spacing, initial_patch_size, + 4, + 999999) + print(f"target medium patch size is {patch_size[::-1]}") + + analysis_path = "./data_analysis_result.txt" + with open(analysis_path, "w") as f: + + f.write(json.dumps({ + "intensity_statistics_per_channel": intensity_statistics_per_channel, + "fullres spacing": fullres_spacing.tolist(), + "median_shape": new_median_shape.tolist(), + "initial_patch_size": initial_patch_size, + "target medium patch size": patch_size[::-1].tolist() + })) + print(f"Analysis done, save to {analysis_path}") + + + def collect_foreground_intensities(self, segmentation: np.ndarray, images: np.ndarray, seed: int = 1234, + num_samples: int = 10000): + """ + images=image with multiple channels = shape (c, x, y(, z)) + """ + assert len(images.shape) == 4 + assert len(segmentation.shape) == 4 + + assert not np.any(np.isnan(segmentation)), "Segmentation contains NaN values. grrrr.... :-(" + assert not np.any(np.isnan(images)), "Images contains NaN values. grrrr.... :-(" + + rs = np.random.RandomState(seed) + + intensities_per_channel = [] + # we don't use the intensity_statistics_per_channel at all, it's just something that might be nice to have + intensity_statistics_per_channel = [] + + # segmentation is 4d: 1,x,y,z. We need to remove the empty dimension for the following code to work + foreground_mask = segmentation[0] > 0 + + for i in range(len(images)): + foreground_pixels = images[i][foreground_mask] + num_fg = len(foreground_pixels) + # sample with replacement so that we don't get issues with cases that have less than num_samples + # foreground_pixels. 
We could also just sample less in those cases but that would than cause these + # training cases to be underrepresented + intensities_per_channel.append( + rs.choice(foreground_pixels, num_samples, replace=True) if num_fg > 0 else []) + intensity_statistics_per_channel.append({ + 'mean': np.mean(foreground_pixels) if num_fg > 0 else np.nan, + 'median': np.median(foreground_pixels) if num_fg > 0 else np.nan, + 'min': np.min(foreground_pixels) if num_fg > 0 else np.nan, + 'max': np.max(foreground_pixels) if num_fg > 0 else np.nan, + 'percentile_99_5': np.percentile(foreground_pixels, 99.5) if num_fg > 0 else np.nan, + 'percentile_00_5': np.percentile(foreground_pixels, 0.5) if num_fg > 0 else np.nan, + + }) + + return intensities_per_channel, intensity_statistics_per_channel + + @staticmethod + def _sample_foreground_locations(seg: np.ndarray, classes_or_regions: Union[List[int], List[Tuple[int, ...]]], + seed: int = 1234, verbose: bool = False): + num_samples = 10000 + min_percent_coverage = 0.01 # at least 1% of the class voxels need to be selected, otherwise it may be too + # sparse + rndst = np.random.RandomState(seed) + class_locs = {} + for c in classes_or_regions: + k = c if not isinstance(c, list) else tuple(c) + if isinstance(c, (tuple, list)): + ## region + mask = seg == c[0] + for cc in c[1:]: + mask = mask | (seg == cc) + all_locs = np.argwhere(mask) + else: + all_locs = np.argwhere(seg == c) + if len(all_locs) == 0: + class_locs[k] = [] + continue + target_num_samples = min(num_samples, len(all_locs)) + target_num_samples = max(target_num_samples, int(np.ceil(len(all_locs) * min_percent_coverage))) + + selected = all_locs[rndst.choice(len(all_locs), target_num_samples, replace=False)] + class_locs[k] = selected + if verbose: + print(c, target_num_samples) + + return class_locs + + def run(self, output_spacing, + output_dir, + all_labels, + foreground_intensity_properties_per_channel=None, + num_processes=8): + self.out_spacing = output_spacing + self.all_labels = all_labels + self.output_dir = output_dir + self.foreground_intensity_properties_per_channel = foreground_intensity_properties_per_channel + + all_iter = self.get_iterable_list() + + maybe_mkdir_p(self.output_dir) + + # test_run + for case_name in all_iter: + self.run_case_save(case_name) + break + + # multiprocessing magic. + r = [] + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + for case_name in all_iter: + r.append(p.starmap_async(self.run_case_save, + ((case_name, ),))) + remaining = list(range(len(all_iter))) + # p is pretty nifti. If we kill workers they just respawn but don't do any work. + # So we need to store the original pool of workers. + workers = [j for j in p._pool] + with tqdm(desc=None, total=len(all_iter)) as pbar: + while len(remaining) > 0: + all_alive = all([j.is_alive() for j in workers]) + if not all_alive: + raise RuntimeError('Some background worker is 6 feet under. Yuck. \n' + 'OK jokes aside.\n' + 'One of your background processes is missing. This could be because of ' + 'an error (look for an error message) or because it was killed ' + 'by your OS due to running out of RAM. If you don\'t see ' + 'an error message, out of RAM is likely the problem. 
In that case ' + 'reducing the number of workers might help') + done = [i for i in remaining if r[i].ready()] + for _ in done: + pbar.update() + remaining = [i for i in remaining if i not in done] + sleep(0.1) \ No newline at end of file diff --git a/SegMamba/light_training/preprocessing/preprocessors/preprocessor_brats23_global.py b/SegMamba/light_training/preprocessing/preprocessors/preprocessor_brats23_global.py new file mode 100644 index 0000000000000000000000000000000000000000..a328ff0eaddda3f179ce99b5d3a5c9b8337c101c --- /dev/null +++ b/SegMamba/light_training/preprocessing/preprocessors/preprocessor_brats23_global.py @@ -0,0 +1,542 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import multiprocessing +import shutil +from time import sleep +from typing import Union, Tuple +import glob +import numpy as np +from batchgenerators.utilities.file_and_folder_operations import * +from light_training.preprocessing.cropping.cropping import crop_to_nonzero +# from .default_resampling import resample_data_or_seg_to_spacing, resample_img +from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape, compute_new_shape +from tqdm import tqdm +from light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, ZScoreNormalization +import SimpleITK as sitk +from tqdm import tqdm +from copy import deepcopy +import json +# from .default_preprocessor import DefaultPreprocessor + +def create_image(image_arr, spacing): + image = sitk.GetImageFromArray(image_arr) + image.SetSpacing(spacing) + return image + +def get_shape_must_be_divisible_by(net_numpool_per_axis): + return 2 ** np.array(net_numpool_per_axis) + +def pad_shape(shape, must_be_divisible_by): + """ + pads shape so that it is divisible by must_be_divisible_by + :param shape: + :param must_be_divisible_by: + :return: + """ + if not isinstance(must_be_divisible_by, (tuple, list, np.ndarray)): + must_be_divisible_by = [must_be_divisible_by] * len(shape) + else: + assert len(must_be_divisible_by) == len(shape) + + new_shp = [shape[i] + must_be_divisible_by[i] - shape[i] % must_be_divisible_by[i] for i in range(len(shape))] + + for i in range(len(shape)): + if shape[i] % must_be_divisible_by[i] == 0: + new_shp[i] -= must_be_divisible_by[i] + new_shp = np.array(new_shp).astype(int) + return new_shp + +def get_pool_and_conv_props(spacing, patch_size, min_feature_map_size, max_numpool): + """ + this is the same as get_pool_and_conv_props_v2 from old nnunet + + :param spacing: + :param patch_size: + :param min_feature_map_size: min edge length of feature maps in bottleneck + :param max_numpool: + :return: + """ + # todo review this code + dim = len(spacing) + + current_spacing = deepcopy(list(spacing)) + current_size = deepcopy(list(patch_size)) + + pool_op_kernel_sizes = [[1] * len(spacing)] + conv_kernel_sizes = [] + + num_pool_per_axis = [0] * dim + kernel_size = 
[1] * dim + + while True: + # exclude axes that we cannot pool further because of min_feature_map_size constraint + valid_axes_for_pool = [i for i in range(dim) if current_size[i] >= 2*min_feature_map_size] + if len(valid_axes_for_pool) < 1: + break + + spacings_of_axes = [current_spacing[i] for i in valid_axes_for_pool] + + # find axis that are within factor of 2 within smallest spacing + min_spacing_of_valid = min(spacings_of_axes) + valid_axes_for_pool = [i for i in valid_axes_for_pool if current_spacing[i] / min_spacing_of_valid < 2] + + # max_numpool constraint + valid_axes_for_pool = [i for i in valid_axes_for_pool if num_pool_per_axis[i] < max_numpool] + + if len(valid_axes_for_pool) == 1: + if current_size[valid_axes_for_pool[0]] >= 3 * min_feature_map_size: + pass + else: + break + if len(valid_axes_for_pool) < 1: + break + + # now we need to find kernel sizes + # kernel sizes are initialized to 1. They are successively set to 3 when their associated axis becomes within + # factor 2 of min_spacing. Once they are 3 they remain 3 + for d in range(dim): + if kernel_size[d] == 3: + continue + else: + if spacings_of_axes[d] / min(current_spacing) < 2: + kernel_size[d] = 3 + + other_axes = [i for i in range(dim) if i not in valid_axes_for_pool] + + pool_kernel_sizes = [0] * dim + for v in valid_axes_for_pool: + pool_kernel_sizes[v] = 2 + num_pool_per_axis[v] += 1 + current_spacing[v] *= 2 + current_size[v] = np.ceil(current_size[v] / 2) + for nv in other_axes: + pool_kernel_sizes[nv] = 1 + + pool_op_kernel_sizes.append(pool_kernel_sizes) + conv_kernel_sizes.append(deepcopy(kernel_size)) + #print(conv_kernel_sizes) + + must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis) + patch_size = pad_shape(patch_size, must_be_divisible_by) + + # we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here + conv_kernel_sizes.append([3]*dim) + return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by + + + +class MultiModalityPreprocessor(object): + def __init__(self, + base_dir, + global_size=[128, 128, 128], + ): + + self.global_size = global_size + self.base_dir = base_dir + # self.image_dir = image_dir + # self.data_filenames = data_filenames + # self.seg_filename = seg_filename + # base_dir = "./data/raw_data/BraTS2023/" + self.base_dir = base_dir + self.data_filenames = ["t2w.nii.gz", + "t2f.nii.gz", + "t1n.nii.gz", + "t1c.nii.gz"] + self.seg_filename = "seg.nii.gz" + + def get_iterable_list(self): + all_cases = os.listdir(os.path.join(self.base_dir)) + return all_cases + + def _normalize(self, data: np.ndarray, seg: np.ndarray, + foreground_intensity_properties_per_channel: dict) -> np.ndarray: + for c in range(data.shape[0]): + normalizer_class = ZScoreNormalization + normalizer = normalizer_class(use_mask_for_norm=False, + intensityproperties=foreground_intensity_properties_per_channel) + data[c] = normalizer.run(data[c], seg[0]) + return data + + def run_case_npy(self, data: np.ndarray, seg, properties: dict): + # let's not mess up the inputs! 
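For the multi-modality MRI preprocessor, _normalize switches to ZScoreNormalization. That class again lives in light_training, so the following is only an assumed sketch of standard per-channel z-scoring (each modality normalized with its own mean and std), not the library implementation.

```python
import numpy as np

def zscore_sketch(data: np.ndarray) -> np.ndarray:
    """Assumed behaviour: z-score every channel (modality) independently."""
    out = np.empty_like(data, dtype=np.float32)
    for c in range(data.shape[0]):
        channel = data[c]
        out[c] = (channel - channel.mean()) / max(channel.std(), 1e-8)
    return out

# Four hypothetical MRI modalities stacked as (c, z, y, x):
mri = np.random.rand(4, 8, 16, 16).astype(np.float32)
print(zscore_sketch(mri).mean(axis=(1, 2, 3)))   # per-channel means close to 0
```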
+ data = np.copy(data) + old_shape = data.shape + original_spacing = list(properties['spacing']) + ## 由于old spacing读出来是反的,因此这里需要转置一下 + + original_spacing_trans = original_spacing[::-1] + properties["original_spacing_trans"] = original_spacing_trans + properties["target_spacing_trans"] = self.out_spacing + + shape_before_cropping = data.shape[1:] + ## crop + properties['shape_before_cropping'] = shape_before_cropping + # this command will generate a segmentation. This is important because of the nonzero mask which we may need + data, seg, bbox = crop_to_nonzero(data, seg) + properties['bbox_used_for_cropping'] = bbox + + # crop, remember to store size before cropping! + shape_before_resample = data.shape[1:] + properties['shape_after_cropping_before_resample'] = shape_before_resample + + new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing) + + if seg is None : + seg_norm = np.zeros_like(data) + else : + seg_norm = seg + data = self._normalize(data, seg_norm, + self.foreground_intensity_properties_per_channel) + + assert len(data.shape) == 4 + + data = resample_data_or_seg_to_shape(data, new_shape, + original_spacing, + self.out_spacing, + order=3, + order_z=0) + + ## global view + data_global = resample_data_or_seg_to_shape(data, self.global_size, + original_spacing, + self.out_spacing, + order=3, + order_z=0) + + # print(data.shape, data_global.shape) + # data = np.concatenate([data, data_global], axis=0) + + properties['shape_after_resample'] = new_shape + + if seg is not None : + assert len(seg.shape) == 4 + seg = resample_data_or_seg_to_shape(seg, new_shape, + original_spacing, + self.out_spacing, + is_seg=True, + order=1, + order_z=0) + + properties['class_locations'] = self._sample_foreground_locations(seg, + self.all_labels, + ) + + ## global view + seg_global = resample_data_or_seg_to_shape(seg, self.global_size, + original_spacing, + self.out_spacing, + order=1, + order_z=0) + + if np.max(seg) > 127: + seg = seg.astype(np.int16) + else: + seg = seg.astype(np.int8) + + print(f'old shape: {old_shape}, new_shape after crop and resample: {new_shape}, old_spacing: {original_spacing}, ' + f'new_spacing: {self.out_spacing}, boxes is {bbox}') + + return data, data_global, seg, seg_global + + # need to modify + def read_data(self, case_name): + ## only for CT dataset + assert len(self.data_filenames) != 0 + data = [] + for dfname in self.data_filenames: + d = sitk.ReadImage(os.path.join(self.base_dir, case_name, dfname)) + spacing = d.GetSpacing() + data.append(sitk.GetArrayFromImage(d).astype(np.float32)[None,]) + + data = np.concatenate(data, axis=0) + + seg_arr = None + ## 一定要是float32!!!! + + if self.seg_filename != "": + seg = sitk.ReadImage(os.path.join(self.base_dir, case_name, self.seg_filename)) + ## 读出来以后一定转float32!!! 
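read_data in this preprocessor loads the four BraTS modalities listed in __init__ and stacks them along a new channel axis, so downstream code always sees a (c, z, y, x) array. A hedged sketch of that stacking with SimpleITK follows; the filenames and base directory come from the code above, but the case folder name is a placeholder.

```python
import os
import numpy as np
import SimpleITK as sitk

base_dir = "./data/raw_data/BraTS2023"             # from the commented default above
case_name = "BraTS-GLI-00000-000"                  # hypothetical case folder
filenames = ["t2w.nii.gz", "t2f.nii.gz", "t1n.nii.gz", "t1c.nii.gz"]

channels = []
for fname in filenames:
    img = sitk.ReadImage(os.path.join(base_dir, case_name, fname))
    spacing = img.GetSpacing()                     # (x, y, z); the array itself is (z, y, x)
    channels.append(sitk.GetArrayFromImage(img).astype(np.float32)[None])

data = np.concatenate(channels, axis=0)            # shape (4, z, y, x)
print(data.shape, spacing)
```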
+ seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32) + seg_arr = seg_arr[None] + intensities_per_channel, intensity_statistics_per_channel = self.collect_foreground_intensities(seg_arr, data) + + else : + intensities_per_channel = [] + intensity_statistics_per_channel = [] + + properties = {"spacing": spacing, + "raw_size": data.shape[1:], + "name": case_name.split(".")[0], + "intensities_per_channel": intensities_per_channel, + "intensity_statistics_per_channel": intensity_statistics_per_channel} + + return data, seg_arr, properties + + def run_case(self, case_name): + """ + seg file can be none (test cases) + + order of operations is: transpose -> crop -> resample + so when we export we need to run the following order: resample -> crop -> transpose (we could also run + transpose at a different place, but reverting the order of operations done during preprocessing seems cleaner) + """ + data, seg, properties = self.read_data(case_name) + + data, data_global, seg, seg_global = self.run_case_npy(data, seg, properties) + + return data, data_global, seg, seg_global, properties + + def run_case_save(self, case_name): + print(case_name + "~~~~~~~~" * 10) + data, data_global, seg, seg_global, properties = self.run_case(case_name) + # print('dtypes', data.dtype, seg.dtype) + case_name = case_name.split(".")[0] + np.savez_compressed(os.path.join(self.output_dir, case_name) + '.npz', data=data, data_global=data_global, seg=seg, seg_global=seg_global) + write_pickle(properties, os.path.join(self.output_dir, case_name) + '.pkl') + print(f"data is saved at: {os.path.join(self.output_dir, case_name) + '.npz'}, data shape is {data.shape}, data_global shape is {data_global.shape}") + + def experiment_plan(self, case_name): + + data, seg, properties = self.read_data(case_name) + print(f"labels is {np.unique(seg)}") + spacing = properties["spacing"] + raw_size = properties["raw_size"] + intensities_per_channel = properties["intensities_per_channel"] + + return spacing, raw_size, intensities_per_channel + + def determine_fullres_target_spacing(self, spacings, sizes) -> np.ndarray: + # if self.overwrite_target_spacing is not None: + # return np.array(self.overwrite_target_spacing) + + # spacings = self.dataset_fingerprint['spacings'] + # sizes = self.dataset_fingerprint['shapes_after_crop'] + + target = np.percentile(np.vstack(spacings), 50, 0) + target_size = np.percentile(np.vstack(sizes), 50, 0) + # we need to identify datasets for which a different target spacing could be beneficial. 
These datasets have + # the following properties: + # - one axis which much lower resolution than the others + # - the lowres axis has much less voxels than the others + # - (the size in mm of the lowres axis is also reduced) + worst_spacing_axis = np.argmax(target) + other_axes = [i for i in range(len(target)) if i != worst_spacing_axis] + other_spacings = [target[i] for i in other_axes] + other_sizes = [target_size[i] for i in other_axes] + + has_aniso_spacing = target[worst_spacing_axis] > (3 * max(other_spacings)) + has_aniso_voxels = target_size[worst_spacing_axis] * 3 < min(other_sizes) + + if has_aniso_spacing and has_aniso_voxels: + spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis] + target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 10) + # don't let the spacing of that axis get higher than the other axes + if target_spacing_of_that_axis < max(other_spacings): + target_spacing_of_that_axis = max(max(other_spacings), target_spacing_of_that_axis) + 1e-5 + target[worst_spacing_axis] = target_spacing_of_that_axis + return target + + def compute_new_shape(self, old_shape: Union[Tuple[int, ...], List[int], np.ndarray], + old_spacing: Union[Tuple[float, ...], List[float], np.ndarray], + new_spacing: Union[Tuple[float, ...], List[float], np.ndarray]) -> np.ndarray: + ## spacing need to be transposed + old_spacing = list(old_spacing)[::-1] + new_spacing = list(new_spacing)[::-1] + + assert len(old_spacing) == len(old_shape) + assert len(old_shape) == len(new_spacing) + new_shape = np.array([int(round(i / j * k)) for i, j, k in zip(old_spacing, new_spacing, old_shape)]) + return new_shape + + def run_plan(self): + all_iter = self.get_iterable_list() + spacings = [] + sizes = [] + intensities_per_channels = [] + print(f"analysing data......") + for case in tqdm(all_iter, total=len(all_iter)): + spacing, size, intensities_per_channel = self.experiment_plan(case) + spacings.append(spacing) + sizes.append(size) + intensities_per_channels.append(intensities_per_channel) + + print(f"all spacing is {spacings}") + print(f"all sizes is {sizes}") + foreground_intensities_per_channel = [np.concatenate([r[i] for r in intensities_per_channels]) for i in + range(len(intensities_per_channels[0]))] + + num_channels = len(intensities_per_channels[0]) + + intensity_statistics_per_channel = {} + for i in range(num_channels): + intensity_statistics_per_channel[i] = { + 'mean': float(np.mean(foreground_intensities_per_channel[i])), + 'median': float(np.median(foreground_intensities_per_channel[i])), + 'std': float(np.std(foreground_intensities_per_channel[i])), + 'min': float(np.min(foreground_intensities_per_channel[i])), + 'max': float(np.max(foreground_intensities_per_channel[i])), + 'percentile_99_5': float(np.percentile(foreground_intensities_per_channel[i], 99.5)), + 'percentile_00_5': float(np.percentile(foreground_intensities_per_channel[i], 0.5)), + } + + print(f"intensity_statistics_per_channel is {intensity_statistics_per_channel}") + + fullres_spacing = self.determine_fullres_target_spacing(spacings, sizes) + print(f"fullres spacing is {fullres_spacing[::-1]}") + + # get transposed new median shape (what we would have after resampling) + new_shapes = [self.compute_new_shape(j, i, fullres_spacing) for i, j in + zip(spacings, sizes)] + new_median_shape = np.median(new_shapes, 0) + print(f"median_shape is {new_median_shape}") + + tmp = 1 / np.array(fullres_spacing) + initial_patch_size = [round(i) for i in tmp * (256 ** 3 / np.prod(tmp)) ** (1 / 3)] + + 
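The two lines above implement the nnU-Net-style starting patch size: voxels per axis are proportional to 1/spacing, rescaled so the total is roughly 256^3 voxels, after which get_pool_and_conv_props shrinks and pads it. A worked example with an illustrative anisotropic target spacing:

```python
import numpy as np

fullres_spacing = np.array([0.8, 0.8, 2.5])        # illustrative (x, y, z) spacing in mm

tmp = 1 / fullres_spacing                          # more voxels along finer axes
scale = (256 ** 3 / np.prod(tmp)) ** (1 / 3)       # normalise so the product is ~256^3 voxels
initial_patch_size = [round(i) for i in tmp * scale]

print(initial_patch_size)                          # [374, 374, 120]
print(np.prod(initial_patch_size), 256 ** 3)       # ~16.8M voxels either way
```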
print(f"initial_patch_size is {initial_patch_size[::-1]}") + + network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, \ + shape_must_be_divisible_by = get_pool_and_conv_props(fullres_spacing, initial_patch_size, + 4, + 999999) + print(f"target medium patch size is {patch_size[::-1]}") + + analysis_path = "./data_analysis_result.txt" + with open(analysis_path, "w") as f: + + f.write(json.dumps({ + "intensity_statistics_per_channel": intensity_statistics_per_channel, + "fullres spacing": fullres_spacing.tolist(), + "median_shape": new_median_shape.tolist(), + "initial_patch_size": initial_patch_size, + "target medium patch size": patch_size[::-1].tolist() + })) + print(f"Analysis done, save to {analysis_path}") + + + def collect_foreground_intensities(self, segmentation: np.ndarray, images: np.ndarray, seed: int = 1234, + num_samples: int = 10000): + """ + images=image with multiple channels = shape (c, x, y(, z)) + """ + assert len(images.shape) == 4 + assert len(segmentation.shape) == 4 + + assert not np.any(np.isnan(segmentation)), "Segmentation contains NaN values. grrrr.... :-(" + assert not np.any(np.isnan(images)), "Images contains NaN values. grrrr.... :-(" + + rs = np.random.RandomState(seed) + + intensities_per_channel = [] + # we don't use the intensity_statistics_per_channel at all, it's just something that might be nice to have + intensity_statistics_per_channel = [] + + # segmentation is 4d: 1,x,y,z. We need to remove the empty dimension for the following code to work + foreground_mask = segmentation[0] > 0 + + for i in range(len(images)): + foreground_pixels = images[i][foreground_mask] + num_fg = len(foreground_pixels) + # sample with replacement so that we don't get issues with cases that have less than num_samples + # foreground_pixels. 
We could also just sample less in those cases but that would than cause these + # training cases to be underrepresented + intensities_per_channel.append( + rs.choice(foreground_pixels, num_samples, replace=True) if num_fg > 0 else []) + intensity_statistics_per_channel.append({ + 'mean': np.mean(foreground_pixels) if num_fg > 0 else np.nan, + 'median': np.median(foreground_pixels) if num_fg > 0 else np.nan, + 'min': np.min(foreground_pixels) if num_fg > 0 else np.nan, + 'max': np.max(foreground_pixels) if num_fg > 0 else np.nan, + 'percentile_99_5': np.percentile(foreground_pixels, 99.5) if num_fg > 0 else np.nan, + 'percentile_00_5': np.percentile(foreground_pixels, 0.5) if num_fg > 0 else np.nan, + + }) + + return intensities_per_channel, intensity_statistics_per_channel + + @staticmethod + def _sample_foreground_locations(seg: np.ndarray, classes_or_regions: Union[List[int], List[Tuple[int, ...]]], + seed: int = 1234, verbose: bool = False): + num_samples = 10000 + min_percent_coverage = 0.01 # at least 1% of the class voxels need to be selected, otherwise it may be too + # sparse + rndst = np.random.RandomState(seed) + class_locs = {} + for c in classes_or_regions: + k = c if not isinstance(c, list) else tuple(c) + if isinstance(c, (tuple, list)): + ## region + mask = seg == c[0] + for cc in c[1:]: + mask = mask | (seg == cc) + all_locs = np.argwhere(mask) + else: + all_locs = np.argwhere(seg == c) + if len(all_locs) == 0: + class_locs[k] = [] + continue + target_num_samples = min(num_samples, len(all_locs)) + target_num_samples = max(target_num_samples, int(np.ceil(len(all_locs) * min_percent_coverage))) + + selected = all_locs[rndst.choice(len(all_locs), target_num_samples, replace=False)] + class_locs[k] = selected + if verbose: + print(c, target_num_samples) + + return class_locs + + def run(self, + output_spacing, + output_dir, + all_labels, + num_processes=8): + self.out_spacing = output_spacing + self.all_labels = all_labels + self.output_dir = output_dir + self.foreground_intensity_properties_per_channel = {} + + all_iter = self.get_iterable_list() + + maybe_mkdir_p(self.output_dir) + + # test_run + for case_name in all_iter: + self.run_case_save(case_name) + break + + r = [] + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + for case_name in all_iter: + r.append(p.starmap_async(self.run_case_save, + ((case_name, ),))) + remaining = list(range(len(all_iter))) + # p is pretty nifti. If we kill workers they just respawn but don't do any work. + # So we need to store the original pool of workers. + workers = [j for j in p._pool] + with tqdm(desc=None, total=len(all_iter)) as pbar: + while len(remaining) > 0: + all_alive = all([j.is_alive() for j in workers]) + if not all_alive: + raise RuntimeError('Some background worker is 6 feet under. Yuck. \n' + 'OK jokes aside.\n' + 'One of your background processes is missing. This could be because of ' + 'an error (look for an error message) or because it was killed ' + 'by your OS due to running out of RAM. If you don\'t see ' + 'an error message, out of RAM is likely the problem. 
In that case ' + 'reducing the number of workers might help') + done = [i for i in remaining if r[i].ready()] + for _ in done: + pbar.update() + remaining = [i for i in remaining if i not in done] + sleep(0.1) \ No newline at end of file diff --git a/SegMamba/light_training/preprocessing/preprocessors/preprocessor_mri.py b/SegMamba/light_training/preprocessing/preprocessors/preprocessor_mri.py new file mode 100644 index 0000000000000000000000000000000000000000..1dd6283d331372c574e545f371d7755eb3552d56 --- /dev/null +++ b/SegMamba/light_training/preprocessing/preprocessors/preprocessor_mri.py @@ -0,0 +1,134 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import multiprocessing +import shutil +from time import sleep +from typing import Union, Tuple +import glob +import numpy as np +from batchgenerators.utilities.file_and_folder_operations import * +from light_training.preprocessing.cropping.cropping import crop_to_nonzero +# from .default_resampling import resample_data_or_seg_to_spacing, resample_img +from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape, compute_new_shape +from tqdm import tqdm +from light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, ZScoreNormalization +import SimpleITK as sitk +from tqdm import tqdm +from copy import deepcopy +import json +from .default_preprocessor import DefaultPreprocessor + +class MultiModalityPreprocessor(DefaultPreprocessor): + def __init__(self, + base_dir, + image_dir, + data_filenames=[], + seg_filename="", + ): + self.base_dir = base_dir + self.image_dir = image_dir + self.data_filenames = data_filenames + self.seg_filename = seg_filename + + def get_iterable_list(self): + all_cases = os.listdir(os.path.join(self.base_dir, self.image_dir)) + return all_cases + + def _normalize(self, data: np.ndarray, seg: np.ndarray, + foreground_intensity_properties_per_channel: dict) -> np.ndarray: + for c in range(data.shape[0]): + normalizer_class = ZScoreNormalization + normalizer = normalizer_class(use_mask_for_norm=False, + intensityproperties=foreground_intensity_properties_per_channel) + data[c] = normalizer.run(data[c], seg[0]) + return data + + # need to modify + def read_data(self, case_name): + ## only for CT dataset + assert len(self.data_filenames) != 0 + data = [] + for dfname in self.data_filenames: + d = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, dfname)) + spacing = d.GetSpacing() + data.append(sitk.GetArrayFromImage(d).astype(np.float32)[None,]) + + data = np.concatenate(data, axis=0) + + seg_arr = None + ## 一定要是float32!!!! + + if self.seg_filename != "": + seg = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, self.seg_filename)) + ## 读出来以后一定转float32!!! 
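# A minimal sketch of the loading pattern used by read_data in this file: each modality is
# read with SimpleITK, cast to float32, given a channel axis, and stacked into (c, z, y, x).
# SimpleITK arrays come back in (z, y, x) order while GetSpacing() is (x, y, z), which is why
# the spacing is reversed later in the pipeline. File names below are hypothetical placeholders.
import numpy as np
import SimpleITK as sitk

def load_modalities(case_dir, filenames=("t1.nii.gz", "t2.nii.gz")):
    channels, spacing = [], None
    for fname in filenames:
        img = sitk.ReadImage(f"{case_dir}/{fname}")
        spacing = img.GetSpacing()                                              # (x, y, z) order
        channels.append(sitk.GetArrayFromImage(img).astype(np.float32)[None])  # (1, z, y, x)
    return np.concatenate(channels, axis=0), spacing                           # (c, z, y, x)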
+ seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32) + seg_arr = seg_arr[None] + intensities_per_channel, intensity_statistics_per_channel = self.collect_foreground_intensities(seg_arr, data) + + else : + intensities_per_channel = [] + intensity_statistics_per_channel = [] + + properties = {"spacing": spacing, + "raw_size": data.shape[1:], + "name": case_name.split(".")[0], + "intensities_per_channel": intensities_per_channel, + "intensity_statistics_per_channel": intensity_statistics_per_channel} + + return data, seg_arr, properties + + def run(self, + output_spacing, + output_dir, + all_labels, + num_processes=8): + self.out_spacing = output_spacing + self.all_labels = all_labels + self.output_dir = output_dir + self.foreground_intensity_properties_per_channel = {} + + all_iter = self.get_iterable_list() + + maybe_mkdir_p(self.output_dir) + + # test_run + for case_name in all_iter: + self.run_case_save(case_name) + break + + r = [] + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + for case_name in all_iter: + r.append(p.starmap_async(self.run_case_save, + ((case_name, ),))) + remaining = list(range(len(all_iter))) + # p is pretty nifti. If we kill workers they just respawn but don't do any work. + # So we need to store the original pool of workers. + workers = [j for j in p._pool] + with tqdm(desc=None, total=len(all_iter)) as pbar: + while len(remaining) > 0: + all_alive = all([j.is_alive() for j in workers]) + if not all_alive: + raise RuntimeError('Some background worker is 6 feet under. Yuck. \n' + 'OK jokes aside.\n' + 'One of your background processes is missing. This could be because of ' + 'an error (look for an error message) or because it was killed ' + 'by your OS due to running out of RAM. If you don\'t see ' + 'an error message, out of RAM is likely the problem. In that case ' + 'reducing the number of workers might help') + done = [i for i in remaining if r[i].ready()] + for _ in done: + pbar.update() + remaining = [i for i in remaining if i not in done] + sleep(0.1) \ No newline at end of file diff --git a/SegMamba/light_training/preprocessing/preprocessors/preprocessor_multiinput_and_region.py b/SegMamba/light_training/preprocessing/preprocessors/preprocessor_multiinput_and_region.py new file mode 100644 index 0000000000000000000000000000000000000000..7eaf2a0e22b906758bd5203e95b866e6f0bf5327 --- /dev/null +++ b/SegMamba/light_training/preprocessing/preprocessors/preprocessor_multiinput_and_region.py @@ -0,0 +1,209 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
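# The run() methods in these preprocessor classes all dispatch cases the same way: one
# starmap_async job per case, a snapshot of the original worker processes, and a polling loop
# that raises if a worker dies (usually the OS OOM killer) instead of hanging forever.
# A condensed, self-contained sketch of that pattern, with process_case standing in for the
# real run_case_save:
import multiprocessing
from time import sleep
from tqdm import tqdm

def process_case(case_name):
    return case_name  # placeholder for the real per-case preprocessing

def run_all(cases, num_processes=2):
    with multiprocessing.get_context("spawn").Pool(num_processes) as pool:
        jobs = [pool.starmap_async(process_case, ((c,),)) for c in cases]
        workers = list(pool._pool)  # dead workers respawn but do no work, so keep the originals
        remaining = list(range(len(cases)))
        with tqdm(total=len(cases)) as pbar:
            while remaining:
                if not all(w.is_alive() for w in workers):
                    raise RuntimeError("a background worker died; try reducing num_processes")
                done = [i for i in remaining if jobs[i].ready()]
                pbar.update(len(done))
                remaining = [i for i in remaining if i not in done]
                sleep(0.1)

if __name__ == "__main__":
    run_all([f"case_{i:03d}" for i in range(4)])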
+import multiprocessing +import shutil +from time import sleep +from typing import Union, Tuple +import glob +import numpy as np +from batchgenerators.utilities.file_and_folder_operations import * +from light_training.preprocessing.cropping.cropping import crop_to_nonzero +# from .default_resampling import resample_data_or_seg_to_spacing, resample_img +from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape, compute_new_shape +from tqdm import tqdm +from light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, ZScoreNormalization, CTNormStandard +import SimpleITK as sitk +from tqdm import tqdm +from copy import deepcopy +import json +from .default_preprocessor import DefaultPreprocessor + +class MultiInputAndRegionPreprocessor(DefaultPreprocessor): + def __init__(self, + base_dir, + image_dir, + data_filenames=[], + seg_filename="", + ): + + self.base_dir = base_dir + self.image_dir = image_dir + self.data_filenames = data_filenames + self.seg_filename = seg_filename + + + def get_iterable_list(self): + all_cases = os.listdir(os.path.join(self.base_dir, self.image_dir)) + return all_cases + + def _normalize(self, data: np.ndarray, seg: np.ndarray, + foreground_intensity_properties_per_channel: dict) -> np.ndarray: + for c in range(data.shape[0]): + normalizer_class = CTNormalization + normalizer = normalizer_class(use_mask_for_norm=False, + intensityproperties=foreground_intensity_properties_per_channel[str(c)]) + data[c] = normalizer.run(data[c], seg[0]) + return data + + def run_case_npy(self, data: np.ndarray, seg, properties: dict): + # let's not mess up the inputs! + data = np.copy(data) + old_shape = data.shape + original_spacing = list(properties['spacing']) + ## 由于old spacing读出来是反的,因此这里需要转置一下 + + original_spacing_trans = original_spacing[::-1] + properties["original_spacing_trans"] = original_spacing_trans + properties["target_spacing_trans"] = self.out_spacing + + shape_before_cropping = data.shape[1:] + ## crop + properties['shape_before_cropping'] = shape_before_cropping + # this command will generate a segmentation. This is important because of the nonzero mask which we may need + data, seg, bbox = crop_to_nonzero(data, seg) + + properties['bbox_used_for_cropping'] = bbox + + if seg is None : + seg_norm = np.zeros_like(data) + else : + seg_norm = seg + data = self._normalize(data, seg_norm, + self.foreground_intensity_properties_per_channel) + + # crop, remember to store size before cropping! 
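# The spacing bookkeeping above is the subtle part: SimpleITK reports spacing as (x, y, z)
# while the array is (z, y, x), hence the original_spacing[::-1] transposition. compute_new_shape
# (used just below, and defined in default_resampling.py) reduces to
# round(old_spacing / new_spacing * old_shape) per axis. A tiny worked example with made-up numbers:
import numpy as np

old_shape = np.array([80, 512, 512])      # (z, y, x) voxels
old_spacing = np.array([5.0, 0.8, 0.8])   # mm per voxel, in array order
new_spacing = np.array([3.0, 1.0, 1.0])   # target spacing, same order

new_shape = np.round(old_spacing / new_spacing * old_shape).astype(int)
print(new_shape)  # -> [133 410 410]: physical extent is preserved, resolution changes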
+ shape_before_resample = data.shape[1:] + properties['shape_after_cropping_before_resample'] = shape_before_resample + + new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing) + + assert len(data.shape) == 4 + + data = resample_data_or_seg_to_shape(data, new_shape, + original_spacing, + self.out_spacing, + order=3, + order_z=0) + properties['shape_after_resample'] = new_shape + + if seg is not None : + assert len(seg.shape) == 4 + seg = resample_data_or_seg_to_shape(seg, new_shape, + original_spacing, + self.out_spacing, + is_seg=True, + order=1, + order_z=0) + + properties['class_locations'] = self._sample_foreground_locations(seg, + self.all_labels, + True) + + if np.max(seg) > 127: + seg = seg.astype(np.int16) + else: + seg = seg.astype(np.int8) + + print(f'old shape: {old_shape}, shape_after_cropping_before_resample is {shape_before_resample}, new_shape after crop and resample: {new_shape}, old_spacing: {original_spacing}, ' + f'new_spacing: {self.out_spacing}, boxes is {bbox}') + + return data, seg + + # need to modify + def read_data(self, case_name): + ## only for CT dataset + assert len(self.data_filenames) != 0 + data = [] + for dfname in self.data_filenames: + d = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, dfname)) + spacing = d.GetSpacing() + data.append(sitk.GetArrayFromImage(d).astype(np.float32)[None,]) + + data = np.concatenate(data, axis=0) + + seg_arr = None + ## 一定要是float32!!!! + + if self.seg_filename != "": + seg = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, self.seg_filename)) + ## 读出来以后一定转float32!!! + seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32) + seg_arr = seg_arr[None] + intensities_per_channel, intensity_statistics_per_channel = self.collect_foreground_intensities(seg_arr, data) + + else : + intensities_per_channel = [] + intensity_statistics_per_channel = [] + + properties = {"spacing": spacing, + "raw_size": data.shape[1:], + "name": case_name.split(".")[0], + "intensities_per_channel": intensities_per_channel, + "intensity_statistics_per_channel": intensity_statistics_per_channel} + + return data, seg_arr, properties + + def run(self, + output_spacing, + output_dir, + all_labels_dict, + num_processes=8, + foreground_intensity_properties_per_channel={} + ): + self.out_spacing = output_spacing + # all_labels 必须为region格式,例如[[0, 1, 2, 3], [4, 5], [6, 7, 8], 9, 10] + + self.all_labels_dict = all_labels_dict + self.all_labels = [] + + for k, v in all_labels_dict.items(): + self.all_labels.append(v) + + self.output_dir = output_dir + self.foreground_intensity_properties_per_channel = foreground_intensity_properties_per_channel + + all_iter = self.get_iterable_list() + + maybe_mkdir_p(self.output_dir) + + # test_run + for case_name in all_iter: + self.run_case_save(case_name) + break + + r = [] + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + for case_name in all_iter: + r.append(p.starmap_async(self.run_case_save, + ((case_name, ),))) + remaining = list(range(len(all_iter))) + # p is pretty nifti. If we kill workers they just respawn but don't do any work. + # So we need to store the original pool of workers. + workers = [j for j in p._pool] + with tqdm(desc=None, total=len(all_iter)) as pbar: + while len(remaining) > 0: + all_alive = all([j.is_alive() for j in workers]) + if not all_alive: + raise RuntimeError('Some background worker is 6 feet under. Yuck. \n' + 'OK jokes aside.\n' + 'One of your background processes is missing. 
This could be because of ' + 'an error (look for an error message) or because it was killed ' + 'by your OS due to running out of RAM. If you don\'t see ' + 'an error message, out of RAM is likely the problem. In that case ' + 'reducing the number of workers might help') + done = [i for i in remaining if r[i].ready()] + for _ in done: + pbar.update() + remaining = [i for i in remaining if i not in done] + sleep(0.1) \ No newline at end of file diff --git a/SegMamba/light_training/preprocessing/preprocessors/preprocessor_multiinput_and_region_01norm_first.py b/SegMamba/light_training/preprocessing/preprocessors/preprocessor_multiinput_and_region_01norm_first.py new file mode 100644 index 0000000000000000000000000000000000000000..05697234cfc7c8b6276ab71de26d1670246f2a0a --- /dev/null +++ b/SegMamba/light_training/preprocessing/preprocessors/preprocessor_multiinput_and_region_01norm_first.py @@ -0,0 +1,239 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import multiprocessing +import shutil +from time import sleep +from typing import Union, Tuple +import glob +import numpy as np +from batchgenerators.utilities.file_and_folder_operations import * +from light_training.preprocessing.cropping.cropping import crop_to_nonzero +# from .default_resampling import resample_data_or_seg_to_spacing, resample_img +from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape, compute_new_shape +from tqdm import tqdm +from light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, ZScoreNormalization, CTNormStandard +import SimpleITK as sitk +from tqdm import tqdm +from copy import deepcopy +import json +from .default_preprocessor import DefaultPreprocessor + +class MultiInputAndRegionPreprocessor(DefaultPreprocessor): + def __init__(self, + base_dir, + image_dir, + data_filenames=[], + seg_filename="", + norm_clip_min=-175, + norm_clip_max=250, + ): + + self.base_dir = base_dir + self.image_dir = image_dir + self.data_filenames = data_filenames + self.seg_filename = seg_filename + self.norm_clip_min = norm_clip_min + self.norm_clip_max = norm_clip_max + + def get_iterable_list(self): + all_cases = os.listdir(os.path.join(self.base_dir, self.image_dir)) + return all_cases + + def _normalize(self, data: np.ndarray, seg: np.ndarray, + foreground_intensity_properties_per_channel: dict) -> np.ndarray: + # for c in range(data.shape[0]): + normalizer = CTNormStandard(a_min=self.norm_clip_min, + a_max=self.norm_clip_max, + b_min=0.0, + b_max=1.0, clip=True) + + data = normalizer(data) + return data + + # def convert_labels_to_region(self, labels): + # patch_size = labels.shape[1:] + # one_hot_labels = np.zeros([self.all_labels_num, + # patch_size[0], + # patch_size[1], + # patch_size[2]]) + + # for k, v in self.all_labels_dict.items(): + # if isinstance(v, list): + # for vv in v: + # one_hot_labels[vv-1] = (labels == vv)[0] + + 
# return one_hot_labels + + def run_case_npy(self, data: np.ndarray, seg, properties: dict): + # let's not mess up the inputs! + data = np.copy(data) + old_shape = data.shape + original_spacing = list(properties['spacing']) + ## 由于old spacing读出来是反的,因此这里需要转置一下 + + original_spacing_trans = original_spacing[::-1] + properties["original_spacing_trans"] = original_spacing_trans + properties["target_spacing_trans"] = self.out_spacing + + ### norm first + need_to_check = False + if seg is None : + seg_norm = np.zeros_like(data) + else : + seg_norm = seg + before_crop_seg_sum = np.sum(seg.astype(np.uint8)) + need_to_check = True + data = self._normalize(data, seg_norm, + self.foreground_intensity_properties_per_channel) + + shape_before_cropping = data.shape[1:] + ## crop + properties['shape_before_cropping'] = shape_before_cropping + # this command will generate a segmentation. This is important because of the nonzero mask which we may need + data, seg, bbox = crop_to_nonzero(data, seg) + + if need_to_check: + seg_temp = np.copy(seg) + seg_temp[seg_temp==-1] = 0 + after_crop_seg_sum = np.sum(seg_temp.astype(np.uint8)) + print(f"before crop seg sum is {before_crop_seg_sum}, after is {after_crop_seg_sum}") + + properties['bbox_used_for_cropping'] = bbox + + # crop, remember to store size before cropping! + shape_before_resample = data.shape[1:] + properties['shape_after_cropping_before_resample'] = shape_before_resample + + new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing) + + assert len(data.shape) == 4 + + data = resample_data_or_seg_to_shape(data, new_shape, + original_spacing, + self.out_spacing, + order=3, + order_z=0) + properties['shape_after_resample'] = new_shape + + if seg is not None : + assert len(seg.shape) == 4 + seg = resample_data_or_seg_to_shape(seg, new_shape, + original_spacing, + self.out_spacing, + is_seg=True, + order=1, + order_z=0) + + properties['class_locations'] = self._sample_foreground_locations(seg, + self.all_labels, + True) + + ## convert to one-hot + # seg = self.convert_labels_to_region(seg) + + if np.max(seg) > 127: + seg = seg.astype(np.int16) + else: + seg = seg.astype(np.int8) + + print(f'old shape: {old_shape}, shape_after_cropping_before_resample is {shape_before_resample}, new_shape after crop and resample: {new_shape}, old_spacing: {original_spacing}, ' + f'new_spacing: {self.out_spacing}, boxes is {bbox}') + + return data, seg + + # need to modify + def read_data(self, case_name): + ## only for CT dataset + assert len(self.data_filenames) != 0 + data = [] + for dfname in self.data_filenames: + d = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, dfname)) + spacing = d.GetSpacing() + data.append(sitk.GetArrayFromImage(d).astype(np.float32)[None,]) + + data = np.concatenate(data, axis=0) + + seg_arr = None + ## 一定要是float32!!!! + + if self.seg_filename != "": + seg = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, self.seg_filename)) + ## 读出来以后一定转float32!!! 
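# CTNormStandard, used by _normalize earlier in this file with a_min/a_max/b_min/b_max/clip,
# presumably implements the usual CT window normalization: clip intensities to an HU window,
# then rescale to [b_min, b_max]. A hedged, NumPy-only sketch of that behavior:
import numpy as np

def ct_window_norm(data, a_min=-175.0, a_max=250.0, b_min=0.0, b_max=1.0, clip=True):
    data = (data - a_min) / (a_max - a_min)   # map [a_min, a_max] -> [0, 1]
    data = data * (b_max - b_min) + b_min     # map to [b_min, b_max]
    if clip:
        data = np.clip(data, b_min, b_max)
    return data

# e.g. soft tissue at 40 HU -> (40 + 175) / 425 ~= 0.506
print(ct_window_norm(np.array([40.0], dtype=np.float32)))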
+ seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32) + seg_arr = seg_arr[None] + intensities_per_channel, intensity_statistics_per_channel = self.collect_foreground_intensities(seg_arr, data) + + else : + intensities_per_channel = [] + intensity_statistics_per_channel = [] + + properties = {"spacing": spacing, + "raw_size": data.shape[1:], + "name": case_name.split(".")[0], + "intensities_per_channel": intensities_per_channel, + "intensity_statistics_per_channel": intensity_statistics_per_channel} + + return data, seg_arr, properties + + def run(self, + output_spacing, + output_dir, + all_labels_dict, + num_processes=8): + self.out_spacing = output_spacing + # all_labels 必须为region格式,例如[[0, 1, 2, 3], [4, 5], [6, 7, 8], 9, 10] + + self.all_labels_dict = all_labels_dict + self.all_labels = [] + + for k, v in all_labels_dict.items(): + self.all_labels.append(v) + + self.output_dir = output_dir + self.foreground_intensity_properties_per_channel = {} + + all_iter = self.get_iterable_list() + + maybe_mkdir_p(self.output_dir) + + # test_run + for case_name in all_iter: + self.run_case_save(case_name) + break + + r = [] + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + for case_name in all_iter: + r.append(p.starmap_async(self.run_case_save, + ((case_name, ),))) + remaining = list(range(len(all_iter))) + # p is pretty nifti. If we kill workers they just respawn but don't do any work. + # So we need to store the original pool of workers. + workers = [j for j in p._pool] + with tqdm(desc=None, total=len(all_iter)) as pbar: + while len(remaining) > 0: + all_alive = all([j.is_alive() for j in workers]) + if not all_alive: + raise RuntimeError('Some background worker is 6 feet under. Yuck. \n' + 'OK jokes aside.\n' + 'One of your background processes is missing. This could be because of ' + 'an error (look for an error message) or because it was killed ' + 'by your OS due to running out of RAM. If you don\'t see ' + 'an error message, out of RAM is likely the problem. In that case ' + 'reducing the number of workers might help') + done = [i for i in remaining if r[i].ready()] + for _ in done: + pbar.update() + remaining = [i for i in remaining if i not in done] + sleep(0.1) \ No newline at end of file diff --git a/SegMamba/light_training/preprocessing/preprocessors/preprocessor_multiinput_mrinorm_noresample_nocrop.py b/SegMamba/light_training/preprocessing/preprocessors/preprocessor_multiinput_mrinorm_noresample_nocrop.py new file mode 100644 index 0000000000000000000000000000000000000000..dc243413f747e40ee14437cba5a2489d97819c4e --- /dev/null +++ b/SegMamba/light_training/preprocessing/preprocessors/preprocessor_multiinput_mrinorm_noresample_nocrop.py @@ -0,0 +1,167 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
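# run() in the preprocessor above (preprocessor_multiinput_and_region_01norm_first.py) expects
# labels in region format, e.g. [[0, 1, 2, 3], [4, 5], [6, 7, 8], 9, 10], and the commented-out
# convert_labels_to_region hints at turning a label map into one binary channel per region.
# A small sketch of that conversion; the label values here are only for illustration:
import numpy as np

def labels_to_regions(labels, regions):
    # labels: (1, z, y, x) integer label map; regions: list of ints or lists of ints
    out = np.zeros((len(regions), *labels.shape[1:]), dtype=np.uint8)
    for i, region in enumerate(regions):
        values = region if isinstance(region, (list, tuple)) else [region]
        for v in values:
            out[i] |= (labels[0] == v).astype(np.uint8)
    return out

seg = np.random.randint(0, 4, size=(1, 4, 4, 4))
regions = [[1, 2, 3], [2, 3], 3]  # nested regions, BraTS-style, purely illustrative
print(labels_to_regions(seg, regions).shape)  # -> (3, 4, 4, 4)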
+import multiprocessing +import shutil +from time import sleep +from typing import Union, Tuple +import glob +import numpy as np +from batchgenerators.utilities.file_and_folder_operations import * +from light_training.preprocessing.cropping.cropping import crop_to_nonzero +# from .default_resampling import resample_data_or_seg_to_spacing, resample_img +from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape, compute_new_shape +from tqdm import tqdm +from light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, ZScoreNormalization +import SimpleITK as sitk +from tqdm import tqdm +from copy import deepcopy +import json +from .default_preprocessor import DefaultPreprocessor + +class Preprocessor(DefaultPreprocessor): + def __init__(self, + base_dir, + image_dir, + data_filenames=[], + seg_filename="", + ): + self.base_dir = base_dir + self.image_dir = image_dir + self.data_filenames = data_filenames + self.seg_filename = seg_filename + + def get_iterable_list(self): + all_cases = os.listdir(os.path.join(self.base_dir, self.image_dir)) + return all_cases + + def _normalize(self, data: np.ndarray, seg: np.ndarray, + foreground_intensity_properties_per_channel: dict) -> np.ndarray: + for c in range(data.shape[0]): + normalizer_class = ZScoreNormalization + normalizer = normalizer_class(use_mask_for_norm=False, + intensityproperties=foreground_intensity_properties_per_channel) + data[c] = normalizer.run(data[c], seg[0]) + return data + + def run_case_npy(self, data: np.ndarray, seg, properties: dict): + # let's not mess up the inputs! + data = np.copy(data) + old_shape = data.shape + original_spacing = list(properties['spacing']) + + if seg is None : + seg_norm = np.zeros_like(data) + else : + seg_norm = seg + data = self._normalize(data, seg_norm, + self.foreground_intensity_properties_per_channel) + + + assert len(data.shape) == 4 + + if seg is not None : + assert len(seg.shape) == 4 + properties['class_locations'] = self._sample_foreground_locations(seg, + self.all_labels, + True) + if np.max(seg) > 127: + seg = seg.astype(np.int16) + else: + seg = seg.astype(np.int8) + + print(f'old shape: {old_shape}') + + return data, seg + + # need to modify + def read_data(self, case_name): + ## only for CT dataset + assert len(self.data_filenames) != 0 + data = [] + for dfname in self.data_filenames: + d = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, dfname)) + spacing = d.GetSpacing() + data.append(sitk.GetArrayFromImage(d).astype(np.float32)[None,]) + + data = np.concatenate(data, axis=0) + + seg_arr = None + ## 一定要是float32!!!! + + if self.seg_filename != "": + seg = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, self.seg_filename)) + ## 读出来以后一定转float32!!! 
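# _normalize above applies ZScoreNormalization channel by channel with use_mask_for_norm=False.
# Assuming it is the standard (x - mean) / std over the whole channel (the usual choice for
# uncalibrated MRI intensities), it amounts to the following sketch:
import numpy as np

def zscore_per_channel(data, eps=1e-8):
    # data: (c, z, y, x) float array
    out = np.empty_like(data, dtype=np.float32)
    for c in range(data.shape[0]):
        mean, std = data[c].mean(), data[c].std()
        out[c] = (data[c] - mean) / max(std, eps)
    return out

vol = np.random.rand(2, 8, 16, 16).astype(np.float32) * 1000
normed = zscore_per_channel(vol)
print(normed.mean(axis=(1, 2, 3)), normed.std(axis=(1, 2, 3)))  # ~0 and ~1 per channel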
+ seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32) + seg_arr = seg_arr[None] + intensities_per_channel, intensity_statistics_per_channel = self.collect_foreground_intensities(seg_arr, data) + + else : + intensities_per_channel = [] + intensity_statistics_per_channel = [] + + properties = {"spacing": spacing, + "raw_size": data.shape[1:], + "name": case_name.split(".")[0], + "intensities_per_channel": intensities_per_channel, + "intensity_statistics_per_channel": intensity_statistics_per_channel} + + return data, seg_arr, properties + + def run(self, + output_dir, + all_labels_dict, + num_processes=8): + self.all_labels_dict = all_labels_dict + self.all_labels = [] + + for k, v in all_labels_dict.items(): + self.all_labels.append(v) + + self.output_dir = output_dir + self.foreground_intensity_properties_per_channel = {} + + all_iter = self.get_iterable_list() + + maybe_mkdir_p(self.output_dir) + + # test_run + for case_name in all_iter: + self.run_case_save(case_name) + break + + r = [] + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + for case_name in all_iter: + r.append(p.starmap_async(self.run_case_save, + ((case_name, ),))) + remaining = list(range(len(all_iter))) + # p is pretty nifti. If we kill workers they just respawn but don't do any work. + # So we need to store the original pool of workers. + workers = [j for j in p._pool] + with tqdm(desc=None, total=len(all_iter)) as pbar: + while len(remaining) > 0: + all_alive = all([j.is_alive() for j in workers]) + if not all_alive: + raise RuntimeError('Some background worker is 6 feet under. Yuck. \n' + 'OK jokes aside.\n' + 'One of your background processes is missing. This could be because of ' + 'an error (look for an error message) or because it was killed ' + 'by your OS due to running out of RAM. If you don\'t see ' + 'an error message, out of RAM is likely the problem. 
In that case ' + 'reducing the number of workers might help') + done = [i for i in remaining if r[i].ready()] + for _ in done: + pbar.update() + remaining = [i for i in remaining if i not in done] + sleep(0.1) \ No newline at end of file diff --git a/SegMamba/light_training/preprocessing/resampling/__init__.py b/SegMamba/light_training/preprocessing/resampling/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SegMamba/light_training/preprocessing/resampling/default_resampling.py b/SegMamba/light_training/preprocessing/resampling/default_resampling.py new file mode 100644 index 0000000000000000000000000000000000000000..ed1ceb0230255d911dae887caed23a63f27ba6e0 --- /dev/null +++ b/SegMamba/light_training/preprocessing/resampling/default_resampling.py @@ -0,0 +1,217 @@ +from collections import OrderedDict +from typing import Union, Tuple, List + +import numpy as np +import pandas as pd +import torch +from batchgenerators.augmentations.utils import resize_segmentation +from scipy.ndimage.interpolation import map_coordinates +from skimage.transform import resize + +ANISO_THRESHOLD = 3 + +def get_do_separate_z(spacing: Union[Tuple[float, ...], List[float], np.ndarray], anisotropy_threshold=ANISO_THRESHOLD): + do_separate_z = (np.max(spacing) / np.min(spacing)) > anisotropy_threshold + return do_separate_z + + +def get_lowres_axis(new_spacing: Union[Tuple[float, ...], List[float], np.ndarray]): + axis = np.where(max(new_spacing) / np.array(new_spacing) == 1)[0] # find which axis is anisotropic + return axis + + +def compute_new_shape(old_shape: Union[Tuple[int, ...], List[int], np.ndarray], + old_spacing: Union[Tuple[float, ...], List[float], np.ndarray], + new_spacing: Union[Tuple[float, ...], List[float], np.ndarray]) -> np.ndarray: + assert len(old_spacing) == len(old_shape) + assert len(old_shape) == len(new_spacing) + + new_shape = np.array([int(round(i / j * k)) for i, j, k in zip(old_spacing, new_spacing, old_shape)]) + return new_shape + + +def resample_data_or_seg_to_spacing(data: np.ndarray, + current_spacing: Union[Tuple[float, ...], List[float], np.ndarray], + new_spacing: Union[Tuple[float, ...], List[float], np.ndarray], + is_seg: bool = False, + order: int = 3, order_z: int = 0, + force_separate_z: Union[bool, None] = False, + separate_z_anisotropy_threshold: float = ANISO_THRESHOLD): + if force_separate_z is not None: + do_separate_z = force_separate_z + if force_separate_z: + axis = get_lowres_axis(current_spacing) + else: + axis = None + else: + if get_do_separate_z(current_spacing, separate_z_anisotropy_threshold): + do_separate_z = True + axis = get_lowres_axis(current_spacing) + elif get_do_separate_z(new_spacing, separate_z_anisotropy_threshold): + do_separate_z = True + axis = get_lowres_axis(new_spacing) + else: + do_separate_z = False + axis = None + + if axis is not None: + if len(axis) == 3: + # every axis has the same spacing, this should never happen, why is this code here? + do_separate_z = False + elif len(axis) == 2: + # this happens for spacings like (0.24, 1.25, 1.25) for example. 
In that case we do not want to resample + # separately in the out of plane axis + do_separate_z = False + else: + pass + + if data is not None: + assert len(data.shape) == 4, "data must be c x y z" + + shape = np.array(data[0].shape) + new_shape = compute_new_shape(shape[1:], current_spacing, new_spacing) + + data_reshaped = resample_data_or_seg(data, new_shape, is_seg, axis, order, do_separate_z, order_z=order_z) + return data_reshaped + + +def resample_data_or_seg_to_shape(data: Union[torch.Tensor, np.ndarray], + new_shape: Union[Tuple[int, ...], List[int], np.ndarray], + current_spacing: Union[Tuple[float, ...], List[float], np.ndarray], + new_spacing: Union[Tuple[float, ...], List[float], np.ndarray], + is_seg: bool = False, + order: int = 3, order_z: int = 0, + force_separate_z: Union[bool, None] = False, + separate_z_anisotropy_threshold: float = ANISO_THRESHOLD): + """ + needed for segmentation export. Stupid, I know. Maybe we can fix that with Leos new resampling functions + """ + if isinstance(data, torch.Tensor): + data = data.cpu().numpy() + if force_separate_z is not None: + do_separate_z = force_separate_z + if force_separate_z: + axis = get_lowres_axis(current_spacing) + else: + axis = None + else: + if get_do_separate_z(current_spacing, separate_z_anisotropy_threshold): + do_separate_z = True + axis = get_lowres_axis(current_spacing) + elif get_do_separate_z(new_spacing, separate_z_anisotropy_threshold): + do_separate_z = True + axis = get_lowres_axis(new_spacing) + else: + do_separate_z = False + axis = None + + if axis is not None: + if len(axis) == 3: + # every axis has the same spacing, this should never happen, why is this code here? + do_separate_z = False + elif len(axis) == 2: + # this happens for spacings like (0.24, 1.25, 1.25) for example. 
In that case we do not want to resample + # separately in the out of plane axis + do_separate_z = False + else: + pass + + if data is not None: + assert len(data.shape) == 4, "data must be c x y z" + + data_reshaped = resample_data_or_seg(data, new_shape, is_seg, axis, order, do_separate_z, order_z=order_z) + return data_reshaped + + +def resample_data_or_seg(data: np.ndarray, new_shape: Union[Tuple[float, ...], List[float], np.ndarray], + is_seg: bool = False, axis: Union[None, int] = None, order: int = 3, + do_separate_z: bool = False, order_z: int = 0): + """ + separate_z=True will resample with order 0 along z + :param data: + :param new_shape: + :param is_seg: + :param axis: + :param order: + :param do_separate_z: + :param order_z: only applies if do_separate_z is True + :return: + """ + assert len(data.shape) == 4, "data must be (c, x, y, z)" + assert len(new_shape) == len(data.shape) - 1 + + if is_seg: + resize_fn = resize_segmentation + kwargs = OrderedDict() + else: + resize_fn = resize + kwargs = {'mode': 'edge', 'anti_aliasing': False} + dtype_data = data.dtype + shape = np.array(data[0].shape) + new_shape = np.array(new_shape) + if np.any(shape != new_shape): + data = data.astype(float) + if do_separate_z: + # print("separate z, order in z is", order_z, "order inplane is", order) + assert len(axis) == 1, "only one anisotropic axis supported" + axis = axis[0] + if axis == 0: + new_shape_2d = new_shape[1:] + elif axis == 1: + new_shape_2d = new_shape[[0, 2]] + else: + new_shape_2d = new_shape[:-1] + + reshaped_final_data = [] + for c in range(data.shape[0]): + reshaped_data = [] + for slice_id in range(shape[axis]): + if axis == 0: + reshaped_data.append(resize_fn(data[c, slice_id], new_shape_2d, order, **kwargs)) + elif axis == 1: + reshaped_data.append(resize_fn(data[c, :, slice_id], new_shape_2d, order, **kwargs)) + else: + reshaped_data.append(resize_fn(data[c, :, :, slice_id], new_shape_2d, order, **kwargs)) + reshaped_data = np.stack(reshaped_data, axis) + if shape[axis] != new_shape[axis]: + + # The following few lines are blatantly copied and modified from sklearn's resize() + rows, cols, dim = new_shape[0], new_shape[1], new_shape[2] + orig_rows, orig_cols, orig_dim = reshaped_data.shape + + row_scale = float(orig_rows) / rows + col_scale = float(orig_cols) / cols + dim_scale = float(orig_dim) / dim + + map_rows, map_cols, map_dims = np.mgrid[:rows, :cols, :dim] + map_rows = row_scale * (map_rows + 0.5) - 0.5 + map_cols = col_scale * (map_cols + 0.5) - 0.5 + map_dims = dim_scale * (map_dims + 0.5) - 0.5 + + coord_map = np.array([map_rows, map_cols, map_dims]) + if not is_seg or order_z == 0: + reshaped_final_data.append(map_coordinates(reshaped_data, coord_map, order=order_z, + mode='nearest')[None]) + else: + unique_labels = np.sort(pd.unique(reshaped_data.ravel())) # np.unique(reshaped_data) + reshaped = np.zeros(new_shape, dtype=dtype_data) + + for i, cl in enumerate(unique_labels): + reshaped_multihot = np.round( + map_coordinates((reshaped_data == cl).astype(float), coord_map, order=order_z, + mode='nearest')) + reshaped[reshaped_multihot > 0.5] = cl + reshaped_final_data.append(reshaped[None]) + else: + reshaped_final_data.append(reshaped_data[None]) + reshaped_final_data = np.vstack(reshaped_final_data) + else: + # print("no separate z, order", order) + reshaped = [] + for c in range(data.shape[0]): + reshaped.append(resize_fn(data[c], new_shape, order, **kwargs)[None]) + reshaped_final_data = np.vstack(reshaped) + return reshaped_final_data.astype(dtype_data) + 
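# The separate-z branch above exists because many CT/MRI volumes are anisotropic:
# get_do_separate_z flags a case when max(spacing) / min(spacing) exceeds ANISO_THRESHOLD (3),
# and get_lowres_axis returns the axis with the largest spacing, which is then interpolated
# with order_z (nearest by default) while slices are resized in-plane with `order`. For instance:
import numpy as np

spacing = np.array([5.0, 0.8, 0.8])              # thick slices along the first axis
print(np.max(spacing) / np.min(spacing) > 3)     # True -> resample the z axis separately
print(np.where(max(spacing) / spacing == 1)[0])  # [0] -> the low-resolution axis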
else: + # print("no resampling necessary") + return data diff --git a/SegMamba/light_training/process_framework/norm.py b/SegMamba/light_training/process_framework/norm.py new file mode 100644 index 0000000000000000000000000000000000000000..d294fe5c9d81054b8dae218aa61927e276c673a6 --- /dev/null +++ b/SegMamba/light_training/process_framework/norm.py @@ -0,0 +1,16 @@ + + + +from light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, CTNormStandard + + +## need to custom, this example is about Segrap competition. +def norm_func(data, seg=None, **kwargs): + normalizer = CTNormStandard(a_min=-175, + a_max=250, + b_min=0.0, + b_max=1.0, clip=True) + + data = normalizer(data) + + return data diff --git a/SegMamba/light_training/process_framework/process.py b/SegMamba/light_training/process_framework/process.py new file mode 100644 index 0000000000000000000000000000000000000000..12e81de1130168b02ca31e90d5dce463249d851c --- /dev/null +++ b/SegMamba/light_training/process_framework/process.py @@ -0,0 +1,235 @@ +import torch +import numpy as np +import SimpleITK +import os +import sys +from monai.inferers import SlidingWindowInferer + +class Customalgorithm(): # SegmentationAlgorithm is not inherited in this class anymore + def __init__(self): + """ + Do not modify the `self.input_dir` and `self.output_dir`. + (Check https://grand-challenge.org/algorithms/interfaces/) + """ + self.input_dir = "/input/" + self.output_dir = "/output/images/head-neck-segmentation/" + + # self.out_spacing = [3.0, 0.54199219, 0.54199219] + self.out_spacing = [3.0, 1.0, 1.0] + + # self.device = "cpu" + + self.device = torch.device("cuda") + + self.patch_size = [64, 128, 128] + + def filte_state_dict(self, sd): + if "module" in sd : + sd = sd["module"] + new_sd = {} + for k, v in sd.items(): + k = str(k) + new_k = k[7:] if k.startswith("module") else k + new_sd[new_k] = v + del sd + return new_sd + + def convert_mha_to_nii(self, mha_input_path, nii_out_path): # nnUNet specific + img = SimpleITK.ReadImage(mha_input_path) + print(img.GetSize()) + SimpleITK.WriteImage(img, nii_out_path, True) + + def convert_nii_to_mha(self, nii_input_path, mha_out_path): # nnUNet specific + img = SimpleITK.ReadImage(nii_input_path) + SimpleITK.WriteImage(img, mha_out_path, True) + + def read(self, mha_path): + img = SimpleITK.ReadImage(mha_path) + spacing = img.GetSpacing() + raw_size = SimpleITK.GetArrayFromImage(img).shape + img = SimpleITK.GetArrayFromImage(img)[None,].astype(np.float32) + properties = { + "spacing": spacing, + "raw_size": raw_size + } + return img, properties + + def check_gpu(self): + """ + Check if GPU is available. Note that the Grand Challenge only has one available GPU. + """ + print('Checking GPU availability') + is_available = torch.cuda.is_available() + print('Available: ' + str(is_available)) + print(f'Device count: {torch.cuda.device_count()}') + if is_available: + print(f'Current device: {torch.cuda.current_device()}') + print('Device name: ' + torch.cuda.get_device_name(0)) + print('Device memory: ' + + str(torch.cuda.get_device_properties(0).total_memory)) + + def load_inputs(self): # use two modalities input data + """ + Read input data (two modalities) from `self.input_dir` (/input/). + Please do not modify the path for CT and contrast-CT images. 
+ """ + ct_mha = os.listdir(os.path.join(self.input_dir, 'images/head-neck-ct/'))[0] + ctc_mha = os.listdir(os.path.join(self.input_dir, 'images/head-neck-contrast-enhanced-ct/'))[0] + uuid = os.path.splitext(ct_mha)[0] + + img, properties = self.read(os.path.join(self.input_dir, 'images/head-neck-ct/', ct_mha)) + img_c, _ = self.read(os.path.join(self.input_dir, 'images/head-neck-contrast-enhanced-ct/', ctc_mha)) + + data = np.concatenate([img, img_c], axis=0) + del img + del img_c + # data is (2, d, w, h) + return uuid, data, properties + + def crop(self, data, properties): + from light_training.preprocessing.cropping.cropping import crop_to_nonzero + + seg = np.zeros_like(data) + + shape_before_cropping = data.shape[1:] + ## crop + properties['shape_before_cropping'] = shape_before_cropping + # this command will generate a segmentation. This is important because of the nonzero mask which we may need + data, seg, bbox = crop_to_nonzero(data, seg) + del seg + + properties['bbox_used_for_cropping'] = bbox + + return data, properties + + def resample(self, data, properties): + from light_training.preprocessing.resampling.default_resampling import compute_new_shape, resample_data_or_seg_to_shape + # crop, remember to store size before cropping! + shape_before_resample = data.shape[1:] + properties['shape_after_cropping_before_resample'] = shape_before_resample + new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing) + + assert len(data.shape) == 4 + + data = resample_data_or_seg_to_shape(data, new_shape, + original_spacing, + self.out_spacing, + order=3, + order_z=0) + properties['shape_after_resample'] = new_shape + + return data, properties + + def preprocess(self, data, properties, crop_first=True): + from light_training.process_framework.norm import norm_func + + original_spacing = list(properties['spacing']) + ## 由于old spacing读出来是反的,因此这里需要转置一下 + original_spacing_trans = original_spacing[::-1] + properties["original_spacing_trans"] = original_spacing_trans + properties["target_spacing_trans"] = self.out_spacing + + if crop_first: + data, properties = self.crop(data, properties) + + data = norm_func(data) + + if not crop_first: + data, properties = self.crop(data, properties) + + + data, properties = self.resample(data, properties) + + data = data[None,] + + data = torch.from_numpy(data) + + return data, properties + + def predict(self, data, properties, uid): + torch.cuda.empty_cache() + + from models.nnunet3d import NNUNetWrapper + model = NNUNetWrapper(norm="ins") + + new_sd = self.filte_state_dict(torch.load("./weight/unet3d_0_addaug_bs2_ep1000_ds_gpu4/final_model_0.8552.pt", map_location="cpu")) + model.load_state_dict(new_sd) + + del new_sd + torch.cuda.empty_cache() + # data = data.to(self.deivce) + # model.to(self.device) + model.eval() + window_infer = SlidingWindowInferer(roi_size=self.patch_size, + sw_batch_size=1, + overlap=0.5, + progress=True, + mode="gaussian") + + predictor = Predictor(window_infer, mirror_axes=None) + try: + ensemble_output = predictor.maybe_mirror_and_predict(data, model, self.device) + + except RuntimeError: + ensemble_output = predictor.maybe_mirror_and_predict(data, model, torch.device("cpu")) + torch.cuda.empty_cache() + del model + del data + + print(f"prediction done") + ensemble_output = predictor.predict_raw_probability(ensemble_output, properties) + print(f"non linear....") + # ensemble_output = predictor.apply_nonlinear(ensemble_output, nonlinear_type="sigmoid") + ensemble_output = ensemble_output > 0 + + 
print(f"restore crop...") + ensemble_output = predictor.predict_noncrop_probability(ensemble_output, properties) + + raw_spacing = properties["spacing"] + case_name = uid + print(f"uuid is {uid}") + os.makedirs(os.path.dirname(self.output_dir), exist_ok=True) + + print(f"saving....") + predictor.save_to_nii_multi_organ(ensemble_output, + raw_spacing, + save_dir=self.output_dir, + case_name=case_name, + postprocess=False) + + # """ + # load the model and checkpoint, and generate the predictions. You can replace this part with your own model. + # """ + # predict_from_folder_segrap2023(self.weight, self.nii_path, self.result_path, 0, 0, 1) + # print("nnUNet segmentation done!") + # if not os.path.exists(os.path.join(self.result_path, self.nii_seg_file)): + # print('waiting for nnUNet segmentation to be created') + + # while not os.path.exists(os.path.join(self.result_path, self.nii_seg_file)): + # import time + # print('.', end='') + # time.sleep(5) + # # print(cproc) # since nnUNet_predict call is split into prediction and postprocess, a pre-mature exit code is received but segmentation file not yet written. This hack ensures that all spawned subprocesses are finished before being printed. + # print('Prediction finished !') + + def post_process(self): + self.check_gpu() + print('Start processing') + uuid, data, properties = self.load_inputs() + + data, properties = self.preprocess(data, properties) + print(properties) + print('Start prediction') + self.predict(data, properties, uuid) + # print('Start output writing') + # self.write_outputs(uuid) + + def process(self): + """ + Read inputs from /input, process with your algorithm and write to /output + """ + self.post_process() + + +if __name__ == "__main__": + Customalgorithm().process() diff --git a/SegMamba/light_training/sampler.py b/SegMamba/light_training/sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..b46a80bddbbadf4532b206fd06e62e608909765e --- /dev/null +++ b/SegMamba/light_training/sampler.py @@ -0,0 +1,48 @@ +import torch +import math +import numpy as np + +class SequentialDistributedSampler(torch.utils.data.sampler.Sampler): + """ + Distributed Sampler that subsamples indicies sequentially, + making it easier to collate all results at the end. + Even though we only use this sampler for eval and predict (no training), + which means that the model params won't have to be synced (i.e. will not hang + for synchronization even if varied number of forward passes), we still add extra + samples to the sampler to make it evenly divisible (like in `DistributedSampler`) + to make it easy to `gather` or `reduce` resulting tensors at the end of the loop. 
+ """ + + def __init__(self, dataset, batch_size, rank=None, num_replicas=None): + if num_replicas is None: + if not torch.distributed.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = torch.distributed.get_world_size() + if rank is None: + if not torch.distributed.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = torch.distributed.get_rank() + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.batch_size = batch_size + self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.batch_size / self.num_replicas)) * self.batch_size + self.total_size = self.num_samples * self.num_replicas + + def __iter__(self): + indices = list(range(len(self.dataset))) + # add extra samples to make it evenly divisible + indices += [indices[-1]] * (self.total_size - len(indices)) + # subsample + indices = indices[self.rank * self.num_samples : (self.rank + 1) * self.num_samples] + return iter(indices) + + def __len__(self): + return self.num_samples + + +def distributed_concat(tensor, num_total_examples): + output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())] + torch.distributed.all_gather(output_tensors, tensor) + concat = torch.cat(output_tensors, dim=0) + return concat[:num_total_examples] \ No newline at end of file diff --git a/SegMamba/light_training/trainer.py b/SegMamba/light_training/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..3117702b215077b58df9d83d37c7bd9faebd4259 --- /dev/null +++ b/SegMamba/light_training/trainer.py @@ -0,0 +1,516 @@ +import os +from tqdm import tqdm +import numpy as np +import torch +import torch.nn.parallel +import torch.utils.data.distributed +from light_training.utils.lr_scheduler import get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup +from monai.data import DataLoader +import argparse +from .launch import launch_dist +from monai.utils import set_determinism +from .sampler import SequentialDistributedSampler, distributed_concat +from torch.utils.tensorboard import SummaryWriter +from torch.cuda.amp import GradScaler +from torch import autocast, nn +import time + +class dummy_context(object): + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + +class Trainer: + def __init__(self, env_type, + max_epochs, + batch_size, + device="cpu", + val_every=1, + num_gpus=1, + logdir="./logs/", + master_ip='localhost', + master_port=17750, + training_script="train.py", + train_process=12, + ): + assert env_type in ["pytorch", "ddp", "DDP"], f"not support this env_type: {env_type}" + self.env_type = env_type + self.val_every = val_every + self.max_epochs = max_epochs + self.ddp = False + self.num_gpus = num_gpus + self.device = device + self.local_rank = 0 + self.batch_size = batch_size + self.not_call_launch = True + self.logdir = logdir + self.scheduler = None + self.model = None + self.auto_optim = True + self.warmup = 0.0 + self.scheduler_type = None + + self.optimizer = None + self.patch_size = None + + self.num_step_per_epoch = 250 // self.num_gpus + self.val_number = 100 // self.num_gpus + self.augmentation = True + self.train_process = train_process + self.print_time = False + + if self.device == "cpu": + self.grad_scaler = None + else : + self.grad_scaler = GradScaler() + + torch.backends.cudnn.enabled = True + + gpu_count = torch.cuda.device_count() + if num_gpus > gpu_count: + 
print("gpu数量不符") + os._exit(0) + + if env_type == "DDP" or env_type == "ddp": + self.ddp = True + self.get_dist_args() + if not self.not_call_launch: + launch_dist(env_type=env_type, + num_nodes=1, + gpus_per_node=num_gpus, + master_addr=master_ip, + master_port=master_port, + training_script=training_script, + ) + os._exit(1) + self.initialize_distributed() + + def initialize_distributed(self): + """Initialize torch.distributed.""" + if self.env_type == 'pytorch': + self.print_rank_0('No need to initialize') + return + if self.env_type == 'DDP' or "deepspeed" in self.env_type: + + if self.local_rank is not None: + device = self.local_rank + torch.cuda.set_device(device) + # Call the init process + init_method = 'env://' + torch.distributed.init_process_group( + backend='nccl', + init_method=init_method) + self.world_size = torch.distributed.get_world_size() + + print(f"world size is {self.world_size}") + + def get_dataloader(self, dataset, shuffle=False, batch_size=1, train=True): + if dataset is None : + return None + if self.env_type == 'pytorch': + return DataLoader(dataset, + batch_size=batch_size, + shuffle=shuffle, + num_workers=12) + else : + if not train: + sampler = SequentialDistributedSampler(dataset, batch_size=batch_size) + + else : + sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=True) + return DataLoader(dataset, + batch_size=batch_size, + num_workers=12, + sampler=sampler, + drop_last=True) + + def get_multi_processor_loader(self, train_ds, val_ds): + from .augment.multi_processor import LimitedLenWrapper + from .augment.train_augment import get_train_transforms, get_validation_transforms, get_train_transforms_noaug, get_train_transforms_nomirror, get_train_transforms_onlymirror, get_train_transforms_onlyspatial + from light_training.dataloading.base_data_loader import DataLoaderMultiProcess + + assert self.patch_size != None + if self.augmentation: + if self.augmentation == "nomirror": + print(f"use augmentation: no mirror") + tr_transforms = get_train_transforms_nomirror(patch_size=self.patch_size, mirror_axes=[0, 1, 2]) + elif self.augmentation == "onlymirror": + print(f"use augmentation: only mirror") + tr_transforms = get_train_transforms_onlymirror(patch_size=self.patch_size, mirror_axes=[0, 1, 2]) + elif self.augmentation == "onlyspatial": + print(f"use augmentation: only spatial") + tr_transforms = get_train_transforms_onlyspatial(patch_size=self.patch_size, mirror_axes=[0, 1, 2]) + + else : + tr_transforms = get_train_transforms(patch_size=self.patch_size, mirror_axes=[0, 1, 2]) + else: + tr_transforms = get_train_transforms_noaug(patch_size=self.patch_size, mirror_axes=[0, 1, 2]) + + val_transforms = get_validation_transforms() + + # train_loader = DataLoader(train_ds, num_workers=1, drop_last=True, shuffle=True, batch_size=self.batch_size) + train_loader = DataLoaderMultiProcess(train_ds, + batch_size=self.batch_size, + patch_size=self.patch_size, + print_time=self.print_time) + + data_generator = LimitedLenWrapper(self.num_step_per_epoch, data_loader=train_loader, + transform=tr_transforms, + num_processes=self.train_process, num_cached=6, seeds=None, + pin_memory=True, wait_time=0.02) + if val_ds is None: + val_data_generator = None + else : + val_loader = DataLoaderMultiProcess(val_ds, + batch_size=1, + patch_size=self.patch_size, + oversample_foreground_percent=1.0) + + val_data_generator = LimitedLenWrapper(self.val_number, data_loader=val_loader, transform=val_transforms, + num_processes=6, num_cached=3, seeds=None, + 
pin_memory=True, wait_time=0.02) + return data_generator, val_data_generator + + + def get_dist_args(self): + parser = argparse.ArgumentParser() + # parser.add_argument('--local_rank', type=int, default = 0, help="local_rank") + parser.add_argument('--not_call_launch', + action='store_true', + help="not call launch!") + ds_args = parser.parse_args() + self.local_rank = int(os.environ.get("LOCAL_RANK", 0)) + + print(f"self.local_rank is {self.local_rank}") + self.not_call_launch = ds_args.not_call_launch + self.device = self.local_rank + + def to_device(self, batch): + if isinstance(batch, dict): + for k, v in batch.items(): + if isinstance(batch[k], np.ndarray): + batch[k] = torch.from_numpy(batch[k]) + + if (isinstance(batch[k], torch.Tensor) or isinstance(batch[k], torch.FloatTensor)): + batch[k] = batch[k].to(self.device).contiguous() + + elif isinstance(batch, list) : + batch = [torch.from_numpy(x) for x in batch if isinstance(x, np.ndarray)] + batch = [x.to(self.device).contiguous() for x in batch if (isinstance(x, torch.Tensor) or isinstance(x, torch.FloatTensor))] + + elif isinstance(batch, np.ndarray): + batch = torch.from_numpy(batch) + batch = batch.to(self.device).contiguous() + + else : + print("not support data type") + exit(0) + + return batch + + def validation_single_gpu(self, val_dataset,): + if self.ddp: + print(f"single gpu model not support the ddp") + exit(0) + val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, pin_memory=True) + if self.model is not None: + self.model.to(self.device) + self.model.eval() + val_outputs = [] + + for idx, batch in tqdm(enumerate(val_loader), total=len(val_loader)): + batch = self.before_data_to_device(batch) + batch = self.to_device(batch) + + with torch.no_grad(): + val_out = self.validation_step(batch) + assert val_out is not None + + return_list = False + val_outputs.append(val_out) + if isinstance(val_out, list) or isinstance(val_out, tuple): + return_list = True + + val_outputs = torch.tensor(val_outputs) + if not return_list: + # 说明只有一个变量 + length = 0 + v_sum = 0.0 + for v in val_outputs: + if not torch.isnan(v): + v_sum += v + length += 1 + + if length == 0: + v_sum = 0 + else : + v_sum = v_sum / length + else : + num_val = len(val_outputs[0]) + length = [0.0 for i in range(num_val)] + v_sum = [0.0 for i in range(num_val)] + + for v in val_outputs: + for i in range(num_val): + if not torch.isnan(v[i]): + v_sum[i] += v[i] + length[i] += 1 + + for i in range(num_val): + if length[i] == 0: + v_sum[i] = 0 + else : + v_sum[i] = v_sum[i] / length[i] + return v_sum, val_outputs + + def validate(self): + val_outputs = [] + if self.global_step % self.val_every == 0 \ + and self.val_loader is not None : + if self.model is not None: + self.model.eval() + if self.ddp: + torch.distributed.barrier() + outputs_split = None + # for idx, batch in tqdm(enumerate(self.val_loader), total=len(self.val_loader)): + for i in tqdm(range(len(self.val_loader)), total=len(self.val_loader)): + batch = next(self.val_loader) + + batch = self.before_data_to_device(batch) + + batch = self.to_device(batch) + + with torch.no_grad(): + with torch.autocast("cuda", enabled=True) if (self.ddp or 'cuda' in self.device) else dummy_context(): + val_out = self.validation_step(batch) + assert val_out is not None + if type(val_out) is not list and type(val_out) is not tuple: + val_out = [val_out] + + if outputs_split is None: + outputs_split = [[] for i in range(len(val_out))] + + for i, v in enumerate(val_out): + outputs_split[i].append(v) + + # 
val_outputs.append(val_out) + + ## 先汇总结果。 + if self.ddp: + val_outputs = torch.tensor(val_outputs).cuda(self.local_rank) + torch.distributed.barrier() + val_outputs_merge = [] + for i in range(len(outputs_split)): + val_outputs = torch.tensor(outputs_split[i]).cuda(self.local_rank) + val_outputs_merge.append(distributed_concat(val_outputs, num_total_examples=len(self.val_loader) * self.num_gpus)) + + # val_outputs = distributed_concat(val_outputs, num_total_examples=len(self.val_loader.sampler.dataset)) + # val_outputs = distributed_concat(val_outputs, num_total_examples=len(self.val_loader) * self.num_gpus) + else : + val_outputs_merge = [] + for i in range(len(outputs_split)): + val_outputs = torch.tensor(outputs_split[i]) + val_outputs_merge.append(val_outputs) + # val_outputs = torch.tensor(val_outputs) + + if self.local_rank == 0: + if len(val_outputs_merge) == 1: + val_outputs_merge = val_outputs_merge[0] + self.validation_end(val_outputs_merge) + # self.validation_end(val_outputs) + + def train(self, + train_dataset, + val_dataset=None, + ): + print(f"augmentation: {self.augmentation}") + assert self.patch_size is not None, "please define the patch_size" + + set_determinism(42 + self.local_rank) + if self.model is not None: + print(f"check model parameter: {next(self.model.parameters()).sum()}, keep model parameters on different processes consistent") + para = sum([np.prod(list(p.size())) for p in self.model.parameters()]) + if self.local_rank == 0: + print(f"model parameters is {para / 1000 / 1000}M ") + + self.global_step = 0 + if self.env_type == "pytorch": + if self.model is not None: + self.model.to(self.device) + os.makedirs(self.logdir, exist_ok=True) + self.writer = SummaryWriter(self.logdir) + + elif self.ddp: + if self.local_rank == 0: + os.makedirs(self.logdir, exist_ok=True) + self.writer = SummaryWriter(self.logdir) + else: + self.writer = None + if self.model is not None: + self.model.cuda(self.local_rank) + self.model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.model) + self.model = torch.nn.parallel.DistributedDataParallel(self.model, + device_ids=[self.local_rank], + output_device=self.local_rank, + find_unused_parameters=True) + else : + print("not support env_type") + exit(0) + + # self.train_loader = self.get_dataloader(train_dataset, shuffle=True, batch_size=self.batch_size) + self.train_loader, self.val_loader = self.get_multi_processor_loader(train_dataset, val_dataset) + + self.max_steps = self.max_epochs * len(self.train_loader) + + print(f"step number is {self.max_steps}") + + if self.scheduler_type == "cosine_with_warmup": + if self.warmup == 0.0: + self.warmup = 0.1 + assert self.warmup < 1 and self.warmup > 0 + warmup_steps = self.max_steps * self.warmup + self.scheduler = get_cosine_schedule_with_warmup(self.optimizer, + num_warmup_steps=warmup_steps, + num_training_steps=self.max_steps) + print(f"warmup steps is {warmup_steps}") + elif self.scheduler_type == "constant_with_warmup": + if self.warmup == 0.0: + self.warmup = 0.1 + assert self.warmup < 1 and self.warmup > 0 + warmup_steps = self.max_steps * self.warmup + self.scheduler = get_constant_schedule_with_warmup(self.optimizer, + num_warmup_steps=warmup_steps, + ) + print(f"warmup steps is {warmup_steps}") + + elif self.scheduler_type == "poly_with_warmup": + if self.warmup == 0.0: + self.warmup = 0.1 + assert self.warmup < 1 and self.warmup > 0 + warmup_steps = self.max_steps * self.warmup + self.scheduler = get_polynomial_decay_schedule_with_warmup(self.optimizer, + 
num_warmup_steps=warmup_steps, + num_training_steps=self.max_steps + ) + print(f"warmup steps is {warmup_steps}") + + elif self.scheduler_type == "poly": + from light_training.utils.lr_scheduler import PolyLRScheduler + lr = self.optimizer.state_dict()['param_groups'][0]['lr'] + print(f"initial lr is {lr}") + self.scheduler = PolyLRScheduler(self.optimizer, initial_lr=lr, max_steps=self.max_steps) + print(f"scheduler_type is poly, warmup steps is {0}") + + for epoch in range(0, self.max_epochs): + self.epoch = epoch + if self.ddp: + torch.distributed.barrier() + self.train_epoch( + epoch, + ) + if (self.epoch + 1) % self.val_every == 0: + self.validate() + + if self.model is not None: + self.model.train() + + def before_data_to_device(self, batch_data): + return batch_data + + def train_epoch(self, + epoch, + ): + if self.model is not None: + self.model.train() + # if self.local_rank == 0: + with tqdm(total=self.num_step_per_epoch, disable=(self.local_rank != 0)) as t: + for i in range(self.num_step_per_epoch): + self.global_step += 1 + t.set_description('Epoch %i' % epoch) + + if self.print_time: + s = time.time() + batch = next(self.train_loader) + if self.print_time: + e = time.time() + print(f"get batch time is {e - s}") + + batch = self.before_data_to_device(batch) + + batch = self.to_device(batch) + + if self.model is not None: + for param in self.model.parameters(): param.grad = None + + if not self.auto_optim: + loss = self.training_step(batch) + else: + with autocast("cuda", enabled=True) if (self.ddp or 'cuda' in self.device) else dummy_context(): + if self.print_time: + s = time.time() + loss = self.training_step(batch) + if self.print_time: + e = time.time() + print(f"training step time is {e - s}") + + if self.print_time: + s = time.time() + + if self.grad_scaler is not None: + self.grad_scaler.scale(loss).backward() + self.grad_scaler.unscale_(self.optimizer) + torch.nn.utils.clip_grad_norm_(self.model.parameters(), 12) + self.grad_scaler.step(self.optimizer) + self.grad_scaler.update() + else: + loss.backward() + torch.nn.utils.clip_grad_norm_(self.model.parameters(), 12) + self.optimizer.step() + + if self.print_time: + e = time.time() + print(f"backward time is {e - s}") + + if self.scheduler is not None: + self.scheduler.step() + lr = self.optimizer.state_dict()['param_groups'][0]['lr'] + self.log("lr", lr, self.global_step) + + t.set_postfix(loss=loss.item(), lr=lr) + + t.update(1) + + def training_step(self, batch): + raise NotImplementedError + + def validation_step(self, batch): + raise NotImplementedError + + def validation_end(self, mean_val_outputs, val_outputs): + pass + + def log(self, k, v, step): + if self.local_rank == 0: + self.writer.add_scalar(k, scalar_value=v, global_step=step) + + def log_dict(self, dict_, step): + if self.local_rank == 0: + for k, v in dict_.items(): + self.writer.add_scalar(k, scalar_value=v, global_step=step) + + def load_state_dict(self, weight_path, strict=True): + sd = torch.load(weight_path, map_location="cpu") + if "module" in sd : + sd = sd["module"] + new_sd = {} + for k, v in sd.items(): + k = str(k) + new_k = k[7:] if k.startswith("module") else k + new_sd[new_k] = v + + self.model.load_state_dict(new_sd, strict=strict) + + print(f"model parameters are loaded successed.") + diff --git a/SegMamba/light_training/trainer_fp32.py b/SegMamba/light_training/trainer_fp32.py new file mode 100644 index 0000000000000000000000000000000000000000..aa30b2e10d4375e70e69ac19ac30cf566a6c8b72 --- /dev/null +++ 
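A side note on the mixed-precision branch of train_epoch in trainer.py above: it follows the standard GradScaler ordering (scale the loss for backward, unscale before gradient clipping, then step and update). Below is a minimal, self-contained sketch of that ordering only, assuming a CUDA device; the nn.Linear model, the synthetic batch and the hyperparameters are illustrative stand-ins, not part of SegMamba.

import torch
from torch import nn

model = nn.Linear(16, 1).cuda()                      # illustrative stand-in for the segmentation model
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
scaler = torch.cuda.amp.GradScaler()

x = torch.randn(8, 16, device="cuda")
y = torch.randn(8, 1, device="cuda")

for param in model.parameters():
    param.grad = None                                # same gradient-zeroing strategy as train_epoch
with torch.autocast("cuda", enabled=True):           # forward pass and loss under autocast
    loss = nn.functional.mse_loss(model(x), y)
scaler.scale(loss).backward()                        # backward on the scaled loss
scaler.unscale_(optimizer)                           # unscale so clipping sees the true gradient norms
torch.nn.utils.clip_grad_norm_(model.parameters(), 12)
scaler.step(optimizer)                               # skipped internally if inf/nan gradients were found
scaler.update()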
b/SegMamba/light_training/trainer_fp32.py @@ -0,0 +1,471 @@ +import os +from tqdm import tqdm +import numpy as np +import torch +import torch.nn.parallel +import torch.utils.data.distributed +from light_training.utils.lr_scheduler import get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup +from monai.data import DataLoader +import argparse +from .launch import launch_dist +from monai.utils import set_determinism +from .sampler import SequentialDistributedSampler, distributed_concat +from torch.utils.tensorboard import SummaryWriter + +class dummy_context(object): + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + +class Trainer: + def __init__(self, env_type, + max_epochs, + batch_size, + device="cpu", + val_every=1, + num_gpus=1, + logdir="./logs/", + master_ip='localhost', + master_port=17750, + training_script="train.py", + ): + assert env_type in ["pytorch", "ddp", "DDP"], f"not support this env_type: {env_type}" + self.env_type = env_type + self.val_every = val_every + self.max_epochs = max_epochs + self.ddp = False + self.num_gpus = num_gpus + self.device = device + self.local_rank = 0 + self.batch_size = batch_size + self.not_call_launch = True + self.logdir = logdir + self.scheduler = None + self.model = None + self.auto_optim = True + self.warmup = 0.0 + self.scheduler_type = None + + self.optimizer = None + self.patch_size = None + + self.num_step_per_epoch = 250 // self.num_gpus + self.val_number = 100 // self.num_gpus + self.augmentation = True + + torch.backends.cudnn.enabled = True + + gpu_count = torch.cuda.device_count() + if num_gpus > gpu_count: + print("gpu数量不符") + os._exit(0) + + if env_type == "DDP" or env_type == "ddp": + self.ddp = True + self.get_dist_args() + if not self.not_call_launch: + launch_dist(env_type=env_type, + num_nodes=1, + gpus_per_node=num_gpus, + master_addr=master_ip, + master_port=master_port, + training_script=training_script, + ) + os._exit(1) + self.initialize_distributed() + + def initialize_distributed(self): + """Initialize torch.distributed.""" + if self.env_type == 'pytorch': + self.print_rank_0('No need to initialize') + return + if self.env_type == 'DDP' or "deepspeed" in self.env_type: + + if self.local_rank is not None: + device = self.local_rank + torch.cuda.set_device(device) + # Call the init process + init_method = 'env://' + torch.distributed.init_process_group( + backend='nccl', + init_method=init_method) + self.world_size = torch.distributed.get_world_size() + + print(f"world size is {self.world_size}") + + def get_dataloader(self, dataset, shuffle=False, batch_size=1, train=True): + if dataset is None : + return None + if self.env_type == 'pytorch': + return DataLoader(dataset, + batch_size=batch_size, + shuffle=shuffle, + num_workers=12) + else : + if not train: + sampler = SequentialDistributedSampler(dataset, batch_size=batch_size) + + else : + sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=True) + return DataLoader(dataset, + batch_size=batch_size, + num_workers=12, + sampler=sampler, + drop_last=True) + + def get_multi_processor_loader(self, train_ds, val_ds): + from .augment.multi_processor import LimitedLenWrapper + from .augment.train_augment import get_train_transforms, get_validation_transforms, get_train_transforms_noaug + from light_training.dataloading.base_data_loader import DataLoaderMultiProcess + + assert self.patch_size != None + if self.augmentation: + tr_transforms = 
get_train_transforms(patch_size=self.patch_size, mirror_axes=[0, 1, 2]) + else: + tr_transforms = get_train_transforms_noaug(patch_size=self.patch_size, mirror_axes=[0, 1, 2]) + + val_transforms = get_validation_transforms() + + # train_loader = DataLoader(train_ds, num_workers=1, drop_last=True, shuffle=True, batch_size=self.batch_size) + train_loader = DataLoaderMultiProcess(train_ds, annotated_classes_key=self.all_labels, + batch_size=self.batch_size, + patch_size=self.patch_size) + + data_generator = LimitedLenWrapper(self.num_step_per_epoch, data_loader=train_loader, + transform=tr_transforms, + num_processes=12, num_cached=6, seeds=None, + pin_memory=True, wait_time=0.02) + if val_ds is None: + val_data_generator = None + else : + val_loader = DataLoaderMultiProcess(val_ds, annotated_classes_key=self.all_labels, + batch_size=1, + patch_size=self.patch_size, + oversample_foreground_percent=1.0) + + val_data_generator = LimitedLenWrapper(self.val_number, data_loader=val_loader, transform=val_transforms, + num_processes=6, num_cached=3, seeds=None, + pin_memory=True, wait_time=0.02) + return data_generator, val_data_generator + + + def get_dist_args(self): + parser = argparse.ArgumentParser() + # parser.add_argument('--local_rank', type=int, default = 0, help="local_rank") + parser.add_argument('--not_call_launch', + action='store_true', + help="not call launch!") + ds_args = parser.parse_args() + self.local_rank = int(os.environ.get("LOCAL_RANK", 0)) + + print(f"self.local_rank is {self.local_rank}") + self.not_call_launch = ds_args.not_call_launch + self.device = self.local_rank + + def to_device(self, batch): + if isinstance(batch, dict): + for k, v in batch.items(): + if isinstance(batch[k], np.ndarray): + batch[k] = torch.from_numpy(batch[k]) + + if (isinstance(batch[k], torch.Tensor) or isinstance(batch[k], torch.FloatTensor)): + batch[k] = batch[k].to(self.device).contiguous() + + elif isinstance(batch, list) : + batch = [torch.from_numpy(x) for x in batch if isinstance(x, np.ndarray)] + batch = [x.to(self.device).contiguous() for x in batch if (isinstance(x, torch.Tensor) or isinstance(x, torch.FloatTensor))] + + elif isinstance(batch, np.ndarray): + batch = torch.from_numpy(batch) + batch = batch.to(self.device).contiguous() + + else : + print("not support data type") + exit(0) + + return batch + + def validation_single_gpu(self, val_dataset,): + if self.ddp: + print(f"single gpu model not support the ddp") + exit(0) + val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False) + self.model.to(self.device) + val_outputs = [] + self.model.eval() + for idx, batch in tqdm(enumerate(val_loader), total=len(val_loader)): + batch = self.to_device(batch) + + with torch.no_grad(): + val_out = self.validation_step(batch) + assert val_out is not None + + return_list = False + val_outputs.append(val_out) + if isinstance(val_out, list) or isinstance(val_out, tuple): + return_list = True + + val_outputs = torch.tensor(val_outputs) + if not return_list: + # 说明只有一个变量 + length = 0 + v_sum = 0.0 + for v in val_outputs: + if not torch.isnan(v): + v_sum += v + length += 1 + + if length == 0: + v_sum = 0 + else : + v_sum = v_sum / length + else : + num_val = len(val_outputs[0]) + length = [0.0 for i in range(num_val)] + v_sum = [0.0 for i in range(num_val)] + + for v in val_outputs: + for i in range(num_val): + if not torch.isnan(v[i]): + v_sum[i] += v[i] + length[i] += 1 + + for i in range(num_val): + if length[i] == 0: + v_sum[i] = 0 + else : + v_sum[i] = v_sum[i] / length[i] + return 
v_sum, val_outputs + + def validate(self): + val_outputs = [] + if self.global_step % self.val_every == 0 \ + and self.val_loader is not None : + if self.model is not None: + self.model.eval() + if self.ddp: + torch.distributed.barrier() + # for idx, batch in tqdm(enumerate(self.val_loader), total=len(self.val_loader)): + for i in tqdm(range(len(self.val_loader)), total=len(self.val_loader)): + batch = next(self.val_loader) + + batch = self.to_device(batch) + + with torch.no_grad(): + val_out = self.validation_step(batch) + assert val_out is not None + + return_list = False + val_outputs.append(val_out) + if isinstance(val_out, list) or isinstance(val_out, tuple): + return_list = True + + ## 先汇总结果。 + if self.ddp: + val_outputs = torch.tensor(val_outputs).cuda(self.local_rank) + torch.distributed.barrier() + # val_outputs = distributed_concat(val_outputs, num_total_examples=len(self.val_loader.sampler.dataset)) + val_outputs = distributed_concat(val_outputs, num_total_examples=len(self.val_loader) * self.num_gpus) + else : + val_outputs = torch.tensor(val_outputs) + + if self.local_rank == 0: + if not return_list: + # 说明只有一个变量 + length = 0 + v_sum = 0.0 + for v in val_outputs: + if not torch.isnan(v): + v_sum += v + length += 1 + + if length == 0: + v_sum = 0 + else : + v_sum = v_sum / length + self.validation_end(mean_val_outputs=v_sum, val_outputs=val_outputs) + + else : + num_val = len(val_outputs[0]) + length = [0.0 for i in range(num_val)] + v_sum = [0.0 for i in range(num_val)] + + for v in val_outputs: + for i in range(num_val): + if not torch.isnan(v[i]): + v_sum[i] += v[i] + length[i] += 1 + + for i in range(num_val): + if length[i] == 0: + v_sum[i] = 0 + else : + v_sum[i] = v_sum[i] / length[i] + + self.validation_end(mean_val_outputs=v_sum, val_outputs=val_outputs) + + def train(self, + train_dataset, + val_dataset=None, + ): + print(f"augmentation: {self.augmentation}") + assert self.patch_size is not None, "please define the patch_size" + assert self.all_labels is not None, "please define all the labels, for example, [1, 2, 3, ]" + + set_determinism(42 + self.local_rank) + if self.model is not None: + print(f"check model parameter: {next(self.model.parameters()).sum()}, keep model parameters on different processes consistent") + para = sum([np.prod(list(p.size())) for p in self.model.parameters()]) + if self.local_rank == 0: + print(f"model parameters is {para * 4 / 1000 / 1000}M ") + + self.global_step = 0 + if self.env_type == "pytorch": + if self.model is not None: + self.model.to(self.device) + os.makedirs(self.logdir, exist_ok=True) + self.writer = SummaryWriter(self.logdir) + + elif self.ddp: + if self.local_rank == 0: + os.makedirs(self.logdir, exist_ok=True) + self.writer = SummaryWriter(self.logdir) + else: + self.writer = None + if self.model is not None: + self.model.cuda(self.local_rank) + # self.model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.model) + self.model = torch.nn.parallel.DistributedDataParallel(self.model, + device_ids=[self.local_rank], + output_device=self.local_rank, + find_unused_parameters=True) + else : + print("not support env_type") + exit(0) + + # self.train_loader = self.get_dataloader(train_dataset, shuffle=True, batch_size=self.batch_size) + self.train_loader, self.val_loader = self.get_multi_processor_loader(train_dataset, val_dataset) + + self.max_steps = self.max_epochs * len(self.train_loader) + + print(f"step number is {self.max_steps}") + + if self.scheduler_type == "cosine_with_warmup": + if self.warmup == 0.0: + self.warmup 
= 0.1 + assert self.warmup < 1 and self.warmup > 0 + warmup_steps = self.max_steps * self.warmup + self.scheduler = get_cosine_schedule_with_warmup(self.optimizer, + num_warmup_steps=warmup_steps, + num_training_steps=self.max_steps) + print(f"warmup steps is {warmup_steps}") + elif self.scheduler_type == "constant_with_warmup": + if self.warmup == 0.0: + self.warmup = 0.1 + assert self.warmup < 1 and self.warmup > 0 + warmup_steps = self.max_steps * self.warmup + self.scheduler = get_constant_schedule_with_warmup(self.optimizer, + num_warmup_steps=warmup_steps, + ) + print(f"warmup steps is {warmup_steps}") + + elif self.scheduler_type == "poly_with_warmup": + if self.warmup == 0.0: + self.warmup = 0.1 + assert self.warmup < 1 and self.warmup > 0 + warmup_steps = self.max_steps * self.warmup + self.scheduler = get_polynomial_decay_schedule_with_warmup(self.optimizer, + num_warmup_steps=warmup_steps, + num_training_steps=self.max_steps + ) + print(f"warmup steps is {warmup_steps}") + + elif self.scheduler_type == "poly": + from light_training.utils.lr_scheduler import PolyLRScheduler + lr = self.optimizer.state_dict()['param_groups'][0]['lr'] + print(f"initial lr is {lr}") + self.scheduler = PolyLRScheduler(self.optimizer, initial_lr=lr, max_steps=self.max_steps) + print(f"scheduler_type is poly, warmup steps is {0}") + + for epoch in range(0, self.max_epochs): + self.epoch = epoch + if self.ddp: + torch.distributed.barrier() + self.train_epoch( + epoch, + ) + if (self.epoch + 1) % self.val_every == 0: + self.validate() + + if self.model is not None: + self.model.train() + + def train_epoch(self, + epoch, + ): + if self.model is not None: + self.model.train() + with tqdm(total=self.num_step_per_epoch, disable=(self.local_rank != 0)) as t: + for i in range(self.num_step_per_epoch): + # for idx, batch in enumerate(loader): + self.global_step += 1 + t.set_description('Epoch %i' % epoch) + + batch = next(self.train_loader) + + batch = self.to_device(batch) + + if self.model is not None: + for param in self.model.parameters(): param.grad = None + + if not self.auto_optim: + loss = self.training_step(batch) + else: + loss = self.training_step(batch) + loss.backward() + torch.nn.utils.clip_grad_norm_(self.model.parameters(), 12) + self.optimizer.step() + + if self.scheduler is not None: + self.scheduler.step() + lr = self.optimizer.state_dict()['param_groups'][0]['lr'] + self.log("lr", lr, self.global_step) + + t.set_postfix(loss=loss.item(), lr=lr) + + t.update(1) + + def training_step(self, batch): + raise NotImplementedError + + def validation_step(self, batch): + raise NotImplementedError + + def validation_end(self, mean_val_outputs, val_outputs): + pass + + def log(self, k, v, step): + if self.local_rank == 0: + self.writer.add_scalar(k, scalar_value=v, global_step=step) + + def log_dict(self, dict_, step): + if self.local_rank == 0: + for k, v in dict_.items(): + self.writer.add_scalar(k, scalar_value=v, global_step=step) + + def load_state_dict(self, weight_path, strict=True): + sd = torch.load(weight_path, map_location="cpu") + if "module" in sd : + sd = sd["module"] + new_sd = {} + for k, v in sd.items(): + k = str(k) + new_k = k[7:] if k.startswith("module") else k + new_sd[new_k] = v + + self.model.load_state_dict(new_sd, strict=strict) + + print(f"model parameters are loaded successed.") + diff --git a/SegMamba/light_training/utilities/__init__.py b/SegMamba/light_training/utilities/__init__.py new file mode 100644 index 
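To make the abstract pieces of the fp32 Trainer above concrete, here is a minimal sketch of how a subclass is typically wired up: the constructor arguments are the ones defined above, and train() additionally expects self.model, self.optimizer, self.patch_size and self.all_labels to be set before it is called. The batch keys "data"/"seg", the stand-in Conv3d "network", the placeholder validation metric and all hyperparameters are assumptions for illustration only; they are not taken from SegMamba's own training scripts.

import torch
from torch import nn
from light_training.trainer_fp32 import Trainer   # path as added in this diff; adjust to your PYTHONPATH

class MyTrainer(Trainer):
    def __init__(self):
        super().__init__(env_type="pytorch", max_epochs=100, batch_size=2,
                         device="cuda:0", val_every=10, num_gpus=1, logdir="./logs/demo")
        self.patch_size = [128, 128, 128]
        self.all_labels = [1, 2, 3]                               # train() asserts this is defined
        self.model = nn.Conv3d(4, 4, kernel_size=3, padding=1)    # stand-in for a real 3D segmentation net
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=1e-2,
                                         momentum=0.99, nesterov=True)
        self.scheduler_type = "poly"
        self.loss_fn = nn.CrossEntropyLoss()

    def training_step(self, batch):                # must return the loss tensor; backward/step is done for you
        pred = self.model(batch["data"])
        return self.loss_fn(pred, batch["seg"][:, 0].long())

    def validation_step(self, batch):              # return one scalar (or a list of scalars) per batch
        pred = self.model(batch["data"]).argmax(1)
        # placeholder metric; a real trainer would compute per-class Dice here
        return (pred == batch["seg"][:, 0]).float().mean()

    def validation_end(self, mean_val_outputs, val_outputs):
        self.log("val_metric", mean_val_outputs, self.global_step)

# MyTrainer().train(train_dataset, val_dataset)   # datasets must match DataLoaderMultiProcess's expectations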
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SegMamba/light_training/utilities/collate_outputs.py b/SegMamba/light_training/utilities/collate_outputs.py new file mode 100644 index 0000000000000000000000000000000000000000..c9d67984febd927b946b8e44f33eaab0530e4b73 --- /dev/null +++ b/SegMamba/light_training/utilities/collate_outputs.py @@ -0,0 +1,24 @@ +from typing import List + +import numpy as np + + +def collate_outputs(outputs: List[dict]): + """ + used to collate default train_step and validation_step outputs. If you want something different then you gotta + extend this + + we expect outputs to be a list of dictionaries where each of the dict has the same set of keys + """ + collated = {} + for k in outputs[0].keys(): + if np.isscalar(outputs[0][k]): + collated[k] = [o[k] for o in outputs] + elif isinstance(outputs[0][k], np.ndarray): + collated[k] = np.vstack([o[k][None] for o in outputs]) + elif isinstance(outputs[0][k], list): + collated[k] = [item for o in outputs for item in o[k]] + else: + raise ValueError(f'Cannot collate input of type {type(outputs[0][k])}. ' + f'Modify collate_outputs to add this functionality') + return collated \ No newline at end of file diff --git a/SegMamba/light_training/utilities/dataset_name_id_conversion.py b/SegMamba/light_training/utilities/dataset_name_id_conversion.py new file mode 100644 index 0000000000000000000000000000000000000000..1f2c35078009249d1c493639363be54059b1c2c7 --- /dev/null +++ b/SegMamba/light_training/utilities/dataset_name_id_conversion.py @@ -0,0 +1,74 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Union + +from nnunetv2.paths import nnUNet_preprocessed, nnUNet_raw, nnUNet_results +from batchgenerators.utilities.file_and_folder_operations import * +import numpy as np + + +def find_candidate_datasets(dataset_id: int): + startswith = "Dataset%03.0d" % dataset_id + if nnUNet_preprocessed is not None and isdir(nnUNet_preprocessed): + candidates_preprocessed = subdirs(nnUNet_preprocessed, prefix=startswith, join=False) + else: + candidates_preprocessed = [] + + if nnUNet_raw is not None and isdir(nnUNet_raw): + candidates_raw = subdirs(nnUNet_raw, prefix=startswith, join=False) + else: + candidates_raw = [] + + candidates_trained_models = [] + if nnUNet_results is not None and isdir(nnUNet_results): + candidates_trained_models += subdirs(nnUNet_results, prefix=startswith, join=False) + + all_candidates = candidates_preprocessed + candidates_raw + candidates_trained_models + unique_candidates = np.unique(all_candidates) + return unique_candidates + + +def convert_id_to_dataset_name(dataset_id: int): + unique_candidates = find_candidate_datasets(dataset_id) + if len(unique_candidates) > 1: + raise RuntimeError("More than one dataset name found for dataset id %d. Please correct that. 
(I looked in the " + "following folders:\n%s\n%s\n%s" % (dataset_id, nnUNet_raw, nnUNet_preprocessed, nnUNet_results)) + if len(unique_candidates) == 0: + raise RuntimeError(f"Could not find a dataset with the ID {dataset_id}. Make sure the requested dataset ID " + f"exists and that nnU-Net knows where raw and preprocessed data are located " + f"(see Documentation - Installation). Here are your currently defined folders:\n" + f"nnUNet_preprocessed={os.environ.get('nnUNet_preprocessed') if os.environ.get('nnUNet_preprocessed') is not None else 'None'}\n" + f"nnUNet_results={os.environ.get('nnUNet_results') if os.environ.get('nnUNet_results') is not None else 'None'}\n" + f"nnUNet_raw={os.environ.get('nnUNet_raw') if os.environ.get('nnUNet_raw') is not None else 'None'}\n" + f"If something is not right, adapt your environment variables.") + return unique_candidates[0] + + +def convert_dataset_name_to_id(dataset_name: str): + assert dataset_name.startswith("Dataset") + dataset_id = int(dataset_name[7:10]) + return dataset_id + + +def maybe_convert_to_dataset_name(dataset_name_or_id: Union[int, str]) -> str: + if isinstance(dataset_name_or_id, str) and dataset_name_or_id.startswith("Dataset"): + return dataset_name_or_id + if isinstance(dataset_name_or_id, str): + try: + dataset_name_or_id = int(dataset_name_or_id) + except ValueError: + raise ValueError("dataset_name_or_id was a string and did not start with 'Dataset' so we tried to " + "convert it to a dataset ID (int). That failed, however. Please give an integer number " + "('1', '2', etc) or a correct task name. Your input: %s" % dataset_name_or_id) + return convert_id_to_dataset_name(dataset_name_or_id) \ No newline at end of file diff --git a/SegMamba/light_training/utilities/ddp_allgather.py b/SegMamba/light_training/utilities/ddp_allgather.py new file mode 100644 index 0000000000000000000000000000000000000000..c42b3ef654f361904d5fe1868621b3f6f5cd29a6 --- /dev/null +++ b/SegMamba/light_training/utilities/ddp_allgather.py @@ -0,0 +1,49 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
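The string-level helpers in dataset_name_id_conversion.py above can be exercised without any nnU-Net folders configured; only maybe_convert_to_dataset_name called with an integer actually scans nnUNet_raw / nnUNet_preprocessed / nnUNet_results. The dataset name below is made up, and importing the module assumes nnunetv2 is installed, since it pulls in nnunetv2.paths.

from light_training.utilities.dataset_name_id_conversion import (
    convert_dataset_name_to_id, maybe_convert_to_dataset_name)

print(convert_dataset_name_to_id("Dataset002_Heart"))     # 2 (parses characters 7..9 of the name)
print("Dataset%03.0d" % 2)                                # 'Dataset002' -- the prefix used when searching folders

# a full dataset name is returned unchanged, no folder lookup happens
print(maybe_convert_to_dataset_name("Dataset002_Heart"))  # 'Dataset002_Heart'

# an int (or numeric string) triggers the lookup and must match exactly one dataset:
# maybe_convert_to_dataset_name(2)                        # -> 'Dataset002_Heart' if such a dataset exists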
+from typing import Any, Optional, Tuple + +import torch +from torch import distributed + + +def print_if_rank0(*args): + if distributed.get_rank() == 0: + print(*args) + + +class AllGatherGrad(torch.autograd.Function): + # stolen from pytorch lightning + @staticmethod + def forward( + ctx: Any, + tensor: torch.Tensor, + group: Optional["torch.distributed.ProcessGroup"] = None, + ) -> torch.Tensor: + ctx.group = group + + gathered_tensor = [torch.zeros_like(tensor) for _ in range(torch.distributed.get_world_size())] + + torch.distributed.all_gather(gathered_tensor, tensor, group=group) + gathered_tensor = torch.stack(gathered_tensor, dim=0) + + return gathered_tensor + + @staticmethod + def backward(ctx: Any, *grad_output: torch.Tensor) -> Tuple[torch.Tensor, None]: + grad_output = torch.cat(grad_output) + + torch.distributed.all_reduce(grad_output, op=torch.distributed.ReduceOp.SUM, async_op=False, group=ctx.group) + + return grad_output[torch.distributed.get_rank()], None + diff --git a/SegMamba/light_training/utilities/default_n_proc_DA.py b/SegMamba/light_training/utilities/default_n_proc_DA.py new file mode 100644 index 0000000000000000000000000000000000000000..3ecc9228296355d01087f216a9ea2640b90403f8 --- /dev/null +++ b/SegMamba/light_training/utilities/default_n_proc_DA.py @@ -0,0 +1,44 @@ +import subprocess +import os + + +def get_allowed_n_proc_DA(): + """ + This function is used to set the number of processes used on different Systems. It is specific to our cluster + infrastructure at DKFZ. You can modify it to suit your needs. Everything is allowed. + + IMPORTANT: if the environment variable nnUNet_n_proc_DA is set it will overwrite anything in this script + (see first line). + + Interpret the output as the number of processes used for data augmentation PER GPU. + + The way it is implemented here is simply a look up table. We know the hostnames, CPU and GPU configurations of our + systems and set the numbers accordingly. For example, a system with 4 GPUs and 48 threads can use 12 threads per + GPU without overloading the CPU (technically 11 because we have a main process as well), so that's what we use. 
+ """ + + if 'nnUNet_n_proc_DA' in os.environ.keys(): + use_this = int(os.environ['nnUNet_n_proc_DA']) + else: + hostname = subprocess.getoutput(['hostname']) + if hostname in ['Fabian', ]: + use_this = 12 + elif hostname in ['hdf19-gpu16', 'hdf19-gpu17', 'hdf19-gpu18', 'hdf19-gpu19', 'e230-AMDworkstation']: + use_this = 16 + elif hostname.startswith('e230-dgx1'): + use_this = 10 + elif hostname.startswith('hdf18-gpu') or hostname.startswith('e132-comp'): + use_this = 16 + elif hostname.startswith('e230-dgx2'): + use_this = 6 + elif hostname.startswith('e230-dgxa100-'): + use_this = 28 + elif hostname.startswith('lsf22-gpu'): + use_this = 28 + elif hostname.startswith('hdf19-gpu') or hostname.startswith('e071-gpu'): + use_this = 12 + else: + use_this = 12 # default value + + use_this = min(use_this, os.cpu_count()) + return use_this diff --git a/SegMamba/light_training/utilities/file_path_utilities.py b/SegMamba/light_training/utilities/file_path_utilities.py new file mode 100644 index 0000000000000000000000000000000000000000..611f6e24dbcd12b69b1b1695e6a0e6a6318981bf --- /dev/null +++ b/SegMamba/light_training/utilities/file_path_utilities.py @@ -0,0 +1,123 @@ +from multiprocessing import Pool +from typing import Union, Tuple +import numpy as np +from batchgenerators.utilities.file_and_folder_operations import * + +from nnunetv2.configuration import default_num_processes +from nnunetv2.paths import nnUNet_results +from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name + + +def convert_trainer_plans_config_to_identifier(trainer_name, plans_identifier, configuration): + return f'{trainer_name}__{plans_identifier}__{configuration}' + + +def convert_identifier_to_trainer_plans_config(identifier: str): + return os.path.basename(identifier).split('__') + + +def get_output_folder(dataset_name_or_id: Union[str, int], trainer_name: str = 'nnUNetTrainer', + plans_identifier: str = 'nnUNetPlans', configuration: str = '3d_fullres', + fold: Union[str, int] = None) -> str: + tmp = join(nnUNet_results, maybe_convert_to_dataset_name(dataset_name_or_id), + convert_trainer_plans_config_to_identifier(trainer_name, plans_identifier, configuration)) + if fold is not None: + tmp = join(tmp, f'fold_{fold}') + return tmp + + +def parse_dataset_trainer_plans_configuration_from_path(path: str): + folders = split_path(path) + # this here can be a little tricky because we are making assumptions. Let's hope this never fails lol + + # safer to make this depend on two conditions, the fold_x and the DatasetXXX + # first let's see if some fold_X is present + fold_x_present = [i.startswith('fold_') for i in folders] + if any(fold_x_present): + idx = fold_x_present.index(True) + # OK now two entries before that there should be DatasetXXX + assert len(folders[:idx]) >= 2, 'Bad path, cannot extract what I need. Your path needs to be at least ' \ + 'DatasetXXX/MODULE__PLANS__CONFIGURATION for this to work' + if folders[idx - 2].startswith('Dataset'): + splitted = folders[idx - 1].split('__') + assert len(splitted) == 3, 'Bad path, cannot extract what I need. 
Your path needs to be at least ' \ + 'DatasetXXX/MODULE__PLANS__CONFIGURATION for this to work' + return folders[idx - 2], *splitted + else: + # we can only check for dataset followed by a string that is separable into three strings by splitting with '__' + # look for DatasetXXX + dataset_folder = [i.startswith('Dataset') for i in folders] + if any(dataset_folder): + idx = dataset_folder.index(True) + assert len(folders) >= (idx + 1), 'Bad path, cannot extract what I need. Your path needs to be at least ' \ + 'DatasetXXX/MODULE__PLANS__CONFIGURATION for this to work' + splitted = folders[idx + 1].split('__') + assert len(splitted) == 3, 'Bad path, cannot extract what I need. Your path needs to be at least ' \ + 'DatasetXXX/MODULE__PLANS__CONFIGURATION for this to work' + return folders[idx], *splitted + + +def get_ensemble_name(model1_folder, model2_folder, folds: Tuple[int, ...]): + identifier = 'ensemble___' + os.path.basename(model1_folder) + '___' + \ + os.path.basename(model2_folder) + '___' + folds_tuple_to_string(folds) + return identifier + + +def get_ensemble_name_from_d_tr_c(dataset, tr1, p1, c1, tr2, p2, c2, folds: Tuple[int, ...]): + model1_folder = get_output_folder(dataset, tr1, p1, c1) + model2_folder = get_output_folder(dataset, tr2, p2, c2) + + get_ensemble_name(model1_folder, model2_folder, folds) + + +def convert_ensemble_folder_to_model_identifiers_and_folds(ensemble_folder: str): + prefix, *models, folds = os.path.basename(ensemble_folder).split('___') + return models, folds + + +def folds_tuple_to_string(folds: Union[List[int], Tuple[int, ...]]): + s = str(folds[0]) + for f in folds[1:]: + s += f"_{f}" + return s + + +def folds_string_to_tuple(folds_string: str): + folds = folds_string.split('_') + res = [] + for f in folds: + try: + res.append(int(f)) + except ValueError: + res.append(f) + return res + + +def check_workers_alive_and_busy(export_pool: Pool, worker_list: List, results_list: List, allowed_num_queued: int = 0): + """ + + returns True if the number of results that are not ready is greater than the number of available workers + allowed_num_queued + """ + alive = [i.is_alive() for i in worker_list] + if not all(alive): + raise RuntimeError('Some background workers are no longer alive') + + not_ready = [not i.ready() for i in results_list] + if sum(not_ready) >= (len(export_pool._pool) + allowed_num_queued): + return True + return False + + +if __name__ == '__main__': + ### well at this point I could just write tests... 
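A few quick examples of the pure string helpers in file_path_utilities.py above (the values are illustrative; get_output_folder itself additionally needs nnUNet_results to point somewhere, and importing the module assumes nnunetv2 is installed):

from light_training.utilities.file_path_utilities import (
    convert_trainer_plans_config_to_identifier, folds_tuple_to_string, folds_string_to_tuple)

print(convert_trainer_plans_config_to_identifier("nnUNetTrainer", "nnUNetPlans", "3d_fullres"))
# 'nnUNetTrainer__nnUNetPlans__3d_fullres' -- the folder name used under the dataset directory

print(folds_tuple_to_string((0, 1, 2, 3, 4)))   # '0_1_2_3_4'
print(folds_string_to_tuple("0_1_all"))         # [0, 1, 'all'] -- non-numeric parts stay strings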
+ path = '/home/fabian/results/nnUNet_remake/Dataset002_Heart/nnUNetModule__nnUNetPlans__3d_fullres' + print(parse_dataset_trainer_plans_configuration_from_path(path)) + path = 'Dataset002_Heart/nnUNetModule__nnUNetPlans__3d_fullres' + print(parse_dataset_trainer_plans_configuration_from_path(path)) + path = '/home/fabian/results/nnUNet_remake/Dataset002_Heart/nnUNetModule__nnUNetPlans__3d_fullres/fold_all' + print(parse_dataset_trainer_plans_configuration_from_path(path)) + try: + path = '/home/fabian/results/nnUNet_remake/Dataset002_Heart/' + print(parse_dataset_trainer_plans_configuration_from_path(path)) + except AssertionError: + print('yayy, assertion works') diff --git a/SegMamba/light_training/utilities/find_class_by_name.py b/SegMamba/light_training/utilities/find_class_by_name.py new file mode 100644 index 0000000000000000000000000000000000000000..a345d99a707ad9f70eea6c991d9726b1efb4c062 --- /dev/null +++ b/SegMamba/light_training/utilities/find_class_by_name.py @@ -0,0 +1,24 @@ +import importlib +import pkgutil + +from batchgenerators.utilities.file_and_folder_operations import * + + +def recursive_find_python_class(folder: str, class_name: str, current_module: str): + tr = None + for importer, modname, ispkg in pkgutil.iter_modules([folder]): + # print(modname, ispkg) + if not ispkg: + m = importlib.import_module(current_module + "." + modname) + if hasattr(m, class_name): + tr = getattr(m, class_name) + break + + if tr is None: + for importer, modname, ispkg in pkgutil.iter_modules([folder]): + if ispkg: + next_current_module = current_module + "." + modname + tr = recursive_find_python_class(join(folder, modname), class_name, current_module=next_current_module) + if tr is not None: + break + return tr \ No newline at end of file diff --git a/SegMamba/light_training/utilities/get_network_from_plans.py b/SegMamba/light_training/utilities/get_network_from_plans.py new file mode 100644 index 0000000000000000000000000000000000000000..447d1d5e944c5cd24078338679912e3ba19915b5 --- /dev/null +++ b/SegMamba/light_training/utilities/get_network_from_plans.py @@ -0,0 +1,77 @@ +from dynamic_network_architectures.architectures.unet import PlainConvUNet, ResidualEncoderUNet +from dynamic_network_architectures.building_blocks.helper import get_matching_instancenorm, convert_dim_to_conv_op +from dynamic_network_architectures.initialization.weight_init import init_last_bn_before_add_to_0 +from nnunetv2.utilities.network_initialization import InitWeights_He +from nnunetv2.utilities.plans_handling.plans_handler import ConfigurationManager, PlansManager +from torch import nn + + +def get_network_from_plans(plans_manager: PlansManager, + dataset_json: dict, + configuration_manager: ConfigurationManager, + num_input_channels: int, + deep_supervision: bool = True): + """ + we may have to change this in the future to accommodate other plans -> network mappings + + num_input_channels can differ depending on whether we do cascade. Its best to make this info available in the + trainer rather than inferring it again from the plans here. 
+ """ + num_stages = len(configuration_manager.conv_kernel_sizes) + + dim = len(configuration_manager.conv_kernel_sizes[0]) + conv_op = convert_dim_to_conv_op(dim) + + label_manager = plans_manager.get_label_manager(dataset_json) + + segmentation_network_class_name = configuration_manager.UNet_class_name + mapping = { + 'PlainConvUNet': PlainConvUNet, + 'ResidualEncoderUNet': ResidualEncoderUNet + } + kwargs = { + 'PlainConvUNet': { + 'conv_bias': True, + 'norm_op': get_matching_instancenorm(conv_op), + 'norm_op_kwargs': {'eps': 1e-5, 'affine': True}, + 'dropout_op': None, 'dropout_op_kwargs': None, + 'nonlin': nn.LeakyReLU, 'nonlin_kwargs': {'inplace': True}, + }, + 'ResidualEncoderUNet': { + 'conv_bias': True, + 'norm_op': get_matching_instancenorm(conv_op), + 'norm_op_kwargs': {'eps': 1e-5, 'affine': True}, + 'dropout_op': None, 'dropout_op_kwargs': None, + 'nonlin': nn.LeakyReLU, 'nonlin_kwargs': {'inplace': True}, + } + } + assert segmentation_network_class_name in mapping.keys(), 'The network architecture specified by the plans file ' \ + 'is non-standard (maybe your own?). Yo\'ll have to dive ' \ + 'into either this ' \ + 'function (get_network_from_plans) or ' \ + 'the init of your nnUNetModule to accomodate that.' + network_class = mapping[segmentation_network_class_name] + + conv_or_blocks_per_stage = { + 'n_conv_per_stage' + if network_class != ResidualEncoderUNet else 'n_blocks_per_stage': configuration_manager.n_conv_per_stage_encoder, + 'n_conv_per_stage_decoder': configuration_manager.n_conv_per_stage_decoder + } + # network class name!! + model = network_class( + input_channels=num_input_channels, + n_stages=num_stages, + features_per_stage=[min(configuration_manager.UNet_base_num_features * 2 ** i, + configuration_manager.unet_max_num_features) for i in range(num_stages)], + conv_op=conv_op, + kernel_sizes=configuration_manager.conv_kernel_sizes, + strides=configuration_manager.pool_op_kernel_sizes, + num_classes=label_manager.num_segmentation_heads, + deep_supervision=deep_supervision, + **conv_or_blocks_per_stage, + **kwargs[segmentation_network_class_name] + ) + model.apply(InitWeights_He(1e-2)) + if network_class == ResidualEncoderUNet: + model.apply(init_last_bn_before_add_to_0) + return model diff --git a/SegMamba/light_training/utilities/helpers.py b/SegMamba/light_training/utilities/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..42448e3f9c3de88ba13568ff7585797ee29607ab --- /dev/null +++ b/SegMamba/light_training/utilities/helpers.py @@ -0,0 +1,27 @@ +import torch + + +def softmax_helper_dim0(x: torch.Tensor) -> torch.Tensor: + return torch.softmax(x, 0) + + +def softmax_helper_dim1(x: torch.Tensor) -> torch.Tensor: + return torch.softmax(x, 1) + + +def empty_cache(device: torch.device): + if device.type == 'cuda': + torch.cuda.empty_cache() + elif device.type == 'mps': + from torch import mps + mps.empty_cache() + else: + pass + + +class dummy_context(object): + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + pass diff --git a/SegMamba/light_training/utilities/json_export.py b/SegMamba/light_training/utilities/json_export.py new file mode 100644 index 0000000000000000000000000000000000000000..faed954f4a57f39c56851f899e447caab213f29d --- /dev/null +++ b/SegMamba/light_training/utilities/json_export.py @@ -0,0 +1,59 @@ +from collections.abc import Iterable + +import numpy as np +import torch + + +def recursive_fix_for_json_export(my_dict: dict): + # json is stupid. 
'cannot serialize object of type bool_/int64/float64'. Come on bro. + keys = list(my_dict.keys()) # cannot iterate over keys() if we change keys.... + for k in keys: + if isinstance(k, (np.int64, np.int32, np.int8, np.uint8)): + tmp = my_dict[k] + del my_dict[k] + my_dict[int(k)] = tmp + del tmp + k = int(k) + + if isinstance(my_dict[k], dict): + recursive_fix_for_json_export(my_dict[k]) + elif isinstance(my_dict[k], np.ndarray): + assert len(my_dict[k].shape) == 1, 'only 1d arrays are supported' + my_dict[k] = fix_types_iterable(my_dict[k], output_type=list) + elif isinstance(my_dict[k], (np.bool_,)): + my_dict[k] = bool(my_dict[k]) + elif isinstance(my_dict[k], (np.int64, np.int32, np.int8, np.uint8)): + my_dict[k] = int(my_dict[k]) + elif isinstance(my_dict[k], (np.float32, np.float64, np.float16)): + my_dict[k] = float(my_dict[k]) + elif isinstance(my_dict[k], list): + my_dict[k] = fix_types_iterable(my_dict[k], output_type=type(my_dict[k])) + elif isinstance(my_dict[k], tuple): + my_dict[k] = fix_types_iterable(my_dict[k], output_type=tuple) + elif isinstance(my_dict[k], torch.device): + my_dict[k] = str(my_dict[k]) + else: + pass # pray it can be serialized + + +def fix_types_iterable(iterable, output_type): + # this sh!t is hacky as hell and will break if you use it for anything outside nnunet. Keep you hands off of this. + out = [] + for i in iterable: + if type(i) in (np.int64, np.int32, np.int8, np.uint8): + out.append(int(i)) + elif isinstance(i, dict): + recursive_fix_for_json_export(i) + out.append(i) + elif type(i) in (np.float32, np.float64, np.float16): + out.append(float(i)) + elif type(i) in (np.bool_,): + out.append(bool(i)) + elif isinstance(i, str): + out.append(i) + elif isinstance(i, Iterable): + # print('recursive call on', i, type(i)) + out.append(fix_types_iterable(i, type(i))) + else: + out.append(i) + return output_type(out) diff --git a/SegMamba/light_training/utilities/label_handling/__init__.py b/SegMamba/light_training/utilities/label_handling/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SegMamba/light_training/utilities/label_handling/label_handling.py b/SegMamba/light_training/utilities/label_handling/label_handling.py new file mode 100644 index 0000000000000000000000000000000000000000..32f1b6d020189614fd0574b40f2d165def51a786 --- /dev/null +++ b/SegMamba/light_training/utilities/label_handling/label_handling.py @@ -0,0 +1,319 @@ +from __future__ import annotations +from time import time +from typing import Union, List, Tuple, Type + +import numpy as np +import torch +from acvl_utils.cropping_and_padding.bounding_boxes import bounding_box_to_slice +from batchgenerators.utilities.file_and_folder_operations import join + +import nnunetv2 +from nnunetv2.utilities.find_class_by_name import recursive_find_python_class +from nnunetv2.utilities.helpers import softmax_helper_dim0 + +from typing import TYPE_CHECKING + +# see https://adamj.eu/tech/2021/05/13/python-type-hints-how-to-fix-circular-imports/ +if TYPE_CHECKING: + from nnunetv2.utilities.plans_handling.plans_handler import PlansManager, ConfigurationManager + + +class LabelManager(object): + def __init__(self, label_dict: dict, regions_class_order: Union[List[int], None], force_use_labels: bool = False, + inference_nonlin=None): + self._sanity_check(label_dict) + self.label_dict = label_dict + self.regions_class_order = regions_class_order + self._force_use_labels = force_use_labels + + if force_use_labels: + 
self._has_regions = False + else: + self._has_regions: bool = any( + [isinstance(i, (tuple, list)) and len(i) > 1 for i in self.label_dict.values()]) + + self._ignore_label: Union[None, int] = self._determine_ignore_label() + self._all_labels: List[int] = self._get_all_labels() + + self._regions: Union[None, List[Union[int, Tuple[int, ...]]]] = self._get_regions() + + if self.has_ignore_label: + assert self.ignore_label == max( + self.all_labels) + 1, 'If you use the ignore label it must have the highest ' \ + 'label value! It cannot be 0 or in between other labels. ' \ + 'Sorry bro.' + + if inference_nonlin is None: + self.inference_nonlin = torch.sigmoid if self.has_regions else softmax_helper_dim0 + else: + self.inference_nonlin = inference_nonlin + + def _sanity_check(self, label_dict: dict): + if not 'background' in label_dict.keys(): + raise RuntimeError('Background label not declared (remeber that this should be label 0!)') + bg_label = label_dict['background'] + if isinstance(bg_label, (tuple, list)): + raise RuntimeError(f"Background label must be 0. Not a list. Not a tuple. Your background label: {bg_label}") + assert int(bg_label) == 0, f"Background label must be 0. Your background label: {bg_label}" + # not sure if we want to allow regions that contain background. I don't immediately see how this could cause + # problems so we allow it for now. That doesn't mean that this is explicitly supported. It could be that this + # just crashes. + + def _get_all_labels(self) -> List[int]: + all_labels = [] + for k, r in self.label_dict.items(): + # ignore label is not going to be used, hence the name. Duh. + if k == 'ignore': + continue + if isinstance(r, (tuple, list)): + for ri in r: + all_labels.append(int(ri)) + else: + all_labels.append(int(r)) + all_labels = list(np.unique(all_labels)) + all_labels.sort() + return all_labels + + def _get_regions(self) -> Union[None, List[Union[int, Tuple[int, ...]]]]: + if not self._has_regions or self._force_use_labels: + return None + else: + assert self.regions_class_order is not None, 'if region-based training is requested then you need to ' \ + 'define regions_class_order!' + regions = [] + for k, r in self.label_dict.items(): + # ignore ignore label + if k == 'ignore': + continue + # ignore regions that are background + if (np.isscalar(r) and r == 0) \ + or \ + (isinstance(r, (tuple, list)) and len(np.unique(r)) == 1 and np.unique(r)[0] == 0): + continue + if isinstance(r, list): + r = tuple(r) + regions.append(r) + assert len(self.regions_class_order) == len(regions), 'regions_class_order must have as ' \ + 'many entries as there are ' \ + 'regions' + return regions + + def _determine_ignore_label(self) -> Union[None, int]: + ignore_label = self.label_dict.get('ignore') + if ignore_label is not None: + assert isinstance(ignore_label, int), f'Ignore label has to be an integer. It cannot be a region ' \ + f'(list/tuple). Got {type(ignore_label)}.' 
+ return ignore_label + + @property + def has_regions(self) -> bool: + return self._has_regions + + @property + def has_ignore_label(self) -> bool: + return self.ignore_label is not None + + @property + def all_regions(self) -> Union[None, List[Union[int, Tuple[int, ...]]]]: + return self._regions + + @property + def all_labels(self) -> List[int]: + return self._all_labels + + @property + def ignore_label(self) -> Union[None, int]: + return self._ignore_label + + def apply_inference_nonlin(self, logits: Union[np.ndarray, torch.Tensor]) -> \ + Union[np.ndarray, torch.Tensor]: + """ + logits has to have shape (c, x, y(, z)) where c is the number of classes/regions + """ + if isinstance(logits, np.ndarray): + logits = torch.from_numpy(logits) + + with torch.no_grad(): + # softmax etc is not implemented for half + logits = logits.float() + probabilities = self.inference_nonlin(logits) + + return probabilities + + def convert_probabilities_to_segmentation(self, predicted_probabilities: Union[np.ndarray, torch.Tensor]) -> \ + Union[np.ndarray, torch.Tensor]: + """ + assumes that inference_nonlinearity was already applied! + + predicted_probabilities has to have shape (c, x, y(, z)) where c is the number of classes/regions + """ + if not isinstance(predicted_probabilities, (np.ndarray, torch.Tensor)): + raise RuntimeError(f"Unexpected input type. Expected np.ndarray or torch.Tensor," + f" got {type(predicted_probabilities)}") + + if self.has_regions: + assert self.regions_class_order is not None, 'if region-based training is requested then you need to ' \ + 'define regions_class_order!' + # check correct number of outputs + assert predicted_probabilities.shape[0] == self.num_segmentation_heads, \ + f'unexpected number of channels in predicted_probabilities. Expected {self.num_segmentation_heads}, ' \ + f'got {predicted_probabilities.shape[0]}. Remeber that predicted_probabilities should have shape ' \ + f'(c, x, y(, z)).' + + if self.has_regions: + if isinstance(predicted_probabilities, np.ndarray): + segmentation = np.zeros(predicted_probabilities.shape[1:], dtype=np.uint16) + else: + # no uint16 in torch + segmentation = torch.zeros(predicted_probabilities.shape[1:], dtype=torch.int16, + device=predicted_probabilities.device) + for i, c in enumerate(self.regions_class_order): + segmentation[predicted_probabilities[i] > 0.5] = c + else: + segmentation = predicted_probabilities.argmax(0) + + return segmentation + + def convert_logits_to_segmentation(self, predicted_logits: Union[np.ndarray, torch.Tensor]) -> \ + Union[np.ndarray, torch.Tensor]: + probabilities = self.apply_inference_nonlin(predicted_logits) + return self.convert_probabilities_to_segmentation(probabilities) + + def revert_cropping_on_probabilities(self, predicted_probabilities: Union[torch.Tensor, np.ndarray], + bbox: List[List[int]], + original_shape: Union[List[int], Tuple[int, ...]]): + """ + ONLY USE THIS WITH PROBABILITIES, DO NOT USE LOGITS AND DO NOT USE FOR SEGMENTATION MAPS!!! + + predicted_probabilities must be (c, x, y(, z)) + + Why do we do this here? Well if we pad probabilities we need to make sure that convert_logits_to_segmentation + correctly returns background in the padded areas. Also we want to ba able to look at the padded probabilities + and not have strange artifacts. + Only LabelManager knows how this needs to be done. So let's let him/her do it, ok? 
+ """ + # revert cropping + probs_reverted_cropping = np.zeros((predicted_probabilities.shape[0], *original_shape), + dtype=predicted_probabilities.dtype) \ + if isinstance(predicted_probabilities, np.ndarray) else \ + torch.zeros((predicted_probabilities.shape[0], *original_shape), dtype=predicted_probabilities.dtype) + + if not self.has_regions: + probs_reverted_cropping[0] = 1 + + slicer = bounding_box_to_slice(bbox) + probs_reverted_cropping[tuple([slice(None)] + list(slicer))] = predicted_probabilities + return probs_reverted_cropping + + @staticmethod + def filter_background(classes_or_regions: Union[List[int], List[Union[int, Tuple[int, ...]]]]): + # heck yeah + # This is definitely taking list comprehension too far. Enjoy. + return [i for i in classes_or_regions if + ((not isinstance(i, (tuple, list))) and i != 0) + or + (isinstance(i, (tuple, list)) and not ( + len(np.unique(i)) == 1 and np.unique(i)[0] == 0))] + + @property + def foreground_regions(self): + return self.filter_background(self.all_regions) + + @property + def foreground_labels(self): + return self.filter_background(self.all_labels) + + @property + def num_segmentation_heads(self): + if self.has_regions: + return len(self.foreground_regions) + else: + return len(self.all_labels) + + +def get_labelmanager_class_from_plans(plans: dict) -> Type[LabelManager]: + if 'label_manager' not in plans.keys(): + print('No label manager specified in plans. Using default: LabelManager') + return LabelManager + else: + labelmanager_class = recursive_find_python_class(join(nnunetv2.__path__[0], "utilities", "label_handling"), + plans['label_manager'], + current_module="nnunetv2.utilities.label_handling") + return labelmanager_class + + +def convert_labelmap_to_one_hot(segmentation: Union[np.ndarray, torch.Tensor], + all_labels: Union[List, torch.Tensor, np.ndarray, tuple], + output_dtype=None) -> Union[np.ndarray, torch.Tensor]: + """ + if output_dtype is None then we use np.uint8/torch.uint8 + if input is torch.Tensor then output will be on the same device + + np.ndarray is faster than torch.Tensor + + if segmentation is torch.Tensor, this function will be faster if it is LongTensor. If it is somethine else we have + to cast which takes time. + + IMPORTANT: This function only works properly if your labels are consecutive integers, so something like 0, 1, 2, 3, ... + DO NOT use it with 0, 32, 123, 255, ... or whatever (fix your labels, yo) + """ + if isinstance(segmentation, torch.Tensor): + result = torch.zeros((len(all_labels), *segmentation.shape), + dtype=output_dtype if output_dtype is not None else torch.uint8, + device=segmentation.device) + # variant 1, 2x faster than 2 + result.scatter_(0, segmentation[None].long(), 1) # why does this have to be long!? + # variant 2, slower than 1 + # for i, l in enumerate(all_labels): + # result[i] = segmentation == l + else: + result = np.zeros((len(all_labels), *segmentation.shape), + dtype=output_dtype if output_dtype is not None else np.uint8) + # variant 1, fastest in my testing + for i, l in enumerate(all_labels): + result[i] = segmentation == l + # variant 2. 
Takes about twice as long so nah + # result = np.eye(len(all_labels))[segmentation].transpose((3, 0, 1, 2)) + return result + + +def determine_num_input_channels(plans_manager: PlansManager, + configuration_or_config_manager: Union[str, ConfigurationManager], + dataset_json: dict) -> int: + if isinstance(configuration_or_config_manager, str): + config_manager = plans_manager.get_configuration(configuration_or_config_manager) + else: + config_manager = configuration_or_config_manager + + label_manager = plans_manager.get_label_manager(dataset_json) + num_modalities = len(dataset_json['modality']) if 'modality' in dataset_json.keys() else len(dataset_json['channel_names']) + + # cascade has different number of input channels + if config_manager.previous_stage_name is not None: + num_label_inputs = len(label_manager.foreground_labels) + num_input_channels = num_modalities + num_label_inputs + else: + num_input_channels = num_modalities + return num_input_channels + + +if __name__ == '__main__': + # this code used to be able to differentiate variant 1 and 2 to measure time. + num_labels = 7 + seg = np.random.randint(0, num_labels, size=(256, 256, 256), dtype=np.uint8) + seg_torch = torch.from_numpy(seg) + st = time() + onehot_npy = convert_labelmap_to_one_hot(seg, np.arange(num_labels)) + time_1 = time() + onehot_npy2 = convert_labelmap_to_one_hot(seg, np.arange(num_labels)) + time_2 = time() + onehot_torch = convert_labelmap_to_one_hot(seg_torch, np.arange(num_labels)) + time_torch = time() + onehot_torch2 = convert_labelmap_to_one_hot(seg_torch, np.arange(num_labels)) + time_torch2 = time() + print( + f'np: {time_1 - st}, np2: {time_2 - time_1}, torch: {time_torch - time_2}, torch2: {time_torch2 - time_torch}') + onehot_torch = onehot_torch.numpy() + onehot_torch2 = onehot_torch2.numpy() + print(np.all(onehot_torch == onehot_npy)) + print(np.all(onehot_torch2 == onehot_npy)) diff --git a/SegMamba/light_training/utilities/network_initialization.py b/SegMamba/light_training/utilities/network_initialization.py new file mode 100644 index 0000000000000000000000000000000000000000..1ead271800b20873040973280726ee51093d7919 --- /dev/null +++ b/SegMamba/light_training/utilities/network_initialization.py @@ -0,0 +1,12 @@ +from torch import nn + + +class InitWeights_He(object): + def __init__(self, neg_slope=1e-2): + self.neg_slope = neg_slope + + def __call__(self, module): + if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d): + module.weight = nn.init.kaiming_normal_(module.weight, a=self.neg_slope) + if module.bias is not None: + module.bias = nn.init.constant_(module.bias, 0) diff --git a/SegMamba/light_training/utilities/overlay_plots.py b/SegMamba/light_training/utilities/overlay_plots.py new file mode 100644 index 0000000000000000000000000000000000000000..f5d7f9439accd1644feddf7cc74846a2f74d7580 --- /dev/null +++ b/SegMamba/light_training/utilities/overlay_plots.py @@ -0,0 +1,274 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
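For the label handling code above, convert_labelmap_to_one_hot produces one binary channel per entry of all_labels, which is the layout determine_num_input_channels counts on. A tiny NumPy example with made-up values (importing the module assumes the nnunetv2 and acvl_utils dependencies referenced at the top of label_handling.py are installed):

import numpy as np
from light_training.utilities.label_handling.label_handling import convert_labelmap_to_one_hot

seg = np.array([[0, 1],
                [2, 1]], dtype=np.uint8)
onehot = convert_labelmap_to_one_hot(seg, all_labels=[0, 1, 2])
print(onehot.shape)     # (3, 2, 2) -- (len(all_labels), *seg.shape)
print(onehot[1])        # [[0 1]
                        #  [0 1]] -- binary mask of label 1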
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import multiprocessing +from multiprocessing.pool import Pool +from typing import Tuple, Union + +import numpy as np +import pandas as pd +from batchgenerators.utilities.file_and_folder_operations import * +from nnunetv2.configuration import default_num_processes +from nnunetv2.imageio.base_reader_writer import BaseReaderWriter +from nnunetv2.imageio.reader_writer_registry import determine_reader_writer_from_dataset_json +from nnunetv2.paths import nnUNet_raw, nnUNet_preprocessed +from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name +from nnunetv2.utilities.utils import get_identifiers_from_splitted_dataset_folder + +color_cycle = ( + "000000", + "4363d8", + "f58231", + "3cb44b", + "e6194B", + "911eb4", + "ffe119", + "bfef45", + "42d4f4", + "f032e6", + "000075", + "9A6324", + "808000", + "800000", + "469990", +) + + +def hex_to_rgb(hex: str): + assert len(hex) == 6 + return tuple(int(hex[i:i + 2], 16) for i in (0, 2, 4)) + + +def generate_overlay(input_image: np.ndarray, segmentation: np.ndarray, mapping: dict = None, + color_cycle: Tuple[str, ...] = color_cycle, + overlay_intensity: float = 0.6): + """ + image can be 2d greyscale or 2d RGB (color channel in last dimension!) + + Segmentation must be label map of same shape as image (w/o color channels) + + mapping can be label_id -> idx_in_cycle or None + + returned image is scaled to [0, 255] (uint8)!!! + """ + # create a copy of image + image = np.copy(input_image) + + if len(image.shape) == 2: + image = np.tile(image[:, :, None], (1, 1, 3)) + elif len(image.shape) == 3: + if image.shape[2] == 1: + image = np.tile(image, (1, 1, 3)) + else: + raise RuntimeError(f'if 3d image is given the last dimension must be the color channels (3 channels). ' + f'Only 2D images are supported. Your image shape: {image.shape}') + else: + raise RuntimeError("unexpected image shape. 
only 2D images and 2D images with color channels (color in " + "last dimension) are supported") + + # rescale image to [0, 255] + image = image - image.min() + image = image / image.max() * 255 + + # create output + if mapping is None: + uniques = np.sort(pd.unique(segmentation.ravel())) # np.unique(segmentation) + mapping = {i: c for c, i in enumerate(uniques)} + + for l in mapping.keys(): + image[segmentation == l] += overlay_intensity * np.array(hex_to_rgb(color_cycle[mapping[l]])) + + # rescale result to [0, 255] + image = image / image.max() * 255 + return image.astype(np.uint8) + + +def select_slice_to_plot(image: np.ndarray, segmentation: np.ndarray) -> int: + """ + image and segmentation are expected to be 3D + + selects the slice with the largest amount of fg (regardless of label) + + we give image so that we can easily replace this function if needed + """ + fg_mask = segmentation != 0 + fg_per_slice = fg_mask.sum((1, 2)) + selected_slice = int(np.argmax(fg_per_slice)) + return selected_slice + + +def select_slice_to_plot2(image: np.ndarray, segmentation: np.ndarray) -> int: + """ + image and segmentation are expected to be 3D (or 1, x, y) + + selects the slice with the largest amount of fg (how much percent of each class are in each slice? pick slice + with highest avg percent) + + we give image so that we can easily replace this function if needed + """ + classes = [i for i in np.sort(pd.unique(segmentation.ravel())) if i != 0] + fg_per_slice = np.zeros((image.shape[0], len(classes))) + for i, c in enumerate(classes): + fg_mask = segmentation == c + fg_per_slice[:, i] = fg_mask.sum((1, 2)) + fg_per_slice[:, i] /= fg_per_slice.sum() + fg_per_slice = fg_per_slice.mean(1) + return int(np.argmax(fg_per_slice)) + + +def plot_overlay(image_file: str, segmentation_file: str, image_reader_writer: BaseReaderWriter, output_file: str, + overlay_intensity: float = 0.6): + import matplotlib.pyplot as plt + + image, props = image_reader_writer.read_images((image_file, )) + image = image[0] + seg, props_seg = image_reader_writer.read_seg(segmentation_file) + seg = seg[0] + + assert all([i == j for i, j in zip(image.shape, seg.shape)]), "image and seg do not have the same shape: %s, %s" % ( + image_file, segmentation_file) + + assert len(image.shape) == 3, 'only 3D images/segs are supported' + + selected_slice = select_slice_to_plot2(image, seg) + # print(image.shape, selected_slice) + + overlay = generate_overlay(image[selected_slice], seg[selected_slice], overlay_intensity=overlay_intensity) + + plt.imsave(output_file, overlay) + + +def plot_overlay_preprocessed(case_file: str, output_file: str, overlay_intensity: float = 0.6, channel_idx=0): + import matplotlib.pyplot as plt + data = np.load(case_file)['data'] + seg = np.load(case_file)['seg'][0] + + assert channel_idx < (data.shape[0]), 'This dataset only supports channel index up to %d' % (data.shape[0] - 1) + + image = data[channel_idx] + seg[seg < 0] = 0 + + selected_slice = select_slice_to_plot2(image, seg) + + overlay = generate_overlay(image[selected_slice], seg[selected_slice], overlay_intensity=overlay_intensity) + + plt.imsave(output_file, overlay) + + +def multiprocessing_plot_overlay(list_of_image_files, list_of_seg_files, image_reader_writer, + list_of_output_files, overlay_intensity, + num_processes=8): + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + r = p.starmap_async(plot_overlay, zip( + list_of_image_files, list_of_seg_files, [image_reader_writer] * len(list_of_output_files), + list_of_output_files, 
[overlay_intensity] * len(list_of_output_files) + )) + r.get() + + +def multiprocessing_plot_overlay_preprocessed(list_of_case_files, list_of_output_files, overlay_intensity, + num_processes=8, channel_idx=0): + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + r = p.starmap_async(plot_overlay_preprocessed, zip( + list_of_case_files, list_of_output_files, [overlay_intensity] * len(list_of_output_files), + [channel_idx] * len(list_of_output_files) + )) + r.get() + + +def generate_overlays_from_raw(dataset_name_or_id: Union[int, str], output_folder: str, + num_processes: int = 8, channel_idx: int = 0, overlay_intensity: float = 0.6): + dataset_name = maybe_convert_to_dataset_name(dataset_name_or_id) + folder = join(nnUNet_raw, dataset_name) + dataset_json = load_json(join(folder, 'dataset.json')) + identifiers = get_identifiers_from_splitted_dataset_folder(join(folder, 'imagesTr'), dataset_json['file_ending']) + + image_files = [join(folder, 'imagesTr', i + "_%04.0d.nii.gz" % channel_idx) for i in identifiers] + seg_files = [join(folder, 'labelsTr', i + ".nii.gz") for i in identifiers] + + assert all([isfile(i) for i in image_files]) + assert all([isfile(i) for i in seg_files]) + + maybe_mkdir_p(output_folder) + output_files = [join(output_folder, i + '.png') for i in identifiers] + + image_reader_writer = determine_reader_writer_from_dataset_json(dataset_json, image_files[0])() + multiprocessing_plot_overlay(image_files, seg_files, image_reader_writer, output_files, overlay_intensity, num_processes) + + +def generate_overlays_from_preprocessed(dataset_name_or_id: Union[int, str], output_folder: str, + num_processes: int = 8, channel_idx: int = 0, + configuration: str = None, + plans_identifier: str = 'nnUNetPlans', + overlay_intensity: float = 0.6): + dataset_name = maybe_convert_to_dataset_name(dataset_name_or_id) + folder = join(nnUNet_preprocessed, dataset_name) + if not isdir(folder): raise RuntimeError("run preprocessing for that task first") + + plans = load_json(join(folder, plans_identifier + '.json')) + if configuration is None: + if '3d_fullres' in plans['configurations'].keys(): + configuration = '3d_fullres' + else: + configuration = '2d' + data_identifier = plans['configurations'][configuration]["data_identifier"] + preprocessed_folder = join(folder, data_identifier) + + if not isdir(preprocessed_folder): + raise RuntimeError(f"Preprocessed data folder for configuration {configuration} of plans identifier " + f"{plans_identifier} ({dataset_name}) does not exist. Run preprocessing for this " + f"configuration first!") + + identifiers = [i[:-4] for i in subfiles(preprocessed_folder, suffix='.npz', join=False)] + + output_files = [join(output_folder, i + '.png') for i in identifiers] + image_files = [join(preprocessed_folder, i + ".npz") for i in identifiers] + + maybe_mkdir_p(output_folder) + multiprocessing_plot_overlay_preprocessed(image_files, output_files, overlay_intensity=overlay_intensity, + num_processes=num_processes, channel_idx=channel_idx) + + +def entry_point_generate_overlay(): + import argparse + parser = argparse.ArgumentParser("Plots png overlays of the slice with the most foreground. Note that this " + "disregards spacing information!") + parser.add_argument('-d', type=str, help="Dataset name or id", required=True) + parser.add_argument('-o', type=str, help="output folder", required=True) + parser.add_argument('-np', type=int, default=default_num_processes, required=False, + help=f"number of processes used. 
Default: {default_num_processes}") + parser.add_argument('-channel_idx', type=int, default=0, required=False, + help="channel index used (0 = _0000). Default: 0") + parser.add_argument('--use_raw', action='store_true', required=False, help="if set then we use raw data. else " + "we use preprocessed") + parser.add_argument('-p', type=str, required=False, default='nnUNetPlans', + help='plans identifier. Only used if --use_raw is not set! Default: nnUNetPlans') + parser.add_argument('-c', type=str, required=False, default=None, + help='configuration name. Only used if --use_raw is not set! Default: None = ' + '3d_fullres if available, else 2d') + parser.add_argument('-overlay_intensity', type=float, required=False, default=0.6, + help='overlay intensity. Higher = brighter/less transparent') + + + args = parser.parse_args() + + if args.use_raw: + generate_overlays_from_raw(args.d, args.o, args.np, args.channel_idx, + overlay_intensity=args.overlay_intensity) + else: + generate_overlays_from_preprocessed(args.d, args.o, args.np, args.channel_idx, args.c, args.p, + overlay_intensity=args.overlay_intensity) + + +if __name__ == '__main__': + entry_point_generate_overlay() \ No newline at end of file diff --git a/SegMamba/light_training/utilities/plans_handling/__init__.py b/SegMamba/light_training/utilities/plans_handling/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SegMamba/light_training/utilities/plans_handling/plans_handler.py b/SegMamba/light_training/utilities/plans_handling/plans_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..6c39fd1ede290094c2b4d5b12a1f2182cb1226dc --- /dev/null +++ b/SegMamba/light_training/utilities/plans_handling/plans_handler.py @@ -0,0 +1,307 @@ +from __future__ import annotations + +import dynamic_network_architectures +from copy import deepcopy +from functools import lru_cache, partial +from typing import Union, Tuple, List, Type, Callable + +import numpy as np +import torch + +from nnunetv2.preprocessing.resampling.utils import recursive_find_resampling_fn_by_name +from torch import nn + +import nnunetv2 +from batchgenerators.utilities.file_and_folder_operations import load_json, join + +from nnunetv2.imageio.reader_writer_registry import recursive_find_reader_writer_by_name +from nnunetv2.utilities.find_class_by_name import recursive_find_python_class +from nnunetv2.utilities.label_handling.label_handling import get_labelmanager_class_from_plans + + +# see https://adamj.eu/tech/2021/05/13/python-type-hints-how-to-fix-circular-imports/ +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from nnunetv2.utilities.label_handling.label_handling import LabelManager + from nnunetv2.imageio.base_reader_writer import BaseReaderWriter + from nnunetv2.preprocessing.preprocessors.default_preprocessor import DefaultPreprocessor + from nnunetv2.experiment_planning.experiment_planners.default_experiment_planner import ExperimentPlanner + + +class ConfigurationManager(object): + def __init__(self, configuration_dict: dict): + self.configuration = configuration_dict + + def __repr__(self): + return self.configuration.__repr__() + + @property + def data_identifier(self) -> str: + return self.configuration['data_identifier'] + + @property + def preprocessor_name(self) -> str: + return self.configuration['preprocessor_name'] + + @property + @lru_cache(maxsize=1) + def preprocessor_class(self) -> Type[DefaultPreprocessor]: + preprocessor_class = 
recursive_find_python_class(join(nnunetv2.__path__[0], "preprocessing"), + self.preprocessor_name, + current_module="nnunetv2.preprocessing") + return preprocessor_class + + @property + def batch_size(self) -> int: + return self.configuration['batch_size'] + + @property + def patch_size(self) -> List[int]: + return self.configuration['patch_size'] + + @property + def median_image_size_in_voxels(self) -> List[int]: + return self.configuration['median_image_size_in_voxels'] + + @property + def spacing(self) -> List[float]: + return self.configuration['spacing'] + + @property + def normalization_schemes(self) -> List[str]: + return self.configuration['normalization_schemes'] + + @property + def use_mask_for_norm(self) -> List[bool]: + return self.configuration['use_mask_for_norm'] + + @property + def UNet_class_name(self) -> str: + return self.configuration['UNet_class_name'] + + @property + @lru_cache(maxsize=1) + def UNet_class(self) -> Type[nn.Module]: + unet_class = recursive_find_python_class(join(dynamic_network_architectures.__path__[0], "architectures"), + self.UNet_class_name, + current_module="dynamic_network_architectures.architectures") + if unet_class is None: + raise RuntimeError('The network architecture specified by the plans file ' + 'is non-standard (maybe your own?). Fix this by not using ' + 'ConfigurationManager.UNet_class to instantiate ' + 'it (probably just overwrite build_network_architecture of your trainer.') + return unet_class + + @property + def UNet_base_num_features(self) -> int: + return self.configuration['UNet_base_num_features'] + + @property + def n_conv_per_stage_encoder(self) -> List[int]: + return self.configuration['n_conv_per_stage_encoder'] + + @property + def n_conv_per_stage_decoder(self) -> List[int]: + return self.configuration['n_conv_per_stage_decoder'] + + @property + def num_pool_per_axis(self) -> List[int]: + return self.configuration['num_pool_per_axis'] + + @property + def pool_op_kernel_sizes(self) -> List[List[int]]: + return self.configuration['pool_op_kernel_sizes'] + + @property + def conv_kernel_sizes(self) -> List[List[int]]: + return self.configuration['conv_kernel_sizes'] + + @property + def unet_max_num_features(self) -> int: + return self.configuration['unet_max_num_features'] + + @property + @lru_cache(maxsize=1) + def resampling_fn_data(self) -> Callable[ + [Union[torch.Tensor, np.ndarray], + Union[Tuple[int, ...], List[int], np.ndarray], + Union[Tuple[float, ...], List[float], np.ndarray], + Union[Tuple[float, ...], List[float], np.ndarray] + ], + Union[torch.Tensor, np.ndarray]]: + fn = recursive_find_resampling_fn_by_name(self.configuration['resampling_fn_data']) + fn = partial(fn, **self.configuration['resampling_fn_data_kwargs']) + return fn + + @property + @lru_cache(maxsize=1) + def resampling_fn_probabilities(self) -> Callable[ + [Union[torch.Tensor, np.ndarray], + Union[Tuple[int, ...], List[int], np.ndarray], + Union[Tuple[float, ...], List[float], np.ndarray], + Union[Tuple[float, ...], List[float], np.ndarray] + ], + Union[torch.Tensor, np.ndarray]]: + fn = recursive_find_resampling_fn_by_name(self.configuration['resampling_fn_probabilities']) + fn = partial(fn, **self.configuration['resampling_fn_probabilities_kwargs']) + return fn + + @property + @lru_cache(maxsize=1) + def resampling_fn_seg(self) -> Callable[ + [Union[torch.Tensor, np.ndarray], + Union[Tuple[int, ...], List[int], np.ndarray], + Union[Tuple[float, ...], List[float], np.ndarray], + Union[Tuple[float, ...], List[float], np.ndarray] + ], + 
Union[torch.Tensor, np.ndarray]]: + fn = recursive_find_resampling_fn_by_name(self.configuration['resampling_fn_seg']) + fn = partial(fn, **self.configuration['resampling_fn_seg_kwargs']) + return fn + + @property + def batch_dice(self) -> bool: + return self.configuration['batch_dice'] + + @property + def next_stage_names(self) -> Union[List[str], None]: + ret = self.configuration.get('next_stage') + if ret is not None: + if isinstance(ret, str): + ret = [ret] + return ret + + @property + def previous_stage_name(self) -> Union[str, None]: + return self.configuration.get('previous_stage') + + +class PlansManager(object): + def __init__(self, plans_file_or_dict: Union[str, dict]): + """ + Why do we need this? + 1) resolve inheritance in configurations + 2) expose otherwise annoying stuff like getting the label manager or IO class from a string + 3) clearly expose the things that are in the plans instead of hiding them in a dict + 4) cache shit + + This class does not prevent you from going wild. You can still use the plans directly if you prefer + (PlansHandler.plans['key']) + """ + self.plans = plans_file_or_dict if isinstance(plans_file_or_dict, dict) else load_json(plans_file_or_dict) + + def __repr__(self): + return self.plans.__repr__() + + def _internal_resolve_configuration_inheritance(self, configuration_name: str, + visited: Tuple[str, ...] = None) -> dict: + if configuration_name not in self.plans['configurations'].keys(): + raise ValueError(f'The configuration {configuration_name} does not exist in the plans I have. Valid ' + f'configuration names are {list(self.plans["configurations"].keys())}.') + configuration = deepcopy(self.plans['configurations'][configuration_name]) + if 'inherits_from' in configuration: + parent_config_name = configuration['inherits_from'] + + if visited is None: + visited = (configuration_name,) + else: + if parent_config_name in visited: + raise RuntimeError(f"Circular dependency detected. The following configurations were visited " + f"while solving inheritance (in that order!): {visited}. " + f"Current configuration: {configuration_name}. Its parent configuration " + f"is {parent_config_name}.") + visited = (*visited, configuration_name) + + base_config = self._internal_resolve_configuration_inheritance(parent_config_name, visited) + base_config.update(configuration) + configuration = base_config + return configuration + + @lru_cache(maxsize=10) + def get_configuration(self, configuration_name: str): + if configuration_name not in self.plans['configurations'].keys(): + raise RuntimeError(f"Requested configuration {configuration_name} not found in plans. 
" + f"Available configurations: {list(self.plans['configurations'].keys())}") + + configuration_dict = self._internal_resolve_configuration_inheritance(configuration_name) + return ConfigurationManager(configuration_dict) + + @property + def dataset_name(self) -> str: + return self.plans['dataset_name'] + + @property + def plans_name(self) -> str: + return self.plans['plans_name'] + + @property + def original_median_spacing_after_transp(self) -> List[float]: + return self.plans['original_median_spacing_after_transp'] + + @property + def original_median_shape_after_transp(self) -> List[float]: + return self.plans['original_median_shape_after_transp'] + + @property + @lru_cache(maxsize=1) + def image_reader_writer_class(self) -> Type[BaseReaderWriter]: + return recursive_find_reader_writer_by_name(self.plans['image_reader_writer']) + + @property + def transpose_forward(self) -> List[int]: + return self.plans['transpose_forward'] + + @property + def transpose_backward(self) -> List[int]: + return self.plans['transpose_backward'] + + @property + def available_configurations(self) -> List[str]: + return list(self.plans['configurations'].keys()) + + @property + @lru_cache(maxsize=1) + def experiment_planner_class(self) -> Type[ExperimentPlanner]: + planner_name = self.experiment_planner_name + experiment_planner = recursive_find_python_class(join(nnunetv2.__path__[0], "experiment_planning"), + planner_name, + current_module="nnunetv2.experiment_planning") + return experiment_planner + + @property + def experiment_planner_name(self) -> str: + return self.plans['experiment_planner_used'] + + @property + @lru_cache(maxsize=1) + def label_manager_class(self) -> Type[LabelManager]: + return get_labelmanager_class_from_plans(self.plans) + + def get_label_manager(self, dataset_json: dict, **kwargs) -> LabelManager: + return self.label_manager_class(label_dict=dataset_json['labels'], + regions_class_order=dataset_json.get('regions_class_order'), + **kwargs) + + @property + def foreground_intensity_properties_per_channel(self) -> dict: + if 'foreground_intensity_properties_per_channel' not in self.plans.keys(): + if 'foreground_intensity_properties_by_modality' in self.plans.keys(): + return self.plans['foreground_intensity_properties_by_modality'] + return self.plans['foreground_intensity_properties_per_channel'] + + +if __name__ == '__main__': + from nnunetv2.paths import nnUNet_preprocessed + from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name + + plans = load_json(join(nnUNet_preprocessed, maybe_convert_to_dataset_name(3), 'nnUNetPlans.json')) + # build new configuration that inherits from 3d_fullres + plans['configurations']['3d_fullres_bs4'] = { + 'batch_size': 4, + 'inherits_from': '3d_fullres' + } + # now get plans and configuration managers + plans_manager = PlansManager(plans) + configuration_manager = plans_manager.get_configuration('3d_fullres_bs4') + print(configuration_manager) # look for batch size 4 diff --git a/SegMamba/light_training/utilities/tensor_utilities.py b/SegMamba/light_training/utilities/tensor_utilities.py new file mode 100644 index 0000000000000000000000000000000000000000..b16ffcac2e46d93c19522937098f0af5b208aca7 --- /dev/null +++ b/SegMamba/light_training/utilities/tensor_utilities.py @@ -0,0 +1,15 @@ +from typing import Union, List, Tuple + +import numpy as np +import torch + + +def sum_tensor(inp: torch.Tensor, axes: Union[np.ndarray, Tuple, List], keepdim: bool = False) -> torch.Tensor: + axes = np.unique(axes).astype(int) + if 
keepdim: + for ax in axes: + inp = inp.sum(int(ax), keepdim=True) + else: + for ax in sorted(axes, reverse=True): + inp = inp.sum(int(ax)) + return inp diff --git a/SegMamba/light_training/utilities/utils.py b/SegMamba/light_training/utilities/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8703e58055498e47eaf89d3bff445799cc8cc64b --- /dev/null +++ b/SegMamba/light_training/utilities/utils.py @@ -0,0 +1,56 @@ +# Copyright 2021 HIP Applied Computer Vision Lab, Division of Medical Image Computing, German Cancer Research Center +# (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Union + +from batchgenerators.utilities.file_and_folder_operations import * +import numpy as np +import re + +def _convert_to_npy(npz_file: str, unpack_segmentation: bool = True, overwrite_existing: bool = False) -> None: + try: + a = np.load(npz_file) # inexpensive, no compression is done here. This just reads metadata + if overwrite_existing or not isfile(npz_file[:-3] + "npy"): + np.save(npz_file[:-3] + "npy", a['data']) + if unpack_segmentation and (overwrite_existing or not isfile(npz_file[:-4] + "_seg.npy")): + np.save(npz_file[:-4] + "_seg.npy", a['seg']) + except KeyboardInterrupt: + if isfile(npz_file[:-3] + "npy"): + os.remove(npz_file[:-3] + "npy") + if isfile(npz_file[:-4] + "_seg.npy"): + os.remove(npz_file[:-4] + "_seg.npy") + raise KeyboardInterrupt + +def get_identifiers_from_splitted_dataset_folder(folder: str, file_ending: str): + files = subfiles(folder, suffix=file_ending, join=False) + # all files must be .nii.gz and have 4 digit channel index + crop = len(file_ending) + 5 + files = [i[:-crop] for i in files] + # only unique image ids + files = np.unique(files) + return files + + +def create_lists_from_splitted_dataset_folder(folder: str, file_ending: str, identifiers: List[str] = None) -> List[List[str]]: + """ + does not rely on dataset.json + """ + if identifiers is None: + identifiers = get_identifiers_from_splitted_dataset_folder(folder, file_ending) + files = subfiles(folder, suffix=file_ending, join=False, sort=True) + list_of_lists = [] + for f in identifiers: + p = re.compile(re.escape(f) + r"_\d\d\d\d" + re.escape(file_ending)) + list_of_lists.append([join(folder, i) for i in files if p.fullmatch(i)]) + return list_of_lists diff --git a/SegMamba/light_training/utils/files_helper.py b/SegMamba/light_training/utils/files_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..471e746fd45de31db297b0de47417728a1c9128e --- /dev/null +++ b/SegMamba/light_training/utils/files_helper.py @@ -0,0 +1,22 @@ + +import os +import glob +import torch + +def delete_last_model(model_dir, symbol): + + last_model = glob.glob(f"{model_dir}/{symbol}*.pt") + if len(last_model) != 0: + os.remove(last_model[0]) + + +def save_new_model_and_delete_last(model, save_path, delete_symbol=None): + save_dir = os.path.dirname(save_path) + + os.makedirs(save_dir, exist_ok=True) + if delete_symbol is not None: + 
delete_last_model(save_dir, delete_symbol) + + torch.save(model.state_dict(), save_path) + + print(f"model is saved in {save_path}") diff --git a/SegMamba/light_training/utils/log_image.py b/SegMamba/light_training/utils/log_image.py new file mode 100644 index 0000000000000000000000000000000000000000..3e9b590f4859cce6cbfc60b94a3dd0fffdf05ab4 --- /dev/null +++ b/SegMamba/light_training/utils/log_image.py @@ -0,0 +1,20 @@ + + +import os +from PIL import Image + + +def log_image(save_dir, split, images, + global_step, current_epoch): + root = os.path.join(save_dir, "images", split) + for k in images: + + filename = "{}_gs-{:06}_e-{:06}.png".format( + k, + global_step, + current_epoch, + ) + path = os.path.join(root, filename) + os.makedirs(os.path.split(path)[0], exist_ok=True) + + Image.fromarray(images[k]).save(path) \ No newline at end of file diff --git a/SegMamba/light_training/utils/lr_scheduler.py b/SegMamba/light_training/utils/lr_scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..f42d0c92a56389c197017e61add9ca1dc51a6e1c --- /dev/null +++ b/SegMamba/light_training/utils/lr_scheduler.py @@ -0,0 +1,222 @@ +# Copyright 2020 - 2021 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +import warnings +from typing import List + +from torch.optim import Adam, Optimizer +from torch.optim.lr_scheduler import _LRScheduler +from torch.optim.lr_scheduler import LambdaLR +import math +from torch.optim import Optimizer + +class PolyLRScheduler(_LRScheduler): + def __init__(self, optimizer, initial_lr: float, max_steps: int, exponent: float = 0.9, current_step: int = None): + self.optimizer = optimizer + self.initial_lr = initial_lr + self.max_steps = max_steps + self.exponent = exponent + self.ctr = 0 + super().__init__(optimizer, current_step if current_step is not None else -1, False) + + def step(self, current_step=None): + if current_step is None or current_step == -1: + current_step = self.ctr + self.ctr += 1 + + new_lr = self.initial_lr * (1 - current_step / self.max_steps) ** self.exponent + for param_group in self.optimizer.param_groups: + param_group['lr'] = new_lr + +def get_polynomial_decay_schedule_with_warmup( + optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1 +): + """ + Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the + optimizer to end lr defined by *lr_end*, after a warmup period during which it increases linearly from 0 to the + initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + num_training_steps (`int`): + The total number of training steps. + lr_end (`float`, *optional*, defaults to 1e-7): + The end LR. + power (`float`, *optional*, defaults to 1.0): + Power factor. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. 
+ + Note: *power* defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT + implementation at + https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37 + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + + """ + + lr_init = optimizer.defaults["lr"] + if not (lr_init > lr_end): + raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})") + + def lr_lambda(current_step: int): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + elif current_step > num_training_steps: + return lr_end / lr_init # as LambdaLR multiplies by lr_init + else: + lr_range = lr_init - lr_end + decay_steps = num_training_steps - num_warmup_steps + pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps + decay = lr_range * pct_remaining**power + lr_end + return decay / lr_init # as LambdaLR multiplies by lr_init + + return LambdaLR(optimizer, lr_lambda, last_epoch) + +def get_cosine_schedule_with_warmup( + optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1 +): + """ + Create a schedule with a learning rate that decreases following the values of the cosine function between the + initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the + initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + num_training_steps (`int`): + The total number of training steps. + num_cycles (`float`, *optional*, defaults to 0.5): + The number of periods of the cosine function in a schedule (the default is to just decrease from the max + value to 0 following a half-cosine). + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + + def lr_lambda(current_step): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) + return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) + + return LambdaLR(optimizer, lr_lambda, last_epoch) + +def get_constant_schedule_with_warmup(optimizer, num_warmup_steps: int, last_epoch: int = -1): + """ + Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate + increases linearly between 0 and the initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
+ """ + + def lr_lambda(current_step: int): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1.0, num_warmup_steps)) + return 1.0 + + return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) + +class LinearWarmupCosineAnnealingLR(_LRScheduler): + + def __init__( + self, + optimizer: Optimizer, + warmup_epochs: int, + max_epochs: int, + warmup_start_lr: float = 0.0, + eta_min: float = 0.0, + last_epoch: int = -1, + ) -> None: + """ + Args: + optimizer (Optimizer): Wrapped optimizer. + warmup_epochs (int): Maximum number of iterations for linear warmup + max_epochs (int): Maximum number of iterations + warmup_start_lr (float): Learning rate to start the linear warmup. Default: 0. + eta_min (float): Minimum learning rate. Default: 0. + last_epoch (int): The index of last epoch. Default: -1. + """ + self.warmup_epochs = warmup_epochs + self.max_epochs = max_epochs + self.warmup_start_lr = warmup_start_lr + self.eta_min = eta_min + + super(LinearWarmupCosineAnnealingLR, self).__init__(optimizer, last_epoch) + + def get_lr(self) -> List[float]: + """ + Compute learning rate using chainable form of the scheduler + """ + if not self._get_lr_called_within_step: + warnings.warn( + "To get the last learning rate computed by the scheduler, " + "please use `get_last_lr()`.", + UserWarning, + ) + + if self.last_epoch == 0: + return [self.warmup_start_lr] * len(self.base_lrs) + elif self.last_epoch < self.warmup_epochs: + return [ + group["lr"] + (base_lr - self.warmup_start_lr) / (self.warmup_epochs - 1) + for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups) + ] + elif self.last_epoch == self.warmup_epochs: + return self.base_lrs + elif (self.last_epoch - 1 - self.max_epochs) % (2 * (self.max_epochs - self.warmup_epochs)) == 0: + return [ + group["lr"] + (base_lr - self.eta_min) * + (1 - math.cos(math.pi / (self.max_epochs - self.warmup_epochs))) / 2 + for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups) + ] + + return [ + (1 + math.cos(math.pi * (self.last_epoch - self.warmup_epochs) / (self.max_epochs - self.warmup_epochs))) / + ( + 1 + + math.cos(math.pi * (self.last_epoch - self.warmup_epochs - 1) / (self.max_epochs - self.warmup_epochs)) + ) * (group["lr"] - self.eta_min) + self.eta_min for group in self.optimizer.param_groups + ] + + def _get_closed_form_lr(self) -> List[float]: + """ + Called when epoch is passed as a param to the `step` function of the scheduler. 
+ """ + if self.last_epoch < self.warmup_epochs: + return [ + self.warmup_start_lr + self.last_epoch * (base_lr - self.warmup_start_lr) / (self.warmup_epochs - 1) + for base_lr in self.base_lrs + ] + + return [ + self.eta_min + 0.5 * (base_lr - self.eta_min) * + (1 + math.cos(math.pi * (self.last_epoch - self.warmup_epochs) / (self.max_epochs - self.warmup_epochs))) + for base_lr in self.base_lrs + ] diff --git a/SegMamba/mamba/.DS_Store b/SegMamba/mamba/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..d3b3e2def10b2c5558a50a9a3604687560b957ed Binary files /dev/null and b/SegMamba/mamba/.DS_Store differ diff --git a/SegMamba/mamba/.gitmodules b/SegMamba/mamba/.gitmodules new file mode 100644 index 0000000000000000000000000000000000000000..a7445800fb64f3ae664c0b994a54235105986d2e --- /dev/null +++ b/SegMamba/mamba/.gitmodules @@ -0,0 +1,3 @@ +[submodule "3rdparty/lm-evaluation-harness"] + path = 3rdparty/lm-evaluation-harness + url = https://github.com/EleutherAI/lm-evaluation-harness/ diff --git a/SegMamba/mamba/AUTHORS b/SegMamba/mamba/AUTHORS new file mode 100644 index 0000000000000000000000000000000000000000..38557a872f8d603ed963a05c211de7032de5926b --- /dev/null +++ b/SegMamba/mamba/AUTHORS @@ -0,0 +1,2 @@ +Tri Dao, tri@tridao.me +Albert Gu, agu@andrew.cmu.edu diff --git a/SegMamba/mamba/LICENSE b/SegMamba/mamba/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f4abe24eb520fbb077753ae4f34bfaa43cb3b83f --- /dev/null +++ b/SegMamba/mamba/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2023 Tri Dao, Albert Gu + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/SegMamba/mamba/README.md b/SegMamba/mamba/README.md new file mode 100644 index 0000000000000000000000000000000000000000..754cefd7f862a90bad8fbdff71e3793a4e7849e3 --- /dev/null +++ b/SegMamba/mamba/README.md @@ -0,0 +1,149 @@ +# Mamba + +![Mamba](assets/selection.png "Selective State Space") +> **Mamba: Linear-Time Sequence Modeling with Selective State Spaces**\ +> Albert Gu*, Tri Dao*\ +> Paper: https://arxiv.org/abs/2312.00752 + +## About + +Mamba is a new state space model architecture showing promising performance on information-dense data such as language modeling, where previous subquadratic models fall short of Transformers. +It is based on the line of progress on [structured state space models](https://github.com/state-spaces/s4), +with an efficient hardware-aware design and implementation in the spirit of [FlashAttention](https://github.com/Dao-AILab/flash-attention). 
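+
+For orientation, the recurrence computed by this selective SSM can be written out naively. The sketch below is illustrative only (the package ships fused CUDA kernels instead); the function name `naive_selective_scan` and the tensor shapes are assumptions chosen to mirror the reference implementation in `ops/selective_scan_interface.py`:
+
+```
+import torch
+
+def naive_selective_scan(u, delta, A, B, C, D):
+    # u, delta: (batch, d_inner, L); A: (d_inner, d_state); B, C: (batch, d_state, L); D: (d_inner,)
+    batch, d_inner, seqlen = u.shape
+    x = torch.zeros(batch, d_inner, A.shape[1], device=u.device, dtype=u.dtype)
+    ys = []
+    for t in range(seqlen):
+        dt = delta[:, :, t].unsqueeze(-1)  # input-dependent step size (the "selection")
+        x = torch.exp(dt * A) * x + dt * B[:, :, t].unsqueeze(1) * u[:, :, t].unsqueeze(-1)
+        ys.append((x @ C[:, :, t].unsqueeze(-1)).squeeze(-1))  # readout at this time step
+    return torch.stack(ys, dim=-1) + D.unsqueeze(-1) * u  # skip connection via D
+```
+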
+ +## Installation + +- `pip install causal-conv1d`: an efficient implementation of a simple causal Conv1d layer used inside the Mamba block. +- `pip install mamba-ssm`: the core Mamba package. + +It can also be built from source with `pip install .` from this repository. + +If `pip` complains about PyTorch versions, try passing `--no-build-isolation` to `pip`. + +Other requirements: +- Linux +- NVIDIA GPU +- PyTorch 1.12+ +- CUDA 11.6+ + +## Usage + +We expose several levels of interface with the Mamba model. + +### Selective SSM + +Mamba is based on a selective SSM layer, which is the focus of the paper (Section 3; Algorithm 2). + +Source: [ops/selective_scan_interface.py](mamba_ssm/ops/selective_scan_interface.py). + +### Mamba Block + +The main module of this repository is the Mamba architecture block wrapping the selective SSM. + +Source: [modules/mamba_simple.py](mamba_ssm/modules/mamba_simple.py). + +Usage: +``` +from mamba_ssm import Mamba + +batch, length, dim = 2, 64, 16 +x = torch.randn(batch, length, dim).to("cuda") +model = Mamba( + # This module uses roughly 3 * expand * d_model^2 parameters + d_model=dim, # Model dimension d_model + d_state=16, # SSM state expansion factor + d_conv=4, # Local convolution width + expand=2, # Block expansion factor +).to("cuda") +y = model(x) +assert y.shape == x.shape +``` + +### Mamba Language Model + +Finally, we provide an example of a complete language model: a deep sequence model backbone (with repeating Mamba blocks) + language model head. + +Source: [models/mixer_seq_simple.py](mamba_ssm/models/mixer_seq_simple.py). + +This is an example of how to integrate Mamba into an end-to-end neural network. +This example is used in the generation scripts below. + + + +## Pretrained Models + +Pretrained models are uploaded to +[HuggingFace](https://huggingface.co/state-spaces): `mamba-130m`, `mamba-370m`, +`mamba-790m`, `mamba-1.4b`, `mamba-2.8b`. + +The models will be autodownloaded by the generation script below. + +These models were trained on the [Pile](https://huggingface.co/datasets/EleutherAI/pile), and follow the standard model dimensions described by GPT-3 and followed by many open source models: + +| Parameters | Layers | Model dim. | +|------------|--------|------------| +| 130M | 12 | 768 | +| 370M | 24 | 1024 | +| 790M | 24 | 1536 | +| 1.4B | 24 | 2048 | +| 2.8B | 32 | 2560 | + +(The layer count of Mamba should be doubled, as two Mamba blocks are needed for each "layer" (MHA block + MLP block) of a Transformer.) + +Note: these are base models trained only for 300B tokens, without any form of downstream modification (instruction tuning, etc.). +Performance is expected to be comparable or better than other architectures trained on similar data, but not to match larger or fine-tuned models. + + +## Evaluations + +To run zero-shot evaluations of models (corresponding to Table 3 of the paper), +we use the +[lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness/tree/big-refactor) +library. + +1. Pull the `lm-evaluation-harness` repo by `git submodule update --init + --recursive`. We use the `big-refactor` branch. +2. Install `lm-evaluation-harness`: `pip install -e 3rdparty/lm-evaluation-harness` +3. 
Run evaluation with (more documentation at the [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness/tree/big-refactor) repo): +``` +python evals/lm_harness_eval.py --model mamba --model_args pretrained=state-spaces/mamba-130m --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande --device cuda --batch_size 64 +python evals/lm_harness_eval.py --model hf --model_args pretrained=EleutherAI/pythia-160m --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande --device cuda --batch_size 64 +``` + +Note that the result of each task might differ from reported values by 0.1-0.3 due to noise in the evaluation process. + +## Inference + +The script [benchmarks/benchmark_generation_mamba_simple.py](benchmarks/benchmark_generation_mamba_simple.py) +1. autoloads a model from the HuggingFace Hub, +2. generates completions of a user-specified prompt, +3. benchmarks the inference speed of this generation. + +Other configurable options include the top-p (nucleus sampling) probability, and the softmax temperature. + +### Examples + +To test generation latency (e.g. batch size = 1) with different sampling strategies: + +``` +python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.5 +python benchmarks/benchmark_generation_mamba_simple.py --model-name "EleutherAI/pythia-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.5 +``` + +To test generation throughput with random prompts (e.g. large batch size): +``` +python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --batch 128 +python benchmarks/benchmark_generation_mamba_simple.py --model-name "EleutherAI/pythia-2.8b" --batch 128 +``` + +## Citation + +If you use this codebase, or otherwise found our work valuable, please cite Mamba: +``` +@article{mamba, + title={Mamba: Linear-Time Sequence Modeling with Selective State Spaces}, + author={Gu, Albert and Dao, Tri}, + journal={arXiv preprint arXiv:2312.00752}, + year={2023} +} +``` diff --git a/SegMamba/mamba/assets/selection.png b/SegMamba/mamba/assets/selection.png new file mode 100644 index 0000000000000000000000000000000000000000..1adeff6ecac841e1eb7067f20517705f82697cfc --- /dev/null +++ b/SegMamba/mamba/assets/selection.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d57aeeeca3250d0551956494aa0ef7f56f5758563d849cb3a3576f836e13b914 +size 818624 diff --git a/SegMamba/mamba/benchmarks/benchmark_generation_mamba_simple.py b/SegMamba/mamba/benchmarks/benchmark_generation_mamba_simple.py new file mode 100644 index 0000000000000000000000000000000000000000..8f2943cb4bde6f25eddb82b7b999c5c5f8b39acc --- /dev/null +++ b/SegMamba/mamba/benchmarks/benchmark_generation_mamba_simple.py @@ -0,0 +1,88 @@ +# Copyright (c) 2023, Tri Dao, Albert Gu. 
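+#
+# What this script does (summarized from the code below): it loads either a Mamba checkpoint
+# via MambaLMHeadModel or a HuggingFace causal LM, builds input_ids from --prompt (or random
+# token ids of length --promptlen), runs one generation pass of --genlen new tokens, then times
+# `repeats` further passes between CUDA synchronizations and reports the mean prompt processing
+# + decoding latency in milliseconds.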
+ +import argparse +import time +import json + +import torch +import torch.nn.functional as F + +from einops import rearrange + +from transformers import AutoTokenizer, AutoModelForCausalLM + +from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel + + +parser = argparse.ArgumentParser(description="Generation benchmarking") +parser.add_argument("--model-name", type=str, default="state-spaces/mamba-130m") +parser.add_argument("--prompt", type=str, default=None) +parser.add_argument("--promptlen", type=int, default=100) +parser.add_argument("--genlen", type=int, default=100) +parser.add_argument("--temperature", type=float, default=1.0) +parser.add_argument("--topk", type=int, default=1) +parser.add_argument("--topp", type=float, default=1.0) +parser.add_argument("--batch", type=int, default=1) +args = parser.parse_args() + +repeats = 3 +device = "cuda" +dtype = torch.float16 + +print(f"Loading model {args.model_name}") +is_mamba = args.model_name.startswith("state-spaces/mamba-") or "mamba" in args.model_name + +if is_mamba: + tokenizer = AutoTokenizer.from_pretrained("/home/zhulianghui/VisionProjects/mamba/ckpts/gpt-neox-20b-tokenizer") + model = MambaLMHeadModel.from_pretrained(args.model_name, device=device, dtype=dtype) +else: + tokenizer = AutoTokenizer.from_pretrained(args.model_name) + model = AutoModelForCausalLM.from_pretrained(args.model_name, device_map={"": device}, torch_dtype=dtype) +model.eval() +print(f"Number of parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)}") + +torch.random.manual_seed(0) +if args.prompt is None: + input_ids = torch.randint(1, 1000, (args.batch, args.promptlen), dtype=torch.long, device="cuda") + attn_mask = torch.ones_like(input_ids, dtype=torch.long, device="cuda") +else: + tokens = tokenizer(args.prompt, return_tensors="pt") + input_ids = tokens.input_ids.to(device=device) + attn_mask = tokens.attention_mask.to(device=device) +max_length = input_ids.shape[1] + args.genlen + +if is_mamba: + fn = lambda: model.generate( + input_ids=input_ids, + max_length=max_length, + cg=True, + return_dict_in_generate=True, + output_scores=True, + enable_timing=False, + temperature=args.temperature, + top_k=args.topk, + top_p=args.topp, + ) +else: + fn = lambda: model.generate( + input_ids=input_ids, + attention_mask=attn_mask, + max_length=max_length, + return_dict_in_generate=True, + pad_token_id=tokenizer.eos_token_id, + do_sample=True, + temperature=args.temperature, + top_k=args.topk, + top_p=args.topp, + ) +out = fn() +if args.prompt is not None: + print(tokenizer.batch_decode(out.sequences.tolist())) + +torch.cuda.synchronize() +start = time.time() +for _ in range(repeats): + fn() +torch.cuda.synchronize() +print(f"Prompt length: {len(input_ids[0])}, generation length: {len(out.sequences[0]) - len(input_ids[0])}") +print(f"{args.model_name} prompt processing + decoding time: {(time.time() - start) / repeats * 1000:.0f}ms") diff --git a/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/__init__.py b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2ecd144db5dbec72bcfcdcea28c624a7e2bf053b --- /dev/null +++ b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/__init__.py @@ -0,0 +1,5 @@ +__version__ = "1.0.1" + +from mamba_ssm.ops.selective_scan_interface import selective_scan_fn, mamba_inner_fn, bimamba_inner_fn +from mamba_ssm.modules.mamba_simple import Mamba +from mamba_ssm.models.mixer_seq_simple import 
MambaLMHeadModel diff --git a/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/models/__init__.py b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/models/mixer_seq_simple.py b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/models/mixer_seq_simple.py new file mode 100644 index 0000000000000000000000000000000000000000..383f773f1f700cd53176e51327a5d8dc58158da0 --- /dev/null +++ b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/models/mixer_seq_simple.py @@ -0,0 +1,233 @@ +# Copyright (c) 2023, Albert Gu, Tri Dao. + +import math +from functools import partial + +from collections import namedtuple + +import torch +import torch.nn as nn + +from mamba_ssm.modules.mamba_simple import Mamba, Block +from mamba_ssm.utils.generation import GenerationMixin +from mamba_ssm.utils.hf import load_config_hf, load_state_dict_hf + +try: + from mamba_ssm.ops.triton.layernorm import RMSNorm, layer_norm_fn, rms_norm_fn +except ImportError: + RMSNorm, layer_norm_fn, rms_norm_fn = None, None, None + + +def create_block( + d_model, + ssm_cfg=None, + norm_epsilon=1e-5, + rms_norm=False, + residual_in_fp32=False, + fused_add_norm=False, + layer_idx=None, + device=None, + dtype=None, +): + if ssm_cfg is None: + ssm_cfg = {} + factory_kwargs = {"device": device, "dtype": dtype} + mixer_cls = partial(Mamba, layer_idx=layer_idx, **ssm_cfg, **factory_kwargs) + norm_cls = partial( + nn.LayerNorm if not rms_norm else RMSNorm, eps=norm_epsilon, **factory_kwargs + ) + block = Block( + d_model, + mixer_cls, + norm_cls=norm_cls, + fused_add_norm=fused_add_norm, + residual_in_fp32=residual_in_fp32, + ) + block.layer_idx = layer_idx + return block + + +# https://github.com/huggingface/transformers/blob/c28d04e9e252a1a099944e325685f14d242ecdcd/src/transformers/models/gpt2/modeling_gpt2.py#L454 +def _init_weights( + module, + n_layer, + initializer_range=0.02, # Now only used for embedding layer. + rescale_prenorm_residual=True, + n_residuals_per_layer=1, # Change to 2 if we have MLP +): + if isinstance(module, nn.Linear): + if module.bias is not None: + if not getattr(module.bias, "_no_reinit", False): + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Embedding): + nn.init.normal_(module.weight, std=initializer_range) + + if rescale_prenorm_residual: + # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme: + # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale + # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers. 
+ # > -- GPT-2 :: https://openai.com/blog/better-language-models/ + # + # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py + for name, p in module.named_parameters(): + if name in ["out_proj.weight", "fc2.weight"]: + # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block + # Following Pytorch init, except scale by 1/sqrt(2 * n_layer) + # We need to reinit p since this code could be called multiple times + # Having just p *= scale would repeatedly scale it down + nn.init.kaiming_uniform_(p, a=math.sqrt(5)) + with torch.no_grad(): + p /= math.sqrt(n_residuals_per_layer * n_layer) + + +class MixerModel(nn.Module): + def __init__( + self, + d_model: int, + n_layer: int, + vocab_size: int, + ssm_cfg=None, + norm_epsilon: float = 1e-5, + rms_norm: bool = False, + initializer_cfg=None, + fused_add_norm=False, + residual_in_fp32=False, + device=None, + dtype=None, + ) -> None: + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.residual_in_fp32 = residual_in_fp32 + + self.embedding = nn.Embedding(vocab_size, d_model, **factory_kwargs) + + # We change the order of residual and layer norm: + # Instead of LN -> Attn / MLP -> Add, we do: + # Add -> LN -> Attn / MLP / Mixer, returning both the residual branch (output of Add) and + # the main branch (output of MLP / Mixer). The model definition is unchanged. + # This is for performance reason: we can fuse add + layer_norm. + self.fused_add_norm = fused_add_norm + if self.fused_add_norm: + if layer_norm_fn is None or rms_norm_fn is None: + raise ImportError("Failed to import Triton LayerNorm / RMSNorm kernels") + + self.layers = nn.ModuleList( + [ + create_block( + d_model, + ssm_cfg=ssm_cfg, + norm_epsilon=norm_epsilon, + rms_norm=rms_norm, + residual_in_fp32=residual_in_fp32, + fused_add_norm=fused_add_norm, + layer_idx=i, + **factory_kwargs, + ) + for i in range(n_layer) + ] + ) + + self.norm_f = (nn.LayerNorm if not rms_norm else RMSNorm)( + d_model, eps=norm_epsilon, **factory_kwargs + ) + + self.apply( + partial( + _init_weights, + n_layer=n_layer, + **(initializer_cfg if initializer_cfg is not None else {}), + ) + ) + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + return { + i: layer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs) + for i, layer in enumerate(self.layers) + } + + def forward(self, input_ids, inference_params=None): + hidden_states = self.embedding(input_ids) + residual = None + for layer in self.layers: + hidden_states, residual = layer( + hidden_states, residual, inference_params=inference_params + ) + if not self.fused_add_norm: + residual = (hidden_states + residual) if residual is not None else hidden_states + hidden_states = self.norm_f(residual.to(dtype=self.norm_f.weight.dtype)) + else: + # Set prenorm=False here since we don't need the residual + fused_add_norm_fn = rms_norm_fn if isinstance(self.norm_f, RMSNorm) else layer_norm_fn + hidden_states = fused_add_norm_fn( + hidden_states, + self.norm_f.weight, + self.norm_f.bias, + eps=self.norm_f.eps, + residual=residual, + prenorm=False, + residual_in_fp32=self.residual_in_fp32, + ) + return hidden_states + + +class MambaLMHeadModel(nn.Module, GenerationMixin): + + def __init__( + self, + d_model: int, + n_layer: int, + vocab_size: int, + initializer_cfg=None, + pad_vocab_size_multiple: int = 1, + device=None, + dtype=None, + **backbone_kwargs, + ) -> None: + factory_kwargs = {"device": device, "dtype": 
dtype} + super().__init__() + if vocab_size % pad_vocab_size_multiple != 0: + vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple) + self.backbone = MixerModel( + d_model=d_model, + n_layer=n_layer, + vocab_size=vocab_size, + initializer_cfg=initializer_cfg, + **backbone_kwargs, + **factory_kwargs, + ) + self.lm_head = nn.Linear(d_model, vocab_size, bias=False, **factory_kwargs) + + # Initialize weights and apply final processing + self.apply( + partial( + _init_weights, + n_layer=n_layer, + **(initializer_cfg if initializer_cfg is not None else {}), + ) + ) + self.tie_weights() + + def tie_weights(self): + self.lm_head.weight = self.backbone.embedding.weight + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + return self.backbone.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs) + + def forward(self, input_ids, position_ids=None, inference_params=None, num_last_tokens=0): + """ + "position_ids" is just to be compatible with Transformer generation. We don't use it. + num_last_tokens: if > 0, only return the logits for the last n tokens + """ + hidden_states = self.backbone(input_ids, inference_params=inference_params) + if num_last_tokens > 0: + hidden_states = hidden_states[:, -num_last_tokens:] + lm_logits = self.lm_head(hidden_states) + CausalLMOutput = namedtuple("CausalLMOutput", ["logits"]) + return CausalLMOutput(logits=lm_logits) + + @classmethod + def from_pretrained(cls, pretrained_model_name, device=None, dtype=None, **kwargs): + config = load_config_hf(pretrained_model_name) + model = cls(**config, device=device, dtype=dtype, **kwargs) + model.load_state_dict(load_state_dict_hf(pretrained_model_name, device=device, dtype=dtype)) + return model diff --git a/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/modules/__init__.py b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/modules/mamba_simple.py b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/modules/mamba_simple.py new file mode 100644 index 0000000000000000000000000000000000000000..8ffc53d24110bc39651d086f7f3969cf5069f196 --- /dev/null +++ b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/modules/mamba_simple.py @@ -0,0 +1,501 @@ +# Copyright (c) 2023, Tri Dao, Albert Gu. 
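+# SegMamba build of the Mamba mixer: besides the standard selective scan, the "v3" bimamba mode adds a flipped (backward) scan and a third scan over a slice-interleaved re-ordering of the sequence; Block wraps a mixer with an Add -> LayerNorm/RMSNorm residual structure (optionally fused).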
+ +import math +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor + +from einops import rearrange, repeat + +try: + from causal_conv1d import causal_conv1d_fn, causal_conv1d_update +except ImportError: + causal_conv1d_fn, causal_conv1d_update = None + +try: + from mamba_ssm.ops.selective_scan_interface import selective_scan_fn, mamba_inner_fn, bimamba_inner_fn, mamba_inner_fn_no_out_proj +except ImportError: + selective_scan_fn, mamba_inner_fn, bimamba_inner_fn, mamba_inner_fn_no_out_proj = None, None, None, None, None + +try: + from mamba_ssm.ops.triton.selective_state_update import selective_state_update +except ImportError: + selective_state_update = None + +try: + from mamba_ssm.ops.triton.layernorm import RMSNorm, layer_norm_fn, rms_norm_fn +except ImportError: + RMSNorm, layer_norm_fn, rms_norm_fn = None, None, None + + +class Mamba(nn.Module): + def __init__( + self, + d_model, + d_state=16, + d_conv=4, + expand=2, + dt_rank="auto", + dt_min=0.001, + dt_max=0.1, + dt_init="random", + dt_scale=1.0, + dt_init_floor=1e-4, + conv_bias=True, + bias=False, + use_fast_path=True, # Fused kernel options + layer_idx=None, + device=None, + dtype=None, + bimamba_type="none", + nslices=5 + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.d_model = d_model + self.d_state = d_state + self.d_conv = d_conv + self.expand = expand + self.d_inner = int(self.expand * self.d_model) + self.dt_rank = math.ceil(self.d_model / 16) if dt_rank == "auto" else dt_rank + self.use_fast_path = use_fast_path + self.layer_idx = layer_idx + self.bimamba_type = bimamba_type + self.nslices = nslices + + self.in_proj = nn.Linear(self.d_model, self.d_inner * 2, bias=bias, **factory_kwargs) + + self.conv1d = nn.Conv1d( + in_channels=self.d_inner, + out_channels=self.d_inner, + bias=conv_bias, + kernel_size=d_conv, + groups=self.d_inner, + padding=d_conv - 1, + **factory_kwargs, + ) + + self.activation = "silu" + self.act = nn.SiLU() + + self.x_proj = nn.Linear( + self.d_inner, self.dt_rank + self.d_state * 2, bias=False, **factory_kwargs + ) + self.dt_proj = nn.Linear(self.dt_rank, self.d_inner, bias=True, **factory_kwargs) + + # Initialize special dt projection to preserve variance at initialization + dt_init_std = self.dt_rank**-0.5 * dt_scale + if dt_init == "constant": + nn.init.constant_(self.dt_proj.weight, dt_init_std) + elif dt_init == "random": + nn.init.uniform_(self.dt_proj.weight, -dt_init_std, dt_init_std) + else: + raise NotImplementedError + + # Initialize dt bias so that F.softplus(dt_bias) is between dt_min and dt_max + dt = torch.exp( + torch.rand(self.d_inner, **factory_kwargs) * (math.log(dt_max) - math.log(dt_min)) + + math.log(dt_min) + ).clamp(min=dt_init_floor) + # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759 + inv_dt = dt + torch.log(-torch.expm1(-dt)) + with torch.no_grad(): + self.dt_proj.bias.copy_(inv_dt) + # Our initialization would set all Linear.bias to zero, need to mark this one as _no_reinit + self.dt_proj.bias._no_reinit = True + + # S4D real initialization + A = repeat( + torch.arange(1, self.d_state + 1, dtype=torch.float32, device=device), + "n -> d n", + d=self.d_inner, + ).contiguous() + A_log = torch.log(A) # Keep A_log in fp32 + self.A_log = nn.Parameter(A_log) + self.A_log._no_weight_decay = True + + # D "skip" parameter + self.D = nn.Parameter(torch.ones(self.d_inner, device=device)) # Keep in fp32 + self.D._no_weight_decay = True + + # 
bidirectional + assert bimamba_type == "v3" + + A_b = repeat( + torch.arange(1, self.d_state + 1, dtype=torch.float32, device=device), + "n -> d n", + d=self.d_inner, + ).contiguous() + A_b_log = torch.log(A_b) # Keep A_b_log in fp32 + self.A_b_log = nn.Parameter(A_b_log) + self.A_b_log._no_weight_decay = True + + self.conv1d_b = nn.Conv1d( + in_channels=self.d_inner, + out_channels=self.d_inner, + bias=conv_bias, + kernel_size=d_conv, + groups=self.d_inner, + padding=d_conv - 1, + **factory_kwargs, + ) + + self.x_proj_b = nn.Linear( + self.d_inner, self.dt_rank + self.d_state * 2, bias=False, **factory_kwargs + ) + self.dt_proj_b = nn.Linear(self.dt_rank, self.d_inner, bias=True, **factory_kwargs) + + self.D_b = nn.Parameter(torch.ones(self.d_inner, device=device)) # Keep in fp32 + self.D_b._no_weight_decay = True + + # assert bimamba_type == "v3" + # spatial + A_s = repeat( + torch.arange(1, self.d_state + 1, dtype=torch.float32, device=device), + "n -> d n", + d=self.d_inner, + ).contiguous() + A_s_log = torch.log(A_s) # Keep A_b_log in fp32 + self.A_s_log = nn.Parameter(A_s_log) + self.A_s_log._no_weight_decay = True + + self.conv1d_s = nn.Conv1d( + in_channels=self.d_inner, + out_channels=self.d_inner, + bias=conv_bias, + kernel_size=d_conv, + groups=self.d_inner, + padding=d_conv - 1, + **factory_kwargs, + ) + + self.x_proj_s = nn.Linear( + self.d_inner, self.dt_rank + self.d_state * 2, bias=False, **factory_kwargs + ) + self.dt_proj_s = nn.Linear(self.dt_rank, self.d_inner, bias=True, **factory_kwargs) + + self.D_s = nn.Parameter(torch.ones(self.d_inner, device=device)) # Keep in fp32 + self.D_s._no_weight_decay = True + + + + + self.out_proj = nn.Linear(self.d_inner, self.d_model, bias=bias, **factory_kwargs) + + def forward(self, hidden_states, inference_params=None): + """ + hidden_states: (B, L, D) + Returns: same shape as hidden_states + """ + batch, seqlen, dim = hidden_states.shape + + conv_state, ssm_state = None, None + if inference_params is not None: + conv_state, ssm_state = self._get_states_from_cache(inference_params, batch) + if inference_params.seqlen_offset > 0: + # The states are updated inplace + out, _, _ = self.step(hidden_states, conv_state, ssm_state) + return out + + # We do matmul and transpose BLH -> HBL at the same time + xz = rearrange( + self.in_proj.weight @ rearrange(hidden_states, "b l d -> d (b l)"), + "d (b l) -> b d l", + l=seqlen, + ) + if self.in_proj.bias is not None: + xz = xz + rearrange(self.in_proj.bias.to(dtype=xz.dtype), "d -> d 1") + + A = -torch.exp(self.A_log.float()) # (d_inner, d_state) + # In the backward pass we write dx and dz next to each other to avoid torch.cat + if self.use_fast_path and inference_params is None: # Doesn't support outputting the states + if self.bimamba_type == "v3": + A_b = -torch.exp(self.A_b_log.float()) + out = mamba_inner_fn_no_out_proj( + xz, + self.conv1d.weight, + self.conv1d.bias, + self.x_proj.weight, + self.dt_proj.weight, + A, + None, # input-dependent B + None, # input-dependent C + self.D.float(), + delta_bias=self.dt_proj.bias.float(), + delta_softplus=True, + ) + out_b = mamba_inner_fn_no_out_proj( + xz.flip([-1]), + self.conv1d_b.weight, + self.conv1d_b.bias, + self.x_proj_b.weight, + self.dt_proj_b.weight, + A_b, + None, + None, + self.D_b.float(), + delta_bias=self.dt_proj_b.bias.float(), + delta_softplus=True, + ) + A_s = -torch.exp(self.A_s_log.float()) + + xz_s = xz.chunk(self.nslices, dim=-1) + xz_s = torch.stack(xz_s,dim=-1) + xz_s = xz_s.flatten(-2) + out_s = mamba_inner_fn_no_out_proj( 
+ xz_s, + self.conv1d_s.weight, + self.conv1d_s.bias, + self.x_proj_s.weight, + self.dt_proj_s.weight, + A_s, + None, + None, + self.D_s.float(), + delta_bias=self.dt_proj_s.bias.float(), + delta_softplus=True, + ) + out_s = out_s.reshape(batch,self.d_inner,seqlen//self.nslices,self.nslices).permute(0,1,3,2).flatten(-2) + + # F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias) + out = F.linear(rearrange(out + out_b.flip([-1]) + out_s, "b d l -> b l d"), self.out_proj.weight, self.out_proj.bias) + elif self.bimamba_type == "v2": + A_b = -torch.exp(self.A_b_log.float()) + out = mamba_inner_fn_no_out_proj( + xz, + self.conv1d.weight, + self.conv1d.bias, + self.x_proj.weight, + self.dt_proj.weight, + A, + None, # input-dependent B + None, # input-dependent C + self.D.float(), + delta_bias=self.dt_proj.bias.float(), + delta_softplus=True, + ) + out_b = mamba_inner_fn_no_out_proj( + xz.flip([-1]), + self.conv1d_b.weight, + self.conv1d_b.bias, + self.x_proj_b.weight, + self.dt_proj_b.weight, + A_b, + None, + None, + self.D_b.float(), + delta_bias=self.dt_proj_b.bias.float(), + delta_softplus=True, + ) + # F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias) + out = F.linear(rearrange(out + out_b.flip([-1]), "b d l -> b l d"), self.out_proj.weight, self.out_proj.bias) + else: + out = mamba_inner_fn( + xz, + self.conv1d.weight, + self.conv1d.bias, + self.x_proj.weight, + self.dt_proj.weight, + self.out_proj.weight, + self.out_proj.bias, + A, + None, # input-dependent B + None, # input-dependent C + self.D.float(), + delta_bias=self.dt_proj.bias.float(), + delta_softplus=True, + ) + else: + x, z = xz.chunk(2, dim=1) + # Compute short convolution + if conv_state is not None: + conv_state.copy_(x[:, :, -self.d_conv :]) # Update state (B D W) + if causal_conv1d_fn is None: + x = self.act(self.conv1d(x)[..., :seqlen]) + else: + assert self.activation in ["silu", "swish"] + x = causal_conv1d_fn( + x, + rearrange(self.conv1d.weight, "d 1 w -> d w"), + self.conv1d.bias, + self.activation, + ) + + # We're careful here about the layout, to avoid extra transposes. + # We want dt to have d as the slowest moving dimension + # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. 
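+ # x_proj emits dt, B and C from a single matmul; the split below follows the (dt_rank, d_state, d_state) layout defined in __init__.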
+ x_dbl = self.x_proj(rearrange(x, "b d l -> (b l) d")) # (bl d) + dt, B, C = torch.split(x_dbl, [self.dt_rank, self.d_state, self.d_state], dim=-1) + dt = self.dt_proj.weight @ dt.t() + dt = rearrange(dt, "d (b l) -> b d l", l=seqlen) + B = rearrange(B, "(b l) dstate -> b dstate l", l=seqlen).contiguous() + C = rearrange(C, "(b l) dstate -> b dstate l", l=seqlen).contiguous() + assert self.activation in ["silu", "swish"] + y = selective_scan_fn( + x, + dt, + A, + B, + C, + self.D.float(), + z=z, + delta_bias=self.dt_proj.bias.float(), + delta_softplus=True, + return_last_state=ssm_state is not None, + ) + if ssm_state is not None: + y, last_state = y + ssm_state.copy_(last_state) + y = rearrange(y, "b d l -> b l d") + out = self.out_proj(y) + return out + + def step(self, hidden_states, conv_state, ssm_state): + dtype = hidden_states.dtype + assert hidden_states.shape[1] == 1, "Only support decoding with 1 token at a time for now" + xz = self.in_proj(hidden_states.squeeze(1)) # (B 2D) + x, z = xz.chunk(2, dim=-1) # (B D) + + # Conv step + if causal_conv1d_update is None: + conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1)) # Update state (B D W) + conv_state[:, :, -1] = x + x = torch.sum(conv_state * rearrange(self.conv1d.weight, "d 1 w -> d w"), dim=-1) # (B D) + if self.conv1d.bias is not None: + x = x + self.conv1d.bias + x = self.act(x).to(dtype=dtype) + else: + x = causal_conv1d_update( + x, + conv_state, + rearrange(self.conv1d.weight, "d 1 w -> d w"), + self.conv1d.bias, + self.activation, + ) + + x_db = self.x_proj(x) # (B dt_rank+2*d_state) + dt, B, C = torch.split(x_db, [self.dt_rank, self.d_state, self.d_state], dim=-1) + # Don't add dt_bias here + dt = F.linear(dt, self.dt_proj.weight) # (B d_inner) + A = -torch.exp(self.A_log.float()) # (d_inner, d_state) + + # SSM step + if selective_state_update is None: + # Discretize A and B + dt = F.softplus(dt + self.dt_proj.bias.to(dtype=dt.dtype)) + dA = torch.exp(torch.einsum("bd,dn->bdn", dt, A)) + dB = torch.einsum("bd,bn->bdn", dt, B) + ssm_state.copy_(ssm_state * dA + rearrange(x, "b d -> b d 1") * dB) + y = torch.einsum("bdn,bn->bd", ssm_state.to(dtype), C) + y = y + self.D.to(dtype) * x + y = y * self.act(z) # (B D) + else: + y = selective_state_update( + ssm_state, x, dt, A, B, C, self.D, z=z, dt_bias=self.dt_proj.bias, dt_softplus=True + ) + + out = self.out_proj(y) + return out.unsqueeze(1), conv_state, ssm_state + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + device = self.out_proj.weight.device + conv_dtype = self.conv1d.weight.dtype if dtype is None else dtype + conv_state = torch.zeros( + batch_size, self.d_model * self.expand, self.d_conv, device=device, dtype=conv_dtype + ) + ssm_dtype = self.dt_proj.weight.dtype if dtype is None else dtype + # ssm_dtype = torch.float32 + ssm_state = torch.zeros( + batch_size, self.d_model * self.expand, self.d_state, device=device, dtype=ssm_dtype + ) + return conv_state, ssm_state + + def _get_states_from_cache(self, inference_params, batch_size, initialize_states=False): + assert self.layer_idx is not None + if self.layer_idx not in inference_params.key_value_memory_dict: + batch_shape = (batch_size,) + conv_state = torch.zeros( + batch_size, + self.d_model * self.expand, + self.d_conv, + device=self.conv1d.weight.device, + dtype=self.conv1d.weight.dtype, + ) + ssm_state = torch.zeros( + batch_size, + self.d_model * self.expand, + self.d_state, + device=self.dt_proj.weight.device, + dtype=self.dt_proj.weight.dtype, + # 
dtype=torch.float32, + ) + inference_params.key_value_memory_dict[self.layer_idx] = (conv_state, ssm_state) + else: + conv_state, ssm_state = inference_params.key_value_memory_dict[self.layer_idx] + # TODO: What if batch size changes between generation, and we reuse the same states? + if initialize_states: + conv_state.zero_() + ssm_state.zero_() + return conv_state, ssm_state + + +class Block(nn.Module): + def __init__( + self, dim, mixer_cls, norm_cls=nn.LayerNorm, fused_add_norm=False, residual_in_fp32=False + ): + """ + Simple block wrapping a mixer class with LayerNorm/RMSNorm and residual connection" + + This Block has a slightly different structure compared to a regular + prenorm Transformer block. + The standard block is: LN -> MHA/MLP -> Add. + [Ref: https://arxiv.org/abs/2002.04745] + Here we have: Add -> LN -> Mixer, returning both + the hidden_states (output of the mixer) and the residual. + This is purely for performance reasons, as we can fuse add and LayerNorm. + The residual needs to be provided (except for the very first block). + """ + super().__init__() + self.residual_in_fp32 = residual_in_fp32 + self.fused_add_norm = fused_add_norm + self.mixer = mixer_cls(dim) + self.norm = norm_cls(dim) + if self.fused_add_norm: + assert RMSNorm is not None, "RMSNorm import fails" + assert isinstance( + self.norm, (nn.LayerNorm, RMSNorm) + ), "Only LayerNorm and RMSNorm are supported for fused_add_norm" + + def forward( + self, hidden_states: Tensor, residual: Optional[Tensor] = None, inference_params=None + ): + r"""Pass the input through the encoder layer. + + Args: + hidden_states: the sequence to the encoder layer (required). + residual: hidden_states = Mixer(LN(residual)) + """ + if not self.fused_add_norm: + residual = (hidden_states + residual) if residual is not None else hidden_states + hidden_states = self.norm(residual.to(dtype=self.norm.weight.dtype)) + if self.residual_in_fp32: + residual = residual.to(torch.float32) + else: + fused_add_norm_fn = rms_norm_fn if isinstance(self.norm, RMSNorm) else layer_norm_fn + hidden_states, residual = fused_add_norm_fn( + hidden_states, + self.norm.weight, + self.norm.bias, + residual=residual, + prenorm=True, + residual_in_fp32=self.residual_in_fp32, + eps=self.norm.eps, + ) + hidden_states = self.mixer(hidden_states, inference_params=inference_params) + return hidden_states, residual + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + return self.mixer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs) diff --git a/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/__init__.py b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/selective_scan_interface.py b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/selective_scan_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..99b455ed949c123bb453922d5ac88d00f401e392 --- /dev/null +++ b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/selective_scan_interface.py @@ -0,0 +1,709 @@ +# Copyright (c) 2023, Tri Dao, Albert Gu. 
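+# Fused-kernel autograd Functions for the Mamba inner computation (unidirectional, bidirectional and no-out-proj variants), plus *_ref implementations of the same interfaces for comparison.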
+ +import torch +import torch.nn.functional as F +from torch.cuda.amp import custom_bwd, custom_fwd + +from einops import rearrange, repeat + +from causal_conv1d import causal_conv1d_fn +import causal_conv1d_cuda +import selective_scan_cuda + + +class SelectiveScanFn(torch.autograd.Function): + + @staticmethod + def forward(ctx, u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False, + return_last_state=False): + if u.stride(-1) != 1: + u = u.contiguous() + if delta.stride(-1) != 1: + delta = delta.contiguous() + if D is not None: + D = D.contiguous() + if B.stride(-1) != 1: + B = B.contiguous() + if C.stride(-1) != 1: + C = C.contiguous() + if z is not None and z.stride(-1) != 1: + z = z.contiguous() + if B.dim() == 3: + B = rearrange(B, "b dstate l -> b 1 dstate l") + ctx.squeeze_B = True + if C.dim() == 3: + C = rearrange(C, "b dstate l -> b 1 dstate l") + ctx.squeeze_C = True + out, x, *rest = selective_scan_cuda.fwd(u, delta, A, B, C, D, z, delta_bias, delta_softplus) + ctx.delta_softplus = delta_softplus + ctx.has_z = z is not None + last_state = x[:, :, -1, 1::2] # (batch, dim, dstate) + if not ctx.has_z: + ctx.save_for_backward(u, delta, A, B, C, D, delta_bias, x) + return out if not return_last_state else (out, last_state) + else: + ctx.save_for_backward(u, delta, A, B, C, D, z, delta_bias, x, out) + out_z = rest[0] + return out_z if not return_last_state else (out_z, last_state) + + @staticmethod + def backward(ctx, dout, *args): + if not ctx.has_z: + u, delta, A, B, C, D, delta_bias, x = ctx.saved_tensors + z = None + out = None + else: + u, delta, A, B, C, D, z, delta_bias, x, out = ctx.saved_tensors + if dout.stride(-1) != 1: + dout = dout.contiguous() + # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the + # backward of selective_scan_cuda with the backward of chunk). + # Here we just pass in None and dz will be allocated in the C++ code. + du, ddelta, dA, dB, dC, dD, ddelta_bias, *rest = selective_scan_cuda.bwd( + u, delta, A, B, C, D, z, delta_bias, dout, x, out, None, ctx.delta_softplus, + False # option to recompute out_z, not used here + ) + dz = rest[0] if ctx.has_z else None + dB = dB.squeeze(1) if getattr(ctx, "squeeze_B", False) else dB + dC = dC.squeeze(1) if getattr(ctx, "squeeze_C", False) else dC + return (du, ddelta, dA, dB, dC, + dD if D is not None else None, + dz, + ddelta_bias if delta_bias is not None else None, + None, + None) + + +def selective_scan_fn(u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False, + return_last_state=False): + """if return_last_state is True, returns (out, last_state) + last_state has shape (batch, dim, dstate). Note that the gradient of the last state is + not considered in the backward pass. 
+ """ + return SelectiveScanFn.apply(u, delta, A, B, C, D, z, delta_bias, delta_softplus, return_last_state) + + +def selective_scan_ref(u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False, + return_last_state=False): + """ + u: r(B D L) + delta: r(B D L) + A: c(D N) or r(D N) + B: c(D N) or r(B N L) or r(B N 2L) or r(B G N L) or (B G N L) + C: c(D N) or r(B N L) or r(B N 2L) or r(B G N L) or (B G N L) + D: r(D) + z: r(B D L) + delta_bias: r(D), fp32 + + out: r(B D L) + last_state (optional): r(B D dstate) or c(B D dstate) + """ + dtype_in = u.dtype + u = u.float() + delta = delta.float() + if delta_bias is not None: + delta = delta + delta_bias[..., None].float() + if delta_softplus: + delta = F.softplus(delta) + batch, dim, dstate = u.shape[0], A.shape[0], A.shape[1] + is_variable_B = B.dim() >= 3 + is_variable_C = C.dim() >= 3 + if A.is_complex(): + if is_variable_B: + B = torch.view_as_complex(rearrange(B.float(), "... (L two) -> ... L two", two=2)) + if is_variable_C: + C = torch.view_as_complex(rearrange(C.float(), "... (L two) -> ... L two", two=2)) + else: + B = B.float() + C = C.float() + x = A.new_zeros((batch, dim, dstate)) + ys = [] + deltaA = torch.exp(torch.einsum('bdl,dn->bdln', delta, A)) + if not is_variable_B: + deltaB_u = torch.einsum('bdl,dn,bdl->bdln', delta, B, u) + else: + if B.dim() == 3: + deltaB_u = torch.einsum('bdl,bnl,bdl->bdln', delta, B, u) + else: + B = repeat(B, "B G N L -> B (G H) N L", H=dim // B.shape[1]) + deltaB_u = torch.einsum('bdl,bdnl,bdl->bdln', delta, B, u) + if is_variable_C and C.dim() == 4: + C = repeat(C, "B G N L -> B (G H) N L", H=dim // C.shape[1]) + last_state = None + for i in range(u.shape[2]): + x = deltaA[:, :, i] * x + deltaB_u[:, :, i] + if not is_variable_C: + y = torch.einsum('bdn,dn->bd', x, C) + else: + if C.dim() == 3: + y = torch.einsum('bdn,bn->bd', x, C[:, :, i]) + else: + y = torch.einsum('bdn,bdn->bd', x, C[:, :, :, i]) + if i == u.shape[2] - 1: + last_state = x + if y.is_complex(): + y = y.real * 2 + ys.append(y) + y = torch.stack(ys, dim=2) # (batch dim L) + out = y if D is None else y + u * rearrange(D, "d -> d 1") + if z is not None: + out = out * F.silu(z) + out = out.to(dtype=dtype_in) + return out if not return_last_state else (out, last_state) + + +class MambaInnerFnNoOutProj(torch.autograd.Function): + + @staticmethod + @custom_fwd + def forward(ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True, checkpoint_lvl=1): + """ + xz: (batch, dim, seqlen) + """ + assert checkpoint_lvl in [0, 1] + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + if torch.is_autocast_enabled(): + x_proj_weight = x_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + delta_proj_weight = delta_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + if xz.stride(-1) != 1: + xz = xz.contiguous() + conv1d_weight = rearrange(conv1d_weight, "d 1 w -> d w") + x, z = xz.chunk(2, dim=1) + conv1d_bias = conv1d_bias.contiguous() if conv1d_bias is not None else None + conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True) + # We're being very careful here about the layout, to avoid extra transposes. + # We want delta to have d as the slowest moving dimension + # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. 
+ x_dbl = F.linear(rearrange(conv1d_out, 'b d l -> (b l) d'), x_proj_weight) # (bl d) + delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), "d (b l) -> b d l", l = L) + ctx.is_variable_B = B is None + ctx.is_variable_C = C is None + ctx.B_proj_bias_is_None = B_proj_bias is None + ctx.C_proj_bias_is_None = C_proj_bias is None + if B is None: # variable B + B = x_dbl[:, delta_rank:delta_rank + d_state] # (bl dstate) + if B_proj_bias is not None: + B = B + B_proj_bias.to(dtype=B.dtype) + if not A.is_complex(): + # B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous() + B = rearrange(B, "(b l) dstate -> b 1 dstate l", l=L).contiguous() + else: + B = rearrange(B, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous() + else: + if B.stride(-1) != 1: + B = B.contiguous() + if C is None: # variable C + C = x_dbl[:, -d_state:] # (bl dstate) + if C_proj_bias is not None: + C = C + C_proj_bias.to(dtype=C.dtype) + if not A.is_complex(): + # C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous() + C = rearrange(C, "(b l) dstate -> b 1 dstate l", l=L).contiguous() + else: + C = rearrange(C, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous() + else: + if C.stride(-1) != 1: + C = C.contiguous() + if D is not None: + D = D.contiguous() + out, scan_intermediates, out_z = selective_scan_cuda.fwd( + conv1d_out, delta, A, B, C, D, z, delta_bias, delta_softplus + ) + ctx.delta_softplus = delta_softplus + ctx.checkpoint_lvl = checkpoint_lvl + if checkpoint_lvl >= 1: # Will recompute conv1d_out and delta in the backward pass + conv1d_out, delta = None, None + ctx.save_for_backward(xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, + delta_proj_weight, conv1d_out, delta, + A, B, C, D, delta_bias, scan_intermediates, out) + # return rearrange(out_z, "b d l -> b l d") + return out_z + + @staticmethod + @custom_bwd + def backward(ctx, dout): + # dout: (batch, seqlen, dim) + (xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, delta_proj_weight, + conv1d_out, delta, A, B, C, D, delta_bias, scan_intermediates, out) = ctx.saved_tensors + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + x, z = xz.chunk(2, dim=1) + if dout.stride(-1) != 1: + dout = dout.contiguous() + if ctx.checkpoint_lvl == 1: + conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True) + delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), + "d (b l) -> b d l", l = L) + # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the + # backward of selective_scan_cuda with the backward of chunk). 
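+ # dx and dz below are views into one pre-allocated dxz buffer, so the packed (x, z) gradient is returned as a single tensor.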
+ dxz = torch.empty_like(xz) # (batch, dim, seqlen) + dx, dz = dxz.chunk(2, dim=1) + # dout_y = rearrange(dout, "b l d -> b d l") # because no arrange at end of forward, so dout shape is b d l + dconv1d_out, ddelta, dA, dB, dC, dD, ddelta_bias, dz, out_z = selective_scan_cuda.bwd( + conv1d_out, delta, A, B, C, D, z, delta_bias, dout, scan_intermediates, out, dz, + ctx.delta_softplus, + True # option to recompute out_z + ) + dD = dD if D is not None else None + dx_dbl = torch.empty_like(x_dbl) + dB_proj_bias = None + if ctx.is_variable_B: + if not A.is_complex(): + dB = rearrange(dB, "b 1 dstate l -> (b l) dstate").contiguous() + else: + dB = rearrange(dB, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous() + dB_proj_bias = dB.sum(0) if not ctx.B_proj_bias_is_None else None + dx_dbl[:, delta_rank:delta_rank + d_state] = dB # (bl d) + dB = None + dC_proj_bias = None + if ctx.is_variable_C: + if not A.is_complex(): + dC = rearrange(dC, "b 1 dstate l -> (b l) dstate").contiguous() + else: + dC = rearrange(dC, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous() + dC_proj_bias = dC.sum(0) if not ctx.C_proj_bias_is_None else None + dx_dbl[:, -d_state:] = dC # (bl d) + dC = None + ddelta = rearrange(ddelta, "b d l -> d (b l)") + ddelta_proj_weight = torch.einsum("dB,Br->dr", ddelta, x_dbl[:, :delta_rank]) + dx_dbl[:, :delta_rank] = torch.einsum("dB,dr->Br", ddelta, delta_proj_weight) + dconv1d_out = rearrange(dconv1d_out, "b d l -> d (b l)") + dx_proj_weight = torch.einsum("Br,Bd->rd", dx_dbl, rearrange(conv1d_out, "b d l -> (b l) d")) + dconv1d_out = torch.addmm(dconv1d_out, x_proj_weight.t(), dx_dbl.t(), out=dconv1d_out) + dconv1d_out = rearrange(dconv1d_out, "d (b l) -> b d l", b=x.shape[0], l=x.shape[-1]) + # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the + # backward of conv1d with the backward of chunk). 
+ dx, dconv1d_weight, dconv1d_bias = causal_conv1d_cuda.causal_conv1d_bwd( + x, conv1d_weight, conv1d_bias, dconv1d_out, dx, True + ) + dconv1d_bias = dconv1d_bias if conv1d_bias is not None else None + dconv1d_weight = rearrange(dconv1d_weight, "d w -> d 1 w") + return (dxz, dconv1d_weight, dconv1d_bias, dx_proj_weight, ddelta_proj_weight, + dA, dB, dC, dD, + ddelta_bias if delta_bias is not None else None, + dB_proj_bias, dC_proj_bias, None) + + +class MambaInnerFn(torch.autograd.Function): + + @staticmethod + @custom_fwd + def forward(ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True, checkpoint_lvl=1): + """ + xz: (batch, dim, seqlen) + """ + assert checkpoint_lvl in [0, 1] + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + if torch.is_autocast_enabled(): + x_proj_weight = x_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + delta_proj_weight = delta_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + out_proj_weight = out_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + out_proj_bias = (out_proj_bias.to(dtype=torch.get_autocast_gpu_dtype()) + if out_proj_bias is not None else None) + if xz.stride(-1) != 1: + xz = xz.contiguous() + conv1d_weight = rearrange(conv1d_weight, "d 1 w -> d w") + x, z = xz.chunk(2, dim=1) + conv1d_bias = conv1d_bias.contiguous() if conv1d_bias is not None else None + conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True) + # We're being very careful here about the layout, to avoid extra transposes. + # We want delta to have d as the slowest moving dimension + # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. 
+ x_dbl = F.linear(rearrange(conv1d_out, 'b d l -> (b l) d'), x_proj_weight) # (bl d) + delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), "d (b l) -> b d l", l = L) + ctx.is_variable_B = B is None + ctx.is_variable_C = C is None + ctx.B_proj_bias_is_None = B_proj_bias is None + ctx.C_proj_bias_is_None = C_proj_bias is None + if B is None: # variable B + B = x_dbl[:, delta_rank:delta_rank + d_state] # (bl dstate) + if B_proj_bias is not None: + B = B + B_proj_bias.to(dtype=B.dtype) + if not A.is_complex(): + # B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous() + B = rearrange(B, "(b l) dstate -> b 1 dstate l", l=L).contiguous() + else: + B = rearrange(B, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous() + else: + if B.stride(-1) != 1: + B = B.contiguous() + if C is None: # variable C + C = x_dbl[:, -d_state:] # (bl dstate) + if C_proj_bias is not None: + C = C + C_proj_bias.to(dtype=C.dtype) + if not A.is_complex(): + # C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous() + C = rearrange(C, "(b l) dstate -> b 1 dstate l", l=L).contiguous() + else: + C = rearrange(C, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous() + else: + if C.stride(-1) != 1: + C = C.contiguous() + if D is not None: + D = D.contiguous() + out, scan_intermediates, out_z = selective_scan_cuda.fwd( + conv1d_out, delta, A, B, C, D, z, delta_bias, delta_softplus + ) + ctx.delta_softplus = delta_softplus + ctx.out_proj_bias_is_None = out_proj_bias is None + ctx.checkpoint_lvl = checkpoint_lvl + if checkpoint_lvl >= 1: # Will recompute conv1d_out and delta in the backward pass + conv1d_out, delta = None, None + ctx.save_for_backward(xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, + delta_proj_weight, out_proj_weight, conv1d_out, delta, + A, B, C, D, delta_bias, scan_intermediates, out) + return F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias) + + @staticmethod + @custom_bwd + def backward(ctx, dout): + # dout: (batch, seqlen, dim) + (xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, delta_proj_weight, out_proj_weight, + conv1d_out, delta, A, B, C, D, delta_bias, scan_intermediates, out) = ctx.saved_tensors + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + x, z = xz.chunk(2, dim=1) + if dout.stride(-1) != 1: + dout = dout.contiguous() + if ctx.checkpoint_lvl == 1: + conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True) + delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), + "d (b l) -> b d l", l = L) + # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the + # backward of selective_scan_cuda with the backward of chunk). 
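+ # The output projection is folded into the incoming gradient first (dout -> dout_y) so the scan backward runs on the (b, d, l) layout.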
+ dxz = torch.empty_like(xz) # (batch, dim, seqlen) + dx, dz = dxz.chunk(2, dim=1) + dout = rearrange(dout, "b l e -> e (b l)") + dout_y = rearrange(out_proj_weight.t() @ dout, "d (b l) -> b d l", l=L) + dconv1d_out, ddelta, dA, dB, dC, dD, ddelta_bias, dz, out_z = selective_scan_cuda.bwd( + conv1d_out, delta, A, B, C, D, z, delta_bias, dout_y, scan_intermediates, out, dz, + ctx.delta_softplus, + True # option to recompute out_z + ) + dout_proj_weight = torch.einsum("eB,dB->ed", dout, rearrange(out_z, "b d l -> d (b l)")) + dout_proj_bias = dout.sum(dim=(0, 1)) if not ctx.out_proj_bias_is_None else None + dD = dD if D is not None else None + dx_dbl = torch.empty_like(x_dbl) + dB_proj_bias = None + if ctx.is_variable_B: + if not A.is_complex(): + dB = rearrange(dB, "b 1 dstate l -> (b l) dstate").contiguous() + else: + dB = rearrange(dB, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous() + dB_proj_bias = dB.sum(0) if not ctx.B_proj_bias_is_None else None + dx_dbl[:, delta_rank:delta_rank + d_state] = dB # (bl d) + dB = None + dC_proj_bias = None + if ctx.is_variable_C: + if not A.is_complex(): + dC = rearrange(dC, "b 1 dstate l -> (b l) dstate").contiguous() + else: + dC = rearrange(dC, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous() + dC_proj_bias = dC.sum(0) if not ctx.C_proj_bias_is_None else None + dx_dbl[:, -d_state:] = dC # (bl d) + dC = None + ddelta = rearrange(ddelta, "b d l -> d (b l)") + ddelta_proj_weight = torch.einsum("dB,Br->dr", ddelta, x_dbl[:, :delta_rank]) + dx_dbl[:, :delta_rank] = torch.einsum("dB,dr->Br", ddelta, delta_proj_weight) + dconv1d_out = rearrange(dconv1d_out, "b d l -> d (b l)") + dx_proj_weight = torch.einsum("Br,Bd->rd", dx_dbl, rearrange(conv1d_out, "b d l -> (b l) d")) + dconv1d_out = torch.addmm(dconv1d_out, x_proj_weight.t(), dx_dbl.t(), out=dconv1d_out) + dconv1d_out = rearrange(dconv1d_out, "d (b l) -> b d l", b=x.shape[0], l=x.shape[-1]) + # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the + # backward of conv1d with the backward of chunk). 
+ dx, dconv1d_weight, dconv1d_bias = causal_conv1d_cuda.causal_conv1d_bwd( + x, conv1d_weight, conv1d_bias, dconv1d_out, dx, True + ) + dconv1d_bias = dconv1d_bias if conv1d_bias is not None else None + dconv1d_weight = rearrange(dconv1d_weight, "d w -> d 1 w") + return (dxz, dconv1d_weight, dconv1d_bias, dx_proj_weight, ddelta_proj_weight, + dout_proj_weight, dout_proj_bias, + dA, dB, dC, dD, + ddelta_bias if delta_bias is not None else None, + dB_proj_bias, dC_proj_bias, None) + + +class BiMambaInnerFn(torch.autograd.Function): + + @staticmethod + @custom_fwd + def forward(ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, A_b, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True, checkpoint_lvl=1): + """ + xz: (batch, dim, seqlen) + """ + assert checkpoint_lvl in [0, 1] + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + if torch.is_autocast_enabled(): + x_proj_weight = x_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + delta_proj_weight = delta_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + out_proj_weight = out_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + out_proj_bias = (out_proj_bias.to(dtype=torch.get_autocast_gpu_dtype()) + if out_proj_bias is not None else None) + if xz.stride(-1) != 1: + xz = xz.contiguous() + conv1d_weight = rearrange(conv1d_weight, "d 1 w -> d w") + x, z = xz.chunk(2, dim=1) + conv1d_bias = conv1d_bias.contiguous() if conv1d_bias is not None else None + conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True) + # We're being very careful here about the layout, to avoid extra transposes. + # We want delta to have d as the slowest moving dimension + # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. + x_dbl = F.linear(rearrange(conv1d_out, 'b d l -> (b l) d'), x_proj_weight) # (bl d) + delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), "d (b l) -> b d l", l = L) + ctx.is_variable_B = B is None + ctx.is_variable_C = C is None + ctx.B_proj_bias_is_None = B_proj_bias is None + ctx.C_proj_bias_is_None = C_proj_bias is None + if B is None: # variable B + B = x_dbl[:, delta_rank:delta_rank + d_state] # (bl dstate) + if B_proj_bias is not None: + B = B + B_proj_bias.to(dtype=B.dtype) + if not A.is_complex(): + # B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous() + B = rearrange(B, "(b l) dstate -> b 1 dstate l", l=L).contiguous() + else: + B = rearrange(B, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous() + else: + if B.stride(-1) != 1: + B = B.contiguous() + if C is None: # variable C + C = x_dbl[:, -d_state:] # (bl dstate) + if C_proj_bias is not None: + C = C + C_proj_bias.to(dtype=C.dtype) + if not A.is_complex(): + # C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous() + C = rearrange(C, "(b l) dstate -> b 1 dstate l", l=L).contiguous() + else: + C = rearrange(C, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous() + else: + if C.stride(-1) != 1: + C = C.contiguous() + if D is not None: + D = D.contiguous() + out_f, scan_intermediates_f, out_z_f = selective_scan_cuda.fwd( + conv1d_out, delta, A, B, C, D, z, delta_bias, delta_softplus + ) + assert not A_b.is_complex(), "A should not be complex!!" 
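+ # Reverse-direction scan: flip the sequence axis, reuse the same kernel with A_b, and flip the result back before summing with the forward scan.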
+ out_b, scan_intermediates_b, out_z_b = selective_scan_cuda.fwd( + conv1d_out.flip([-1]), delta.flip([-1]), A_b, B.flip([-1]), C.flip([-1]), D, z.flip([-1]), delta_bias, delta_softplus, + ) + + out_z = out_z_f + out_z_b.flip([-1]) + + ctx.delta_softplus = delta_softplus + ctx.out_proj_bias_is_None = out_proj_bias is None + ctx.checkpoint_lvl = checkpoint_lvl + if checkpoint_lvl >= 1: # Will recompute conv1d_out and delta in the backward pass + conv1d_out, delta = None, None + ctx.save_for_backward(xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, + delta_proj_weight, out_proj_weight, conv1d_out, delta, + A, A_b, B, C, D, delta_bias, scan_intermediates_f, scan_intermediates_b, out_f, out_b) + return F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias) + + @staticmethod + @custom_bwd + def backward(ctx, dout): + # dout: (batch, seqlen, dim) + (xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, delta_proj_weight, out_proj_weight, + conv1d_out, delta, A, A_b, B, C, D, delta_bias, scan_intermediates_f, scan_intermediates_b, out_f, out_b) = ctx.saved_tensors + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + x, z = xz.chunk(2, dim=1) + if dout.stride(-1) != 1: + dout = dout.contiguous() + if ctx.checkpoint_lvl == 1: + conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True) + delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), + "d (b l) -> b d l", l = L) + # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the + # backward of selective_scan_cuda with the backward of chunk). + dxz = torch.empty_like(xz) # (batch, dim, seqlen) + dx, dz = dxz.chunk(2, dim=1) + dout = rearrange(dout, "b l e -> e (b l)") + dout_y = rearrange(out_proj_weight.t() @ dout, "d (b l) -> b d l", l=L) + dconv1d_out, ddelta, dA, dB, dC, dD, ddelta_bias, dz, out_z_f = selective_scan_cuda.bwd( + conv1d_out, delta, A, B, C, D, z, delta_bias, dout_y, scan_intermediates_f, out_f, dz, + ctx.delta_softplus, + True # option to recompute out_z + ) + # flip one + dz_b = torch.empty_like(dz) + dconv1d_out_f_b, ddelta_f_b, dA_b, dB_f_b, dC_f_b, dD_b, ddelta_bias_b, dz_b, out_z_b = selective_scan_cuda.bwd( + conv1d_out.flip([-1]), delta.flip([-1]), A_b, B.flip([-1]), C.flip([-1]), D, z.flip([-1]), delta_bias, dout_y.flip([-1]), scan_intermediates_b, out_b, dz_b, + ctx.delta_softplus, + True # option to recompute out_z + ) + + dconv1d_out = dconv1d_out + dconv1d_out_f_b.flip([-1]) + ddelta = ddelta + ddelta_f_b.flip([-1]) + dB = dB + dB_f_b.flip([-1]) + dC = dC + dC_f_b.flip([-1]) + dD = dD + dD_b + ddelta_bias = ddelta_bias + ddelta_bias_b + dz = dz + dz_b.flip([-1]) + out_z = out_z_f + out_z_b.flip([-1]) + + dout_proj_weight = torch.einsum("eB,dB->ed", dout, rearrange(out_z, "b d l -> d (b l)")) + dout_proj_bias = dout.sum(dim=(0, 1)) if not ctx.out_proj_bias_is_None else None + dD = dD if D is not None else None + dx_dbl = torch.empty_like(x_dbl) + dB_proj_bias = None + if ctx.is_variable_B: + if not A.is_complex(): + dB = rearrange(dB, "b 1 dstate l -> (b l) dstate").contiguous() + else: + dB = rearrange(dB, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous() + dB_proj_bias = dB.sum(0) if not ctx.B_proj_bias_is_None else None + dx_dbl[:, delta_rank:delta_rank + d_state] = dB # (bl d) + dB = None + dC_proj_bias = None + if ctx.is_variable_C: + if not A.is_complex(): + dC = rearrange(dC, "b 1 dstate l -> (b l) dstate").contiguous() + else: + dC = 
rearrange(dC, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous() + dC_proj_bias = dC.sum(0) if not ctx.C_proj_bias_is_None else None + dx_dbl[:, -d_state:] = dC # (bl d) + dC = None + ddelta = rearrange(ddelta, "b d l -> d (b l)") + ddelta_proj_weight = torch.einsum("dB,Br->dr", ddelta, x_dbl[:, :delta_rank]) + dx_dbl[:, :delta_rank] = torch.einsum("dB,dr->Br", ddelta, delta_proj_weight) + dconv1d_out = rearrange(dconv1d_out, "b d l -> d (b l)") + dx_proj_weight = torch.einsum("Br,Bd->rd", dx_dbl, rearrange(conv1d_out, "b d l -> (b l) d")) + dconv1d_out = torch.addmm(dconv1d_out, x_proj_weight.t(), dx_dbl.t(), out=dconv1d_out) + dconv1d_out = rearrange(dconv1d_out, "d (b l) -> b d l", b=x.shape[0], l=x.shape[-1]) + # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the + # backward of conv1d with the backward of chunk). + dx, dconv1d_weight, dconv1d_bias = causal_conv1d_cuda.causal_conv1d_bwd( + x, conv1d_weight, conv1d_bias, dconv1d_out, dx, True + ) + dconv1d_bias = dconv1d_bias if conv1d_bias is not None else None + dconv1d_weight = rearrange(dconv1d_weight, "d w -> d 1 w") + return (dxz, dconv1d_weight, dconv1d_bias, dx_proj_weight, ddelta_proj_weight, + dout_proj_weight, dout_proj_bias, + dA, dA_b, dB, dC, dD, + ddelta_bias if delta_bias is not None else None, + dB_proj_bias, dC_proj_bias, None) + + +def mamba_inner_fn( + xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True +): + return MambaInnerFn.apply(xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, B, C, D, delta_bias, B_proj_bias, C_proj_bias, delta_softplus) + +def bimamba_inner_fn( + xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, A_b, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True +): + return BiMambaInnerFn.apply(xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, A_b, B, C, D, delta_bias, B_proj_bias, C_proj_bias, delta_softplus) + + +def mamba_inner_fn_no_out_proj( + xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True +): + return MambaInnerFnNoOutProj.apply(xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + A, B, C, D, delta_bias, B_proj_bias, C_proj_bias, delta_softplus) + + +def mamba_inner_ref( + xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True +): + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + x, z = xz.chunk(2, dim=1) + x = causal_conv1d_fn(x, rearrange(conv1d_weight, "d 1 w -> d w"), conv1d_bias, "silu") + # We're being very careful here about the layout, to avoid extra transposes. + # We want delta to have d as the slowest moving dimension + # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. 
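+ # Reference path: same projection layout as the fused kernel, with delta / B / C sliced out of a single x_dbl GEMM.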
+ x_dbl = F.linear(rearrange(x, 'b d l -> (b l) d'), x_proj_weight) # (bl d) + delta = delta_proj_weight @ x_dbl[:, :delta_rank].t() + delta = rearrange(delta, "d (b l) -> b d l", l=L) + if B is None: # variable B + B = x_dbl[:, delta_rank:delta_rank + d_state] # (bl d) + if B_proj_bias is not None: + B = B + B_proj_bias.to(dtype=B.dtype) + if not A.is_complex(): + B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous() + else: + B = rearrange(B, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous() + if C is None: # variable B + C = x_dbl[:, -d_state:] # (bl d) + if C_proj_bias is not None: + C = C + C_proj_bias.to(dtype=C.dtype) + if not A.is_complex(): + C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous() + else: + C = rearrange(C, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous() + y = selective_scan_fn(x, delta, A, B, C, D, z=z, delta_bias=delta_bias, delta_softplus=True) + return F.linear(rearrange(y, "b d l -> b l d"), out_proj_weight, out_proj_bias) + + +def bimamba_inner_ref( + xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, A_b, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True +): + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + x, z = xz.chunk(2, dim=1) + x = causal_conv1d_fn(x, rearrange(conv1d_weight, "d 1 w -> d w"), conv1d_bias, "silu") + # We're being very careful here about the layout, to avoid extra transposes. + # We want delta to have d as the slowest moving dimension + # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. + x_dbl = F.linear(rearrange(x, 'b d l -> (b l) d'), x_proj_weight) # (bl d) + delta = delta_proj_weight @ x_dbl[:, :delta_rank].t() + delta = rearrange(delta, "d (b l) -> b d l", l=L) + if B is None: # variable B + B = x_dbl[:, delta_rank:delta_rank + d_state] # (bl d) + if B_proj_bias is not None: + B = B + B_proj_bias.to(dtype=B.dtype) + if not A.is_complex(): + B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous() + else: + B = rearrange(B, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous() + if C is None: # variable B + C = x_dbl[:, -d_state:] # (bl d) + if C_proj_bias is not None: + C = C + C_proj_bias.to(dtype=C.dtype) + if not A.is_complex(): + C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous() + else: + C = rearrange(C, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous() + y = selective_scan_fn(x, delta, A, B, C, D, z=z, delta_bias=delta_bias, delta_softplus=True) + y_b = selective_scan_fn(x.flip([-1]), delta.flip([-1]), A_b, B.flip([-1]), C.flip([-1]), D, z.flip([-1]), delta_bias, delta_softplus=True) + y = y + y_b.flip([-1]) + return F.linear(rearrange(y, "b d l -> b l d"), out_proj_weight, out_proj_bias) diff --git a/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/__init__.py b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/layernorm.py b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/layernorm.py new file mode 100644 index 0000000000000000000000000000000000000000..8df9d042a34b6584196f218f5ffeeb104799bd5e --- /dev/null +++ 
b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/layernorm.py @@ -0,0 +1,636 @@ +# Copyright (c) 2023, Tri Dao. +# Implement residual + layer_norm / rms_norm. + +# Based on the Triton LayerNorm tutorial: https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html +# For the backward pass, we keep weight_grad and bias_grad in registers and accumulate. +# This is faster for dimensions up to 8k, but after that it's much slower due to register spilling. +# The models we train have hidden dim up to 8k anyway (e.g. Llama 70B), so this is fine. + +import math + +import torch +import torch.nn.functional as F +from torch.cuda.amp import custom_fwd, custom_bwd + +import triton +import triton.language as tl + + +def layer_norm_ref(x, weight, bias, residual=None, eps=1e-6, prenorm=False, upcast=False): + dtype = x.dtype + if upcast: + weight = weight.float() + bias = bias.float() if bias is not None else None + if upcast: + x = x.float() + residual = residual.float() if residual is not None else residual + if residual is not None: + x = (x + residual).to(x.dtype) + out = F.layer_norm(x.to(weight.dtype), x.shape[-1:], weight=weight, bias=bias, eps=eps).to( + dtype + ) + return out if not prenorm else (out, x) + + +def rms_norm_ref(x, weight, bias, residual=None, eps=1e-6, prenorm=False, upcast=False): + dtype = x.dtype + if upcast: + weight = weight.float() + bias = bias.float() if bias is not None else None + if upcast: + x = x.float() + residual = residual.float() if residual is not None else residual + if residual is not None: + x = (x + residual).to(x.dtype) + rstd = 1 / torch.sqrt((x.square()).mean(dim=-1, keepdim=True) + eps) + out = (x * rstd * weight) + bias if bias is not None else (x * rstd * weight) + out = out.to(dtype) + return out if not prenorm else (out, x) + + +@triton.autotune( + configs=[ + triton.Config({}, num_warps=1), + triton.Config({}, num_warps=2), + triton.Config({}, num_warps=4), + triton.Config({}, num_warps=8), + triton.Config({}, num_warps=16), + triton.Config({}, num_warps=32), + ], + key=["N", "HAS_RESIDUAL", "STORE_RESIDUAL_OUT", "IS_RMS_NORM", "HAS_BIAS"], +) +# @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None}) +# @triton.heuristics({"HAS_RESIDUAL": lambda args: args["RESIDUAL"] is not None}) +@triton.jit +def _layer_norm_fwd_1pass_kernel( + X, # pointer to the input + Y, # pointer to the output + W, # pointer to the weights + B, # pointer to the biases + RESIDUAL, # pointer to the residual + RESIDUAL_OUT, # pointer to the residual + Mean, # pointer to the mean + Rstd, # pointer to the 1/std + stride_x_row, # how much to increase the pointer when moving by 1 row + stride_y_row, + stride_res_row, + stride_res_out_row, + N, # number of columns in X + eps, # epsilon to avoid division by zero + IS_RMS_NORM: tl.constexpr, + BLOCK_N: tl.constexpr, + HAS_RESIDUAL: tl.constexpr, + STORE_RESIDUAL_OUT: tl.constexpr, + HAS_BIAS: tl.constexpr, +): + # Map the program id to the row of X and Y it should compute. 
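+ # One program instance normalizes one row; all N columns are handled in a single BLOCK_N tile (the launcher raises if the feature dim would exceed the 64KB limit).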
+ row = tl.program_id(0) + X += row * stride_x_row + Y += row * stride_y_row + if HAS_RESIDUAL: + RESIDUAL += row * stride_res_row + if STORE_RESIDUAL_OUT: + RESIDUAL_OUT += row * stride_res_out_row + # Compute mean and variance + cols = tl.arange(0, BLOCK_N) + x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32) + if HAS_RESIDUAL: + residual = tl.load(RESIDUAL + cols, mask=cols < N, other=0.0).to(tl.float32) + x += residual + if STORE_RESIDUAL_OUT: + tl.store(RESIDUAL_OUT + cols, x, mask=cols < N) + if not IS_RMS_NORM: + mean = tl.sum(x, axis=0) / N + tl.store(Mean + row, mean) + xbar = tl.where(cols < N, x - mean, 0.0) + var = tl.sum(xbar * xbar, axis=0) / N + else: + xbar = tl.where(cols < N, x, 0.0) + var = tl.sum(xbar * xbar, axis=0) / N + rstd = 1 / tl.sqrt(var + eps) + tl.store(Rstd + row, rstd) + # Normalize and apply linear transformation + mask = cols < N + w = tl.load(W + cols, mask=mask).to(tl.float32) + if HAS_BIAS: + b = tl.load(B + cols, mask=mask).to(tl.float32) + x_hat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd + y = x_hat * w + b if HAS_BIAS else x_hat * w + # Write output + tl.store(Y + cols, y, mask=mask) + + +def _layer_norm_fwd( + x, weight, bias, eps, residual=None, out_dtype=None, residual_dtype=None, is_rms_norm=False +): + if residual is not None: + residual_dtype = residual.dtype + M, N = x.shape + assert x.stride(-1) == 1 + if residual is not None: + assert residual.stride(-1) == 1 + assert residual.shape == (M, N) + assert weight.shape == (N,) + assert weight.stride(-1) == 1 + if bias is not None: + assert bias.stride(-1) == 1 + assert bias.shape == (N,) + # allocate output + y = torch.empty_like(x, dtype=x.dtype if out_dtype is None else out_dtype) + assert y.stride(-1) == 1 + if residual is not None or (residual_dtype is not None and residual_dtype != x.dtype): + residual_out = torch.empty(M, N, device=x.device, dtype=residual_dtype) + assert residual_out.stride(-1) == 1 + else: + residual_out = None + mean = torch.empty((M,), dtype=torch.float32, device="cuda") if not is_rms_norm else None + rstd = torch.empty((M,), dtype=torch.float32, device="cuda") + # Less than 64KB per feature: enqueue fused kernel + MAX_FUSED_SIZE = 65536 // x.element_size() + BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N)) + if N > BLOCK_N: + raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.") + # heuristics for number of warps + with torch.cuda.device(x.device.index): + _layer_norm_fwd_1pass_kernel[(M,)]( + x, + y, + weight, + bias, + residual, + residual_out, + mean, + rstd, + x.stride(0), + y.stride(0), + residual.stride(0) if residual is not None else 0, + residual_out.stride(0) if residual_out is not None else 0, + N, + eps, + is_rms_norm, + BLOCK_N, + residual is not None, + residual_out is not None, + bias is not None, + ) + # residual_out is None if residual is None and residual_dtype == input_dtype + return y, mean, rstd, residual_out if residual_out is not None else x + + +@triton.autotune( + configs=[ + triton.Config({}, num_warps=1), + triton.Config({}, num_warps=2), + triton.Config({}, num_warps=4), + triton.Config({}, num_warps=8), + triton.Config({}, num_warps=16), + triton.Config({}, num_warps=32), + ], + key=["N", "HAS_DRESIDUAL", "STORE_DRESIDUAL", "IS_RMS_NORM", "HAS_BIAS"], +) +# @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None}) +# @triton.heuristics({"HAS_DRESIDUAL": lambda args: args["DRESIDUAL"] is not None}) +# @triton.heuristics({"STORE_DRESIDUAL": lambda args: args["DRESIDUAL_IN"] is not 
None}) +@triton.heuristics({"RECOMPUTE_OUTPUT": lambda args: args["Y"] is not None}) +@triton.jit +def _layer_norm_bwd_kernel( + X, # pointer to the input + W, # pointer to the weights + B, # pointer to the biases + Y, # pointer to the output to be recomputed + DY, # pointer to the output gradient + DX, # pointer to the input gradient + DW, # pointer to the partial sum of weights gradient + DB, # pointer to the partial sum of biases gradient + DRESIDUAL, + DRESIDUAL_IN, + Mean, # pointer to the mean + Rstd, # pointer to the 1/std + stride_x_row, # how much to increase the pointer when moving by 1 row + stride_y_row, + stride_dy_row, + stride_dx_row, + stride_dres_row, + stride_dres_in_row, + M, # number of rows in X + N, # number of columns in X + eps, # epsilon to avoid division by zero + rows_per_program, + IS_RMS_NORM: tl.constexpr, + BLOCK_N: tl.constexpr, + HAS_DRESIDUAL: tl.constexpr, + STORE_DRESIDUAL: tl.constexpr, + HAS_BIAS: tl.constexpr, + RECOMPUTE_OUTPUT: tl.constexpr, +): + # Map the program id to the elements of X, DX, and DY it should compute. + row_block_id = tl.program_id(0) + row_start = row_block_id * rows_per_program + cols = tl.arange(0, BLOCK_N) + mask = cols < N + X += row_start * stride_x_row + if HAS_DRESIDUAL: + DRESIDUAL += row_start * stride_dres_row + if STORE_DRESIDUAL: + DRESIDUAL_IN += row_start * stride_dres_in_row + DY += row_start * stride_dy_row + DX += row_start * stride_dx_row + if RECOMPUTE_OUTPUT: + Y += row_start * stride_y_row + w = tl.load(W + cols, mask=mask).to(tl.float32) + if RECOMPUTE_OUTPUT and HAS_BIAS: + b = tl.load(B + cols, mask=mask, other=0.0).to(tl.float32) + dw = tl.zeros((BLOCK_N,), dtype=tl.float32) + if HAS_BIAS: + db = tl.zeros((BLOCK_N,), dtype=tl.float32) + row_end = min((row_block_id + 1) * rows_per_program, M) + for row in range(row_start, row_end): + # Load data to SRAM + x = tl.load(X + cols, mask=mask, other=0).to(tl.float32) + dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32) + if not IS_RMS_NORM: + mean = tl.load(Mean + row) + rstd = tl.load(Rstd + row) + # Compute dx + xhat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd + xhat = tl.where(mask, xhat, 0.0) + if RECOMPUTE_OUTPUT: + y = xhat * w + b if HAS_BIAS else xhat * w + tl.store(Y + cols, y, mask=mask) + wdy = w * dy + dw += dy * xhat + if HAS_BIAS: + db += dy + if not IS_RMS_NORM: + c1 = tl.sum(xhat * wdy, axis=0) / N + c2 = tl.sum(wdy, axis=0) / N + dx = (wdy - (xhat * c1 + c2)) * rstd + else: + c1 = tl.sum(xhat * wdy, axis=0) / N + dx = (wdy - xhat * c1) * rstd + if HAS_DRESIDUAL: + dres = tl.load(DRESIDUAL + cols, mask=mask, other=0).to(tl.float32) + dx += dres + # Write dx + if STORE_DRESIDUAL: + tl.store(DRESIDUAL_IN + cols, dx, mask=mask) + tl.store(DX + cols, dx, mask=mask) + + X += stride_x_row + if HAS_DRESIDUAL: + DRESIDUAL += stride_dres_row + if STORE_DRESIDUAL: + DRESIDUAL_IN += stride_dres_in_row + if RECOMPUTE_OUTPUT: + Y += stride_y_row + DY += stride_dy_row + DX += stride_dx_row + tl.store(DW + row_block_id * N + cols, dw, mask=mask) + if HAS_BIAS: + tl.store(DB + row_block_id * N + cols, db, mask=mask) + + +def _layer_norm_bwd( + dy, + x, + weight, + bias, + eps, + mean, + rstd, + dresidual=None, + has_residual=False, + is_rms_norm=False, + x_dtype=None, + recompute_output=False, +): + M, N = x.shape + assert x.stride(-1) == 1 + assert dy.stride(-1) == 1 + assert dy.shape == (M, N) + if dresidual is not None: + assert dresidual.stride(-1) == 1 + assert dresidual.shape == (M, N) + assert weight.shape == (N,) + assert 
weight.stride(-1) == 1 + if bias is not None: + assert bias.stride(-1) == 1 + assert bias.shape == (N,) + # allocate output + dx = ( + torch.empty_like(x) + if x_dtype is None + else torch.empty(M, N, dtype=x_dtype, device=x.device) + ) + dresidual_in = torch.empty_like(x) if has_residual and dx.dtype != x.dtype else None + y = torch.empty(M, N, dtype=dy.dtype, device=dy.device) if recompute_output else None + + # Less than 64KB per feature: enqueue fused kernel + MAX_FUSED_SIZE = 65536 // x.element_size() + BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N)) + if N > BLOCK_N: + raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.") + sm_count = torch.cuda.get_device_properties(x.device).multi_processor_count + _dw = torch.empty((sm_count, N), dtype=torch.float32, device=weight.device) + _db = ( + torch.empty((sm_count, N), dtype=torch.float32, device=bias.device) + if bias is not None + else None + ) + rows_per_program = math.ceil(M / sm_count) + grid = (sm_count,) + with torch.cuda.device(x.device.index): + _layer_norm_bwd_kernel[grid]( + x, + weight, + bias, + y, + dy, + dx, + _dw, + _db, + dresidual, + dresidual_in, + mean, + rstd, + x.stride(0), + 0 if not recompute_output else y.stride(0), + dy.stride(0), + dx.stride(0), + dresidual.stride(0) if dresidual is not None else 0, + dresidual_in.stride(0) if dresidual_in is not None else 0, + M, + N, + eps, + rows_per_program, + is_rms_norm, + BLOCK_N, + dresidual is not None, + dresidual_in is not None, + bias is not None, + ) + dw = _dw.sum(0).to(weight.dtype) + db = _db.sum(0).to(bias.dtype) if bias is not None else None + # Don't need to compute dresidual_in separately in this case + if has_residual and dx.dtype == x.dtype: + dresidual_in = dx + return (dx, dw, db, dresidual_in) if not recompute_output else (dx, dw, db, dresidual_in, y) + + +class LayerNormFn(torch.autograd.Function): + @staticmethod + def forward( + ctx, + x, + weight, + bias, + residual=None, + eps=1e-6, + prenorm=False, + residual_in_fp32=False, + is_rms_norm=False, + ): + x_shape_og = x.shape + # reshape input data into 2D tensor + x = x.reshape(-1, x.shape[-1]) + if x.stride(-1) != 1: + x = x.contiguous() + if residual is not None: + assert residual.shape == x_shape_og + residual = residual.reshape(-1, residual.shape[-1]) + if residual.stride(-1) != 1: + residual = residual.contiguous() + weight = weight.contiguous() + if bias is not None: + bias = bias.contiguous() + residual_dtype = ( + residual.dtype + if residual is not None + else (torch.float32 if residual_in_fp32 else None) + ) + y, mean, rstd, residual_out = _layer_norm_fwd( + x, weight, bias, eps, residual, residual_dtype=residual_dtype, is_rms_norm=is_rms_norm + ) + ctx.save_for_backward(residual_out, weight, bias, mean, rstd) + ctx.x_shape_og = x_shape_og + ctx.eps = eps + ctx.is_rms_norm = is_rms_norm + ctx.has_residual = residual is not None + ctx.prenorm = prenorm + ctx.x_dtype = x.dtype + y = y.reshape(x_shape_og) + return y if not prenorm else (y, residual_out.reshape(x_shape_og)) + + @staticmethod + def backward(ctx, dy, *args): + x, weight, bias, mean, rstd = ctx.saved_tensors + dy = dy.reshape(-1, dy.shape[-1]) + if dy.stride(-1) != 1: + dy = dy.contiguous() + assert dy.shape == x.shape + if ctx.prenorm: + dresidual = args[0] + dresidual = dresidual.reshape(-1, dresidual.shape[-1]) + if dresidual.stride(-1) != 1: + dresidual = dresidual.contiguous() + assert dresidual.shape == x.shape + else: + dresidual = None + dx, dw, db, dresidual_in = _layer_norm_bwd( + dy, + x, + 
weight, + bias, + ctx.eps, + mean, + rstd, + dresidual, + ctx.has_residual, + ctx.is_rms_norm, + x_dtype=ctx.x_dtype, + ) + return ( + dx.reshape(ctx.x_shape_og), + dw, + db, + dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None, + None, + None, + None, + None, + ) + + +def layer_norm_fn( + x, + weight, + bias, + residual=None, + eps=1e-6, + prenorm=False, + residual_in_fp32=False, + is_rms_norm=False, +): + return LayerNormFn.apply(x, weight, bias, residual, eps, prenorm, residual_in_fp32, is_rms_norm) + + +def rms_norm_fn(x, weight, bias, residual=None, prenorm=False, residual_in_fp32=False, eps=1e-6): + return LayerNormFn.apply(x, weight, bias, residual, eps, prenorm, residual_in_fp32, True) + + +class RMSNorm(torch.nn.Module): + def __init__(self, hidden_size, eps=1e-5, device=None, dtype=None): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.eps = eps + self.weight = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs)) + self.register_parameter("bias", None) + self.reset_parameters() + + def reset_parameters(self): + torch.nn.init.ones_(self.weight) + + def forward(self, x, residual=None, prenorm=False, residual_in_fp32=False): + return rms_norm_fn( + x, + self.weight, + self.bias, + residual=residual, + eps=self.eps, + prenorm=prenorm, + residual_in_fp32=residual_in_fp32, + is_rms_norm=True, + ) + + +class LayerNormLinearFn(torch.autograd.Function): + @staticmethod + @custom_fwd + def forward( + ctx, + x, + norm_weight, + norm_bias, + linear_weight, + linear_bias, + residual=None, + eps=1e-6, + prenorm=False, + residual_in_fp32=False, + is_rms_norm=False, + ): + x_shape_og = x.shape + # reshape input data into 2D tensor + x = x.reshape(-1, x.shape[-1]) + if x.stride(-1) != 1: + x = x.contiguous() + if residual is not None: + assert residual.shape == x_shape_og + residual = residual.reshape(-1, residual.shape[-1]) + if residual.stride(-1) != 1: + residual = residual.contiguous() + norm_weight = norm_weight.contiguous() + if norm_bias is not None: + norm_bias = norm_bias.contiguous() + residual_dtype = ( + residual.dtype + if residual is not None + else (torch.float32 if residual_in_fp32 else None) + ) + y, mean, rstd, residual_out = _layer_norm_fwd( + x, + norm_weight, + norm_bias, + eps, + residual, + out_dtype=None if not torch.is_autocast_enabled() else torch.get_autocast_gpu_dtype(), + residual_dtype=residual_dtype, + is_rms_norm=is_rms_norm, + ) + y = y.reshape(x_shape_og) + dtype = torch.get_autocast_gpu_dtype() if torch.is_autocast_enabled() else y.dtype + linear_weight = linear_weight.to(dtype) + linear_bias = linear_bias.to(dtype) if linear_bias is not None else None + out = F.linear(y.to(linear_weight.dtype), linear_weight, linear_bias) + # We don't store y, will be recomputed in the backward pass to save memory + ctx.save_for_backward(residual_out, norm_weight, norm_bias, linear_weight, mean, rstd) + ctx.x_shape_og = x_shape_og + ctx.eps = eps + ctx.is_rms_norm = is_rms_norm + ctx.has_residual = residual is not None + ctx.prenorm = prenorm + ctx.x_dtype = x.dtype + ctx.linear_bias_is_none = linear_bias is None + return out if not prenorm else (out, residual_out.reshape(x_shape_og)) + + @staticmethod + @custom_bwd + def backward(ctx, dout, *args): + x, norm_weight, norm_bias, linear_weight, mean, rstd = ctx.saved_tensors + dout = dout.reshape(-1, dout.shape[-1]) + dy = F.linear(dout, linear_weight.t()) + dlinear_bias = None if ctx.linear_bias_is_none else dout.sum(0) + if dy.stride(-1) != 1: + dy = dy.contiguous() + 
assert dy.shape == x.shape + if ctx.prenorm: + dresidual = args[0] + dresidual = dresidual.reshape(-1, dresidual.shape[-1]) + if dresidual.stride(-1) != 1: + dresidual = dresidual.contiguous() + assert dresidual.shape == x.shape + else: + dresidual = None + dx, dnorm_weight, dnorm_bias, dresidual_in, y = _layer_norm_bwd( + dy, + x, + norm_weight, + norm_bias, + ctx.eps, + mean, + rstd, + dresidual, + ctx.has_residual, + ctx.is_rms_norm, + x_dtype=ctx.x_dtype, + recompute_output=True, + ) + dlinear_weight = torch.einsum("bo,bi->oi", dout, y) + return ( + dx.reshape(ctx.x_shape_og), + dnorm_weight, + dnorm_bias, + dlinear_weight, + dlinear_bias, + dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None, + None, + None, + None, + None, + ) + + +def layer_norm_linear_fn( + x, + norm_weight, + norm_bias, + linear_weight, + linear_bias, + residual=None, + eps=1e-6, + prenorm=False, + residual_in_fp32=False, + is_rms_norm=False, +): + return LayerNormLinearFn.apply( + x, + norm_weight, + norm_bias, + linear_weight, + linear_bias, + residual, + eps, + prenorm, + residual_in_fp32, + is_rms_norm, + ) diff --git a/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/selective_state_update.py b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/selective_state_update.py new file mode 100644 index 0000000000000000000000000000000000000000..fa95de73f173292914c5f00fbe9426937d00e502 --- /dev/null +++ b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/selective_state_update.py @@ -0,0 +1,192 @@ +# Copyright (c) 2023, Tri Dao. + +"""We want triton==2.1.0 for this +""" + +import math +import torch +import torch.nn.functional as F + +import triton +import triton.language as tl + +from einops import rearrange, repeat + + +@triton.heuristics({"HAS_DT_BIAS": lambda args: args["dt_bias_ptr"] is not None}) +@triton.heuristics({"HAS_D": lambda args: args["D_ptr"] is not None}) +@triton.heuristics({"HAS_Z": lambda args: args["z_ptr"] is not None}) +@triton.heuristics({"BLOCK_SIZE_DSTATE": lambda args: triton.next_power_of_2(args["dstate"])}) +@triton.jit +def _selective_scan_update_kernel( + # Pointers to matrices + state_ptr, x_ptr, dt_ptr, dt_bias_ptr, A_ptr, B_ptr, C_ptr, D_ptr, z_ptr, out_ptr, + # Matrix dimensions + batch, dim, dstate, + # Strides + stride_state_batch, stride_state_dim, stride_state_dstate, + stride_x_batch, stride_x_dim, + stride_dt_batch, stride_dt_dim, + stride_dt_bias_dim, + stride_A_dim, stride_A_dstate, + stride_B_batch, stride_B_dstate, + stride_C_batch, stride_C_dstate, + stride_D_dim, + stride_z_batch, stride_z_dim, + stride_out_batch, stride_out_dim, + # Meta-parameters + DT_SOFTPLUS: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, + HAS_DT_BIAS: tl.constexpr, + HAS_D: tl.constexpr, + HAS_Z: tl.constexpr, + BLOCK_SIZE_DSTATE: tl.constexpr, +): + pid_m = tl.program_id(axis=0) + pid_b = tl.program_id(axis=1) + state_ptr += pid_b * stride_state_batch + x_ptr += pid_b * stride_x_batch + dt_ptr += pid_b * stride_dt_batch + B_ptr += pid_b * stride_B_batch + C_ptr += pid_b * stride_C_batch + if HAS_Z: + z_ptr += pid_b * stride_z_batch + out_ptr += pid_b * stride_out_batch + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = tl.arange(0, BLOCK_SIZE_DSTATE) + state_ptrs = state_ptr + (offs_m[:, None] * stride_state_dim + offs_n[None, :] * stride_state_dstate) + x_ptrs = x_ptr + offs_m * stride_x_dim + dt_ptrs = dt_ptr + offs_m * stride_dt_dim + if HAS_DT_BIAS: + dt_bias_ptrs = dt_bias_ptr + offs_m * 
stride_dt_bias_dim + A_ptrs = A_ptr + (offs_m[:, None] * stride_A_dim + offs_n[None, :] * stride_A_dstate) + B_ptrs = B_ptr + offs_n * stride_B_dstate + C_ptrs = C_ptr + offs_n * stride_C_dstate + if HAS_D: + D_ptrs = D_ptr + offs_m * stride_D_dim + if HAS_Z: + z_ptrs = z_ptr + offs_m * stride_z_dim + out_ptrs = out_ptr + offs_m * stride_out_dim + + state = tl.load(state_ptrs, mask=(offs_m[:, None] < dim) & (offs_n[None, :] < dstate), other=0.0) + x = tl.load(x_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + dt = tl.load(dt_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + if HAS_DT_BIAS: + dt += tl.load(dt_bias_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + if DT_SOFTPLUS: + dt = tl.log(1.0 + tl.exp(dt)) + A = tl.load(A_ptrs, mask=(offs_m[:, None] < dim) & (offs_n[None, :] < dstate), other=0.0).to(tl.float32) + dA = tl.exp(A * dt[:, None]) + B = tl.load(B_ptrs, mask=offs_n < dstate, other=0.0).to(tl.float32) + C = tl.load(C_ptrs, mask=offs_n < dstate, other=0.0).to(tl.float32) + if HAS_D: + D = tl.load(D_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + if HAS_Z: + z = tl.load(z_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + + dB = B[None, :] * dt[:, None] + state = state * dA + dB * x[:, None] + tl.store(state_ptrs, state, mask=(offs_m[:, None] < dim) & (offs_n[None, :] < dstate)) + out = tl.sum(state * C[None, :], axis=1) + if HAS_D: + out += x * D + if HAS_Z: + out *= z * tl.sigmoid(z) + tl.store(out_ptrs, out, mask=offs_m < dim) + + +def selective_state_update(state, x, dt, A, B, C, D=None, z=None, dt_bias=None, dt_softplus=False): + """ + Argument: + state: (batch, dim, dstate) + x: (batch, dim) + dt: (batch, dim) + A: (dim, dstate) + B: (batch, dstate) + C: (batch, dstate) + D: (dim,) + z: (batch, dim) + dt_bias: (dim,) + Return: + out: (batch, dim) + """ + batch, dim, dstate = state.shape + assert x.shape == (batch, dim) + assert dt.shape == x.shape + assert A.shape == (dim, dstate) + assert B.shape == (batch, dstate) + assert C.shape == B.shape + if D is not None: + assert D.shape == (dim,) + if z is not None: + assert z.shape == x.shape + if dt_bias is not None: + assert dt_bias.shape == (dim,) + out = torch.empty_like(x) + grid = lambda META: (triton.cdiv(dim, META['BLOCK_SIZE_M']), batch) + z_strides = ((z.stride(0), z.stride(1)) if z is not None else (0, 0)) + # We don't want autotune since it will overwrite the state + # We instead tune by hand. 
+ BLOCK_SIZE_M, num_warps = ((32, 4) if dstate <= 16 + else ((16, 4) if dstate <= 32 else + ((8, 4) if dstate <= 64 else + ((4, 4) if dstate <= 128 else + ((4, 8)))))) + with torch.cuda.device(x.device.index): + _selective_scan_update_kernel[grid]( + state, x, dt, dt_bias, A, B, C, D, z, out, + batch, dim, dstate, + state.stride(0), state.stride(1), state.stride(2), + x.stride(0), x.stride(1), + dt.stride(0), dt.stride(1), + dt_bias.stride(0) if dt_bias is not None else 0, + A.stride(0), A.stride(1), + B.stride(0), B.stride(1), + C.stride(0), C.stride(1), + D.stride(0) if D is not None else 0, + z_strides[0], z_strides[1], + out.stride(0), out.stride(1), + dt_softplus, + BLOCK_SIZE_M, + num_warps=num_warps, + ) + return out + + +def selective_state_update_ref(state, x, dt, A, B, C, D=None, z=None, dt_bias=None, dt_softplus=False): + """ + Argument: + state: (batch, dim, dstate) + x: (batch, dim) + dt: (batch, dim) + A: (dim, dstate) + B: (batch, dstate) + C: (batch, dstate) + D: (dim,) + z: (batch, dim) + dt_bias: (dim,) + Return: + out: (batch, dim) + """ + batch, dim, dstate = state.shape + assert x.shape == (batch, dim) + assert dt.shape == x.shape + assert A.shape == (dim, dstate) + assert B.shape == (batch, dstate) + assert C.shape == B.shape + if D is not None: + assert D.shape == (dim,) + if z is not None: + assert z.shape == x.shape + if dt_bias is not None: + assert dt_bias.shape == (dim,) + dt = dt + dt_bias + dt = F.softplus(dt) if dt_softplus else dt + dA = torch.exp(rearrange(dt, "b d -> b d 1") * A) # (batch, dim, dstate) + dB = rearrange(dt, "b d -> b d 1") * rearrange(B, "b n -> b 1 n") # (batch, dim, dstate) + state.copy_(state * dA + dB * rearrange(x, "b d -> b d 1")) # (batch, dim, dstate + out = torch.einsum("bdn,bn->bd", state.to(C.dtype), C) + if D is not None: + out += (x * D).to(out.dtype) + return (out if z is None else out * F.silu(z)).to(x.dtype) diff --git a/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/__init__.py b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/generation.py b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/generation.py new file mode 100644 index 0000000000000000000000000000000000000000..9d766b29ac28a388a7d77b22aa2cb1eda733c0f4 --- /dev/null +++ b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/generation.py @@ -0,0 +1,377 @@ +# Copyright (c) 2023, Albert Gu, Tri Dao. 
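The selective_state_update.py module added just above implements one recurrence step of the selective SSM: state is updated in place as state * exp(dt * A) + (dt * B) * x, the output is C · state (plus D * x when D is given), optionally gated by SiLU(z). A minimal smoke test of the fused Triton path against selective_state_update_ref might look like the sketch below; the shapes are made up, a CUDA device with triton installed is assumed, and the import path assumes the package is installed rather than read from this build tree. The body of the new generation.py resumes right after the sketch.

import torch
from mamba_ssm.ops.triton.selective_state_update import (
    selective_state_update,
    selective_state_update_ref,
)

# Made-up sizes small enough to eyeball; the kernel requires a CUDA device.
batch, dim, dstate = 2, 64, 16
device = "cuda"
state = torch.randn(batch, dim, dstate, device=device)
state_ref = state.clone()
x = torch.randn(batch, dim, device=device)
dt = torch.randn(batch, dim, device=device)
A = -torch.rand(dim, dstate, device=device)   # negative entries keep exp(dt * A) bounded
B = torch.randn(batch, dstate, device=device)
C = torch.randn(batch, dstate, device=device)
D = torch.randn(dim, device=device)

out = selective_state_update(state, x, dt, A, B, C, D=D, dt_softplus=True)
out_ref = selective_state_update_ref(state_ref, x, dt, A, B, C, D=D, dt_softplus=True)

# Both the returned output and the in-place state update should agree closely.
print(torch.allclose(out, out_ref, atol=1e-4), torch.allclose(state, state_ref, atol=1e-4))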
+import gc +import time +from collections import namedtuple +from dataclasses import dataclass, field +from functools import partial +from typing import Callable, Optional, Sequence, Union + +import torch +import torch.nn.functional as F +from einops import rearrange, repeat +from torch import Tensor +from torch.profiler import ProfilerActivity, profile, record_function +from transformers.generation import GreedySearchDecoderOnlyOutput, SampleDecoderOnlyOutput + + +@dataclass +class InferenceParams: + """Inference parameters that are passed to the main model in order + to efficienly calculate and store the context during inference.""" + + max_seqlen: int + max_batch_size: int + seqlen_offset: int = 0 + batch_size_offset: int = 0 + key_value_memory_dict: dict = field(default_factory=dict) + lengths_per_sample: Optional[Tensor] = None + + def reset(self, max_seqlen, max_batch_size): + self.max_seqlen = max_seqlen + self.max_batch_size = max_batch_size + self.seqlen_offset = 0 + if self.lengths_per_sample is not None: + self.lengths_per_sample.zero_() + + +# https://github.com/NVIDIA/Megatron-LM/blob/0bb597b42c53355a567aba2a1357cc34b9d99ddd/megatron/text_generation/sampling.py +# https://github.com/huggingface/transformers/blob/a44985b41cfa2de48a5e1de7f1f93b7483da25d1/src/transformers/generation/logits_process.py#L231 +def modify_logits_for_top_k_filtering(logits, top_k): + """Set the logits for none top-k values to -inf. Done in-place.""" + indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] + logits.masked_fill_(indices_to_remove, float("-Inf")) + + +# https://github.com/NVIDIA/Megatron-LM/blob/0bb597b42c53355a567aba2a1357cc34b9d99ddd/megatron/text_generation/sampling.py +# https://github.com/huggingface/transformers/blob/a44985b41cfa2de48a5e1de7f1f93b7483da25d1/src/transformers/generation/logits_process.py#L170 +def modify_logits_for_top_p_filtering(logits, top_p): + """Set the logits for none top-p values to -inf. Done in-place.""" + if top_p <= 0.0 or top_p >= 1.0: + return + # First sort and calculate cumulative sum of probabilities. + sorted_logits, sorted_indices = torch.sort(logits, descending=False) + cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1) + # Remove tokens with cumulative top_p above the threshold (token with 0 are kept) + sorted_indices_to_remove = cumulative_probs <= (1 - top_p) + # scatter sorted tensors to original indexing + indices_to_remove = sorted_indices_to_remove.scatter( + 1, sorted_indices, sorted_indices_to_remove + ) + logits.masked_fill_(indices_to_remove, float("-inf")) + + +def sample(logits, top_k=1, top_p=0.0, temperature=1.0): + """Sample from top-k logits. + Arguments: + logits: Tensor of shape (batch_size, vocab_size) + """ + if top_k == 1: # Short-circuit for greedy decoding + return logits.argmax(dim=-1) + else: + if top_p > 0.0: + assert top_p <= 1.0, "top-p should be in (0, 1]." 
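As an aside on the two in-place filtering helpers defined just above (the body of sample resumes immediately below): top-k keeps only the k largest logits, while top-p sorts the logits in ascending order and masks every token whose cumulative probability is at most 1 - top_p, so the surviving set covers at least top_p of the probability mass. A small worked sketch of the top-p step on made-up numbers, independent of everything else in this file:

import torch

# Hypothetical 1 x 4 logits; softmax gives roughly [0.64, 0.24, 0.09, 0.03].
logits = torch.tensor([[3.0, 2.0, 1.0, 0.0]])
top_p = 0.9

sorted_logits, sorted_indices = torch.sort(logits, descending=False)
cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)   # ~[0.03, 0.12, 0.36, 1.00]
# Only entries with cumulative probability <= 1 - top_p = 0.1 are dropped,
# i.e. just the least likely token in this example.
sorted_indices_to_remove = cumulative_probs <= (1 - top_p)
indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
logits.masked_fill_(indices_to_remove, float("-inf"))
print(logits)   # tensor([[3., 2., 1., -inf]])

When both knobs are set, sample applies top-k first and then runs this top-p filtering on the surviving logits, matching the behaviour documented in decode further down.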
+ if top_k > 0: + top_k = min(top_k, logits.size(-1)) # Safety check + logits_top, indices = torch.topk(logits, top_k, dim=-1) + if temperature != 1.0: + logits_top /= temperature + modify_logits_for_top_p_filtering(logits_top, top_p) + return indices[ + torch.arange(indices.shape[0], device=indices.device), + torch.multinomial(torch.softmax(logits_top, dim=-1), num_samples=1).squeeze(dim=-1), + ] + else: + # Clone so that when we modify for top_p we don't change the original logits + logits_top = logits / temperature if temperature != 1.0 else logits.clone() + modify_logits_for_top_p_filtering(logits_top, top_p) + return torch.multinomial(torch.softmax(logits_top, dim=-1), num_samples=1).squeeze( + dim=-1 + ) + + +@torch.inference_mode() +def decode( + input_ids, + model, + max_length, + top_k=1, + top_p=0.0, + temperature=1.0, + eos_token_id=None, + teacher_outputs=None, + vocab_size=None, + tensor_parallel=1, + cg=False, + enable_timing=False, +): + """Decoding, either greedy or with top-k or top-p sampling. + If top-k = 0, don't limit the number of candidates (pure sampling). + Top-k and top-p can be used together. If top_k > 0 and top_p > 0, then top-k is applied first, + then top-p. + We assume that all sequences in the same batch have the same length. + + Arguments: + input_ids: (batch, seq_len) + max_length: int + teacher_outputs (optional): (batch, seq_len). If provided, instead of sampling from the + logits, the next token is taken from the teacher_outputs. Useful for testing. + Returns: GreedySearchDecoderOnlyOutput or SampleDecoderOnlyOutput, with the following fields: + sequences: (batch, max_length) + scores: tuples of (batch, vocab_size) + """ + batch_size, seqlen_og = input_ids.shape + teacher_output_len = teacher_outputs.shape[1] if teacher_outputs is not None else 0 + if cg: + if not hasattr(model, "_decoding_cache"): + model._decoding_cache = None + model._decoding_cache = update_graph_cache( + model, + model._decoding_cache, + batch_size, + seqlen_og, + max_length, + tensor_parallel=tensor_parallel, + ) + inference_params = model._decoding_cache.inference_params + inference_params.reset(max_length, batch_size) + else: + inference_params = InferenceParams(max_seqlen=max_length, max_batch_size=batch_size) + + def get_logits(input_ids, inference_params): + decoding = inference_params.seqlen_offset > 0 + if decoding: + position_ids = torch.full( + (batch_size, 1), + inference_params.seqlen_offset, + dtype=torch.long, + device=input_ids.device, + ) + else: + position_ids = None + if not cg or not decoding: + logits = model( + input_ids, + position_ids=position_ids, + inference_params=inference_params, + num_last_tokens=1, + ).logits.squeeze(dim=1) + else: + logits = model._decoding_cache.run( + input_ids, position_ids, inference_params.seqlen_offset + ).squeeze(dim=1) + return logits[..., :vocab_size] if vocab_size is not None else logits + + def sample_tokens(logits, inference_params): + if teacher_outputs is None or teacher_output_len <= inference_params.seqlen_offset: + token = sample(logits, top_k=top_k, top_p=top_p, temperature=temperature) + else: + token = teacher_outputs[:, inference_params.seqlen_offset] + # return rearrange(token, "b -> b 1") + return token.unsqueeze(1) + + def should_stop(current_token, inference_params): + if inference_params.seqlen_offset == 0: + return False + if eos_token_id is not None and (current_token == eos_token_id).all(): + return True + if inference_params.seqlen_offset >= max_length - 1: + return True + return False + + start = 
torch.cuda.Event(enable_timing=enable_timing) + end = torch.cuda.Event(enable_timing=enable_timing) + + if enable_timing: + if tensor_parallel > 1: + torch.distributed.barrier() + start.record() + scores, sequences = [], [input_ids] + while not should_stop(sequences[-1], inference_params): + scores.append(get_logits(sequences[-1], inference_params)) + inference_params.seqlen_offset += sequences[-1].shape[1] + sequences.append(sample_tokens(scores[-1], inference_params)) + if enable_timing: + end.record() + if tensor_parallel > 1: + torch.distributed.barrier() + torch.cuda.synchronize() + print(f"Prompt processing + decoding time: {(start.elapsed_time(end)):.0f}ms") + output_cls = GreedySearchDecoderOnlyOutput if top_k == 1 else SampleDecoderOnlyOutput + return output_cls(sequences=torch.cat(sequences, dim=1), scores=tuple(scores)) + + +class GenerationMixin: + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + raise NotImplementedError + + def generate( + self, + input_ids, + max_length, + top_k=1, + top_p=0.0, + temperature=1.0, + return_dict_in_generate=False, + output_scores=False, + **kwargs, + ): + output = decode( + input_ids, self, max_length, top_k=top_k, top_p=top_p, temperature=temperature, **kwargs + ) + if not output_scores: + output.scores = None + return output if return_dict_in_generate else output.sequences + + +def allocate_inference_cache( + max_batch_size, + max_seqlen, + nheads, + headdim, + layers: Union[int, Sequence], + device, + dtype=torch.float16, +): + assert dtype in [torch.float16, torch.bfloat16, torch.float32] + kv_cache_shape = (max_batch_size, max_seqlen, 2, nheads, headdim) + if isinstance(layers, int): + layers = range(layers) + return {i: torch.empty(kv_cache_shape, device=device, dtype=dtype) for i in layers} + + +@dataclass +class DecodingCGCache: + max_batch_size: int = 0 + max_seqlen: int = 0 + device = None + dtype = None + callables: dict = field(default_factory=dict) + mempool = None + inference_params: Optional[InferenceParams] = None + run: Optional[Callable] = None + + +@torch.inference_mode() +def update_graph_cache( + model, + cache, + batch_size, + seqlen_og, + max_seqlen, + decoding_seqlens=(1,), + tensor_parallel=1, + dtype=None, + n_warmups=2, +): + if cache is None: + cache = DecodingCGCache() + param_example = next(iter(model.parameters())) + device = param_example.device + if dtype is None: + dtype = param_example.dtype + if ( + (device, dtype) != (cache.device, cache.dtype) + or batch_size > cache.max_batch_size + or max_seqlen > cache.max_seqlen + ): # Invalidate the cache + cache.callables = {} + cache.mempool = None + cache.inference_params = None + gc.collect() + cache.device, cache.dtype = device, dtype + cache.max_batch_size, cache.max_seqlen = batch_size, max_seqlen + if hasattr(model, "allocate_inference_cache"): + inf_cache = model.allocate_inference_cache(batch_size, max_seqlen, dtype) + else: + headdim = getattr( + model.config, + "head_dim", + model.config.hidden_size // model.config.num_attention_heads, + ) + inf_cache = allocate_inference_cache( + batch_size, + max_seqlen, + model.config.num_attention_heads // tensor_parallel, + headdim, + model.config.num_hidden_layers, + device, + dtype, + ) + lengths_per_sample = torch.full((batch_size,), seqlen_og, dtype=torch.int32, device=device) + cache.inference_params = InferenceParams( + max_seqlen=max_seqlen, + max_batch_size=batch_size, + seqlen_offset=seqlen_og, + key_value_memory_dict=inf_cache, + lengths_per_sample=lengths_per_sample, + ) + 
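The rest of update_graph_cache follows below. At the user level this machinery is reached through GenerationMixin.generate defined above, with cg=True forwarded to decode so that per-token steps are replayed from the CUDA graphs cached here. A typical call might look like the following sketch; the model class, checkpoint and tokenizer names are illustrative and come from elsewhere in the package, not from this file.

import torch
from transformers import AutoTokenizer
from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel   # not part of this diff

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
model = MambaLMHeadModel.from_pretrained("state-spaces/mamba-130m", device="cuda", dtype=torch.float16)

input_ids = tokenizer("The capital of France is", return_tensors="pt").input_ids.to("cuda")
out = model.generate(
    input_ids,
    max_length=64,
    top_k=40,               # keep the 40 most likely tokens first ...
    top_p=0.9,              # ... then nucleus-filter what survives
    temperature=0.8,
    cg=True,                # capture the decoding steps in a CUDA graph (this cache)
    return_dict_in_generate=True,
    output_scores=True,
)
print(tokenizer.batch_decode(out.sequences))
print(len(out.scores))      # one (batch, vocab_size) logits tensor per decoding step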
cache.mempool = torch.cuda.graphs.graph_pool_handle() + for decoding_seqlen in decoding_seqlens: + if (batch_size, decoding_seqlen) not in cache.callables: + cache.callables[batch_size, decoding_seqlen] = capture_graph( + model, + cache.inference_params, + batch_size, + max_seqlen, + decoding_seqlen=decoding_seqlen, + mempool=cache.mempool, + n_warmups=n_warmups, + ) + + def dispatch(input_ids, position_ids, seqlen): + batch_size, decoding_seqlen = input_ids.shape[:2] + return cache.callables[batch_size, decoding_seqlen](input_ids, position_ids, seqlen) + + cache.run = dispatch + cache.inference_params.seqlen_offset = 0 # Reset so it's not confusing + return cache + + +def capture_graph( + model, inference_params, batch_size, max_seqlen, decoding_seqlen=1, mempool=None, n_warmups=2 +): + device = next(iter(model.parameters())).device + input_ids = torch.full((batch_size, decoding_seqlen), 0, dtype=torch.long, device=device) + position_ids = torch.full((batch_size, decoding_seqlen), 0, dtype=torch.long, device=device) + seqlen_offset_og = inference_params.seqlen_offset + inference_params.seqlen_offset = max_seqlen - decoding_seqlen + inference_params.lengths_per_sample[:] = inference_params.seqlen_offset + + # Warmup before capture + s = torch.cuda.Stream() + s.wait_stream(torch.cuda.current_stream()) + with torch.cuda.stream(s): + for _ in range(n_warmups): + logits = model( + input_ids, + position_ids=position_ids, + inference_params=inference_params, + num_last_tokens=decoding_seqlen, + ).logits + s.synchronize() + # This might be needed for correctness if we run with NCCL_GRAPH_MIXING_SUPPORT=0, + # which requires that graph launch and non-captured launch to not overlap (I think, + # that's how I interpret the documentation). I'm not sure if this is required. 
+ if torch.distributed.is_initialized(): + torch.distributed.barrier() + torch.cuda.current_stream().wait_stream(s) + # Captures the graph + # To allow capture, automatically sets a side stream as the current stream in the context + graph = torch.cuda.CUDAGraph() + with torch.cuda.graph(graph, pool=mempool): + logits = model( + input_ids, + position_ids=position_ids, + inference_params=inference_params, + num_last_tokens=decoding_seqlen, + ).logits + + def run(new_input_ids, new_position_ids, seqlen): + inference_params.lengths_per_sample[:] = seqlen + input_ids.copy_(new_input_ids) + position_ids.copy_(new_position_ids) + graph.replay() + return logits.clone() + + inference_params.seqlen_offset = seqlen_offset_og + return run diff --git a/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/hf.py b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/hf.py new file mode 100644 index 0000000000000000000000000000000000000000..0d7555acddbd260636d1d14d5bd6324f6af0056a --- /dev/null +++ b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/hf.py @@ -0,0 +1,23 @@ +import json + +import torch + +from transformers.utils import WEIGHTS_NAME, CONFIG_NAME +from transformers.utils.hub import cached_file + + +def load_config_hf(model_name): + resolved_archive_file = cached_file(model_name, CONFIG_NAME, _raise_exceptions_for_missing_entries=False) + return json.load(open(resolved_archive_file)) + + +def load_state_dict_hf(model_name, device=None, dtype=None): + # If not fp32, then we don't want to load directly to the GPU + mapped_device = "cpu" if dtype not in [torch.float32, None] else device + resolved_archive_file = cached_file(model_name, WEIGHTS_NAME, _raise_exceptions_for_missing_entries=False) + return torch.load(resolved_archive_file, map_location=mapped_device) + # Convert dtype before moving to GPU to save memory + if dtype is not None: + state_dict = {k: v.to(dtype=dtype) for k, v in state_dict.items()} + state_dict = {k: v.to(device=device) for k, v in state_dict.items()} + return state_dict diff --git a/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/selective_scan_cuda.cpython-312-x86_64-linux-gnu.so b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/selective_scan_cuda.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..614433170ff0d70d84e91250d258b1596a92d08d --- /dev/null +++ b/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/selective_scan_cuda.cpython-312-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de497e1e8bf755807db1be7e7a915b2711899de71e3c20f26bf9ed388d485b39 +size 61424368 diff --git a/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/.ninja_deps b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/.ninja_deps new file mode 100644 index 0000000000000000000000000000000000000000..868105a23d6da24b20cd9b9b649e563c81cbfaa8 --- /dev/null +++ b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/.ninja_deps @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4831c2f877d7c143edf32ef11a91a00533610c0df16673d58e5d734eb5fde2a +size 679492 diff --git a/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/.ninja_log b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/.ninja_log new file mode 100644 index 0000000000000000000000000000000000000000..ba4797cd6d493c56ebea07adb348cc082b09d0dc --- /dev/null +++ b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/.ninja_log @@ -0,0 +1,11 @@ +# ninja log v7 +1 19805 1766954697427967371 
/teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan.o cb5c8304b521546b +2 41314 1766954697431967553 /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_bf16.o 292bedf00520f73c +19845 60355 1766954717272866994 /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp16.o f16354525b1b0ec9 +2 69379 1766954697431967553 /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_real.o 722bc0a6c32ace27 +1 71204 1766954697427967371 /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_real.o d3fa7d01cfc27105 +1 71283 1766954697427967371 /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_real.o 7e325ee323faedd +41320 80944 1766954738749840606 /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp32.o d1292e78dbee8802 +2 99285 1766954697431967553 /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_complex.o 7d1d4df97ab3b9ab +1 100796 1766954697427967371 /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_complex.o d85521417cf6cf13 +1 101411 1766954697427967371 /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_complex.o 9e39e25fb3232edb diff --git a/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/build.ninja b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/build.ninja new file mode 100644 index 0000000000000000000000000000000000000000..b4ae4e0af9f7546bbb7ab04ce54801a4a6c41a7c --- /dev/null +++ b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/build.ninja @@ -0,0 +1,46 @@ +ninja_required_version = 1.3 +cxx = c++ +nvcc = /usr/local/cuda/bin/nvcc + +cflags = -pthread -B /home/zeus/miniconda3/envs/cloudspace/compiler_compat -fno-strict-overflow -Wsign-compare -DNDEBUG -O2 -Wall -fPIC -O2 -isystem /home/zeus/miniconda3/envs/cloudspace/include -fPIC -O2 -isystem /home/zeus/miniconda3/envs/cloudspace/include -fPIC -I/teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan -I/home/zeus/miniconda3/envs/cloudspace/lib/python3.12/site-packages/torch/include -I/home/zeus/miniconda3/envs/cloudspace/lib/python3.12/site-packages/torch/include/torch/csrc/api/include -I/usr/local/cuda/include -I/home/zeus/miniconda3/envs/cloudspace/include/python3.12 -c +post_cflags = -O3 -std=c++17 -DTORCH_API_INCLUDE_EXTENSION_H '-DPYBIND11_COMPILER_TYPE="_gcc"' '-DPYBIND11_STDLIB="_libstdcpp"' '-DPYBIND11_BUILD_ABI="_cxxabi1018"' -DTORCH_EXTENSION_NAME=selective_scan_cuda +cuda_cflags = -I/teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan -I/home/zeus/miniconda3/envs/cloudspace/lib/python3.12/site-packages/torch/include -I/home/zeus/miniconda3/envs/cloudspace/lib/python3.12/site-packages/torch/include/torch/csrc/api/include -I/usr/local/cuda/include -I/home/zeus/miniconda3/envs/cloudspace/include/python3.12 -c +cuda_post_cflags = -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr --compiler-options 
''"'"'-fPIC'"'"'' -O3 -std=c++17 -U__CUDA_NO_HALF_OPERATORS__ -U__CUDA_NO_HALF_CONVERSIONS__ -U__CUDA_NO_BFLOAT16_OPERATORS__ -U__CUDA_NO_BFLOAT16_CONVERSIONS__ -U__CUDA_NO_BFLOAT162_OPERATORS__ -U__CUDA_NO_BFLOAT162_CONVERSIONS__ --expt-relaxed-constexpr --expt-extended-lambda --use_fast_math --ptxas-options=-v -lineinfo -gencode arch=compute_89,code=sm_89 --threads 4 -DTORCH_API_INCLUDE_EXTENSION_H '-DPYBIND11_COMPILER_TYPE="_gcc"' '-DPYBIND11_STDLIB="_libstdcpp"' '-DPYBIND11_BUILD_ABI="_cxxabi1018"' -DTORCH_EXTENSION_NAME=selective_scan_cuda +cuda_dlink_post_cflags = +sycl_dlink_post_cflags = +ldflags = + +rule compile + command = $cxx -MMD -MF $out.d $cflags -c $in -o $out $post_cflags + depfile = $out.d + deps = gcc + +rule cuda_compile + depfile = $out.d + deps = gcc + command = $nvcc --generate-dependencies-with-compile --dependency-output $out.d $cuda_cflags -c $in -o $out $cuda_post_cflags + + + + + + + +build /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan.o: compile /teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan/selective_scan.cpp +build /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_complex.o: cuda_compile /teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_bf16_complex.cu +build /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_real.o: cuda_compile /teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_bf16_real.cu +build /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_complex.o: cuda_compile /teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp16_complex.cu +build /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_real.o: cuda_compile /teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp16_real.cu +build /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_complex.o: cuda_compile /teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp32_complex.cu +build /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_real.o: cuda_compile /teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp32_real.cu +build /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_bf16.o: cuda_compile /teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_bf16.cu +build /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp16.o: cuda_compile /teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_fp16.cu +build /teamspace/studios/this_studio/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp32.o: cuda_compile /teamspace/studios/this_studio/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_fp32.cu + + + + + + + + diff --git a/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan.o 
b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan.o new file mode 100644 index 0000000000000000000000000000000000000000..fb9ff29ded6a84809186f6ddea678c193f4d891d --- /dev/null +++ b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan.o @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30642e36142e5b060973723e4e887b2d59880c30a0f91c192fb857b1bef0af49 +size 397560 diff --git a/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_complex.o b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_complex.o new file mode 100644 index 0000000000000000000000000000000000000000..c0ae6a699b02be35fcd7d1d42b27aca2065b57bf --- /dev/null +++ b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_complex.o @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25fa5c83cf2ee35e8070b8c8c52c70c4bf3dcf21029f5e9f4590eb13f7a3cc42 +size 10478608 diff --git a/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_real.o b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_real.o new file mode 100644 index 0000000000000000000000000000000000000000..59262b6694f6011d87882b7d550fc93d136ac10e --- /dev/null +++ b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_real.o @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a579cb649353677789e91906a42ba466016e6d0a8e46b03a422aa3c8f9a36feb +size 6449680 diff --git a/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_complex.o b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_complex.o new file mode 100644 index 0000000000000000000000000000000000000000..1e064c242a793e46960c376507fec59748539cab --- /dev/null +++ b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_complex.o @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:402053a21751a06229a38b35bdfbb6baa63d4c4435f82e628a8a3999a5d2beb2 +size 10455272 diff --git a/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_real.o b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_real.o new file mode 100644 index 0000000000000000000000000000000000000000..8edecf7112671e10e6784edf3a0fd52999c908f6 --- /dev/null +++ b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_real.o @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:357d3d897d7697774261cbd88f27fc5e4fb2f01caa8a5428ace0c66f22de7794 +size 6435864 diff --git a/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_complex.o b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_complex.o new file mode 100644 index 0000000000000000000000000000000000000000..effaf88981097ce66e6997dc06f90ec03aef90be --- /dev/null +++ b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_complex.o @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:00817b11aa97238772fbee510dc5da41411df480d0258a5344126c72deb983f3 +size 10070104 diff --git 
a/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_real.o b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_real.o new file mode 100644 index 0000000000000000000000000000000000000000..8929c84dd2f0f0b28315e5a0385c67a17c512935 --- /dev/null +++ b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_real.o @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88cc42d0fa66b0cbac41a4445a1f8c204c4f0f9a6cce18b9f94086213bfba7f1 +size 6057784 diff --git a/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_bf16.o b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_bf16.o new file mode 100644 index 0000000000000000000000000000000000000000..be04c8f9a8828eb50c56537cbf8c7fec81fce683 --- /dev/null +++ b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_bf16.o @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5d08cbe64bf5265c8f7245f0440b337fe5fb7701c5715e8206eab1d88e475d2 +size 4116912 diff --git a/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp16.o b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp16.o new file mode 100644 index 0000000000000000000000000000000000000000..2bfb5d5fa14170e3fcf2841a3abfb10fe4cca094 --- /dev/null +++ b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp16.o @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38293bd3de57cfff70fe33ad77437e7ffcfe7378675092f38c348efe066ecef0 +size 4098472 diff --git a/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp32.o b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp32.o new file mode 100644 index 0000000000000000000000000000000000000000..2e0efb63f6a4f491f4b35b47661001f3d067f846 --- /dev/null +++ b/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp32.o @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8a5320d5733c4b86ff7fc1e42d6a3f24f4ec935999fa7a9c4242990ea422530 +size 3936016 diff --git a/SegMamba/mamba/csrc/selective_scan/reverse_scan.cuh b/SegMamba/mamba/csrc/selective_scan/reverse_scan.cuh new file mode 100644 index 0000000000000000000000000000000000000000..d7e93174bb391d45271e6c77669a5e52d6c9cc78 --- /dev/null +++ b/SegMamba/mamba/csrc/selective_scan/reverse_scan.cuh @@ -0,0 +1,401 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +#pragma once + +#include + +#include +#include +#include +// #include +#include "uninitialized_copy.cuh" + +/** + * Perform a reverse sequential reduction over \p LENGTH elements of the \p input array. The aggregate is returned. 
+ */ +template < + int LENGTH, + typename T, + typename ReductionOp> +__device__ __forceinline__ T ThreadReverseReduce(const T (&input)[LENGTH], ReductionOp reduction_op) { + static_assert(LENGTH > 0); + T retval = input[LENGTH - 1]; + #pragma unroll + for (int i = LENGTH - 2; i >= 0; --i) { retval = reduction_op(retval, input[i]); } + return retval; +} + +/** + * Perform a sequential inclusive postfix reverse scan over the statically-sized \p input array, seeded with the specified \p postfix. The aggregate is returned. + */ +template < + int LENGTH, + typename T, + typename ScanOp> +__device__ __forceinline__ T ThreadReverseScanInclusive( + const T (&input)[LENGTH], + T (&output)[LENGTH], + ScanOp scan_op, + const T postfix) +{ + T inclusive = postfix; + #pragma unroll + for (int i = LENGTH - 1; i >= 0; --i) { + inclusive = scan_op(inclusive, input[i]); + output[i] = inclusive; + } +} + +/** + * Perform a sequential exclusive postfix reverse scan over the statically-sized \p input array, seeded with the specified \p postfix. The aggregate is returned. + */ +template < + int LENGTH, + typename T, + typename ScanOp> +__device__ __forceinline__ T ThreadReverseScanExclusive( + const T (&input)[LENGTH], + T (&output)[LENGTH], + ScanOp scan_op, + const T postfix) +{ + // Careful, output maybe be aliased to input + T exclusive = postfix; + T inclusive; + #pragma unroll + for (int i = LENGTH - 1; i >= 0; --i) { + inclusive = scan_op(exclusive, input[i]); + output[i] = exclusive; + exclusive = inclusive; + } + return inclusive; +} + + +/** + * \brief WarpReverseScan provides SHFL-based variants of parallel postfix scan of items partitioned across a CUDA thread warp. + * + * LOGICAL_WARP_THREADS must be a power-of-two + */ +template < + typename T, ///< Data type being scanned + int LOGICAL_WARP_THREADS ///< Number of threads per logical warp + > +struct WarpReverseScan { + //--------------------------------------------------------------------- + // Constants and type definitions + //--------------------------------------------------------------------- + + /// Whether the logical warp size and the PTX warp size coincide + static constexpr bool IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(0)); + /// The number of warp scan steps + static constexpr int STEPS = cub::Log2::VALUE; + static_assert(LOGICAL_WARP_THREADS == 1 << STEPS); + + + //--------------------------------------------------------------------- + // Thread fields + //--------------------------------------------------------------------- + + /// Lane index in logical warp + unsigned int lane_id; + + /// Logical warp index in 32-thread physical warp + unsigned int warp_id; + + /// 32-thread physical warp member mask of logical warp + unsigned int member_mask; + + //--------------------------------------------------------------------- + // Construction + //--------------------------------------------------------------------- + + /// Constructor + explicit __device__ __forceinline__ + WarpReverseScan() + : lane_id(cub::LaneId()) + , warp_id(IS_ARCH_WARP ? 
0 : (lane_id / LOGICAL_WARP_THREADS)) + , member_mask(cub::WarpMask(warp_id)) + { + if (!IS_ARCH_WARP) { + lane_id = lane_id % LOGICAL_WARP_THREADS; + } + } + + + /// Broadcast + __device__ __forceinline__ T Broadcast( + T input, ///< [in] The value to broadcast + int src_lane) ///< [in] Which warp lane is to do the broadcasting + { + return cub::ShuffleIndex(input, src_lane, member_mask); + } + + + /// Inclusive scan + template + __device__ __forceinline__ void InclusiveReverseScan( + T input, ///< [in] Calling thread's input item. + T &inclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. + ScanOpT scan_op) ///< [in] Binary scan operator + { + inclusive_output = input; + #pragma unroll + for (int STEP = 0; STEP < STEPS; STEP++) { + int offset = 1 << STEP; + T temp = cub::ShuffleDown( + inclusive_output, offset, LOGICAL_WARP_THREADS - 1, member_mask + ); + // Perform scan op if from a valid peer + inclusive_output = static_cast(lane_id) >= LOGICAL_WARP_THREADS - offset + ? inclusive_output : scan_op(temp, inclusive_output); + } + } + + /// Exclusive scan + // Get exclusive from inclusive + template + __device__ __forceinline__ void ExclusiveReverseScan( + T input, ///< [in] Calling thread's input item. + T &exclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. + ScanOpT scan_op, ///< [in] Binary scan operator + T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items. + { + T inclusive_output; + InclusiveReverseScan(input, inclusive_output, scan_op); + warp_aggregate = cub::ShuffleIndex(inclusive_output, 0, member_mask); + // initial value unknown + exclusive_output = cub::ShuffleDown( + inclusive_output, 1, LOGICAL_WARP_THREADS - 1, member_mask + ); + } + + /** + * \brief Computes both inclusive and exclusive reverse scans using the specified binary scan functor across the calling warp. Because no initial value is supplied, the \p exclusive_output computed for the last warp-lane is undefined. + */ + template + __device__ __forceinline__ void ReverseScan( + T input, ///< [in] Calling thread's input item. + T &inclusive_output, ///< [out] Calling thread's inclusive-scan output item. + T &exclusive_output, ///< [out] Calling thread's exclusive-scan output item. + ScanOpT scan_op) ///< [in] Binary scan operator + { + InclusiveReverseScan(input, inclusive_output, scan_op); + // initial value unknown + exclusive_output = cub::ShuffleDown( + inclusive_output, 1, LOGICAL_WARP_THREADS - 1, member_mask + ); + } + +}; + +/** + * \brief BlockReverseScan provides variants of raking-based parallel postfix scan across a CUDA thread block. 
+ */ +template < + typename T, ///< Data type being scanned + int BLOCK_DIM_X, ///< The thread block length in threads along the X dimension + bool MEMOIZE=false ///< Whether or not to buffer outer raking scan partials to incur fewer shared memory reads at the expense of higher register pressure + > +struct BlockReverseScan { + //--------------------------------------------------------------------- + // Types and constants + //--------------------------------------------------------------------- + + /// Constants + /// The thread block size in threads + static constexpr int BLOCK_THREADS = BLOCK_DIM_X; + + /// Layout type for padded thread block raking grid + using BlockRakingLayout = cub::BlockRakingLayout; + // The number of reduction elements is not a multiple of the number of raking threads for now + static_assert(BlockRakingLayout::UNGUARDED); + + /// Number of raking threads + static constexpr int RAKING_THREADS = BlockRakingLayout::RAKING_THREADS; + /// Number of raking elements per warp synchronous raking thread + static constexpr int SEGMENT_LENGTH = BlockRakingLayout::SEGMENT_LENGTH; + /// Cooperative work can be entirely warp synchronous + static constexpr bool WARP_SYNCHRONOUS = (int(BLOCK_THREADS) == int(RAKING_THREADS)); + + /// WarpReverseScan utility type + using WarpReverseScan = WarpReverseScan; + + /// Shared memory storage layout type + struct _TempStorage { + typename BlockRakingLayout::TempStorage raking_grid; ///< Padded thread block raking grid + }; + + + /// Alias wrapper allowing storage to be unioned + struct TempStorage : cub::Uninitialized<_TempStorage> {}; + + + //--------------------------------------------------------------------- + // Per-thread fields + //--------------------------------------------------------------------- + + // Thread fields + _TempStorage &temp_storage; + unsigned int linear_tid; + T cached_segment[SEGMENT_LENGTH]; + + + //--------------------------------------------------------------------- + // Utility methods + //--------------------------------------------------------------------- + + /// Performs upsweep raking reduction, returning the aggregate + template + __device__ __forceinline__ T Upsweep(ScanOp scan_op) { + T *smem_raking_ptr = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); + // Read data into registers + #pragma unroll + for (int i = 0; i < SEGMENT_LENGTH; ++i) { cached_segment[i] = smem_raking_ptr[i]; } + T raking_partial = cached_segment[SEGMENT_LENGTH - 1]; + #pragma unroll + for (int i = SEGMENT_LENGTH - 2; i >= 0; --i) { + raking_partial = scan_op(raking_partial, cached_segment[i]); + } + return raking_partial; + } + + + /// Performs exclusive downsweep raking scan + template + __device__ __forceinline__ void ExclusiveDownsweep( + ScanOp scan_op, + T raking_partial) + { + T *smem_raking_ptr = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); + // Read data back into registers + if (!MEMOIZE) { + #pragma unroll + for (int i = 0; i < SEGMENT_LENGTH; ++i) { cached_segment[i] = smem_raking_ptr[i]; } + } + ThreadReverseScanExclusive(cached_segment, cached_segment, scan_op, raking_partial); + // Write data back to smem + #pragma unroll + for (int i = 0; i < SEGMENT_LENGTH; ++i) { smem_raking_ptr[i] = cached_segment[i]; } + } + + + //--------------------------------------------------------------------- + // Constructors + //--------------------------------------------------------------------- + + /// Constructor + __device__ __forceinline__ BlockReverseScan( + TempStorage &temp_storage) 
+ : + temp_storage(temp_storage.Alias()), + linear_tid(cub::RowMajorTid(BLOCK_DIM_X, 1, 1)) + {} + + + /// Computes an exclusive thread block-wide postfix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_postfix_callback_op is invoked by the first warp in the block, and the value returned by lane0 in that warp is used as the "seed" value that logically postfixes the thread block's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. + template < + typename ScanOp, + typename BlockPostfixCallbackOp> + __device__ __forceinline__ void ExclusiveReverseScan( + T input, ///< [in] Calling thread's input item + T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) + ScanOp scan_op, ///< [in] Binary scan operator + BlockPostfixCallbackOp &block_postfix_callback_op) ///< [in-out] [warp0 only] Call-back functor for specifying a thread block-wide postfix to be applied to all inputs. + { + if (WARP_SYNCHRONOUS) { + // Short-circuit directly to warp-synchronous scan + T block_aggregate; + WarpReverseScan warp_scan; + warp_scan.ExclusiveReverseScan(input, exclusive_output, scan_op, block_aggregate); + // Obtain warp-wide postfix in lane0, then broadcast to other lanes + T block_postfix = block_postfix_callback_op(block_aggregate); + block_postfix = warp_scan.Broadcast(block_postfix, 0); + exclusive_output = linear_tid == BLOCK_THREADS - 1 ? block_postfix : scan_op(block_postfix, exclusive_output); + } else { + // Place thread partial into shared memory raking grid + T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); + detail::uninitialized_copy(placement_ptr, input); + cub::CTA_SYNC(); + // Reduce parallelism down to just raking threads + if (linear_tid < RAKING_THREADS) { + WarpReverseScan warp_scan; + // Raking upsweep reduction across shared partials + T upsweep_partial = Upsweep(scan_op); + // Warp-synchronous scan + T exclusive_partial, block_aggregate; + warp_scan.ExclusiveReverseScan(upsweep_partial, exclusive_partial, scan_op, block_aggregate); + // Obtain block-wide postfix in lane0, then broadcast to other lanes + T block_postfix = block_postfix_callback_op(block_aggregate); + block_postfix = warp_scan.Broadcast(block_postfix, 0); + // Update postfix with warpscan exclusive partial + T downsweep_postfix = linear_tid == RAKING_THREADS - 1 + ? block_postfix : scan_op(block_postfix, exclusive_partial); + // Exclusive raking downsweep scan + ExclusiveDownsweep(scan_op, downsweep_postfix); + } + cub::CTA_SYNC(); + // Grab thread postfix from shared memory + exclusive_output = *placement_ptr; + + // // Compute warp scan in each warp. + // // The exclusive output from the last lane in each warp is invalid. + // T inclusive_output; + // WarpReverseScan warp_scan; + // warp_scan.ReverseScan(input, inclusive_output, exclusive_output, scan_op); + + // // Compute the warp-wide postfix and block-wide aggregate for each warp. Warp postfix for the last warp is invalid. 
+ // T block_aggregate; + // T warp_postfix = ComputeWarpPostfix(scan_op, inclusive_output, block_aggregate); + + // // Apply warp postfix to our lane's partial + // if (warp_id != 0) { + // exclusive_output = scan_op(warp_postfix, exclusive_output); + // if (lane_id == 0) { exclusive_output = warp_postfix; } + // } + + // // Use the first warp to determine the thread block postfix, returning the result in lane0 + // if (warp_id == 0) { + // T block_postfix = block_postfix_callback_op(block_aggregate); + // if (lane_id == 0) { + // // Share the postfix with all threads + // detail::uninitialized_copy(&temp_storage.block_postfix, + // block_postfix); + + // exclusive_output = block_postfix; // The block postfix is the exclusive output for tid0 + // } + // } + + // cub::CTA_SYNC(); + + // // Incorporate thread block postfix into outputs + // T block_postfix = temp_storage.block_postfix; + // if (linear_tid > 0) { exclusive_output = scan_op(block_postfix, exclusive_output); } + } + } + + + /** + * \brief Computes an inclusive block-wide postfix scan using the specified binary \p scan_op functor. Each thread contributes an array of consecutive input elements. the call-back functor \p block_postfix_callback_op is invoked by the first warp in the block, and the value returned by lane0 in that warp is used as the "seed" value that logically postfixes the thread block's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. + */ + template < + int ITEMS_PER_THREAD, + typename ScanOp, + typename BlockPostfixCallbackOp> + __device__ __forceinline__ void InclusiveReverseScan( + T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items + T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input) + ScanOp scan_op, ///< [in] Binary scan functor + BlockPostfixCallbackOp &block_postfix_callback_op) ///< [in-out] [warp0 only] Call-back functor for specifying a block-wide postfix to be applied to the logical input sequence. + { + // Reduce consecutive thread items in registers + T thread_postfix = ThreadReverseReduce(input, scan_op); + // Exclusive thread block-scan + ExclusiveReverseScan(thread_postfix, thread_postfix, scan_op, block_postfix_callback_op); + // Inclusive scan in registers with postfix as seed + ThreadReverseScanInclusive(input, output, scan_op, thread_postfix); + } + +}; \ No newline at end of file diff --git a/SegMamba/mamba/csrc/selective_scan/selective_scan.cpp b/SegMamba/mamba/csrc/selective_scan/selective_scan.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f51af402a190dc14247ef8185a7d01b697313f02 --- /dev/null +++ b/SegMamba/mamba/csrc/selective_scan/selective_scan.cpp @@ -0,0 +1,497 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +#include +#include +#include +#include + +#include "selective_scan.h" + +#define CHECK_SHAPE(x, ...) TORCH_CHECK(x.sizes() == torch::IntArrayRef({__VA_ARGS__}), #x " must have shape (" #__VA_ARGS__ ")") + +#define DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(ITYPE, NAME, ...) 
\ + if (ITYPE == at::ScalarType::Half) { \ + using input_t = at::Half; \ + __VA_ARGS__(); \ + } else if (ITYPE == at::ScalarType::BFloat16) { \ + using input_t = at::BFloat16; \ + __VA_ARGS__(); \ + } else if (ITYPE == at::ScalarType::Float) { \ + using input_t = float; \ + __VA_ARGS__(); \ + } else { \ + AT_ERROR(#NAME, " not implemented for input type '", toString(ITYPE), "'"); \ + } + +#define DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(WTYPE, NAME, ...) \ + if (WTYPE == at::ScalarType::Half) { \ + using weight_t = at::Half; \ + __VA_ARGS__(); \ + } else if (WTYPE == at::ScalarType::BFloat16) { \ + using weight_t = at::BFloat16; \ + __VA_ARGS__(); \ + } else if (WTYPE == at::ScalarType::Float) { \ + using weight_t = float; \ + __VA_ARGS__(); \ + } else { \ + AT_ERROR(#NAME, " not implemented for weight type '", toString(WTYPE), "'"); \ + } + +#define DISPATCH_WTYPE_FLOAT_AND_COMPLEX(WTYPE, NAME, ...) \ + if (WTYPE == at::ScalarType::Float) { \ + using weight_t = float; \ + __VA_ARGS__(); \ + } else if (WTYPE == at::ScalarType::ComplexFloat) { \ + using weight_t = c10::complex; \ + __VA_ARGS__(); \ + } else { \ + AT_ERROR(#NAME, " not implemented for weight type '", toString(WTYPE), "'"); \ + } + +template +void selective_scan_fwd_cuda(SSMParamsBase ¶ms, cudaStream_t stream); + +template +void selective_scan_bwd_cuda(SSMParamsBwd ¶ms, cudaStream_t stream); + +void set_ssm_params_fwd(SSMParamsBase ¶ms, + // sizes + const size_t batch, + const size_t dim, + const size_t seqlen, + const size_t dstate, + const size_t n_groups, + const size_t n_chunks, + const bool is_variable_B, + const bool is_variable_C, + // device pointers + const at::Tensor u, + const at::Tensor delta, + const at::Tensor A, + const at::Tensor B, + const at::Tensor C, + const at::Tensor out, + const at::Tensor z, + const at::Tensor out_z, + void* D_ptr, + void* delta_bias_ptr, + void* x_ptr, + bool has_z, + bool delta_softplus) { + + // Reset the parameters + memset(¶ms, 0, sizeof(params)); + + params.batch = batch; + params.dim = dim; + params.seqlen = seqlen; + params.dstate = dstate; + params.n_groups = n_groups; + params.n_chunks = n_chunks; + params.dim_ngroups_ratio = dim / n_groups; + + params.delta_softplus = delta_softplus; + + params.is_variable_B = is_variable_B; + params.is_variable_C = is_variable_C; + + // Set the pointers and strides. + params.u_ptr = u.data_ptr(); + params.delta_ptr = delta.data_ptr(); + params.A_ptr = A.data_ptr(); + params.B_ptr = B.data_ptr(); + params.C_ptr = C.data_ptr(); + params.D_ptr = D_ptr; + params.delta_bias_ptr = delta_bias_ptr; + params.out_ptr = out.data_ptr(); + params.x_ptr = x_ptr; + params.z_ptr = has_z ? z.data_ptr() : nullptr; + params.out_z_ptr = has_z ? out_z.data_ptr() : nullptr; + // All stride are in elements, not bytes. + params.A_d_stride = A.stride(0); + params.A_dstate_stride = A.stride(1); + if (!is_variable_B) { + params.B_d_stride = B.stride(0); + } else { + params.B_batch_stride = B.stride(0); + params.B_group_stride = B.stride(1); + } + params.B_dstate_stride = !is_variable_B ? B.stride(1) : B.stride(2); + if (!is_variable_C) { + params.C_d_stride = C.stride(0); + } else { + params.C_batch_stride = C.stride(0); + params.C_group_stride = C.stride(1); + } + params.C_dstate_stride = !is_variable_C ? 
C.stride(1) : C.stride(2); + params.u_batch_stride = u.stride(0); + params.u_d_stride = u.stride(1); + params.delta_batch_stride = delta.stride(0); + params.delta_d_stride = delta.stride(1); + if (has_z) { + params.z_batch_stride = z.stride(0); + params.z_d_stride = z.stride(1); + params.out_z_batch_stride = out_z.stride(0); + params.out_z_d_stride = out_z.stride(1); + } + params.out_batch_stride = out.stride(0); + params.out_d_stride = out.stride(1); +} + +void set_ssm_params_bwd(SSMParamsBwd ¶ms, + // sizes + const size_t batch, + const size_t dim, + const size_t seqlen, + const size_t dstate, + const size_t n_groups, + const size_t n_chunks, + const bool is_variable_B, + const bool is_variable_C, + // device pointers + const at::Tensor u, + const at::Tensor delta, + const at::Tensor A, + const at::Tensor B, + const at::Tensor C, + const at::Tensor z, + const at::Tensor out, + const at::Tensor out_z, + void* D_ptr, + void* delta_bias_ptr, + void* x_ptr, + const at::Tensor dout, + const at::Tensor du, + const at::Tensor ddelta, + const at::Tensor dA, + const at::Tensor dB, + const at::Tensor dC, + const at::Tensor dz, + void* dD_ptr, + void* ddelta_bias_ptr, + bool has_z, + bool delta_softplus, + bool recompute_out_z) { + // Pass in "dout" instead of "out", we're not gonna use "out" unless we have z + set_ssm_params_fwd(params, batch, dim, seqlen, dstate, n_groups, n_chunks, is_variable_B, is_variable_C, + u, delta, A, B, C, has_z ? out : dout, + has_z ? z : dout, + // If not recompute_out_z, pass dout instead of out_z. + // This won't be used by the bwd kernel + recompute_out_z ? out_z : dout, + D_ptr, delta_bias_ptr, x_ptr, has_z, delta_softplus); + if (!recompute_out_z) { params.out_z_ptr = nullptr; } + + // Set the pointers and strides. + params.dout_ptr = dout.data_ptr(); + params.du_ptr = du.data_ptr(); + params.dA_ptr = dA.data_ptr(); + params.dB_ptr = dB.data_ptr(); + params.dC_ptr = dC.data_ptr(); + params.dD_ptr = dD_ptr; + params.ddelta_ptr = ddelta.data_ptr(); + params.ddelta_bias_ptr = ddelta_bias_ptr; + params.dz_ptr = has_z ? dz.data_ptr() : nullptr; + // All stride are in elements, not bytes. + params.dout_batch_stride = dout.stride(0); + params.dout_d_stride = dout.stride(1); + params.dA_d_stride = dA.stride(0); + params.dA_dstate_stride = dA.stride(1); + if (!is_variable_B) { + params.dB_d_stride = dB.stride(0); + } else { + params.dB_batch_stride = dB.stride(0); + params.dB_group_stride = dB.stride(1); + } + params.dB_dstate_stride = !is_variable_B ? dB.stride(1) : dB.stride(2); + if (!is_variable_C) { + params.dC_d_stride = dC.stride(0); + } else { + params.dC_batch_stride = dC.stride(0); + params.dC_group_stride = dC.stride(1); + } + params.dC_dstate_stride = !is_variable_C ? 
dC.stride(1) : dC.stride(2); + params.du_batch_stride = du.stride(0); + params.du_d_stride = du.stride(1); + params.ddelta_batch_stride = ddelta.stride(0); + params.ddelta_d_stride = ddelta.stride(1); + if (has_z) { + params.dz_batch_stride = dz.stride(0); + params.dz_d_stride = dz.stride(1); + } +} + +std::vector +selective_scan_fwd(const at::Tensor &u, const at::Tensor &delta, + const at::Tensor &A, const at::Tensor &B, const at::Tensor &C, + const c10::optional &D_, + const c10::optional &z_, + const c10::optional &delta_bias_, + bool delta_softplus) { + auto input_type = u.scalar_type(); + auto weight_type = A.scalar_type(); + TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16); + TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::ComplexFloat); + + const bool is_variable_B = B.dim() >= 3; + const bool is_variable_C = C.dim() >= 3; + const bool is_complex = weight_type == at::ScalarType::ComplexFloat; + + TORCH_CHECK(delta.scalar_type() == input_type); + TORCH_CHECK(B.scalar_type() == (!is_variable_B ? weight_type : input_type)); + TORCH_CHECK(C.scalar_type() == (!is_variable_C ? weight_type : input_type)); + + TORCH_CHECK(u.is_cuda()); + TORCH_CHECK(delta.is_cuda()); + TORCH_CHECK(A.is_cuda()); + TORCH_CHECK(B.is_cuda()); + TORCH_CHECK(C.is_cuda()); + + TORCH_CHECK(u.stride(-1) == 1); + TORCH_CHECK(delta.stride(-1) == 1); + + const auto sizes = u.sizes(); + const int batch_size = sizes[0]; + const int dim = sizes[1]; + const int seqlen = sizes[2]; + const int dstate = A.size(1); + const int n_groups = is_variable_B ? B.size(1) : 1; + + TORCH_CHECK(dstate <= 256, "selective_scan only supports state dimension <= 256"); + + CHECK_SHAPE(u, batch_size, dim, seqlen); + CHECK_SHAPE(delta, batch_size, dim, seqlen); + CHECK_SHAPE(A, dim, dstate); + if (!is_variable_B) { + CHECK_SHAPE(B, dim, dstate); + } else { + CHECK_SHAPE(B, batch_size, n_groups, dstate, !is_complex ? seqlen : seqlen * 2); + TORCH_CHECK(B.stride(-1) == 1); + } + if (!is_variable_C) { + CHECK_SHAPE(C, dim, dstate); + } else { + CHECK_SHAPE(C, batch_size, n_groups, dstate, !is_complex ? 
seqlen: seqlen * 2); + TORCH_CHECK(C.stride(-1) == 1); + } + + if (D_.has_value()) { + auto D = D_.value(); + TORCH_CHECK(D.scalar_type() == at::ScalarType::Float); + TORCH_CHECK(D.is_cuda()); + TORCH_CHECK(D.stride(-1) == 1); + CHECK_SHAPE(D, dim); + } + + if (delta_bias_.has_value()) { + auto delta_bias = delta_bias_.value(); + TORCH_CHECK(delta_bias.scalar_type() == at::ScalarType::Float); + TORCH_CHECK(delta_bias.is_cuda()); + TORCH_CHECK(delta_bias.stride(-1) == 1); + CHECK_SHAPE(delta_bias, dim); + } + + at::Tensor z, out_z; + const bool has_z = z_.has_value(); + if (has_z) { + z = z_.value(); + TORCH_CHECK(z.scalar_type() == input_type); + TORCH_CHECK(z.is_cuda()); + TORCH_CHECK(z.stride(-1) == 1); + CHECK_SHAPE(z, batch_size, dim, seqlen); + out_z = torch::empty_like(z); + } + + const int n_chunks = (seqlen + 2048 - 1) / 2048; + // const int n_chunks = (seqlen + 1024 - 1) / 1024; + // at::Tensor out = torch::empty_like(u); + // Right now u has BHL layout and delta has HBL layout, and we want out to have HBL layout + at::Tensor out = torch::empty_like(delta); + at::Tensor x; + x = torch::empty({batch_size, dim, n_chunks, dstate * 2}, u.options().dtype(weight_type)); + + SSMParamsBase params; + set_ssm_params_fwd(params, batch_size, dim, seqlen, dstate, n_groups, n_chunks, is_variable_B, is_variable_C, + u, delta, A, B, C, out, z, out_z, + D_.has_value() ? D_.value().data_ptr() : nullptr, + delta_bias_.has_value() ? delta_bias_.value().data_ptr() : nullptr, + x.data_ptr(), + has_z, + delta_softplus); + + // Otherwise the kernel will be launched from cuda:0 device + // Cast to char to avoid compiler warning about narrowing + at::cuda::CUDAGuard device_guard{(char)u.get_device()}; + auto stream = at::cuda::getCurrentCUDAStream().stream(); + DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(u.scalar_type(), "selective_scan_fwd", [&] { + DISPATCH_WTYPE_FLOAT_AND_COMPLEX(A.scalar_type(), "selective_scan_fwd", [&] { + selective_scan_fwd_cuda(params, stream); + }); + }); + std::vector result = {out, x}; + if (has_z) { result.push_back(out_z); } + return result; +} + +std::vector +selective_scan_bwd(const at::Tensor &u, const at::Tensor &delta, + const at::Tensor &A, const at::Tensor &B, const at::Tensor &C, + const c10::optional &D_, + const c10::optional &z_, + const c10::optional &delta_bias_, + const at::Tensor &dout, + const c10::optional &x_, + const c10::optional &out_, + c10::optional &dz_, + bool delta_softplus, + bool recompute_out_z) { + auto input_type = u.scalar_type(); + auto weight_type = A.scalar_type(); + TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16); + TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::ComplexFloat); + + const bool is_variable_B = B.dim() >= 3; + const bool is_variable_C = C.dim() >= 3; + const bool is_complex = weight_type == at::ScalarType::ComplexFloat; + + TORCH_CHECK(delta.scalar_type() == input_type); + TORCH_CHECK(B.scalar_type() == (!is_variable_B ? weight_type : input_type)); + TORCH_CHECK(C.scalar_type() == (!is_variable_C ? 
weight_type : input_type)); + TORCH_CHECK(dout.scalar_type() == input_type); + + TORCH_CHECK(u.is_cuda()); + TORCH_CHECK(delta.is_cuda()); + TORCH_CHECK(A.is_cuda()); + TORCH_CHECK(B.is_cuda()); + TORCH_CHECK(C.is_cuda()); + TORCH_CHECK(dout.is_cuda()); + + TORCH_CHECK(u.stride(-1) == 1); + TORCH_CHECK(delta.stride(-1) == 1); + TORCH_CHECK(dout.stride(-1) == 1); + + const auto sizes = u.sizes(); + const int batch_size = sizes[0]; + const int dim = sizes[1]; + const int seqlen = sizes[2]; + const int dstate = A.size(1); + const int n_groups = is_variable_B ? B.size(1) : 1; + + TORCH_CHECK(dstate <= 256, "selective_scan only supports state dimension <= 256"); + + CHECK_SHAPE(u, batch_size, dim, seqlen); + CHECK_SHAPE(delta, batch_size, dim, seqlen); + CHECK_SHAPE(A, dim, dstate); + if (!is_variable_B) { + CHECK_SHAPE(B, dim, dstate); + } else { + CHECK_SHAPE(B, batch_size, n_groups, dstate, !is_complex ? seqlen : seqlen * 2); + TORCH_CHECK(B.stride(-1) == 1); + } + if (!is_variable_C) { + CHECK_SHAPE(C, dim, dstate); + } else { + CHECK_SHAPE(C, batch_size, n_groups, dstate, !is_complex ? seqlen: seqlen * 2); + TORCH_CHECK(C.stride(-1) == 1); + } + CHECK_SHAPE(dout, batch_size, dim, seqlen); + + if (D_.has_value()) { + auto D = D_.value(); + TORCH_CHECK(D.scalar_type() == at::ScalarType::Float); + TORCH_CHECK(D.is_cuda()); + TORCH_CHECK(D.stride(-1) == 1); + CHECK_SHAPE(D, dim); + } + + if (delta_bias_.has_value()) { + auto delta_bias = delta_bias_.value(); + TORCH_CHECK(delta_bias.scalar_type() == at::ScalarType::Float); + TORCH_CHECK(delta_bias.is_cuda()); + TORCH_CHECK(delta_bias.stride(-1) == 1); + CHECK_SHAPE(delta_bias, dim); + } + + at::Tensor z, out, dz, out_z; + const bool has_z = z_.has_value(); + if (has_z) { + z = z_.value(); + TORCH_CHECK(z.scalar_type() == input_type); + TORCH_CHECK(z.is_cuda()); + TORCH_CHECK(z.stride(-1) == 1); + CHECK_SHAPE(z, batch_size, dim, seqlen); + + TORCH_CHECK(out_.has_value()); + out = out_.value(); + TORCH_CHECK(out.scalar_type() == input_type); + TORCH_CHECK(out.is_cuda()); + TORCH_CHECK(out.stride(-1) == 1); + CHECK_SHAPE(out, batch_size, dim, seqlen); + + if (dz_.has_value()) { + dz = dz_.value(); + TORCH_CHECK(dz.scalar_type() == input_type); + TORCH_CHECK(dz.is_cuda()); + TORCH_CHECK(dz.stride(-1) == 1); + CHECK_SHAPE(dz, batch_size, dim, seqlen); + } else { + dz = torch::empty_like(z); + } + if (recompute_out_z) { + out_z = torch::empty_like(out); + } + } + + const int n_chunks = (seqlen + 2048 - 1) / 2048; + // const int n_chunks = (seqlen + 1024 - 1) / 1024; + if (n_chunks > 1) { TORCH_CHECK(x_.has_value()); } + if (x_.has_value()) { + auto x = x_.value(); + TORCH_CHECK(x.scalar_type() == weight_type); + TORCH_CHECK(x.is_cuda()); + TORCH_CHECK(x.is_contiguous()); + CHECK_SHAPE(x, batch_size, dim, n_chunks, 2 * dstate); + } + + at::Tensor du = torch::empty_like(u); + at::Tensor ddelta = torch::empty_like(delta); + at::Tensor dA = torch::zeros_like(A); + at::Tensor dB = !is_variable_B ? torch::zeros_like(B) : torch::zeros_like(B, B.options().dtype(torch::kFloat32)); + at::Tensor dC = !is_variable_C ? 
torch::zeros_like(C) : torch::zeros_like(C, C.options().dtype(torch::kFloat32)); + at::Tensor dD; + if (D_.has_value()) { dD = torch::zeros_like(D_.value()); } + at::Tensor ddelta_bias; + if (delta_bias_.has_value()) { ddelta_bias = torch::zeros_like(delta_bias_.value()); } + + SSMParamsBwd params; + set_ssm_params_bwd(params, batch_size, dim, seqlen, dstate, n_groups, n_chunks, is_variable_B, is_variable_C, + u, delta, A, B, C, z, out, out_z, + D_.has_value() ? D_.value().data_ptr() : nullptr, + delta_bias_.has_value() ? delta_bias_.value().data_ptr() : nullptr, + x_.has_value() ? x_.value().data_ptr() : nullptr, + dout, du, ddelta, dA, dB, dC, dz, + D_.has_value() ? dD.data_ptr() : nullptr, + delta_bias_.has_value() ? ddelta_bias.data_ptr() : nullptr, + has_z, delta_softplus, recompute_out_z); + + // Otherwise the kernel will be launched from cuda:0 device + // Cast to char to avoid compiler warning about narrowing + at::cuda::CUDAGuard device_guard{(char)u.get_device()}; + auto stream = at::cuda::getCurrentCUDAStream().stream(); + DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(u.scalar_type(), "selective_scan_bwd", [&] { + DISPATCH_WTYPE_FLOAT_AND_COMPLEX(A.scalar_type(), "selective_scan_bwd", [&] { + selective_scan_bwd_cuda(params, stream); + }); + }); + std::vector result = {du, ddelta, dA, dB.to(B.dtype()), dC.to(C.dtype()), dD, ddelta_bias}; + if (has_z) { result.push_back(dz); } + if (recompute_out_z) { result.push_back(out_z); } + return result; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("fwd", &selective_scan_fwd, "Selective scan forward"); + m.def("bwd", &selective_scan_bwd, "Selective scan backward"); +} diff --git a/SegMamba/mamba/csrc/selective_scan/selective_scan.h b/SegMamba/mamba/csrc/selective_scan/selective_scan.h new file mode 100644 index 0000000000000000000000000000000000000000..e2c7bcdbd5ddadc5975caa641ecb1dcd3b73dafd --- /dev/null +++ b/SegMamba/mamba/csrc/selective_scan/selective_scan.h @@ -0,0 +1,101 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +#pragma once + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +struct SSMScanParamsBase { + using index_t = uint32_t; + + int batch, seqlen, n_chunks; + index_t a_batch_stride; + index_t b_batch_stride; + index_t out_batch_stride; + + // Common data pointers. + void *__restrict__ a_ptr; + void *__restrict__ b_ptr; + void *__restrict__ out_ptr; + void *__restrict__ x_ptr; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +struct SSMParamsBase { + using index_t = uint32_t; + + int batch, dim, seqlen, dstate, n_groups, n_chunks; + int dim_ngroups_ratio; + bool is_variable_B; + bool is_variable_C; + + bool delta_softplus; + + index_t A_d_stride; + index_t A_dstate_stride; + index_t B_batch_stride; + index_t B_d_stride; + index_t B_dstate_stride; + index_t B_group_stride; + index_t C_batch_stride; + index_t C_d_stride; + index_t C_dstate_stride; + index_t C_group_stride; + index_t u_batch_stride; + index_t u_d_stride; + index_t delta_batch_stride; + index_t delta_d_stride; + index_t z_batch_stride; + index_t z_d_stride; + index_t out_batch_stride; + index_t out_d_stride; + index_t out_z_batch_stride; + index_t out_z_d_stride; + + // Common data pointers. 
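+    // (Added note: the pointers below are type-erased void*; the kernels cast them
+    // back to the concrete input_t / weight_t selected by the DISPATCH_* macros in
+    // selective_scan.cpp, so one params struct serves the fp32/fp16/bf16 inputs and
+    // the real/complex weight instantiations alike.)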
+    void *__restrict__ A_ptr;
+    void *__restrict__ B_ptr;
+    void *__restrict__ C_ptr;
+    void *__restrict__ D_ptr;
+    void *__restrict__ u_ptr;
+    void *__restrict__ delta_ptr;
+    void *__restrict__ delta_bias_ptr;
+    void *__restrict__ out_ptr;
+    void *__restrict__ x_ptr;
+    void *__restrict__ z_ptr;
+    void *__restrict__ out_z_ptr;
+};
+
+struct SSMParamsBwd: public SSMParamsBase {
+    index_t dout_batch_stride;
+    index_t dout_d_stride;
+    index_t dA_d_stride;
+    index_t dA_dstate_stride;
+    index_t dB_batch_stride;
+    index_t dB_group_stride;
+    index_t dB_d_stride;
+    index_t dB_dstate_stride;
+    index_t dC_batch_stride;
+    index_t dC_group_stride;
+    index_t dC_d_stride;
+    index_t dC_dstate_stride;
+    index_t du_batch_stride;
+    index_t du_d_stride;
+    index_t dz_batch_stride;
+    index_t dz_d_stride;
+    index_t ddelta_batch_stride;
+    index_t ddelta_d_stride;
+
+    // Common data pointers.
+    void *__restrict__ dout_ptr;
+    void *__restrict__ dA_ptr;
+    void *__restrict__ dB_ptr;
+    void *__restrict__ dC_ptr;
+    void *__restrict__ dD_ptr;
+    void *__restrict__ du_ptr;
+    void *__restrict__ dz_ptr;
+    void *__restrict__ ddelta_ptr;
+    void *__restrict__ ddelta_bias_ptr;
+};
diff --git a/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_bf16_complex.cu b/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_bf16_complex.cu
new file mode 100644
index 0000000000000000000000000000000000000000..c55f0e858af4ebd246a5d251308ab920b4e01a50
--- /dev/null
+++ b/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_bf16_complex.cu
@@ -0,0 +1,9 @@
+/******************************************************************************
+ * Copyright (c) 2023, Tri Dao.
+ ******************************************************************************/
+
+// Split into multiple files to compile in parallel
+
+#include "selective_scan_bwd_kernel.cuh"
+
+template void selective_scan_bwd_cuda<at::BFloat16, complex_t>(SSMParamsBwd &params, cudaStream_t stream);
\ No newline at end of file
diff --git a/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_bf16_real.cu b/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_bf16_real.cu
new file mode 100644
index 0000000000000000000000000000000000000000..72adaf5cb13c6429e2f345a0a823c6bc3722b95a
--- /dev/null
+++ b/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_bf16_real.cu
@@ -0,0 +1,9 @@
+/******************************************************************************
+ * Copyright (c) 2023, Tri Dao.
+ ******************************************************************************/
+
+// Split into multiple files to compile in parallel
+
+#include "selective_scan_bwd_kernel.cuh"
+
+template void selective_scan_bwd_cuda<at::BFloat16, float>(SSMParamsBwd &params, cudaStream_t stream);
\ No newline at end of file
diff --git a/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp16_complex.cu b/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp16_complex.cu
new file mode 100644
index 0000000000000000000000000000000000000000..df126d7c8d5f9f0862273d2fe21ea15b35757b64
--- /dev/null
+++ b/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp16_complex.cu
@@ -0,0 +1,9 @@
+/******************************************************************************
+ * Copyright (c) 2023, Tri Dao.
+ ******************************************************************************/
+
+// Split into multiple files to compile in parallel
+
+#include "selective_scan_bwd_kernel.cuh"
+
+template void selective_scan_bwd_cuda<at::Half, complex_t>(SSMParamsBwd &params, cudaStream_t stream);
\ No newline at end of file
diff --git a/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp16_real.cu b/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp16_real.cu
new file mode 100644
index 0000000000000000000000000000000000000000..3ff271b50eaff208ae33c16c87ab7aaee76dfd76
--- /dev/null
+++ b/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp16_real.cu
@@ -0,0 +1,9 @@
+/******************************************************************************
+ * Copyright (c) 2023, Tri Dao.
+ ******************************************************************************/
+
+// Split into multiple files to compile in parallel
+
+#include "selective_scan_bwd_kernel.cuh"
+
+template void selective_scan_bwd_cuda<at::Half, float>(SSMParamsBwd &params, cudaStream_t stream);
\ No newline at end of file
diff --git a/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp32_complex.cu b/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp32_complex.cu
new file mode 100644
index 0000000000000000000000000000000000000000..5554902342785b289b81c060a71a51734fc1e6bf
--- /dev/null
+++ b/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp32_complex.cu
@@ -0,0 +1,9 @@
+/******************************************************************************
+ * Copyright (c) 2023, Tri Dao.
+ ******************************************************************************/
+
+// Split into multiple files to compile in parallel
+
+#include "selective_scan_bwd_kernel.cuh"
+
+template void selective_scan_bwd_cuda<float, complex_t>(SSMParamsBwd &params, cudaStream_t stream);
\ No newline at end of file
diff --git a/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp32_real.cu b/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp32_real.cu
new file mode 100644
index 0000000000000000000000000000000000000000..a7ed642231da80c455c0499702cc8a1cb4536ec2
--- /dev/null
+++ b/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp32_real.cu
@@ -0,0 +1,9 @@
+/******************************************************************************
+ * Copyright (c) 2023, Tri Dao.
+ ******************************************************************************/
+
+// Split into multiple files to compile in parallel
+
+#include "selective_scan_bwd_kernel.cuh"
+
+template void selective_scan_bwd_cuda<float, float>(SSMParamsBwd &params, cudaStream_t stream);
\ No newline at end of file
diff --git a/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_kernel.cuh b/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_kernel.cuh
new file mode 100644
index 0000000000000000000000000000000000000000..2ed101148a4b32560111e5a832fc8b5881a4b243
--- /dev/null
+++ b/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_kernel.cuh
@@ -0,0 +1,531 @@
+/******************************************************************************
+ * Copyright (c) 2023, Tri Dao.
+ ******************************************************************************/ + +#pragma once + +#include +#include +#include // For C10_CUDA_CHECK and C10_CUDA_KERNEL_LAUNCH_CHECK +#include // For atomicAdd on complex + +#include +#include +#include +#include + +#include "selective_scan.h" +#include "selective_scan_common.h" +#include "reverse_scan.cuh" +#include "static_switch.h" + +template __device__ __forceinline__ scalar_t conj(scalar_t x); +template<> __device__ __forceinline__ float conj(float x) { return x; } +template<> __device__ __forceinline__ complex_t conj(complex_t x) { return std::conj(x); } + +template +struct Selective_Scan_bwd_kernel_traits { + static_assert(kNItems_ % 4 == 0); + using input_t = input_t_; + using weight_t = weight_t_; + static constexpr int kNThreads = kNThreads_; + static constexpr int kNItems = kNItems_; + static constexpr int kNBytes = sizeof(input_t); + static_assert(kNBytes == 2 || kNBytes == 4); + static constexpr int kNElts = kNBytes == 4 ? 4 : std::min(8, kNItems); + static_assert(kNItems % kNElts == 0); + static constexpr int kNLoads = kNItems / kNElts; + static constexpr bool kIsComplex = std::is_same_v; + static constexpr bool kIsEvenLen = kIsEvenLen_; + static constexpr bool kIsVariableB = kIsVariableB_; + static constexpr bool kIsVariableC = kIsVariableC_; + static constexpr bool kDeltaSoftplus = kDeltaSoftplus_; + static constexpr bool kHasZ = kHasZ_; + // Setting MinBlocksPerMP to be 3 (instead of 2) for 128 threads with float improves occupancy. + // For complex this would lead to massive register spilling, so we keep it at 2. + static constexpr int kMinBlocks = kNThreads == 128 && !kIsComplex ? 3 : 2; + using vec_t = typename BytesToType::Type; + using scan_t = std::conditional_t; + using BlockLoadT = cub::BlockLoad; + using BlockLoadVecT = cub::BlockLoad; + using BlockLoadWeightT = cub::BlockLoad; + using BlockLoadWeightVecT = cub::BlockLoad; + using BlockStoreT = cub::BlockStore; + using BlockStoreVecT = cub::BlockStore; + // using BlockScanT = cub::BlockScan; + using BlockScanT = cub::BlockScan; + // using BlockScanT = cub::BlockScan; + using BlockReverseScanT = BlockReverseScan; + using BlockReduceT = cub::BlockReduce; + using BlockReduceFloatT = cub::BlockReduce; + using BlockReduceComplexT = cub::BlockReduce; + using BlockExchangeT = cub::BlockExchange; + static constexpr int kSmemIOSize = std::max({sizeof(typename BlockLoadT::TempStorage), + sizeof(typename BlockLoadVecT::TempStorage), + (int(kIsVariableB) + int(kIsVariableC)) * sizeof(typename BlockLoadWeightT::TempStorage), + (int(kIsVariableB) + int(kIsVariableC)) * sizeof(typename BlockLoadWeightVecT::TempStorage), + sizeof(typename BlockStoreT::TempStorage), + sizeof(typename BlockStoreVecT::TempStorage)}); + static constexpr int kSmemExchangeSize = (int(kIsVariableB) + int(kIsVariableC)) * sizeof(typename BlockExchangeT::TempStorage); + static constexpr int kSmemReduceSize = sizeof(typename BlockReduceT::TempStorage); + static constexpr int kSmemSize = kSmemIOSize + kSmemExchangeSize + kSmemReduceSize + sizeof(typename BlockScanT::TempStorage) + sizeof(typename BlockReverseScanT::TempStorage); +}; + +template +__global__ __launch_bounds__(Ktraits::kNThreads, Ktraits::kMinBlocks) +void selective_scan_bwd_kernel(SSMParamsBwd params) { + constexpr bool kIsComplex = Ktraits::kIsComplex; + constexpr bool kIsVariableB = Ktraits::kIsVariableB; + constexpr bool kIsVariableC = Ktraits::kIsVariableC; + constexpr bool kDeltaSoftplus = Ktraits::kDeltaSoftplus; + constexpr bool 
kHasZ = Ktraits::kHasZ; + constexpr int kNThreads = Ktraits::kNThreads; + constexpr int kNItems = Ktraits::kNItems; + using input_t = typename Ktraits::input_t; + using weight_t = typename Ktraits::weight_t; + using scan_t = typename Ktraits::scan_t; + + // Shared memory. + extern __shared__ char smem_[]; + // cast to lvalue reference of expected type + // char *smem_loadstorescan = smem_ + 2 * MAX_DSTATE * sizeof(weight_t); + // auto& smem_load = reinterpret_cast(smem_ + 2 * MAX_DSTATE * sizeof(weight_t)); + // auto& smem_load = reinterpret_cast(smem_loadstorescan); + auto& smem_load = reinterpret_cast(smem_); + auto& smem_load_weight = reinterpret_cast(smem_); + auto& smem_load_weight1 = *reinterpret_cast(smem_ + sizeof(typename Ktraits::BlockLoadWeightT::TempStorage)); + auto& smem_store = reinterpret_cast(smem_); + auto& smem_exchange = *reinterpret_cast(smem_ + Ktraits::kSmemIOSize); + auto& smem_exchange1 = *reinterpret_cast(smem_ + Ktraits::kSmemIOSize + sizeof(typename Ktraits::BlockExchangeT::TempStorage)); + auto& smem_reduce = *reinterpret_cast(reinterpret_cast(&smem_exchange) + Ktraits::kSmemExchangeSize); + auto& smem_reduce_float = *reinterpret_cast(&smem_reduce); + auto& smem_reduce_complex = *reinterpret_cast(&smem_reduce); + auto& smem_scan = *reinterpret_cast(reinterpret_cast(&smem_reduce) + Ktraits::kSmemReduceSize); + auto& smem_reverse_scan = *reinterpret_cast(reinterpret_cast(&smem_scan) + sizeof(typename Ktraits::BlockScanT::TempStorage)); + weight_t *smem_delta_a = reinterpret_cast(smem_ + Ktraits::kSmemSize); + scan_t *smem_running_postfix = reinterpret_cast(smem_delta_a + 2 * MAX_DSTATE + kNThreads); + weight_t *smem_da = reinterpret_cast(smem_running_postfix + MAX_DSTATE); + weight_t *smem_dbc = reinterpret_cast(smem_da + MAX_DSTATE); + + const int batch_id = blockIdx.x; + const int dim_id = blockIdx.y; + const int group_id = dim_id / (params.dim_ngroups_ratio); + input_t *u = reinterpret_cast(params.u_ptr) + batch_id * params.u_batch_stride + + dim_id * params.u_d_stride; + input_t *delta = reinterpret_cast(params.delta_ptr) + batch_id * params.delta_batch_stride + + dim_id * params.delta_d_stride; + input_t *dout = reinterpret_cast(params.dout_ptr) + batch_id * params.dout_batch_stride + + dim_id * params.dout_d_stride; + weight_t *A = reinterpret_cast(params.A_ptr) + dim_id * params.A_d_stride; + weight_t *B = reinterpret_cast(params.B_ptr) + dim_id * params.B_d_stride; + input_t *Bvar = reinterpret_cast(params.B_ptr) + batch_id * params.B_batch_stride + group_id * params.B_group_stride; + weight_t *C = reinterpret_cast(params.C_ptr) + dim_id * params.C_d_stride; + input_t *Cvar = reinterpret_cast(params.C_ptr) + batch_id * params.C_batch_stride + group_id * params.C_group_stride; + weight_t *dA = reinterpret_cast(params.dA_ptr) + dim_id * params.dA_d_stride; + weight_t *dB = reinterpret_cast(params.dB_ptr) + + (!kIsVariableB ? dim_id * params.dB_d_stride : batch_id * (!kIsComplex ? params.dB_batch_stride : params.dB_batch_stride / 2) + group_id * params.dB_group_stride); + weight_t *dC = reinterpret_cast(params.dC_ptr) + + (!kIsVariableC ? dim_id * params.dC_d_stride : batch_id * (!kIsComplex ? params.dC_batch_stride : params.dC_batch_stride / 2) + group_id * params.dC_group_stride); + float *dD = params.dD_ptr == nullptr ? nullptr : reinterpret_cast(params.dD_ptr) + dim_id; + float D_val = params.D_ptr == nullptr ? 0 : reinterpret_cast(params.D_ptr)[dim_id]; + float *ddelta_bias = params.ddelta_bias_ptr == nullptr ? 
nullptr : reinterpret_cast(params.ddelta_bias_ptr) + dim_id; + float delta_bias = params.delta_bias_ptr == nullptr ? 0 : reinterpret_cast(params.delta_bias_ptr)[dim_id]; + scan_t *x = params.x_ptr == nullptr + ? nullptr + : reinterpret_cast(params.x_ptr) + (batch_id * params.dim + dim_id) * (params.n_chunks) * params.dstate; + float dD_val = 0; + float ddelta_bias_val = 0; + + constexpr int kChunkSize = kNThreads * kNItems; + u += (params.n_chunks - 1) * kChunkSize; + delta += (params.n_chunks - 1) * kChunkSize; + dout += (params.n_chunks - 1) * kChunkSize; + Bvar += (params.n_chunks - 1) * kChunkSize * (!kIsComplex ? 1 : 2); + Cvar += (params.n_chunks - 1) * kChunkSize * (!kIsComplex ? 1 : 2); + for (int chunk = params.n_chunks - 1; chunk >= 0; --chunk) { + input_t u_vals[kNItems]; + input_t delta_vals_load[kNItems]; + input_t dout_vals_load[kNItems]; + __syncthreads(); + load_input(u, u_vals, smem_load, params.seqlen - chunk * kChunkSize); + u -= kChunkSize; + __syncthreads(); + load_input(delta, delta_vals_load, smem_load, params.seqlen - chunk * kChunkSize); + // Will reload delta at the same location if kDeltaSoftplus + if constexpr (!kDeltaSoftplus) { delta -= kChunkSize; } + __syncthreads(); + load_input(dout, dout_vals_load, smem_load, params.seqlen - chunk * kChunkSize); + dout -= kChunkSize; + + float dout_vals[kNItems], delta_vals[kNItems]; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + dout_vals[i] = float(dout_vals_load[i]); + delta_vals[i] = float(delta_vals_load[i]) + delta_bias; + if constexpr (kDeltaSoftplus) { + delta_vals[i] = delta_vals[i] <= 20.f ? log1pf(expf(delta_vals[i])) : delta_vals[i]; + } + } + + if constexpr (kHasZ) { + input_t *z = reinterpret_cast(params.z_ptr) + batch_id * params.z_batch_stride + + dim_id * params.z_d_stride + chunk * kChunkSize; + input_t *out = reinterpret_cast(params.out_ptr) + batch_id * params.out_batch_stride + + dim_id * params.out_d_stride + chunk * kChunkSize; + input_t *dz = reinterpret_cast(params.dz_ptr) + batch_id * params.dz_batch_stride + + dim_id * params.dz_d_stride + chunk * kChunkSize; + input_t z_vals[kNItems], out_vals[kNItems]; + __syncthreads(); + load_input(z, z_vals, smem_load, params.seqlen - chunk * kChunkSize); + __syncthreads(); + load_input(out, out_vals, smem_load, params.seqlen - chunk * kChunkSize); + float dz_vals[kNItems], z_silu_vals[kNItems]; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + float z_val = z_vals[i]; + float z_sigmoid_val = 1.0f / (1.0f + expf(-z_val)); + z_silu_vals[i] = z_val * z_sigmoid_val; + dz_vals[i] = dout_vals[i] * float(out_vals[i]) * z_sigmoid_val + * (1.0f + z_val * (1.0f - z_sigmoid_val)); + dout_vals[i] *= z_silu_vals[i]; + } + __syncthreads(); + store_output(dz, dz_vals, smem_store, params.seqlen - chunk * kChunkSize); + if (params.out_z_ptr != nullptr) { // Recompute and store out_z + float out_z_vals[kNItems]; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { out_z_vals[i] = float(out_vals[i]) * z_silu_vals[i]; } + // if (blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 0) { + // printf("out_val=%f, z_silu_val = %f, out_z_val = %f\n", float(out_vals[0]), z_silu_vals[0], out_z_vals[0]); + // } + input_t *out_z = reinterpret_cast(params.out_z_ptr) + batch_id * params.out_z_batch_stride + + dim_id * params.out_z_d_stride + chunk * kChunkSize; + __syncthreads(); + store_output(out_z, out_z_vals, smem_store, params.seqlen - chunk * kChunkSize); + } + } + + float du_vals[kNItems]; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { du_vals[i] = 
D_val * dout_vals[i]; } + #pragma unroll + for (int i = 0; i < kNItems; ++i) { dD_val += dout_vals[i] * float(u_vals[i]); } + + float ddelta_vals[kNItems] = {0}; + __syncthreads(); + for (int state_idx = 0; state_idx < params.dstate; ++state_idx) { + const weight_t A_val = A[state_idx * params.A_dstate_stride]; + // Multiply the real part of A with LOG2E so we can use exp2f instead of expf. + weight_t A_scaled; + constexpr float kLog2e = M_LOG2E; + if constexpr (!kIsComplex) { + A_scaled = A_val * kLog2e; + } else { + A_scaled = complex_t(A_val.real_ * kLog2e, A_val.imag_); + } + weight_t B_val, C_val; + weight_t B_vals[kNItems], C_vals[kNItems]; + if constexpr (!kIsVariableB) { + B_val = B[state_idx * params.B_dstate_stride]; + } else { + load_weight(Bvar + state_idx * params.B_dstate_stride, B_vals, + smem_load_weight, (params.seqlen - chunk * kChunkSize) * (!kIsComplex ? 1 : 2)); + } + if constexpr (!kIsVariableC) { + C_val = C[state_idx * params.C_dstate_stride]; + } else { + auto &smem_load_weight_C = !kIsVariableB ? smem_load_weight : smem_load_weight1; + load_weight(Cvar + state_idx * params.C_dstate_stride, C_vals, + smem_load_weight_C, (params.seqlen - chunk * kChunkSize) * (!kIsComplex ? 1 : 2)); + } + // const weight_t A_val = smem_a[state_idx]; + scan_t thread_data[kNItems], thread_reverse_data[kNItems]; + if constexpr (!kIsComplex) { + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + const float delta_a_exp = exp2f(delta_vals[i] * A_scaled); + thread_data[i] = make_float2(delta_a_exp, !kIsVariableB ? delta_vals[i] * float(u_vals[i]) : delta_vals[i] * float(u_vals[i]) * B_vals[i]); + if (i == 0) { + smem_delta_a[threadIdx.x == 0 ? state_idx + (chunk % 2) * MAX_DSTATE : threadIdx.x + 2 * MAX_DSTATE] = delta_a_exp; + } else { + thread_reverse_data[i - 1].x = delta_a_exp; + } + thread_reverse_data[i].y = dout_vals[i] * + (!kIsVariableC + ? (!kIsVariableB ? B_val * C_val : C_val) + : (!kIsVariableB ? B_val * C_vals[i] : C_vals[i])); + } + __syncthreads(); + thread_reverse_data[kNItems - 1].x = threadIdx.x == kNThreads - 1 + ? (chunk == params.n_chunks - 1 ? 1.f : smem_delta_a[state_idx + ((chunk + 1) % 2) * MAX_DSTATE]) + : smem_delta_a[threadIdx.x + 1 + 2 * MAX_DSTATE]; + // Initialize running total + scan_t running_prefix = chunk > 0 && threadIdx.x % 32 == 0 ? x[(chunk - 1) * params.dstate + state_idx] : make_float2(1.f, 0.f); + SSMScanPrefixCallbackOp prefix_op(running_prefix); + Ktraits::BlockScanT(smem_scan).InclusiveScan( + thread_data, thread_data, SSMScanOp(), prefix_op + ); + scan_t running_postfix = chunk < params.n_chunks - 1 && threadIdx.x % 32 == 0 ? smem_running_postfix[state_idx] : make_float2(1.f, 0.f); + SSMScanPrefixCallbackOp postfix_op(running_postfix); + Ktraits::BlockReverseScanT(smem_reverse_scan).InclusiveReverseScan( + thread_reverse_data, thread_reverse_data, SSMScanOp(), postfix_op + ); + if (threadIdx.x == 0) { smem_running_postfix[state_idx] = postfix_op.running_prefix; } + weight_t dA_val = 0, dBC_val = 0; + weight_t dB_vals[kNItems], dC_vals[kNItems]; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + const float dx = thread_reverse_data[i].y; + const float ddelta_u = !kIsVariableB ? dx : dx * B_vals[i]; + du_vals[i] += ddelta_u * delta_vals[i]; + const float a = thread_data[i].y - (!kIsVariableB ? 
delta_vals[i] * float(u_vals[i]) : delta_vals[i] * float(u_vals[i]) * B_vals[i]); + ddelta_vals[i] += ddelta_u * float(u_vals[i]) + dx * A_val * a; + dA_val += dx * delta_vals[i] * a; + if constexpr (!kIsVariableB || !kIsVariableC) { + if constexpr (!kIsVariableB) { // dBC_val is dB_val + dBC_val += dout_vals[i] * (!kIsVariableC ? thread_data[i].y : thread_data[i].y * C_vals[i]); + } else { // dBC_val is dC_val + dBC_val += dout_vals[i] * thread_data[i].y; + } + } + if constexpr (kIsVariableB) { dB_vals[i] = dx * delta_vals[i] * float(u_vals[i]); } + if constexpr (kIsVariableC) { + dC_vals[i] = dout_vals[i] * (!kIsVariableB ? thread_data[i].y * B_val : thread_data[i].y); + } + } + // Block-exchange to make the atomicAdd's coalesced, otherwise they're much slower + if constexpr (kIsVariableB || kIsVariableC) { + if constexpr (kIsVariableB) { + Ktraits::BlockExchangeT(smem_exchange).BlockedToStriped(dB_vals, dB_vals); + } + if constexpr (kIsVariableC) { + auto &smem_exchange_C = !kIsVariableB ? smem_exchange : smem_exchange1; + Ktraits::BlockExchangeT(smem_exchange_C).BlockedToStriped(dC_vals, dC_vals); + } + const int seqlen_remaining = params.seqlen - chunk * kChunkSize - threadIdx.x; + weight_t *dB_cur = dB + state_idx * params.dB_dstate_stride + chunk * kChunkSize + threadIdx.x; + weight_t *dC_cur = dC + state_idx * params.dC_dstate_stride + chunk * kChunkSize + threadIdx.x; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + if (i * kNThreads < seqlen_remaining) { + if constexpr (kIsVariableB) { gpuAtomicAdd(dB_cur + i * kNThreads, dB_vals[i]); } + if constexpr (kIsVariableC) { gpuAtomicAdd(dC_cur + i * kNThreads, dC_vals[i]); } + } + } + } + if constexpr (!kIsVariableB || !kIsVariableC) { + float2 dA_dBC_val = make_float2(dA_val, dBC_val); + dA_dBC_val = Ktraits::BlockReduceT(smem_reduce).Sum(dA_dBC_val); + dA_val = dA_dBC_val.x; + if (threadIdx.x == 0) { + smem_dbc[state_idx] = chunk == params.n_chunks - 1 ? dA_dBC_val.y : dA_dBC_val.y + smem_dbc[state_idx]; + } + } else { + dA_val = Ktraits::BlockReduceFloatT(smem_reduce_float).Sum(dA_val); + } + if (threadIdx.x == 0) { + smem_da[state_idx] = chunk == params.n_chunks - 1 ? dA_val : dA_val + smem_da[state_idx]; + } + } else { + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + // Pytorch's implementation of complex exp (which calls thrust) is very slow + complex_t delta_a_exp = cexp2f(delta_vals[i] * A_scaled); + weight_t B_delta_u_val = !kIsVariableB ? delta_vals[i] * float(u_vals[i]) : B_vals[i] * delta_vals[i] * float(u_vals[i]); + thread_data[i] = make_float4(delta_a_exp.real_, delta_a_exp.imag_, B_delta_u_val.real_, B_delta_u_val.imag_); + if (i == 0) { + smem_delta_a[threadIdx.x == 0 ? state_idx + (chunk % 2) * MAX_DSTATE : threadIdx.x + 2 * MAX_DSTATE] = delta_a_exp; + } else { + thread_reverse_data[i - 1].x = delta_a_exp.real_; + thread_reverse_data[i - 1].y = -delta_a_exp.imag_; + } + complex_t dout_BC = 2 * dout_vals[i] + * conj(!kIsVariableC + ? (!kIsVariableB ? B_val * C_val : C_val) + : (!kIsVariableB ? B_val * C_vals[i] : C_vals[i])); + thread_reverse_data[i].z = dout_BC.real_; + thread_reverse_data[i].w = dout_BC.imag_; + } + __syncthreads(); + complex_t delta_a_exp = threadIdx.x == kNThreads - 1 + ? (chunk == params.n_chunks - 1 ? 
1.f : smem_delta_a[state_idx + ((chunk + 1) % 2) * MAX_DSTATE]) + : smem_delta_a[threadIdx.x + 1 + 2 * MAX_DSTATE]; + thread_reverse_data[kNItems - 1].x = delta_a_exp.real_; + thread_reverse_data[kNItems - 1].y = -delta_a_exp.imag_; + // Initialize running total + scan_t running_prefix = chunk > 0 && threadIdx.x % 32 == 0 ? x[(chunk - 1) * params.dstate + state_idx] : make_float4(1.f, 0.f, 0.f, 0.f); + SSMScanPrefixCallbackOp prefix_op(running_prefix); + Ktraits::BlockScanT(smem_scan).InclusiveScan( + thread_data, thread_data, SSMScanOp(), prefix_op + ); + scan_t running_postfix = chunk < params.n_chunks - 1 && threadIdx.x % 32 == 0 ? smem_running_postfix[state_idx] : make_float4(1.f, 0.f, 0.f, 0.f); + SSMScanPrefixCallbackOp postfix_op(running_postfix); + Ktraits::BlockReverseScanT(smem_reverse_scan).InclusiveReverseScan( + thread_reverse_data, thread_reverse_data, SSMScanOp(), postfix_op + ); + if (threadIdx.x == 0) { smem_running_postfix[state_idx] = postfix_op.running_prefix; } + weight_t dA_val = 0, dBC_val = 0; + weight_t dB_vals[kNItems], dC_vals[kNItems]; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + complex_t x = complex_t(thread_data[i].z, thread_data[i].w); + complex_t dx = complex_t(thread_reverse_data[i].z, thread_reverse_data[i].w); + float ddelta_u = !kIsVariableB ? dx.real_ : (dx * conj(B_vals[i])).real_; + if constexpr (!kIsVariableB || !kIsVariableC) { + if constexpr (!kIsVariableB) { // dBC_val is dB_val + dBC_val += (2 * dout_vals[i]) * conj(!kIsVariableC ? x : x * C_vals[i]); + } else { // dBC_val is dC_val + dBC_val += (2 * dout_vals[i]) * conj(x); + } + } + const complex_t a_conj = conj(x - (!kIsVariableB ? delta_vals[i] * float(u_vals[i]) : delta_vals[i] * float(u_vals[i]) * B_vals[i])); + du_vals[i] += ddelta_u * delta_vals[i]; + ddelta_vals[i] += ddelta_u * float(u_vals[i]) + (dx * conj(A_val) * a_conj).real_; + dA_val += delta_vals[i] * dx * a_conj; + if constexpr (kIsVariableB) { dB_vals[i] = dx * delta_vals[i] * float(u_vals[i]); } + if constexpr (kIsVariableC) { + dC_vals[i] = (2 * dout_vals[i]) * conj(!kIsVariableB ? x * B_val : x); + } + } + // Block-exchange to make the atomicAdd's coalesced, otherwise they're much slower + if constexpr (kIsVariableB || kIsVariableC) { + float dB_vals_f[kNItems * 2], dC_vals_f[kNItems * 2]; + if constexpr (kIsVariableB) { + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + dB_vals_f[i * 2] = dB_vals[i].real_; + dB_vals_f[i * 2 + 1] = dB_vals[i].imag_; + } + Ktraits::BlockExchangeT(smem_exchange).BlockedToStriped(dB_vals_f, dB_vals_f); + } + if constexpr (kIsVariableC) { + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + dC_vals_f[i * 2] = dC_vals[i].real_; + dC_vals_f[i * 2 + 1] = dC_vals[i].imag_; + } + auto &smem_exchange_C = !kIsVariableB ? 
smem_exchange : smem_exchange1; + Ktraits::BlockExchangeT(smem_exchange_C).BlockedToStriped(dC_vals_f, dC_vals_f); + } + const int seqlen_remaining = (params.seqlen - chunk * kChunkSize) * 2 - threadIdx.x; + float *dB_cur = reinterpret_cast(dB) + state_idx * params.dB_dstate_stride + chunk * kChunkSize * 2 + threadIdx.x; + float *dC_cur = reinterpret_cast(dC) + state_idx * params.dC_dstate_stride + chunk * kChunkSize * 2 + threadIdx.x; + #pragma unroll + for (int i = 0; i < kNItems * 2; ++i) { + if (i * kNThreads < seqlen_remaining) { + if constexpr (kIsVariableB) { gpuAtomicAdd(dB_cur + i * kNThreads, dB_vals_f[i]); } + if constexpr (kIsVariableC) { gpuAtomicAdd(dC_cur + i * kNThreads, dC_vals_f[i]); } + } + } + } + if constexpr (!kIsVariableB || !kIsVariableC) { + float4 dA_dBC_val = make_float4(dA_val.real_, dA_val.imag_, dBC_val.real_, dBC_val.imag_); + dA_dBC_val = Ktraits::BlockReduceT(smem_reduce).Sum(dA_dBC_val); + dA_val = complex_t(dA_dBC_val.x, dA_dBC_val.y); + dBC_val = complex_t(dA_dBC_val.z, dA_dBC_val.w); + if (threadIdx.x == 0) { + smem_dbc[state_idx] = chunk == params.n_chunks - 1 ? dBC_val : dBC_val + smem_dbc[state_idx]; + } + } else { + dA_val = Ktraits::BlockReduceComplexT(smem_reduce_complex).Sum(dA_val); + } + if (threadIdx.x == 0) { + smem_da[state_idx] = chunk == params.n_chunks - 1 ? dA_val : dA_val + smem_da[state_idx]; + } + } + } + + if constexpr (kDeltaSoftplus) { + __syncthreads(); + input_t delta_vals_load[kNItems]; + load_input(delta, delta_vals_load, smem_load, params.seqlen - chunk * kChunkSize); + delta -= kChunkSize; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + float delta_val = float(delta_vals_load[i]) + delta_bias; + float delta_val_neg_exp = expf(-delta_val); + ddelta_vals[i] = delta_val <= 20.f + ? ddelta_vals[i] / (1.f + delta_val_neg_exp) + : ddelta_vals[i]; + } + } + for (int i = 0; i < kNItems; ++i) { ddelta_bias_val += ddelta_vals[i]; } + + input_t *du = reinterpret_cast(params.du_ptr) + batch_id * params.du_batch_stride + + dim_id * params.du_d_stride + chunk * kChunkSize; + input_t *ddelta = reinterpret_cast(params.ddelta_ptr) + batch_id * params.ddelta_batch_stride + + dim_id * params.ddelta_d_stride + chunk * kChunkSize; + __syncthreads(); + store_output(du, du_vals, smem_store, params.seqlen - chunk * kChunkSize); + __syncthreads(); + store_output(ddelta, ddelta_vals, smem_store, params.seqlen - chunk * kChunkSize); + + Bvar -= kChunkSize * (!kIsComplex ? 1 : 2); + Cvar -= kChunkSize * (!kIsComplex ? 1 : 2); + } + if (params.dD_ptr != nullptr) { + dD_val = Ktraits::BlockReduceFloatT(smem_reduce_float).Sum(dD_val); + if (threadIdx.x == 0) { gpuAtomicAdd(dD, dD_val); } + } + if (params.ddelta_bias_ptr != nullptr) { + __syncthreads(); + ddelta_bias_val = Ktraits::BlockReduceFloatT(smem_reduce_float).Sum(ddelta_bias_val); + if (threadIdx.x == 0) { gpuAtomicAdd(ddelta_bias, ddelta_bias_val); } + } + for (int state_idx = threadIdx.x; state_idx < params.dstate; state_idx += blockDim.x) { + gpuAtomicAdd(&(dA[state_idx * params.dA_dstate_stride]), smem_da[state_idx]); + weight_t dBC_val; + if (!kIsVariableB || !kIsVariableC) { dBC_val = smem_dbc[state_idx]; } + if constexpr (!kIsVariableB) { + gpuAtomicAdd(&(dB[state_idx * params.dB_dstate_stride]), + !kIsVariableC ? dBC_val * conj(C[state_idx * params.C_dstate_stride]) : dBC_val); + } + if constexpr (!kIsVariableC) { + gpuAtomicAdd(&(dC[state_idx * params.dC_dstate_stride]), + !kIsVariableB ? 
dBC_val * conj(B[state_idx * params.B_dstate_stride]) : dBC_val); + } + } +} + +template +void selective_scan_bwd_launch(SSMParamsBwd ¶ms, cudaStream_t stream) { + BOOL_SWITCH(params.seqlen % (kNThreads * kNItems) == 0, kIsEvenLen, [&] { + BOOL_SWITCH(params.is_variable_B, kIsVariableB, [&] { + BOOL_SWITCH(params.is_variable_C, kIsVariableC, [&] { + BOOL_SWITCH(params.delta_softplus, kDeltaSoftplus, [&] { + BOOL_SWITCH(params.z_ptr != nullptr , kHasZ, [&] { + using Ktraits = Selective_Scan_bwd_kernel_traits; + // using Ktraits = Selective_Scan_bwd_kernel_traits; + // TODO: check this + constexpr int kSmemSize = Ktraits::kSmemSize + MAX_DSTATE * sizeof(typename Ktraits::scan_t) + (kNThreads + 4 * MAX_DSTATE) * sizeof(typename Ktraits::weight_t); + // printf("smem_size = %d\n", kSmemSize); + dim3 grid(params.batch, params.dim); + auto kernel = &selective_scan_bwd_kernel; + if (kSmemSize >= 48 * 1024) { + C10_CUDA_CHECK(cudaFuncSetAttribute( + kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize)); + } + kernel<<>>(params); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); + }); + }); + }); + }); +} + +template +void selective_scan_bwd_cuda(SSMParamsBwd ¶ms, cudaStream_t stream) { + if (params.seqlen <= 128) { + selective_scan_bwd_launch<32, 4, input_t, weight_t>(params, stream); + } else if (params.seqlen <= 256) { + selective_scan_bwd_launch<32, 8, input_t, weight_t>(params, stream); + } else if (params.seqlen <= 512) { + selective_scan_bwd_launch<32, 16, input_t, weight_t>(params, stream); + } else if (params.seqlen <= 1024) { + selective_scan_bwd_launch<64, 16, input_t, weight_t>(params, stream); + } else { + selective_scan_bwd_launch<128, 16, input_t, weight_t>(params, stream); + } +} \ No newline at end of file diff --git a/SegMamba/mamba/csrc/selective_scan/selective_scan_common.h b/SegMamba/mamba/csrc/selective_scan/selective_scan_common.h new file mode 100644 index 0000000000000000000000000000000000000000..9140dcdf3b68ad2de95bcd3fd9543a9d320cef68 --- /dev/null +++ b/SegMamba/mamba/csrc/selective_scan/selective_scan_common.h @@ -0,0 +1,221 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. 
+ ******************************************************************************/ + +#pragma once + +#include +#include +#include // For scalar_value_type + +#define MAX_DSTATE 256 + +using complex_t = c10::complex; + +inline __device__ float2 operator+(const float2 & a, const float2 & b){ + return {a.x + b.x, a.y + b.y}; +} + +inline __device__ float3 operator+(const float3 &a, const float3 &b) { + return {a.x + b.x, a.y + b.y, a.z + b.z}; +} + +inline __device__ float4 operator+(const float4 & a, const float4 & b){ + return {a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w}; +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +template struct BytesToType {}; + +template<> struct BytesToType<16> { + using Type = uint4; + static_assert(sizeof(Type) == 16); +}; + +template<> struct BytesToType<8> { + using Type = uint64_t; + static_assert(sizeof(Type) == 8); +}; + +template<> struct BytesToType<4> { + using Type = uint32_t; + static_assert(sizeof(Type) == 4); +}; + +template<> struct BytesToType<2> { + using Type = uint16_t; + static_assert(sizeof(Type) == 2); +}; + +template<> struct BytesToType<1> { + using Type = uint8_t; + static_assert(sizeof(Type) == 1); +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct Converter{ + static inline __device__ void to_float(const scalar_t (&src)[N], float (&dst)[N]) { + #pragma unroll + for (int i = 0; i < N; ++i) { dst[i] = src[i]; } + } +}; + +template +struct Converter{ + static inline __device__ void to_float(const at::Half (&src)[N], float (&dst)[N]) { + static_assert(N % 2 == 0); + auto &src2 = reinterpret_cast(src); + auto &dst2 = reinterpret_cast(dst); + #pragma unroll + for (int i = 0; i < N / 2; ++i) { dst2[i] = __half22float2(src2[i]); } + } +}; + +#if __CUDA_ARCH__ >= 800 +template +struct Converter{ + static inline __device__ void to_float(const at::BFloat16 (&src)[N], float (&dst)[N]) { + static_assert(N % 2 == 0); + auto &src2 = reinterpret_cast(src); + auto &dst2 = reinterpret_cast(dst); + #pragma unroll + for (int i = 0; i < N / 2; ++i) { dst2[i] = __bfloat1622float2(src2[i]); } + } +}; +#endif + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +// From https://stackoverflow.com/questions/9860711/cucomplex-h-and-exp +// and https://forums.developer.nvidia.com/t/complex-number-exponential-function/24696 +__device__ __forceinline__ complex_t cexp2f(complex_t z) { + float t = exp2f(z.real_); + float c, s; + sincosf(z.imag_, &s, &c); + return complex_t(c * t, s * t); +} + +__device__ __forceinline__ complex_t cexpf(complex_t z) { + float t = expf(z.real_); + float c, s; + sincosf(z.imag_, &s, &c); + return complex_t(c * t, s * t); +} + +template struct SSMScanOp; + +template<> +struct SSMScanOp { + __device__ __forceinline__ float2 operator()(const float2 &ab0, const float2 &ab1) const { + return make_float2(ab1.x * ab0.x, ab1.x * ab0.y + ab1.y); + } +}; + +template<> +struct SSMScanOp { + __device__ __forceinline__ float4 operator()(const float4 &ab0, const float4 &ab1) const { + complex_t a0 = complex_t(ab0.x, ab0.y); + complex_t b0 = complex_t(ab0.z, ab0.w); + complex_t a1 = complex_t(ab1.x, ab1.y); + complex_t b1 = complex_t(ab1.z, ab1.w); + complex_t out_a = a1 * a0; + complex_t out_b = a1 * b0 + b1; + return make_float4(out_a.real_, out_a.imag_, out_b.real_, out_b.imag_); + } +}; + +// A stateful callback functor that maintains a running prefix to be 
applied +// during consecutive scan operations. +template struct SSMScanPrefixCallbackOp { + using scan_t = std::conditional_t, float2, float4>; + scan_t running_prefix; + // Constructor + __device__ SSMScanPrefixCallbackOp(scan_t running_prefix_) : running_prefix(running_prefix_) {} + // Callback operator to be entered by the first warp of threads in the block. + // Thread-0 is responsible for returning a value for seeding the block-wide scan. + __device__ scan_t operator()(scan_t block_aggregate) { + scan_t old_prefix = running_prefix; + running_prefix = SSMScanOp()(running_prefix, block_aggregate); + return old_prefix; + } +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +template +inline __device__ void load_input(typename Ktraits::input_t *u, + typename Ktraits::input_t (&u_vals)[Ktraits::kNItems], + typename Ktraits::BlockLoadT::TempStorage &smem_load, + int seqlen) { + if constexpr (Ktraits::kIsEvenLen) { + auto& smem_load_vec = reinterpret_cast(smem_load); + using vec_t = typename Ktraits::vec_t; + Ktraits::BlockLoadVecT(smem_load_vec).Load( + reinterpret_cast(u), + reinterpret_cast(u_vals) + ); + } else { + Ktraits::BlockLoadT(smem_load).Load(u, u_vals, seqlen, 0.f); + } +} + +template +inline __device__ void load_weight(typename Ktraits::input_t *Bvar, + typename Ktraits::weight_t (&B_vals)[Ktraits::kNItems], + typename Ktraits::BlockLoadWeightT::TempStorage &smem_load_weight, + int seqlen) { + constexpr int kNItems = Ktraits::kNItems; + if constexpr (!Ktraits::kIsComplex) { + typename Ktraits::input_t B_vals_load[kNItems]; + if constexpr (Ktraits::kIsEvenLen) { + auto& smem_load_weight_vec = reinterpret_cast(smem_load_weight); + using vec_t = typename Ktraits::vec_t; + Ktraits::BlockLoadWeightVecT(smem_load_weight_vec).Load( + reinterpret_cast(Bvar), + reinterpret_cast(B_vals_load) + ); + } else { + Ktraits::BlockLoadWeightT(smem_load_weight).Load(Bvar, B_vals_load, seqlen, 0.f); + } + // #pragma unroll + // for (int i = 0; i < kNItems; ++i) { B_vals[i] = B_vals_load[i]; } + Converter::to_float(B_vals_load, B_vals); + } else { + typename Ktraits::input_t B_vals_load[kNItems * 2]; + if constexpr (Ktraits::kIsEvenLen) { + auto& smem_load_weight_vec = reinterpret_cast(smem_load_weight); + using vec_t = typename Ktraits::vec_t; + Ktraits::BlockLoadWeightVecT(smem_load_weight_vec).Load( + reinterpret_cast(Bvar), + reinterpret_cast(B_vals_load) + ); + } else { + Ktraits::BlockLoadWeightT(smem_load_weight).Load(Bvar, B_vals_load, seqlen, 0.f); + } + #pragma unroll + for (int i = 0; i < kNItems; ++i) { B_vals[i] = complex_t(B_vals_load[i * 2], B_vals_load[i * 2 + 1]); } + } +} + +template +inline __device__ void store_output(typename Ktraits::input_t *out, + const float (&out_vals)[Ktraits::kNItems], + typename Ktraits::BlockStoreT::TempStorage &smem_store, + int seqlen) { + typename Ktraits::input_t write_vals[Ktraits::kNItems]; + #pragma unroll + for (int i = 0; i < Ktraits::kNItems; ++i) { write_vals[i] = out_vals[i]; } + if constexpr (Ktraits::kIsEvenLen) { + auto& smem_store_vec = reinterpret_cast(smem_store); + using vec_t = typename Ktraits::vec_t; + Ktraits::BlockStoreVecT(smem_store_vec).Store( + reinterpret_cast(out), + reinterpret_cast(write_vals) + ); + } else { + Ktraits::BlockStoreT(smem_store).Store(out, write_vals, seqlen); + } +} diff --git a/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_bf16.cu b/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_bf16.cu new file mode 100644 index 
0000000000000000000000000000000000000000..2b8615b1d522c119125d4cb6ff3dce42f2bd4659 --- /dev/null +++ b/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_bf16.cu @@ -0,0 +1,10 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +// Split into multiple files to compile in paralell + +#include "selective_scan_fwd_kernel.cuh" + +template void selective_scan_fwd_cuda(SSMParamsBase ¶ms, cudaStream_t stream); +template void selective_scan_fwd_cuda(SSMParamsBase ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_fp16.cu b/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_fp16.cu new file mode 100644 index 0000000000000000000000000000000000000000..015e2a0eff633daf2693e43a2648008652a38c7c --- /dev/null +++ b/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_fp16.cu @@ -0,0 +1,10 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +// Split into multiple files to compile in paralell + +#include "selective_scan_fwd_kernel.cuh" + +template void selective_scan_fwd_cuda(SSMParamsBase ¶ms, cudaStream_t stream); +template void selective_scan_fwd_cuda(SSMParamsBase ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_fp32.cu b/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_fp32.cu new file mode 100644 index 0000000000000000000000000000000000000000..c142fe0208ea784679122ba04997d3432b05efcc --- /dev/null +++ b/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_fp32.cu @@ -0,0 +1,10 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +// Split into multiple files to compile in paralell + +#include "selective_scan_fwd_kernel.cuh" + +template void selective_scan_fwd_cuda(SSMParamsBase ¶ms, cudaStream_t stream); +template void selective_scan_fwd_cuda(SSMParamsBase ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_kernel.cuh b/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_kernel.cuh new file mode 100644 index 0000000000000000000000000000000000000000..440a209108bfe120c73d123bbf0b82ccf43a5638 --- /dev/null +++ b/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_kernel.cuh @@ -0,0 +1,345 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +#pragma once + +#include +#include +#include // For C10_CUDA_CHECK and C10_CUDA_KERNEL_LAUNCH_CHECK + +#include +#include +#include + +#include "selective_scan.h" +#include "selective_scan_common.h" +#include "static_switch.h" + +template +struct Selective_Scan_fwd_kernel_traits { + static_assert(kNItems_ % 4 == 0); + using input_t = input_t_; + using weight_t = weight_t_; + static constexpr int kNThreads = kNThreads_; + // Setting MinBlocksPerMP to be 3 (instead of 2) for 128 threads improves occupancy. + static constexpr int kMinBlocks = kNThreads < 128 ? 
5 : 3; + static constexpr int kNItems = kNItems_; + static constexpr int kNRows = kNRows_; + static constexpr int kNBytes = sizeof(input_t); + static_assert(kNBytes == 2 || kNBytes == 4); + static constexpr int kNElts = kNBytes == 4 ? 4 : std::min(8, kNItems); + static_assert(kNItems % kNElts == 0); + static constexpr int kNLoads = kNItems / kNElts; + static constexpr bool kIsComplex = std::is_same_v; + static constexpr bool kIsEvenLen = kIsEvenLen_; + static constexpr bool kIsVariableB = kIsVariableB_; + static constexpr bool kIsVariableC = kIsVariableC_; + static constexpr bool kHasZ = kHasZ_; + + static constexpr bool kDirectIO = kIsEvenLen && kNLoads == 1; + + using vec_t = typename BytesToType::Type; + using scan_t = std::conditional_t; + using BlockLoadT = cub::BlockLoad; + using BlockLoadVecT = cub::BlockLoad; + using BlockLoadWeightT = cub::BlockLoad; + using BlockLoadWeightVecT = cub::BlockLoad; + using BlockStoreT = cub::BlockStore; + using BlockStoreVecT = cub::BlockStore; + // using BlockScanT = cub::BlockScan; + // using BlockScanT = cub::BlockScan; + using BlockScanT = cub::BlockScan; + static constexpr int kSmemIOSize = std::max({sizeof(typename BlockLoadT::TempStorage), + sizeof(typename BlockLoadVecT::TempStorage), + (int(kIsVariableB) + int(kIsVariableC)) * sizeof(typename BlockLoadWeightT::TempStorage), + (int(kIsVariableB) + int(kIsVariableC)) * sizeof(typename BlockLoadWeightVecT::TempStorage), + sizeof(typename BlockStoreT::TempStorage), + sizeof(typename BlockStoreVecT::TempStorage)}); + static constexpr int kSmemSize = kSmemIOSize + sizeof(typename BlockScanT::TempStorage); +}; + +template +__global__ __launch_bounds__(Ktraits::kNThreads, Ktraits::kMinBlocks) +void selective_scan_fwd_kernel(SSMParamsBase params) { + constexpr bool kIsComplex = Ktraits::kIsComplex; + constexpr bool kIsVariableB = Ktraits::kIsVariableB; + constexpr bool kIsVariableC = Ktraits::kIsVariableC; + constexpr bool kHasZ = Ktraits::kHasZ; + constexpr int kNThreads = Ktraits::kNThreads; + constexpr int kNItems = Ktraits::kNItems; + constexpr int kNRows = Ktraits::kNRows; + constexpr bool kDirectIO = Ktraits::kDirectIO; + using input_t = typename Ktraits::input_t; + using weight_t = typename Ktraits::weight_t; + using scan_t = typename Ktraits::scan_t; + + // Shared memory. 
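As an aside for readers less familiar with CUB block scans: the kernel that follows combines per-timestep pairs with `SSMScanOp`, whose composition rule `(a0, b0) ∘ (a1, b1) = (a1*a0, a1*b0 + b1)` is the associative form of the recurrence `h_t = a_t * h_{t-1} + b_t`, with `(1, 0)` as the identity used both for padding and for seeding `SSMScanPrefixCallbackOp` across chunks. In the kernel, `a_t` is `exp2f(delta * A)` after `A` has been pre-multiplied by `M_LOG2E` (i.e. `exp(delta * A)`), and `b_t` is `delta * u`, or `B * delta * u` when `B` varies along the sequence. Below is a minimal, non-authoritative Python sketch of the same idea (plain floats, no CUDA), not part of the package:

```
def ssm_combine(left, right):
    # Same composition as SSMScanOp<float>: (a, b) pairs for h_t = a_t * h_{t-1} + b_t.
    a0, b0 = left
    a1, b1 = right
    return (a1 * a0, a1 * b0 + b1)

def inclusive_scan(pairs, prefix=(1.0, 0.0)):
    # Sequential stand-in for BlockScanT::InclusiveScan seeded with a running prefix.
    out, acc = [], prefix
    for p in pairs:
        acc = ssm_combine(acc, p)
        out.append(acc)
    return out

# h_1 = 1.0, h_2 = 0.8*1.0 + 2.0 = 2.8, h_3 = 0.7*2.8 + 3.0 = 4.96
steps = [(0.9, 1.0), (0.8, 2.0), (0.7, 3.0)]
hs = [b for _, b in inclusive_scan(steps)]
assert abs(hs[-1] - 4.96) < 1e-9
```

Because the operator is associative, the chunk-wise block scan plus the carried running prefix produces the same states as this sequential loop.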
+ extern __shared__ char smem_[]; + // cast to lvalue reference of expected type + // char *smem_loadstorescan = smem_ + 2 * MAX_DSTATE * sizeof(weight_t); + // auto& smem_load = reinterpret_cast(smem_ + 2 * MAX_DSTATE * sizeof(weight_t)); + // auto& smem_load = reinterpret_cast(smem_loadstorescan); + auto& smem_load = reinterpret_cast(smem_); + auto& smem_load_weight = reinterpret_cast(smem_); + auto& smem_load_weight1 = *reinterpret_cast(smem_ + sizeof(typename Ktraits::BlockLoadWeightT::TempStorage)); + auto& smem_store = reinterpret_cast(smem_); + auto& smem_scan = *reinterpret_cast(smem_ + Ktraits::kSmemIOSize); + // weight_t *smem_a = reinterpret_cast(smem_ + smem_loadstorescan_size); + // weight_t *smem_bc = reinterpret_cast(smem_a + MAX_DSTATE); + scan_t *smem_running_prefix = reinterpret_cast(smem_ + Ktraits::kSmemSize); + + const int batch_id = blockIdx.x; + const int dim_id = blockIdx.y; + const int group_id = dim_id / (params.dim_ngroups_ratio); + input_t *u = reinterpret_cast(params.u_ptr) + batch_id * params.u_batch_stride + + dim_id * kNRows * params.u_d_stride; + input_t *delta = reinterpret_cast(params.delta_ptr) + batch_id * params.delta_batch_stride + + dim_id * kNRows * params.delta_d_stride; + weight_t *A = reinterpret_cast(params.A_ptr) + dim_id * kNRows * params.A_d_stride; + weight_t *B = reinterpret_cast(params.B_ptr) + dim_id * kNRows * params.B_d_stride; + input_t *Bvar = reinterpret_cast(params.B_ptr) + batch_id * params.B_batch_stride + group_id * params.B_group_stride; + weight_t *C = reinterpret_cast(params.C_ptr) + dim_id * kNRows * params.C_d_stride; + input_t *Cvar = reinterpret_cast(params.C_ptr) + batch_id * params.C_batch_stride + group_id * params.C_group_stride; + scan_t *x = reinterpret_cast(params.x_ptr) + (batch_id * params.dim + dim_id * kNRows) * params.n_chunks * params.dstate; + + float D_val[kNRows] = {0}; + if (params.D_ptr != nullptr) { + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + D_val[r] = reinterpret_cast(params.D_ptr)[dim_id * kNRows + r]; + } + } + float delta_bias[kNRows] = {0}; + if (params.delta_bias_ptr != nullptr) { + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + delta_bias[r] = reinterpret_cast(params.delta_bias_ptr)[dim_id * kNRows + r]; + } + } + + // for (int state_idx = threadIdx.x; state_idx < params.dstate; state_idx += blockDim.x) { + // smem_a[state_idx] = A[state_idx * params.A_dstate_stride]; + // smem_bc[state_idx] = B[state_idx * params.B_dstate_stride] * C[state_idx * params.C_dstate_stride]; + // } + + constexpr int kChunkSize = kNThreads * kNItems; + for (int chunk = 0; chunk < params.n_chunks; ++chunk) { + input_t u_vals[kNRows][kNItems], delta_vals_load[kNRows][kNItems]; + __syncthreads(); + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + if constexpr (!kDirectIO) { + if (r > 0) { __syncthreads(); } + } + load_input(u + r * params.u_d_stride, u_vals[r], smem_load, params.seqlen - chunk * kChunkSize); + if constexpr (!kDirectIO) { __syncthreads(); } + load_input(delta + r * params.delta_d_stride, delta_vals_load[r], smem_load, params.seqlen - chunk * kChunkSize); + } + u += kChunkSize; + delta += kChunkSize; + + float delta_vals[kNRows][kNItems], delta_u_vals[kNRows][kNItems], out_vals[kNRows][kNItems]; + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + float u_val = float(u_vals[r][i]); + delta_vals[r][i] = float(delta_vals_load[r][i]) + delta_bias[r]; + if (params.delta_softplus) { + delta_vals[r][i] = delta_vals[r][i] <= 20.f ? 
log1pf(expf(delta_vals[r][i])) : delta_vals[r][i]; + } + delta_u_vals[r][i] = delta_vals[r][i] * u_val; + out_vals[r][i] = D_val[r] * u_val; + } + } + + __syncthreads(); + for (int state_idx = 0; state_idx < params.dstate; ++state_idx) { + weight_t A_val[kNRows]; + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + A_val[r] = A[state_idx * params.A_dstate_stride + r * params.A_d_stride]; + // Multiply the real part of A with LOG2E so we can use exp2f instead of expf. + constexpr float kLog2e = M_LOG2E; + if constexpr (!kIsComplex) { + A_val[r] *= kLog2e; + } else { + A_val[r].real_ *= kLog2e; + } + } + // This variable holds B * C if both B and C are constant across seqlen. If only B varies + // across seqlen, this holds C. If only C varies across seqlen, this holds B. + // If both B and C vary, this is unused. + weight_t BC_val[kNRows]; + weight_t B_vals[kNItems], C_vals[kNItems]; + if constexpr (kIsVariableB) { + load_weight(Bvar + state_idx * params.B_dstate_stride, B_vals, + smem_load_weight, (params.seqlen - chunk * kChunkSize) * (!kIsComplex ? 1 : 2)); + if constexpr (!kIsVariableC) { + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + BC_val[r] = C[state_idx * params.C_dstate_stride + r * params.C_d_stride]; + } + } + } + if constexpr (kIsVariableC) { + auto &smem_load_weight_C = !kIsVariableB ? smem_load_weight : smem_load_weight1; + load_weight(Cvar + state_idx * params.C_dstate_stride, C_vals, + smem_load_weight_C, (params.seqlen - chunk * kChunkSize) * (!kIsComplex ? 1 : 2)); + if constexpr (!kIsVariableB) { + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + BC_val[r] = B[state_idx * params.B_dstate_stride + r * params.B_d_stride]; + } + } + } + if constexpr (!kIsVariableB && !kIsVariableC) { + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + BC_val[r] = B[state_idx * params.B_dstate_stride + r * params.B_d_stride] * C[state_idx * params.C_dstate_stride + r * params.C_d_stride]; + } + } + + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + if (r > 0) { __syncthreads(); } // Scan could be using the same smem + scan_t thread_data[kNItems]; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + if constexpr (!kIsComplex) { + thread_data[i] = make_float2(exp2f(delta_vals[r][i] * A_val[r]), + !kIsVariableB ? delta_u_vals[r][i] : B_vals[i] * delta_u_vals[r][i]); + if constexpr (!Ktraits::kIsEvenLen) { // So that the last state is correct + if (threadIdx.x * kNItems + i >= params.seqlen - chunk * kChunkSize) { + thread_data[i] = make_float2(1.f, 0.f); + } + } + } else { + // Pytorch's implementation of complex exp (which calls thrust) is very slow + complex_t delta_a_exp = cexp2f(delta_vals[r][i] * A_val[r]); + weight_t B_delta_u_val = !kIsVariableB ? delta_u_vals[r][i] : B_vals[i] * delta_u_vals[r][i]; + thread_data[i] = make_float4(delta_a_exp.real_, delta_a_exp.imag_, B_delta_u_val.real_, B_delta_u_val.imag_); + if constexpr (!Ktraits::kIsEvenLen) { // So that the last state is correct + if (threadIdx.x * kNItems + i >= params.seqlen - chunk * kChunkSize) { + thread_data[i] = make_float4(1.f, 0.f, 0.f, 0.f); + } + } + } + } + // Initialize running total + scan_t running_prefix; + if constexpr (!kIsComplex) { + // If we use WARP_SCAN then all lane 0 of all warps (not just thread 0) needs to read + running_prefix = chunk > 0 && threadIdx.x % 32 == 0 ? smem_running_prefix[state_idx + r * MAX_DSTATE] : make_float2(1.f, 0.f); + // running_prefix = chunk > 0 && threadIdx.x == 0 ? 
smem_running_prefix[state_idx] : make_float2(1.f, 0.f); + } else { + running_prefix = chunk > 0 && threadIdx.x % 32 == 0 ? smem_running_prefix[state_idx + r * MAX_DSTATE] : make_float4(1.f, 0.f, 0.f, 0.f); + // running_prefix = chunk > 0 && threadIdx.x == 0 ? smem_running_prefix[state_idx] : make_float4(1.f, 0.f, 0.f, 0.f); + } + SSMScanPrefixCallbackOp prefix_op(running_prefix); + Ktraits::BlockScanT(smem_scan).InclusiveScan( + thread_data, thread_data, SSMScanOp(), prefix_op + ); + // There's a syncthreads in the scan op, so we don't need to sync here. + // Unless there's only 1 warp, but then it's the same thread (0) reading and writing. + if (threadIdx.x == 0) { + smem_running_prefix[state_idx] = prefix_op.running_prefix; + x[(r * params.n_chunks + chunk) * params.dstate + state_idx] = prefix_op.running_prefix; + } + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + const weight_t C_val = !kIsVariableC + ? BC_val[r] + : (!kIsVariableB ? BC_val[r] * C_vals[i] : C_vals[i]); + if constexpr (!kIsComplex) { + out_vals[r][i] += thread_data[i].y * C_val; + } else { + out_vals[r][i] += (complex_t(thread_data[i].z, thread_data[i].w) * C_val).real_ * 2; + } + } + } + } + + input_t *out = reinterpret_cast(params.out_ptr) + batch_id * params.out_batch_stride + + dim_id * kNRows * params.out_d_stride + chunk * kChunkSize; + __syncthreads(); + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + if constexpr (!kDirectIO) { + if (r > 0) { __syncthreads(); } + } + store_output(out + r * params.out_d_stride, out_vals[r], smem_store, params.seqlen - chunk * kChunkSize); + } + + if constexpr (kHasZ) { + input_t *z = reinterpret_cast(params.z_ptr) + batch_id * params.z_batch_stride + + dim_id * kNRows * params.z_d_stride + chunk * kChunkSize; + input_t *out_z = reinterpret_cast(params.out_z_ptr) + batch_id * params.out_z_batch_stride + + dim_id * kNRows * params.out_z_d_stride + chunk * kChunkSize; + #pragma unroll + for (int r = 0; r < kNRows; ++r) { + input_t z_vals[kNItems]; + __syncthreads(); + load_input(z + r * params.z_d_stride, z_vals, smem_load, params.seqlen - chunk * kChunkSize); + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + float z_val = z_vals[i]; + out_vals[r][i] *= z_val / (1 + expf(-z_val)); + } + __syncthreads(); + store_output(out_z + r * params.out_z_d_stride, out_vals[r], smem_store, params.seqlen - chunk * kChunkSize); + } + } + + Bvar += kChunkSize * (!kIsComplex ? 1 : 2); + Cvar += kChunkSize * (!kIsComplex ? 1 : 2); + } +} + +template +void selective_scan_fwd_launch(SSMParamsBase ¶ms, cudaStream_t stream) { + // Only kNRows == 1 is tested for now, which ofc doesn't differ from previously when we had each block + // processing 1 row. 
+ constexpr int kNRows = 1; + BOOL_SWITCH(params.seqlen % (kNThreads * kNItems) == 0, kIsEvenLen, [&] { + BOOL_SWITCH(params.is_variable_B, kIsVariableB, [&] { + BOOL_SWITCH(params.is_variable_C, kIsVariableC, [&] { + BOOL_SWITCH(params.z_ptr != nullptr , kHasZ, [&] { + using Ktraits = Selective_Scan_fwd_kernel_traits; + // constexpr int kSmemSize = Ktraits::kSmemSize; + constexpr int kSmemSize = Ktraits::kSmemSize + kNRows * MAX_DSTATE * sizeof(typename Ktraits::scan_t); + // printf("smem_size = %d\n", kSmemSize); + dim3 grid(params.batch, params.dim / kNRows); + auto kernel = &selective_scan_fwd_kernel; + if (kSmemSize >= 48 * 1024) { + C10_CUDA_CHECK(cudaFuncSetAttribute( + kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize)); + } + kernel<<>>(params); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); + }); + }); + }); +} + +template +void selective_scan_fwd_cuda(SSMParamsBase ¶ms, cudaStream_t stream) { + if (params.seqlen <= 128) { + selective_scan_fwd_launch<32, 4, input_t, weight_t>(params, stream); + } else if (params.seqlen <= 256) { + selective_scan_fwd_launch<32, 8, input_t, weight_t>(params, stream); + } else if (params.seqlen <= 512) { + selective_scan_fwd_launch<32, 16, input_t, weight_t>(params, stream); + } else if (params.seqlen <= 1024) { + selective_scan_fwd_launch<64, 16, input_t, weight_t>(params, stream); + } else { + selective_scan_fwd_launch<128, 16, input_t, weight_t>(params, stream); + } +} diff --git a/SegMamba/mamba/csrc/selective_scan/static_switch.h b/SegMamba/mamba/csrc/selective_scan/static_switch.h new file mode 100644 index 0000000000000000000000000000000000000000..7920ac045d0a2a1f4c4159ee3eebe51fe1e2c203 --- /dev/null +++ b/SegMamba/mamba/csrc/selective_scan/static_switch.h @@ -0,0 +1,25 @@ +// Inspired by https://github.com/NVIDIA/DALI/blob/main/include/dali/core/static_switch.h +// and https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Dispatch.h + +#pragma once + +/// @param COND - a boolean expression to switch by +/// @param CONST_NAME - a name given for the constexpr bool variable. +/// @param ... - code to execute for true and false +/// +/// Usage: +/// ``` +/// BOOL_SWITCH(flag, BoolConst, [&] { +/// some_function(...); +/// }); +/// ``` +#define BOOL_SWITCH(COND, CONST_NAME, ...) \ + [&] { \ + if (COND) { \ + constexpr bool CONST_NAME = true; \ + return __VA_ARGS__(); \ + } else { \ + constexpr bool CONST_NAME = false; \ + return __VA_ARGS__(); \ + } \ + }() diff --git a/SegMamba/mamba/csrc/selective_scan/uninitialized_copy.cuh b/SegMamba/mamba/csrc/selective_scan/uninitialized_copy.cuh new file mode 100644 index 0000000000000000000000000000000000000000..630622dddcc9041737307810000584a843a01764 --- /dev/null +++ b/SegMamba/mamba/csrc/selective_scan/uninitialized_copy.cuh @@ -0,0 +1,69 @@ +/****************************************************************************** + * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +#pragma once + +#include + +#include + + +namespace detail +{ + +#if defined(_NVHPC_CUDA) +template +__host__ __device__ void uninitialized_copy(T *ptr, U &&val) +{ + // NVBug 3384810 + new (ptr) T(::cuda::std::forward(val)); +} +#else +template ::value, + int + >::type = 0> +__host__ __device__ void uninitialized_copy(T *ptr, U &&val) +{ + *ptr = ::cuda::std::forward(val); +} + +template ::value, + int + >::type = 0> +__host__ __device__ void uninitialized_copy(T *ptr, U &&val) +{ + new (ptr) T(::cuda::std::forward(val)); +} +#endif + +} // namespace detail diff --git a/SegMamba/mamba/evals/lm_harness_eval.py b/SegMamba/mamba/evals/lm_harness_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..d09d40534cf53be4d1387666697c82aa53add625 --- /dev/null +++ b/SegMamba/mamba/evals/lm_harness_eval.py @@ -0,0 +1,39 @@ +import torch + +import transformers +from transformers import AutoTokenizer + +from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel + +from lm_eval.api.model import LM +from lm_eval.models.huggingface import HFLM +from lm_eval.api.registry import register_model +from lm_eval.__main__ import cli_evaluate + + +@register_model("mamba") +class MambaEvalWrapper(HFLM): + + AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM + + def __init__(self, pretrained="state-spaces/mamba-2.8b", max_length=2048, batch_size=None, device="cuda", + dtype=torch.float16): + LM.__init__(self) + self._model = MambaLMHeadModel.from_pretrained(pretrained, device=device, dtype=dtype) + self.tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b") + self.tokenizer.pad_token_id = self.tokenizer.eos_token_id + self.vocab_size = self.tokenizer.vocab_size + self._batch_size = batch_size if batch_size is None else 64 + self._max_length = max_length + self._device = torch.device(device) + + @property + def batch_size(self): + return self._batch_size + + def _model_generate(self, context, max_length, stop, **generation_kwargs): + raise NotImplementedError() + + +if __name__ == "__main__": + cli_evaluate() diff --git a/SegMamba/mamba/mamba_ssm.egg-info/PKG-INFO b/SegMamba/mamba/mamba_ssm.egg-info/PKG-INFO new file mode 100644 index 0000000000000000000000000000000000000000..ed8c6e96be3e40e08d2015c80de632e3000fd561 --- /dev/null +++ b/SegMamba/mamba/mamba_ssm.egg-info/PKG-INFO @@ -0,0 +1,181 @@ +Metadata-Version: 2.4 +Name: mamba_ssm +Version: 1.0.1 +Summary: Mamba state-space model +Home-page: 
https://github.com/state-spaces/mamba +Author: Tri Dao, Albert Gu +Author-email: tri@tridao.me, agu@cs.cmu.edu +Classifier: Programming Language :: Python :: 3 +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: Unix +Requires-Python: >=3.7 +Description-Content-Type: text/markdown +License-File: LICENSE +License-File: AUTHORS +Requires-Dist: torch +Requires-Dist: packaging +Requires-Dist: ninja +Requires-Dist: einops +Requires-Dist: triton +Requires-Dist: transformers +Requires-Dist: causal_conv1d +Dynamic: author +Dynamic: author-email +Dynamic: classifier +Dynamic: description +Dynamic: description-content-type +Dynamic: home-page +Dynamic: license-file +Dynamic: requires-dist +Dynamic: requires-python +Dynamic: summary + +# Mamba + +![Mamba](assets/selection.png "Selective State Space") +> **Mamba: Linear-Time Sequence Modeling with Selective State Spaces**\ +> Albert Gu*, Tri Dao*\ +> Paper: https://arxiv.org/abs/2312.00752 + +## About + +Mamba is a new state space model architecture showing promising performance on information-dense data such as language modeling, where previous subquadratic models fall short of Transformers. +It is based on the line of progress on [structured state space models](https://github.com/state-spaces/s4), +with an efficient hardware-aware design and implementation in the spirit of [FlashAttention](https://github.com/Dao-AILab/flash-attention). + +## Installation + +- `pip install causal-conv1d`: an efficient implementation of a simple causal Conv1d layer used inside the Mamba block. +- `pip install mamba-ssm`: the core Mamba package. + +It can also be built from source with `pip install .` from this repository. + +If `pip` complains about PyTorch versions, try passing `--no-build-isolation` to `pip`. + +Other requirements: +- Linux +- NVIDIA GPU +- PyTorch 1.12+ +- CUDA 11.6+ + +## Usage + +We expose several levels of interface with the Mamba model. + +### Selective SSM + +Mamba is based on a selective SSM layer, which is the focus of the paper (Section 3; Algorithm 2). + +Source: [ops/selective_scan_interface.py](mamba_ssm/ops/selective_scan_interface.py). + +### Mamba Block + +The main module of this repository is the Mamba architecture block wrapping the selective SSM. + +Source: [modules/mamba_simple.py](mamba_ssm/modules/mamba_simple.py). + +Usage: +``` +from mamba_ssm import Mamba + +batch, length, dim = 2, 64, 16 +x = torch.randn(batch, length, dim).to("cuda") +model = Mamba( + # This module uses roughly 3 * expand * d_model^2 parameters + d_model=dim, # Model dimension d_model + d_state=16, # SSM state expansion factor + d_conv=4, # Local convolution width + expand=2, # Block expansion factor +).to("cuda") +y = model(x) +assert y.shape == x.shape +``` + +### Mamba Language Model + +Finally, we provide an example of a complete language model: a deep sequence model backbone (with repeating Mamba blocks) + language model head. + +Source: [models/mixer_seq_simple.py](mamba_ssm/models/mixer_seq_simple.py). + +This is an example of how to integrate Mamba into an end-to-end neural network. +This example is used in the generation scripts below. + + + +## Pretrained Models + +Pretrained models are uploaded to +[HuggingFace](https://huggingface.co/state-spaces): `mamba-130m`, `mamba-370m`, +`mamba-790m`, `mamba-1.4b`, `mamba-2.8b`. + +The models will be autodownloaded by the generation script below. 
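For completeness, a hedged sketch (not from the original README) of loading one of these checkpoints directly with the `MambaLMHeadModel` class included later in this diff; the model name and token ids are illustrative only, and a CUDA GPU is assumed:

```
import torch
from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel

# Downloads config + weights from the HuggingFace Hub on first use.
model = MambaLMHeadModel.from_pretrained("state-spaces/mamba-130m",
                                         device="cuda", dtype=torch.float16)
input_ids = torch.randint(0, 50277, (1, 16), device="cuda")  # illustrative token ids
logits = model(input_ids).logits  # forward returns a CausalLMOutput namedtuple
```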
+ +These models were trained on the [Pile](https://huggingface.co/datasets/EleutherAI/pile), and follow the standard model dimensions described by GPT-3 and followed by many open source models: + +| Parameters | Layers | Model dim. | +|------------|--------|------------| +| 130M | 12 | 768 | +| 370M | 24 | 1024 | +| 790M | 24 | 1536 | +| 1.4B | 24 | 2048 | +| 2.8B | 32 | 2560 | + +(The layer count of Mamba should be doubled, as two Mamba blocks are needed for each "layer" (MHA block + MLP block) of a Transformer.) + +Note: these are base models trained only for 300B tokens, without any form of downstream modification (instruction tuning, etc.). +Performance is expected to be comparable or better than other architectures trained on similar data, but not to match larger or fine-tuned models. + + +## Evaluations + +To run zero-shot evaluations of models (corresponding to Table 3 of the paper), +we use the +[lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness/tree/big-refactor) +library. + +1. Pull the `lm-evaluation-harness` repo by `git submodule update --init + --recursive`. We use the `big-refactor` branch. +2. Install `lm-evaluation-harness`: `pip install -e 3rdparty/lm-evaluation-harness` +3. Run evaluation with (more documentation at the [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness/tree/big-refactor) repo): +``` +python evals/lm_harness_eval.py --model mamba --model_args pretrained=state-spaces/mamba-130m --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande --device cuda --batch_size 64 +python evals/lm_harness_eval.py --model hf --model_args pretrained=EleutherAI/pythia-160m --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande --device cuda --batch_size 64 +``` + +Note that the result of each task might differ from reported values by 0.1-0.3 due to noise in the evaluation process. + +## Inference + +The script [benchmarks/benchmark_generation_mamba_simple.py](benchmarks/benchmark_generation_mamba_simple.py) +1. autoloads a model from the HuggingFace Hub, +2. generates completions of a user-specified prompt, +3. benchmarks the inference speed of this generation. + +Other configurable options include the top-p (nucleus sampling) probability, and the softmax temperature. + +### Examples + +To test generation latency (e.g. batch size = 1) with different sampling strategies: + +``` +python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.5 +python benchmarks/benchmark_generation_mamba_simple.py --model-name "EleutherAI/pythia-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.5 +``` + +To test generation throughput with random prompts (e.g. 
large batch size): +``` +python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --batch 128 +python benchmarks/benchmark_generation_mamba_simple.py --model-name "EleutherAI/pythia-2.8b" --batch 128 +``` + +## Citation + +If you use this codebase, or otherwise found our work valuable, please cite Mamba: +``` +@article{mamba, + title={Mamba: Linear-Time Sequence Modeling with Selective State Spaces}, + author={Gu, Albert and Dao, Tri}, + journal={arXiv preprint arXiv:2312.00752}, + year={2023} +} +``` diff --git a/SegMamba/mamba/mamba_ssm.egg-info/SOURCES.txt b/SegMamba/mamba/mamba_ssm.egg-info/SOURCES.txt new file mode 100644 index 0000000000000000000000000000000000000000..52536c42f43c8e90c131d567ceee396c09076a2e --- /dev/null +++ b/SegMamba/mamba/mamba_ssm.egg-info/SOURCES.txt @@ -0,0 +1,32 @@ +AUTHORS +LICENSE +README.md +setup.py +csrc/selective_scan/selective_scan.cpp +csrc/selective_scan/selective_scan_bwd_bf16_complex.cu +csrc/selective_scan/selective_scan_bwd_bf16_real.cu +csrc/selective_scan/selective_scan_bwd_fp16_complex.cu +csrc/selective_scan/selective_scan_bwd_fp16_real.cu +csrc/selective_scan/selective_scan_bwd_fp32_complex.cu +csrc/selective_scan/selective_scan_bwd_fp32_real.cu +csrc/selective_scan/selective_scan_fwd_bf16.cu +csrc/selective_scan/selective_scan_fwd_fp16.cu +csrc/selective_scan/selective_scan_fwd_fp32.cu +mamba_ssm/__init__.py +mamba_ssm.egg-info/PKG-INFO +mamba_ssm.egg-info/SOURCES.txt +mamba_ssm.egg-info/dependency_links.txt +mamba_ssm.egg-info/requires.txt +mamba_ssm.egg-info/top_level.txt +mamba_ssm/models/__init__.py +mamba_ssm/models/mixer_seq_simple.py +mamba_ssm/modules/__init__.py +mamba_ssm/modules/mamba_simple.py +mamba_ssm/ops/__init__.py +mamba_ssm/ops/selective_scan_interface.py +mamba_ssm/ops/triton/__init__.py +mamba_ssm/ops/triton/layernorm.py +mamba_ssm/ops/triton/selective_state_update.py +mamba_ssm/utils/__init__.py +mamba_ssm/utils/generation.py +mamba_ssm/utils/hf.py \ No newline at end of file diff --git a/SegMamba/mamba/mamba_ssm.egg-info/dependency_links.txt b/SegMamba/mamba/mamba_ssm.egg-info/dependency_links.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/SegMamba/mamba/mamba_ssm.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/SegMamba/mamba/mamba_ssm.egg-info/requires.txt b/SegMamba/mamba/mamba_ssm.egg-info/requires.txt new file mode 100644 index 0000000000000000000000000000000000000000..8bf31e81ea760588be6fc8a4c24a43a599d29eab --- /dev/null +++ b/SegMamba/mamba/mamba_ssm.egg-info/requires.txt @@ -0,0 +1,7 @@ +torch +packaging +ninja +einops +triton +transformers +causal_conv1d diff --git a/SegMamba/mamba/mamba_ssm.egg-info/top_level.txt b/SegMamba/mamba/mamba_ssm.egg-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..def7a96b278675080fd2ada3941fd1499491296f --- /dev/null +++ b/SegMamba/mamba/mamba_ssm.egg-info/top_level.txt @@ -0,0 +1,2 @@ +mamba_ssm +selective_scan_cuda diff --git a/SegMamba/mamba/mamba_ssm/.DS_Store b/SegMamba/mamba/mamba_ssm/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..029c7125f94cd01f8ed4fcedbb1636be3bb28de4 Binary files /dev/null and b/SegMamba/mamba/mamba_ssm/.DS_Store differ diff --git a/SegMamba/mamba/mamba_ssm/__init__.py b/SegMamba/mamba/mamba_ssm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2ecd144db5dbec72bcfcdcea28c624a7e2bf053b --- /dev/null +++ 
b/SegMamba/mamba/mamba_ssm/__init__.py @@ -0,0 +1,5 @@ +__version__ = "1.0.1" + +from mamba_ssm.ops.selective_scan_interface import selective_scan_fn, mamba_inner_fn, bimamba_inner_fn +from mamba_ssm.modules.mamba_simple import Mamba +from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel diff --git a/SegMamba/mamba/mamba_ssm/models/__init__.py b/SegMamba/mamba/mamba_ssm/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SegMamba/mamba/mamba_ssm/models/mixer_seq_simple.py b/SegMamba/mamba/mamba_ssm/models/mixer_seq_simple.py new file mode 100644 index 0000000000000000000000000000000000000000..383f773f1f700cd53176e51327a5d8dc58158da0 --- /dev/null +++ b/SegMamba/mamba/mamba_ssm/models/mixer_seq_simple.py @@ -0,0 +1,233 @@ +# Copyright (c) 2023, Albert Gu, Tri Dao. + +import math +from functools import partial + +from collections import namedtuple + +import torch +import torch.nn as nn + +from mamba_ssm.modules.mamba_simple import Mamba, Block +from mamba_ssm.utils.generation import GenerationMixin +from mamba_ssm.utils.hf import load_config_hf, load_state_dict_hf + +try: + from mamba_ssm.ops.triton.layernorm import RMSNorm, layer_norm_fn, rms_norm_fn +except ImportError: + RMSNorm, layer_norm_fn, rms_norm_fn = None, None, None + + +def create_block( + d_model, + ssm_cfg=None, + norm_epsilon=1e-5, + rms_norm=False, + residual_in_fp32=False, + fused_add_norm=False, + layer_idx=None, + device=None, + dtype=None, +): + if ssm_cfg is None: + ssm_cfg = {} + factory_kwargs = {"device": device, "dtype": dtype} + mixer_cls = partial(Mamba, layer_idx=layer_idx, **ssm_cfg, **factory_kwargs) + norm_cls = partial( + nn.LayerNorm if not rms_norm else RMSNorm, eps=norm_epsilon, **factory_kwargs + ) + block = Block( + d_model, + mixer_cls, + norm_cls=norm_cls, + fused_add_norm=fused_add_norm, + residual_in_fp32=residual_in_fp32, + ) + block.layer_idx = layer_idx + return block + + +# https://github.com/huggingface/transformers/blob/c28d04e9e252a1a099944e325685f14d242ecdcd/src/transformers/models/gpt2/modeling_gpt2.py#L454 +def _init_weights( + module, + n_layer, + initializer_range=0.02, # Now only used for embedding layer. + rescale_prenorm_residual=True, + n_residuals_per_layer=1, # Change to 2 if we have MLP +): + if isinstance(module, nn.Linear): + if module.bias is not None: + if not getattr(module.bias, "_no_reinit", False): + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Embedding): + nn.init.normal_(module.weight, std=initializer_range) + + if rescale_prenorm_residual: + # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme: + # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale + # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers. 
+ # > -- GPT-2 :: https://openai.com/blog/better-language-models/ + # + # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py + for name, p in module.named_parameters(): + if name in ["out_proj.weight", "fc2.weight"]: + # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block + # Following Pytorch init, except scale by 1/sqrt(2 * n_layer) + # We need to reinit p since this code could be called multiple times + # Having just p *= scale would repeatedly scale it down + nn.init.kaiming_uniform_(p, a=math.sqrt(5)) + with torch.no_grad(): + p /= math.sqrt(n_residuals_per_layer * n_layer) + + +class MixerModel(nn.Module): + def __init__( + self, + d_model: int, + n_layer: int, + vocab_size: int, + ssm_cfg=None, + norm_epsilon: float = 1e-5, + rms_norm: bool = False, + initializer_cfg=None, + fused_add_norm=False, + residual_in_fp32=False, + device=None, + dtype=None, + ) -> None: + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.residual_in_fp32 = residual_in_fp32 + + self.embedding = nn.Embedding(vocab_size, d_model, **factory_kwargs) + + # We change the order of residual and layer norm: + # Instead of LN -> Attn / MLP -> Add, we do: + # Add -> LN -> Attn / MLP / Mixer, returning both the residual branch (output of Add) and + # the main branch (output of MLP / Mixer). The model definition is unchanged. + # This is for performance reason: we can fuse add + layer_norm. + self.fused_add_norm = fused_add_norm + if self.fused_add_norm: + if layer_norm_fn is None or rms_norm_fn is None: + raise ImportError("Failed to import Triton LayerNorm / RMSNorm kernels") + + self.layers = nn.ModuleList( + [ + create_block( + d_model, + ssm_cfg=ssm_cfg, + norm_epsilon=norm_epsilon, + rms_norm=rms_norm, + residual_in_fp32=residual_in_fp32, + fused_add_norm=fused_add_norm, + layer_idx=i, + **factory_kwargs, + ) + for i in range(n_layer) + ] + ) + + self.norm_f = (nn.LayerNorm if not rms_norm else RMSNorm)( + d_model, eps=norm_epsilon, **factory_kwargs + ) + + self.apply( + partial( + _init_weights, + n_layer=n_layer, + **(initializer_cfg if initializer_cfg is not None else {}), + ) + ) + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + return { + i: layer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs) + for i, layer in enumerate(self.layers) + } + + def forward(self, input_ids, inference_params=None): + hidden_states = self.embedding(input_ids) + residual = None + for layer in self.layers: + hidden_states, residual = layer( + hidden_states, residual, inference_params=inference_params + ) + if not self.fused_add_norm: + residual = (hidden_states + residual) if residual is not None else hidden_states + hidden_states = self.norm_f(residual.to(dtype=self.norm_f.weight.dtype)) + else: + # Set prenorm=False here since we don't need the residual + fused_add_norm_fn = rms_norm_fn if isinstance(self.norm_f, RMSNorm) else layer_norm_fn + hidden_states = fused_add_norm_fn( + hidden_states, + self.norm_f.weight, + self.norm_f.bias, + eps=self.norm_f.eps, + residual=residual, + prenorm=False, + residual_in_fp32=self.residual_in_fp32, + ) + return hidden_states + + +class MambaLMHeadModel(nn.Module, GenerationMixin): + + def __init__( + self, + d_model: int, + n_layer: int, + vocab_size: int, + initializer_cfg=None, + pad_vocab_size_multiple: int = 1, + device=None, + dtype=None, + **backbone_kwargs, + ) -> None: + factory_kwargs = {"device": device, "dtype": 
dtype} + super().__init__() + if vocab_size % pad_vocab_size_multiple != 0: + vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple) + self.backbone = MixerModel( + d_model=d_model, + n_layer=n_layer, + vocab_size=vocab_size, + initializer_cfg=initializer_cfg, + **backbone_kwargs, + **factory_kwargs, + ) + self.lm_head = nn.Linear(d_model, vocab_size, bias=False, **factory_kwargs) + + # Initialize weights and apply final processing + self.apply( + partial( + _init_weights, + n_layer=n_layer, + **(initializer_cfg if initializer_cfg is not None else {}), + ) + ) + self.tie_weights() + + def tie_weights(self): + self.lm_head.weight = self.backbone.embedding.weight + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + return self.backbone.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs) + + def forward(self, input_ids, position_ids=None, inference_params=None, num_last_tokens=0): + """ + "position_ids" is just to be compatible with Transformer generation. We don't use it. + num_last_tokens: if > 0, only return the logits for the last n tokens + """ + hidden_states = self.backbone(input_ids, inference_params=inference_params) + if num_last_tokens > 0: + hidden_states = hidden_states[:, -num_last_tokens:] + lm_logits = self.lm_head(hidden_states) + CausalLMOutput = namedtuple("CausalLMOutput", ["logits"]) + return CausalLMOutput(logits=lm_logits) + + @classmethod + def from_pretrained(cls, pretrained_model_name, device=None, dtype=None, **kwargs): + config = load_config_hf(pretrained_model_name) + model = cls(**config, device=device, dtype=dtype, **kwargs) + model.load_state_dict(load_state_dict_hf(pretrained_model_name, device=device, dtype=dtype)) + return model diff --git a/SegMamba/mamba/mamba_ssm/modules/__init__.py b/SegMamba/mamba/mamba_ssm/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SegMamba/mamba/mamba_ssm/modules/mamba_simple.py b/SegMamba/mamba/mamba_ssm/modules/mamba_simple.py new file mode 100644 index 0000000000000000000000000000000000000000..8ffc53d24110bc39651d086f7f3969cf5069f196 --- /dev/null +++ b/SegMamba/mamba/mamba_ssm/modules/mamba_simple.py @@ -0,0 +1,501 @@ +# Copyright (c) 2023, Tri Dao, Albert Gu. 
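Before the implementation below, a hedged usage sketch of the tri-oriented Mamba block this file defines (argument names are taken from the constructor that follows; running it requires a GPU plus the compiled causal-conv1d and selective-scan extensions, the constructor asserts `bimamba_type == "v3"`, and the spatial branch assumes the sequence length is divisible by `nslices`):

```
import torch
from mamba_ssm.modules.mamba_simple import Mamba

layer = Mamba(d_model=32, d_state=16, d_conv=4, expand=2,
              bimamba_type="v3", nslices=5).to("cuda")
x = torch.randn(2, 5 * 16, 32, device="cuda")  # (batch, length, d_model), length % nslices == 0
y = layer(x)
assert y.shape == x.shape
```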
+ +import math +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor + +from einops import rearrange, repeat + +try: + from causal_conv1d import causal_conv1d_fn, causal_conv1d_update +except ImportError: + causal_conv1d_fn, causal_conv1d_update = None + +try: + from mamba_ssm.ops.selective_scan_interface import selective_scan_fn, mamba_inner_fn, bimamba_inner_fn, mamba_inner_fn_no_out_proj +except ImportError: + selective_scan_fn, mamba_inner_fn, bimamba_inner_fn, mamba_inner_fn_no_out_proj = None, None, None, None, None + +try: + from mamba_ssm.ops.triton.selective_state_update import selective_state_update +except ImportError: + selective_state_update = None + +try: + from mamba_ssm.ops.triton.layernorm import RMSNorm, layer_norm_fn, rms_norm_fn +except ImportError: + RMSNorm, layer_norm_fn, rms_norm_fn = None, None, None + + +class Mamba(nn.Module): + def __init__( + self, + d_model, + d_state=16, + d_conv=4, + expand=2, + dt_rank="auto", + dt_min=0.001, + dt_max=0.1, + dt_init="random", + dt_scale=1.0, + dt_init_floor=1e-4, + conv_bias=True, + bias=False, + use_fast_path=True, # Fused kernel options + layer_idx=None, + device=None, + dtype=None, + bimamba_type="none", + nslices=5 + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.d_model = d_model + self.d_state = d_state + self.d_conv = d_conv + self.expand = expand + self.d_inner = int(self.expand * self.d_model) + self.dt_rank = math.ceil(self.d_model / 16) if dt_rank == "auto" else dt_rank + self.use_fast_path = use_fast_path + self.layer_idx = layer_idx + self.bimamba_type = bimamba_type + self.nslices = nslices + + self.in_proj = nn.Linear(self.d_model, self.d_inner * 2, bias=bias, **factory_kwargs) + + self.conv1d = nn.Conv1d( + in_channels=self.d_inner, + out_channels=self.d_inner, + bias=conv_bias, + kernel_size=d_conv, + groups=self.d_inner, + padding=d_conv - 1, + **factory_kwargs, + ) + + self.activation = "silu" + self.act = nn.SiLU() + + self.x_proj = nn.Linear( + self.d_inner, self.dt_rank + self.d_state * 2, bias=False, **factory_kwargs + ) + self.dt_proj = nn.Linear(self.dt_rank, self.d_inner, bias=True, **factory_kwargs) + + # Initialize special dt projection to preserve variance at initialization + dt_init_std = self.dt_rank**-0.5 * dt_scale + if dt_init == "constant": + nn.init.constant_(self.dt_proj.weight, dt_init_std) + elif dt_init == "random": + nn.init.uniform_(self.dt_proj.weight, -dt_init_std, dt_init_std) + else: + raise NotImplementedError + + # Initialize dt bias so that F.softplus(dt_bias) is between dt_min and dt_max + dt = torch.exp( + torch.rand(self.d_inner, **factory_kwargs) * (math.log(dt_max) - math.log(dt_min)) + + math.log(dt_min) + ).clamp(min=dt_init_floor) + # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759 + inv_dt = dt + torch.log(-torch.expm1(-dt)) + with torch.no_grad(): + self.dt_proj.bias.copy_(inv_dt) + # Our initialization would set all Linear.bias to zero, need to mark this one as _no_reinit + self.dt_proj.bias._no_reinit = True + + # S4D real initialization + A = repeat( + torch.arange(1, self.d_state + 1, dtype=torch.float32, device=device), + "n -> d n", + d=self.d_inner, + ).contiguous() + A_log = torch.log(A) # Keep A_log in fp32 + self.A_log = nn.Parameter(A_log) + self.A_log._no_weight_decay = True + + # D "skip" parameter + self.D = nn.Parameter(torch.ones(self.d_inner, device=device)) # Keep in fp32 + self.D._no_weight_decay = True + + # 
bidirectional + assert bimamba_type == "v3" + + A_b = repeat( + torch.arange(1, self.d_state + 1, dtype=torch.float32, device=device), + "n -> d n", + d=self.d_inner, + ).contiguous() + A_b_log = torch.log(A_b) # Keep A_b_log in fp32 + self.A_b_log = nn.Parameter(A_b_log) + self.A_b_log._no_weight_decay = True + + self.conv1d_b = nn.Conv1d( + in_channels=self.d_inner, + out_channels=self.d_inner, + bias=conv_bias, + kernel_size=d_conv, + groups=self.d_inner, + padding=d_conv - 1, + **factory_kwargs, + ) + + self.x_proj_b = nn.Linear( + self.d_inner, self.dt_rank + self.d_state * 2, bias=False, **factory_kwargs + ) + self.dt_proj_b = nn.Linear(self.dt_rank, self.d_inner, bias=True, **factory_kwargs) + + self.D_b = nn.Parameter(torch.ones(self.d_inner, device=device)) # Keep in fp32 + self.D_b._no_weight_decay = True + + # assert bimamba_type == "v3" + # spatial + A_s = repeat( + torch.arange(1, self.d_state + 1, dtype=torch.float32, device=device), + "n -> d n", + d=self.d_inner, + ).contiguous() + A_s_log = torch.log(A_s) # Keep A_b_log in fp32 + self.A_s_log = nn.Parameter(A_s_log) + self.A_s_log._no_weight_decay = True + + self.conv1d_s = nn.Conv1d( + in_channels=self.d_inner, + out_channels=self.d_inner, + bias=conv_bias, + kernel_size=d_conv, + groups=self.d_inner, + padding=d_conv - 1, + **factory_kwargs, + ) + + self.x_proj_s = nn.Linear( + self.d_inner, self.dt_rank + self.d_state * 2, bias=False, **factory_kwargs + ) + self.dt_proj_s = nn.Linear(self.dt_rank, self.d_inner, bias=True, **factory_kwargs) + + self.D_s = nn.Parameter(torch.ones(self.d_inner, device=device)) # Keep in fp32 + self.D_s._no_weight_decay = True + + + + + self.out_proj = nn.Linear(self.d_inner, self.d_model, bias=bias, **factory_kwargs) + + def forward(self, hidden_states, inference_params=None): + """ + hidden_states: (B, L, D) + Returns: same shape as hidden_states + """ + batch, seqlen, dim = hidden_states.shape + + conv_state, ssm_state = None, None + if inference_params is not None: + conv_state, ssm_state = self._get_states_from_cache(inference_params, batch) + if inference_params.seqlen_offset > 0: + # The states are updated inplace + out, _, _ = self.step(hidden_states, conv_state, ssm_state) + return out + + # We do matmul and transpose BLH -> HBL at the same time + xz = rearrange( + self.in_proj.weight @ rearrange(hidden_states, "b l d -> d (b l)"), + "d (b l) -> b d l", + l=seqlen, + ) + if self.in_proj.bias is not None: + xz = xz + rearrange(self.in_proj.bias.to(dtype=xz.dtype), "d -> d 1") + + A = -torch.exp(self.A_log.float()) # (d_inner, d_state) + # In the backward pass we write dx and dz next to each other to avoid torch.cat + if self.use_fast_path and inference_params is None: # Doesn't support outputting the states + if self.bimamba_type == "v3": + A_b = -torch.exp(self.A_b_log.float()) + out = mamba_inner_fn_no_out_proj( + xz, + self.conv1d.weight, + self.conv1d.bias, + self.x_proj.weight, + self.dt_proj.weight, + A, + None, # input-dependent B + None, # input-dependent C + self.D.float(), + delta_bias=self.dt_proj.bias.float(), + delta_softplus=True, + ) + out_b = mamba_inner_fn_no_out_proj( + xz.flip([-1]), + self.conv1d_b.weight, + self.conv1d_b.bias, + self.x_proj_b.weight, + self.dt_proj_b.weight, + A_b, + None, + None, + self.D_b.float(), + delta_bias=self.dt_proj_b.bias.float(), + delta_softplus=True, + ) + A_s = -torch.exp(self.A_s_log.float()) + + xz_s = xz.chunk(self.nslices, dim=-1) + xz_s = torch.stack(xz_s,dim=-1) + xz_s = xz_s.flatten(-2) + out_s = mamba_inner_fn_no_out_proj( 
+ xz_s, + self.conv1d_s.weight, + self.conv1d_s.bias, + self.x_proj_s.weight, + self.dt_proj_s.weight, + A_s, + None, + None, + self.D_s.float(), + delta_bias=self.dt_proj_s.bias.float(), + delta_softplus=True, + ) + out_s = out_s.reshape(batch,self.d_inner,seqlen//self.nslices,self.nslices).permute(0,1,3,2).flatten(-2) + + # F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias) + out = F.linear(rearrange(out + out_b.flip([-1]) + out_s, "b d l -> b l d"), self.out_proj.weight, self.out_proj.bias) + elif self.bimamba_type == "v2": + A_b = -torch.exp(self.A_b_log.float()) + out = mamba_inner_fn_no_out_proj( + xz, + self.conv1d.weight, + self.conv1d.bias, + self.x_proj.weight, + self.dt_proj.weight, + A, + None, # input-dependent B + None, # input-dependent C + self.D.float(), + delta_bias=self.dt_proj.bias.float(), + delta_softplus=True, + ) + out_b = mamba_inner_fn_no_out_proj( + xz.flip([-1]), + self.conv1d_b.weight, + self.conv1d_b.bias, + self.x_proj_b.weight, + self.dt_proj_b.weight, + A_b, + None, + None, + self.D_b.float(), + delta_bias=self.dt_proj_b.bias.float(), + delta_softplus=True, + ) + # F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias) + out = F.linear(rearrange(out + out_b.flip([-1]), "b d l -> b l d"), self.out_proj.weight, self.out_proj.bias) + else: + out = mamba_inner_fn( + xz, + self.conv1d.weight, + self.conv1d.bias, + self.x_proj.weight, + self.dt_proj.weight, + self.out_proj.weight, + self.out_proj.bias, + A, + None, # input-dependent B + None, # input-dependent C + self.D.float(), + delta_bias=self.dt_proj.bias.float(), + delta_softplus=True, + ) + else: + x, z = xz.chunk(2, dim=1) + # Compute short convolution + if conv_state is not None: + conv_state.copy_(x[:, :, -self.d_conv :]) # Update state (B D W) + if causal_conv1d_fn is None: + x = self.act(self.conv1d(x)[..., :seqlen]) + else: + assert self.activation in ["silu", "swish"] + x = causal_conv1d_fn( + x, + rearrange(self.conv1d.weight, "d 1 w -> d w"), + self.conv1d.bias, + self.activation, + ) + + # We're careful here about the layout, to avoid extra transposes. + # We want dt to have d as the slowest moving dimension + # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. 
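For orientation, a small shapes-only sketch (illustrative dimensions, no CUDA needed) of the projection-and-split step that follows, mirroring the `rearrange(x, "b d l -> (b l) d")` / `torch.split` pattern used below:

```
import torch

b, l, d_inner, dt_rank, d_state = 2, 5, 4, 2, 3
x = torch.randn(b, d_inner, l)                               # (B, D, L) after the causal conv
x_proj = torch.nn.Linear(d_inner, dt_rank + 2 * d_state, bias=False)

x_dbl = x_proj(x.transpose(1, 2).reshape(b * l, d_inner))    # same layout as "(b l) d"
dt, B, C = torch.split(x_dbl, [dt_rank, d_state, d_state], dim=-1)
assert dt.shape == (b * l, dt_rank)
assert B.shape == C.shape == (b * l, d_state)
```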
+ x_dbl = self.x_proj(rearrange(x, "b d l -> (b l) d")) # (bl d) + dt, B, C = torch.split(x_dbl, [self.dt_rank, self.d_state, self.d_state], dim=-1) + dt = self.dt_proj.weight @ dt.t() + dt = rearrange(dt, "d (b l) -> b d l", l=seqlen) + B = rearrange(B, "(b l) dstate -> b dstate l", l=seqlen).contiguous() + C = rearrange(C, "(b l) dstate -> b dstate l", l=seqlen).contiguous() + assert self.activation in ["silu", "swish"] + y = selective_scan_fn( + x, + dt, + A, + B, + C, + self.D.float(), + z=z, + delta_bias=self.dt_proj.bias.float(), + delta_softplus=True, + return_last_state=ssm_state is not None, + ) + if ssm_state is not None: + y, last_state = y + ssm_state.copy_(last_state) + y = rearrange(y, "b d l -> b l d") + out = self.out_proj(y) + return out + + def step(self, hidden_states, conv_state, ssm_state): + dtype = hidden_states.dtype + assert hidden_states.shape[1] == 1, "Only support decoding with 1 token at a time for now" + xz = self.in_proj(hidden_states.squeeze(1)) # (B 2D) + x, z = xz.chunk(2, dim=-1) # (B D) + + # Conv step + if causal_conv1d_update is None: + conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1)) # Update state (B D W) + conv_state[:, :, -1] = x + x = torch.sum(conv_state * rearrange(self.conv1d.weight, "d 1 w -> d w"), dim=-1) # (B D) + if self.conv1d.bias is not None: + x = x + self.conv1d.bias + x = self.act(x).to(dtype=dtype) + else: + x = causal_conv1d_update( + x, + conv_state, + rearrange(self.conv1d.weight, "d 1 w -> d w"), + self.conv1d.bias, + self.activation, + ) + + x_db = self.x_proj(x) # (B dt_rank+2*d_state) + dt, B, C = torch.split(x_db, [self.dt_rank, self.d_state, self.d_state], dim=-1) + # Don't add dt_bias here + dt = F.linear(dt, self.dt_proj.weight) # (B d_inner) + A = -torch.exp(self.A_log.float()) # (d_inner, d_state) + + # SSM step + if selective_state_update is None: + # Discretize A and B + dt = F.softplus(dt + self.dt_proj.bias.to(dtype=dt.dtype)) + dA = torch.exp(torch.einsum("bd,dn->bdn", dt, A)) + dB = torch.einsum("bd,bn->bdn", dt, B) + ssm_state.copy_(ssm_state * dA + rearrange(x, "b d -> b d 1") * dB) + y = torch.einsum("bdn,bn->bd", ssm_state.to(dtype), C) + y = y + self.D.to(dtype) * x + y = y * self.act(z) # (B D) + else: + y = selective_state_update( + ssm_state, x, dt, A, B, C, self.D, z=z, dt_bias=self.dt_proj.bias, dt_softplus=True + ) + + out = self.out_proj(y) + return out.unsqueeze(1), conv_state, ssm_state + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + device = self.out_proj.weight.device + conv_dtype = self.conv1d.weight.dtype if dtype is None else dtype + conv_state = torch.zeros( + batch_size, self.d_model * self.expand, self.d_conv, device=device, dtype=conv_dtype + ) + ssm_dtype = self.dt_proj.weight.dtype if dtype is None else dtype + # ssm_dtype = torch.float32 + ssm_state = torch.zeros( + batch_size, self.d_model * self.expand, self.d_state, device=device, dtype=ssm_dtype + ) + return conv_state, ssm_state + + def _get_states_from_cache(self, inference_params, batch_size, initialize_states=False): + assert self.layer_idx is not None + if self.layer_idx not in inference_params.key_value_memory_dict: + batch_shape = (batch_size,) + conv_state = torch.zeros( + batch_size, + self.d_model * self.expand, + self.d_conv, + device=self.conv1d.weight.device, + dtype=self.conv1d.weight.dtype, + ) + ssm_state = torch.zeros( + batch_size, + self.d_model * self.expand, + self.d_state, + device=self.dt_proj.weight.device, + dtype=self.dt_proj.weight.dtype, + # 
dtype=torch.float32, + ) + inference_params.key_value_memory_dict[self.layer_idx] = (conv_state, ssm_state) + else: + conv_state, ssm_state = inference_params.key_value_memory_dict[self.layer_idx] + # TODO: What if batch size changes between generation, and we reuse the same states? + if initialize_states: + conv_state.zero_() + ssm_state.zero_() + return conv_state, ssm_state + + +class Block(nn.Module): + def __init__( + self, dim, mixer_cls, norm_cls=nn.LayerNorm, fused_add_norm=False, residual_in_fp32=False + ): + """ + Simple block wrapping a mixer class with LayerNorm/RMSNorm and residual connection" + + This Block has a slightly different structure compared to a regular + prenorm Transformer block. + The standard block is: LN -> MHA/MLP -> Add. + [Ref: https://arxiv.org/abs/2002.04745] + Here we have: Add -> LN -> Mixer, returning both + the hidden_states (output of the mixer) and the residual. + This is purely for performance reasons, as we can fuse add and LayerNorm. + The residual needs to be provided (except for the very first block). + """ + super().__init__() + self.residual_in_fp32 = residual_in_fp32 + self.fused_add_norm = fused_add_norm + self.mixer = mixer_cls(dim) + self.norm = norm_cls(dim) + if self.fused_add_norm: + assert RMSNorm is not None, "RMSNorm import fails" + assert isinstance( + self.norm, (nn.LayerNorm, RMSNorm) + ), "Only LayerNorm and RMSNorm are supported for fused_add_norm" + + def forward( + self, hidden_states: Tensor, residual: Optional[Tensor] = None, inference_params=None + ): + r"""Pass the input through the encoder layer. + + Args: + hidden_states: the sequence to the encoder layer (required). + residual: hidden_states = Mixer(LN(residual)) + """ + if not self.fused_add_norm: + residual = (hidden_states + residual) if residual is not None else hidden_states + hidden_states = self.norm(residual.to(dtype=self.norm.weight.dtype)) + if self.residual_in_fp32: + residual = residual.to(torch.float32) + else: + fused_add_norm_fn = rms_norm_fn if isinstance(self.norm, RMSNorm) else layer_norm_fn + hidden_states, residual = fused_add_norm_fn( + hidden_states, + self.norm.weight, + self.norm.bias, + residual=residual, + prenorm=True, + residual_in_fp32=self.residual_in_fp32, + eps=self.norm.eps, + ) + hidden_states = self.mixer(hidden_states, inference_params=inference_params) + return hidden_states, residual + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + return self.mixer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs) diff --git a/SegMamba/mamba/mamba_ssm/ops/__init__.py b/SegMamba/mamba/mamba_ssm/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SegMamba/mamba/mamba_ssm/ops/selective_scan_interface.py b/SegMamba/mamba/mamba_ssm/ops/selective_scan_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..99b455ed949c123bb453922d5ac88d00f401e392 --- /dev/null +++ b/SegMamba/mamba/mamba_ssm/ops/selective_scan_interface.py @@ -0,0 +1,709 @@ +# Copyright (c) 2023, Tri Dao, Albert Gu. 
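The file below wraps the CUDA scan in a `torch.autograd.Function` and also ships a pure-PyTorch reference. A hedged sketch of how the two are typically compared (the function names come from this file; a GPU and the compiled `selective_scan_cuda` extension are assumed, and the tolerances are illustrative):

```
import torch
from mamba_ssm.ops.selective_scan_interface import selective_scan_fn, selective_scan_ref

batch, dim, dstate, seqlen = 2, 4, 8, 64
u = torch.randn(batch, dim, seqlen, device="cuda")
delta = torch.rand(batch, dim, seqlen, device="cuda")
A = -torch.rand(dim, dstate, device="cuda")            # real-valued, S4D-style A
B = torch.randn(batch, dstate, seqlen, device="cuda")  # input-dependent ("variable") B
C = torch.randn(batch, dstate, seqlen, device="cuda")
D = torch.ones(dim, device="cuda")

out = selective_scan_fn(u, delta, A, B, C, D, delta_softplus=True)
ref = selective_scan_ref(u, delta, A, B, C, D, delta_softplus=True)
assert torch.allclose(out, ref, atol=1e-2, rtol=1e-2)
```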
+ +import torch +import torch.nn.functional as F +from torch.cuda.amp import custom_bwd, custom_fwd + +from einops import rearrange, repeat + +from causal_conv1d import causal_conv1d_fn +import causal_conv1d_cuda +import selective_scan_cuda + + +class SelectiveScanFn(torch.autograd.Function): + + @staticmethod + def forward(ctx, u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False, + return_last_state=False): + if u.stride(-1) != 1: + u = u.contiguous() + if delta.stride(-1) != 1: + delta = delta.contiguous() + if D is not None: + D = D.contiguous() + if B.stride(-1) != 1: + B = B.contiguous() + if C.stride(-1) != 1: + C = C.contiguous() + if z is not None and z.stride(-1) != 1: + z = z.contiguous() + if B.dim() == 3: + B = rearrange(B, "b dstate l -> b 1 dstate l") + ctx.squeeze_B = True + if C.dim() == 3: + C = rearrange(C, "b dstate l -> b 1 dstate l") + ctx.squeeze_C = True + out, x, *rest = selective_scan_cuda.fwd(u, delta, A, B, C, D, z, delta_bias, delta_softplus) + ctx.delta_softplus = delta_softplus + ctx.has_z = z is not None + last_state = x[:, :, -1, 1::2] # (batch, dim, dstate) + if not ctx.has_z: + ctx.save_for_backward(u, delta, A, B, C, D, delta_bias, x) + return out if not return_last_state else (out, last_state) + else: + ctx.save_for_backward(u, delta, A, B, C, D, z, delta_bias, x, out) + out_z = rest[0] + return out_z if not return_last_state else (out_z, last_state) + + @staticmethod + def backward(ctx, dout, *args): + if not ctx.has_z: + u, delta, A, B, C, D, delta_bias, x = ctx.saved_tensors + z = None + out = None + else: + u, delta, A, B, C, D, z, delta_bias, x, out = ctx.saved_tensors + if dout.stride(-1) != 1: + dout = dout.contiguous() + # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the + # backward of selective_scan_cuda with the backward of chunk). + # Here we just pass in None and dz will be allocated in the C++ code. + du, ddelta, dA, dB, dC, dD, ddelta_bias, *rest = selective_scan_cuda.bwd( + u, delta, A, B, C, D, z, delta_bias, dout, x, out, None, ctx.delta_softplus, + False # option to recompute out_z, not used here + ) + dz = rest[0] if ctx.has_z else None + dB = dB.squeeze(1) if getattr(ctx, "squeeze_B", False) else dB + dC = dC.squeeze(1) if getattr(ctx, "squeeze_C", False) else dC + return (du, ddelta, dA, dB, dC, + dD if D is not None else None, + dz, + ddelta_bias if delta_bias is not None else None, + None, + None) + + +def selective_scan_fn(u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False, + return_last_state=False): + """if return_last_state is True, returns (out, last_state) + last_state has shape (batch, dim, dstate). Note that the gradient of the last state is + not considered in the backward pass. 
+ """ + return SelectiveScanFn.apply(u, delta, A, B, C, D, z, delta_bias, delta_softplus, return_last_state) + + +def selective_scan_ref(u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False, + return_last_state=False): + """ + u: r(B D L) + delta: r(B D L) + A: c(D N) or r(D N) + B: c(D N) or r(B N L) or r(B N 2L) or r(B G N L) or (B G N L) + C: c(D N) or r(B N L) or r(B N 2L) or r(B G N L) or (B G N L) + D: r(D) + z: r(B D L) + delta_bias: r(D), fp32 + + out: r(B D L) + last_state (optional): r(B D dstate) or c(B D dstate) + """ + dtype_in = u.dtype + u = u.float() + delta = delta.float() + if delta_bias is not None: + delta = delta + delta_bias[..., None].float() + if delta_softplus: + delta = F.softplus(delta) + batch, dim, dstate = u.shape[0], A.shape[0], A.shape[1] + is_variable_B = B.dim() >= 3 + is_variable_C = C.dim() >= 3 + if A.is_complex(): + if is_variable_B: + B = torch.view_as_complex(rearrange(B.float(), "... (L two) -> ... L two", two=2)) + if is_variable_C: + C = torch.view_as_complex(rearrange(C.float(), "... (L two) -> ... L two", two=2)) + else: + B = B.float() + C = C.float() + x = A.new_zeros((batch, dim, dstate)) + ys = [] + deltaA = torch.exp(torch.einsum('bdl,dn->bdln', delta, A)) + if not is_variable_B: + deltaB_u = torch.einsum('bdl,dn,bdl->bdln', delta, B, u) + else: + if B.dim() == 3: + deltaB_u = torch.einsum('bdl,bnl,bdl->bdln', delta, B, u) + else: + B = repeat(B, "B G N L -> B (G H) N L", H=dim // B.shape[1]) + deltaB_u = torch.einsum('bdl,bdnl,bdl->bdln', delta, B, u) + if is_variable_C and C.dim() == 4: + C = repeat(C, "B G N L -> B (G H) N L", H=dim // C.shape[1]) + last_state = None + for i in range(u.shape[2]): + x = deltaA[:, :, i] * x + deltaB_u[:, :, i] + if not is_variable_C: + y = torch.einsum('bdn,dn->bd', x, C) + else: + if C.dim() == 3: + y = torch.einsum('bdn,bn->bd', x, C[:, :, i]) + else: + y = torch.einsum('bdn,bdn->bd', x, C[:, :, :, i]) + if i == u.shape[2] - 1: + last_state = x + if y.is_complex(): + y = y.real * 2 + ys.append(y) + y = torch.stack(ys, dim=2) # (batch dim L) + out = y if D is None else y + u * rearrange(D, "d -> d 1") + if z is not None: + out = out * F.silu(z) + out = out.to(dtype=dtype_in) + return out if not return_last_state else (out, last_state) + + +class MambaInnerFnNoOutProj(torch.autograd.Function): + + @staticmethod + @custom_fwd + def forward(ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True, checkpoint_lvl=1): + """ + xz: (batch, dim, seqlen) + """ + assert checkpoint_lvl in [0, 1] + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + if torch.is_autocast_enabled(): + x_proj_weight = x_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + delta_proj_weight = delta_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + if xz.stride(-1) != 1: + xz = xz.contiguous() + conv1d_weight = rearrange(conv1d_weight, "d 1 w -> d w") + x, z = xz.chunk(2, dim=1) + conv1d_bias = conv1d_bias.contiguous() if conv1d_bias is not None else None + conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True) + # We're being very careful here about the layout, to avoid extra transposes. + # We want delta to have d as the slowest moving dimension + # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. 
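# The layout note above is the key constraint: delta is produced as a contiguous
# (d, b*l) matrix and only *viewed* as (b, d, l), so L remains the fastest-moving
# (stride-1) dimension that selective_scan_cuda checks for, and no transpose is
# materialized. A standalone check of that claim with hypothetical sizes:
import torch
from einops import rearrange

b, l, d_inner, dt_rank = 2, 16, 8, 4
feats = torch.randn(b * l, dt_rank)               # stand-in for x_dbl[:, :delta_rank]
w_delta = torch.randn(d_inner, dt_rank)           # stand-in for delta_proj_weight

delta_view = rearrange(w_delta @ feats.t(), "d (b l) -> b d l", l=l)
assert delta_view.stride(-1) == 1                 # contiguous along L; no copy required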
+ x_dbl = F.linear(rearrange(conv1d_out, 'b d l -> (b l) d'), x_proj_weight) # (bl d) + delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), "d (b l) -> b d l", l = L) + ctx.is_variable_B = B is None + ctx.is_variable_C = C is None + ctx.B_proj_bias_is_None = B_proj_bias is None + ctx.C_proj_bias_is_None = C_proj_bias is None + if B is None: # variable B + B = x_dbl[:, delta_rank:delta_rank + d_state] # (bl dstate) + if B_proj_bias is not None: + B = B + B_proj_bias.to(dtype=B.dtype) + if not A.is_complex(): + # B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous() + B = rearrange(B, "(b l) dstate -> b 1 dstate l", l=L).contiguous() + else: + B = rearrange(B, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous() + else: + if B.stride(-1) != 1: + B = B.contiguous() + if C is None: # variable C + C = x_dbl[:, -d_state:] # (bl dstate) + if C_proj_bias is not None: + C = C + C_proj_bias.to(dtype=C.dtype) + if not A.is_complex(): + # C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous() + C = rearrange(C, "(b l) dstate -> b 1 dstate l", l=L).contiguous() + else: + C = rearrange(C, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous() + else: + if C.stride(-1) != 1: + C = C.contiguous() + if D is not None: + D = D.contiguous() + out, scan_intermediates, out_z = selective_scan_cuda.fwd( + conv1d_out, delta, A, B, C, D, z, delta_bias, delta_softplus + ) + ctx.delta_softplus = delta_softplus + ctx.checkpoint_lvl = checkpoint_lvl + if checkpoint_lvl >= 1: # Will recompute conv1d_out and delta in the backward pass + conv1d_out, delta = None, None + ctx.save_for_backward(xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, + delta_proj_weight, conv1d_out, delta, + A, B, C, D, delta_bias, scan_intermediates, out) + # return rearrange(out_z, "b d l -> b l d") + return out_z + + @staticmethod + @custom_bwd + def backward(ctx, dout): + # dout: (batch, seqlen, dim) + (xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, delta_proj_weight, + conv1d_out, delta, A, B, C, D, delta_bias, scan_intermediates, out) = ctx.saved_tensors + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + x, z = xz.chunk(2, dim=1) + if dout.stride(-1) != 1: + dout = dout.contiguous() + if ctx.checkpoint_lvl == 1: + conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True) + delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), + "d (b l) -> b d l", l = L) + # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the + # backward of selective_scan_cuda with the backward of chunk). 
+ dxz = torch.empty_like(xz) # (batch, dim, seqlen) + dx, dz = dxz.chunk(2, dim=1) + # dout_y = rearrange(dout, "b l d -> b d l") # because no arrange at end of forward, so dout shape is b d l + dconv1d_out, ddelta, dA, dB, dC, dD, ddelta_bias, dz, out_z = selective_scan_cuda.bwd( + conv1d_out, delta, A, B, C, D, z, delta_bias, dout, scan_intermediates, out, dz, + ctx.delta_softplus, + True # option to recompute out_z + ) + dD = dD if D is not None else None + dx_dbl = torch.empty_like(x_dbl) + dB_proj_bias = None + if ctx.is_variable_B: + if not A.is_complex(): + dB = rearrange(dB, "b 1 dstate l -> (b l) dstate").contiguous() + else: + dB = rearrange(dB, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous() + dB_proj_bias = dB.sum(0) if not ctx.B_proj_bias_is_None else None + dx_dbl[:, delta_rank:delta_rank + d_state] = dB # (bl d) + dB = None + dC_proj_bias = None + if ctx.is_variable_C: + if not A.is_complex(): + dC = rearrange(dC, "b 1 dstate l -> (b l) dstate").contiguous() + else: + dC = rearrange(dC, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous() + dC_proj_bias = dC.sum(0) if not ctx.C_proj_bias_is_None else None + dx_dbl[:, -d_state:] = dC # (bl d) + dC = None + ddelta = rearrange(ddelta, "b d l -> d (b l)") + ddelta_proj_weight = torch.einsum("dB,Br->dr", ddelta, x_dbl[:, :delta_rank]) + dx_dbl[:, :delta_rank] = torch.einsum("dB,dr->Br", ddelta, delta_proj_weight) + dconv1d_out = rearrange(dconv1d_out, "b d l -> d (b l)") + dx_proj_weight = torch.einsum("Br,Bd->rd", dx_dbl, rearrange(conv1d_out, "b d l -> (b l) d")) + dconv1d_out = torch.addmm(dconv1d_out, x_proj_weight.t(), dx_dbl.t(), out=dconv1d_out) + dconv1d_out = rearrange(dconv1d_out, "d (b l) -> b d l", b=x.shape[0], l=x.shape[-1]) + # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the + # backward of conv1d with the backward of chunk). 
+ dx, dconv1d_weight, dconv1d_bias = causal_conv1d_cuda.causal_conv1d_bwd( + x, conv1d_weight, conv1d_bias, dconv1d_out, dx, True + ) + dconv1d_bias = dconv1d_bias if conv1d_bias is not None else None + dconv1d_weight = rearrange(dconv1d_weight, "d w -> d 1 w") + return (dxz, dconv1d_weight, dconv1d_bias, dx_proj_weight, ddelta_proj_weight, + dA, dB, dC, dD, + ddelta_bias if delta_bias is not None else None, + dB_proj_bias, dC_proj_bias, None) + + +class MambaInnerFn(torch.autograd.Function): + + @staticmethod + @custom_fwd + def forward(ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True, checkpoint_lvl=1): + """ + xz: (batch, dim, seqlen) + """ + assert checkpoint_lvl in [0, 1] + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + if torch.is_autocast_enabled(): + x_proj_weight = x_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + delta_proj_weight = delta_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + out_proj_weight = out_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + out_proj_bias = (out_proj_bias.to(dtype=torch.get_autocast_gpu_dtype()) + if out_proj_bias is not None else None) + if xz.stride(-1) != 1: + xz = xz.contiguous() + conv1d_weight = rearrange(conv1d_weight, "d 1 w -> d w") + x, z = xz.chunk(2, dim=1) + conv1d_bias = conv1d_bias.contiguous() if conv1d_bias is not None else None + conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True) + # We're being very careful here about the layout, to avoid extra transposes. + # We want delta to have d as the slowest moving dimension + # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. 
+ x_dbl = F.linear(rearrange(conv1d_out, 'b d l -> (b l) d'), x_proj_weight) # (bl d) + delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), "d (b l) -> b d l", l = L) + ctx.is_variable_B = B is None + ctx.is_variable_C = C is None + ctx.B_proj_bias_is_None = B_proj_bias is None + ctx.C_proj_bias_is_None = C_proj_bias is None + if B is None: # variable B + B = x_dbl[:, delta_rank:delta_rank + d_state] # (bl dstate) + if B_proj_bias is not None: + B = B + B_proj_bias.to(dtype=B.dtype) + if not A.is_complex(): + # B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous() + B = rearrange(B, "(b l) dstate -> b 1 dstate l", l=L).contiguous() + else: + B = rearrange(B, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous() + else: + if B.stride(-1) != 1: + B = B.contiguous() + if C is None: # variable C + C = x_dbl[:, -d_state:] # (bl dstate) + if C_proj_bias is not None: + C = C + C_proj_bias.to(dtype=C.dtype) + if not A.is_complex(): + # C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous() + C = rearrange(C, "(b l) dstate -> b 1 dstate l", l=L).contiguous() + else: + C = rearrange(C, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous() + else: + if C.stride(-1) != 1: + C = C.contiguous() + if D is not None: + D = D.contiguous() + out, scan_intermediates, out_z = selective_scan_cuda.fwd( + conv1d_out, delta, A, B, C, D, z, delta_bias, delta_softplus + ) + ctx.delta_softplus = delta_softplus + ctx.out_proj_bias_is_None = out_proj_bias is None + ctx.checkpoint_lvl = checkpoint_lvl + if checkpoint_lvl >= 1: # Will recompute conv1d_out and delta in the backward pass + conv1d_out, delta = None, None + ctx.save_for_backward(xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, + delta_proj_weight, out_proj_weight, conv1d_out, delta, + A, B, C, D, delta_bias, scan_intermediates, out) + return F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias) + + @staticmethod + @custom_bwd + def backward(ctx, dout): + # dout: (batch, seqlen, dim) + (xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, delta_proj_weight, out_proj_weight, + conv1d_out, delta, A, B, C, D, delta_bias, scan_intermediates, out) = ctx.saved_tensors + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + x, z = xz.chunk(2, dim=1) + if dout.stride(-1) != 1: + dout = dout.contiguous() + if ctx.checkpoint_lvl == 1: + conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True) + delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), + "d (b l) -> b d l", l = L) + # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the + # backward of selective_scan_cuda with the backward of chunk). 
+ dxz = torch.empty_like(xz) # (batch, dim, seqlen) + dx, dz = dxz.chunk(2, dim=1) + dout = rearrange(dout, "b l e -> e (b l)") + dout_y = rearrange(out_proj_weight.t() @ dout, "d (b l) -> b d l", l=L) + dconv1d_out, ddelta, dA, dB, dC, dD, ddelta_bias, dz, out_z = selective_scan_cuda.bwd( + conv1d_out, delta, A, B, C, D, z, delta_bias, dout_y, scan_intermediates, out, dz, + ctx.delta_softplus, + True # option to recompute out_z + ) + dout_proj_weight = torch.einsum("eB,dB->ed", dout, rearrange(out_z, "b d l -> d (b l)")) + dout_proj_bias = dout.sum(dim=(0, 1)) if not ctx.out_proj_bias_is_None else None + dD = dD if D is not None else None + dx_dbl = torch.empty_like(x_dbl) + dB_proj_bias = None + if ctx.is_variable_B: + if not A.is_complex(): + dB = rearrange(dB, "b 1 dstate l -> (b l) dstate").contiguous() + else: + dB = rearrange(dB, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous() + dB_proj_bias = dB.sum(0) if not ctx.B_proj_bias_is_None else None + dx_dbl[:, delta_rank:delta_rank + d_state] = dB # (bl d) + dB = None + dC_proj_bias = None + if ctx.is_variable_C: + if not A.is_complex(): + dC = rearrange(dC, "b 1 dstate l -> (b l) dstate").contiguous() + else: + dC = rearrange(dC, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous() + dC_proj_bias = dC.sum(0) if not ctx.C_proj_bias_is_None else None + dx_dbl[:, -d_state:] = dC # (bl d) + dC = None + ddelta = rearrange(ddelta, "b d l -> d (b l)") + ddelta_proj_weight = torch.einsum("dB,Br->dr", ddelta, x_dbl[:, :delta_rank]) + dx_dbl[:, :delta_rank] = torch.einsum("dB,dr->Br", ddelta, delta_proj_weight) + dconv1d_out = rearrange(dconv1d_out, "b d l -> d (b l)") + dx_proj_weight = torch.einsum("Br,Bd->rd", dx_dbl, rearrange(conv1d_out, "b d l -> (b l) d")) + dconv1d_out = torch.addmm(dconv1d_out, x_proj_weight.t(), dx_dbl.t(), out=dconv1d_out) + dconv1d_out = rearrange(dconv1d_out, "d (b l) -> b d l", b=x.shape[0], l=x.shape[-1]) + # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the + # backward of conv1d with the backward of chunk). 
+ dx, dconv1d_weight, dconv1d_bias = causal_conv1d_cuda.causal_conv1d_bwd( + x, conv1d_weight, conv1d_bias, dconv1d_out, dx, True + ) + dconv1d_bias = dconv1d_bias if conv1d_bias is not None else None + dconv1d_weight = rearrange(dconv1d_weight, "d w -> d 1 w") + return (dxz, dconv1d_weight, dconv1d_bias, dx_proj_weight, ddelta_proj_weight, + dout_proj_weight, dout_proj_bias, + dA, dB, dC, dD, + ddelta_bias if delta_bias is not None else None, + dB_proj_bias, dC_proj_bias, None) + + +class BiMambaInnerFn(torch.autograd.Function): + + @staticmethod + @custom_fwd + def forward(ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, A_b, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True, checkpoint_lvl=1): + """ + xz: (batch, dim, seqlen) + """ + assert checkpoint_lvl in [0, 1] + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + if torch.is_autocast_enabled(): + x_proj_weight = x_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + delta_proj_weight = delta_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + out_proj_weight = out_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + out_proj_bias = (out_proj_bias.to(dtype=torch.get_autocast_gpu_dtype()) + if out_proj_bias is not None else None) + if xz.stride(-1) != 1: + xz = xz.contiguous() + conv1d_weight = rearrange(conv1d_weight, "d 1 w -> d w") + x, z = xz.chunk(2, dim=1) + conv1d_bias = conv1d_bias.contiguous() if conv1d_bias is not None else None + conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True) + # We're being very careful here about the layout, to avoid extra transposes. + # We want delta to have d as the slowest moving dimension + # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. + x_dbl = F.linear(rearrange(conv1d_out, 'b d l -> (b l) d'), x_proj_weight) # (bl d) + delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), "d (b l) -> b d l", l = L) + ctx.is_variable_B = B is None + ctx.is_variable_C = C is None + ctx.B_proj_bias_is_None = B_proj_bias is None + ctx.C_proj_bias_is_None = C_proj_bias is None + if B is None: # variable B + B = x_dbl[:, delta_rank:delta_rank + d_state] # (bl dstate) + if B_proj_bias is not None: + B = B + B_proj_bias.to(dtype=B.dtype) + if not A.is_complex(): + # B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous() + B = rearrange(B, "(b l) dstate -> b 1 dstate l", l=L).contiguous() + else: + B = rearrange(B, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous() + else: + if B.stride(-1) != 1: + B = B.contiguous() + if C is None: # variable C + C = x_dbl[:, -d_state:] # (bl dstate) + if C_proj_bias is not None: + C = C + C_proj_bias.to(dtype=C.dtype) + if not A.is_complex(): + # C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous() + C = rearrange(C, "(b l) dstate -> b 1 dstate l", l=L).contiguous() + else: + C = rearrange(C, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous() + else: + if C.stride(-1) != 1: + C = C.contiguous() + if D is not None: + D = D.contiguous() + out_f, scan_intermediates_f, out_z_f = selective_scan_cuda.fwd( + conv1d_out, delta, A, B, C, D, z, delta_bias, delta_softplus + ) + assert not A_b.is_complex(), "A should not be complex!!" 
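# The bidirectional variant runs the scan twice: causally with A, and anti-causally
# by flipping the inputs, scanning with A_b, and flipping the result back before the
# two directions are summed (out_z_f + out_z_b.flip([-1]) below; the pure-PyTorch
# bimamba_inner_ref at the end of this file spells out the same combination). A 1-D
# sketch of the flip/scan/flip idea, with cumsum standing in for the selective scan:
import torch

seq = torch.arange(1.0, 6.0)                           # [1, 2, 3, 4, 5]
causal = torch.cumsum(seq, dim=0)                      # left-to-right prefix scan
anticausal = torch.cumsum(seq.flip(0), dim=0).flip(0)  # right-to-left suffix scan
combined = causal + anticausal                         # per-position sum of both directions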
+ out_b, scan_intermediates_b, out_z_b = selective_scan_cuda.fwd( + conv1d_out.flip([-1]), delta.flip([-1]), A_b, B.flip([-1]), C.flip([-1]), D, z.flip([-1]), delta_bias, delta_softplus, + ) + + out_z = out_z_f + out_z_b.flip([-1]) + + ctx.delta_softplus = delta_softplus + ctx.out_proj_bias_is_None = out_proj_bias is None + ctx.checkpoint_lvl = checkpoint_lvl + if checkpoint_lvl >= 1: # Will recompute conv1d_out and delta in the backward pass + conv1d_out, delta = None, None + ctx.save_for_backward(xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, + delta_proj_weight, out_proj_weight, conv1d_out, delta, + A, A_b, B, C, D, delta_bias, scan_intermediates_f, scan_intermediates_b, out_f, out_b) + return F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias) + + @staticmethod + @custom_bwd + def backward(ctx, dout): + # dout: (batch, seqlen, dim) + (xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, delta_proj_weight, out_proj_weight, + conv1d_out, delta, A, A_b, B, C, D, delta_bias, scan_intermediates_f, scan_intermediates_b, out_f, out_b) = ctx.saved_tensors + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + x, z = xz.chunk(2, dim=1) + if dout.stride(-1) != 1: + dout = dout.contiguous() + if ctx.checkpoint_lvl == 1: + conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True) + delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), + "d (b l) -> b d l", l = L) + # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the + # backward of selective_scan_cuda with the backward of chunk). + dxz = torch.empty_like(xz) # (batch, dim, seqlen) + dx, dz = dxz.chunk(2, dim=1) + dout = rearrange(dout, "b l e -> e (b l)") + dout_y = rearrange(out_proj_weight.t() @ dout, "d (b l) -> b d l", l=L) + dconv1d_out, ddelta, dA, dB, dC, dD, ddelta_bias, dz, out_z_f = selective_scan_cuda.bwd( + conv1d_out, delta, A, B, C, D, z, delta_bias, dout_y, scan_intermediates_f, out_f, dz, + ctx.delta_softplus, + True # option to recompute out_z + ) + # flip one + dz_b = torch.empty_like(dz) + dconv1d_out_f_b, ddelta_f_b, dA_b, dB_f_b, dC_f_b, dD_b, ddelta_bias_b, dz_b, out_z_b = selective_scan_cuda.bwd( + conv1d_out.flip([-1]), delta.flip([-1]), A_b, B.flip([-1]), C.flip([-1]), D, z.flip([-1]), delta_bias, dout_y.flip([-1]), scan_intermediates_b, out_b, dz_b, + ctx.delta_softplus, + True # option to recompute out_z + ) + + dconv1d_out = dconv1d_out + dconv1d_out_f_b.flip([-1]) + ddelta = ddelta + ddelta_f_b.flip([-1]) + dB = dB + dB_f_b.flip([-1]) + dC = dC + dC_f_b.flip([-1]) + dD = dD + dD_b + ddelta_bias = ddelta_bias + ddelta_bias_b + dz = dz + dz_b.flip([-1]) + out_z = out_z_f + out_z_b.flip([-1]) + + dout_proj_weight = torch.einsum("eB,dB->ed", dout, rearrange(out_z, "b d l -> d (b l)")) + dout_proj_bias = dout.sum(dim=(0, 1)) if not ctx.out_proj_bias_is_None else None + dD = dD if D is not None else None + dx_dbl = torch.empty_like(x_dbl) + dB_proj_bias = None + if ctx.is_variable_B: + if not A.is_complex(): + dB = rearrange(dB, "b 1 dstate l -> (b l) dstate").contiguous() + else: + dB = rearrange(dB, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous() + dB_proj_bias = dB.sum(0) if not ctx.B_proj_bias_is_None else None + dx_dbl[:, delta_rank:delta_rank + d_state] = dB # (bl d) + dB = None + dC_proj_bias = None + if ctx.is_variable_C: + if not A.is_complex(): + dC = rearrange(dC, "b 1 dstate l -> (b l) dstate").contiguous() + else: + dC = 
rearrange(dC, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous() + dC_proj_bias = dC.sum(0) if not ctx.C_proj_bias_is_None else None + dx_dbl[:, -d_state:] = dC # (bl d) + dC = None + ddelta = rearrange(ddelta, "b d l -> d (b l)") + ddelta_proj_weight = torch.einsum("dB,Br->dr", ddelta, x_dbl[:, :delta_rank]) + dx_dbl[:, :delta_rank] = torch.einsum("dB,dr->Br", ddelta, delta_proj_weight) + dconv1d_out = rearrange(dconv1d_out, "b d l -> d (b l)") + dx_proj_weight = torch.einsum("Br,Bd->rd", dx_dbl, rearrange(conv1d_out, "b d l -> (b l) d")) + dconv1d_out = torch.addmm(dconv1d_out, x_proj_weight.t(), dx_dbl.t(), out=dconv1d_out) + dconv1d_out = rearrange(dconv1d_out, "d (b l) -> b d l", b=x.shape[0], l=x.shape[-1]) + # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the + # backward of conv1d with the backward of chunk). + dx, dconv1d_weight, dconv1d_bias = causal_conv1d_cuda.causal_conv1d_bwd( + x, conv1d_weight, conv1d_bias, dconv1d_out, dx, True + ) + dconv1d_bias = dconv1d_bias if conv1d_bias is not None else None + dconv1d_weight = rearrange(dconv1d_weight, "d w -> d 1 w") + return (dxz, dconv1d_weight, dconv1d_bias, dx_proj_weight, ddelta_proj_weight, + dout_proj_weight, dout_proj_bias, + dA, dA_b, dB, dC, dD, + ddelta_bias if delta_bias is not None else None, + dB_proj_bias, dC_proj_bias, None) + + +def mamba_inner_fn( + xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True +): + return MambaInnerFn.apply(xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, B, C, D, delta_bias, B_proj_bias, C_proj_bias, delta_softplus) + +def bimamba_inner_fn( + xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, A_b, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True +): + return BiMambaInnerFn.apply(xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, A_b, B, C, D, delta_bias, B_proj_bias, C_proj_bias, delta_softplus) + + +def mamba_inner_fn_no_out_proj( + xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True +): + return MambaInnerFnNoOutProj.apply(xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + A, B, C, D, delta_bias, B_proj_bias, C_proj_bias, delta_softplus) + + +def mamba_inner_ref( + xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True +): + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + x, z = xz.chunk(2, dim=1) + x = causal_conv1d_fn(x, rearrange(conv1d_weight, "d 1 w -> d w"), conv1d_bias, "silu") + # We're being very careful here about the layout, to avoid extra transposes. + # We want delta to have d as the slowest moving dimension + # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. 
+ x_dbl = F.linear(rearrange(x, 'b d l -> (b l) d'), x_proj_weight) # (bl d) + delta = delta_proj_weight @ x_dbl[:, :delta_rank].t() + delta = rearrange(delta, "d (b l) -> b d l", l=L) + if B is None: # variable B + B = x_dbl[:, delta_rank:delta_rank + d_state] # (bl d) + if B_proj_bias is not None: + B = B + B_proj_bias.to(dtype=B.dtype) + if not A.is_complex(): + B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous() + else: + B = rearrange(B, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous() + if C is None: # variable B + C = x_dbl[:, -d_state:] # (bl d) + if C_proj_bias is not None: + C = C + C_proj_bias.to(dtype=C.dtype) + if not A.is_complex(): + C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous() + else: + C = rearrange(C, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous() + y = selective_scan_fn(x, delta, A, B, C, D, z=z, delta_bias=delta_bias, delta_softplus=True) + return F.linear(rearrange(y, "b d l -> b l d"), out_proj_weight, out_proj_bias) + + +def bimamba_inner_ref( + xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, A_b, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True +): + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + x, z = xz.chunk(2, dim=1) + x = causal_conv1d_fn(x, rearrange(conv1d_weight, "d 1 w -> d w"), conv1d_bias, "silu") + # We're being very careful here about the layout, to avoid extra transposes. + # We want delta to have d as the slowest moving dimension + # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. + x_dbl = F.linear(rearrange(x, 'b d l -> (b l) d'), x_proj_weight) # (bl d) + delta = delta_proj_weight @ x_dbl[:, :delta_rank].t() + delta = rearrange(delta, "d (b l) -> b d l", l=L) + if B is None: # variable B + B = x_dbl[:, delta_rank:delta_rank + d_state] # (bl d) + if B_proj_bias is not None: + B = B + B_proj_bias.to(dtype=B.dtype) + if not A.is_complex(): + B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous() + else: + B = rearrange(B, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous() + if C is None: # variable B + C = x_dbl[:, -d_state:] # (bl d) + if C_proj_bias is not None: + C = C + C_proj_bias.to(dtype=C.dtype) + if not A.is_complex(): + C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous() + else: + C = rearrange(C, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous() + y = selective_scan_fn(x, delta, A, B, C, D, z=z, delta_bias=delta_bias, delta_softplus=True) + y_b = selective_scan_fn(x.flip([-1]), delta.flip([-1]), A_b, B.flip([-1]), C.flip([-1]), D, z.flip([-1]), delta_bias, delta_softplus=True) + y = y + y_b.flip([-1]) + return F.linear(rearrange(y, "b d l -> b l d"), out_proj_weight, out_proj_bias) diff --git a/SegMamba/mamba/mamba_ssm/ops/triton/__init__.py b/SegMamba/mamba/mamba_ssm/ops/triton/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SegMamba/mamba/mamba_ssm/ops/triton/layernorm.py b/SegMamba/mamba/mamba_ssm/ops/triton/layernorm.py new file mode 100644 index 0000000000000000000000000000000000000000..8df9d042a34b6584196f218f5ffeeb104799bd5e --- /dev/null +++ b/SegMamba/mamba/mamba_ssm/ops/triton/layernorm.py @@ -0,0 +1,636 @@ +# Copyright (c) 2023, Tri Dao. +# Implement residual + layer_norm / rms_norm. 
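# The kernels in this file fuse the residual add with LayerNorm/RMSNorm and can hand
# back the updated residual stream (prenorm=True), which is what Block.forward
# consumes. A CPU-only sketch of the RMSNorm-with-residual semantics that
# rms_norm_ref below (and the fused Triton path) implements; sizes are hypothetical:
import torch

x = torch.randn(2, 16, 64)        # (batch, seqlen, d_model) hidden states
res = torch.randn(2, 16, 64)      # incoming residual stream
w = torch.ones(64)                # RMSNorm weight
eps = 1e-6

new_res = x + res                                                   # fused residual add
rstd = torch.rsqrt(new_res.pow(2).mean(dim=-1, keepdim=True) + eps)
out = new_res * rstd * w                                            # RMSNorm: no mean subtraction
# Matches rms_norm_ref(x, w, None, residual=res, prenorm=True, upcast=True),
# which returns (out, new_res).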
+ +# Based on the Triton LayerNorm tutorial: https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html +# For the backward pass, we keep weight_grad and bias_grad in registers and accumulate. +# This is faster for dimensions up to 8k, but after that it's much slower due to register spilling. +# The models we train have hidden dim up to 8k anyway (e.g. Llama 70B), so this is fine. + +import math + +import torch +import torch.nn.functional as F +from torch.cuda.amp import custom_fwd, custom_bwd + +import triton +import triton.language as tl + + +def layer_norm_ref(x, weight, bias, residual=None, eps=1e-6, prenorm=False, upcast=False): + dtype = x.dtype + if upcast: + weight = weight.float() + bias = bias.float() if bias is not None else None + if upcast: + x = x.float() + residual = residual.float() if residual is not None else residual + if residual is not None: + x = (x + residual).to(x.dtype) + out = F.layer_norm(x.to(weight.dtype), x.shape[-1:], weight=weight, bias=bias, eps=eps).to( + dtype + ) + return out if not prenorm else (out, x) + + +def rms_norm_ref(x, weight, bias, residual=None, eps=1e-6, prenorm=False, upcast=False): + dtype = x.dtype + if upcast: + weight = weight.float() + bias = bias.float() if bias is not None else None + if upcast: + x = x.float() + residual = residual.float() if residual is not None else residual + if residual is not None: + x = (x + residual).to(x.dtype) + rstd = 1 / torch.sqrt((x.square()).mean(dim=-1, keepdim=True) + eps) + out = (x * rstd * weight) + bias if bias is not None else (x * rstd * weight) + out = out.to(dtype) + return out if not prenorm else (out, x) + + +@triton.autotune( + configs=[ + triton.Config({}, num_warps=1), + triton.Config({}, num_warps=2), + triton.Config({}, num_warps=4), + triton.Config({}, num_warps=8), + triton.Config({}, num_warps=16), + triton.Config({}, num_warps=32), + ], + key=["N", "HAS_RESIDUAL", "STORE_RESIDUAL_OUT", "IS_RMS_NORM", "HAS_BIAS"], +) +# @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None}) +# @triton.heuristics({"HAS_RESIDUAL": lambda args: args["RESIDUAL"] is not None}) +@triton.jit +def _layer_norm_fwd_1pass_kernel( + X, # pointer to the input + Y, # pointer to the output + W, # pointer to the weights + B, # pointer to the biases + RESIDUAL, # pointer to the residual + RESIDUAL_OUT, # pointer to the residual + Mean, # pointer to the mean + Rstd, # pointer to the 1/std + stride_x_row, # how much to increase the pointer when moving by 1 row + stride_y_row, + stride_res_row, + stride_res_out_row, + N, # number of columns in X + eps, # epsilon to avoid division by zero + IS_RMS_NORM: tl.constexpr, + BLOCK_N: tl.constexpr, + HAS_RESIDUAL: tl.constexpr, + STORE_RESIDUAL_OUT: tl.constexpr, + HAS_BIAS: tl.constexpr, +): + # Map the program id to the row of X and Y it should compute. 
+ row = tl.program_id(0) + X += row * stride_x_row + Y += row * stride_y_row + if HAS_RESIDUAL: + RESIDUAL += row * stride_res_row + if STORE_RESIDUAL_OUT: + RESIDUAL_OUT += row * stride_res_out_row + # Compute mean and variance + cols = tl.arange(0, BLOCK_N) + x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32) + if HAS_RESIDUAL: + residual = tl.load(RESIDUAL + cols, mask=cols < N, other=0.0).to(tl.float32) + x += residual + if STORE_RESIDUAL_OUT: + tl.store(RESIDUAL_OUT + cols, x, mask=cols < N) + if not IS_RMS_NORM: + mean = tl.sum(x, axis=0) / N + tl.store(Mean + row, mean) + xbar = tl.where(cols < N, x - mean, 0.0) + var = tl.sum(xbar * xbar, axis=0) / N + else: + xbar = tl.where(cols < N, x, 0.0) + var = tl.sum(xbar * xbar, axis=0) / N + rstd = 1 / tl.sqrt(var + eps) + tl.store(Rstd + row, rstd) + # Normalize and apply linear transformation + mask = cols < N + w = tl.load(W + cols, mask=mask).to(tl.float32) + if HAS_BIAS: + b = tl.load(B + cols, mask=mask).to(tl.float32) + x_hat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd + y = x_hat * w + b if HAS_BIAS else x_hat * w + # Write output + tl.store(Y + cols, y, mask=mask) + + +def _layer_norm_fwd( + x, weight, bias, eps, residual=None, out_dtype=None, residual_dtype=None, is_rms_norm=False +): + if residual is not None: + residual_dtype = residual.dtype + M, N = x.shape + assert x.stride(-1) == 1 + if residual is not None: + assert residual.stride(-1) == 1 + assert residual.shape == (M, N) + assert weight.shape == (N,) + assert weight.stride(-1) == 1 + if bias is not None: + assert bias.stride(-1) == 1 + assert bias.shape == (N,) + # allocate output + y = torch.empty_like(x, dtype=x.dtype if out_dtype is None else out_dtype) + assert y.stride(-1) == 1 + if residual is not None or (residual_dtype is not None and residual_dtype != x.dtype): + residual_out = torch.empty(M, N, device=x.device, dtype=residual_dtype) + assert residual_out.stride(-1) == 1 + else: + residual_out = None + mean = torch.empty((M,), dtype=torch.float32, device="cuda") if not is_rms_norm else None + rstd = torch.empty((M,), dtype=torch.float32, device="cuda") + # Less than 64KB per feature: enqueue fused kernel + MAX_FUSED_SIZE = 65536 // x.element_size() + BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N)) + if N > BLOCK_N: + raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.") + # heuristics for number of warps + with torch.cuda.device(x.device.index): + _layer_norm_fwd_1pass_kernel[(M,)]( + x, + y, + weight, + bias, + residual, + residual_out, + mean, + rstd, + x.stride(0), + y.stride(0), + residual.stride(0) if residual is not None else 0, + residual_out.stride(0) if residual_out is not None else 0, + N, + eps, + is_rms_norm, + BLOCK_N, + residual is not None, + residual_out is not None, + bias is not None, + ) + # residual_out is None if residual is None and residual_dtype == input_dtype + return y, mean, rstd, residual_out if residual_out is not None else x + + +@triton.autotune( + configs=[ + triton.Config({}, num_warps=1), + triton.Config({}, num_warps=2), + triton.Config({}, num_warps=4), + triton.Config({}, num_warps=8), + triton.Config({}, num_warps=16), + triton.Config({}, num_warps=32), + ], + key=["N", "HAS_DRESIDUAL", "STORE_DRESIDUAL", "IS_RMS_NORM", "HAS_BIAS"], +) +# @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None}) +# @triton.heuristics({"HAS_DRESIDUAL": lambda args: args["DRESIDUAL"] is not None}) +# @triton.heuristics({"STORE_DRESIDUAL": lambda args: args["DRESIDUAL_IN"] is not 
None}) +@triton.heuristics({"RECOMPUTE_OUTPUT": lambda args: args["Y"] is not None}) +@triton.jit +def _layer_norm_bwd_kernel( + X, # pointer to the input + W, # pointer to the weights + B, # pointer to the biases + Y, # pointer to the output to be recomputed + DY, # pointer to the output gradient + DX, # pointer to the input gradient + DW, # pointer to the partial sum of weights gradient + DB, # pointer to the partial sum of biases gradient + DRESIDUAL, + DRESIDUAL_IN, + Mean, # pointer to the mean + Rstd, # pointer to the 1/std + stride_x_row, # how much to increase the pointer when moving by 1 row + stride_y_row, + stride_dy_row, + stride_dx_row, + stride_dres_row, + stride_dres_in_row, + M, # number of rows in X + N, # number of columns in X + eps, # epsilon to avoid division by zero + rows_per_program, + IS_RMS_NORM: tl.constexpr, + BLOCK_N: tl.constexpr, + HAS_DRESIDUAL: tl.constexpr, + STORE_DRESIDUAL: tl.constexpr, + HAS_BIAS: tl.constexpr, + RECOMPUTE_OUTPUT: tl.constexpr, +): + # Map the program id to the elements of X, DX, and DY it should compute. + row_block_id = tl.program_id(0) + row_start = row_block_id * rows_per_program + cols = tl.arange(0, BLOCK_N) + mask = cols < N + X += row_start * stride_x_row + if HAS_DRESIDUAL: + DRESIDUAL += row_start * stride_dres_row + if STORE_DRESIDUAL: + DRESIDUAL_IN += row_start * stride_dres_in_row + DY += row_start * stride_dy_row + DX += row_start * stride_dx_row + if RECOMPUTE_OUTPUT: + Y += row_start * stride_y_row + w = tl.load(W + cols, mask=mask).to(tl.float32) + if RECOMPUTE_OUTPUT and HAS_BIAS: + b = tl.load(B + cols, mask=mask, other=0.0).to(tl.float32) + dw = tl.zeros((BLOCK_N,), dtype=tl.float32) + if HAS_BIAS: + db = tl.zeros((BLOCK_N,), dtype=tl.float32) + row_end = min((row_block_id + 1) * rows_per_program, M) + for row in range(row_start, row_end): + # Load data to SRAM + x = tl.load(X + cols, mask=mask, other=0).to(tl.float32) + dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32) + if not IS_RMS_NORM: + mean = tl.load(Mean + row) + rstd = tl.load(Rstd + row) + # Compute dx + xhat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd + xhat = tl.where(mask, xhat, 0.0) + if RECOMPUTE_OUTPUT: + y = xhat * w + b if HAS_BIAS else xhat * w + tl.store(Y + cols, y, mask=mask) + wdy = w * dy + dw += dy * xhat + if HAS_BIAS: + db += dy + if not IS_RMS_NORM: + c1 = tl.sum(xhat * wdy, axis=0) / N + c2 = tl.sum(wdy, axis=0) / N + dx = (wdy - (xhat * c1 + c2)) * rstd + else: + c1 = tl.sum(xhat * wdy, axis=0) / N + dx = (wdy - xhat * c1) * rstd + if HAS_DRESIDUAL: + dres = tl.load(DRESIDUAL + cols, mask=mask, other=0).to(tl.float32) + dx += dres + # Write dx + if STORE_DRESIDUAL: + tl.store(DRESIDUAL_IN + cols, dx, mask=mask) + tl.store(DX + cols, dx, mask=mask) + + X += stride_x_row + if HAS_DRESIDUAL: + DRESIDUAL += stride_dres_row + if STORE_DRESIDUAL: + DRESIDUAL_IN += stride_dres_in_row + if RECOMPUTE_OUTPUT: + Y += stride_y_row + DY += stride_dy_row + DX += stride_dx_row + tl.store(DW + row_block_id * N + cols, dw, mask=mask) + if HAS_BIAS: + tl.store(DB + row_block_id * N + cols, db, mask=mask) + + +def _layer_norm_bwd( + dy, + x, + weight, + bias, + eps, + mean, + rstd, + dresidual=None, + has_residual=False, + is_rms_norm=False, + x_dtype=None, + recompute_output=False, +): + M, N = x.shape + assert x.stride(-1) == 1 + assert dy.stride(-1) == 1 + assert dy.shape == (M, N) + if dresidual is not None: + assert dresidual.stride(-1) == 1 + assert dresidual.shape == (M, N) + assert weight.shape == (N,) + assert 
weight.stride(-1) == 1 + if bias is not None: + assert bias.stride(-1) == 1 + assert bias.shape == (N,) + # allocate output + dx = ( + torch.empty_like(x) + if x_dtype is None + else torch.empty(M, N, dtype=x_dtype, device=x.device) + ) + dresidual_in = torch.empty_like(x) if has_residual and dx.dtype != x.dtype else None + y = torch.empty(M, N, dtype=dy.dtype, device=dy.device) if recompute_output else None + + # Less than 64KB per feature: enqueue fused kernel + MAX_FUSED_SIZE = 65536 // x.element_size() + BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N)) + if N > BLOCK_N: + raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.") + sm_count = torch.cuda.get_device_properties(x.device).multi_processor_count + _dw = torch.empty((sm_count, N), dtype=torch.float32, device=weight.device) + _db = ( + torch.empty((sm_count, N), dtype=torch.float32, device=bias.device) + if bias is not None + else None + ) + rows_per_program = math.ceil(M / sm_count) + grid = (sm_count,) + with torch.cuda.device(x.device.index): + _layer_norm_bwd_kernel[grid]( + x, + weight, + bias, + y, + dy, + dx, + _dw, + _db, + dresidual, + dresidual_in, + mean, + rstd, + x.stride(0), + 0 if not recompute_output else y.stride(0), + dy.stride(0), + dx.stride(0), + dresidual.stride(0) if dresidual is not None else 0, + dresidual_in.stride(0) if dresidual_in is not None else 0, + M, + N, + eps, + rows_per_program, + is_rms_norm, + BLOCK_N, + dresidual is not None, + dresidual_in is not None, + bias is not None, + ) + dw = _dw.sum(0).to(weight.dtype) + db = _db.sum(0).to(bias.dtype) if bias is not None else None + # Don't need to compute dresidual_in separately in this case + if has_residual and dx.dtype == x.dtype: + dresidual_in = dx + return (dx, dw, db, dresidual_in) if not recompute_output else (dx, dw, db, dresidual_in, y) + + +class LayerNormFn(torch.autograd.Function): + @staticmethod + def forward( + ctx, + x, + weight, + bias, + residual=None, + eps=1e-6, + prenorm=False, + residual_in_fp32=False, + is_rms_norm=False, + ): + x_shape_og = x.shape + # reshape input data into 2D tensor + x = x.reshape(-1, x.shape[-1]) + if x.stride(-1) != 1: + x = x.contiguous() + if residual is not None: + assert residual.shape == x_shape_og + residual = residual.reshape(-1, residual.shape[-1]) + if residual.stride(-1) != 1: + residual = residual.contiguous() + weight = weight.contiguous() + if bias is not None: + bias = bias.contiguous() + residual_dtype = ( + residual.dtype + if residual is not None + else (torch.float32 if residual_in_fp32 else None) + ) + y, mean, rstd, residual_out = _layer_norm_fwd( + x, weight, bias, eps, residual, residual_dtype=residual_dtype, is_rms_norm=is_rms_norm + ) + ctx.save_for_backward(residual_out, weight, bias, mean, rstd) + ctx.x_shape_og = x_shape_og + ctx.eps = eps + ctx.is_rms_norm = is_rms_norm + ctx.has_residual = residual is not None + ctx.prenorm = prenorm + ctx.x_dtype = x.dtype + y = y.reshape(x_shape_og) + return y if not prenorm else (y, residual_out.reshape(x_shape_og)) + + @staticmethod + def backward(ctx, dy, *args): + x, weight, bias, mean, rstd = ctx.saved_tensors + dy = dy.reshape(-1, dy.shape[-1]) + if dy.stride(-1) != 1: + dy = dy.contiguous() + assert dy.shape == x.shape + if ctx.prenorm: + dresidual = args[0] + dresidual = dresidual.reshape(-1, dresidual.shape[-1]) + if dresidual.stride(-1) != 1: + dresidual = dresidual.contiguous() + assert dresidual.shape == x.shape + else: + dresidual = None + dx, dw, db, dresidual_in = _layer_norm_bwd( + dy, + x, + 
weight, + bias, + ctx.eps, + mean, + rstd, + dresidual, + ctx.has_residual, + ctx.is_rms_norm, + x_dtype=ctx.x_dtype, + ) + return ( + dx.reshape(ctx.x_shape_og), + dw, + db, + dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None, + None, + None, + None, + None, + ) + + +def layer_norm_fn( + x, + weight, + bias, + residual=None, + eps=1e-6, + prenorm=False, + residual_in_fp32=False, + is_rms_norm=False, +): + return LayerNormFn.apply(x, weight, bias, residual, eps, prenorm, residual_in_fp32, is_rms_norm) + + +def rms_norm_fn(x, weight, bias, residual=None, prenorm=False, residual_in_fp32=False, eps=1e-6): + return LayerNormFn.apply(x, weight, bias, residual, eps, prenorm, residual_in_fp32, True) + + +class RMSNorm(torch.nn.Module): + def __init__(self, hidden_size, eps=1e-5, device=None, dtype=None): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.eps = eps + self.weight = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs)) + self.register_parameter("bias", None) + self.reset_parameters() + + def reset_parameters(self): + torch.nn.init.ones_(self.weight) + + def forward(self, x, residual=None, prenorm=False, residual_in_fp32=False): + return rms_norm_fn( + x, + self.weight, + self.bias, + residual=residual, + eps=self.eps, + prenorm=prenorm, + residual_in_fp32=residual_in_fp32, + is_rms_norm=True, + ) + + +class LayerNormLinearFn(torch.autograd.Function): + @staticmethod + @custom_fwd + def forward( + ctx, + x, + norm_weight, + norm_bias, + linear_weight, + linear_bias, + residual=None, + eps=1e-6, + prenorm=False, + residual_in_fp32=False, + is_rms_norm=False, + ): + x_shape_og = x.shape + # reshape input data into 2D tensor + x = x.reshape(-1, x.shape[-1]) + if x.stride(-1) != 1: + x = x.contiguous() + if residual is not None: + assert residual.shape == x_shape_og + residual = residual.reshape(-1, residual.shape[-1]) + if residual.stride(-1) != 1: + residual = residual.contiguous() + norm_weight = norm_weight.contiguous() + if norm_bias is not None: + norm_bias = norm_bias.contiguous() + residual_dtype = ( + residual.dtype + if residual is not None + else (torch.float32 if residual_in_fp32 else None) + ) + y, mean, rstd, residual_out = _layer_norm_fwd( + x, + norm_weight, + norm_bias, + eps, + residual, + out_dtype=None if not torch.is_autocast_enabled() else torch.get_autocast_gpu_dtype(), + residual_dtype=residual_dtype, + is_rms_norm=is_rms_norm, + ) + y = y.reshape(x_shape_og) + dtype = torch.get_autocast_gpu_dtype() if torch.is_autocast_enabled() else y.dtype + linear_weight = linear_weight.to(dtype) + linear_bias = linear_bias.to(dtype) if linear_bias is not None else None + out = F.linear(y.to(linear_weight.dtype), linear_weight, linear_bias) + # We don't store y, will be recomputed in the backward pass to save memory + ctx.save_for_backward(residual_out, norm_weight, norm_bias, linear_weight, mean, rstd) + ctx.x_shape_og = x_shape_og + ctx.eps = eps + ctx.is_rms_norm = is_rms_norm + ctx.has_residual = residual is not None + ctx.prenorm = prenorm + ctx.x_dtype = x.dtype + ctx.linear_bias_is_none = linear_bias is None + return out if not prenorm else (out, residual_out.reshape(x_shape_og)) + + @staticmethod + @custom_bwd + def backward(ctx, dout, *args): + x, norm_weight, norm_bias, linear_weight, mean, rstd = ctx.saved_tensors + dout = dout.reshape(-1, dout.shape[-1]) + dy = F.linear(dout, linear_weight.t()) + dlinear_bias = None if ctx.linear_bias_is_none else dout.sum(0) + if dy.stride(-1) != 1: + dy = dy.contiguous() + 
assert dy.shape == x.shape + if ctx.prenorm: + dresidual = args[0] + dresidual = dresidual.reshape(-1, dresidual.shape[-1]) + if dresidual.stride(-1) != 1: + dresidual = dresidual.contiguous() + assert dresidual.shape == x.shape + else: + dresidual = None + dx, dnorm_weight, dnorm_bias, dresidual_in, y = _layer_norm_bwd( + dy, + x, + norm_weight, + norm_bias, + ctx.eps, + mean, + rstd, + dresidual, + ctx.has_residual, + ctx.is_rms_norm, + x_dtype=ctx.x_dtype, + recompute_output=True, + ) + dlinear_weight = torch.einsum("bo,bi->oi", dout, y) + return ( + dx.reshape(ctx.x_shape_og), + dnorm_weight, + dnorm_bias, + dlinear_weight, + dlinear_bias, + dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None, + None, + None, + None, + None, + ) + + +def layer_norm_linear_fn( + x, + norm_weight, + norm_bias, + linear_weight, + linear_bias, + residual=None, + eps=1e-6, + prenorm=False, + residual_in_fp32=False, + is_rms_norm=False, +): + return LayerNormLinearFn.apply( + x, + norm_weight, + norm_bias, + linear_weight, + linear_bias, + residual, + eps, + prenorm, + residual_in_fp32, + is_rms_norm, + ) diff --git a/SegMamba/mamba/mamba_ssm/ops/triton/selective_state_update.py b/SegMamba/mamba/mamba_ssm/ops/triton/selective_state_update.py new file mode 100644 index 0000000000000000000000000000000000000000..fa95de73f173292914c5f00fbe9426937d00e502 --- /dev/null +++ b/SegMamba/mamba/mamba_ssm/ops/triton/selective_state_update.py @@ -0,0 +1,192 @@ +# Copyright (c) 2023, Tri Dao. + +"""We want triton==2.1.0 for this +""" + +import math +import torch +import torch.nn.functional as F + +import triton +import triton.language as tl + +from einops import rearrange, repeat + + +@triton.heuristics({"HAS_DT_BIAS": lambda args: args["dt_bias_ptr"] is not None}) +@triton.heuristics({"HAS_D": lambda args: args["D_ptr"] is not None}) +@triton.heuristics({"HAS_Z": lambda args: args["z_ptr"] is not None}) +@triton.heuristics({"BLOCK_SIZE_DSTATE": lambda args: triton.next_power_of_2(args["dstate"])}) +@triton.jit +def _selective_scan_update_kernel( + # Pointers to matrices + state_ptr, x_ptr, dt_ptr, dt_bias_ptr, A_ptr, B_ptr, C_ptr, D_ptr, z_ptr, out_ptr, + # Matrix dimensions + batch, dim, dstate, + # Strides + stride_state_batch, stride_state_dim, stride_state_dstate, + stride_x_batch, stride_x_dim, + stride_dt_batch, stride_dt_dim, + stride_dt_bias_dim, + stride_A_dim, stride_A_dstate, + stride_B_batch, stride_B_dstate, + stride_C_batch, stride_C_dstate, + stride_D_dim, + stride_z_batch, stride_z_dim, + stride_out_batch, stride_out_dim, + # Meta-parameters + DT_SOFTPLUS: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, + HAS_DT_BIAS: tl.constexpr, + HAS_D: tl.constexpr, + HAS_Z: tl.constexpr, + BLOCK_SIZE_DSTATE: tl.constexpr, +): + pid_m = tl.program_id(axis=0) + pid_b = tl.program_id(axis=1) + state_ptr += pid_b * stride_state_batch + x_ptr += pid_b * stride_x_batch + dt_ptr += pid_b * stride_dt_batch + B_ptr += pid_b * stride_B_batch + C_ptr += pid_b * stride_C_batch + if HAS_Z: + z_ptr += pid_b * stride_z_batch + out_ptr += pid_b * stride_out_batch + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = tl.arange(0, BLOCK_SIZE_DSTATE) + state_ptrs = state_ptr + (offs_m[:, None] * stride_state_dim + offs_n[None, :] * stride_state_dstate) + x_ptrs = x_ptr + offs_m * stride_x_dim + dt_ptrs = dt_ptr + offs_m * stride_dt_dim + if HAS_DT_BIAS: + dt_bias_ptrs = dt_bias_ptr + offs_m * stride_dt_bias_dim + A_ptrs = A_ptr + (offs_m[:, None] * stride_A_dim + offs_n[None, :] * stride_A_dstate) + 
B_ptrs = B_ptr + offs_n * stride_B_dstate + C_ptrs = C_ptr + offs_n * stride_C_dstate + if HAS_D: + D_ptrs = D_ptr + offs_m * stride_D_dim + if HAS_Z: + z_ptrs = z_ptr + offs_m * stride_z_dim + out_ptrs = out_ptr + offs_m * stride_out_dim + + state = tl.load(state_ptrs, mask=(offs_m[:, None] < dim) & (offs_n[None, :] < dstate), other=0.0) + x = tl.load(x_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + dt = tl.load(dt_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + if HAS_DT_BIAS: + dt += tl.load(dt_bias_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + if DT_SOFTPLUS: + dt = tl.log(1.0 + tl.exp(dt)) + A = tl.load(A_ptrs, mask=(offs_m[:, None] < dim) & (offs_n[None, :] < dstate), other=0.0).to(tl.float32) + dA = tl.exp(A * dt[:, None]) + B = tl.load(B_ptrs, mask=offs_n < dstate, other=0.0).to(tl.float32) + C = tl.load(C_ptrs, mask=offs_n < dstate, other=0.0).to(tl.float32) + if HAS_D: + D = tl.load(D_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + if HAS_Z: + z = tl.load(z_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + + dB = B[None, :] * dt[:, None] + state = state * dA + dB * x[:, None] + tl.store(state_ptrs, state, mask=(offs_m[:, None] < dim) & (offs_n[None, :] < dstate)) + out = tl.sum(state * C[None, :], axis=1) + if HAS_D: + out += x * D + if HAS_Z: + out *= z * tl.sigmoid(z) + tl.store(out_ptrs, out, mask=offs_m < dim) + + +def selective_state_update(state, x, dt, A, B, C, D=None, z=None, dt_bias=None, dt_softplus=False): + """ + Argument: + state: (batch, dim, dstate) + x: (batch, dim) + dt: (batch, dim) + A: (dim, dstate) + B: (batch, dstate) + C: (batch, dstate) + D: (dim,) + z: (batch, dim) + dt_bias: (dim,) + Return: + out: (batch, dim) + """ + batch, dim, dstate = state.shape + assert x.shape == (batch, dim) + assert dt.shape == x.shape + assert A.shape == (dim, dstate) + assert B.shape == (batch, dstate) + assert C.shape == B.shape + if D is not None: + assert D.shape == (dim,) + if z is not None: + assert z.shape == x.shape + if dt_bias is not None: + assert dt_bias.shape == (dim,) + out = torch.empty_like(x) + grid = lambda META: (triton.cdiv(dim, META['BLOCK_SIZE_M']), batch) + z_strides = ((z.stride(0), z.stride(1)) if z is not None else (0, 0)) + # We don't want autotune since it will overwrite the state + # We instead tune by hand. 
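# What the kernel above computes for one decoding step (the same math as
# selective_state_update_ref further down, with the D-skip and z-gating omitted):
# discretize A and B with dt, advance the per-channel state, then read it out with
# C. Standalone sketch with hypothetical sizes and names chosen so they do not
# shadow the arguments of this function:
import torch
import torch.nn.functional as F

nb, nd, ns = 2, 8, 16                                 # batch, dim, dstate
s = torch.zeros(nb, nd, ns)                           # running SSM state
u_t = torch.randn(nb, nd)                             # current input x
dt_t = F.softplus(torch.randn(nb, nd))                # dt after bias + softplus
A_t = -torch.rand(nd, ns)
B_t = torch.randn(nb, ns)
C_t = torch.randn(nb, ns)

dA = torch.exp(dt_t.unsqueeze(-1) * A_t)              # (batch, dim, dstate)
dB = dt_t.unsqueeze(-1) * B_t.unsqueeze(1)            # (batch, dim, dstate)
s = s * dA + dB * u_t.unsqueeze(-1)                   # state update (done in place by the kernel)
y_t = torch.einsum("bdn,bn->bd", s, C_t)              # per-step output, (batch, dim)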
+ BLOCK_SIZE_M, num_warps = ((32, 4) if dstate <= 16 + else ((16, 4) if dstate <= 32 else + ((8, 4) if dstate <= 64 else + ((4, 4) if dstate <= 128 else + ((4, 8)))))) + with torch.cuda.device(x.device.index): + _selective_scan_update_kernel[grid]( + state, x, dt, dt_bias, A, B, C, D, z, out, + batch, dim, dstate, + state.stride(0), state.stride(1), state.stride(2), + x.stride(0), x.stride(1), + dt.stride(0), dt.stride(1), + dt_bias.stride(0) if dt_bias is not None else 0, + A.stride(0), A.stride(1), + B.stride(0), B.stride(1), + C.stride(0), C.stride(1), + D.stride(0) if D is not None else 0, + z_strides[0], z_strides[1], + out.stride(0), out.stride(1), + dt_softplus, + BLOCK_SIZE_M, + num_warps=num_warps, + ) + return out + + +def selective_state_update_ref(state, x, dt, A, B, C, D=None, z=None, dt_bias=None, dt_softplus=False): + """ + Argument: + state: (batch, dim, dstate) + x: (batch, dim) + dt: (batch, dim) + A: (dim, dstate) + B: (batch, dstate) + C: (batch, dstate) + D: (dim,) + z: (batch, dim) + dt_bias: (dim,) + Return: + out: (batch, dim) + """ + batch, dim, dstate = state.shape + assert x.shape == (batch, dim) + assert dt.shape == x.shape + assert A.shape == (dim, dstate) + assert B.shape == (batch, dstate) + assert C.shape == B.shape + if D is not None: + assert D.shape == (dim,) + if z is not None: + assert z.shape == x.shape + if dt_bias is not None: + assert dt_bias.shape == (dim,) + dt = dt + dt_bias + dt = F.softplus(dt) if dt_softplus else dt + dA = torch.exp(rearrange(dt, "b d -> b d 1") * A) # (batch, dim, dstate) + dB = rearrange(dt, "b d -> b d 1") * rearrange(B, "b n -> b 1 n") # (batch, dim, dstate) + state.copy_(state * dA + dB * rearrange(x, "b d -> b d 1")) # (batch, dim, dstate + out = torch.einsum("bdn,bn->bd", state.to(C.dtype), C) + if D is not None: + out += (x * D).to(out.dtype) + return (out if z is None else out * F.silu(z)).to(x.dtype) diff --git a/SegMamba/mamba/mamba_ssm/utils/__init__.py b/SegMamba/mamba/mamba_ssm/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SegMamba/mamba/mamba_ssm/utils/generation.py b/SegMamba/mamba/mamba_ssm/utils/generation.py new file mode 100644 index 0000000000000000000000000000000000000000..9d766b29ac28a388a7d77b22aa2cb1eda733c0f4 --- /dev/null +++ b/SegMamba/mamba/mamba_ssm/utils/generation.py @@ -0,0 +1,377 @@ +# Copyright (c) 2023, Albert Gu, Tri Dao. 
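# Orientation for the generation utilities below: decode() processes the prompt in
# one forward pass, then feeds one token at a time while InferenceParams carries the
# per-layer conv/ssm states between steps; next tokens come from sample(), which
# applies top-k first and then nucleus (top-p) filtering. A standalone sketch of
# that filtering step with hypothetical sizes (mirrors sample() together with
# modify_logits_for_top_p_filtering):
import torch

torch.manual_seed(0)
logits = torch.randn(2, 10)                             # (batch, vocab_size)
top_k, top_p, temperature = 5, 0.9, 0.8

vals, idx = torch.topk(logits, top_k, dim=-1)           # keep the k best candidates
vals = vals / temperature
sorted_vals, sorted_idx = torch.sort(vals, descending=False)
cum_probs = sorted_vals.softmax(dim=-1).cumsum(dim=-1)
drop_sorted = cum_probs <= (1 - top_p)                  # low-probability tail to discard
drop = drop_sorted.scatter(1, sorted_idx, drop_sorted)  # map mask back to unsorted positions
vals = vals.masked_fill(drop, float("-inf"))
choice = torch.multinomial(vals.softmax(dim=-1), num_samples=1)
token = idx.gather(1, choice)                           # sampled vocab ids, (batch, 1)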
+import gc +import time +from collections import namedtuple +from dataclasses import dataclass, field +from functools import partial +from typing import Callable, Optional, Sequence, Union + +import torch +import torch.nn.functional as F +from einops import rearrange, repeat +from torch import Tensor +from torch.profiler import ProfilerActivity, profile, record_function +from transformers.generation import GreedySearchDecoderOnlyOutput, SampleDecoderOnlyOutput + + +@dataclass +class InferenceParams: + """Inference parameters that are passed to the main model in order + to efficienly calculate and store the context during inference.""" + + max_seqlen: int + max_batch_size: int + seqlen_offset: int = 0 + batch_size_offset: int = 0 + key_value_memory_dict: dict = field(default_factory=dict) + lengths_per_sample: Optional[Tensor] = None + + def reset(self, max_seqlen, max_batch_size): + self.max_seqlen = max_seqlen + self.max_batch_size = max_batch_size + self.seqlen_offset = 0 + if self.lengths_per_sample is not None: + self.lengths_per_sample.zero_() + + +# https://github.com/NVIDIA/Megatron-LM/blob/0bb597b42c53355a567aba2a1357cc34b9d99ddd/megatron/text_generation/sampling.py +# https://github.com/huggingface/transformers/blob/a44985b41cfa2de48a5e1de7f1f93b7483da25d1/src/transformers/generation/logits_process.py#L231 +def modify_logits_for_top_k_filtering(logits, top_k): + """Set the logits for none top-k values to -inf. Done in-place.""" + indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] + logits.masked_fill_(indices_to_remove, float("-Inf")) + + +# https://github.com/NVIDIA/Megatron-LM/blob/0bb597b42c53355a567aba2a1357cc34b9d99ddd/megatron/text_generation/sampling.py +# https://github.com/huggingface/transformers/blob/a44985b41cfa2de48a5e1de7f1f93b7483da25d1/src/transformers/generation/logits_process.py#L170 +def modify_logits_for_top_p_filtering(logits, top_p): + """Set the logits for none top-p values to -inf. Done in-place.""" + if top_p <= 0.0 or top_p >= 1.0: + return + # First sort and calculate cumulative sum of probabilities. + sorted_logits, sorted_indices = torch.sort(logits, descending=False) + cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1) + # Remove tokens with cumulative top_p above the threshold (token with 0 are kept) + sorted_indices_to_remove = cumulative_probs <= (1 - top_p) + # scatter sorted tensors to original indexing + indices_to_remove = sorted_indices_to_remove.scatter( + 1, sorted_indices, sorted_indices_to_remove + ) + logits.masked_fill_(indices_to_remove, float("-inf")) + + +def sample(logits, top_k=1, top_p=0.0, temperature=1.0): + """Sample from top-k logits. + Arguments: + logits: Tensor of shape (batch_size, vocab_size) + """ + if top_k == 1: # Short-circuit for greedy decoding + return logits.argmax(dim=-1) + else: + if top_p > 0.0: + assert top_p <= 1.0, "top-p should be in (0, 1]." 
+ if top_k > 0: + top_k = min(top_k, logits.size(-1)) # Safety check + logits_top, indices = torch.topk(logits, top_k, dim=-1) + if temperature != 1.0: + logits_top /= temperature + modify_logits_for_top_p_filtering(logits_top, top_p) + return indices[ + torch.arange(indices.shape[0], device=indices.device), + torch.multinomial(torch.softmax(logits_top, dim=-1), num_samples=1).squeeze(dim=-1), + ] + else: + # Clone so that when we modify for top_p we don't change the original logits + logits_top = logits / temperature if temperature != 1.0 else logits.clone() + modify_logits_for_top_p_filtering(logits_top, top_p) + return torch.multinomial(torch.softmax(logits_top, dim=-1), num_samples=1).squeeze( + dim=-1 + ) + + +@torch.inference_mode() +def decode( + input_ids, + model, + max_length, + top_k=1, + top_p=0.0, + temperature=1.0, + eos_token_id=None, + teacher_outputs=None, + vocab_size=None, + tensor_parallel=1, + cg=False, + enable_timing=False, +): + """Decoding, either greedy or with top-k or top-p sampling. + If top-k = 0, don't limit the number of candidates (pure sampling). + Top-k and top-p can be used together. If top_k > 0 and top_p > 0, then top-k is applied first, + then top-p. + We assume that all sequences in the same batch have the same length. + + Arguments: + input_ids: (batch, seq_len) + max_length: int + teacher_outputs (optional): (batch, seq_len). If provided, instead of sampling from the + logits, the next token is taken from the teacher_outputs. Useful for testing. + Returns: GreedySearchDecoderOnlyOutput or SampleDecoderOnlyOutput, with the following fields: + sequences: (batch, max_length) + scores: tuples of (batch, vocab_size) + """ + batch_size, seqlen_og = input_ids.shape + teacher_output_len = teacher_outputs.shape[1] if teacher_outputs is not None else 0 + if cg: + if not hasattr(model, "_decoding_cache"): + model._decoding_cache = None + model._decoding_cache = update_graph_cache( + model, + model._decoding_cache, + batch_size, + seqlen_og, + max_length, + tensor_parallel=tensor_parallel, + ) + inference_params = model._decoding_cache.inference_params + inference_params.reset(max_length, batch_size) + else: + inference_params = InferenceParams(max_seqlen=max_length, max_batch_size=batch_size) + + def get_logits(input_ids, inference_params): + decoding = inference_params.seqlen_offset > 0 + if decoding: + position_ids = torch.full( + (batch_size, 1), + inference_params.seqlen_offset, + dtype=torch.long, + device=input_ids.device, + ) + else: + position_ids = None + if not cg or not decoding: + logits = model( + input_ids, + position_ids=position_ids, + inference_params=inference_params, + num_last_tokens=1, + ).logits.squeeze(dim=1) + else: + logits = model._decoding_cache.run( + input_ids, position_ids, inference_params.seqlen_offset + ).squeeze(dim=1) + return logits[..., :vocab_size] if vocab_size is not None else logits + + def sample_tokens(logits, inference_params): + if teacher_outputs is None or teacher_output_len <= inference_params.seqlen_offset: + token = sample(logits, top_k=top_k, top_p=top_p, temperature=temperature) + else: + token = teacher_outputs[:, inference_params.seqlen_offset] + # return rearrange(token, "b -> b 1") + return token.unsqueeze(1) + + def should_stop(current_token, inference_params): + if inference_params.seqlen_offset == 0: + return False + if eos_token_id is not None and (current_token == eos_token_id).all(): + return True + if inference_params.seqlen_offset >= max_length - 1: + return True + return False + + start = 
torch.cuda.Event(enable_timing=enable_timing) + end = torch.cuda.Event(enable_timing=enable_timing) + + if enable_timing: + if tensor_parallel > 1: + torch.distributed.barrier() + start.record() + scores, sequences = [], [input_ids] + while not should_stop(sequences[-1], inference_params): + scores.append(get_logits(sequences[-1], inference_params)) + inference_params.seqlen_offset += sequences[-1].shape[1] + sequences.append(sample_tokens(scores[-1], inference_params)) + if enable_timing: + end.record() + if tensor_parallel > 1: + torch.distributed.barrier() + torch.cuda.synchronize() + print(f"Prompt processing + decoding time: {(start.elapsed_time(end)):.0f}ms") + output_cls = GreedySearchDecoderOnlyOutput if top_k == 1 else SampleDecoderOnlyOutput + return output_cls(sequences=torch.cat(sequences, dim=1), scores=tuple(scores)) + + +class GenerationMixin: + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + raise NotImplementedError + + def generate( + self, + input_ids, + max_length, + top_k=1, + top_p=0.0, + temperature=1.0, + return_dict_in_generate=False, + output_scores=False, + **kwargs, + ): + output = decode( + input_ids, self, max_length, top_k=top_k, top_p=top_p, temperature=temperature, **kwargs + ) + if not output_scores: + output.scores = None + return output if return_dict_in_generate else output.sequences + + +def allocate_inference_cache( + max_batch_size, + max_seqlen, + nheads, + headdim, + layers: Union[int, Sequence], + device, + dtype=torch.float16, +): + assert dtype in [torch.float16, torch.bfloat16, torch.float32] + kv_cache_shape = (max_batch_size, max_seqlen, 2, nheads, headdim) + if isinstance(layers, int): + layers = range(layers) + return {i: torch.empty(kv_cache_shape, device=device, dtype=dtype) for i in layers} + + +@dataclass +class DecodingCGCache: + max_batch_size: int = 0 + max_seqlen: int = 0 + device = None + dtype = None + callables: dict = field(default_factory=dict) + mempool = None + inference_params: Optional[InferenceParams] = None + run: Optional[Callable] = None + + +@torch.inference_mode() +def update_graph_cache( + model, + cache, + batch_size, + seqlen_og, + max_seqlen, + decoding_seqlens=(1,), + tensor_parallel=1, + dtype=None, + n_warmups=2, +): + if cache is None: + cache = DecodingCGCache() + param_example = next(iter(model.parameters())) + device = param_example.device + if dtype is None: + dtype = param_example.dtype + if ( + (device, dtype) != (cache.device, cache.dtype) + or batch_size > cache.max_batch_size + or max_seqlen > cache.max_seqlen + ): # Invalidate the cache + cache.callables = {} + cache.mempool = None + cache.inference_params = None + gc.collect() + cache.device, cache.dtype = device, dtype + cache.max_batch_size, cache.max_seqlen = batch_size, max_seqlen + if hasattr(model, "allocate_inference_cache"): + inf_cache = model.allocate_inference_cache(batch_size, max_seqlen, dtype) + else: + headdim = getattr( + model.config, + "head_dim", + model.config.hidden_size // model.config.num_attention_heads, + ) + inf_cache = allocate_inference_cache( + batch_size, + max_seqlen, + model.config.num_attention_heads // tensor_parallel, + headdim, + model.config.num_hidden_layers, + device, + dtype, + ) + lengths_per_sample = torch.full((batch_size,), seqlen_og, dtype=torch.int32, device=device) + cache.inference_params = InferenceParams( + max_seqlen=max_seqlen, + max_batch_size=batch_size, + seqlen_offset=seqlen_og, + key_value_memory_dict=inf_cache, + lengths_per_sample=lengths_per_sample, + ) + 
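+        # A single memory pool is shared by all CUDA graphs captured below, so the
+        # graphs for the different decoding lengths can reuse one set of allocations.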
cache.mempool = torch.cuda.graphs.graph_pool_handle() + for decoding_seqlen in decoding_seqlens: + if (batch_size, decoding_seqlen) not in cache.callables: + cache.callables[batch_size, decoding_seqlen] = capture_graph( + model, + cache.inference_params, + batch_size, + max_seqlen, + decoding_seqlen=decoding_seqlen, + mempool=cache.mempool, + n_warmups=n_warmups, + ) + + def dispatch(input_ids, position_ids, seqlen): + batch_size, decoding_seqlen = input_ids.shape[:2] + return cache.callables[batch_size, decoding_seqlen](input_ids, position_ids, seqlen) + + cache.run = dispatch + cache.inference_params.seqlen_offset = 0 # Reset so it's not confusing + return cache + + +def capture_graph( + model, inference_params, batch_size, max_seqlen, decoding_seqlen=1, mempool=None, n_warmups=2 +): + device = next(iter(model.parameters())).device + input_ids = torch.full((batch_size, decoding_seqlen), 0, dtype=torch.long, device=device) + position_ids = torch.full((batch_size, decoding_seqlen), 0, dtype=torch.long, device=device) + seqlen_offset_og = inference_params.seqlen_offset + inference_params.seqlen_offset = max_seqlen - decoding_seqlen + inference_params.lengths_per_sample[:] = inference_params.seqlen_offset + + # Warmup before capture + s = torch.cuda.Stream() + s.wait_stream(torch.cuda.current_stream()) + with torch.cuda.stream(s): + for _ in range(n_warmups): + logits = model( + input_ids, + position_ids=position_ids, + inference_params=inference_params, + num_last_tokens=decoding_seqlen, + ).logits + s.synchronize() + # This might be needed for correctness if we run with NCCL_GRAPH_MIXING_SUPPORT=0, + # which requires that graph launch and non-captured launch to not overlap (I think, + # that's how I interpret the documentation). I'm not sure if this is required. 
+        if torch.distributed.is_initialized():
+            torch.distributed.barrier()
+    torch.cuda.current_stream().wait_stream(s)
+    # Captures the graph
+    # To allow capture, automatically sets a side stream as the current stream in the context
+    graph = torch.cuda.CUDAGraph()
+    with torch.cuda.graph(graph, pool=mempool):
+        logits = model(
+            input_ids,
+            position_ids=position_ids,
+            inference_params=inference_params,
+            num_last_tokens=decoding_seqlen,
+        ).logits
+
+    def run(new_input_ids, new_position_ids, seqlen):
+        inference_params.lengths_per_sample[:] = seqlen
+        input_ids.copy_(new_input_ids)
+        position_ids.copy_(new_position_ids)
+        graph.replay()
+        return logits.clone()
+
+    inference_params.seqlen_offset = seqlen_offset_og
+    return run
diff --git a/SegMamba/mamba/mamba_ssm/utils/hf.py b/SegMamba/mamba/mamba_ssm/utils/hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d7555acddbd260636d1d14d5bd6324f6af0056a
--- /dev/null
+++ b/SegMamba/mamba/mamba_ssm/utils/hf.py
@@ -0,0 +1,23 @@
+import json
+
+import torch
+
+from transformers.utils import WEIGHTS_NAME, CONFIG_NAME
+from transformers.utils.hub import cached_file
+
+
+def load_config_hf(model_name):
+    resolved_archive_file = cached_file(model_name, CONFIG_NAME, _raise_exceptions_for_missing_entries=False)
+    return json.load(open(resolved_archive_file))
+
+
+def load_state_dict_hf(model_name, device=None, dtype=None):
+    # If not fp32, then we don't want to load directly to the GPU
+    mapped_device = "cpu" if dtype not in [torch.float32, None] else device
+    resolved_archive_file = cached_file(model_name, WEIGHTS_NAME, _raise_exceptions_for_missing_entries=False)
+    state_dict = torch.load(resolved_archive_file, map_location=mapped_device)
+    # Convert dtype before moving to GPU to save memory
+    if dtype is not None:
+        state_dict = {k: v.to(dtype=dtype) for k, v in state_dict.items()}
+    state_dict = {k: v.to(device=device) for k, v in state_dict.items()}
+    return state_dict
diff --git a/SegMamba/mamba/setup.py b/SegMamba/mamba/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..06e1ee35d98a450199f8ef9700dd1d84c0b1f9ee
--- /dev/null
+++ b/SegMamba/mamba/setup.py
@@ -0,0 +1,288 @@
+# Copyright (c) 2023, Albert Gu, Tri Dao.
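+# Build behaviour is controlled by the environment variables read below:
+#   MAMBA_FORCE_BUILD=TRUE      always compile locally instead of fetching a prebuilt wheel
+#   MAMBA_SKIP_CUDA_BUILD=TRUE  skip compiling the CUDA extension (e.g. for sdist / CI runs)
+#   MAMBA_FORCE_CXX11_ABI=TRUE  build with the C++11 ABI to match nvcr-style container images
+# For example (illustrative command only):
+#   MAMBA_FORCE_BUILD=TRUE pip install --no-build-isolation .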
+import sys +import warnings +import os +import re +import ast +from pathlib import Path +from packaging.version import parse, Version +import platform +import shutil + +from setuptools import setup, find_packages +import subprocess + +import urllib.request +import urllib.error +from wheel.bdist_wheel import bdist_wheel as _bdist_wheel + +import torch +from torch.utils.cpp_extension import ( + BuildExtension, + CppExtension, + CUDAExtension, + CUDA_HOME, +) + + +with open("README.md", "r", encoding="utf-8") as fh: + long_description = fh.read() + + +# ninja build does not work unless include_dirs are abs path +this_dir = os.path.dirname(os.path.abspath(__file__)) + +PACKAGE_NAME = "mamba_ssm" + +BASE_WHEEL_URL = "https://github.com/state-spaces/mamba/releases/download/{tag_name}/{wheel_name}" + +# FORCE_BUILD: Force a fresh build locally, instead of attempting to find prebuilt wheels +# SKIP_CUDA_BUILD: Intended to allow CI to use a simple `python setup.py sdist` run to copy over raw files, without any cuda compilation +FORCE_BUILD = os.getenv("MAMBA_FORCE_BUILD", "FALSE") == "TRUE" +SKIP_CUDA_BUILD = os.getenv("MAMBA_SKIP_CUDA_BUILD", "FALSE") == "TRUE" +# For CI, we want the option to build with C++11 ABI since the nvcr images use C++11 ABI +FORCE_CXX11_ABI = os.getenv("MAMBA_FORCE_CXX11_ABI", "FALSE") == "TRUE" + + +def get_platform(): + """ + Returns the platform name as used in wheel filenames. + """ + if sys.platform.startswith("linux"): + return "linux_x86_64" + elif sys.platform == "darwin": + mac_version = ".".join(platform.mac_ver()[0].split(".")[:2]) + return f"macosx_{mac_version}_x86_64" + elif sys.platform == "win32": + return "win_amd64" + else: + raise ValueError("Unsupported platform: {}".format(sys.platform)) + + +def get_cuda_bare_metal_version(cuda_dir): + raw_output = subprocess.check_output( + [cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True + ) + output = raw_output.split() + release_idx = output.index("release") + 1 + bare_metal_version = parse(output[release_idx].split(",")[0]) + + return raw_output, bare_metal_version + + +def check_if_cuda_home_none(global_option: str) -> None: + if CUDA_HOME is not None: + return + # warn instead of error because user could be downloading prebuilt wheels, so nvcc won't be necessary + # in that case. + warnings.warn( + f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? " + "If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, " + "only images whose names contain 'devel' will provide nvcc." + ) + + +def append_nvcc_threads(nvcc_extra_args): + return nvcc_extra_args + ["--threads", "4"] + + +cmdclass = {} +ext_modules = [] + +if not SKIP_CUDA_BUILD: + print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__)) + TORCH_MAJOR = int(torch.__version__.split(".")[0]) + TORCH_MINOR = int(torch.__version__.split(".")[1]) + + check_if_cuda_home_none(PACKAGE_NAME) + # Check, if CUDA11 is installed for compute capability 8.0 + cc_flag = [] + if CUDA_HOME is not None: + _, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME) + if bare_metal_version < Version("11.6"): + raise RuntimeError( + f"{PACKAGE_NAME} is only supported on CUDA 11.6 and above. " + "Note: make sure nvcc has a supported version by running nvcc -V." 
+ ) + + arch_list_env = os.getenv("MAMBA_CUDA_ARCH_LIST") or os.getenv("TORCH_CUDA_ARCH_LIST") + if arch_list_env: + arch_list = [a.strip() for a in arch_list_env.split(",") if a.strip()] + for arch in arch_list: + arch = arch.split("+")[0] + if arch.startswith(("sm_", "compute_")): + arch_num = arch.split("_", 1)[1] + else: + arch_num = arch.replace(".", "") + cc_flag.append("-gencode") + cc_flag.append(f"arch=compute_{arch_num},code=sm_{arch_num}") + else: + cc_flag.append("-gencode") + cc_flag.append("arch=compute_70,code=sm_70") + cc_flag.append("-gencode") + cc_flag.append("arch=compute_80,code=sm_80") + if bare_metal_version >= Version("11.8"): + cc_flag.append("-gencode") + cc_flag.append("arch=compute_90,code=sm_90") + + # HACK: The compiler flag -D_GLIBCXX_USE_CXX11_ABI is set to be the same as + # torch._C._GLIBCXX_USE_CXX11_ABI + # https://github.com/pytorch/pytorch/blob/8472c24e3b5b60150096486616d98b7bea01500b/torch/utils/cpp_extension.py#L920 + if FORCE_CXX11_ABI: + torch._C._GLIBCXX_USE_CXX11_ABI = True + + ext_modules.append( + CUDAExtension( + name="selective_scan_cuda", + sources=[ + "csrc/selective_scan/selective_scan.cpp", + "csrc/selective_scan/selective_scan_fwd_fp32.cu", + "csrc/selective_scan/selective_scan_fwd_fp16.cu", + "csrc/selective_scan/selective_scan_fwd_bf16.cu", + "csrc/selective_scan/selective_scan_bwd_fp32_real.cu", + "csrc/selective_scan/selective_scan_bwd_fp32_complex.cu", + "csrc/selective_scan/selective_scan_bwd_fp16_real.cu", + "csrc/selective_scan/selective_scan_bwd_fp16_complex.cu", + "csrc/selective_scan/selective_scan_bwd_bf16_real.cu", + "csrc/selective_scan/selective_scan_bwd_bf16_complex.cu", + ], + extra_compile_args={ + "cxx": ["-O3", "-std=c++17"], + "nvcc": append_nvcc_threads( + [ + "-O3", + "-std=c++17", + "-U__CUDA_NO_HALF_OPERATORS__", + "-U__CUDA_NO_HALF_CONVERSIONS__", + "-U__CUDA_NO_BFLOAT16_OPERATORS__", + "-U__CUDA_NO_BFLOAT16_CONVERSIONS__", + "-U__CUDA_NO_BFLOAT162_OPERATORS__", + "-U__CUDA_NO_BFLOAT162_CONVERSIONS__", + "--expt-relaxed-constexpr", + "--expt-extended-lambda", + "--use_fast_math", + "--ptxas-options=-v", + "-lineinfo", + ] + + cc_flag + ), + }, + include_dirs=[Path(this_dir) / "csrc" / "selective_scan"], + ) + ) + + +def get_package_version(): + with open(Path(this_dir) / PACKAGE_NAME / "__init__.py", "r") as f: + version_match = re.search(r"^__version__\s*=\s*(.*)$", f.read(), re.MULTILINE) + public_version = ast.literal_eval(version_match.group(1)) + local_version = os.environ.get("MAMBA_LOCAL_VERSION") + if local_version: + return f"{public_version}+{local_version}" + else: + return str(public_version) + + +def get_wheel_url(): + # Determine the version numbers that will be used to determine the correct wheel + # We're using the CUDA version used to build torch, not the one currently installed + # _, cuda_version_raw = get_cuda_bare_metal_version(CUDA_HOME) + torch_cuda_version = parse(torch.version.cuda) + torch_version_raw = parse(torch.__version__) + # For CUDA 11, we only compile for CUDA 11.8, and for CUDA 12 we only compile for CUDA 12.2 + # to save CI time. Minor versions should be compatible. 
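+    # The wheel filename assembled below ends up looking like (version shown is illustrative only):
+    #   mamba_ssm-1.1.1+cu118torch2.1cxx11abiFALSE-cp310-cp310-linux_x86_64.whl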
+ torch_cuda_version = parse("11.8") if torch_cuda_version.major == 11 else parse("12.2") + python_version = f"cp{sys.version_info.major}{sys.version_info.minor}" + platform_name = get_platform() + mamba_ssm_version = get_package_version() + # cuda_version = f"{cuda_version_raw.major}{cuda_version_raw.minor}" + cuda_version = f"{torch_cuda_version.major}{torch_cuda_version.minor}" + torch_version = f"{torch_version_raw.major}.{torch_version_raw.minor}" + cxx11_abi = str(torch._C._GLIBCXX_USE_CXX11_ABI).upper() + + # Determine wheel URL based on CUDA version, torch version, python version and OS + wheel_filename = f"{PACKAGE_NAME}-{mamba_ssm_version}+cu{cuda_version}torch{torch_version}cxx11abi{cxx11_abi}-{python_version}-{python_version}-{platform_name}.whl" + wheel_url = BASE_WHEEL_URL.format( + tag_name=f"v{mamba_ssm_version}", wheel_name=wheel_filename + ) + return wheel_url, wheel_filename + + +class CachedWheelsCommand(_bdist_wheel): + """ + The CachedWheelsCommand plugs into the default bdist wheel, which is ran by pip when it cannot + find an existing wheel (which is currently the case for all installs). We use + the environment parameters to detect whether there is already a pre-built version of a compatible + wheel available and short-circuits the standard full build pipeline. + """ + + def run(self): + if FORCE_BUILD: + return super().run() + + wheel_url, wheel_filename = get_wheel_url() + print("Guessing wheel URL: ", wheel_url) + try: + urllib.request.urlretrieve(wheel_url, wheel_filename) + + # Make the archive + # Lifted from the root wheel processing command + # https://github.com/pypa/wheel/blob/cf71108ff9f6ffc36978069acb28824b44ae028e/src/wheel/bdist_wheel.py#LL381C9-L381C85 + if not os.path.exists(self.dist_dir): + os.makedirs(self.dist_dir) + + impl_tag, abi_tag, plat_tag = self.get_tag() + archive_basename = f"{self.wheel_dist_name}-{impl_tag}-{abi_tag}-{plat_tag}" + + wheel_path = os.path.join(self.dist_dir, archive_basename + ".whl") + print("Raw wheel path", wheel_path) + shutil.move(wheel_filename, wheel_path) + except urllib.error.HTTPError: + print("Precompiled wheel not found. 
Building from source...") + # If the wheel could not be downloaded, build from source + super().run() + + +setup( + name=PACKAGE_NAME, + version=get_package_version(), + packages=find_packages( + exclude=( + "build", + "csrc", + "include", + "tests", + "dist", + "docs", + "benchmarks", + "mamba_ssm.egg-info", + ) + ), + author="Tri Dao, Albert Gu", + author_email="tri@tridao.me, agu@cs.cmu.edu", + description="Mamba state-space model", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/state-spaces/mamba", + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: BSD License", + "Operating System :: Unix", + ], + ext_modules=ext_modules, + cmdclass={"bdist_wheel": CachedWheelsCommand, "build_ext": BuildExtension} + if ext_modules + else { + "bdist_wheel": CachedWheelsCommand, + }, + python_requires=">=3.7", + install_requires=[ + "torch", + "packaging", + "ninja", + "einops", + "triton", + "transformers", + "causal_conv1d", + ], +) diff --git a/SegMamba/mamba/test_mamba_module.py b/SegMamba/mamba/test_mamba_module.py new file mode 100644 index 0000000000000000000000000000000000000000..64710e92f7ec4fc0fe88821550e4ecf902a22bfe --- /dev/null +++ b/SegMamba/mamba/test_mamba_module.py @@ -0,0 +1,15 @@ +import torch +from mamba_ssm import Mamba + +batch, length, dim = 2, 64, 768 +x = torch.randn(batch, length, dim).to("cuda") +model = Mamba( + # This module uses roughly 3 * expand * d_model^2 parameters + d_model=dim, # Model dimension d_model + d_state=16, # SSM state expansion factor # 64 + d_conv=4, # Local convolution width + expand=2, # Block expansion factor + use_fast_path=False, +).to("cuda") +y = model(x) +assert y.shape == x.shape diff --git a/SegMamba/mamba/tests/ops/test_selective_scan.py b/SegMamba/mamba/tests/ops/test_selective_scan.py new file mode 100644 index 0000000000000000000000000000000000000000..26b34a37560f08ced653a1d9320a14f3d3f9ebd3 --- /dev/null +++ b/SegMamba/mamba/tests/ops/test_selective_scan.py @@ -0,0 +1,423 @@ +# Copyright (C) 2023, Tri Dao. 
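+# These tests compare the fused CUDA kernels (selective_scan_fn, mamba_inner_fn,
+# bimamba_inner_fn) against their pure-PyTorch references, for both outputs and gradients.
+# They require a CUDA device; a typical invocation (assuming pytest is installed) is:
+#   pytest SegMamba/mamba/tests/ops/test_selective_scan.py -q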
+ +import math + +import torch +import torch.nn.functional as F +from torch.autograd import gradcheck +import pytest + +from einops import rearrange + +from mamba_ssm.ops.selective_scan_interface import selective_scan_fn, selective_scan_ref +from mamba_ssm.ops.selective_scan_interface import mamba_inner_fn, mamba_inner_ref +from mamba_ssm.ops.selective_scan_interface import bimamba_inner_fn, bimamba_inner_ref + + +# @pytest.mark.parametrize('wtype', [torch.float32, torch.complex64]) +@pytest.mark.parametrize('wtype', [torch.float32]) +# @pytest.mark.parametrize('itype', [torch.float32, torch.float16, torch.bfloat16]) +@pytest.mark.parametrize('itype', [torch.float32]) +# @pytest.mark.parametrize('seqlen', [8, 16, 32, 64, 128, 256, 372, 512, 784, 1024, 1134, 2048, 4096]) +@pytest.mark.parametrize('seqlen', [128, 256, 512, 1024, 2048, 4096]) +# @pytest.mark.parametrize('seqlen', [128]) +# @pytest.mark.parametrize("return_last_state", [False, True]) +@pytest.mark.parametrize("return_last_state", [True]) +# @pytest.mark.parametrize('has_delta_bias', [False, True]) +@pytest.mark.parametrize('has_delta_bias', [True]) +# @pytest.mark.parametrize('delta_softplus', [False, True]) +@pytest.mark.parametrize('delta_softplus', [True]) +# @pytest.mark.parametrize('has_z', [False, True]) +@pytest.mark.parametrize('has_z', [True]) +# @pytest.mark.parametrize('has_D', [False, True]) +@pytest.mark.parametrize('has_D', [True]) +@pytest.mark.parametrize("varBC_groups", [1, 2]) +# @pytest.mark.parametrize("varBC_groups", [1]) +# @pytest.mark.parametrize("is_variable_C", [False, True]) +@pytest.mark.parametrize("is_variable_C", [True]) +# @pytest.mark.parametrize("is_variable_B", [False, True]) +@pytest.mark.parametrize("is_variable_B", [True]) +def test_selective_scan(is_variable_B, is_variable_C, varBC_groups, has_D, has_z, has_delta_bias, + delta_softplus, return_last_state, seqlen, itype, wtype): + if varBC_groups > 1 and (not is_variable_B or not is_variable_C): + pytest.skip() # This config is not applicable + device = 'cuda' + rtol, atol = (6e-4, 2e-3) if itype == torch.float32 else (3e-3, 5e-3) + if itype == torch.bfloat16: + rtol, atol = 3e-2, 5e-2 + rtolw, atolw = (1e-3, 1e-3) + if has_z: # If we have z, the errors on the weights seem higher + rtolw = max(rtolw, rtol) + atolw = max(atolw, atol) + # set seed + torch.random.manual_seed(0) + batch_size = 2 + dim = 4 + dstate = 8 + is_complex = wtype == torch.complex64 + A = (-0.5 * torch.rand(dim, dstate, device=device, dtype=wtype)).requires_grad_() + if not is_variable_B: + B_shape = (dim, dstate) + elif varBC_groups == 1: + B_shape = (batch_size, dstate, seqlen if not is_complex else seqlen * 2) + else: + B_shape = (batch_size, varBC_groups, dstate, seqlen if not is_complex else seqlen * 2) + B = torch.randn(*B_shape, device=device, dtype=wtype if not is_variable_B else itype, + requires_grad=True) + if not is_variable_C: + C_shape = (dim, dstate) + elif varBC_groups == 1: + C_shape = (batch_size, dstate, seqlen if not is_complex else seqlen * 2) + else: + C_shape = (batch_size, varBC_groups, dstate, seqlen if not is_complex else seqlen * 2) + C = torch.randn(*C_shape, device=device, dtype=wtype if not is_variable_C else itype, + requires_grad=True) + if has_D: + D = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True) + else: + D = None + if has_z: + z = torch.randn(batch_size, dim, seqlen, device=device, dtype=itype, requires_grad=True) + else: + z = None + if has_delta_bias: + delta_bias = (0.5 * torch.rand(dim, device=device, 
dtype=torch.float32)).requires_grad_() + else: + delta_bias = None + u = torch.randn(batch_size, dim, seqlen, device=device, dtype=itype, requires_grad=True) + delta = (0.5 * torch.rand(batch_size, dim, seqlen, device=device, dtype=itype)).requires_grad_() + A_ref = A.detach().clone().requires_grad_() + B_ref = B.detach().clone().requires_grad_() + C_ref = C.detach().clone().requires_grad_() + D_ref = D.detach().clone().requires_grad_() if D is not None else None + z_ref = z.detach().clone().requires_grad_() if z is not None else None + u_ref = u.detach().clone().requires_grad_() + delta_ref = delta.detach().clone().requires_grad_() + delta_bias_ref = delta_bias.detach().clone().requires_grad_() if delta_bias is not None else None + out, *rest = selective_scan_fn( + u, delta, A, B, C, D, z=z, + delta_bias=delta_bias, delta_softplus=delta_softplus, + return_last_state=return_last_state + ) + if return_last_state: + state = rest[0] + out_ref, *rest = selective_scan_ref( + u_ref, delta_ref, A_ref, B_ref, C_ref, D_ref, z=z_ref, + delta_bias=delta_bias_ref, delta_softplus=delta_softplus, + return_last_state=return_last_state + ) + if return_last_state: + state_ref = rest[0] + # dA = torch.exp(torch.einsum('bdl,dn->bdln', delta, A)) + # dt_u = delta * u + + print(f'Output max diff: {(out - out_ref).abs().max().item()}') + print(f'Output mean diff: {(out - out_ref).abs().mean().item()}') + assert torch.allclose(out, out_ref, rtol=rtol, atol=atol) + if return_last_state: + print(f'State max diff: {(state - state_ref).abs().max().item()}') + assert torch.allclose(state, state_ref, rtol=rtol, atol=atol) + + g = torch.randn_like(out) + out_ref.backward(g) + out.backward(g) + + print(f'du max diff: {(u.grad - u_ref.grad).abs().max().item()}') + print(f'ddelta max diff: {(delta.grad - delta_ref.grad).abs().max().item()}') + print(f'dA max diff: {(A.grad - A_ref.grad).abs().max().item()}') + print(f'dB max diff: {(B.grad - B_ref.grad).abs().max().item()}') + print(f'dC max diff: {(C.grad - C_ref.grad).abs().max().item()}') + if has_D: + print(f'dD max diff: {(D.grad - D_ref.grad).abs().max().item()}') + if has_z: + print(f'dz max diff: {(z.grad - z_ref.grad).abs().max().item()}') + if has_delta_bias: + print(f'ddelta_bias max diff: {(delta_bias.grad - delta_bias_ref.grad).abs().max().item()}') + + assert torch.allclose(u.grad, u_ref.grad.to(dtype=itype), rtol=rtol * 2, atol=atol * 2) + assert torch.allclose(delta.grad, delta_ref.grad.to(dtype=itype), rtol=rtol * 5, atol=atol * 10) + assert torch.allclose(A.grad, A_ref.grad, rtol=rtolw, atol=atolw * 5) + assert torch.allclose(B.grad, B_ref.grad, rtol=rtolw if not is_variable_B else rtol, + atol=atolw if not is_variable_B else atol) + assert torch.allclose(C.grad, C_ref.grad, rtol=rtolw if not is_variable_C else rtol, + atol=atolw if not is_variable_C else atol) + if has_D: + assert torch.allclose(D.grad, D_ref.grad, rtol=rtolw, atol=atolw) + if has_z: + assert torch.allclose(z.grad, z_ref.grad, rtol=rtolw, atol=atolw) + if has_delta_bias: + assert torch.allclose(delta_bias.grad, delta_bias_ref.grad, rtol=rtolw, atol=atolw) + + +@pytest.mark.parametrize('wtype', [torch.float32, torch.complex64]) +# @pytest.mark.parametrize('wtype', [torch.complex64]) +# @pytest.mark.parametrize('itype', [torch.float32, torch.float16, torch.bfloat16]) +@pytest.mark.parametrize('itype', [torch.float32]) +# @pytest.mark.parametrize('seqlen', [8, 16, 32, 64, 128, 256, 372, 512, 784, 1024, 1134, 2048, 4096]) +@pytest.mark.parametrize('seqlen', [128]) 
+@pytest.mark.parametrize("is_variable_C", [False, True]) +# @pytest.mark.parametrize("is_variable_C", [False]) +@pytest.mark.parametrize("is_variable_B", [False, True]) +# @pytest.mark.parametrize("is_variable_B", [True]) +def test_mamba_inner_fn(is_variable_B, is_variable_C, seqlen, itype, wtype): + device = 'cuda' + rtol, atol = (6e-4, 2e-3) if itype == torch.float32 else (3e-3, 5e-3) + if itype == torch.bfloat16: + rtol, atol = 3e-2, 5e-2 + rtolw, atolw = (1e-3, 1e-3) + # If we have z, the errors on the weights seem higher + rtolw = max(rtolw, rtol) + atolw = max(atolw, atol) + # set seed + torch.random.manual_seed(0) + batch_size = 2 + dim = 768 + dstate = 8 + dt_rank = 48 + is_complex = wtype == torch.complex64 + xz = torch.randn(batch_size, 2 * dim, seqlen, device=device, dtype=itype, requires_grad=True) + conv1d_weight = torch.randn(dim, 1, 3, device=device, dtype=torch.float32, requires_grad=True) + conv1d_bias = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True) + x_proj_weight = torch.randn(dt_rank + (bool(is_variable_B) + bool(is_variable_C)) * dstate + * (1 if not is_complex else 2), + dim, device=device, dtype=itype, requires_grad=True) + delta_proj_weight = torch.randn(dim, dt_rank, device=device, dtype=itype, requires_grad=True) + out_proj_weight = torch.randn(dim // 2, dim, device=device, dtype=itype, requires_grad=True) + out_proj_bias = None + A = (-0.5 * torch.rand(dim, dstate, device=device, dtype=wtype)).requires_grad_() + B = (torch.randn(dim, dstate, device=device, dtype=wtype, requires_grad=True) + if not is_variable_B else None) + C = (torch.randn(dim, dstate, device=device, dtype=wtype, requires_grad=True) + if not is_variable_C else None) + D = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True) + delta_bias = (0.5 * torch.rand(dim, device=device, dtype=torch.float32)).requires_grad_() + B_proj_bias = None + C_proj_bias = None + xz_ref = xz.detach().clone().requires_grad_() + conv1d_weight_ref = conv1d_weight.detach().clone().requires_grad_() + conv1d_bias_ref = conv1d_bias.detach().clone().requires_grad_() + x_proj_weight_ref = x_proj_weight.detach().clone().requires_grad_() + delta_proj_weight_ref = delta_proj_weight.detach().clone().requires_grad_() + out_proj_weight_ref = out_proj_weight.detach().clone().requires_grad_() + out_proj_bias_ref = (out_proj_bias.detach().clone().requires_grad_() + if out_proj_bias is not None else None) + A_ref = A.detach().clone().requires_grad_() + B_ref = B.detach().clone().requires_grad_() if B is not None else None + C_ref = C.detach().clone().requires_grad_() if C is not None else None + D_ref = D.detach().clone().requires_grad_() + delta_bias_ref = delta_bias.detach().clone().requires_grad_() if delta_bias is not None else None + out = mamba_inner_fn(xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, B, C, D, delta_bias=delta_bias, delta_softplus=True) + out_ref = mamba_inner_ref(xz_ref, conv1d_weight_ref, conv1d_bias_ref, x_proj_weight_ref, + delta_proj_weight_ref, out_proj_weight_ref, out_proj_bias_ref, + A_ref, B_ref, C_ref, D_ref, + delta_bias=delta_bias_ref, delta_softplus=True) + # dA = torch.exp(torch.einsum('bdl,dn->bdln', delta, A)) + # dt_u = delta * u + print("mamba_inner_fn") + print(f'Output max diff: {(out - out_ref).abs().max().item()}') + print(f'Output mean diff: {(out - out_ref).abs().mean().item()}') + assert torch.allclose(out, out_ref, rtol=rtol, atol=atol) + + g = torch.randn_like(out) + 
out_ref.backward(g) + out.backward(g) + + print(f'dxz max diff: {(xz.grad - xz_ref.grad).abs().max().item()}') + print(f'dA max diff: {(A.grad - A_ref.grad).abs().max().item()}') + if not is_variable_B: + print(f'dB max diff: {(B.grad - B_ref.grad).abs().max().item()}') + if not is_variable_C: + print(f'dC max diff: {(C.grad - C_ref.grad).abs().max().item()}') + print(f'dD max diff: {(D.grad - D_ref.grad).abs().max().item()}') + print(f'ddelta_bias max diff: {(delta_bias.grad - delta_bias_ref.grad).abs().max().item()}') + print(f'dout_proj_weight max diff: {(out_proj_weight.grad - out_proj_weight_ref.grad).abs().max().item()}') + print(f'ddelta_proj_weight max diff: {(delta_proj_weight.grad - delta_proj_weight_ref.grad).abs().max().item()}') + print(f'dx_proj_weight max diff: {(x_proj_weight.grad - x_proj_weight_ref.grad).abs().max().item()}') + print(f'dconv1d_weight max diff: {(conv1d_weight.grad - conv1d_weight_ref.grad).abs().max().item()}') + print(f'dconv1d_bias max diff: {(conv1d_bias.grad - conv1d_bias_ref.grad).abs().max().item()}') + + # assert torch.allclose(xz.grad, xz_ref.grad.to(dtype=itype), rtol=rtol * 2, atol=atol * 2) + # assert torch.allclose(delta.grad, delta_ref.grad.to(dtype=itype), rtol=rtol * 5, atol=atol * 10) + # assert torch.allclose(A.grad, A_ref.grad, rtol=rtolw, atol=atolw * 5) + # assert torch.allclose(B.grad, B_ref.grad, rtol=rtolw if not is_variable_B else rtol, + # atol=atolw if not is_variable_B else atol) + # assert torch.allclose(C.grad, C_ref.grad, rtol=rtolw if not is_variable_C else rtol, + # atol=atolw if not is_variable_C else atol) + # assert torch.allclose(D.grad, D_ref.grad, rtol=rtolw, atol=atolw) + # assert torch.allclose(delta_bias.grad, delta_bias_ref.grad, rtol=rtolw, atol=atolw) + + +# test_mamba_inner_fn(False, False, 128, torch.float32, torch.float32) + + +@pytest.mark.parametrize('wtype', [torch.float32, torch.complex64]) +# @pytest.mark.parametrize('wtype', [torch.complex64]) +# @pytest.mark.parametrize('itype', [torch.float32, torch.float16, torch.bfloat16]) +@pytest.mark.parametrize('itype', [torch.float32]) +# @pytest.mark.parametrize('seqlen', [8, 16, 32, 64, 128, 256, 372, 512, 784, 1024, 1134, 2048, 4096]) +@pytest.mark.parametrize('seqlen', [128]) +@pytest.mark.parametrize("is_variable_C", [False, True]) +# @pytest.mark.parametrize("is_variable_C", [False]) +@pytest.mark.parametrize("is_variable_B", [False, True]) +# @pytest.mark.parametrize("is_variable_B", [True]) +def test_bimamba_inner_fn(is_variable_B, is_variable_C, seqlen, itype, wtype): + device = 'cuda' + rtol, atol = (6e-4, 2e-3) if itype == torch.float32 else (3e-3, 5e-3) + if itype == torch.bfloat16: + rtol, atol = 3e-2, 5e-2 + rtolw, atolw = (1e-3, 1e-3) + # If we have z, the errors on the weights seem higher + rtolw = max(rtolw, rtol) + atolw = max(atolw, atol) + # set seed + torch.random.manual_seed(0) + batch_size = 2 + dim = 768 + dstate = 8 + dt_rank = 48 + is_complex = wtype == torch.complex64 + xz = torch.randn(batch_size, 2 * dim, seqlen, device=device, dtype=itype, requires_grad=True) + conv1d_weight = torch.randn(dim, 1, 3, device=device, dtype=torch.float32, requires_grad=True) + conv1d_bias = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True) + x_proj_weight = torch.randn(dt_rank + (bool(is_variable_B) + bool(is_variable_C)) * dstate + * (1 if not is_complex else 2), + dim, device=device, dtype=itype, requires_grad=True) + delta_proj_weight = torch.randn(dim, dt_rank, device=device, dtype=itype, requires_grad=True) + out_proj_weight 
= torch.randn(dim // 2, dim, device=device, dtype=itype, requires_grad=True) + out_proj_bias = None + A = (-0.5 * torch.rand(dim, dstate, device=device, dtype=wtype)).requires_grad_() + A_b = (-0.5 * torch.rand(dim, dstate, device=device, dtype=wtype)).requires_grad_() + B = (torch.randn(dim, dstate, device=device, dtype=wtype, requires_grad=True) + if not is_variable_B else None) + C = (torch.randn(dim, dstate, device=device, dtype=wtype, requires_grad=True) + if not is_variable_C else None) + D = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True) + delta_bias = (0.5 * torch.rand(dim, device=device, dtype=torch.float32)).requires_grad_() + B_proj_bias = None + C_proj_bias = None + xz_ref = xz.detach().clone().requires_grad_() + conv1d_weight_ref = conv1d_weight.detach().clone().requires_grad_() + conv1d_bias_ref = conv1d_bias.detach().clone().requires_grad_() + x_proj_weight_ref = x_proj_weight.detach().clone().requires_grad_() + delta_proj_weight_ref = delta_proj_weight.detach().clone().requires_grad_() + out_proj_weight_ref = out_proj_weight.detach().clone().requires_grad_() + out_proj_bias_ref = (out_proj_bias.detach().clone().requires_grad_() + if out_proj_bias is not None else None) + A_ref = A.detach().clone().requires_grad_() + A_b_ref = A_b.detach().clone().requires_grad_() + B_ref = B.detach().clone().requires_grad_() if B is not None else None + C_ref = C.detach().clone().requires_grad_() if C is not None else None + D_ref = D.detach().clone().requires_grad_() + delta_bias_ref = delta_bias.detach().clone().requires_grad_() if delta_bias is not None else None + out = bimamba_inner_fn(xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, A_b, B, C, D, delta_bias=delta_bias, delta_softplus=True) + out_ref = bimamba_inner_fn(xz_ref, conv1d_weight_ref, conv1d_bias_ref, x_proj_weight_ref, + delta_proj_weight_ref, out_proj_weight_ref, out_proj_bias_ref, + A_ref, A_b_ref, B_ref, C_ref, D_ref, + delta_bias=delta_bias_ref, delta_softplus=True) + # dA = torch.exp(torch.einsum('bdl,dn->bdln', delta, A)) + # dt_u = delta * u + print("bimamba_inner_fn") + print(f'Output max diff: {(out - out_ref).abs().max().item()}') + print(f'Output mean diff: {(out - out_ref).abs().mean().item()}') + assert torch.allclose(out, out_ref, rtol=rtol, atol=atol) + + g = torch.randn_like(out) + out_ref.backward(g) + out.backward(g) + + print(f'dxz max diff: {(xz.grad - xz_ref.grad).abs().max().item()}') + print(f'dA max diff: {(A.grad - A_ref.grad).abs().max().item()}') + print(f'dA_b max diff: {(A_b.grad - A_b_ref.grad).abs().max().item()}') + if not is_variable_B: + print(f'dB max diff: {(B.grad - B_ref.grad).abs().max().item()}') + if not is_variable_C: + print(f'dC max diff: {(C.grad - C_ref.grad).abs().max().item()}') + print(f'dD max diff: {(D.grad - D_ref.grad).abs().max().item()}') + print(f'ddelta_bias max diff: {(delta_bias.grad - delta_bias_ref.grad).abs().max().item()}') + print(f'dout_proj_weight max diff: {(out_proj_weight.grad - out_proj_weight_ref.grad).abs().max().item()}') + print(f'ddelta_proj_weight max diff: {(delta_proj_weight.grad - delta_proj_weight_ref.grad).abs().max().item()}') + print(f'dx_proj_weight max diff: {(x_proj_weight.grad - x_proj_weight_ref.grad).abs().max().item()}') + print(f'dconv1d_weight max diff: {(conv1d_weight.grad - conv1d_weight_ref.grad).abs().max().item()}') + print(f'dconv1d_bias max diff: {(conv1d_bias.grad - conv1d_bias_ref.grad).abs().max().item()}') + +@pytest.mark.parametrize('wtype', 
[torch.float32, torch.complex64]) +# @pytest.mark.parametrize('wtype', [torch.complex64]) +# @pytest.mark.parametrize('itype', [torch.float32, torch.float16, torch.bfloat16]) +@pytest.mark.parametrize('itype', [torch.float32]) +# @pytest.mark.parametrize('seqlen', [8, 16, 32, 64, 128, 256, 372, 512, 784, 1024, 1134, 2048, 4096]) +@pytest.mark.parametrize('seqlen', [128]) +@pytest.mark.parametrize("is_variable_C", [False, True]) +# @pytest.mark.parametrize("is_variable_C", [False]) +@pytest.mark.parametrize("is_variable_B", [False, True]) +# @pytest.mark.parametrize("is_variable_B", [True]) +def test_bimamba_inner_fn_grad_check(is_variable_B, is_variable_C, seqlen, itype, wtype): + device = 'cuda' + rtol, atol = (6e-4, 2e-3) if itype == torch.float32 else (3e-3, 5e-3) + if itype == torch.bfloat16: + rtol, atol = 3e-2, 5e-2 + rtolw, atolw = (1e-3, 1e-3) + # If we have z, the errors on the weights seem higher + rtolw = max(rtolw, rtol) + atolw = max(atolw, atol) + # set seed + torch.random.manual_seed(0) + batch_size = 2 // 2 + dim = 768 // 8 + dstate = 8 // 8 + dt_rank = 48 // 8 + is_complex = wtype == torch.complex64 + xz = torch.randn(batch_size, 2 * dim, seqlen, device=device, dtype=itype, requires_grad=True) + conv1d_weight = torch.randn(dim, 1, 3, device=device, dtype=torch.float32, requires_grad=True) + conv1d_bias = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True) + x_proj_weight = torch.randn(dt_rank + (bool(is_variable_B) + bool(is_variable_C)) * dstate + * (1 if not is_complex else 2), + dim, device=device, dtype=itype, requires_grad=True) + delta_proj_weight = torch.randn(dim, dt_rank, device=device, dtype=itype, requires_grad=True) + out_proj_weight = torch.randn(dim // 2, dim, device=device, dtype=itype, requires_grad=True) + out_proj_bias = None + A = (-0.5 * torch.rand(dim, dstate, device=device, dtype=wtype)).requires_grad_() + A_b = (-0.5 * torch.rand(dim, dstate, device=device, dtype=wtype)).requires_grad_() + B = (torch.randn(dim, dstate, device=device, dtype=wtype, requires_grad=True) + if not is_variable_B else None) + C = (torch.randn(dim, dstate, device=device, dtype=wtype, requires_grad=True) + if not is_variable_C else None) + D = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True) + delta_bias = (0.5 * torch.rand(dim, device=device, dtype=torch.float32)).requires_grad_() + B_proj_bias = None + C_proj_bias = None + xz_ref = xz.detach().clone().requires_grad_() + conv1d_weight_ref = conv1d_weight.detach().clone().requires_grad_() + conv1d_bias_ref = conv1d_bias.detach().clone().requires_grad_() + x_proj_weight_ref = x_proj_weight.detach().clone().requires_grad_() + delta_proj_weight_ref = delta_proj_weight.detach().clone().requires_grad_() + out_proj_weight_ref = out_proj_weight.detach().clone().requires_grad_() + out_proj_bias_ref = (out_proj_bias.detach().clone().requires_grad_() + if out_proj_bias is not None else None) + A_ref = A.detach().clone().requires_grad_() + A_b_ref = A_b.detach().clone().requires_grad_() + B_ref = B.detach().clone().requires_grad_() if B is not None else None + C_ref = C.detach().clone().requires_grad_() if C is not None else None + D_ref = D.detach().clone().requires_grad_() + delta_bias_ref = delta_bias.detach().clone().requires_grad_() if delta_bias is not None else None + + # func = bimamba_inner_fn + # func = mamba_inner_fn + func = mamba_inner_ref + + # gradok = gradcheck(func, (xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,out_proj_weight, out_proj_bias, A, A_b, B, C, 
D, delta_bias, None, None, True)) + gradok = gradcheck(func, (xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,out_proj_weight, out_proj_bias, A, B, C, D, delta_bias, None, None, True), eps=1e-6, atol=1e-4, nondet_tol=1.) + print(f'* {gradok} check_gradient_numerical bimamba_inner_fn') + + + +# test_bimamba_inner_fn(True, True, 128, torch.float32, torch.float32) +# test_mamba_inner_fn(True, True, 128, torch.float32, torch.float32) +test_bimamba_inner_fn_grad_check(True, True, 128, torch.float32, torch.float32) + +# input = (torch.randn(20,20,dtype=torch.double,requires_grad=True), torch.randn(30,20,dtype=torch.double,requires_grad=True)) +# test = gradcheck(torch.nn.functional.linear, input, eps=1e-6, atol=1e-4) +# print(test) \ No newline at end of file diff --git a/SegMamba/mamba/tests/ops/triton/test_selective_state_update.py b/SegMamba/mamba/tests/ops/triton/test_selective_state_update.py new file mode 100644 index 0000000000000000000000000000000000000000..70a8d79d9cad3e4d33897478caf178bd96d0ae5a --- /dev/null +++ b/SegMamba/mamba/tests/ops/triton/test_selective_state_update.py @@ -0,0 +1,49 @@ +# Copyright (C) 2023, Tri Dao. + +import math + +import torch +import torch.nn.functional as F +import pytest + +from einops import rearrange + +from mamba_ssm.ops.triton.selective_state_update import selective_state_update, selective_state_update_ref + + +@pytest.mark.parametrize("itype", [torch.float32, torch.float16, torch.bfloat16]) +# @pytest.mark.parametrize('itype', [torch.float16]) +@pytest.mark.parametrize("has_z", [False, True]) +# @pytest.mark.parametrize('has_z', [True]) +@pytest.mark.parametrize("dstate", [16, 32, 64]) +# @pytest.mark.parametrize("dstate", [16]) +@pytest.mark.parametrize("dim", [2048, 2048 + 16, 4096]) +# @pytest.mark.parametrize("dim", [2048]) +def test_causal_conv1d_update(dim, dstate, has_z, itype): + device = "cuda" + rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (5e-3, 1e-2) + if itype == torch.bfloat16: + rtol, atol = 1e-2, 5e-2 + # set seed + torch.random.manual_seed(0) + batch_size = 2 + state = torch.randn(batch_size, dim, dstate, dtype=itype, device=device) + x = torch.randn(batch_size, dim, device=device, dtype=itype) + dt = torch.randn(batch_size, dim, device=device, dtype=itype) + dt_bias = torch.rand(dim, device=device) - 4.0 + A = -torch.rand(dim, dstate, device=device) - 1.0 + B = torch.randn(batch_size, dstate, device=device) + C = torch.randn(batch_size, dstate, device=device) + D = torch.randn(dim, device=device) + if has_z: + z = torch.randn_like(x) + else: + z = None + state_ref = state.detach().clone() + out = selective_state_update(state, x, dt, A, B, C, D=D, z=z, dt_bias=dt_bias, dt_softplus=True) + out_ref = selective_state_update_ref(state_ref, x, dt, A, B, C, D=D, z=z, dt_bias=dt_bias, dt_softplus=True) + + print(f"Output max diff: {(out - out_ref).abs().max().item()}") + print(f"Output mean diff: {(out - out_ref).abs().mean().item()}") + assert torch.allclose(state, state_ref, rtol=rtol, atol=atol) + assert torch.allclose(out, out_ref, rtol=rtol, atol=atol) diff --git a/SegMamba/model_segmamba/segmamba.py b/SegMamba/model_segmamba/segmamba.py new file mode 100644 index 0000000000000000000000000000000000000000..b53e49400d0b8825c080a64b96e1cc89a4cc6676 --- /dev/null +++ b/SegMamba/model_segmamba/segmamba.py @@ -0,0 +1,344 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations +import torch.nn as nn +import torch +from functools import partial + +from monai.networks.blocks.dynunet_block import UnetOutBlock +from monai.networks.blocks.unetr_block import UnetrBasicBlock, UnetrUpBlock +from mamba_ssm import Mamba +import torch.nn.functional as F + +class LayerNorm(nn.Module): + r""" LayerNorm that supports two data formats: channels_last (default) or channels_first. + The ordering of the dimensions in the inputs. channels_last corresponds to inputs with + shape (batch_size, height, width, channels) while channels_first corresponds to inputs + with shape (batch_size, channels, height, width). + """ + def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"): + super().__init__() + self.weight = nn.Parameter(torch.ones(normalized_shape)) + self.bias = nn.Parameter(torch.zeros(normalized_shape)) + self.eps = eps + self.data_format = data_format + if self.data_format not in ["channels_last", "channels_first"]: + raise NotImplementedError + self.normalized_shape = (normalized_shape, ) + + def forward(self, x): + if self.data_format == "channels_last": + return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) + elif self.data_format == "channels_first": + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None, None] * x + self.bias[:, None, None, None] + + return x + +class MambaLayer(nn.Module): + def __init__(self, dim, d_state = 16, d_conv = 4, expand = 2, num_slices=None): + super().__init__() + self.dim = dim + self.norm = nn.LayerNorm(dim) + self.mamba = Mamba( + d_model=dim, # Model dimension d_model + d_state=d_state, # SSM state expansion factor + d_conv=d_conv, # Local convolution width + expand=expand, # Block expansion factor + bimamba_type="v3", + nslices=num_slices, + ) + + def forward(self, x): + B, C = x.shape[:2] + x_skip = x + assert C == self.dim + n_tokens = x.shape[2:].numel() + img_dims = x.shape[2:] + x_flat = x.reshape(B, C, n_tokens).transpose(-1, -2) + x_norm = self.norm(x_flat) + x_mamba = self.mamba(x_norm) + + out = x_mamba.transpose(-1, -2).reshape(B, C, *img_dims) + out = out + x_skip + + return out + +class MlpChannel(nn.Module): + def __init__(self,hidden_size, mlp_dim, ): + super().__init__() + self.fc1 = nn.Conv3d(hidden_size, mlp_dim, 1) + self.act = nn.GELU() + self.fc2 = nn.Conv3d(mlp_dim, hidden_size, 1) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.fc2(x) + return x + +class GSC(nn.Module): + def __init__(self, in_channles) -> None: + super().__init__() + + self.proj = nn.Conv3d(in_channles, in_channles, 3, 1, 1) + self.norm = nn.InstanceNorm3d(in_channles) + self.nonliner = nn.ReLU() + + self.proj2 = nn.Conv3d(in_channles, in_channles, 3, 1, 1) + self.norm2 = nn.InstanceNorm3d(in_channles) + self.nonliner2 = nn.ReLU() + + self.proj3 = nn.Conv3d(in_channles, in_channles, 1, 1, 0) + self.norm3 = nn.InstanceNorm3d(in_channles) + self.nonliner3 = nn.ReLU() + + self.proj4 = nn.Conv3d(in_channles, in_channles, 1, 1, 0) + self.norm4 = 
nn.InstanceNorm3d(in_channles) + self.nonliner4 = nn.ReLU() + + def forward(self, x): + + x_residual = x + + x1 = self.proj(x) + x1 = self.norm(x1) + x1 = self.nonliner(x1) + + x1 = self.proj2(x1) + x1 = self.norm2(x1) + x1 = self.nonliner2(x1) + + x2 = self.proj3(x) + x2 = self.norm3(x2) + x2 = self.nonliner3(x2) + + x = x1 + x2 + x = self.proj4(x) + x = self.norm4(x) + x = self.nonliner4(x) + + return x + x_residual + +class MambaEncoder(nn.Module): + def __init__(self, in_chans=1, depths=[2, 2, 2, 2], dims=[48, 96, 192, 384], + drop_path_rate=0., layer_scale_init_value=1e-6, out_indices=[0, 1, 2, 3]): + super().__init__() + + self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers + stem = nn.Sequential( + nn.Conv3d(in_chans, dims[0], kernel_size=7, stride=2, padding=3), + ) + self.downsample_layers.append(stem) + for i in range(3): + downsample_layer = nn.Sequential( + # LayerNorm(dims[i], eps=1e-6, data_format="channels_first"), + nn.InstanceNorm3d(dims[i]), + nn.Conv3d(dims[i], dims[i+1], kernel_size=2, stride=2), + ) + self.downsample_layers.append(downsample_layer) + + self.stages = nn.ModuleList() + self.gscs = nn.ModuleList() + num_slices_list = [64, 32, 16, 8] + cur = 0 + for i in range(4): + gsc = GSC(dims[i]) + + stage = nn.Sequential( + *[MambaLayer(dim=dims[i], num_slices=num_slices_list[i]) for j in range(depths[i])] + ) + + self.stages.append(stage) + self.gscs.append(gsc) + cur += depths[i] + + self.out_indices = out_indices + + self.mlps = nn.ModuleList() + for i_layer in range(4): + layer = nn.InstanceNorm3d(dims[i_layer]) + layer_name = f'norm{i_layer}' + self.add_module(layer_name, layer) + self.mlps.append(MlpChannel(dims[i_layer], 2 * dims[i_layer])) + + def forward_features(self, x): + outs = [] + for i in range(4): + x = self.downsample_layers[i](x) + x = self.gscs[i](x) + x = self.stages[i](x) + + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + x_out = norm_layer(x) + x_out = self.mlps[i](x_out) + outs.append(x_out) + + return tuple(outs) + + def forward(self, x): + x = self.forward_features(x) + return x + +class SegMamba(nn.Module): + def __init__( + self, + in_chans=1, + out_chans=13, + depths=[2, 2, 2, 2], + feat_size=[48, 96, 192, 384], + drop_path_rate=0, + layer_scale_init_value=1e-6, + hidden_size: int = 768, + norm_name = "instance", + conv_block: bool = True, + res_block: bool = True, + spatial_dims=3, + ) -> None: + super().__init__() + + self.hidden_size = hidden_size + self.in_chans = in_chans + self.out_chans = out_chans + self.depths = depths + self.drop_path_rate = drop_path_rate + self.feat_size = feat_size + self.layer_scale_init_value = layer_scale_init_value + + self.spatial_dims = spatial_dims + self.vit = MambaEncoder(in_chans, + depths=depths, + dims=feat_size, + drop_path_rate=drop_path_rate, + layer_scale_init_value=layer_scale_init_value, + ) + self.encoder1 = UnetrBasicBlock( + spatial_dims=spatial_dims, + in_channels=self.in_chans, + out_channels=self.feat_size[0], + kernel_size=3, + stride=1, + norm_name=norm_name, + res_block=res_block, + ) + self.encoder2 = UnetrBasicBlock( + spatial_dims=spatial_dims, + in_channels=self.feat_size[0], + out_channels=self.feat_size[1], + kernel_size=3, + stride=1, + norm_name=norm_name, + res_block=res_block, + ) + self.encoder3 = UnetrBasicBlock( + spatial_dims=spatial_dims, + in_channels=self.feat_size[1], + out_channels=self.feat_size[2], + kernel_size=3, + stride=1, + norm_name=norm_name, + res_block=res_block, + ) + self.encoder4 = 
UnetrBasicBlock( + spatial_dims=spatial_dims, + in_channels=self.feat_size[2], + out_channels=self.feat_size[3], + kernel_size=3, + stride=1, + norm_name=norm_name, + res_block=res_block, + ) + + self.encoder5 = UnetrBasicBlock( + spatial_dims=spatial_dims, + in_channels=self.feat_size[3], + out_channels=self.hidden_size, + kernel_size=3, + stride=1, + norm_name=norm_name, + res_block=res_block, + ) + + self.decoder5 = UnetrUpBlock( + spatial_dims=spatial_dims, + in_channels=self.hidden_size, + out_channels=self.feat_size[3], + kernel_size=3, + upsample_kernel_size=2, + norm_name=norm_name, + res_block=res_block, + ) + self.decoder4 = UnetrUpBlock( + spatial_dims=spatial_dims, + in_channels=self.feat_size[3], + out_channels=self.feat_size[2], + kernel_size=3, + upsample_kernel_size=2, + norm_name=norm_name, + res_block=res_block, + ) + self.decoder3 = UnetrUpBlock( + spatial_dims=spatial_dims, + in_channels=self.feat_size[2], + out_channels=self.feat_size[1], + kernel_size=3, + upsample_kernel_size=2, + norm_name=norm_name, + res_block=res_block, + ) + self.decoder2 = UnetrUpBlock( + spatial_dims=spatial_dims, + in_channels=self.feat_size[1], + out_channels=self.feat_size[0], + kernel_size=3, + upsample_kernel_size=2, + norm_name=norm_name, + res_block=res_block, + ) + self.decoder1 = UnetrBasicBlock( + spatial_dims=spatial_dims, + in_channels=self.feat_size[0], + out_channels=self.feat_size[0], + kernel_size=3, + stride=1, + norm_name=norm_name, + res_block=res_block, + ) + self.out = UnetOutBlock(spatial_dims=spatial_dims, in_channels=48, out_channels=self.out_chans) + + def proj_feat(self, x): + new_view = [x.size(0)] + self.proj_view_shape + x = x.view(new_view) + x = x.permute(self.proj_axes).contiguous() + return x + + def forward(self, x_in): + outs = self.vit(x_in) + enc1 = self.encoder1(x_in) + x2 = outs[0] + enc2 = self.encoder2(x2) + x3 = outs[1] + enc3 = self.encoder3(x3) + x4 = outs[2] + enc4 = self.encoder4(x4) + enc_hidden = self.encoder5(outs[3]) + dec3 = self.decoder5(enc_hidden, enc4) + dec2 = self.decoder4(dec3, enc3) + dec1 = self.decoder3(dec2, enc2) + dec0 = self.decoder2(dec1, enc1) + out = self.decoder1(dec0) + + return self.out(out) + diff --git a/SegMamba/monai/.DS_Store b/SegMamba/monai/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..931d3855616e373c0bec96373a4b56604bb9b337 Binary files /dev/null and b/SegMamba/monai/.DS_Store differ diff --git a/SegMamba/monai/README.md b/SegMamba/monai/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a1e36c62100cc46ec01c41a4d1bd72aa93705649 --- /dev/null +++ b/SegMamba/monai/README.md @@ -0,0 +1,38 @@ +# MONAI + +* **apps**: high level medical domain specific deep learning applications. + +* **auto3dseg**: automated machine learning (AutoML) components for volumetric image analysis. + +* **bundle**: components to build the portable self-descriptive model bundle. + +* **config**: for system configuration and diagnostic output. + +* **csrc**: for C++/CUDA extensions. + +* **data**: for the datasets, readers/writers, and synthetic data. + +* **engines**: engine-derived classes for extending Ignite behaviour. + +* **fl**: federated learning components to allow pipeline integration with any federated learning framework. + +* **handlers**: defines handlers for implementing functionality at various stages in the training process. + +* **inferers**: defines model inference methods. 
+ +* **losses**: classes defining loss functions, which follow the pattern of `torch.nn.modules.loss`. + +* **metrics**: defines metric tracking types. + +* **networks**: contains network definitions, component definitions, and Pytorch specific utilities. + +* **optimizers**: classes defining optimizers, which follow the pattern of `torch.optim`. + +* **transforms**: defines data transforms for preprocessing and postprocessing. + +* **utils**: generic utilities intended to be implemented in pure Python or using Numpy, +and not with Pytorch, such as namespace aliasing, auto module loading. + +* **visualize**: utilities for data visualization. + +* **_extensions**: C++/CUDA extensions to be loaded in a just-in-time manner using `torch.utils.cpp_extension.load`. diff --git a/SegMamba/monai/__init__.py b/SegMamba/monai/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..eb05ac993d02206a49982d8ff690d87e47b1951a --- /dev/null +++ b/SegMamba/monai/__init__.py @@ -0,0 +1,95 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import os +import sys + +from ._version import get_versions + +PY_REQUIRED_MAJOR = 3 +PY_REQUIRED_MINOR = 8 + +version_dict = get_versions() +__version__: str = version_dict.get("version", "0+unknown") +__revision_id__: str = version_dict.get("full-revisionid") +del get_versions, version_dict + +__copyright__ = "(c) MONAI Consortium" + +__basedir__ = os.path.dirname(__file__) + +if sys.version_info.major != PY_REQUIRED_MAJOR or sys.version_info.minor < PY_REQUIRED_MINOR: + import warnings + + warnings.warn( + f"MONAI requires Python {PY_REQUIRED_MAJOR}.{PY_REQUIRED_MINOR} or higher. 
" + f"But the current Python is: {sys.version}", + category=RuntimeWarning, + ) + +from .utils.module import load_submodules # noqa: E402 + +# handlers_* have some external decorators the users may not have installed +# *.so files and folder "_C" may not exist when the cpp extensions are not compiled +excludes = "|".join( + [ + "(^(monai.handlers))", + "(^(monai.bundle))", + "(^(monai.fl))", + "((\\.so)$)", + "(^(monai._C))", + "(.*(__main__)$)", + "(.*(video_dataset)$)", + "(.*(nnunet).*$)", + ] +) + +# load directory modules only, skip loading individual files +load_submodules(sys.modules[__name__], False, exclude_pattern=excludes) + +# load all modules, this will trigger all export decorations +load_submodules(sys.modules[__name__], True, exclude_pattern=excludes) + +__all__ = [ + "apps", + "auto3dseg", + "bundle", + "config", + "data", + "engines", + "fl", + "handlers", + "inferers", + "losses", + "metrics", + "networks", + "optimizers", + "transforms", + "utils", + "visualize", +] + +try: + from .utils.tf32 import detect_default_tf32 + + detect_default_tf32() + import torch + + # workaround related to https://github.com/Project-MONAI/MONAI/issues/7575 + if hasattr(torch.cuda.device_count, "cache_clear"): + torch.cuda.device_count.cache_clear() +except BaseException: + from .utils.misc import MONAIEnvVars + + if MONAIEnvVars.debug(): + raise diff --git a/SegMamba/monai/_extensions/__init__.py b/SegMamba/monai/_extensions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..47d0c7021ac5adb355ded3b6c0d68b920b88d6ad --- /dev/null +++ b/SegMamba/monai/_extensions/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from .loader import load_module diff --git a/SegMamba/monai/_extensions/gmm/gmm.cpp b/SegMamba/monai/_extensions/gmm/gmm.cpp new file mode 100644 index 0000000000000000000000000000000000000000..577e5b117ef13fb81f9aeaac8c70bd22fda963cb --- /dev/null +++ b/SegMamba/monai/_extensions/gmm/gmm.cpp @@ -0,0 +1,83 @@ +/* +Copyright (c) MONAI Consortium +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +#include + +#include "gmm.h" + +py::tuple init() { + torch::Tensor gmm_tensor = + torch::zeros({GMM_COUNT, GMM_COMPONENT_COUNT}, torch::dtype(torch::kFloat32).device(torch::kCUDA)); + torch::Tensor scratch_tensor = torch::empty({1}, torch::dtype(torch::kFloat32).device(torch::kCUDA)); + return py::make_tuple(gmm_tensor, scratch_tensor); +} + +void learn( + torch::Tensor gmm_tensor, + torch::Tensor scratch_tensor, + torch::Tensor input_tensor, + torch::Tensor label_tensor) { + c10::DeviceType device_type = input_tensor.device().type(); + + unsigned int batch_count = input_tensor.size(0); + unsigned int element_count = input_tensor.stride(1); + + unsigned int scratch_size = + batch_count * (element_count + GMM_COMPONENT_COUNT * GMM_COUNT * (element_count / (32 * 32))); + + if (scratch_tensor.size(0) < scratch_size) { + scratch_tensor.resize_({scratch_size}); + } + + float* gmm = gmm_tensor.data_ptr(); + float* scratch = scratch_tensor.data_ptr(); + float* input = input_tensor.data_ptr(); + int* labels = label_tensor.data_ptr(); + + if (device_type == torch::kCUDA) { + learn_cuda(input, labels, gmm, scratch, batch_count, element_count); + } else { + learn_cpu(input, labels, gmm, scratch, batch_count, element_count); + } +} + +torch::Tensor apply(torch::Tensor gmm_tensor, torch::Tensor input_tensor) { + c10::DeviceType device_type = input_tensor.device().type(); + + unsigned int dim = input_tensor.dim(); + unsigned int batch_count = input_tensor.size(0); + unsigned int element_count = input_tensor.stride(1); + + auto output_size = input_tensor.sizes().vec(); + output_size[1] = MIXTURE_COUNT; + torch::Tensor output_tensor = + torch::empty(c10::IntArrayRef(output_size), torch::dtype(torch::kFloat32).device(device_type)); + + const float* gmm = gmm_tensor.data_ptr(); + const float* input = input_tensor.data_ptr(); + float* output = output_tensor.data_ptr(); + + if (device_type == torch::kCUDA) { + apply_cuda(gmm, input, output, batch_count, element_count); + } else { + apply_cpu(gmm, input, output, batch_count, element_count); + } + + return output_tensor; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("init", torch::wrap_pybind_function(init)); + m.def("learn", torch::wrap_pybind_function(learn)); + m.def("apply", torch::wrap_pybind_function(apply)); +} diff --git a/SegMamba/monai/_extensions/gmm/gmm.h b/SegMamba/monai/_extensions/gmm/gmm.h new file mode 100644 index 0000000000000000000000000000000000000000..09c0389ae66f0161e3ca4d997f0ce0a95e66e5df --- /dev/null +++ b/SegMamba/monai/_extensions/gmm/gmm.h @@ -0,0 +1,53 @@ +/* +Copyright (c) MONAI Consortium +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +#if !defined(CHANNEL_COUNT) || !defined(MIXTURE_COUNT) || !defined(MIXTURE_SIZE) +#error Definition of CHANNEL_COUNT, MIXTURE_COUNT, and MIXTURE_SIZE required +#endif + +#if CHANNEL_COUNT < 1 || MIXTURE_COUNT < 1 || MIXTURE_SIZE < 1 +#error CHANNEL_COUNT, MIXTURE_COUNT, and MIXTURE_SIZE must be positive +#endif + +#define MATRIX_COMPONENT_COUNT ((CHANNEL_COUNT + 1) * (CHANNEL_COUNT + 2) / 2) +#define SUB_MATRIX_COMPONENT_COUNT (CHANNEL_COUNT * (CHANNEL_COUNT + 1) / 2) +#define GMM_COMPONENT_COUNT (MATRIX_COMPONENT_COUNT + 1) +#define GMM_COUNT (MIXTURE_COUNT * MIXTURE_SIZE) + +void learn_cpu( + const float* input, + const int* labels, + float* gmm, + float* scratch_memory, + unsigned int batch_count, + unsigned int element_count); +void apply_cpu( + const float* gmm, + const float* input, + float* output, + unsigned int batch_count, + unsigned int element_count); + +void learn_cuda( + const float* input, + const int* labels, + float* gmm, + float* scratch_memory, + unsigned int batch_count, + unsigned int element_count); +void apply_cuda( + const float* gmm, + const float* input, + float* output, + unsigned int batch_count, + unsigned int element_count); diff --git a/SegMamba/monai/_extensions/gmm/gmm_cpu.cpp b/SegMamba/monai/_extensions/gmm/gmm_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d7eedc07c8602b9c08e09d2be1c4431eb6045d7e --- /dev/null +++ b/SegMamba/monai/_extensions/gmm/gmm_cpu.cpp @@ -0,0 +1,35 @@ +/* +Copyright (c) MONAI Consortium +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#include + +#include "gmm.h" + +void learn_cpu( + const float* input, + const int* labels, + float* gmm, + float* scratch_memory, + unsigned int batch_count, + unsigned int element_count) { + throw std::invalid_argument("GMM received a cpu tensor but is not yet implemented for the cpu"); +} + +void apply_cpu( + const float* gmm, + const float* input, + float* output, + unsigned int batch_count, + unsigned int element_count) { + throw std::invalid_argument("GMM received a cpu tensor but is not yet implemented for the cpu"); +} diff --git a/SegMamba/monai/_extensions/gmm/gmm_cuda.cu b/SegMamba/monai/_extensions/gmm/gmm_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..0c808d3165d29c4e646bb0390e7ccb62a84a1f6e --- /dev/null +++ b/SegMamba/monai/_extensions/gmm/gmm_cuda.cu @@ -0,0 +1,516 @@ +/* +Copyright (c) MONAI Consortium +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +#include +#include + +#include "gmm.h" + +#include "gmm_cuda_linalg.cuh" + +#define EPSILON 1e-5 +#define BLOCK_SIZE 32 +#define TILE(SIZE, STRIDE) ((((SIZE)-1) / (STRIDE)) + 1) +#ifdef __HIP_PLATFORM_AMD__ +#define __SHFL_DOWN(a, b) __shfl_down(a, b) +#define __SHFL_XOR(a, b) __shfl_xor(a, b) +#else +#define __SHFL_DOWN(a, b) __shfl_down_sync(0xffffffff, a, b) +#define __SHFL_XOR(a, b) __shfl_xor_sync(0xffffffff, a, b) +#endif + +template +__global__ void CovarianceReductionKernel( + int gaussian_index, + const float* g_image, + const int* g_alpha, + float* g_matrices, + int element_count) { + constexpr int block_size = warp_count * 32; + + __shared__ float s_matrix_component[warp_count]; + + int batch_index = blockIdx.z; + + const float* g_batch_image = g_image + batch_index * element_count * CHANNEL_COUNT; + const int* g_batch_alpha = g_alpha + batch_index * element_count; + float* g_batch_matrices = g_matrices + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT * gridDim.x; + + int local_index = threadIdx.x; + int block_index = blockIdx.x; + int warp_index = local_index >> 5; + int lane_index = local_index & 31; + int global_index = local_index + block_index * block_size * load_count; + int matrix_offset = (gaussian_index * gridDim.x + block_index) * GMM_COMPONENT_COUNT; + + float matrix[MATRIX_COMPONENT_COUNT]; + + for (int i = 0; i < MATRIX_COMPONENT_COUNT; i++) { + matrix[i] = 0; + } + + for (int load = 0; load < load_count; load++) { + global_index += load * block_size; + + if (global_index < element_count) { + int my_alpha = g_batch_alpha[global_index]; + + if (my_alpha != -1) { + if (gaussian_index == (my_alpha & 15) + (my_alpha >> 4) * MIXTURE_COUNT) { + float feature[CHANNEL_COUNT + 1]; + + feature[0] = 1; + + for (int i = 0; i < CHANNEL_COUNT; i++) { + feature[i + 1] = g_batch_image[global_index + i * element_count]; + } + + for (int index = 0, i = 0; i < CHANNEL_COUNT + 1; i++) { + for (int j = i; j < CHANNEL_COUNT + 1; j++, index++) { + matrix[index] += feature[i] * feature[j]; + } + } + } + } + } + } + + __syncthreads(); + + for (int i = 0; i < MATRIX_COMPONENT_COUNT; i++) { + float matrix_component = matrix[i]; + matrix_component += __SHFL_DOWN(matrix_component, 16); + matrix_component += __SHFL_DOWN(matrix_component, 8); + matrix_component += __SHFL_DOWN(matrix_component, 4); + matrix_component += __SHFL_DOWN(matrix_component, 2); + matrix_component += __SHFL_DOWN(matrix_component, 1); + if (lane_index == 0) { + s_matrix_component[warp_index] = matrix_component; + } + + __syncthreads(); + + if (warp_index == 0) { + matrix_component = s_matrix_component[lane_index]; + if (warp_count >= 32) { + matrix_component += __SHFL_DOWN(matrix_component, 16); + } + if (warp_count >= 16) { + matrix_component += __SHFL_DOWN(matrix_component, 8); + } + if (warp_count >= 8) { + matrix_component += __SHFL_DOWN(matrix_component, 4); + } + if (warp_count >= 4) { + matrix_component += __SHFL_DOWN(matrix_component, 2); + } + if (warp_count >= 2) { + matrix_component += __SHFL_DOWN(matrix_component, 1); + } + if (lane_index == 0) { + g_batch_matrices[matrix_offset + i] = matrix_component; + } + } + + __syncthreads(); + } +} + +template +__global__ void CovarianceFinalizationKernel(const float* g_matrices, float* g_gmm, int matrix_count) { + constexpr int block_size = warp_count * 32; + + __shared__ float s_matrix_component[warp_count]; + __shared__ float s_gmm[GMM_COMPONENT_COUNT]; + + int batch_index = blockIdx.z; + + const float* g_batch_matrices = g_matrices + batch_index * GMM_COUNT * 
GMM_COMPONENT_COUNT * matrix_count; + float* g_batch_gmm = g_gmm + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT; + + int local_index = threadIdx.x; + int warp_index = local_index >> 5; + int lane_index = local_index & 31; + int gmm_index = blockIdx.x; + int matrix_offset = gmm_index * matrix_count; + + int load_count = TILE(matrix_count, block_size); + + float norm_factor = 1.0f; + + for (int index = 0, i = 0; i < CHANNEL_COUNT + 1; i++) { + for (int j = i; j < CHANNEL_COUNT + 1; j++, index++) { + float matrix_component = 0.0f; + + for (int load = 0; load < load_count; load++) { + int matrix_index = local_index + load * block_size; + + if (matrix_index < matrix_count) { + matrix_component += g_batch_matrices[(matrix_offset + matrix_index) * GMM_COMPONENT_COUNT + index]; + } + } + matrix_component += __SHFL_DOWN(matrix_component, 16); + matrix_component += __SHFL_DOWN(matrix_component, 8); + matrix_component += __SHFL_DOWN(matrix_component, 4); + matrix_component += __SHFL_DOWN(matrix_component, 2); + matrix_component += __SHFL_DOWN(matrix_component, 1); + if (lane_index == 0) { + s_matrix_component[warp_index] = matrix_component; + } + + __syncthreads(); + + if (warp_index == 0) { + matrix_component = s_matrix_component[lane_index]; + if (warp_count >= 32) { + matrix_component += __SHFL_DOWN(matrix_component, 16); + } + if (warp_count >= 16) { + matrix_component += __SHFL_DOWN(matrix_component, 8); + } + if (warp_count >= 8) { + matrix_component += __SHFL_DOWN(matrix_component, 4); + } + if (warp_count >= 4) { + matrix_component += __SHFL_DOWN(matrix_component, 2); + } + if (warp_count >= 2) { + matrix_component += __SHFL_DOWN(matrix_component, 1); + } + if (lane_index == 0) { + float constant = i == 0 ? 0.0f : s_gmm[i] * s_gmm[j]; + + if (i != 0 && i == j) { + constant -= EPSILON; + } + + s_gmm[index] = norm_factor * matrix_component - constant; + + if (index == 0 && matrix_component > 0) { + norm_factor = 1.0f / matrix_component; + } + } + } + + __syncthreads(); + } + } + + float* matrix = s_gmm + (CHANNEL_COUNT + 1); + float* det_ptr = s_gmm + MATRIX_COMPONENT_COUNT; + + if (local_index == 0) { + float square_mat[CHANNEL_COUNT][CHANNEL_COUNT]; + float cholesky_mat[CHANNEL_COUNT][CHANNEL_COUNT]; + + for (int i = 0; i < CHANNEL_COUNT; i++) { + for (int j = 0; j < CHANNEL_COUNT; j++) { + square_mat[i][j] = 0.0f; + cholesky_mat[i][j] = 0.0f; + } + } + + to_square(matrix, square_mat); + cholesky(square_mat, cholesky_mat); + + *det_ptr = chol_det(cholesky_mat); + + if (invert_matrix) { + chol_inv(cholesky_mat, square_mat); + to_triangle(square_mat, matrix); + } + } + + if (local_index < GMM_COMPONENT_COUNT) { + g_batch_gmm[gmm_index * GMM_COMPONENT_COUNT + local_index] = s_gmm[local_index]; + } +} + +struct GMMSplit_t { + int idx; + float threshold; + float eigenvector[CHANNEL_COUNT]; +}; + +// 1 Block, 32xMIXTURE_COUNT +__global__ void GMMFindSplit(GMMSplit_t* gmmSplit, int gmmK, float* gmm) { + int batch_index = blockIdx.z; + + float* g_batch_gmm = gmm + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT; + GMMSplit_t* g_batch_gmmSplit = gmmSplit + batch_index * MIXTURE_COUNT; + + int gmm_idx = threadIdx.x * MIXTURE_COUNT + threadIdx.y; + + float eigenvalue = 0; + float eigenvector[CHANNEL_COUNT]; + + if (threadIdx.x < gmmK) { + float* matrix = g_batch_gmm + gmm_idx * GMM_COMPONENT_COUNT + (CHANNEL_COUNT + 1); + largest_eigenpair(matrix, eigenvector, &eigenvalue); + } + + float max_value = eigenvalue; + max_value = max(max_value, __SHFL_XOR(max_value, 16)); + max_value = max(max_value, 
__SHFL_XOR(max_value, 8)); + max_value = max(max_value, __SHFL_XOR(max_value, 4)); + max_value = max(max_value, __SHFL_XOR(max_value, 2)); + max_value = max(max_value, __SHFL_XOR(max_value, 1)); + if (max_value == eigenvalue) { + GMMSplit_t split; + + float* average_feature = gmm + gmm_idx * GMM_COMPONENT_COUNT + 1; + + split.idx = threadIdx.x; + split.threshold = scalar_prod(average_feature, eigenvector); + + for (int i = 0; i < CHANNEL_COUNT; i++) { + split.eigenvector[i] = eigenvector[i]; + } + + g_batch_gmmSplit[threadIdx.y] = split; + } +} + +#define DO_SPLIT_DEGENERACY 4 + +__global__ void GMMDoSplit(const GMMSplit_t* gmmSplit, int k, const float* image, int* alpha, int element_count) { + __shared__ GMMSplit_t s_gmmSplit[MIXTURE_COUNT]; + + int batch_index = blockIdx.z; + + const GMMSplit_t* g_batch_gmmSplit = gmmSplit + batch_index * MIXTURE_COUNT; + const float* g_batch_image = image + batch_index * element_count * CHANNEL_COUNT; + int* g_batch_alpha = alpha + batch_index * element_count; + + int* s_linear = (int*)s_gmmSplit; + int* g_linear = (int*)g_batch_gmmSplit; + + if (threadIdx.x < MIXTURE_COUNT * sizeof(GMMSplit_t)) { + s_linear[threadIdx.x] = g_linear[threadIdx.x]; + } + + __syncthreads(); + + int index = threadIdx.x + blockIdx.x * BLOCK_SIZE * DO_SPLIT_DEGENERACY; + + for (int i = 0; i < DO_SPLIT_DEGENERACY; i++) { + index += BLOCK_SIZE; + + if (index < element_count) { + int my_alpha = g_batch_alpha[index]; + + if (my_alpha != -1) { + int select = my_alpha & 15; + int gmm_idx = my_alpha >> 4; + + if (gmm_idx == s_gmmSplit[select].idx) { + // in the split cluster now + float feature[CHANNEL_COUNT]; + + for (int i = 0; i < CHANNEL_COUNT; i++) { + feature[i] = g_batch_image[index + i * element_count]; + } + + float value = scalar_prod(s_gmmSplit[select].eigenvector, feature); + + if (value > s_gmmSplit[select].threshold) { + // assign pixel to new cluster + g_batch_alpha[index] = k + select; + } + } + } + } + } +} + +// Single block, 32xMIXTURE_COUNT +__global__ void GMMcommonTerm(float* g_gmm) { + int batch_index = blockIdx.z; + + float* g_batch_gmm = g_gmm + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT; + + int gmm_index = (threadIdx.x * MIXTURE_COUNT) + threadIdx.y; + + float gmm_n = threadIdx.x < MIXTURE_SIZE ? g_batch_gmm[gmm_index * GMM_COMPONENT_COUNT] : 0.0f; + + float sum = gmm_n; + sum += __SHFL_XOR(sum, 1); + sum += __SHFL_XOR(sum, 2); + sum += __SHFL_XOR(sum, 4); + sum += __SHFL_XOR(sum, 8); + sum += __SHFL_XOR(sum, 16); + + if (threadIdx.x < MIXTURE_SIZE) { + float det = g_batch_gmm[gmm_index * GMM_COMPONENT_COUNT + MATRIX_COMPONENT_COUNT] + EPSILON; + float commonTerm = det > 0.0f ? gmm_n / (sqrtf(det) * sum) : gmm_n / sum; + + g_batch_gmm[gmm_index * GMM_COMPONENT_COUNT + MATRIX_COMPONENT_COUNT] = commonTerm; + } +} + +__device__ float GMMTerm(float* feature, const float* gmm) { + const float* average_feature = gmm + 1; + const float* matrix = gmm + CHANNEL_COUNT + 1; + + float diff[CHANNEL_COUNT]; + + for (int i = 0; i < CHANNEL_COUNT; i++) { + diff[i] = feature[i] - average_feature[i]; + } + + float value = 0.0f; + + for (int index = 0, i = 0; i < CHANNEL_COUNT; i++) { + for (int j = i; j < CHANNEL_COUNT; j++, index++) { + float term = diff[i] * diff[j] * matrix[index]; + + value += i == j ? 
term : 2 * term; + } + } + + return gmm[MATRIX_COMPONENT_COUNT] * expf(-0.5 * value); +} + +__global__ void GMMDataTermKernel(const float* image, const float* gmm, float* output, int element_count) { + int batch_index = blockIdx.z; + + const float* g_batch_image = image + batch_index * element_count * CHANNEL_COUNT; + const float* g_batch_gmm = gmm + batch_index * GMM_COUNT * GMM_COMPONENT_COUNT; + float* g_batch_output = output + batch_index * element_count * MIXTURE_COUNT; + + int index = blockIdx.x * blockDim.x + threadIdx.x; + + if (index >= element_count) + return; + + float feature[CHANNEL_COUNT]; + + for (int i = 0; i < CHANNEL_COUNT; i++) { + feature[i] = g_batch_image[index + i * element_count]; + } + + float weights[MIXTURE_COUNT]; + float weight_total = 0.0f; + + for (int i = 0; i < MIXTURE_COUNT; i++) { + float mixture_weight = 0.0f; + + for (int j = 0; j < MIXTURE_SIZE; j++) { + mixture_weight += GMMTerm(feature, &g_batch_gmm[(MIXTURE_COUNT * j + i) * GMM_COMPONENT_COUNT]); + } + + weights[i] = mixture_weight; + weight_total += mixture_weight; + } + + for (int i = 0; i < MIXTURE_COUNT; i++) { + // protecting against pixels with 0 in all mixtures + float final_weight = weight_total > 0.0f ? weights[i] / weight_total : 0.0f; + g_batch_output[index + i * element_count] = final_weight; + } +} + +#define THREADS 512 +#define WARPS 16 +#define BLOCK (WARPS << 5) +#define LOAD 4 + +void GMMInitialize( + const float* image, + int* alpha, + float* gmm, + float* scratch_mem, + unsigned int batch_count, + unsigned int element_count) { + unsigned int block_count = TILE(element_count, BLOCK * LOAD); + + float* block_gmm_scratch = scratch_mem; + GMMSplit_t* gmm_split_scratch = (GMMSplit_t*)scratch_mem; + + int gmm_N = MIXTURE_COUNT * MIXTURE_SIZE; + + for (unsigned int k = MIXTURE_COUNT; k < gmm_N; k += MIXTURE_COUNT) { + for (unsigned int i = 0; i < k; ++i) { + CovarianceReductionKernel + <<>>(i, image, alpha, block_gmm_scratch, element_count); + } + + CovarianceFinalizationKernel<<>>(block_gmm_scratch, gmm, block_count); + + GMMFindSplit<<>>( + gmm_split_scratch, k / MIXTURE_COUNT, gmm); + GMMDoSplit<<>>( + gmm_split_scratch, (k / MIXTURE_COUNT) << 4, image, alpha, element_count); + } +} + +void GMMUpdate( + const float* image, + int* alpha, + float* gmm, + float* scratch_mem, + unsigned int batch_count, + unsigned int element_count) { + unsigned int block_count = TILE(element_count, BLOCK * LOAD); + + float* block_gmm_scratch = scratch_mem; + + unsigned int gmm_N = MIXTURE_COUNT * MIXTURE_SIZE; + + for (unsigned int i = 0; i < gmm_N; ++i) { + CovarianceReductionKernel + <<>>(i, image, alpha, block_gmm_scratch, element_count); + } + + CovarianceFinalizationKernel + <<>>(block_gmm_scratch, gmm, block_count); + + GMMcommonTerm<<>>(gmm); +} + +void GMMDataTerm( + const float* image, + const float* gmm, + float* output, + unsigned int batch_count, + unsigned int element_count) { + dim3 block(BLOCK_SIZE, 1); + dim3 grid(TILE(element_count, BLOCK_SIZE), 1, batch_count); + + GMMDataTermKernel<<>>(image, gmm, output, element_count); +} + +void learn_cuda( + const float* input, + const int* labels, + float* gmm, + float* scratch_memory, + unsigned int batch_count, + unsigned int element_count) { + int* alpha = (int*)scratch_memory; + float* scratch_mem = scratch_memory + batch_count * element_count; + + cudaMemcpyAsync(alpha, labels, batch_count * element_count * sizeof(int), cudaMemcpyDeviceToDevice); + + GMMInitialize(input, alpha, gmm, scratch_mem, batch_count, element_count); + 
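// Two-phase fit (descriptive note): GMMInitialize above repeatedly splits each mixture component along its largest covariance eigenvector until MIXTURE_SIZE components exist per mixture; GMMUpdate below then re-estimates all component statistics and folds the mixture weights into the common normalisation term read by GMMDataTermKernel. + 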
GMMUpdate(input, alpha, gmm, scratch_mem, batch_count, element_count); +} + +void apply_cuda( + const float* gmm, + const float* input, + float* output, + unsigned int batch_count, + unsigned int element_count) { + GMMDataTerm(input, gmm, output, batch_count, element_count); +} diff --git a/SegMamba/monai/_extensions/gmm/gmm_cuda_linalg.cuh b/SegMamba/monai/_extensions/gmm/gmm_cuda_linalg.cuh new file mode 100644 index 0000000000000000000000000000000000000000..56c7c7ccdcd53b7bb5c24dcba660af35571caa76 --- /dev/null +++ b/SegMamba/monai/_extensions/gmm/gmm_cuda_linalg.cuh @@ -0,0 +1,144 @@ +/* +Copyright (c) MONAI Consortium +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +__device__ void to_square(float in[SUB_MATRIX_COMPONENT_COUNT], float out[CHANNEL_COUNT][CHANNEL_COUNT]) { + for (int index = 0, i = 0; i < CHANNEL_COUNT; i++) { + for (int j = i; j < CHANNEL_COUNT; j++, index++) { + out[i][j] = in[index]; + out[j][i] = in[index]; + } + } +} + +__device__ void to_triangle(float in[CHANNEL_COUNT][CHANNEL_COUNT], float out[SUB_MATRIX_COMPONENT_COUNT]) { + for (int index = 0, i = 0; i < CHANNEL_COUNT; i++) { + for (int j = i; j < CHANNEL_COUNT; j++, index++) { + out[index] = in[j][i]; + } + } +} + +__device__ void cholesky(float in[CHANNEL_COUNT][CHANNEL_COUNT], float out[CHANNEL_COUNT][CHANNEL_COUNT]) { + for (int i = 0; i < CHANNEL_COUNT; i++) { + for (int j = 0; j < i + 1; j++) { + float sum = 0.0f; + + for (int k = 0; k < j; k++) { + sum += out[i][k] * out[j][k]; + } + + if (i == j) { + out[i][j] = sqrtf(in[i][i] - sum); + } else { + out[i][j] = (in[i][j] - sum) / out[j][j]; + } + } + } +} + +__device__ float chol_det(float in[CHANNEL_COUNT][CHANNEL_COUNT]) { + float det = 1.0f; + + for (int i = 0; i < CHANNEL_COUNT; i++) { + det *= in[i][i]; + } + + return det * det; +} + +__device__ void chol_inv(float in[CHANNEL_COUNT][CHANNEL_COUNT], float out[CHANNEL_COUNT][CHANNEL_COUNT]) { + // Invert cholesky matrix + for (int i = 0; i < CHANNEL_COUNT; i++) { + in[i][i] = 1.0f / (in[i][i] + 0.0001f); + + for (int j = 0; j < i; j++) { + float sum = 0.0f; + + for (int k = j; k < i; k++) { + sum += in[i][k] * in[k][j]; + } + + in[i][j] = -in[i][i] * sum; + } + } + + // Dot with transpose of self + for (int i = 0; i < CHANNEL_COUNT; i++) { + for (int j = 0; j < CHANNEL_COUNT; j++) { + out[i][j] = 0.0f; + + for (int k = max(i, j); k < CHANNEL_COUNT; k++) { + out[i][j] += in[k][i] * in[k][j]; + } + } + } +} + +__device__ void normalize(float* v) { + float norm = 0.0f; + + for (int i = 0; i < CHANNEL_COUNT; i++) { + norm += v[i] * v[i]; + } + + norm = 1.0f / sqrtf(norm); + + for (int i = 0; i < CHANNEL_COUNT; i++) { + v[i] *= norm; + } +} + +__device__ float scalar_prod(float* a, float* b) { + float product = 0.0f; + + for (int i = 0; i < CHANNEL_COUNT; i++) { + product += a[i] * b[i]; + } + + return product; +} + +__device__ void largest_eigenpair(const float* M, float* evec, float* eval) { + float scratch[CHANNEL_COUNT]; + + for (int i = 0; i < CHANNEL_COUNT; i++) { + scratch[i] = i + 1; + } + + for (int itr = 0; itr < 10; 
itr++) { + *eval = 0.0f; + + for (int i = 0; i < CHANNEL_COUNT; i++) { + int index = i; + + evec[i] = 0.0f; + + for (int j = 0; j < CHANNEL_COUNT; j++) { + evec[i] += M[index] * scratch[j]; + + if (j < i) { + index += CHANNEL_COUNT - (j + 1); + } else { + index += 1; + } + } + + *eval = max(*eval, evec[i]); + } + + for (int i = 0; i < CHANNEL_COUNT; i++) { + evec[i] /= *eval; + scratch[i] = evec[i]; + } + } +} diff --git a/SegMamba/monai/_extensions/loader.py b/SegMamba/monai/_extensions/loader.py new file mode 100644 index 0000000000000000000000000000000000000000..7affd1a3eb84ccb8fb81441e4099991b90167b78 --- /dev/null +++ b/SegMamba/monai/_extensions/loader.py @@ -0,0 +1,93 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import platform +from _thread import interrupt_main +from contextlib import contextmanager +from glob import glob +from os import path +from threading import Timer +from types import ModuleType + +import torch + +from monai.utils.module import get_torch_version_tuple, optional_import + +dir_path = path.dirname(path.realpath(__file__)) + + +@contextmanager +def timeout(time, message): + timer = None + try: + timer = Timer(time, interrupt_main) + timer.daemon = True + timer.start() + yield + except KeyboardInterrupt as e: + if timer is not None and timer.is_alive(): + raise e # interrupt from user? + raise TimeoutError(message) from e + finally: + if timer is not None: + try: + timer.cancel() + finally: + pass + + +def load_module( + module_name: str, defines: dict | None = None, verbose_build: bool = False, build_timeout: int = 300 +) -> ModuleType: + """ + Handles the loading of c++ extension modules. + + Args: + module_name: Name of the module to load. + Must match the name of the relevant source directory in the `_extensions` directory. + defines: Dictionary containing names and values of compilation defines. + verbose_build: Set to true to enable build logging. + build_timeout: Time in seconds before the build will throw an exception to prevent hanging. + """ + + # Ensuring named module exists in _extensions directory. + module_dir = path.join(dir_path, module_name) + if not path.exists(module_dir): + raise ValueError(f"No extension module named {module_name}") + + platform_str = f"_{platform.system()}_{platform.python_version()}_" + platform_str += "".join(f"{v}" for v in get_torch_version_tuple()[:2]) + # Adding configuration to module name. + if defines is not None: + module_name = "_".join([module_name] + [f"{v}" for v in defines.values()]) + + # Gathering source files. + source = glob(path.join(module_dir, "**", "*.cpp"), recursive=True) + if torch.cuda.is_available(): + source += glob(path.join(module_dir, "**", "*.cu"), recursive=True) + platform_str += f"_{torch.version.cuda}" + + # Constructing compilation argument list. + define_args = [] if not defines else [f"-D {key}={defines[key]}" for key in defines] + + # Ninja may be blocked by something out of our control. 
+ # This will error if the build takes longer than expected. + with timeout(build_timeout, "Build appears to be blocked. Is there a stopped process building the same extension?"): + load, _ = optional_import("torch.utils.cpp_extension", name="load") # main trigger some JIT config in pytorch + # This will either run the build or return the existing .so object. + name = module_name + platform_str.replace(".", "_") + module = load( + name=name, sources=source, extra_cflags=define_args, extra_cuda_cflags=define_args, verbose=verbose_build + ) + + return module # type: ignore[no-any-return] diff --git a/SegMamba/monai/_version.py b/SegMamba/monai/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..8b8b186ff8f27945b836370dae364b7ba759655c --- /dev/null +++ b/SegMamba/monai/_version.py @@ -0,0 +1,657 @@ + +# This file helps to compute a version number in source trees obtained from +# git-archive tarball (such as those provided by githubs download-from-tag +# feature). Distribution tarballs (built by setup.py sdist) and build +# directories (produced by setup.py build) will contain a much shorter file +# that just contains the computed version number. + +# This file is released into the public domain. Generated by +# versioneer-0.23 (https://github.com/python-versioneer/python-versioneer) + +"""Git implementation of _version.py.""" + +import errno +import os +import re +import subprocess +import sys +from typing import Callable, Dict +import functools + + +def get_keywords(): + """Get the keywords needed to look up the version information.""" + # these strings will be replaced by git during git-archive. + # setup.py/versioneer.py will grep for the variable names, so they must + # each be defined on a line of their own. _version.py will just call + # get_keywords(). 
+ git_refnames = " (HEAD -> dev, refs/pull/7696/head)" + git_full = "fe733b0ff1951ee752ab87ebfe5c4b7c82d30579" + git_date = "2024-05-07 11:55:31 +0800" + keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} + return keywords + + +class VersioneerConfig: + """Container for Versioneer configuration parameters.""" + + +def get_config(): + """Create, populate and return the VersioneerConfig() object.""" + # these strings are filled in when 'setup.py versioneer' creates + # _version.py + cfg = VersioneerConfig() + cfg.VCS = "git" + cfg.style = "pep440" + cfg.tag_prefix = "" + cfg.parentdir_prefix = "" + cfg.versionfile_source = "monai/_version.py" + cfg.verbose = False + return cfg + + +class NotThisMethod(Exception): + """Exception raised if a method is not valid for the current scenario.""" + + +LONG_VERSION_PY: Dict[str, str] = {} +HANDLERS: Dict[str, Dict[str, Callable]] = {} + + +def register_vcs_handler(vcs, method): # decorator + """Create decorator to mark a method as the handler of a VCS.""" + def decorate(f): + """Store f in HANDLERS[vcs][method].""" + if vcs not in HANDLERS: + HANDLERS[vcs] = {} + HANDLERS[vcs][method] = f + return f + return decorate + + +def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, + env=None): + """Call the given command(s).""" + assert isinstance(commands, list) + process = None + + popen_kwargs = {} + if sys.platform == "win32": + # This hides the console window if pythonw.exe is used + startupinfo = subprocess.STARTUPINFO() + startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW + popen_kwargs["startupinfo"] = startupinfo + + for command in commands: + try: + dispcmd = str([command] + args) + # remember shell=False, so use git.cmd on windows, not just git + process = subprocess.Popen([command] + args, cwd=cwd, env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr + else None), **popen_kwargs) + break + except OSError: + e = sys.exc_info()[1] + if e.errno == errno.ENOENT: + continue + if verbose: + print("unable to run %s" % dispcmd) + print(e) + return None, None + else: + if verbose: + print("unable to find command, tried %s" % (commands,)) + return None, None + stdout = process.communicate()[0].strip().decode() + if process.returncode != 0: + if verbose: + print("unable to run %s (error)" % dispcmd) + print("stdout was %s" % stdout) + return None, process.returncode + return stdout, process.returncode + + +def versions_from_parentdir(parentdir_prefix, root, verbose): + """Try to determine the version from the parent directory name. + + Source tarballs conventionally unpack into a directory that includes both + the project name and a version string. We will also support searching up + two directory levels for an appropriately named parent directory + """ + rootdirs = [] + + for _ in range(3): + dirname = os.path.basename(root) + if dirname.startswith(parentdir_prefix): + return {"version": dirname[len(parentdir_prefix):], + "full-revisionid": None, + "dirty": False, "error": None, "date": None} + rootdirs.append(root) + root = os.path.dirname(root) # up a level + + if verbose: + print("Tried directories %s but none started with prefix %s" % + (str(rootdirs), parentdir_prefix)) + raise NotThisMethod("rootdir doesn't start with parentdir_prefix") + + +@register_vcs_handler("git", "get_keywords") +def git_get_keywords(versionfile_abs): + """Extract version information from the given file.""" + # the code embedded in _version.py can just fetch the value of these + # keywords. 
When used from setup.py, we don't want to import _version.py, + # so we do it with a regexp instead. This function is not used from + # _version.py. + keywords = {} + try: + with open(versionfile_abs, "r") as fobj: + for line in fobj: + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + if line.strip().startswith("git_date ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["date"] = mo.group(1) + except OSError: + pass + return keywords + + +@register_vcs_handler("git", "keywords") +def git_versions_from_keywords(keywords, tag_prefix, verbose): + """Get version information from git keywords.""" + if "refnames" not in keywords: + raise NotThisMethod("Short version file found") + date = keywords.get("date") + if date is not None: + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + + # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant + # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 + # -like" string, which we must then edit to make compliant), because + # it's been around since git-1.5.3, and it's too difficult to + # discover which version we're using, or to work around using an + # older one. + date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + refnames = keywords["refnames"].strip() + if refnames.startswith("$Format"): + if verbose: + print("keywords are unexpanded, not using") + raise NotThisMethod("unexpanded keywords, not a git-archive tarball") + refs = {r.strip() for r in refnames.strip("()").split(",")} + # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of + # just "foo-1.0". If we see a "tag: " prefix, prefer those. + TAG = "tag: " + tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} + if not tags: + # Either we're using git < 1.8.3, or there really are no tags. We use + # a heuristic: assume all version tags have a digit. The old git %d + # expansion behaves like git log --decorate=short and strips out the + # refs/heads/ and refs/tags/ prefixes that would let us distinguish + # between branches and tags. By ignoring refnames without digits, we + # filter out many common branch names like "release" and + # "stabilization", as well as "HEAD" and "master". + tags = {r for r in refs if re.search(r'\d', r)} + if verbose: + print("discarding '%s', no digits" % ",".join(refs - tags)) + if verbose: + print("likely tags: %s" % ",".join(sorted(tags))) + for ref in sorted(tags): + # sorting will prefer e.g. 
"2.0" over "2.0rc1" + if ref.startswith(tag_prefix): + r = ref[len(tag_prefix):] + # Filter out refs that exactly match prefix or that don't start + # with a number once the prefix is stripped (mostly a concern + # when prefix is '') + if not re.match(r'\d', r): + continue + if verbose: + print("picking %s" % r) + return {"version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": None, + "date": date} + # no suitable tags, so version is "0+unknown", but full hex is still there + if verbose: + print("no suitable tags, using unknown + full revision id") + return {"version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": "no suitable tags", "date": None} + + +@register_vcs_handler("git", "pieces_from_vcs") +def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): + """Get version from 'git describe' in the root of the source tree. + + This only gets called if the git-archive 'subst' keywords were *not* + expanded, and _version.py hasn't already been rewritten with a short + version string, meaning we're inside a checked out source tree. + """ + GITS = ["git"] + if sys.platform == "win32": + GITS = ["git.cmd", "git.exe"] + + # GIT_DIR can interfere with correct operation of Versioneer. + # It may be intended to be passed to the Versioneer-versioned project, + # but that should not change where we get our version from. + env = os.environ.copy() + env.pop("GIT_DIR", None) + runner = functools.partial(runner, env=env) + + _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, + hide_stderr=True) + if rc != 0: + if verbose: + print("Directory %s not under git control" % root) + raise NotThisMethod("'git rev-parse --git-dir' returned error") + + # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] + # if there isn't one, this yields HEX[-dirty] (no NUM) + describe_out, rc = runner(GITS, [ + "describe", "--tags", "--dirty", "--always", "--long", + "--match", f"{tag_prefix}[[:digit:]]*" + ], cwd=root) + # --long was added in git-1.5.5 + if describe_out is None: + raise NotThisMethod("'git describe' failed") + describe_out = describe_out.strip() + full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) + if full_out is None: + raise NotThisMethod("'git rev-parse' failed") + full_out = full_out.strip() + + pieces = {} + pieces["long"] = full_out + pieces["short"] = full_out[:7] # maybe improved later + pieces["error"] = None + + branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], + cwd=root) + # --abbrev-ref was added in git-1.6.3 + if rc != 0 or branch_name is None: + raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") + branch_name = branch_name.strip() + + if branch_name == "HEAD": + # If we aren't exactly on a branch, pick a branch which represents + # the current commit. If all else fails, we are on a branchless + # commit. + branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) + # --contains was added in git-1.5.4 + if rc != 0 or branches is None: + raise NotThisMethod("'git branch --contains' returned error") + branches = branches.split("\n") + + # Remove the first line if we're running detached + if "(" in branches[0]: + branches.pop(0) + + # Strip off the leading "* " from the list of branches. + branches = [branch[2:] for branch in branches] + if "master" in branches: + branch_name = "master" + elif not branches: + branch_name = None + else: + # Pick the first branch that is returned. Good or bad. 
+ branch_name = branches[0] + + pieces["branch"] = branch_name + + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] + # TAG might have hyphens. + git_describe = describe_out + + # look for -dirty suffix + dirty = git_describe.endswith("-dirty") + pieces["dirty"] = dirty + if dirty: + git_describe = git_describe[:git_describe.rindex("-dirty")] + + # now we have TAG-NUM-gHEX or HEX + + if "-" in git_describe: + # TAG-NUM-gHEX + mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + if not mo: + # unparsable. Maybe git-describe is misbehaving? + pieces["error"] = ("unable to parse git-describe output: '%s'" + % describe_out) + return pieces + + # tag + full_tag = mo.group(1) + if not full_tag.startswith(tag_prefix): + if verbose: + fmt = "tag '%s' doesn't start with prefix '%s'" + print(fmt % (full_tag, tag_prefix)) + pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" + % (full_tag, tag_prefix)) + return pieces + pieces["closest-tag"] = full_tag[len(tag_prefix):] + + # distance: number of commits since tag + pieces["distance"] = int(mo.group(2)) + + # commit: short hex revision ID + pieces["short"] = mo.group(3) + + else: + # HEX: no tags + pieces["closest-tag"] = None + out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) + pieces["distance"] = len(out.split()) # total number of commits + + # commit date: see ISO-8601 comment in git_versions_from_keywords() + date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + + return pieces + + +def plus_or_dot(pieces): + """Return a + if we don't already have one, else return a .""" + if "+" in pieces.get("closest-tag", ""): + return "." + return "+" + + +def render_pep440(pieces): + """Build up version string, with post-release "local version identifier". + + Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you + get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty + + Exceptions: + 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += plus_or_dot(pieces) + rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0+untagged.%d.g%s" % (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_branch(pieces): + """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . + + The ".dev0" means not master branch. Note that .dev0 sorts backwards + (a feature branch will appear "older" than the master branch). + + Exceptions: + 1: no tags. 
0[.dev0]+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+untagged.%d.g%s" % (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def pep440_split_post(ver): + """Split pep440 version string at the post-release segment. + + Returns the release segments before the post-release and the + post-release version number (or -1 if no post-release segment is present). + """ + vc = str.split(ver, ".post") + return vc[0], int(vc[1] or 0) if len(vc) == 2 else None + + +def render_pep440_pre(pieces): + """TAG[.postN.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 0.post0.devDISTANCE + """ + if pieces["closest-tag"]: + if pieces["distance"]: + # update the post release segment + tag_version, post_version = pep440_split_post(pieces["closest-tag"]) + rendered = tag_version + if post_version is not None: + rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"]) + else: + rendered += ".post0.dev%d" % (pieces["distance"]) + else: + # no commits, use the tag as the version + rendered = pieces["closest-tag"] + else: + # exception #1 + rendered = "0.post0.dev%d" % pieces["distance"] + return rendered + + +def render_pep440_post(pieces): + """TAG[.postDISTANCE[.dev0]+gHEX] . + + The ".dev0" means dirty. Note that .dev0 sorts backwards + (a dirty tree will appear "older" than the corresponding clean one), + but you shouldn't be releasing software with -dirty anyways. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%s" % pieces["short"] + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += "+g%s" % pieces["short"] + return rendered + + +def render_pep440_post_branch(pieces): + """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . + + The ".dev0" means not master branch. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%s" % pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+g%s" % pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_old(pieces): + """TAG[.postDISTANCE[.dev0]] . + + The ".dev0" means dirty. + + Exceptions: + 1: no tags. 
0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + return rendered + + +def render_git_describe(pieces): + """TAG[-DISTANCE-gHEX][-dirty]. + + Like 'git describe --tags --dirty --always'. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render_git_describe_long(pieces): + """TAG-DISTANCE-gHEX[-dirty]. + + Like 'git describe --tags --dirty --always -long'. + The distance/hash is unconditional. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render(pieces, style): + """Render the given version pieces into the requested style.""" + if pieces["error"]: + return {"version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None} + + if not style or style == "default": + style = "pep440" # the default + + if style == "pep440": + rendered = render_pep440(pieces) + elif style == "pep440-branch": + rendered = render_pep440_branch(pieces) + elif style == "pep440-pre": + rendered = render_pep440_pre(pieces) + elif style == "pep440-post": + rendered = render_pep440_post(pieces) + elif style == "pep440-post-branch": + rendered = render_pep440_post_branch(pieces) + elif style == "pep440-old": + rendered = render_pep440_old(pieces) + elif style == "git-describe": + rendered = render_git_describe(pieces) + elif style == "git-describe-long": + rendered = render_git_describe_long(pieces) + else: + raise ValueError("unknown style '%s'" % style) + + return {"version": rendered, "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], "error": None, + "date": pieces.get("date")} + + +def get_versions(): + """Get version information or return default if unable to do so.""" + # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have + # __file__, we can work backwards from there to the root. Some + # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which + # case we can only use expanded keywords. + + cfg = get_config() + verbose = cfg.verbose + + try: + return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, + verbose) + except NotThisMethod: + pass + + try: + root = os.path.realpath(__file__) + # versionfile_source is the relative path from the top of the source + # tree (where the .git directory might live) to this file. Invert + # this to find the root from __file__. 
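+ # i.e. one os.path.dirname() per path component of versionfile_source ("monai/_version.py" -> two levels up to the source root)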
+ for _ in cfg.versionfile_source.split('/'): + root = os.path.dirname(root) + except NameError: + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, + "error": "unable to find root of source tree", + "date": None} + + try: + pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) + return render(pieces, cfg.style) + except NotThisMethod: + pass + + try: + if cfg.parentdir_prefix: + return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) + except NotThisMethod: + pass + + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", "date": None} diff --git a/SegMamba/monai/apps/__init__.py b/SegMamba/monai/apps/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9cc7aeb8e052c23701c7d73a9a98dc37af1de77e --- /dev/null +++ b/SegMamba/monai/apps/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from .datasets import CrossValidation, DecathlonDataset, MedNISTDataset, TciaDataset +from .mmars import MODEL_DESC, RemoteMMARKeys, download_mmar, get_model_spec, load_from_mmar +from .utils import SUPPORTED_HASH_TYPES, check_hash, download_and_extract, download_url, extractall, get_logger, logger diff --git a/SegMamba/monai/apps/auto3dseg/__init__.py b/SegMamba/monai/apps/auto3dseg/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7096fb7520a78a02cdeffaaef1e77e246d992d21 --- /dev/null +++ b/SegMamba/monai/apps/auto3dseg/__init__.py @@ -0,0 +1,25 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +from .auto_runner import AutoRunner +from .bundle_gen import BundleAlgo, BundleGen +from .data_analyzer import DataAnalyzer +from .ensemble_builder import ( + AlgoEnsemble, + AlgoEnsembleBestByFold, + AlgoEnsembleBestN, + AlgoEnsembleBuilder, + EnsembleRunner, +) +from .hpo_gen import NNIGen, OptunaGen +from .utils import export_bundle_algo_history, get_name_from_algo_id, import_bundle_algo_history diff --git a/SegMamba/monai/apps/auto3dseg/__main__.py b/SegMamba/monai/apps/auto3dseg/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..dfc14c270b4f4edbabe5c394961d6c85ebb3271a --- /dev/null +++ b/SegMamba/monai/apps/auto3dseg/__main__.py @@ -0,0 +1,35 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from monai.apps.auto3dseg.auto_runner import AutoRunner +from monai.apps.auto3dseg.bundle_gen import BundleAlgo, BundleGen +from monai.apps.auto3dseg.data_analyzer import DataAnalyzer +from monai.apps.auto3dseg.ensemble_builder import AlgoEnsembleBuilder, EnsembleRunner +from monai.apps.auto3dseg.hpo_gen import NNIGen, OptunaGen + +if __name__ == "__main__": + from monai.utils import optional_import + + fire, _ = optional_import("fire") + fire.Fire( + { + "DataAnalyzer": DataAnalyzer, + "BundleGen": BundleGen, + "BundleAlgo": BundleAlgo, + "AlgoEnsembleBuilder": AlgoEnsembleBuilder, + "EnsembleRunner": EnsembleRunner, + "AutoRunner": AutoRunner, + "NNIGen": NNIGen, + "OptunaGen": OptunaGen, + } + ) diff --git a/SegMamba/monai/apps/auto3dseg/auto_runner.py b/SegMamba/monai/apps/auto3dseg/auto_runner.py new file mode 100644 index 0000000000000000000000000000000000000000..05c961f999805ebbc61da1df007a725062a61522 --- /dev/null +++ b/SegMamba/monai/apps/auto3dseg/auto_runner.py @@ -0,0 +1,898 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +import os +import shutil +import warnings +from copy import deepcopy +from time import sleep +from typing import Any, cast + +import torch + +from monai.apps.auto3dseg.bundle_gen import BundleGen +from monai.apps.auto3dseg.data_analyzer import DataAnalyzer +from monai.apps.auto3dseg.ensemble_builder import EnsembleRunner +from monai.apps.auto3dseg.hpo_gen import NNIGen +from monai.apps.auto3dseg.utils import export_bundle_algo_history, import_bundle_algo_history +from monai.apps.utils import get_logger +from monai.auto3dseg.utils import algo_to_pickle +from monai.bundle import ConfigParser +from monai.transforms import SaveImage +from monai.utils import AlgoKeys, has_option, look_up_option, optional_import +from monai.utils.misc import check_kwargs_exist_in_class_init, run_cmd + +logger = get_logger(module_name=__name__) + +nni, has_nni = optional_import("nni") + + +class AutoRunner: + """ + An interface for handling Auto3Dseg with minimal inputs and understanding of the internal states in Auto3Dseg. + The users can run the Auto3Dseg with default settings in one line of code. They can also customize the advanced + features Auto3Dseg in a few additional lines. Examples of customization include + + - change cross-validation folds + - change training/prediction parameters + - change ensemble methods + - automatic hyperparameter optimization. + + The output of the interface is a directory that contains + + - data statistics analysis report + - algorithm definition files (scripts, configs, pickle objects) and training results (checkpoints, accuracies) + - the predictions on the testing datasets from the final algorithm ensemble + - a copy of the input arguments in form of YAML + - cached intermediate results + + Args: + work_dir: working directory to save the intermediate and final results. + input: the configuration dictionary or the file path to the configuration in form of YAML. + The configuration should contain datalist, dataroot, modality, multigpu, and class_names info. + algos: optionally specify algorithms to use. If a dictionary, must be in the form + {"algname": dict(_target_="algname.scripts.algo.AlgnameAlgo", template_path="algname"), ...} + If a list or a string, defines a subset of names of the algorithms to use, e.g. 'segresnet' or + ['segresnet', 'dints'] out of the full set of algorithm templates provided by templates_path_or_url. + Defaults to None, to use all available algorithms. + analyze: on/off switch to run DataAnalyzer and generate a datastats report. Defaults to None, to automatically + decide based on cache, and run data analysis only if we have not completed this step yet. + algo_gen: on/off switch to run AlgoGen and generate templated BundleAlgos. Defaults to None, to automatically + decide based on cache, and run algorithm folders generation only if we have not completed this step yet. + train: on/off switch to run training and generate algorithm checkpoints. Defaults to None, to automatically + decide based on cache, and run training only if we have not completed this step yet. + hpo: use hyperparameter optimization (HPO) in the training phase. Users can provide a list of + hyper-parameter and a search will be performed to investigate the algorithm performances. + hpo_backend: a string that indicates the backend of the HPO. Currently, only NNI Grid-search mode + is supported + ensemble: on/off switch to run model ensemble and use the ensemble to predict outputs in testing + datasets. 
+ not_use_cache: if the value is True, it will ignore all cached results in data analysis, + algorithm generation, or training, and start the pipeline from scratch. + templates_path_or_url: the folder with the algorithm templates or a url. If None provided, the default template + zip url will be downloaded and extracted into the work_dir. + allow_skip: a switch passed to BundleGen process which determines if some Algo in the default templates + can be skipped based on the analysis on the dataset from Auto3DSeg DataAnalyzer. + mlflow_tracking_uri: a tracking URI for MLflow server which could be local directory or address of the remote + tracking Server; MLflow runs will be recorded locally in algorithms' model folder if the value is None. + mlflow_experiment_name: the name of the experiment in MLflow server. + kwargs: image writing parameters for the ensemble inference. The kwargs format follows the SaveImage + transform. For more information, check https://docs.monai.io/en/stable/transforms.html#saveimage. + + + Examples: + - User can use the one-liner to start the Auto3Dseg workflow + + .. code-block:: bash + + python -m monai.apps.auto3dseg AutoRunner run --input \ + '{"modality": "ct", "datalist": "dl.json", "dataroot": "/dr", "multigpu": true, "class_names": ["A", "B"]}' + + - User can also save the input dictionary as a input YAML file and use the following one-liner + + .. code-block:: bash + + python -m monai.apps.auto3dseg AutoRunner run --input=./input.yaml + + - User can specify work_dir and data source config input and run AutoRunner: + + .. code-block:: python + + work_dir = "./work_dir" + input = "path/to/input_yaml" + runner = AutoRunner(work_dir=work_dir, input=input) + runner.run() + + - User can specify a subset of algorithms to use and run AutoRunner: + + .. code-block:: python + + work_dir = "./work_dir" + input = "path/to/input_yaml" + algos = ["segresnet", "dints"] + runner = AutoRunner(work_dir=work_dir, input=input, algos=algos) + runner.run() + + - User can specify a local folder with algorithms templates and run AutoRunner: + + .. code-block:: python + + work_dir = "./work_dir" + input = "path/to/input_yaml" + algos = "segresnet" + templates_path_or_url = "./local_path_to/algorithm_templates" + runner = AutoRunner(work_dir=work_dir, input=input, algos=algos, templates_path_or_url=templates_path_or_url) + runner.run() + + - User can specify training parameters by: + + .. code-block:: python + + input = "path/to/input_yaml" + runner = AutoRunner(input=input) + train_param = { + "num_epochs_per_validation": 1, + "num_images_per_batch": 2, + "num_epochs": 2, + } + runner.set_training_params(params=train_param) # 2 epochs + runner.run() + + - User can specify the fold number of cross validation + + .. code-block:: python + + input = "path/to/input_yaml" + runner = AutoRunner(input=input) + runner.set_num_fold(n_fold = 2) + runner.run() + + - User can specify the prediction parameters during algo ensemble inference: + + .. code-block:: python + + input = "path/to/input_yaml" + pred_params = { + 'files_slices': slice(0,2), + 'mode': "vote", + 'sigmoid': True, + } + runner = AutoRunner(input=input) + runner.set_prediction_params(params=pred_params) + runner.run() + + - User can define a grid search space and use the HPO during training. + + .. 
code-block:: python + + input = "path/to/input_yaml" + runner = AutoRunner(input=input, hpo=True) + runner.set_nni_search_space({"learning_rate": {"_type": "choice", "_value": [0.0001, 0.001, 0.01, 0.1]}}) + runner.run() + + Notes: + Expected results in the work_dir as below:: + + work_dir/ + ├── algorithm_templates # bundle algo templates (scripts/configs) + ├── cache.yaml # Autorunner will automatically cache results to save time + ├── datastats.yaml # datastats of the dataset + ├── dints_0 # network scripts/configs/checkpoints and pickle object of the algo + ├── ensemble_output # the prediction of testing datasets from the ensemble of the algos + ├── input.yaml # copy of the input data source configs + ├── segresnet_0 # network scripts/configs/checkpoints and pickle object of the algo + ├── segresnet2d_0 # network scripts/configs/checkpoints and pickle object of the algo + └── swinunetr_0 # network scripts/configs/checkpoints and pickle object of the algo + + """ + + analyze_params: dict | None + + def __init__( + self, + work_dir: str = "./work_dir", + input: dict[str, Any] | str | None = None, + algos: dict | list | str | None = None, + analyze: bool | None = None, + algo_gen: bool | None = None, + train: bool | None = None, + hpo: bool = False, + hpo_backend: str = "nni", + ensemble: bool = True, + not_use_cache: bool = False, + templates_path_or_url: str | None = None, + allow_skip: bool = True, + mlflow_tracking_uri: str | None = None, + mlflow_experiment_name: str | None = None, + **kwargs: Any, + ): + if input is None and os.path.isfile(os.path.join(os.path.abspath(work_dir), "input.yaml")): + input = os.path.join(os.path.abspath(work_dir), "input.yaml") + logger.info(f"Input config is not provided, using the default {input}") + + self.data_src_cfg = dict() + if isinstance(input, dict): + self.data_src_cfg = input + elif isinstance(input, str) and os.path.isfile(input): + self.data_src_cfg = ConfigParser.load_config_file(input) + logger.info(f"Loading input config {input}") + else: + raise ValueError(f"{input} is not a valid file or dict") + + if "work_dir" in self.data_src_cfg: # override from config + work_dir = self.data_src_cfg["work_dir"] + self.work_dir = os.path.abspath(work_dir) + + logger.info(f"AutoRunner using work directory {self.work_dir}") + os.makedirs(self.work_dir, exist_ok=True) + self.data_src_cfg_name = os.path.join(self.work_dir, "input.yaml") + + self.algos = algos + self.templates_path_or_url = templates_path_or_url + self.allow_skip = allow_skip + + # cache.yaml + self.not_use_cache = not_use_cache + self.cache_filename = os.path.join(self.work_dir, "cache.yaml") + self.cache = self.read_cache() + self.export_cache() + + # determine if we need to analyze, algo_gen or train from cache, unless manually provided + self.analyze = not self.cache["analyze"] if analyze is None else analyze + self.algo_gen = not self.cache["algo_gen"] if algo_gen is None else algo_gen + self.train = train + self.ensemble = ensemble # last step, no need to check + self.hpo = hpo and has_nni + self.hpo_backend = hpo_backend + self.mlflow_tracking_uri = mlflow_tracking_uri + self.mlflow_experiment_name = mlflow_experiment_name + self.kwargs = deepcopy(kwargs) + + # parse input config for AutoRunner param overrides + for param in [ + "analyze", + "algo_gen", + "train", + "hpo", + "ensemble", + "not_use_cache", + "allow_skip", + ]: # override from config + if param in self.data_src_cfg and isinstance(self.data_src_cfg[param], bool): + setattr(self, param, self.data_src_cfg[param]) # 
e.g. self.analyze = self.data_src_cfg["analyze"] + + for param in [ + "algos", + "hpo_backend", + "templates_path_or_url", + "mlflow_tracking_uri", + "mlflow_experiment_name", + ]: # override from config + if param in self.data_src_cfg: + setattr(self, param, self.data_src_cfg[param]) # e.g. self.algos = self.data_src_cfg["algos"] + + missing_keys = {"dataroot", "datalist", "modality"}.difference(self.data_src_cfg.keys()) + if len(missing_keys) > 0: + raise ValueError(f"Config keys are missing {missing_keys}") + + if not os.path.exists(self.data_src_cfg["datalist"]): + raise ValueError(f"Datalist file is not found {self.data_src_cfg['datalist']}") + + # copy datalist to work_dir + datalist_filename = os.path.join(self.work_dir, os.path.basename(self.data_src_cfg["datalist"])) + if datalist_filename != self.data_src_cfg["datalist"]: + try: + shutil.copyfile(self.data_src_cfg["datalist"], datalist_filename) + logger.info(f"Datalist was copied to work_dir: {datalist_filename}") + except shutil.SameFileError: + pass + + # inspect and update folds + self.max_fold = self.inspect_datalist_folds(datalist_filename=datalist_filename) + if "num_fold" in self.data_src_cfg: + num_fold = int(self.data_src_cfg["num_fold"]) # override from config + logger.info(f"Setting num_fold {num_fold} based on the input config.") + else: + num_fold = self.max_fold + logger.info(f"Setting num_fold {num_fold} based on the input datalist {datalist_filename}.") + + self.data_src_cfg["datalist"] = datalist_filename # update path to a version in work_dir and save user input + ConfigParser.export_config_file( + config=self.data_src_cfg, filepath=self.data_src_cfg_name, fmt="yaml", sort_keys=False + ) + + self.dataroot = self.data_src_cfg["dataroot"] + self.datastats_filename = os.path.join(self.work_dir, "datastats.yaml") + self.datalist_filename = datalist_filename + + self.set_training_params() + self.set_device_info() + self.set_prediction_params() + self.set_analyze_params() + self.set_ensemble_method() + self.set_num_fold(num_fold=num_fold) + + self.gpu_customization = False + self.gpu_customization_specs: dict[str, Any] = {} + + # hpo + if self.hpo_backend.lower() != "nni": + raise NotImplementedError("HPOGen backend only supports NNI") + self.hpo = self.hpo and has_nni + self.set_hpo_params() + self.search_space: dict[str, dict[str, Any]] = {} + self.hpo_tasks = 0 + + if "sigmoid" not in self.kwargs and "sigmoid" in self.data_src_cfg: + self.kwargs["sigmoid"] = self.data_src_cfg["sigmoid"] + + def read_cache(self): + """ + Check if the intermediate result is cached after each step in the current working directory + + Returns: + a dict of cache results. If not_use_cache is set to True, or there is no cache file in the + working directory, the result will be ``empty_cache`` in which all ``has_cache`` keys are + set to False. 
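+            For illustration, a fresh working directory (or ``not_use_cache=True``) returns the
+            ``empty_cache`` defaults defined in the implementation below:
+
+            .. code-block:: python
+
+                {"analyze": False, "datastats": None, "algo_gen": False, "train": False}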
+ """ + + empty_cache = {"analyze": False, "datastats": None, "algo_gen": False, "train": False} + + if self.not_use_cache or not os.path.isfile(self.cache_filename): + return empty_cache + + cache = ConfigParser.load_config_file(self.cache_filename) + + for k, v in empty_cache.items(): + cache.setdefault(k, v) + + if cache["analyze"]: + if not (isinstance(cache["datastats"], str) and os.path.isfile(cache["datastats"])): + cache["analyze"] = False + cache["datastats"] = None + + if cache["algo_gen"]: + history = import_bundle_algo_history(self.work_dir, only_trained=False) + if len(history) == 0: # no saved algo_objects + cache["algo_gen"] = False + + if cache["train"]: + trained_history = import_bundle_algo_history(self.work_dir, only_trained=True) + if len(trained_history) == 0: + cache["train"] = False + + return cache + + def export_cache(self, **kwargs): + """ + Save the cache state as ``cache.yaml`` in the working directory + """ + self.cache.update(kwargs) + ConfigParser.export_config_file( + self.cache, self.cache_filename, fmt="yaml", default_flow_style=None, sort_keys=False + ) + + def inspect_datalist_folds(self, datalist_filename: str) -> int: + """ + Returns number of folds in the datalist file, and assigns fold numbers if not provided. + + Args: + datalist_filename: path to the datalist file. + + Notes: + If the fold key is not provided, it auto generates 5 folds assignments in the training key list. + If validation key list is available, then it assumes a single fold validation. + """ + + datalist = ConfigParser.load_config_file(datalist_filename) + if "training" not in datalist: + raise ValueError("Datalist files has no training key:" + str(datalist_filename)) + + fold_list = [int(d["fold"]) for d in datalist["training"] if "fold" in d] + + if len(fold_list) > 0: + num_fold = max(fold_list) + 1 + logger.info(f"Found num_fold {num_fold} based on the input datalist {datalist_filename}.") + # check if every fold is present + if len(set(fold_list)) != num_fold: + raise ValueError(f"Fold numbers are not continuous from 0 to {num_fold - 1}") + elif "validation" in datalist and len(datalist["validation"]) > 0: + logger.info("No fold numbers provided, attempting to use a single fold based on the validation key") + # update the datalist file + for d in datalist["training"]: + d["fold"] = 1 + for d in datalist["validation"]: + d["fold"] = 0 + + val_labels = {d["label"]: d for d in datalist["validation"] if "label" in d} + logger.info( + f"Found {len(val_labels)} items in the validation key, saving updated datalist to", datalist_filename + ) + + # check for duplicates + for d in datalist["training"]: + if d["label"] in val_labels: + d["fold"] = 0 + del val_labels[d["label"]] + + datalist["training"] = datalist["training"] + list(val_labels.values()) + + ConfigParser.export_config_file(datalist, datalist_filename, fmt="json", indent=4) + num_fold = 1 + + else: + num_fold = 5 + + warnings.warn( + f"Datalist has no folds specified {datalist_filename}..." + f"Generating {num_fold} folds randomly." + f"Please consider presaving fold numbers beforehand for repeated experiments." 
+ ) + + from sklearn.model_selection import KFold + + kf = KFold(n_splits=num_fold, shuffle=True, random_state=0) + for i, (_, valid_idx) in enumerate(kf.split(datalist["training"])): + for vi in valid_idx: + datalist["training"][vi]["fold"] = i + + ConfigParser.export_config_file(datalist, datalist_filename, fmt="json", indent=4) + + return num_fold + + def set_gpu_customization( + self, gpu_customization: bool = False, gpu_customization_specs: dict[str, Any] | None = None + ) -> AutoRunner: + """ + Set options for GPU-based parameter customization/optimization. + + Args: + gpu_customization: the switch to determine automatically customize/optimize bundle script/config + parameters for each bundleAlgo based on gpus. Custom parameters are obtained through dummy + training to simulate the actual model training process and hyperparameter optimization (HPO) + experiments. + gpu_customization_specs (optional): the dictionary to enable users overwrite the HPO settings. user can + overwrite part of variables as follows or all of them. The structure is as follows. + + .. code-block:: python + + gpu_customization_specs = { + 'ALGO': { + 'num_trials': 6, + 'range_num_images_per_batch': [1, 20], + 'range_num_sw_batch_size': [1, 20] + } + } + + ALGO: the name of algorithm. It could be one of algorithm names (e.g., 'dints') or 'universal' which + would apply changes to all algorithms. Possible options are + + - {``"universal"``, ``"dints"``, ``"segresnet"``, ``"segresnet2d"``, ``"swinunetr"``}. + + num_trials: the number of HPO trials/experiments to run. + range_num_images_per_batch: the range of number of images per mini-batch. + range_num_sw_batch_size: the range of batch size in sliding-window inferer. + """ + self.gpu_customization = gpu_customization + if gpu_customization_specs is not None: + self.gpu_customization_specs = gpu_customization_specs + + return self + + def set_num_fold(self, num_fold: int = 5) -> AutoRunner: + """ + Set the number of cross validation folds for all algos. + + Args: + num_fold: a positive integer to define the number of folds. + """ + + if num_fold <= 0: + raise ValueError(f"num_fold is expected to be an integer greater than zero. Now it gets {num_fold}") + if num_fold > self.max_fold + 1: + # Auto3DSeg allows no validation set, so the maximum fold number is max_fold + 1 + raise ValueError( + f"num_fold is greater than the maximum fold number {self.max_fold} in {self.datalist_filename}." + ) + self.num_fold = num_fold + + return self + + def set_training_params(self, params: dict[str, Any] | None = None) -> AutoRunner: + """ + Set the training params for all algos. + + Args: + params: a dict that defines the overriding key-value pairs during training. The overriding method + is defined by the algo class. + + Examples: + For BundleAlgo objects, the training parameter to shorten the training time to a few epochs can be + {"num_epochs": 2, "num_epochs_per_validation": 1} + + """ + self.train_params = deepcopy(params) if params is not None else {} + if "CUDA_VISIBLE_DEVICES" in self.train_params: + warnings.warn( + "CUDA_VISIBLE_DEVICES is deprecated from 'set_training_params'. 
Use 'set_device_info' instead.", + DeprecationWarning, + ) + + return self + + def set_device_info( + self, + cuda_visible_devices: list[int] | str | None = None, + num_nodes: int | None = None, + mn_start_method: str | None = None, + cmd_prefix: str | None = None, + ) -> AutoRunner: + """ + Set the device related info + + Args: + cuda_visible_devices: define GPU ids for data analyzer, training, and ensembling. + List of GPU ids [0,1,2,3] or a string "0,1,2,3". + Default using env "CUDA_VISIBLE_DEVICES" or all devices available. + num_nodes: number of nodes for training and ensembling. + Default using env "NUM_NODES" or 1 if "NUM_NODES" is unset. + mn_start_method: multi-node start method. Autorunner will use the method to start multi-node processes. + Default using env "MN_START_METHOD" or 'bcprun' if "MN_START_METHOD" is unset. + cmd_prefix: command line prefix for subprocess running in BundleAlgo and EnsembleRunner. + Default using env "CMD_PREFIX" or None, examples are: + + - single GPU/CPU or multinode bcprun: "python " or "/opt/conda/bin/python3.8 ", + - single node multi-GPU running "torchrun --nnodes=1 --nproc_per_node=2 " + + If user define this prefix, please make sure --nproc_per_node matches cuda_visible_device or + os.env['CUDA_VISIBLE_DEVICES']. Also always set --nnodes=1. Set num_nodes for multi-node. + """ + self.device_setting: dict[str, Any] = {} + if cuda_visible_devices is None: + cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES") + if cuda_visible_devices is None: # still None after reading the environ + self.device_setting["CUDA_VISIBLE_DEVICES"] = ",".join([str(x) for x in range(torch.cuda.device_count())]) + self.device_setting["n_devices"] = torch.cuda.device_count() + elif isinstance(cuda_visible_devices, str): + self.device_setting["CUDA_VISIBLE_DEVICES"] = cuda_visible_devices + self.device_setting["n_devices"] = len(cuda_visible_devices.split(",")) + elif isinstance(cuda_visible_devices, (list, tuple)): + self.device_setting["CUDA_VISIBLE_DEVICES"] = ",".join([str(x) for x in cuda_visible_devices]) + self.device_setting["n_devices"] = len(cuda_visible_devices) + else: + logger.warn(f"Wrong format of cuda_visible_devices {cuda_visible_devices}, devices not set") + + if num_nodes is None: + num_nodes = int(os.environ.get("NUM_NODES", 1)) + self.device_setting["NUM_NODES"] = num_nodes + + if mn_start_method is None: + mn_start_method = os.environ.get("MN_START_METHOD", "bcprun") + self.device_setting["MN_START_METHOD"] = mn_start_method + + if cmd_prefix is None: + cmd_prefix = os.environ.get("CMD_PREFIX", "") + self.device_setting["CMD_PREFIX"] = cmd_prefix + + if cmd_prefix is not None: + logger.info(f"Using user defined command running prefix {cmd_prefix}, will override other settings") + + return self + + def set_ensemble_method(self, ensemble_method_name: str = "AlgoEnsembleBestByFold", **kwargs: Any) -> AutoRunner: + """ + Set the bundle ensemble method name and parameters for save image transform parameters. + + Args: + ensemble_method_name: the name of the ensemble method. Only two methods are supported "AlgoEnsembleBestN" + and "AlgoEnsembleBestByFold". + kwargs: the keyword arguments used to define the ensemble method. Currently only ``n_best`` for + ``AlgoEnsembleBestN`` is supported. 
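+            For example (a minimal sketch; ``n_best=2`` is an illustrative value, not a default):
+
+            .. code-block:: python
+
+                runner.set_ensemble_method("AlgoEnsembleBestN", n_best=2)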
+ """ + self.ensemble_method_name = look_up_option( + ensemble_method_name, supported=["AlgoEnsembleBestN", "AlgoEnsembleBestByFold"] + ) + self.kwargs.update(kwargs) + + return self + + def set_image_save_transform(self, **kwargs: Any) -> AutoRunner: + """ + Set the ensemble output transform. + + Args: + kwargs: image writing parameters for the ensemble inference. The kwargs format follows SaveImage + transform. For more information, check https://docs.monai.io/en/stable/transforms.html#saveimage. + + """ + + are_all_args_present, extra_args = check_kwargs_exist_in_class_init(SaveImage, kwargs) + if are_all_args_present: + self.kwargs.update(kwargs) + else: + raise ValueError( + f"{extra_args} are not supported in monai.transforms.SaveImage," + "Check https://docs.monai.io/en/stable/transforms.html#saveimage for more information." + ) + + return self + + def set_prediction_params(self, params: dict[str, Any] | None = None) -> AutoRunner: + """ + Set the prediction params for all algos. + + Args: + params: a dict that defines the overriding key-value pairs during prediction. The overriding method + is defined by the algo class. + + Examples: + + For BundleAlgo objects, this set of param will specify the algo ensemble to only inference the first + two files in the testing datalist {"file_slices": slice(0, 2)} + + """ + self.pred_params = deepcopy(params) if params is not None else {} + + return self + + def set_analyze_params(self, params: dict[str, Any] | None = None) -> AutoRunner: + """ + Set the data analysis extra params. + + Args: + params: a dict that defines the overriding key-value pairs during training. The overriding method + is defined by the algo class. + + """ + if params is None: + self.analyze_params = {"do_ccp": False, "device": "cuda"} + else: + self.analyze_params = deepcopy(params) + + return self + + def set_hpo_params(self, params: dict[str, Any] | None = None) -> AutoRunner: + """ + Set parameters for the HPO module and the algos before the training. It will attempt to (1) override bundle + templates with the key-value pairs in ``params`` (2) change the config of the HPO module (e.g. NNI) if the + key is found to be one of: + + - "trialCodeDirectory" + - "trialGpuNumber" + - "trialConcurrency" + - "maxTrialNumber" + - "maxExperimentDuration" + - "tuner" + - "trainingService" + + and (3) enable the dry-run mode if the user would generate the NNI configs without starting the NNI service. + + Args: + params: a dict that defines the overriding key-value pairs during instantiation of the algo. For + BundleAlgo, it will override the template config filling. + + Notes: + Users can set ``nni_dry_run`` to ``True`` in the ``params`` to enable the dry-run mode for the NNI backend. + + """ + self.hpo_params = self.train_params if params is None else params + + return self + + def set_nni_search_space(self, search_space: dict[str, Any]) -> AutoRunner: + """ + Set the search space for NNI parameter search. + + Args: + search_space: hyper parameter search space in the form of dict. For more information, please check + NNI documentation: https://nni.readthedocs.io/en/v2.2/Tutorial/SearchSpaceSpec.html . 
+ """ + value_combinations = 1 + for k, v in search_space.items(): + if "_value" not in v: + raise ValueError(f"{search_space} key {k} value {v} has not _value") + value_combinations *= len(v["_value"]) + + self.search_space = search_space + self.hpo_tasks = value_combinations + + return self + + def _train_algo_in_sequence(self, history: list[dict[str, Any]]) -> None: + """ + Train the Algos in a sequential scheme. The order of training is randomized. + + Args: + history: the history of generated Algos. It is a list of dicts. Each element has the task name + (e.g. "dints_0" for dints network in fold 0) as the key and the algo object as the value. + After the training, the algo object with the ``best_metric`` will be saved as a pickle file. + + Note: + The final results of the model training will be written to all the generated algorithm's output + folders under the working directory. The results include the model checkpoints, a + progress.yaml, accuracies in CSV and a pickle file of the Algo object. + """ + for algo_dict in history: + algo = algo_dict[AlgoKeys.ALGO] + if has_option(algo.train, "device_setting"): + algo.train(self.train_params, self.device_setting) + else: + algo.train(self.train_params) + acc = algo.get_score() + + algo_meta_data = {str(AlgoKeys.SCORE): acc} + algo_to_pickle(algo, template_path=algo.template_path, **algo_meta_data) + + def _train_algo_in_nni(self, history: list[dict[str, Any]]) -> None: + """ + Train the Algos using HPO. + + Args: + history: the history of generated Algos. It is a list of dicts. Each element has the task name + (e.g. "dints_0" for dints network in fold 0) as the key and the algo object as the value. + After the training, the algo object with the ``best_metric`` will be saved as a pickle file. + + Note: + The final results of the model training will not be written to all the previously generated + algorithm's output folders. Instead, HPO will generate a new algo during the searching, and + the new algo will be saved under the working directory with a different format of the name. + For example, if the searching space has "learning_rate", the result of HPO will be written to + a folder name with original task name and the param (e.g. "dints_0_learning_rate_0.001"). + The results include the model checkpoints, a progress.yaml, accuracies in CSV and a pickle + file of the Algo object. 
+ + """ + default_nni_config = { + "trialCodeDirectory": ".", + "trialGpuNumber": torch.cuda.device_count(), + "trialConcurrency": 1, + "maxTrialNumber": 10, + "maxExperimentDuration": "1h", + "tuner": {"name": "GridSearch"}, + "trainingService": {"platform": "local", "useActiveGpu": True}, + } + + last_total_tasks = len(import_bundle_algo_history(self.work_dir, only_trained=True)) + mode_dry_run = self.hpo_params.pop("nni_dry_run", False) + for algo_dict in history: + name = algo_dict[AlgoKeys.ID] + algo = algo_dict[AlgoKeys.ALGO] + nni_gen = NNIGen(algo=algo, params=self.hpo_params) + obj_filename = nni_gen.get_obj_filename() + nni_config = deepcopy(default_nni_config) + # override the default nni config with the same key in hpo_params + for key in self.hpo_params: + if key in nni_config: + nni_config[key] = self.hpo_params[key] + nni_config.update({"experimentName": name}) + nni_config.update({"search_space": self.search_space}) + trial_cmd = "python -m monai.apps.auto3dseg NNIGen run_algo " + obj_filename + " " + self.work_dir + nni_config.update({"trialCommand": trial_cmd}) + nni_config_filename = os.path.abspath(os.path.join(self.work_dir, f"{name}_nni_config.yaml")) + ConfigParser.export_config_file(nni_config, nni_config_filename, fmt="yaml", default_flow_style=None) + + max_trial = min(self.hpo_tasks, cast(int, default_nni_config["maxTrialNumber"])) + cmd = "nnictl create --config " + nni_config_filename + " --port 8088" + + if mode_dry_run: + logger.info(f"AutoRunner HPO is in dry-run mode. Please manually launch: {cmd}") + continue + + run_cmd(cmd.split(), check=True) + + n_trainings = len(import_bundle_algo_history(self.work_dir, only_trained=True)) + while n_trainings - last_total_tasks < max_trial: + sleep(1) + n_trainings = len(import_bundle_algo_history(self.work_dir, only_trained=True)) + + cmd = "nnictl stop --all" + run_cmd(cmd.split(), check=True) + logger.info(f"NNI completes HPO on {name}") + last_total_tasks = n_trainings + + def run(self): + """ + Run the AutoRunner pipeline + """ + # step 1: data analysis + if self.analyze and self.analyze_params is not None: + logger.info("Running data analysis...") + da = DataAnalyzer( + self.datalist_filename, self.dataroot, output_path=self.datastats_filename, **self.analyze_params + ) + da.get_all_case_stats() + + da = None # type: ignore + torch.cuda.empty_cache() + + self.export_cache(analyze=True, datastats=self.datastats_filename) + else: + logger.info("Skipping data analysis...") + + # step 2: algorithm generation + if self.algo_gen: + if not os.path.isfile(self.datastats_filename): + raise ValueError( + f"Could not find the datastats file {self.datastats_filename}. " + "Possibly the required data analysis step was not completed." 
+ ) + + bundle_generator = BundleGen( + algos=self.algos, + algo_path=self.work_dir, + templates_path_or_url=self.templates_path_or_url, + data_stats_filename=self.datastats_filename, + data_src_cfg_name=self.data_src_cfg_name, + mlflow_tracking_uri=self.mlflow_tracking_uri, + mlflow_experiment_name=self.mlflow_experiment_name, + ) + + if self.gpu_customization: + bundle_generator.generate( + self.work_dir, + num_fold=self.num_fold, + gpu_customization=self.gpu_customization, + gpu_customization_specs=self.gpu_customization_specs, + allow_skip=self.allow_skip, + ) + else: + bundle_generator.generate(self.work_dir, num_fold=self.num_fold, allow_skip=self.allow_skip) + history = bundle_generator.get_history() + export_bundle_algo_history(history) + self.export_cache(algo_gen=True) + else: + logger.info("Skipping algorithm generation...") + + # step 3: algo training + auto_train_choice = self.train is None + if self.train or (auto_train_choice and not self.cache["train"]): + history = import_bundle_algo_history(self.work_dir, only_trained=False) + + if len(history) == 0: + raise ValueError( + f"Could not find training scripts in {self.work_dir}. " + "Possibly the required algorithms generation step was not completed." + ) + + if auto_train_choice: + skip_algos = [h[AlgoKeys.ID] for h in history if h[AlgoKeys.IS_TRAINED]] + if skip_algos: + logger.info( + f"Skipping already trained algos {skip_algos}." + "Set option train=True to always retrain all algos." + ) + history = [h for h in history if not h[AlgoKeys.IS_TRAINED]] + + if len(history) > 0: + if not self.hpo: + self._train_algo_in_sequence(history) + else: + self._train_algo_in_nni(history) + + self.export_cache(train=True) + else: + logger.info("Skipping algorithm training...") + + # step 4: model ensemble and write the prediction to disks. + if self.ensemble: + ensemble_runner = EnsembleRunner( + data_src_cfg_name=self.data_src_cfg_name, + work_dir=self.work_dir, + num_fold=self.num_fold, + ensemble_method_name=self.ensemble_method_name, + mgpu=int(self.device_setting["n_devices"]) > 1, + **self.kwargs, # for set_image_save_transform + **self.pred_params, + ) # for inference + ensemble_runner.run(self.device_setting) + logger.info("Auto3Dseg pipeline is completed successfully.") diff --git a/SegMamba/monai/apps/auto3dseg/bundle_gen.py b/SegMamba/monai/apps/auto3dseg/bundle_gen.py new file mode 100644 index 0000000000000000000000000000000000000000..8a54d18be7e2a4d192ef9d6a34ed56ee4fc9472b --- /dev/null +++ b/SegMamba/monai/apps/auto3dseg/bundle_gen.py @@ -0,0 +1,665 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +import importlib +import os +import re +import shutil +import subprocess +import sys +import time +import warnings +from copy import deepcopy +from pathlib import Path +from tempfile import TemporaryDirectory +from typing import Any +from urllib.parse import urlparse + +import torch + +from monai.apps import download_and_extract +from monai.apps.utils import get_logger +from monai.auto3dseg.algo_gen import Algo, AlgoGen +from monai.auto3dseg.utils import ( + _prepare_cmd_bcprun, + _prepare_cmd_default, + _prepare_cmd_torchrun, + _run_cmd_bcprun, + _run_cmd_torchrun, + algo_to_pickle, +) +from monai.bundle.config_parser import ConfigParser +from monai.config import PathLike +from monai.utils import ensure_tuple, look_up_option, run_cmd +from monai.utils.enums import AlgoKeys +from monai.utils.misc import MONAIEnvVars + +logger = get_logger(module_name=__name__) +ALGO_HASH = MONAIEnvVars.algo_hash() + +__all__ = ["BundleAlgo", "BundleGen"] + + +class BundleAlgo(Algo): + """ + An algorithm represented by a set of bundle configurations and scripts. + + ``BundleAlgo.cfg`` is a ``monai.bundle.ConfigParser`` instance. + + .. code-block:: python + + from monai.apps.auto3dseg import BundleAlgo + + data_stats_yaml = "../datastats.yaml" + algo = BundleAlgo(template_path="../algorithm_templates") + algo.set_data_stats(data_stats_yaml) + # algo.set_data_src("../data_src.json") + algo.export_to_disk(".", algo_name="segresnet2d_1") + + This class creates MONAI bundles from a directory of 'bundle template'. Different from the regular MONAI bundle + format, the bundle template may contain placeholders that must be filled using ``fill_template_config`` during + ``export_to_disk``. Then created bundle keeps the same file structure as the template. + + """ + + def __init__(self, template_path: PathLike): + """ + Create an Algo instance based on the predefined Algo template. + + Args: + template_path: path to a folder that contains the algorithm templates. + Please check https://github.com/Project-MONAI/research-contributions/tree/main/auto3dseg/algorithm_templates + + """ + + self.template_path = template_path + self.data_stats_files = "" + self.data_list_file = "" + self.mlflow_tracking_uri: str | None = None + self.mlflow_experiment_name: str | None = None + self.output_path = "" + self.name = "" + self.best_metric = None + # track records when filling template config: {"": {"": value, ...}, ...} + self.fill_records: dict = {} + # device_setting set default value and sanity check, in case device_setting not from autorunner + self.device_setting: dict[str, int | str] = { + "CUDA_VISIBLE_DEVICES": ",".join([str(x) for x in range(torch.cuda.device_count())]), + "n_devices": int(torch.cuda.device_count()), + "NUM_NODES": int(os.environ.get("NUM_NODES", 1)), + "MN_START_METHOD": os.environ.get("MN_START_METHOD", "bcprun"), + "CMD_PREFIX": os.environ.get("CMD_PREFIX", ""), + } + + def pre_check_skip_algo(self, skip_bundlegen: bool = False, skip_info: str = "") -> tuple[bool, str]: + """ + Analyse the data analysis report and check if the algorithm needs to be skipped. + This function is overriden within algo. + Args: + skip_bundlegen: skip generating bundles for this algo if true. + skip_info: info to print when skipped. + """ + return skip_bundlegen, skip_info + + def set_data_stats(self, data_stats_files: str) -> None: + """ + Set the data analysis report (generated by DataAnalyzer). 
+ + Args: + data_stats_files: path to the datastats yaml file + """ + self.data_stats_files = data_stats_files + + def set_data_source(self, data_src_cfg: str) -> None: + """ + Set the data source configuration file + + Args: + data_src_cfg: path to a configuration file (yaml) that contains datalist, dataroot, and other params. + The config will be in a form of {"modality": "ct", "datalist": "path_to_json_datalist", "dataroot": + "path_dir_data"} + """ + self.data_list_file = data_src_cfg + + def set_mlflow_tracking_uri(self, mlflow_tracking_uri: str | None) -> None: + """ + Set the tracking URI for MLflow server + + Args: + mlflow_tracking_uri: a tracking URI for MLflow server which could be local directory or address of + the remote tracking Server; MLflow runs will be recorded locally in algorithms' model folder if + the value is None. + """ + self.mlflow_tracking_uri = mlflow_tracking_uri + + def set_mlflow_experiment_name(self, mlflow_experiment_name: str | None) -> None: + """ + Set the experiment name for MLflow server + + Args: + mlflow_experiment_name: a string to specify the experiment name for MLflow server. + """ + self.mlflow_experiment_name = mlflow_experiment_name + + def fill_template_config(self, data_stats_filename: str, algo_path: str, **kwargs: Any) -> dict: + """ + The configuration files defined when constructing this Algo instance might not have a complete training + and validation pipelines. Some configuration components and hyperparameters of the pipelines depend on the + training data and other factors. This API is provided to allow the creation of fully functioning config files. + Return the records of filling template config: {"": {"": value, ...}, ...}. + + Args: + data_stats_filename: filename of the data stats report (generated by DataAnalyzer) + + Notes: + Template filling is optional. The user can construct a set of pre-filled configs without replacing values + by using the data analysis results. It is also intended to be re-implemented in subclasses of BundleAlgo + if the user wants their own way of auto-configured template filling. + """ + return {} + + def export_to_disk(self, output_path: str, algo_name: str, **kwargs: Any) -> None: + """ + Fill the configuration templates, write the bundle (configs + scripts) to folder `output_path/algo_name`. + + Args: + output_path: Path to export the 'scripts' and 'configs' directories. + algo_name: the identifier of the algorithm (usually contains the name and extra info like fold ID). + kwargs: other parameters, including: "copy_dirs=True/False" means whether to copy the template as output + instead of inplace operation, "fill_template=True/False" means whether to fill the placeholders + in the template. other parameters are for `fill_template_config` function. + + """ + if kwargs.pop("copy_dirs", True): + self.output_path = os.path.join(output_path, algo_name) + os.makedirs(self.output_path, exist_ok=True) + if os.path.isdir(self.output_path): + shutil.rmtree(self.output_path) + # copy algorithm_templates/ to the working directory output_path + shutil.copytree(os.path.join(str(self.template_path), self.name), self.output_path) + else: + self.output_path = str(self.template_path) + if kwargs.pop("fill_template", True): + self.fill_records = self.fill_template_config(self.data_stats_files, self.output_path, **kwargs) + logger.info(f"Generated:{self.output_path}") + + def _create_cmd(self, train_params: None | dict = None) -> tuple[str, str]: + """ + Create the command to execute training. 
+ + """ + if train_params is None: + train_params = {} + params = deepcopy(train_params) + + train_py = os.path.join(self.output_path, "scripts", "train.py") + config_dir = os.path.join(self.output_path, "configs") + + config_files = [] + if os.path.isdir(config_dir): + for file in sorted(os.listdir(config_dir)): + if file.endswith("yaml") or file.endswith("json"): + # Python Fire may be confused by single-quoted WindowsPath + config_files.append(Path(os.path.join(config_dir, file)).as_posix()) + + if int(self.device_setting["NUM_NODES"]) > 1: + # multi-node command + # only bcprun is supported for now + try: + look_up_option(self.device_setting["MN_START_METHOD"], ["bcprun"]) + except ValueError as err: + raise NotImplementedError( + f"{self.device_setting['MN_START_METHOD']} is not supported yet." + "Try modify BundleAlgo._create_cmd for your cluster." + ) from err + + return ( + _prepare_cmd_bcprun( + f"{train_py} run", + cmd_prefix=f"{self.device_setting['CMD_PREFIX']}", + config_file=config_files, + **params, + ), + "", + ) + elif int(self.device_setting["n_devices"]) > 1: + return _prepare_cmd_torchrun(f"{train_py} run", config_file=config_files, **params), "" + else: + return ( + _prepare_cmd_default( + f"{train_py} run", + cmd_prefix=f"{self.device_setting['CMD_PREFIX']}", + config_file=config_files, + **params, + ), + "", + ) + + def _run_cmd(self, cmd: str, devices_info: str = "") -> subprocess.CompletedProcess: + """ + Execute the training command with target devices information. + + """ + if devices_info: + warnings.warn(f"input devices_info {devices_info} is deprecated and ignored.") + + ps_environ = os.environ.copy() + ps_environ["CUDA_VISIBLE_DEVICES"] = str(self.device_setting["CUDA_VISIBLE_DEVICES"]) + + # delete pattern "VAR=VALUE" at the beginning of the string, with optional leading/trailing whitespaces + cmd = re.sub(r"^\s*\w+=.*?\s+", "", cmd) + + if int(self.device_setting["NUM_NODES"]) > 1: + try: + look_up_option(self.device_setting["MN_START_METHOD"], ["bcprun"]) + except ValueError as err: + raise NotImplementedError( + f"{self.device_setting['MN_START_METHOD']} is not supported yet." + "Try modify BundleAlgo._run_cmd for your cluster." + ) from err + + return _run_cmd_bcprun(cmd, n=self.device_setting["NUM_NODES"], p=self.device_setting["n_devices"]) + elif int(self.device_setting["n_devices"]) > 1: + return _run_cmd_torchrun( + cmd, nnodes=1, nproc_per_node=self.device_setting["n_devices"], env=ps_environ, check=True + ) + else: + return run_cmd(cmd.split(), run_cmd_verbose=True, env=ps_environ, check=True) + + def train( + self, train_params: None | dict = None, device_setting: None | dict = None + ) -> subprocess.CompletedProcess: + """ + Load the run function in the training script of each model. Training parameter is predefined by the + algo_config.yaml file, which is pre-filled by the fill_template_config function in the same instance. + + Args: + train_params: training parameters + device_setting: device related settings, should follow the device_setting in auto_runner.set_device_info. + 'CUDA_VISIBLE_DEVICES' should be a string e.g. 
'0,1,2,3' + """ + if device_setting is not None: + self.device_setting.update(device_setting) + self.device_setting["n_devices"] = len(str(self.device_setting["CUDA_VISIBLE_DEVICES"]).split(",")) + + if train_params is not None and "CUDA_VISIBLE_DEVICES" in train_params: + warnings.warn("CUDA_VISIBLE_DEVICES is deprecated from train_params!") + train_params.pop("CUDA_VISIBLE_DEVICES") + + cmd, _unused_return = self._create_cmd(train_params) + return self._run_cmd(cmd) + + def get_score(self, *args, **kwargs): + """ + Returns validation scores of the model trained by the current Algo. + """ + config_yaml = os.path.join(self.output_path, "configs", "hyper_parameters.yaml") + parser = ConfigParser() + parser.read_config(config_yaml) + ckpt_path = parser.get_parsed_content("ckpt_path", default=self.output_path) + + dict_file = ConfigParser.load_config_file(os.path.join(ckpt_path, "progress.yaml")) + # dict_file: a list of scores saved in the form of dict in progress.yaml + return dict_file[-1]["best_avg_dice_score"] # the last one is the best one + + def get_inferer(self, *args, **kwargs): + """ + Load the InferClass from the infer.py. The InferClass should be defined in the template under the path of + `"scripts/infer.py"`. It is required to define the "InferClass" (name is fixed) with two functions at least + (``__init__`` and ``infer``). The init class has an override kwargs that can be used to override parameters in + the run-time optionally. + + Examples: + + .. code-block:: python + + class InferClass + def __init__(self, config_file: Optional[Union[str, Sequence[str]]] = None, **override): + # read configs from config_file (sequence) + # set up transforms + # set up model + # set up other hyper parameters + return + + @torch.no_grad() + def infer(self, image_file): + # infer the model and save the results to output + return output + + """ + infer_py = os.path.join(self.output_path, "scripts", "infer.py") + if not os.path.isfile(infer_py): + raise ValueError(f"{infer_py} is not found, please check the path.") + + config_dir = os.path.join(self.output_path, "configs") + configs_path = [os.path.join(config_dir, f) for f in os.listdir(config_dir)] + + spec = importlib.util.spec_from_file_location("InferClass", infer_py) + infer_class = importlib.util.module_from_spec(spec) # type: ignore + sys.modules["InferClass"] = infer_class + spec.loader.exec_module(infer_class) # type: ignore + return infer_class.InferClass(configs_path, *args, **kwargs) + + def predict(self, predict_files: list, predict_params: dict | None = None) -> list: + """ + Use the trained model to predict the outputs with a given input image. + + Args: + predict_files: a list of paths to files to run inference on ["path_to_image_1", "path_to_image_2"] + predict_params: a dict to override the parameters in the bundle config (including the files to predict). 
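+            For example (file paths are placeholders):
+
+            .. code-block:: python
+
+                predictions = algo.predict(["path_to_image_1", "path_to_image_2"])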
+ + """ + params = {} if predict_params is None else deepcopy(predict_params) + inferer = self.get_inferer(**params) + return [inferer.infer(f) for f in ensure_tuple(predict_files)] + + def get_output_path(self): + """Returns the algo output paths to find the algo scripts and configs.""" + return self.output_path + + +# path to download the algo_templates +default_algo_zip = ( + f"https://github.com/Project-MONAI/research-contributions/releases/download/algo_templates/{ALGO_HASH}.tar.gz" +) + +# default algorithms +default_algos = { + "segresnet2d": dict(_target_="segresnet2d.scripts.algo.Segresnet2dAlgo"), + "dints": dict(_target_="dints.scripts.algo.DintsAlgo"), + "swinunetr": dict(_target_="swinunetr.scripts.algo.SwinunetrAlgo"), + "segresnet": dict(_target_="segresnet.scripts.algo.SegresnetAlgo"), +} + + +def _download_algos_url(url: str, at_path: str) -> dict[str, dict[str, str]]: + """ + Downloads the algorithm templates release archive, and extracts it into a parent directory of the at_path folder. + Returns a dictionary of the algorithm templates. + """ + at_path = os.path.abspath(at_path) + zip_download_dir = TemporaryDirectory() + algo_compressed_file = os.path.join(zip_download_dir.name, "algo_templates.tar.gz") + + download_attempts = 3 + for i in range(download_attempts): + try: + download_and_extract(url=url, filepath=algo_compressed_file, output_dir=os.path.dirname(at_path)) + except Exception as e: + msg = f"Download and extract of {url} failed, attempt {i+1}/{download_attempts}." + if i < download_attempts - 1: + warnings.warn(msg) + time.sleep(i) + else: + zip_download_dir.cleanup() + raise ValueError(msg) from e + else: + break + + zip_download_dir.cleanup() + + algos_all = deepcopy(default_algos) + for name in algos_all: + algos_all[name]["template_path"] = at_path + + return algos_all + + +def _copy_algos_folder(folder, at_path): + """ + Copies the algorithm templates folder to at_path. + Returns a dictionary of algorithm templates. + """ + folder = os.path.abspath(folder) + at_path = os.path.abspath(at_path) + + if folder != at_path: + if os.path.exists(at_path): + shutil.rmtree(at_path) + shutil.copytree(folder, at_path) + + algos_all = {} + for name in os.listdir(at_path): + if os.path.exists(os.path.join(folder, name, "scripts", "algo.py")): + algos_all[name] = dict(_target_=f"{name}.scripts.algo.{name.capitalize()}Algo", template_path=at_path) + logger.info(f"Copying template: {name} -- {algos_all[name]}") + if not algos_all: + raise ValueError(f"Unable to find any algos in {folder}") + + return algos_all + + +class BundleGen(AlgoGen): + """ + This class generates a set of bundles according to the cross-validation folds, each of them can run independently. + + Args: + algo_path: the directory path to save the algorithm templates. Default is the current working dir. + algos: If dictionary, it outlines the algorithm to use. If a list or a string, defines a subset of names of + the algorithms to use, e.g. ('segresnet', 'dints') out of the full set of algorithm templates provided + by templates_path_or_url. Defaults to None - to use all available algorithms. + templates_path_or_url: the folder with the algorithm templates or a url. If None provided, the default template + zip url will be downloaded and extracted into the algo_path. The current default options are released at: + https://github.com/Project-MONAI/research-contributions/tree/main/auto3dseg. + data_stats_filename: the path to the data stats file (generated by DataAnalyzer). 
+ data_src_cfg_name: the path to the data source config YAML file. The config will be in a form of + {"modality": "ct", "datalist": "path_to_json_datalist", "dataroot": "path_dir_data"}. + mlflow_tracking_uri: a tracking URI for MLflow server which could be local directory or address of + the remote tracking Server; MLflow runs will be recorded locally in algorithms' model folder if + the value is None. + mlfow_experiment_name: a string to specify the experiment name for MLflow server. + .. code-block:: bash + + python -m monai.apps.auto3dseg BundleGen generate --data_stats_filename="../algorithms/datastats.yaml" + """ + + def __init__( + self, + algo_path: str = ".", + algos: dict | list | str | None = None, + templates_path_or_url: str | None = None, + data_stats_filename: str | None = None, + data_src_cfg_name: str | None = None, + mlflow_tracking_uri: str | None = None, + mlflow_experiment_name: str | None = None, + ): + if algos is None or isinstance(algos, (list, tuple, str)): + if templates_path_or_url is None: + templates_path_or_url = default_algo_zip + + at_path = os.path.join(os.path.abspath(algo_path), "algorithm_templates") + + if os.path.isdir(templates_path_or_url): + # if a local folder, copy if necessary + logger.info(f"BundleGen from directory {templates_path_or_url}") + algos_all = _copy_algos_folder(folder=templates_path_or_url, at_path=at_path) + elif urlparse(templates_path_or_url).scheme in ("http", "https"): + # if url, trigger the download and extract process + logger.info(f"BundleGen from {templates_path_or_url}") + algos_all = _download_algos_url(url=templates_path_or_url, at_path=at_path) + else: + raise ValueError(f"{self.__class__} received invalid templates_path_or_url: {templates_path_or_url}") + + if algos is not None: + algos = {k: v for k, v in algos_all.items() if k in ensure_tuple(algos)} # keep only provided + if len(algos) == 0: + raise ValueError(f"Unable to find provided algos in {algos_all}") + else: + algos = algos_all + + self.algos: Any = [] + if isinstance(algos, dict): + for algo_name, algo_params in sorted(algos.items()): + template_path = algo_params.get("template_path", ".") + if len(template_path) > 0 and template_path not in sys.path: + sys.path.append(template_path) + + try: + onealgo = ConfigParser(algo_params).get_parsed_content() + onealgo.name = algo_name + self.algos.append(onealgo) + except RuntimeError as e: + msg = """Please make sure the folder structure of an Algo Template follows + [algo_name] + ├── configs + │ ├── hyper_parameters.yaml # automatically generated yaml from a set of ``template_configs`` + └── scripts + ├── test.py + ├── __init__.py + └── validate.py + """ + raise RuntimeError(msg) from e + else: + raise ValueError("Unexpected error algos is not a dict") + + self.data_stats_filename = data_stats_filename + self.data_src_cfg_name = data_src_cfg_name + self.mlflow_tracking_uri = mlflow_tracking_uri + self.mlflow_experiment_name = mlflow_experiment_name + self.history: list[dict] = [] + + def set_data_stats(self, data_stats_filename: str) -> None: + """ + Set the data stats filename + + Args: + data_stats_filename: filename of datastats + """ + self.data_stats_filename = data_stats_filename + + def get_data_stats(self): + """Get the filename of the data stats""" + return self.data_stats_filename + + def set_data_src(self, data_src_cfg_name): + """ + Set the data source filename + + Args: + data_src_cfg_name: filename of data_source file + """ + self.data_src_cfg_name = data_src_cfg_name + + def get_data_src(self): + 
"""Get the data source filename""" + return self.data_src_cfg_name + + def set_mlflow_tracking_uri(self, mlflow_tracking_uri): + """ + Set the tracking URI for MLflow server + + Args: + mlflow_tracking_uri: a tracking URI for MLflow server which could be local directory or address of + the remote tracking Server; MLflow runs will be recorded locally in algorithms' model folder if + the value is None. + """ + self.mlflow_tracking_uri = mlflow_tracking_uri + + def set_mlflow_experiment_name(self, mlflow_experiment_name): + """ + Set the experiment name for MLflow server + + Args: + mlflow_experiment_name: a string to specify the experiment name for MLflow server. + """ + self.mlflow_experiment_name = mlflow_experiment_name + + def get_mlflow_tracking_uri(self): + """Get the tracking URI for MLflow server""" + return self.mlflow_tracking_uri + + def get_mlflow_experiment_name(self): + """Get the experiment name for MLflow server""" + return self.mlflow_experiment_name + + def get_history(self) -> list: + """Get the history of the bundleAlgo object with their names/identifiers""" + return self.history + + def generate( + self, + output_folder: str = ".", + num_fold: int = 5, + gpu_customization: bool = False, + gpu_customization_specs: dict[str, Any] | None = None, + allow_skip: bool = True, + ) -> None: + """ + Generate the bundle scripts/configs for each bundleAlgo + + Args: + output_folder: the output folder to save each algorithm. + num_fold: the number of cross validation fold. + gpu_customization: the switch to determine automatically customize/optimize bundle script/config + parameters for each bundleAlgo based on gpus. Custom parameters are obtained through dummy + training to simulate the actual model training process and hyperparameter optimization (HPO) + experiments. + gpu_customization_specs: the dictionary to enable users overwrite the HPO settings. user can + overwrite part of variables as follows or all of them. The structure is as follows. + allow_skip: a switch to determine if some Algo in the default templates can be skipped based on the + analysis on the dataset from Auto3DSeg DataAnalyzer. + + .. code-block:: python + + gpu_customization_specs = { + 'ALGO': { + 'num_trials': 6, + 'range_num_images_per_batch': [1, 20], + 'range_num_sw_batch_size': [1, 20] + } + } + + ALGO: the name of algorithm. It could be one of algorithm names (e.g., 'dints') or 'universal' which + would apply changes to all algorithms. Possible options are + + - {``"universal"``, ``"dints"``, ``"segresnet"``, ``"segresnet2d"``, ``"swinunetr"``}. + + num_trials: the number of HPO trials/experiments to run. + range_num_images_per_batch: the range of number of images per mini-batch. + range_num_sw_batch_size: the range of batch size in sliding-window inferer. + """ + fold_idx = list(range(num_fold)) + for algo in self.algos: + for f_id in ensure_tuple(fold_idx): + data_stats = self.get_data_stats() + data_src_cfg = self.get_data_src() + mlflow_tracking_uri = self.get_mlflow_tracking_uri() + mlflow_experiment_name = self.get_mlflow_experiment_name() + gen_algo = deepcopy(algo) + gen_algo.set_data_stats(data_stats) + gen_algo.set_data_source(data_src_cfg) + gen_algo.set_mlflow_tracking_uri(mlflow_tracking_uri) + gen_algo.set_mlflow_experiment_name(mlflow_experiment_name) + name = f"{gen_algo.name}_{f_id}" + + if allow_skip: + skip_bundlegen, skip_info = gen_algo.pre_check_skip_algo() + if skip_bundlegen: + logger.info(f"{name} is skipped! 
{skip_info}") + continue + + if gpu_customization: + gen_algo.export_to_disk( + output_folder, + name, + fold=f_id, + gpu_customization=True, + gpu_customization_specs=gpu_customization_specs, + ) + else: + gen_algo.export_to_disk(output_folder, name, fold=f_id) + + algo_to_pickle(gen_algo, template_path=algo.template_path) + self.history.append( + {AlgoKeys.ID: name, AlgoKeys.ALGO: gen_algo} + ) # track the previous, may create a persistent history diff --git a/SegMamba/monai/apps/auto3dseg/data_analyzer.py b/SegMamba/monai/apps/auto3dseg/data_analyzer.py new file mode 100644 index 0000000000000000000000000000000000000000..15e56abfea4c7e37148402e378368afae24f948a --- /dev/null +++ b/SegMamba/monai/apps/auto3dseg/data_analyzer.py @@ -0,0 +1,386 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import warnings +from os import path +from typing import Any, cast + +import numpy as np +import torch +from torch.multiprocessing import get_context + +from monai.apps.auto3dseg.transforms import EnsureSameShaped +from monai.apps.utils import get_logger +from monai.auto3dseg import SegSummarizer +from monai.auto3dseg.utils import datafold_read +from monai.bundle import config_parser +from monai.bundle.config_parser import ConfigParser +from monai.data import DataLoader, Dataset, partition_dataset +from monai.data.utils import no_collation +from monai.transforms import Compose, EnsureTyped, LoadImaged, Orientationd +from monai.utils import ImageMetaKey, StrEnum, min_version, optional_import +from monai.utils.enums import DataStatsKeys, ImageStatsKeys + + +def strenum_representer(dumper, data): + return dumper.represent_scalar("tag:yaml.org,2002:str", data.value) + + +if optional_import("yaml")[1]: + config_parser.yaml.SafeDumper.add_multi_representer(StrEnum, strenum_representer) + +tqdm, has_tqdm = optional_import("tqdm", "4.47.0", min_version, "tqdm") +logger = get_logger(module_name=__name__) + +__all__ = ["DataAnalyzer"] + + +class DataAnalyzer: + """ + The DataAnalyzer automatically analyzes given medical image dataset and reports the statistics. + The module expects file paths to the image data and utilizes the LoadImaged transform to read the + files, which supports nii, nii.gz, png, jpg, bmp, npz, npy, and dcm formats. Currently, only + segmentation task is supported, so the user needs to provide paths to the image and label files + (if have). Also, label data format is preferred to be (1,H,W,D), with the label index in the + first dimension. If it is in onehot format, it will be converted to the preferred format. + + Args: + datalist: a Python dictionary storing group, fold, and other information of the medical + image dataset, or a string to the JSON file storing the dictionary. + dataroot: user's local directory containing the datasets. + output_path: path to save the analysis result. + average: whether to average the statistical value across different image modalities. 
+ do_ccp: apply the connected component algorithm to process the labels/images + device: a string specifying hardware (CUDA/CPU) utilized for the operations. + worker: number of workers to use for loading datasets in each GPU/CPU sub-process. + image_key: a string that user specify for the image. The DataAnalyzer will look it up in the + datalist to locate the image files of the dataset. + label_key: a string that user specify for the label. The DataAnalyzer will look it up in the + datalist to locate the label files of the dataset. If label_key is NoneType or "None", + the DataAnalyzer will skip looking for labels and all label-related operations. + hist_bins: bins to compute histogram for each image channel. + hist_range: ranges to compute histogram for each image channel. + fmt: format used to save the analysis results. Currently support ``"json"`` and ``"yaml"``, defaults to "yaml". + histogram_only: whether to only compute histograms. Defaults to False. + extra_params: other optional arguments. Currently supported arguments are : + 'allowed_shape_difference' (default 5) can be used to change the default tolerance of + the allowed shape differences between the image and label items. In case of shape mismatch below + the tolerance, the label image will be resized to match the image using nearest interpolation. + + + Examples: + .. code-block:: python + + from monai.apps.auto3dseg.data_analyzer import DataAnalyzer + + datalist = { + "testing": [{"image": "image_003.nii.gz"}], + "training": [ + {"fold": 0, "image": "image_001.nii.gz", "label": "label_001.nii.gz"}, + {"fold": 0, "image": "image_002.nii.gz", "label": "label_002.nii.gz"}, + {"fold": 1, "image": "image_001.nii.gz", "label": "label_001.nii.gz"}, + {"fold": 1, "image": "image_004.nii.gz", "label": "label_004.nii.gz"}, + ], + } + + dataroot = '/datasets' # the directory where you have the image files (nii.gz) + DataAnalyzer(datalist, dataroot) + + Notes: + The module can also be called from the command line interface (CLI). + + For example: + + .. 
code-block:: bash + + python -m monai.apps.auto3dseg \\ + DataAnalyzer \\ + get_all_case_stats \\ + --datalist="my_datalist.json" \\ + --dataroot="my_dataroot_dir" + + """ + + def __init__( + self, + datalist: str | dict, + dataroot: str = "", + output_path: str = "./datastats.yaml", + average: bool = True, + do_ccp: bool = False, + device: str | torch.device = "cuda", + worker: int = 4, + image_key: str = "image", + label_key: str | None = "label", + hist_bins: list | int | None = 0, + hist_range: list | None = None, + fmt: str = "yaml", + histogram_only: bool = False, + **extra_params: Any, + ): + if path.isfile(output_path): + warnings.warn(f"File {output_path} already exists and will be overwritten.") + logger.debug(f"{output_path} will be overwritten by a new datastat.") + + self.datalist = datalist + self.dataroot = dataroot + self.output_path = output_path + self.average = average + self.do_ccp = do_ccp + self.device = torch.device(device) + self.worker = worker + self.image_key = image_key + self.label_key = None if label_key == "None" else label_key + self.hist_bins = hist_bins + self.hist_range: list = [-500, 500] if hist_range is None else hist_range + self.fmt = fmt + self.histogram_only = histogram_only + self.extra_params = extra_params + + @staticmethod + def _check_data_uniformity(keys: list[str], result: dict) -> bool: + """ + Check data uniformity since DataAnalyzer provides no support to multi-modal images with different + affine matrices/spacings due to monai transforms. + + Args: + keys: a list of string-type keys under image_stats dictionary. + + Returns: + False if one of the selected key values is not constant across the dataset images. + + """ + + if DataStatsKeys.SUMMARY not in result or DataStatsKeys.IMAGE_STATS not in result[DataStatsKeys.SUMMARY]: + return True + constant_props = [result[DataStatsKeys.SUMMARY][DataStatsKeys.IMAGE_STATS][key] for key in keys] + for prop in constant_props: + if "stdev" in prop and np.any(prop["stdev"]): + logger.debug(f"summary image_stats {prop} has non-zero stdev {prop['stdev']}.") + return False + + return True + + def get_all_case_stats(self, key="training", transform_list=None): + """ + Get all case stats. Caller of the DataAnalyser class. The function initiates multiple GPU or CPU processes of the internal + _get_all_case_stats functions, which iterates datalist and call SegSummarizer to generate stats for each case. + After all case stats are generated, SegSummarizer is called to combine results. + + Args: + key: dataset key + transform_list: option list of transforms before SegSummarizer + + Returns: + A data statistics dictionary containing + "stats_summary" (summary statistics of the entire datasets). Within stats_summary + there are "image_stats" (summarizing info of shape, channel, spacing, and etc + using operations_summary), "image_foreground_stats" (info of the intensity for the + non-zero labeled voxels), and "label_stats" (info of the labels, pixel percentage, + image_intensity, and each individual label in a list) + "stats_by_cases" (List type value. Each element of the list is statistics of + an image-label info. 
Within each element, there are: "image" (value is the + path to an image), "label" (value is the path to the corresponding label), "image_stats" + (summarizing info of shape, channel, spacing, and etc using operations), + "image_foreground_stats" (similar to the previous one but one foreground image), and + "label_stats" (stats of the individual labels ) + + Notes: + Since the backend of the statistics computation are torch/numpy, nan/inf value + may be generated and carried over in the computation. In such cases, the output + dictionary will include .nan/.inf in the statistics. + + """ + result: dict[DataStatsKeys, Any] = {DataStatsKeys.SUMMARY: {}, DataStatsKeys.BY_CASE: []} + result_bycase: dict[DataStatsKeys, Any] = {DataStatsKeys.SUMMARY: {}, DataStatsKeys.BY_CASE: []} + if self.device.type == "cpu": + nprocs = 1 + logger.info("Using CPU for data analyzing!") + else: + nprocs = torch.cuda.device_count() + logger.info(f"Found {nprocs} GPUs for data analyzing!") + if nprocs > 1: + tmp_ctx: Any = get_context("forkserver") + with tmp_ctx.Manager() as manager: + manager_list = manager.list() + processes = [] + for rank in range(nprocs): + p = tmp_ctx.Process( + target=self._get_all_case_stats, args=(rank, nprocs, manager_list, key, transform_list) + ) + processes.append(p) + for p in processes: + p.start() + for p in processes: + p.join() + # merge DataStatsKeys.BY_CASE + for _ in manager_list: + result_bycase[DataStatsKeys.BY_CASE].extend(_[DataStatsKeys.BY_CASE]) + else: + result_bycase = self._get_all_case_stats(0, 1, None, key, transform_list) + + summarizer = SegSummarizer( + self.image_key, + self.label_key, + average=self.average, + do_ccp=self.do_ccp, + hist_bins=self.hist_bins, + hist_range=self.hist_range, + histogram_only=self.histogram_only, + ) + n_cases = len(result_bycase[DataStatsKeys.BY_CASE]) + result[DataStatsKeys.SUMMARY] = summarizer.summarize(cast(list, result_bycase[DataStatsKeys.BY_CASE])) + result[DataStatsKeys.SUMMARY]["n_cases"] = n_cases + result_bycase[DataStatsKeys.SUMMARY] = result[DataStatsKeys.SUMMARY] + if not self._check_data_uniformity([ImageStatsKeys.SPACING], result): + logger.info("Data spacing is not completely uniform. MONAI transforms may provide unexpected result") + if self.output_path: + logger.info(f"Writing data stats to {self.output_path}.") + ConfigParser.export_config_file( + result, self.output_path, fmt=self.fmt, default_flow_style=None, sort_keys=False + ) + by_case_path = self.output_path.replace(f".{self.fmt}", f"_by_case.{self.fmt}") + if by_case_path == self.output_path: # self.output_path not ended with self.fmt? + by_case_path += f".by_case.{self.fmt}" + logger.info(f"Writing by-case data stats to {by_case_path}, this may take a while.") + ConfigParser.export_config_file( + result_bycase, by_case_path, fmt=self.fmt, default_flow_style=None, sort_keys=False + ) + # release memory + if self.device.type == "cuda": + # release unreferenced tensors to mitigate OOM + # limitation: https://github.com/pytorch/pytorch/issues/12873#issuecomment-482916237 + torch.cuda.empty_cache() + result[DataStatsKeys.BY_CASE] = result_bycase[DataStatsKeys.BY_CASE] + return result + + def _get_all_case_stats( + self, + rank: int = 0, + world_size: int = 1, + manager_list: list | None = None, + key: str = "training", + transform_list: list | None = None, + ) -> Any: + """ + Get all case stats from a partitioned datalist. The function can only be called internally by get_all_case_stats. 
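+
+        A minimal sketch of how the cases are split across ranks (``partition_dataset`` is the MONAI
+        utility used below; the file names are placeholders):
+
+        .. code-block:: python
+
+            from monai.data import partition_dataset
+
+            files = [{"image": f"img_{i}.nii.gz"} for i in range(5)]
+            # with world_size=2, rank 0 analyzes the first partition and rank 1 the remainder
+            rank0_files = partition_dataset(data=files, num_partitions=2)[0]
+            rank1_files = partition_dataset(data=files, num_partitions=2)[1]
+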
+ Args: + rank: GPU process rank, 0 for CPU process + world_size: total number of GPUs, 1 for CPU process + manager_list: multiprocessing manager list object, if using multi-GPU. + key: dataset key + transform_list: option list of transforms before SegSummarizer + """ + summarizer = SegSummarizer( + self.image_key, + self.label_key, + average=self.average, + do_ccp=self.do_ccp, + hist_bins=self.hist_bins, + hist_range=self.hist_range, + histogram_only=self.histogram_only, + ) + keys = list(filter(None, [self.image_key, self.label_key])) + if transform_list is None: + transform_list = [ + LoadImaged(keys=keys, ensure_channel_first=True, image_only=True), + EnsureTyped(keys=keys, data_type="tensor", dtype=torch.float), + Orientationd(keys=keys, axcodes="RAS"), + ] + if self.label_key is not None: + allowed_shape_difference = self.extra_params.pop("allowed_shape_difference", 5) + transform_list.append( + EnsureSameShaped( + keys=self.label_key, + source_key=self.image_key, + allowed_shape_difference=allowed_shape_difference, + ) + ) + + transform = Compose(transform_list) + files, _ = datafold_read(datalist=self.datalist, basedir=self.dataroot, fold=-1, key=key) + if world_size <= len(files): + files = partition_dataset(data=files, num_partitions=world_size)[rank] + else: + files = partition_dataset(data=files, num_partitions=len(files))[rank] if rank < len(files) else [] + dataset = Dataset(data=files, transform=transform) + dataloader = DataLoader( + dataset, + batch_size=1, + shuffle=False, + num_workers=self.worker, + collate_fn=no_collation, + pin_memory=self.device.type == "cuda", + ) + result_bycase: dict[DataStatsKeys, Any] = {DataStatsKeys.SUMMARY: {}, DataStatsKeys.BY_CASE: []} + device = self.device if self.device.type == "cpu" else torch.device("cuda", rank) + if device.type == "cuda" and not (torch.cuda.is_available() and torch.cuda.device_count() > 0): + logger.info(f"device={device} but CUDA device is not available, using CPU instead.") + device = torch.device("cpu") + if not has_tqdm: + warnings.warn("tqdm is not installed. not displaying the caching progress.") + + for batch_data in tqdm(dataloader) if (has_tqdm and rank == 0) else dataloader: + batch_data = batch_data[0] + try: + batch_data[self.image_key] = batch_data[self.image_key].to(device) + _label_argmax = False + if self.label_key is not None: + label = batch_data[self.label_key] + label = torch.argmax(label, dim=0) if label.shape[0] > 1 else label[0] + _label_argmax = True # track if label is argmaxed + batch_data[self.label_key] = label.to(device) + d = summarizer(batch_data) + except BaseException as err: + if "image_meta_dict" in batch_data.keys(): + filename = batch_data["image_meta_dict"][ImageMetaKey.FILENAME_OR_OBJ] + else: + filename = batch_data[self.image_key].meta[ImageMetaKey.FILENAME_OR_OBJ] + logger.info(f"Unable to process data {filename} on {device}. {err}") + if self.device.type == "cuda": + logger.info("DataAnalyzer `device` set to GPU execution hit an exception. Falling back to `cpu`.") + try: + batch_data[self.image_key] = batch_data[self.image_key].to("cpu") + if self.label_key is not None: + label = batch_data[self.label_key] + if not _label_argmax: + label = torch.argmax(label, dim=0) if label.shape[0] > 1 else label[0] + batch_data[self.label_key] = label.to("cpu") + d = summarizer(batch_data) + except BaseException as err: + logger.info(f"Unable to process data {filename} on {device}. 
{err}") + continue + else: + continue + + stats_by_cases = { + DataStatsKeys.BY_CASE_IMAGE_PATH: d[DataStatsKeys.BY_CASE_IMAGE_PATH], + DataStatsKeys.BY_CASE_LABEL_PATH: d[DataStatsKeys.BY_CASE_LABEL_PATH], + } + if not self.histogram_only: + stats_by_cases[DataStatsKeys.IMAGE_STATS] = d[DataStatsKeys.IMAGE_STATS] + if self.hist_bins != 0: + stats_by_cases[DataStatsKeys.IMAGE_HISTOGRAM] = d[DataStatsKeys.IMAGE_HISTOGRAM] + + if self.label_key is not None: + stats_by_cases.update( + { + DataStatsKeys.FG_IMAGE_STATS: d[DataStatsKeys.FG_IMAGE_STATS], + DataStatsKeys.LABEL_STATS: d[DataStatsKeys.LABEL_STATS], + } + ) + result_bycase[DataStatsKeys.BY_CASE].append(stats_by_cases) + if manager_list is None: + return result_bycase + else: + manager_list.append(result_bycase) diff --git a/SegMamba/monai/apps/auto3dseg/ensemble_builder.py b/SegMamba/monai/apps/auto3dseg/ensemble_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..b2bea806deace6b0b1d8ae6dc14e20de752cb2fe --- /dev/null +++ b/SegMamba/monai/apps/auto3dseg/ensemble_builder.py @@ -0,0 +1,660 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import os +from abc import ABC, abstractmethod +from collections.abc import Mapping, Sequence +from copy import deepcopy +from typing import Any, cast +from warnings import warn + +import numpy as np +import torch +import torch.distributed as dist + +from monai.apps.auto3dseg.bundle_gen import BundleAlgo +from monai.apps.auto3dseg.utils import get_name_from_algo_id, import_bundle_algo_history +from monai.apps.utils import get_logger +from monai.auto3dseg import concat_val_to_np +from monai.auto3dseg.utils import ( + _prepare_cmd_bcprun, + _prepare_cmd_torchrun, + _run_cmd_bcprun, + _run_cmd_torchrun, + datafold_read, +) +from monai.bundle import ConfigParser +from monai.data import partition_dataset +from monai.transforms import MeanEnsemble, SaveImage, VoteEnsemble +from monai.utils import RankFilter +from monai.utils.enums import AlgoKeys +from monai.utils.misc import check_kwargs_exist_in_class_init, prob2class +from monai.utils.module import look_up_option, optional_import + +tqdm, has_tqdm = optional_import("tqdm", name="tqdm") + +logger = get_logger(module_name=__name__) + + +class AlgoEnsemble(ABC): + """ + The base class of Ensemble methods + """ + + def __init__(self): + self.algos = [] + self.mode = "mean" + self.infer_files = [] + self.algo_ensemble = [] + + def set_algos(self, infer_algos): + """ + Register model in the ensemble + """ + self.algos = deepcopy(infer_algos) + + def get_algo(self, identifier): + """ + Get a model by identifier. + + Args: + identifier: the name of the bundleAlgo + """ + for algo in self.algos: + if identifier == algo[AlgoKeys.ID]: + return algo + + def get_algo_ensemble(self): + """ + Get the algo ensemble after ranking or a empty list if ranking was not started. 
+ + Returns: + A list of Algo + """ + return self.algo_ensemble + + def set_infer_files(self, dataroot: str, data_list_or_path: str | list, data_key: str = "testing") -> None: + """ + Set the files to perform model inference. + + Args: + dataroot: the path of the files + data_list_or_path: the data source file path + """ + + self.infer_files = [] + + if isinstance(data_list_or_path, list): + self.infer_files = data_list_or_path + elif isinstance(data_list_or_path, str): + datalist = ConfigParser.load_config_file(data_list_or_path) + if data_key in datalist: + self.infer_files, _ = datafold_read(datalist=datalist, basedir=dataroot, fold=-1, key=data_key) + elif not hasattr(self, "rank") or self.rank == 0: + logger.info(f"Datalist file has no testing key - {data_key}. No data for inference is specified") + + else: + raise ValueError("Unsupported parameter type") + + def ensemble_pred(self, preds, sigmoid=False): + """ + ensemble the results using either "mean" or "vote" method + + Args: + preds: a list of probability prediction in Tensor-Like format. + sigmoid: use the sigmoid function to threshold probability one-hot map, + otherwise argmax is used. Defaults to False + + Returns: + a tensor which is the ensembled prediction. + """ + + if any(not p.is_cuda for p in preds): + preds = [p.cpu() for p in preds] # ensure CPU if at least one is on CPU + + if self.mode == "mean": + prob = MeanEnsemble()(preds) + return prob2class(cast(torch.Tensor, prob), dim=0, keepdim=True, sigmoid=sigmoid) + elif self.mode == "vote": + classes = [prob2class(p, dim=0, keepdim=True, sigmoid=sigmoid) for p in preds] + if sigmoid: + return VoteEnsemble()(classes) # do not specify num_classes for one-hot encoding + else: + return VoteEnsemble(num_classes=preds[0].shape[0])(classes) + + def _apply_algo_specific_param(self, algo_spec_param: dict, param: dict, algo_name: str) -> dict: + """ + Apply the model-specific params to the prediction params based on the name of the Algo. + + Args: + algo_spec_param: a dict that has structure of {"": ""}. + param: the prediction params to override. + algo_name: name of the Algo + + Returns: + param after being updated with the model-specific param + """ + _param_to_override = deepcopy(algo_spec_param) + _param = deepcopy(param) + for k, v in _param_to_override.items(): + if k.lower() == algo_name.lower(): + _param.update(v) + return _param + + def __call__(self, pred_param: dict | None = None) -> list: + """ + Use the ensembled model to predict result. + + Args: + pred_param: prediction parameter dictionary. The key has two groups: the first one will be consumed + in this function, and the second group will be passed to the `InferClass` to override the + parameters of the class functions. + The first group contains: + + - ``"infer_files"``: file paths to the images to read in a list. + - ``"files_slices"``: a value type of `slice`. The files_slices will slice the ``"infer_files"`` and + only make prediction on the infer_files[file_slices]. + - ``"mode"``: ensemble mode. Currently "mean" and "vote" (majority voting) schemes are supported. + - ``"image_save_func"``: a dictionary used to instantiate the ``SaveImage`` transform. When specified, + the ensemble prediction will save the prediction files, instead of keeping the files in the memory. + Example: `{"_target_": "SaveImage", "output_dir": "./"}` + - ``"sigmoid"``: use the sigmoid function (e.g. x > 0.5) to convert the prediction probability map + to the label class prediction, otherwise argmax(x) is used. 
+ - ``"algo_spec_params"``: a dictionary to add pred_params that are specific to a model. + The dict has a format of {"": ""}. + + The parameters in the second group is defined in the ``config`` of each Algo templates. Please check: + https://github.com/Project-MONAI/research-contributions/tree/main/auto3dseg/algorithm_templates + + Returns: + A list of tensors or file paths, depending on whether ``"image_save_func"`` is set. + """ + param = {} if pred_param is None else deepcopy(pred_param) + files = self.infer_files + + if "infer_files" in param: + files = param.pop("infer_files") + + if "files_slices" in param: + slices = param.pop("files_slices") + files = files[slices] + + if "mode" in param: + mode = param.pop("mode") + self.mode = look_up_option(mode, supported=["mean", "vote"]) + + sigmoid = param.pop("sigmoid", False) + + if "image_save_func" in param: + img_saver = ConfigParser(param["image_save_func"]).get_parsed_content() + + algo_spec_params = param.pop("algo_spec_params", {}) + + outputs = [] + for _, file in ( + enumerate(tqdm(files, desc="Ensembling (rank 0)...")) + if has_tqdm and pred_param and pred_param.get("rank", 0) == 0 + else enumerate(files) + ): + preds = [] + for algo in self.algo_ensemble: + infer_algo_name = get_name_from_algo_id(algo[AlgoKeys.ID]) + infer_instance = algo[AlgoKeys.ALGO] + _param = self._apply_algo_specific_param(algo_spec_params, param, infer_algo_name) + pred = infer_instance.predict(predict_files=[file], predict_params=_param) + preds.append(pred[0]) + if "image_save_func" in param: + try: + ensemble_preds = self.ensemble_pred(preds, sigmoid=sigmoid) + except BaseException: + ensemble_preds = self.ensemble_pred([_.to("cpu") for _ in preds], sigmoid=sigmoid) + res = img_saver(ensemble_preds) + # res is the path to the saved results + if hasattr(res, "meta") and "saved_to" in res.meta.keys(): + res = res.meta["saved_to"] + else: + warn("Image save path not returned.") + res = None + else: + warn("Prediction returned in list instead of disk, provide image_save_func to avoid out of memory.") + res = self.ensemble_pred(preds, sigmoid=sigmoid) + outputs.append(res) + return outputs + + @abstractmethod + def collect_algos(self, *args, **kwargs): + raise NotImplementedError + + +class AlgoEnsembleBestN(AlgoEnsemble): + """ + Ensemble method that select N model out of all using the models' best_metric scores + + Args: + n_best: number of models to pick for ensemble (N). + """ + + def __init__(self, n_best: int = 5): + super().__init__() + self.n_best = n_best + + def sort_score(self): + """ + Sort the best_metrics + """ + scores = concat_val_to_np(self.algos, [AlgoKeys.SCORE]) + return np.argsort(scores).tolist() + + def collect_algos(self, n_best: int = -1) -> None: + """ + Rank the algos by finding the top N (n_best) validation scores. + """ + + if n_best <= 0: + n_best = self.n_best + + ranks = self.sort_score() + if len(ranks) < n_best: + warn(f"Found {len(ranks)} available algos (pre-defined n_best={n_best}). All {len(ranks)} will be used.") + n_best = len(ranks) + + # get the ranks for which the indices are lower than N-n_best + indices = [r for (i, r) in enumerate(ranks) if i < (len(ranks) - n_best)] + + # remove the found indices + indices = sorted(indices, reverse=True) + + self.algo_ensemble = deepcopy(self.algos) + for idx in indices: + if idx < len(self.algo_ensemble): + self.algo_ensemble.pop(idx) + + +class AlgoEnsembleBestByFold(AlgoEnsemble): + """ + Ensemble method that select the best models that are the tops in each fold. 
+ + Args: + n_fold: number of cross-validation folds used in training + """ + + def __init__(self, n_fold: int = 5): + super().__init__() + self.n_fold = n_fold + + def collect_algos(self) -> None: + """ + Rank the algos by finding the best model in each cross-validation fold + """ + + self.algo_ensemble = [] + for f_idx in range(self.n_fold): + best_score = -1.0 + best_model: BundleAlgo | None = None + for algo in self.algos: + # algorithm folder: {net}_{fold_index}_{other} + identifier = algo[AlgoKeys.ID].split("_")[1] + try: + algo_id = int(identifier) + except ValueError as err: + raise ValueError(f"model identifier {identifier} is not number.") from err + if algo_id == f_idx and algo[AlgoKeys.SCORE] > best_score: + best_model = algo + best_score = algo[AlgoKeys.SCORE] + self.algo_ensemble.append(best_model) + + +class AlgoEnsembleBuilder: + """ + Build ensemble workflow from configs and arguments. + + Args: + history: a collection of trained bundleAlgo algorithms. + data_src_cfg_name: filename of the data source. + + Examples: + + .. code-block:: python + + builder = AlgoEnsembleBuilder(history, data_src_cfg) + builder.set_ensemble_method(BundleAlgoEnsembleBestN(3)) + ensemble = builder.get_ensemble() + + """ + + def __init__(self, history: Sequence[dict[str, Any]], data_src_cfg_name: str | None = None): + self.infer_algos: list[dict[AlgoKeys, Any]] = [] + self.ensemble: AlgoEnsemble + self.data_src_cfg = ConfigParser(globals=False) + + if data_src_cfg_name is not None and os.path.exists(str(data_src_cfg_name)): + self.data_src_cfg.read_config(data_src_cfg_name) + + for algo_dict in history: + # load inference_config_paths + + name = algo_dict[AlgoKeys.ID] + gen_algo = algo_dict[AlgoKeys.ALGO] + + best_metric = gen_algo.get_score() + algo_path = gen_algo.output_path + infer_path = os.path.join(algo_path, "scripts", "infer.py") + + if not os.path.isdir(algo_path): + warn(f"{gen_algo.output_path} is not a directory. Please check the path.") + + if not os.path.isfile(infer_path): + warn(f"{infer_path} is not found. Please check the path.") + + self.add_inferer(name, gen_algo, best_metric) + + def add_inferer(self, identifier: str, gen_algo: BundleAlgo, best_metric: float | None = None) -> None: + """ + Add model inferer to the builder. + + Args: + identifier: name of the bundleAlgo. + gen_algo: a trained BundleAlgo model object. + best_metric: the best metric in validation of the trained model. + """ + + if best_metric is None: + raise ValueError("Feature to re-validate is to be implemented") + + algo = {AlgoKeys.ID: identifier, AlgoKeys.ALGO: gen_algo, AlgoKeys.SCORE: best_metric} + self.infer_algos.append(algo) + + def set_ensemble_method(self, ensemble: AlgoEnsemble, *args: Any, **kwargs: Any) -> None: + """ + Set the ensemble method. + + Args: + ensemble: the AlgoEnsemble to build. + """ + + ensemble.set_algos(self.infer_algos) + ensemble.collect_algos(*args, **kwargs) + ensemble.set_infer_files(self.data_src_cfg["dataroot"], self.data_src_cfg["datalist"]) + + self.ensemble = ensemble + + def get_ensemble(self): + """Get the ensemble""" + + return self.ensemble + + +class EnsembleRunner: + """ + The Runner for ensembler. It ensembles predictions and saves them to the disk with a support of using multi-GPU. + + Args: + data_src_cfg_name: filename of the data source. + work_dir: working directory to save the intermediate and final results. Default is `./work_dir`. + num_fold: number of fold. Default is 5. + ensemble_method_name: method to ensemble predictions from different model. 
Default is AlgoEnsembleBestByFold. + Supported methods: ["AlgoEnsembleBestN", "AlgoEnsembleBestByFold"]. + mgpu: if using multi-gpu. Default is True. + kwargs: additional image writing, ensembling parameters and prediction parameters for the ensemble inference. + - for image saving, please check the supported parameters in SaveImage transform. + - for prediction parameters, please check the supported parameters in the ``AlgoEnsemble`` callables. + - for ensemble parameters, please check the documentation of the selected AlgoEnsemble callable. + + Example: + + .. code-block:: python + + ensemble_runner = EnsembleRunner(data_src_cfg_name, + work_dir, + ensemble_method_name, + mgpu=device_setting['n_devices']>1, + **kwargs, + **pred_params) + ensemble_runner.run(device_setting) + + """ + + def __init__( + self, + data_src_cfg_name: str, + work_dir: str = "./work_dir", + num_fold: int = 5, + ensemble_method_name: str = "AlgoEnsembleBestByFold", + mgpu: bool = True, + **kwargs: Any, + ) -> None: + self.data_src_cfg_name = data_src_cfg_name + self.work_dir = work_dir + self.num_fold = num_fold + self.ensemble_method_name = ensemble_method_name + self.mgpu = mgpu + self.kwargs = deepcopy(kwargs) + self.rank = 0 + self.world_size = 1 + self.device_setting: dict[str, int | str] = { + "CUDA_VISIBLE_DEVICES": ",".join([str(x) for x in range(torch.cuda.device_count())]), + "n_devices": torch.cuda.device_count(), + "NUM_NODES": int(os.environ.get("NUM_NODES", 1)), + "MN_START_METHOD": os.environ.get("MN_START_METHOD", "bcprun"), + "CMD_PREFIX": os.environ.get("CMD_PREFIX", ""), + } + + def set_ensemble_method(self, ensemble_method_name: str = "AlgoEnsembleBestByFold", **kwargs: Any) -> None: + """ + Set the bundle ensemble method + + Args: + ensemble_method_name: the name of the ensemble method. Only two methods are supported "AlgoEnsembleBestN" + and "AlgoEnsembleBestByFold". + kwargs: the keyword arguments used to define the ensemble method. Currently only ``n_best`` for + ``AlgoEnsembleBestN`` is supported. + + """ + self.ensemble_method_name = look_up_option( + ensemble_method_name, supported=["AlgoEnsembleBestN", "AlgoEnsembleBestByFold"] + ) + if self.ensemble_method_name == "AlgoEnsembleBestN": + n_best = kwargs.pop("n_best", 2) + self.ensemble_method = AlgoEnsembleBestN(n_best=n_best) + elif self.ensemble_method_name == "AlgoEnsembleBestByFold": + self.ensemble_method = AlgoEnsembleBestByFold(n_fold=self.num_fold) # type: ignore + else: + raise NotImplementedError(f"Ensemble method {self.ensemble_method_name} is not implemented.") + + def _pop_kwargs_to_get_image_save_transform(self, **kwargs): + """ + Pop the kwargs used to define ImageSave class for the ensemble output. + + Args: + kwargs: image writing parameters for the ensemble inference. The kwargs format follows SaveImage + transform. For more information, check https://docs.monai.io/en/stable/transforms.html#saveimage . + + Returns: + save_image: a dictionary that can be used to instantiate a SaveImage class in ConfigParser. + """ + + output_dir = kwargs.pop("output_dir", None) + + if output_dir is None: + output_dir = os.path.join(self.work_dir, "ensemble_output") + logger.info(f"The output_dir is not specified. 
{output_dir} will be used to save ensemble predictions.") + + if not os.path.isdir(output_dir): + os.makedirs(output_dir, exist_ok=True) + logger.info(f"Directory {output_dir} is created to save ensemble predictions") + + input_yaml = ConfigParser.load_config_file(self.data_src_cfg_name) + data_root_dir = input_yaml.get("dataroot", "") + + save_image = { + "_target_": "SaveImage", + "output_dir": output_dir, + "output_postfix": kwargs.pop("output_postfix", "ensemble"), + "output_dtype": kwargs.pop("output_dtype", "$np.uint8"), + "resample": kwargs.pop("resample", False), + "print_log": False, + "savepath_in_metadict": True, + "data_root_dir": kwargs.pop("data_root_dir", data_root_dir), + "separate_folder": kwargs.pop("separate_folder", False), + } + + are_all_args_save_image, extra_args = check_kwargs_exist_in_class_init(SaveImage, kwargs) + if are_all_args_save_image: + save_image.update(kwargs) + else: + # kwargs has extra values for other purposes, for example, pred_params + for args in list(kwargs): + if args not in extra_args: + save_image.update({args: kwargs.pop(args)}) + + return save_image + + def set_image_save_transform(self, **kwargs: Any) -> None: + """ + Set the ensemble output transform. + + Args: + kwargs: image writing parameters for the ensemble inference. The kwargs format follows SaveImage + transform. For more information, check https://docs.monai.io/en/stable/transforms.html#saveimage . + + """ + are_all_args_present, extra_args = check_kwargs_exist_in_class_init(SaveImage, kwargs) + if are_all_args_present: + self.kwargs.update(kwargs) + else: + raise ValueError( + f"{extra_args} are not supported in monai.transforms.SaveImage," + "Check https://docs.monai.io/en/stable/transforms.html#saveimage for more information." + ) + + def set_num_fold(self, num_fold: int = 5) -> None: + """ + Set the number of cross validation folds for all algos. + + Args: + num_fold: a positive integer to define the number of folds. + """ + + if num_fold <= 0: + raise ValueError(f"num_fold is expected to be an integer greater than zero. Now it gets {num_fold}") + self.num_fold = num_fold + + def ensemble(self): + if self.mgpu: # torch.cuda.device_count() is not used because env is not set by autorunner + # init multiprocessing and update infer_files + dist.init_process_group(backend="nccl", init_method="env://") + self.world_size = dist.get_world_size() + self.rank = dist.get_rank() + logger.addFilter(RankFilter()) + # set params after init_process_group to know the rank + self.set_num_fold(num_fold=self.num_fold) + self.set_ensemble_method(self.ensemble_method_name, **self.kwargs) + # self.kwargs needs to pop out args for set_image_save_transform + save_image = self._pop_kwargs_to_get_image_save_transform(**self.kwargs) + + history = import_bundle_algo_history(self.work_dir, only_trained=False) + history_untrained = [h for h in history if not h[AlgoKeys.IS_TRAINED]] + if history_untrained: + logger.warning( + f"Ensembling step will skip {[h[AlgoKeys.ID] for h in history_untrained]} untrained algos." + "Generally it means these algos did not complete training." + ) + history = [h for h in history if h[AlgoKeys.IS_TRAINED]] + if len(history) == 0: + raise ValueError( + f"Could not find the trained results in {self.work_dir}. " + "Possibly the required training step was not completed." 
+ ) + + builder = AlgoEnsembleBuilder(history, self.data_src_cfg_name) + builder.set_ensemble_method(self.ensemble_method) + self.ensembler = builder.get_ensemble() + infer_files = self.ensembler.infer_files + if len(infer_files) < self.world_size: + if len(infer_files) == 0: + logger.info("No testing files for inference is provided. Ensembler ending.") + return + infer_files = [infer_files[self.rank]] if self.rank < len(infer_files) else [] + else: + infer_files = partition_dataset( + data=infer_files, shuffle=False, num_partitions=self.world_size, even_divisible=False + )[self.rank] + + # TO DO: Add some function in ensembler for infer_files update? + self.ensembler.infer_files = infer_files + # add rank to pred_params + self.kwargs["rank"] = self.rank + self.kwargs["image_save_func"] = save_image + logger.info("Auto3Dseg picked the following networks to ensemble:") + for algo in self.ensembler.get_algo_ensemble(): + logger.info(algo[AlgoKeys.ID]) + output_dir = save_image["output_dir"] + logger.info(f"Auto3Dseg ensemble prediction outputs will be saved in {output_dir}.") + self.ensembler(pred_param=self.kwargs) + + if self.mgpu: + dist.destroy_process_group() + + def run(self, device_setting: dict | None = None) -> None: + """ + Load the run function in the training script of each model. Training parameter is predefined by the + algo_config.yaml file, which is pre-filled by the fill_template_config function in the same instance. + + Args: + device_setting: device related settings, should follow the device_setting in auto_runner.set_device_info. + 'CUDA_VISIBLE_DEVICES' should be a string e.g. '0,1,2,3' + """ + # device_setting set default value and sanity check, in case device_setting not from autorunner + if device_setting is not None: + self.device_setting.update(device_setting) + self.device_setting["n_devices"] = len(str(self.device_setting["CUDA_VISIBLE_DEVICES"]).split(",")) + self._create_cmd() + + def _create_cmd(self) -> None: + if int(self.device_setting["NUM_NODES"]) <= 1 and int(self.device_setting["n_devices"]) <= 1: + # if single GPU + logger.info("Ensembling using single GPU!") + self.ensemble() + return + + # define base cmd for subprocess + base_cmd = f"monai.apps.auto3dseg EnsembleRunner ensemble \ + --data_src_cfg_name {self.data_src_cfg_name} \ + --work_dir {self.work_dir} \ + --num_fold {self.num_fold} \ + --ensemble_method_name {self.ensemble_method_name} \ + --mgpu True" + + if self.kwargs and isinstance(self.kwargs, Mapping): + for k, v in self.kwargs.items(): + base_cmd += f" --{k}={v}" + # define env for subprocess + ps_environ = os.environ.copy() + ps_environ["CUDA_VISIBLE_DEVICES"] = str(self.device_setting["CUDA_VISIBLE_DEVICES"]) + if int(self.device_setting["NUM_NODES"]) > 1: + if self.device_setting["MN_START_METHOD"] != "bcprun": + raise NotImplementedError( + f"{self.device_setting['MN_START_METHOD']} is not supported yet. " + "Try modify EnsembleRunner._create_cmd for your cluster." 
+ ) + logger.info(f"Ensembling on {self.device_setting['NUM_NODES']} nodes!") + cmd = _prepare_cmd_bcprun("-m " + base_cmd, cmd_prefix=f"{self.device_setting['CMD_PREFIX']}") + _run_cmd_bcprun(cmd, n=self.device_setting["NUM_NODES"], p=self.device_setting["n_devices"]) + + else: + logger.info(f"Ensembling using {self.device_setting['n_devices']} GPU!") + cmd = _prepare_cmd_torchrun("-m " + base_cmd) + _run_cmd_torchrun( + cmd, nnodes=1, nproc_per_node=self.device_setting["n_devices"], env=ps_environ, check=True + ) + return diff --git a/SegMamba/monai/apps/auto3dseg/hpo_gen.py b/SegMamba/monai/apps/auto3dseg/hpo_gen.py new file mode 100644 index 0000000000000000000000000000000000000000..b755b99feb6e01b2df9fc405a9b34eb48756f13b --- /dev/null +++ b/SegMamba/monai/apps/auto3dseg/hpo_gen.py @@ -0,0 +1,401 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import os +from abc import abstractmethod +from copy import deepcopy +from typing import Any, cast +from warnings import warn + +from monai.apps.auto3dseg.bundle_gen import BundleAlgo +from monai.apps.utils import get_logger +from monai.auto3dseg import Algo, AlgoGen, algo_from_pickle, algo_to_pickle +from monai.bundle.config_parser import ConfigParser +from monai.config import PathLike +from monai.utils import optional_import +from monai.utils.enums import AlgoKeys + +nni, has_nni = optional_import("nni") +optuna, has_optuna = optional_import("optuna") +logger = get_logger(module_name=__name__) + +__all__ = ["HPOGen", "NNIGen", "OptunaGen"] + + +class HPOGen(AlgoGen): + """ + The base class for hyperparameter optimization (HPO) interfaces to generate algos in the Auto3Dseg pipeline. + The auto-generated algos are saved at their ``output_path`` on the disk. The files in the ``output_path`` + may contain scripts that define the algo, configuration files, and pickle files that save the internal states + of the algo before/after the training. Compared to the BundleGen class, HPOGen generates Algo on-the-fly, so + training and algo generation may be executed alternatively and take a long time to finish the generation process. + + """ + + @abstractmethod + def get_hyperparameters(self): + """Get the hyperparameter from HPO.""" + raise NotImplementedError + + @abstractmethod + def update_params(self, *args, **kwargs): + """Update Algo parameters according to the hyperparameters to be evaluated.""" + raise NotImplementedError + + @abstractmethod + def set_score(self): + """Report the evaluated results to HPO.""" + raise NotImplementedError + + @abstractmethod + def run_algo(self, *args, **kwargs): + """Interface for launch the training given the fetched hyperparameters.""" + raise NotImplementedError + + +class NNIGen(HPOGen): + """ + Generate algorithms for the NNI to automate hyperparameter tuning. The module has two major interfaces: + ``__init__`` which prints out how to set up the NNI, and a trialCommand function ``run_algo`` for the NNI library to + start the trial of the algo. 
More about trialCommand function can be found in ``trail code`` section in NNI webpage + https://nni.readthedocs.io/en/latest/tutorials/hpo_quickstart_pytorch/main.html . + + Args: + algo: an Algo object (e.g. BundleAlgo) with defined methods: ``get_output_path`` and train + and supports saving to and loading from pickle files via ``algo_from_pickle`` and ``algo_to_pickle``. + params: a set of parameter to override the algo if override is supported by Algo subclass. + + Examples:: + + The experiment will keep generating new folders to save the model checkpoints, scripts, and configs if available. + ├── algorithm_templates + │ └── unet + ├── unet_0 + │ ├── algo_object.pkl + │ ├── configs + │ └── scripts + ├── unet_0_learning_rate_0.01 + │ ├── algo_object.pkl + │ ├── configs + │ ├── model_fold0 + │ └── scripts + └── unet_0_learning_rate_0.1 + ├── algo_object.pkl + ├── configs + ├── model_fold0 + └── scripts + + .. code-block:: python + # Bundle Algorithms are already generated by BundleGen in work_dir + import_bundle_algo_history(work_dir, only_trained=False) + algo_dict = self.history[0] # pick the first algorithm + algo_name = algo_dict[AlgoKeys.ID] + onealgo = algo_dict[AlgoKeys.ALGO] + nni_gen = NNIGen(algo=onealgo) + nni_gen.print_bundle_algo_instruction() + + Notes: + The NNIGen will prepare the algorithms in a folder and suggest a command to replace trialCommand in the experiment + config. However, NNIGen will not trigger NNI. User needs to write their NNI experiment configs, and then run the + NNI command manually. + """ + + def __init__(self, algo: Algo | None = None, params: dict | None = None): + self.algo: Algo + self.hint = "" + self.obj_filename = "" + + if algo is not None: + if isinstance(algo, BundleAlgo): + if params is None: + self.algo = algo + else: + self.algo = deepcopy(algo) + name = os.path.basename(algo.get_output_path()) + "_override" + output_folder = os.path.dirname(algo.get_output_path()) + + params.update({"fill_with_datastats": False}) # just copy, not using datastats to fill + self.algo.export_to_disk(output_folder, name, **params) + else: + self.algo = algo + + self.obj_filename = algo_to_pickle(self.algo, template_path=self.algo.template_path) + + def get_obj_filename(self): + """Return the filename of the dumped pickle algo object.""" + return self.obj_filename + + def print_bundle_algo_instruction(self): + """ + Print how to write the trial commands for Bundle Algo. + """ + hint = "python -m monai.apps.auto3dseg NNIGen run_algo " + logger.info("=" * 140) + logger.info("If NNI will run in your local env: ") + logger.info("1. Add the following line to the trialCommand in your NNI config: ") + logger.info(f"{hint} {self.obj_filename} {{result_dir}}") + logger.info("-" * 140) + logger.info("If NNI will run in a remote env: ") + logger.info( + f"1. Copy the algorithm_templates folder {cast(BundleAlgo, self.algo).template_path} " + f"to remote {{remote_algorithm_templates_dir}}" + ) + logger.info(f"2. Copy the older {self.algo.get_output_path()} to the remote machine {{remote_algo_dir}}") + logger.info("Then add the following line to the trialCommand in your NNI config: ") + logger.info(f"{hint} {{remote_algo_dir}} {{result_dir}} {{remote_algorithm_templates_dir}}") + logger.info("=" * 140) + + def get_hyperparameters(self): + """ + Get parameter for next round of training from NNI server. + """ + if has_nni: + return nni.get_next_parameter() + warn("NNI is not detected. 
The code will continue to run without NNI.") + return {} + + def update_params(self, params: dict) -> None: + """ + Translate the parameter from monai bundle to meet NNI requirements. + + Args: + params: a dict of parameters. + """ + self.params = params + + def get_task_id(self): + """ + Get the identifier of the current experiment. In the format of listing the searching parameter name and values + connected by underscore in the file name. + """ + return "".join(f"_{k}_{v}" for k, v in self.params.items()) or "_None" + + def generate(self, output_folder: str = ".") -> None: + """ + Generate the record for each Algo. If it is a BundleAlgo, it will generate the config files. + + Args: + output_folder: the directory nni will save the results to. + """ + task_id = self.get_task_id() + task_prefix = os.path.basename(self.algo.get_output_path()) + write_path = os.path.join(output_folder, task_prefix + task_id) + self.obj_filename = os.path.join(write_path, "algo_object.pkl") + + if isinstance(self.algo, BundleAlgo): + self.algo.export_to_disk( + output_folder, task_prefix + task_id, bundle_root=write_path, fill_with_datastats=False + ) + else: + ConfigParser.export_config_file(self.params, write_path) + logger.info(write_path) + + def set_score(self, acc): + """ + Report the acc to NNI server. + """ + if has_nni: + nni.report_final_result(acc) + else: + warn("NNI is not detected. The code will continue to run without NNI.") + + def run_algo(self, obj_filename: str, output_folder: str = ".", template_path: PathLike | None = None) -> None: + """ + The python interface for NNI to run. + + Args: + obj_filename: the pickle-exported Algo object. + output_folder: the root path of the algorithms templates. + template_path: the algorithm_template. It must contain algo.py in the follow path: + ``{algorithm_templates_dir}/{network}/scripts/algo.py`` + """ + if not os.path.isfile(obj_filename): + raise ValueError(f"{obj_filename} is not found") + + self.algo, algo_meta_data = algo_from_pickle(obj_filename, template_path=template_path) + + # step 1 sample hyperparams + params = self.get_hyperparameters() + # step 2 set the update params for the algo to run in the next trial + self.update_params(params) + # step 3 generate the folder to save checkpoints and train + self.generate(output_folder) + self.algo.train(self.params) + # step 4 report validation acc to controller + acc = self.algo.get_score() + algo_meta_data = {str(AlgoKeys.SCORE): acc} + + algo_to_pickle(self.algo, template_path=self.algo.template_path, **algo_meta_data) + self.set_score(acc) + + +class OptunaGen(HPOGen): + """ + Generate algorithms for the Optuna to automate hyperparameter tuning. Please refer to NNI and Optuna + (https://optuna.readthedocs.io/en/stable/) for more information. Optuna has different running scheme + compared to NNI. The hyperparameter samples come from a trial object (trial.suggest...) created by Optuna, + so OptunaGen needs to accept this trial object as input. Meanwhile, Optuna calls OptunaGen, + thus OptunaGen.__call__() should return the accuracy. Use functools.partial to wrap OptunaGen + for addition input arguments. + + Args: + algo: an Algo object (e.g. BundleAlgo). The object must at least define two methods: get_output_path and train + and supports saving to and loading from pickle files via ``algo_from_pickle`` and ``algo_to_pickle``. + params: a set of parameter to override the algo if override is supported by Algo subclass. 
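+
+    A minimal usage sketch (assuming ``optuna`` is installed, ``onealgo`` comes from
+    ``import_bundle_algo_history``, and the output folder is a placeholder; the default
+    ``get_hyperparameters`` search space is normally overridden in a subclass):
+
+    .. code-block:: python
+
+        from functools import partial
+
+        import optuna
+
+        optuna_gen = OptunaGen(algo=onealgo)
+        objective = partial(
+            optuna_gen, obj_filename=optuna_gen.get_obj_filename(), output_folder="./optuna_workdir"
+        )
+        study = optuna.create_study(direction="maximize")
+        study.optimize(objective, n_trials=2)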
+ + Examples:: + + The experiment will keep generating new folders to save the model checkpoints, scripts, and configs if available. + ├── algorithm_templates + │ └── unet + ├── unet_0 + │ ├── algo_object.pkl + │ ├── configs + │ └── scripts + ├── unet_0_learning_rate_0.01 + │ ├── algo_object.pkl + │ ├── configs + │ ├── model_fold0 + │ └── scripts + └── unet_0_learning_rate_0.1 + ├── algo_object.pkl + ├── configs + ├── model_fold0 + └── scripts + + Notes: + Different from NNI and NNIGen, OptunaGen and Optuna can be ran within the Python process. + + """ + + def __init__(self, algo: Algo | None = None, params: dict | None = None) -> None: + self.algo: Algo + self.obj_filename = "" + + if algo is not None: + if isinstance(algo, BundleAlgo): + if params is None: + self.algo = algo + else: + self.algo = deepcopy(algo) + name = os.path.basename(algo.get_output_path()) + "_override" + output_folder = os.path.dirname(algo.get_output_path()) + + params.update({"fill_with_datastats": False}) # just copy, not using datastats to fill + self.algo.export_to_disk(output_folder, name, **params) + else: + self.algo = algo + + self.obj_filename = algo_to_pickle(self.algo, template_path=self.algo.template_path) + + def get_obj_filename(self): + """Return the dumped pickle object of algo.""" + return self.obj_filename + + def get_hyperparameters(self): + """ + Get parameter for next round of training from optuna trial object. + This function requires user rewrite during usage for different search space. + """ + if has_optuna: + logger.info("Please rewrite this code by creating a child class") + return {"learning_rate": self.trial.suggest_float("learning_rate", 0.0001, 0.1)} + else: + warn("Optuna is not detected. The code will continue to run without Optuna.") + return {} + + def set_score(self, acc): + """Set the accuracy score""" + self.acc = acc + + def set_trial(self, trial): + """Set the Optuna trial""" + self.trial = trial + + def __call__( + self, trial: Any, obj_filename: str, output_folder: str = ".", template_path: PathLike | None = None + ) -> Any: + """ + Callable that Optuna will use to optimize the hyper-parameters + + Args: + obj_filename: the pickle-exported Algo object. + output_folder: the root path of the algorithms templates. + template_path: the algorithm_template. It must contain algo.py in the follow path: + ``{algorithm_templates_dir}/{network}/scripts/algo.py`` + """ + self.set_trial(trial) + self.run_algo(obj_filename, output_folder, template_path) + return self.acc + + def update_params(self, params: dict) -> None: + """ + Translate the parameter from monai bundle. + + Args: + params: a dict of parameters. + """ + self.params = params + + def get_task_id(self): + """ + Get the identifier of the current experiment. In the format of listing the searching parameter name and values + connected by underscore in the file name. + """ + return "".join(f"_{k}_{v}" for k, v in self.params.items()) or "_None" + + def generate(self, output_folder: str = ".") -> None: + """ + Generate the record for each Algo. If it is a BundleAlgo, it will generate the config files. + + Args: + output_folder: the directory nni will save the results to. 
+ """ + task_id = self.get_task_id() + task_prefix = os.path.basename(self.algo.get_output_path()) + write_path = os.path.join(output_folder, task_prefix + task_id) + self.obj_filename = os.path.join(write_path, "algo_object.pkl") + + if isinstance(self.algo, BundleAlgo): + self.algo.export_to_disk(output_folder, task_prefix + task_id, fill_with_datastats=False) + else: + ConfigParser.export_config_file(self.params, write_path) + logger.info(write_path) + + def run_algo(self, obj_filename: str, output_folder: str = ".", template_path: PathLike | None = None) -> None: + """ + The python interface for NNI to run. + + Args: + obj_filename: the pickle-exported Algo object. + output_folder: the root path of the algorithms templates. + template_path: the algorithm_template. It must contain algo.py in the follow path: + ``{algorithm_templates_dir}/{network}/scripts/algo.py`` + """ + if not os.path.isfile(obj_filename): + raise ValueError(f"{obj_filename} is not found") + + self.algo, algo_meta_data = algo_from_pickle(obj_filename, template_path=template_path) + + # step 1 sample hyperparams + params = self.get_hyperparameters() + # step 2 set the update params for the algo to run in the next trial + self.update_params(params) + # step 3 generate the folder to save checkpoints and train + self.generate(output_folder) + self.algo.train(self.params) + # step 4 report validation acc to controller + acc = self.algo.get_score() + algo_meta_data = {str(AlgoKeys.SCORE): acc} + algo_to_pickle(self.algo, template_path=self.algo.template_path, **algo_meta_data) + self.set_score(acc) diff --git a/SegMamba/monai/apps/auto3dseg/transforms.py b/SegMamba/monai/apps/auto3dseg/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..bb755aa78c13ff94f7022c7b6356ee61f4375bb6 --- /dev/null +++ b/SegMamba/monai/apps/auto3dseg/transforms.py @@ -0,0 +1,85 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import warnings +from collections.abc import Hashable, Mapping + +import numpy as np +import torch + +from monai.config import KeysCollection +from monai.networks.utils import pytorch_after +from monai.transforms import MapTransform +from monai.utils.misc import ImageMetaKey + + +class EnsureSameShaped(MapTransform): + """ + Checks if segmentation label images (in keys) have the same spatial shape as the main image (in source_key), + and raise an error if the shapes are significantly different. + If the shapes are only slightly different (within an allowed_shape_difference in each dim), then resize the label using + nearest interpolation. This transform is designed to correct datasets with slight label shape mismatches. + Generally image and segmentation label must have the same spatial shape, however some public datasets are having slight + shape mismatches, which will cause potential crashes when calculating loss or metric functions. 
+ """ + + def __init__( + self, + keys: KeysCollection = "label", + allow_missing_keys: bool = False, + source_key: str = "image", + allowed_shape_difference: int = 5, + warn: bool = True, + ) -> None: + """ + Args: + keys: keys of the corresponding items to be compared to the source_key item shape. + allow_missing_keys: do not raise exception if key is missing. + source_key: key of the item with the reference shape. + allowed_shape_difference: raises error if shapes are different more than this value in any dimension, + otherwise corrects for the shape mismatch using nearest interpolation. + warn: if `True` prints a warning if the label image is resized + + + """ + super().__init__(keys=keys, allow_missing_keys=allow_missing_keys) + self.source_key = source_key + self.allowed_shape_difference = allowed_shape_difference + self.warn = warn + + def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> dict[Hashable, torch.Tensor]: + d = dict(data) + image_shape = d[self.source_key].shape[1:] + for key in self.key_iterator(d): + label_shape = d[key].shape[1:] + if label_shape != image_shape: + filename = "" + if hasattr(d[key], "meta") and isinstance(d[key].meta, Mapping): # type: ignore[attr-defined] + filename = d[key].meta.get(ImageMetaKey.FILENAME_OR_OBJ) # type: ignore[attr-defined] + + if np.allclose(list(label_shape), list(image_shape), atol=self.allowed_shape_difference): + if self.warn: + warnings.warn( + f"The {key} with shape {label_shape} was resized to match the source shape {image_shape}" + f", the metadata was not updated {filename}." + ) + d[key] = torch.nn.functional.interpolate( + input=d[key].unsqueeze(0), + size=image_shape, + mode="nearest-exact" if pytorch_after(1, 11) else "nearest", + ).squeeze(0) + else: + raise ValueError( + f"The {key} shape {label_shape} is different from the source shape {image_shape} {filename}." + ) + return d diff --git a/SegMamba/monai/apps/auto3dseg/utils.py b/SegMamba/monai/apps/auto3dseg/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..64e1d2ea2a30c9bfdcedf286886011b48b5a03fb --- /dev/null +++ b/SegMamba/monai/apps/auto3dseg/utils.py @@ -0,0 +1,90 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import os + +from monai.apps.auto3dseg.bundle_gen import BundleAlgo +from monai.auto3dseg import algo_from_pickle, algo_to_pickle +from monai.utils.enums import AlgoKeys + +__all__ = ["import_bundle_algo_history", "export_bundle_algo_history", "get_name_from_algo_id"] + + +def import_bundle_algo_history( + output_folder: str = ".", template_path: str | None = None, only_trained: bool = True +) -> list: + """ + import the history of the bundleAlgo objects as a list of algo dicts. + each algo_dict has keys name (folder name), algo (bundleAlgo), is_trained (bool), + + Args: + output_folder: the root path of the algorithms templates. + template_path: the algorithm_template. 
It must contain algo.py in the following path:
+            ``{algorithm_templates_dir}/{network}/scripts/algo.py``.
+        only_trained: only read the algo history if the algo is trained.
+    """
+
+    history = []
+
+    for name in sorted(os.listdir(output_folder)):
+        write_path = os.path.join(output_folder, name)
+
+        if not os.path.isdir(write_path):
+            continue
+
+        obj_filename = os.path.join(write_path, "algo_object.pkl")
+        if not os.path.isfile(obj_filename):  # no saved algo_object.pkl in this folder
+            continue
+
+        algo, algo_meta_data = algo_from_pickle(obj_filename, template_path=template_path)
+
+        best_metric = algo_meta_data.get(AlgoKeys.SCORE, None)
+        if best_metric is None:
+            try:
+                best_metric = algo.get_score()
+            except BaseException:
+                pass
+
+        is_trained = best_metric is not None
+
+        if (only_trained and is_trained) or not only_trained:
+            history.append(
+                {AlgoKeys.ID: name, AlgoKeys.ALGO: algo, AlgoKeys.SCORE: best_metric, AlgoKeys.IS_TRAINED: is_trained}
+            )
+
+    return history
+
+
+def export_bundle_algo_history(history: list[dict[str, BundleAlgo]]) -> None:
+    """
+    Save all the BundleAlgo objects in the history to algo_object.pkl in each individual folder.
+
+    Args:
+        history: a list of BundleAlgo objects. Typically, the history can be obtained from the BundleGen get_history method.
+    """
+    for algo_dict in history:
+        algo = algo_dict[AlgoKeys.ALGO]
+        algo_to_pickle(algo, template_path=algo.template_path)
+
+
+def get_name_from_algo_id(id: str) -> str:
+    """
+    Get the name of the Algo from its identifier.
+
+    Args:
+        id: identifier which follows a convention of "name_fold_other".
+
+    Returns:
+        name of the Algo.
+    """
+    return id.split("_")[0]
diff --git a/SegMamba/monai/apps/datasets.py b/SegMamba/monai/apps/datasets.py
new file mode 100644
index 0000000000000000000000000000000000000000..67ea3059cce5f098e87d8c5326133326bb2e2fdf
--- /dev/null
+++ b/SegMamba/monai/apps/datasets.py
@@ -0,0 +1,745 @@
+# Copyright (c) MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import os
+import shutil
+import sys
+import warnings
+from collections.abc import Callable, Sequence
+from pathlib import Path
+from typing import Any
+
+import numpy as np
+
+from monai.apps.tcia import (
+    DCM_FILENAME_REGEX,
+    download_tcia_series_instance,
+    get_tcia_metadata,
+    get_tcia_ref_uid,
+    match_tcia_ref_uid_in_study,
+)
+from monai.apps.utils import download_and_extract
+from monai.config.type_definitions import PathLike
+from monai.data import (
+    CacheDataset,
+    PydicomReader,
+    load_decathlon_datalist,
+    load_decathlon_properties,
+    partition_dataset,
+    select_cross_validation_folds,
+)
+from monai.transforms import LoadImaged, Randomizable
+from monai.utils import ensure_tuple
+
+__all__ = ["MedNISTDataset", "DecathlonDataset", "CrossValidation", "TciaDataset"]
+
+
+class MedNISTDataset(Randomizable, CacheDataset):
+    """
+    The Dataset to automatically download MedNIST data and generate items for training, validation or test.
+    It's based on `CacheDataset` to accelerate the training process.
+ + Args: + root_dir: target directory to download and load MedNIST dataset. + section: expected data section, can be: `training`, `validation` or `test`. + transform: transforms to execute operations on input data. + download: whether to download and extract the MedNIST from resource link, default is False. + if expected file already exists, skip downloading even set it to True. + user can manually copy `MedNIST.tar.gz` file or `MedNIST` folder to root directory. + seed: random seed to randomly split training, validation and test datasets, default is 0. + val_frac: percentage of validation fraction in the whole dataset, default is 0.1. + test_frac: percentage of test fraction in the whole dataset, default is 0.1. + cache_num: number of items to be cached. Default is `sys.maxsize`. + will take the minimum of (cache_num, data_length x cache_rate, data_length). + cache_rate: percentage of cached data in total, default is 1.0 (cache all). + will take the minimum of (cache_num, data_length x cache_rate, data_length). + num_workers: the number of worker threads if computing cache in the initialization. + If num_workers is None then the number returned by os.cpu_count() is used. + If a value less than 1 is specified, 1 will be used instead. + progress: whether to display a progress bar when downloading dataset and computing the transform cache content. + copy_cache: whether to `deepcopy` the cache content before applying the random transforms, + default to `True`. if the random transforms don't modify the cached content + (for example, randomly crop from the cached image and deepcopy the crop region) + or if every cache item is only used once in a `multi-processing` environment, + may set `copy=False` for better performance. + as_contiguous: whether to convert the cached NumPy array or PyTorch tensor to be contiguous. + it may help improve the performance of following logic. + runtime_cache: whether to compute cache at the runtime, default to `False` to prepare + the cache content at initialization. See: :py:class:`monai.data.CacheDataset`. + + Raises: + ValueError: When ``root_dir`` is not a directory. + RuntimeError: When ``dataset_dir`` doesn't exist and downloading is not selected (``download=False``). 
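+
+    Example (a minimal sketch; assumes the archive can be downloaded into ``root_dir``)::
+
+        from monai.apps import MedNISTDataset
+
+        # cache_rate=0.0 skips pre-caching so the example stays lightweight
+        train_ds = MedNISTDataset(root_dir="./", section="training", download=True, cache_rate=0.0, progress=False)
+        print(len(train_ds), train_ds.get_num_classes())
+        print(train_ds[0]["image"].shape, train_ds[0]["class_name"])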
+ + """ + + resource = "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/MedNIST.tar.gz" + md5 = "0bc7306e7427e00ad1c5526a6677552d" + compressed_file_name = "MedNIST.tar.gz" + dataset_folder_name = "MedNIST" + + def __init__( + self, + root_dir: PathLike, + section: str, + transform: Sequence[Callable] | Callable = (), + download: bool = False, + seed: int = 0, + val_frac: float = 0.1, + test_frac: float = 0.1, + cache_num: int = sys.maxsize, + cache_rate: float = 1.0, + num_workers: int | None = 1, + progress: bool = True, + copy_cache: bool = True, + as_contiguous: bool = True, + runtime_cache: bool = False, + ) -> None: + root_dir = Path(root_dir) + if not root_dir.is_dir(): + raise ValueError("Root directory root_dir must be a directory.") + self.section = section + self.val_frac = val_frac + self.test_frac = test_frac + self.set_random_state(seed=seed) + tarfile_name = root_dir / self.compressed_file_name + dataset_dir = root_dir / self.dataset_folder_name + self.num_class = 0 + if download: + download_and_extract( + url=self.resource, + filepath=tarfile_name, + output_dir=root_dir, + hash_val=self.md5, + hash_type="md5", + progress=progress, + ) + + if not dataset_dir.is_dir(): + raise RuntimeError( + f"Cannot find dataset directory: {dataset_dir}, please use download=True to download it." + ) + data = self._generate_data_list(dataset_dir) + if transform == (): + transform = LoadImaged("image") + CacheDataset.__init__( + self, + data=data, + transform=transform, + cache_num=cache_num, + cache_rate=cache_rate, + num_workers=num_workers, + progress=progress, + copy_cache=copy_cache, + as_contiguous=as_contiguous, + runtime_cache=runtime_cache, + ) + + def randomize(self, data: np.ndarray) -> None: + self.R.shuffle(data) + + def get_num_classes(self) -> int: + """Get number of classes.""" + return self.num_class + + def _generate_data_list(self, dataset_dir: PathLike) -> list[dict]: + """ + Raises: + ValueError: When ``section`` is not one of ["training", "validation", "test"]. + + """ + dataset_dir = Path(dataset_dir) + class_names = sorted(f"{x.name}" for x in dataset_dir.iterdir() if x.is_dir()) # folder name as the class name + self.num_class = len(class_names) + image_files = [[f"{x}" for x in (dataset_dir / class_names[i]).iterdir()] for i in range(self.num_class)] + num_each = [len(image_files[i]) for i in range(self.num_class)] + image_files_list = [] + image_class = [] + class_name = [] + for i in range(self.num_class): + image_files_list.extend(image_files[i]) + image_class.extend([i] * num_each[i]) + class_name.extend([class_names[i]] * num_each[i]) + + length = len(image_files_list) + indices = np.arange(length) + self.randomize(indices) + + test_length = int(length * self.test_frac) + val_length = int(length * self.val_frac) + if self.section == "test": + section_indices = indices[:test_length] + elif self.section == "validation": + section_indices = indices[test_length : test_length + val_length] + elif self.section == "training": + section_indices = indices[test_length + val_length :] + else: + raise ValueError( + f'Unsupported section: {self.section}, available options are ["training", "validation", "test"].' 
+ ) + # the types of label and class name should be compatible with the pytorch dataloader + return [ + {"image": image_files_list[i], "label": image_class[i], "class_name": class_name[i]} + for i in section_indices + ] + + +class DecathlonDataset(Randomizable, CacheDataset): + """ + The Dataset to automatically download the data of Medical Segmentation Decathlon challenge + (http://medicaldecathlon.com/) and generate items for training, validation or test. + It will also load these properties from the JSON config file of dataset. user can call `get_properties()` + to get specified properties or all the properties loaded. + It's based on :py:class:`monai.data.CacheDataset` to accelerate the training process. + + Args: + root_dir: user's local directory for caching and loading the MSD datasets. + task: which task to download and execute: one of list ("Task01_BrainTumour", "Task02_Heart", + "Task03_Liver", "Task04_Hippocampus", "Task05_Prostate", "Task06_Lung", "Task07_Pancreas", + "Task08_HepaticVessel", "Task09_Spleen", "Task10_Colon"). + section: expected data section, can be: `training`, `validation` or `test`. + transform: transforms to execute operations on input data. + for further usage, use `EnsureChannelFirstd` to convert the shape to [C, H, W, D]. + download: whether to download and extract the Decathlon from resource link, default is False. + if expected file already exists, skip downloading even set it to True. + user can manually copy tar file or dataset folder to the root directory. + val_frac: percentage of validation fraction in the whole dataset, default is 0.2. + seed: random seed to randomly shuffle the datalist before splitting into training and validation, default is 0. + note to set same seed for `training` and `validation` sections. + cache_num: number of items to be cached. Default is `sys.maxsize`. + will take the minimum of (cache_num, data_length x cache_rate, data_length). + cache_rate: percentage of cached data in total, default is 1.0 (cache all). + will take the minimum of (cache_num, data_length x cache_rate, data_length). + num_workers: the number of worker threads if computing cache in the initialization. + If num_workers is None then the number returned by os.cpu_count() is used. + If a value less than 1 is specified, 1 will be used instead. + progress: whether to display a progress bar when downloading dataset and computing the transform cache content. + copy_cache: whether to `deepcopy` the cache content before applying the random transforms, + default to `True`. if the random transforms don't modify the cached content + (for example, randomly crop from the cached image and deepcopy the crop region) + or if every cache item is only used once in a `multi-processing` environment, + may set `copy=False` for better performance. + as_contiguous: whether to convert the cached NumPy array or PyTorch tensor to be contiguous. + it may help improve the performance of following logic. + runtime_cache: whether to compute cache at the runtime, default to `False` to prepare + the cache content at initialization. See: :py:class:`monai.data.CacheDataset`. + + Raises: + ValueError: When ``root_dir`` is not a directory. + ValueError: When ``task`` is not one of ["Task01_BrainTumour", "Task02_Heart", + "Task03_Liver", "Task04_Hippocampus", "Task05_Prostate", "Task06_Lung", "Task07_Pancreas", + "Task08_HepaticVessel", "Task09_Spleen", "Task10_Colon"]. + RuntimeError: When ``dataset_dir`` doesn't exist and downloading is not selected (``download=False``). 
+ + Example:: + + transform = Compose( + [ + LoadImaged(keys=["image", "label"]), + EnsureChannelFirstd(keys=["image", "label"]), + ScaleIntensityd(keys="image"), + ToTensord(keys=["image", "label"]), + ] + ) + + val_data = DecathlonDataset( + root_dir="./", task="Task09_Spleen", transform=transform, section="validation", seed=12345, download=True + ) + + print(val_data[0]["image"], val_data[0]["label"]) + + """ + + resource = { + "Task01_BrainTumour": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task01_BrainTumour.tar", + "Task02_Heart": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task02_Heart.tar", + "Task03_Liver": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task03_Liver.tar", + "Task04_Hippocampus": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task04_Hippocampus.tar", + "Task05_Prostate": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task05_Prostate.tar", + "Task06_Lung": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task06_Lung.tar", + "Task07_Pancreas": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task07_Pancreas.tar", + "Task08_HepaticVessel": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task08_HepaticVessel.tar", + "Task09_Spleen": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task09_Spleen.tar", + "Task10_Colon": "https://msd-for-monai.s3-us-west-2.amazonaws.com/Task10_Colon.tar", + } + md5 = { + "Task01_BrainTumour": "240a19d752f0d9e9101544901065d872", + "Task02_Heart": "06ee59366e1e5124267b774dbd654057", + "Task03_Liver": "a90ec6c4aa7f6a3d087205e23d4e6397", + "Task04_Hippocampus": "9d24dba78a72977dbd1d2e110310f31b", + "Task05_Prostate": "35138f08b1efaef89d7424d2bcc928db", + "Task06_Lung": "8afd997733c7fc0432f71255ba4e52dc", + "Task07_Pancreas": "4f7080cfca169fa8066d17ce6eb061e4", + "Task08_HepaticVessel": "641d79e80ec66453921d997fbf12a29c", + "Task09_Spleen": "410d4a301da4e5b2f6f86ec3ddba524e", + "Task10_Colon": "bad7a188931dc2f6acf72b08eb6202d0", + } + + def __init__( + self, + root_dir: PathLike, + task: str, + section: str, + transform: Sequence[Callable] | Callable = (), + download: bool = False, + seed: int = 0, + val_frac: float = 0.2, + cache_num: int = sys.maxsize, + cache_rate: float = 1.0, + num_workers: int = 1, + progress: bool = True, + copy_cache: bool = True, + as_contiguous: bool = True, + runtime_cache: bool = False, + ) -> None: + root_dir = Path(root_dir) + if not root_dir.is_dir(): + raise ValueError("Root directory root_dir must be a directory.") + self.section = section + self.val_frac = val_frac + self.set_random_state(seed=seed) + if task not in self.resource: + raise ValueError(f"Unsupported task: {task}, available options are: {list(self.resource.keys())}.") + dataset_dir = root_dir / task + tarfile_name = f"{dataset_dir}.tar" + if download: + download_and_extract( + url=self.resource[task], + filepath=tarfile_name, + output_dir=root_dir, + hash_val=self.md5[task], + hash_type="md5", + progress=progress, + ) + + if not dataset_dir.exists(): + raise RuntimeError( + f"Cannot find dataset directory: {dataset_dir}, please use download=True to download it." + ) + self.indices: np.ndarray = np.array([]) + data = self._generate_data_list(dataset_dir) + # as `release` key has typo in Task04 config file, ignore it. 
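+        # the properties listed here are read from the task's dataset.json by load_decathlon_properties()
+        # below and exposed afterwards through get_properties()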
+ property_keys = [ + "name", + "description", + "reference", + "licence", + "tensorImageSize", + "modality", + "labels", + "numTraining", + "numTest", + ] + self._properties = load_decathlon_properties(dataset_dir / "dataset.json", property_keys) + if transform == (): + transform = LoadImaged(["image", "label"]) + CacheDataset.__init__( + self, + data=data, + transform=transform, + cache_num=cache_num, + cache_rate=cache_rate, + num_workers=num_workers, + progress=progress, + copy_cache=copy_cache, + as_contiguous=as_contiguous, + runtime_cache=runtime_cache, + ) + + def get_indices(self) -> np.ndarray: + """ + Get the indices of datalist used in this dataset. + + """ + return self.indices + + def randomize(self, data: np.ndarray) -> None: + self.R.shuffle(data) + + def get_properties(self, keys: Sequence[str] | str | None = None) -> dict: + """ + Get the loaded properties of dataset with specified keys. + If no keys specified, return all the loaded properties. + + """ + if keys is None: + return self._properties + if self._properties is not None: + return {key: self._properties[key] for key in ensure_tuple(keys)} + return {} + + def _generate_data_list(self, dataset_dir: PathLike) -> list[dict]: + # the types of the item in data list should be compatible with the dataloader + dataset_dir = Path(dataset_dir) + section = "training" if self.section in ["training", "validation"] else "test" + datalist = load_decathlon_datalist(dataset_dir / "dataset.json", True, section) + return self._split_datalist(datalist) + + def _split_datalist(self, datalist: list[dict]) -> list[dict]: + if self.section == "test": + return datalist + length = len(datalist) + indices = np.arange(length) + self.randomize(indices) + + val_length = int(length * self.val_frac) + if self.section == "training": + self.indices = indices[val_length:] + else: + self.indices = indices[:val_length] + + return [datalist[i] for i in self.indices] + + +class TciaDataset(Randomizable, CacheDataset): + """ + The Dataset to automatically download the data from a public The Cancer Imaging Archive (TCIA) dataset + and generate items for training, validation or test. + + The Highdicom library is used to load dicom data with modality "SEG", but only a part of collections are + supported, such as: "C4KC-KiTS", "NSCLC-Radiomics", "NSCLC-Radiomics-Interobserver1", " QIN-PROSTATE-Repeatability" + and "PROSTATEx". Therefore, if "seg" is included in `keys` of the `LoadImaged` transform and loading some + other collections, errors may be raised. For supported collections, the original "SEG" information may not + always be consistent for each dicom file. Therefore, to avoid creating different format of labels, please use + the `label_dict` argument of `PydicomReader` when calling the `LoadImaged` transform. The prepared label dicts + of collections that are mentioned above is also saved in: `monai.apps.tcia.TCIA_LABEL_DICT`. You can also refer + to the second example bellow. + + + This class is based on :py:class:`monai.data.CacheDataset` to accelerate the training process. + + Args: + root_dir: user's local directory for caching and loading the TCIA dataset. + collection: name of a TCIA collection. + a TCIA dataset is defined as a collection. Please check the following list to browse + the collection list (only public collections can be downloaded): + https://www.cancerimagingarchive.net/collections/ + section: expected data section, can be: `training`, `validation` or `test`. + transform: transforms to execute operations on input data. 
+ for further usage, use `EnsureChannelFirstd` to convert the shape to [C, H, W, D]. + If not specified, `LoadImaged(reader="PydicomReader", keys=["image"])` will be used as the default + transform. In addition, we suggest to set the argument `labels` for `PydicomReader` if segmentations + are needed to be loaded. The original labels for each dicom series may be different, using this argument + is able to unify the format of labels. + download: whether to download and extract the dataset, default is False. + if expected file already exists, skip downloading even set it to True. + user can manually copy tar file or dataset folder to the root directory. + download_len: number of series that will be downloaded, the value should be larger than 0 or -1, where -1 means + all series will be downloaded. Default is -1. + seg_type: modality type of segmentation that is used to do the first step download. Default is "SEG". + modality_tag: tag of modality. Default is (0x0008, 0x0060). + ref_series_uid_tag: tag of referenced Series Instance UID. Default is (0x0020, 0x000e). + ref_sop_uid_tag: tag of referenced SOP Instance UID. Default is (0x0008, 0x1155). + specific_tags: tags that will be loaded for "SEG" series. This argument will be used in + `monai.data.PydicomReader`. Default is [(0x0008, 0x1115), (0x0008,0x1140), (0x3006, 0x0010), + (0x0020,0x000D), (0x0010,0x0010), (0x0010,0x0020), (0x0020,0x0011), (0x0020,0x0012)]. + fname_regex: a regular expression to match the file names when the input is a folder. + If provided, only the matched files will be included. For example, to include the file name + "image_0001.dcm", the regular expression could be `".*image_(\\d+).dcm"`. + Default to `"^(?!.*LICENSE).*"`, ignoring any file name containing `"LICENSE"`. + val_frac: percentage of validation fraction in the whole dataset, default is 0.2. + seed: random seed to randomly shuffle the datalist before splitting into training and validation, default is 0. + note to set same seed for `training` and `validation` sections. + cache_num: number of items to be cached. Default is `sys.maxsize`. + will take the minimum of (cache_num, data_length x cache_rate, data_length). + cache_rate: percentage of cached data in total, default is 0.0 (no cache). + will take the minimum of (cache_num, data_length x cache_rate, data_length). + num_workers: the number of worker threads if computing cache in the initialization. + If num_workers is None then the number returned by os.cpu_count() is used. + If a value less than 1 is specified, 1 will be used instead. + progress: whether to display a progress bar when downloading dataset and computing the transform cache content. + copy_cache: whether to `deepcopy` the cache content before applying the random transforms, + default to `True`. if the random transforms don't modify the cached content + (for example, randomly crop from the cached image and deepcopy the crop region) + or if every cache item is only used once in a `multi-processing` environment, + may set `copy=False` for better performance. + as_contiguous: whether to convert the cached NumPy array or PyTorch tensor to be contiguous. + it may help improve the performance of following logic. + runtime_cache: whether to compute cache at the runtime, default to `False` to prepare + the cache content at initialization. See: :py:class:`monai.data.CacheDataset`. 
+ + Example:: + + # collection is "Pancreatic-CT-CBCT-SEG", seg_type is "RTSTRUCT" + data = TciaDataset( + root_dir="./", collection="Pancreatic-CT-CBCT-SEG", seg_type="RTSTRUCT", download=True + ) + + # collection is "C4KC-KiTS", seg_type is "SEG", and load both images and segmentations + from monai.apps.tcia import TCIA_LABEL_DICT + transform = Compose( + [ + LoadImaged(reader="PydicomReader", keys=["image", "seg"], label_dict=TCIA_LABEL_DICT["C4KC-KiTS"]), + EnsureChannelFirstd(keys=["image", "seg"]), + ResampleToMatchd(keys="image", key_dst="seg"), + ] + ) + data = TciaDataset( + root_dir="./", collection="C4KC-KiTS", section="validation", seed=12345, download=True + ) + + print(data[0]["seg"].shape) + + """ + + def __init__( + self, + root_dir: PathLike, + collection: str, + section: str, + transform: Sequence[Callable] | Callable = (), + download: bool = False, + download_len: int = -1, + seg_type: str = "SEG", + modality_tag: tuple = (0x0008, 0x0060), + ref_series_uid_tag: tuple = (0x0020, 0x000E), + ref_sop_uid_tag: tuple = (0x0008, 0x1155), + specific_tags: tuple = ( + (0x0008, 0x1115), # Referenced Series Sequence + (0x0008, 0x1140), # Referenced Image Sequence + (0x3006, 0x0010), # Referenced Frame of Reference Sequence + (0x0020, 0x000D), # Study Instance UID + (0x0010, 0x0010), # Patient's Name + (0x0010, 0x0020), # Patient ID + (0x0020, 0x0011), # Series Number + (0x0020, 0x0012), # Acquisition Number + ), + fname_regex: str = DCM_FILENAME_REGEX, + seed: int = 0, + val_frac: float = 0.2, + cache_num: int = sys.maxsize, + cache_rate: float = 0.0, + num_workers: int = 1, + progress: bool = True, + copy_cache: bool = True, + as_contiguous: bool = True, + runtime_cache: bool = False, + ) -> None: + root_dir = Path(root_dir) + if not root_dir.is_dir(): + raise ValueError("Root directory root_dir must be a directory.") + + self.section = section + self.val_frac = val_frac + self.seg_type = seg_type + self.modality_tag = modality_tag + self.ref_series_uid_tag = ref_series_uid_tag + self.ref_sop_uid_tag = ref_sop_uid_tag + + self.set_random_state(seed=seed) + download_dir = os.path.join(root_dir, collection) + load_tags = list(specific_tags) + load_tags += [modality_tag] + self.load_tags = load_tags + if download: + seg_series_list = get_tcia_metadata( + query=f"getSeries?Collection={collection}&Modality={seg_type}", attribute="SeriesInstanceUID" + ) + if download_len > 0: + seg_series_list = seg_series_list[:download_len] + if len(seg_series_list) == 0: + raise ValueError(f"Cannot find data with collection: {collection} seg_type: {seg_type}") + for series_uid in seg_series_list: + self._download_series_reference_data(series_uid, download_dir) + + if not os.path.exists(download_dir): + raise RuntimeError(f"Cannot find dataset directory: {download_dir}.") + self.fname_regex = fname_regex + + self.indices: np.ndarray = np.array([]) + self.datalist = self._generate_data_list(download_dir) + + if transform == (): + transform = LoadImaged(keys=["image"], reader="PydicomReader", fname_regex=self.fname_regex) + CacheDataset.__init__( + self, + data=self.datalist, + transform=transform, + cache_num=cache_num, + cache_rate=cache_rate, + num_workers=num_workers, + progress=progress, + copy_cache=copy_cache, + as_contiguous=as_contiguous, + runtime_cache=runtime_cache, + ) + + def get_indices(self) -> np.ndarray: + """ + Get the indices of datalist used in this dataset. 
+ + """ + return self.indices + + def randomize(self, data: np.ndarray) -> None: + self.R.shuffle(data) + + def _download_series_reference_data(self, series_uid: str, download_dir: str) -> None: + """ + First of all, download a series from TCIA according to `series_uid`. + Then find all referenced series and download. + """ + seg_first_dir = os.path.join(download_dir, "raw", series_uid) + download_tcia_series_instance( + series_uid=series_uid, download_dir=download_dir, output_dir=seg_first_dir, check_md5=False + ) + dicom_files = [f for f in sorted(os.listdir(seg_first_dir)) if f.endswith(".dcm")] + # achieve series number and patient id from the first dicom file + dcm_path = os.path.join(seg_first_dir, dicom_files[0]) + ds = PydicomReader(stop_before_pixels=True, specific_tags=self.load_tags).read(dcm_path) + # (0x0010,0x0020) and (0x0010,0x0010), better to be contained in `specific_tags` + patient_id = ds.PatientID if ds.PatientID else ds.PatientName + if not patient_id: + warnings.warn(f"unable to find patient name of dicom file: {dcm_path}, use 'patient' instead.") + patient_id = "patient" + # (0x0020,0x0011) and (0x0020,0x0012), better to be contained in `specific_tags` + series_num = ds.SeriesNumber if ds.SeriesNumber else ds.AcquisitionNumber + if not series_num: + warnings.warn(f"unable to find series number of dicom file: {dcm_path}, use '0' instead.") + series_num = 0 + + series_num = str(series_num) + seg_dir = os.path.join(download_dir, patient_id, series_num, self.seg_type.lower()) + dcm_dir = os.path.join(download_dir, patient_id, series_num, "image") + + # get ref uuid + ref_uid_list = [] + for dcm_file in dicom_files: + dcm_path = os.path.join(seg_first_dir, dcm_file) + ds = PydicomReader(stop_before_pixels=True, specific_tags=self.load_tags).read(dcm_path) + if ds[self.modality_tag].value == self.seg_type: + ref_uid = get_tcia_ref_uid( + ds, find_sop=False, ref_series_uid_tag=self.ref_series_uid_tag, ref_sop_uid_tag=self.ref_sop_uid_tag + ) + if ref_uid == "": + ref_sop_uid = get_tcia_ref_uid( + ds, + find_sop=True, + ref_series_uid_tag=self.ref_series_uid_tag, + ref_sop_uid_tag=self.ref_sop_uid_tag, + ) + ref_uid = match_tcia_ref_uid_in_study(ds.StudyInstanceUID, ref_sop_uid) + if ref_uid != "": + ref_uid_list.append(ref_uid) + if not ref_uid_list: + warnings.warn(f"Cannot find the referenced Series Instance UID from series: {series_uid}.") + else: + download_tcia_series_instance( + series_uid=ref_uid_list[0], download_dir=download_dir, output_dir=dcm_dir, check_md5=False + ) + if not os.path.exists(seg_dir): + shutil.copytree(seg_first_dir, seg_dir) + + def _generate_data_list(self, dataset_dir: PathLike) -> list[dict]: + # the types of the item in data list should be compatible with the dataloader + dataset_dir = Path(dataset_dir) + datalist = [] + patient_list = [f.name for f in os.scandir(dataset_dir) if f.is_dir() and f.name != "raw"] + for patient_id in patient_list: + series_list = [f.name for f in os.scandir(os.path.join(dataset_dir, patient_id)) if f.is_dir()] + for series_num in series_list: + seg_key = self.seg_type.lower() + image_path = os.path.join(dataset_dir, patient_id, series_num, "image") + mask_path = os.path.join(dataset_dir, patient_id, series_num, seg_key) + + if os.path.exists(image_path): + datalist.append({"image": image_path, seg_key: mask_path}) + else: + datalist.append({seg_key: mask_path}) + + return self._split_datalist(datalist) + + def _split_datalist(self, datalist: list[dict]) -> list[dict]: + if self.section == "test": + return 
datalist + length = len(datalist) + indices = np.arange(length) + self.randomize(indices) + + val_length = int(length * self.val_frac) + if self.section == "training": + self.indices = indices[val_length:] + else: + self.indices = indices[:val_length] + + return [datalist[i] for i in self.indices] + + +class CrossValidation: + """ + Cross validation dataset based on the general dataset which must have `_split_datalist` API. + + Args: + dataset_cls: dataset class to be used to create the cross validation partitions. + It must have `_split_datalist` API. + nfolds: number of folds to split the data for cross validation. + seed: random seed to randomly shuffle the datalist before splitting into N folds, default is 0. + dataset_params: other additional parameters for the dataset_cls base class. + + Example of 5 folds cross validation training:: + + cvdataset = CrossValidation( + dataset_cls=DecathlonDataset, + nfolds=5, + seed=12345, + root_dir="./", + task="Task09_Spleen", + section="training", + transform=train_transform, + download=True, + ) + dataset_fold0_train = cvdataset.get_dataset(folds=[1, 2, 3, 4]) + dataset_fold0_val = cvdataset.get_dataset(folds=0, transform=val_transform, download=False) + # execute training for fold 0 ... + + dataset_fold1_train = cvdataset.get_dataset(folds=[0, 2, 3, 4]) + dataset_fold1_val = cvdataset.get_dataset(folds=1, transform=val_transform, download=False) + # execute training for fold 1 ... + + ... + + dataset_fold4_train = ... + # execute training for fold 4 ... + + """ + + def __init__(self, dataset_cls: object, nfolds: int = 5, seed: int = 0, **dataset_params: Any) -> None: + if not hasattr(dataset_cls, "_split_datalist"): + raise ValueError("dataset class must have _split_datalist API.") + self.dataset_cls = dataset_cls + self.nfolds = nfolds + self.seed = seed + self.dataset_params = dataset_params + + def get_dataset(self, folds: Sequence[int] | int, **dataset_params: Any) -> object: + """ + Generate dataset based on the specified fold indices in the cross validation group. + + Args: + folds: index of folds for training or validation, if a list of values, concatenate the data. + dataset_params: other additional parameters for the dataset_cls base class, will override + the same parameters in `self.dataset_params`. + + """ + nfolds = self.nfolds + seed = self.seed + dataset_params_ = dict(self.dataset_params) + dataset_params_.update(dataset_params) + + class _NsplitsDataset(self.dataset_cls): # type: ignore + + def _split_datalist(self, datalist: list[dict]) -> list[dict]: + data = partition_dataset(data=datalist, num_partitions=nfolds, shuffle=True, seed=seed) + return select_cross_validation_folds(partitions=data, folds=folds) + + return _NsplitsDataset(**dataset_params_) diff --git a/SegMamba/monai/apps/deepedit/__init__.py b/SegMamba/monai/apps/deepedit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1e97f8940782e96a77c1c08483fc41da9a48ae22 --- /dev/null +++ b/SegMamba/monai/apps/deepedit/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/SegMamba/monai/apps/deepedit/interaction.py b/SegMamba/monai/apps/deepedit/interaction.py new file mode 100644 index 0000000000000000000000000000000000000000..07302575c6afa7c0a9f846373746d16a0245fb03 --- /dev/null +++ b/SegMamba/monai/apps/deepedit/interaction.py @@ -0,0 +1,100 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from collections.abc import Callable, Sequence + +import numpy as np +import torch + +from monai.data import decollate_batch, list_data_collate +from monai.engines import SupervisedEvaluator, SupervisedTrainer +from monai.engines.utils import IterationEvents +from monai.transforms import Compose +from monai.utils.enums import CommonKeys + + +class Interaction: + """ + Ignite process_function used to introduce interactions (simulation of clicks) for DeepEdit Training/Evaluation. + + More details about this can be found at: + + Diaz-Pinto et al., MONAI Label: A framework for AI-assisted Interactive + Labeling of 3D Medical Images. (2022) https://arxiv.org/abs/2203.12362 + + Args: + deepgrow_probability: probability of simulating clicks in an iteration + transforms: execute additional transformation during every iteration (before train). + Typically, several Tensor based transforms composed by `Compose`. 
+ train: True for training mode or False for evaluation mode + click_probability_key: key to click/interaction probability + label_names: Dict of label names + max_interactions: maximum number of interactions per iteration + """ + + def __init__( + self, + deepgrow_probability: float, + transforms: Sequence[Callable] | Callable, + train: bool, + label_names: None | dict[str, int] = None, + click_probability_key: str = "probability", + max_interactions: int = 1, + ) -> None: + self.deepgrow_probability = deepgrow_probability + self.transforms = Compose(transforms) if not isinstance(transforms, Compose) else transforms + self.train = train + self.label_names = label_names + self.click_probability_key = click_probability_key + self.max_interactions = max_interactions + + def __call__(self, engine: SupervisedTrainer | SupervisedEvaluator, batchdata: dict[str, torch.Tensor]) -> dict: + if batchdata is None: + raise ValueError("Must provide batch data for current iteration.") + + if np.random.choice([True, False], p=[self.deepgrow_probability, 1 - self.deepgrow_probability]): + for j in range(self.max_interactions): + inputs, _ = engine.prepare_batch(batchdata) + inputs = inputs.to(engine.state.device) + + engine.fire_event(IterationEvents.INNER_ITERATION_STARTED) + engine.network.eval() + + with torch.no_grad(): + if engine.amp: + with torch.cuda.amp.autocast(): + predictions = engine.inferer(inputs, engine.network) + else: + predictions = engine.inferer(inputs, engine.network) + batchdata.update({CommonKeys.PRED: predictions}) + + # decollate/collate batchdata to execute click transforms + batchdata_list = decollate_batch(batchdata, detach=True) + for i in range(len(batchdata_list)): + batchdata_list[i][self.click_probability_key] = ( + (1.0 - ((1.0 / self.max_interactions) * j)) if self.train else 1.0 + ) + batchdata_list[i] = self.transforms(batchdata_list[i]) + + batchdata = list_data_collate(batchdata_list) + engine.fire_event(IterationEvents.INNER_ITERATION_COMPLETED) + else: + # zero out input guidance channels + batchdata_list = decollate_batch(batchdata, detach=True) + for i in range(1, len(batchdata_list[0][CommonKeys.IMAGE])): + batchdata_list[0][CommonKeys.IMAGE][i] *= 0 + batchdata = list_data_collate(batchdata_list) + + # first item in batch only + engine.state.batch = batchdata + return engine._iteration(engine, batchdata) # type: ignore[arg-type] diff --git a/SegMamba/monai/apps/deepedit/transforms.py b/SegMamba/monai/apps/deepedit/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..6d0825f54ab38183b33fbf5036ec0e5dbc3b10db --- /dev/null +++ b/SegMamba/monai/apps/deepedit/transforms.py @@ -0,0 +1,915 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +import json +import logging +import random +import warnings +from collections.abc import Hashable, Mapping, Sequence, Sized + +import numpy as np +import torch + +from monai.config import KeysCollection +from monai.data import MetaTensor +from monai.networks.layers import GaussianFilter +from monai.transforms.transform import MapTransform, Randomizable, Transform +from monai.utils import min_version, optional_import + +measure, _ = optional_import("skimage.measure", "0.14.2", min_version) + +logger = logging.getLogger(__name__) + +distance_transform_cdt, _ = optional_import("scipy.ndimage.morphology", name="distance_transform_cdt") + + +class DiscardAddGuidanced(MapTransform): + + def __init__( + self, + keys: KeysCollection, + number_intensity_ch: int = 1, + probability: float = 1.0, + label_names: Sized | None = None, + allow_missing_keys: bool = False, + ): + """ + Discard positive and negative points according to discard probability + + Args: + keys: The ``keys`` parameter will be used to get and set the actual data item to transform + number_intensity_ch: number of intensity channels + probability: probability of discarding clicks + """ + super().__init__(keys, allow_missing_keys) + + self.number_intensity_ch = number_intensity_ch + self.discard_probability = probability + self.label_names = label_names or [] + + def _apply(self, image): + if self.discard_probability >= 1.0 or np.random.choice( + [True, False], p=[self.discard_probability, 1 - self.discard_probability] + ): + signal = np.zeros( + (len(self.label_names), image.shape[-3], image.shape[-2], image.shape[-1]), dtype=np.float32 + ) + if image.shape[0] == self.number_intensity_ch + len(self.label_names): + image[self.number_intensity_ch :, ...] = signal + else: + image = np.concatenate([image, signal], axis=0) + return image + + def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]: + d: dict = dict(data) + for key in self.key_iterator(d): + if key == "image": + tmp_image = self._apply(d[key]) + if isinstance(d[key], MetaTensor): + d[key].array = tmp_image + else: + d[key] = tmp_image + else: + print("This transform only applies to the image") + return d + + +class NormalizeLabelsInDatasetd(MapTransform): + + def __init__( + self, keys: KeysCollection, label_names: dict[str, int] | None = None, allow_missing_keys: bool = False + ): + """ + Normalize label values according to label names dictionary + + Args: + keys: The ``keys`` parameter will be used to get and set the actual data item to transform + label_names: all label names + """ + super().__init__(keys, allow_missing_keys) + + self.label_names = label_names or {} + + def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]: + d: dict = dict(data) + for key in self.key_iterator(d): + # Dictionary containing new label numbers + new_label_names = {} + label = np.zeros(d[key].shape) + # Making sure the range values and number of labels are the same + for idx, (key_label, val_label) in enumerate(self.label_names.items(), start=1): + if key_label != "background": + new_label_names[key_label] = idx + label[d[key] == val_label] = idx + if key_label == "background": + new_label_names["background"] = 0 + + d["label_names"] = new_label_names + if isinstance(d[key], MetaTensor): + d[key].array = label + else: + d[key] = label + return d + + +class SingleLabelSelectiond(MapTransform): + + def __init__( + self, keys: KeysCollection, label_names: Sequence[str] | None = None, 
allow_missing_keys: bool = False + ): + """ + Selects one label at a time to train the DeepEdit + + Args: + keys: The ``keys`` parameter will be used to get and set the actual data item to transform + label_names: all label names + """ + super().__init__(keys, allow_missing_keys) + + self.label_names: Sequence[str] = label_names or [] + self.all_label_values = { + "spleen": 1, + "right kidney": 2, + "left kidney": 3, + "gallbladder": 4, + "esophagus": 5, + "liver": 6, + "stomach": 7, + "aorta": 8, + "inferior vena cava": 9, + "portal_vein": 10, + "splenic_vein": 11, + "pancreas": 12, + "right adrenal gland": 13, + "left adrenal gland": 14, + } + + def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]: + d: dict = dict(data) + for key in self.key_iterator(d): + if key == "label": + # Taking one label at a time + t_label = np.random.choice(self.label_names) + d["current_label"] = t_label + d[key][d[key] != self.all_label_values[t_label]] = 0.0 + # Convert label to index values following label_names argument + max_label_val = self.label_names.index(t_label) + 1 + d[key][d[key] > 0] = max_label_val + print(f"Using label {t_label} with number: {d[key].max()}") + else: + warnings.warn("This transform only applies to the label") + return d + + +class AddGuidanceSignalDeepEditd(MapTransform): + """ + Add Guidance signal for input image. Multilabel DeepEdit + + Based on the "guidance" points, apply Gaussian to them and add them as new channel for input image. + + Args: + guidance: key to store guidance. + sigma: standard deviation for Gaussian kernel. + number_intensity_ch: channel index. + """ + + def __init__( + self, + keys: KeysCollection, + guidance: str = "guidance", + sigma: int = 3, + number_intensity_ch: int = 1, + allow_missing_keys: bool = False, + ): + super().__init__(keys, allow_missing_keys) + self.guidance = guidance + self.sigma = sigma + self.number_intensity_ch = number_intensity_ch + + def _get_signal(self, image, guidance): + dimensions = 3 if len(image.shape) > 3 else 2 + guidance = guidance.tolist() if isinstance(guidance, np.ndarray) else guidance + guidance = json.loads(guidance) if isinstance(guidance, str) else guidance + + # In inference the user may not provide clicks for some channels/labels + if len(guidance): + if dimensions == 3: + # Assume channel is first and depth is last CHWD + signal = np.zeros((1, image.shape[-3], image.shape[-2], image.shape[-1]), dtype=np.float32) + else: + signal = np.zeros((1, image.shape[-2], image.shape[-1]), dtype=np.float32) + + sshape = signal.shape + for point in guidance: # TO DO: make the guidance a list only - it is currently a list of list + if np.any(np.asarray(point) < 0): + continue + + if dimensions == 3: + # Making sure points fall inside the image dimension + p1 = max(0, min(int(point[-3]), sshape[-3] - 1)) + p2 = max(0, min(int(point[-2]), sshape[-2] - 1)) + p3 = max(0, min(int(point[-1]), sshape[-1] - 1)) + signal[:, p1, p2, p3] = 1.0 + else: + p1 = max(0, min(int(point[-2]), sshape[-2] - 1)) + p2 = max(0, min(int(point[-1]), sshape[-1] - 1)) + signal[:, p1, p2] = 1.0 + + # Apply a Gaussian filter to the signal + if np.max(signal[0]) > 0: + signal_tensor = torch.tensor(signal[0]) + pt_gaussian = GaussianFilter(len(signal_tensor.shape), sigma=self.sigma) + signal_tensor = pt_gaussian(signal_tensor.unsqueeze(0).unsqueeze(0)) + signal_tensor = signal_tensor.squeeze(0).squeeze(0) + signal[0] = signal_tensor.detach().cpu().numpy() + signal[0] = (signal[0] - np.min(signal[0])) / 
(np.max(signal[0]) - np.min(signal[0])) + return signal + else: + if dimensions == 3: + signal = np.zeros((1, image.shape[-3], image.shape[-2], image.shape[-1]), dtype=np.float32) + else: + signal = np.zeros((1, image.shape[-2], image.shape[-1]), dtype=np.float32) + return signal + + def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]: + d: dict = dict(data) + for key in self.key_iterator(d): + if key == "image": + image = d[key] + tmp_image = image[0 : 0 + self.number_intensity_ch, ...] + guidance = d[self.guidance] + for key_label in guidance.keys(): + # Getting signal based on guidance + signal = self._get_signal(image, guidance[key_label]) + tmp_image = np.concatenate([tmp_image, signal], axis=0) + if isinstance(d[key], MetaTensor): + d[key].array = tmp_image + else: + d[key] = tmp_image + return d + else: + print("This transform only applies to image key") + return d + + +class FindAllValidSlicesDeepEditd(MapTransform): + """ + Find/List all valid slices in the labels. + Label is assumed to be a 4D Volume with shape CHWD, where C=1. + + Args: + sids: key to store slices indices having valid label map. + """ + + def __init__(self, keys: KeysCollection, sids: Hashable = "sids", allow_missing_keys: bool = False): + super().__init__(keys, allow_missing_keys) + self.sids = sids + + def _apply(self, label, d): + sids = {} + for key_label in d["label_names"].keys(): + l_ids = [] + for sid in range(label.shape[-1]): # Assume channel is first and depth is last CHWD + if d["label_names"][key_label] in label[0][..., sid]: + l_ids.append(sid) + sids[key_label] = l_ids + return sids + + def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]: + d: dict = dict(data) + for key in self.key_iterator(d): + if key == "label": + label = d[key] + if label.shape[0] != 1: + raise ValueError("Only supports single channel labels!") + + if len(label.shape) != 4: # only for 3D + raise ValueError("Only supports label with shape CHWD!") + + sids = self._apply(label, d) + if sids is not None and len(sids.keys()): + d[self.sids] = sids + return d + else: + print("This transform only applies to label key") + return d + + +class AddInitialSeedPointDeepEditd(Randomizable, MapTransform): + """ + Add random guidance as initial seed point for a given label. + + Note that the label is of size (C, D, H, W) or (C, H, W) + + The guidance is of size (2, N, # of dims) where N is number of guidance added. + # of dims = 4 when C, D, H, W; # of dims = 3 when (C, H, W) + + Args: + guidance: key to store guidance. + sids: key that represents lists of valid slice indices for the given label. + sid: key that represents the slice to add initial seed point. If not present, random sid will be chosen. + connected_regions: maximum connected regions to use for adding initial points. 
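+
+    Example (a small synthetic sketch; assumes ``scikit-image`` and ``scipy`` are installed)::
+
+        import numpy as np
+        from monai.apps.deepedit.transforms import AddInitialSeedPointDeepEditd
+
+        label = np.zeros((1, 5, 5, 5), dtype=np.float32)
+        label[0, 2, 2, 2] = 1.0  # a single foreground voxel on slice 2
+        data = {
+            "label": label,
+            "label_names": {"organ": 1, "background": 0},
+            "sids": {"organ": [2], "background": [2]},
+        }
+        data = AddInitialSeedPointDeepEditd(keys="label")(data)
+        print(data["guidance"])  # one JSON-encoded seed click list per label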
+ """ + + def __init__( + self, + keys: KeysCollection, + guidance: str = "guidance", + sids: str = "sids", + sid: str = "sid", + connected_regions: int = 5, + allow_missing_keys: bool = False, + ): + super().__init__(keys, allow_missing_keys) + self.sids_key = sids + self.sid_key = sid + self.sid: dict[str, int] = dict() + self.guidance = guidance + self.connected_regions = connected_regions + + def _apply(self, label, sid, key_label): + dimensions = 3 if len(label.shape) > 3 else 2 + self.default_guidance = [-1] * (dimensions + 1) + + dims = dimensions + if sid is not None and dimensions == 3: + dims = 2 + label = label[0][..., sid][np.newaxis] # Assume channel is first and depth is last CHWD + + # THERE MAY BE MULTIPLE BLOBS FOR SINGLE LABEL IN THE SELECTED SLICE + label = (label > 0.5).astype(np.float32) + # measure.label: Label connected regions of an integer array - Two pixels are connected + # when they are neighbors and have the same value + blobs_labels = measure.label(label.astype(int), background=0) if dims == 2 else label + if np.max(blobs_labels) <= 0: + raise AssertionError(f"SLICES NOT FOUND FOR LABEL: {key_label}") + + pos_guidance = [] + for ridx in range(1, 2 if dims == 3 else self.connected_regions + 1): + if dims == 2: + label = (blobs_labels == ridx).astype(np.float32) + if np.sum(label) == 0: + pos_guidance.append(self.default_guidance) + continue + + # The distance transform provides a metric or measure of the separation of points in the image. + # This function calculates the distance between each pixel that is set to off (0) and + # the nearest nonzero pixel for binary images - http://matlab.izmiran.ru/help/toolbox/images/morph14.html + distance = distance_transform_cdt(label).flatten() + probability = np.exp(distance) - 1.0 + + idx = np.where(label.flatten() > 0)[0] + seed = self.R.choice(idx, size=1, p=probability[idx] / np.sum(probability[idx])) + dst = distance[seed] + + g = np.asarray(np.unravel_index(seed, label.shape)).transpose().tolist()[0] + g[0] = dst[0] # for debug + if dimensions == 2 or dims == 3: + pos_guidance.append(g) + else: + # Clicks are created using this convention Channel Height Width Depth (CHWD) + pos_guidance.append([g[0], g[-2], g[-1], sid]) # Assume channel is first and depth is last CHWD + + return np.asarray([pos_guidance]) + + def _randomize(self, d, key_label): + sids = d.get(self.sids_key).get(key_label) if d.get(self.sids_key) is not None else None + sid = d.get(self.sid_key).get(key_label) if d.get(self.sid_key) is not None else None + if sids is not None and sids: + if sid is None or sid not in sids: + sid = self.R.choice(sids, replace=False) + else: + logger.info(f"Not slice IDs for label: {key_label}") + sid = None + self.sid[key_label] = sid + + def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]: + d: dict = dict(data) + for key in self.key_iterator(d): + if key == "label": + label_guidances = {} + for key_label in d["sids"].keys(): + # Randomize: Select a random slice + self._randomize(d, key_label) + # Generate guidance base on selected slice + tmp_label = np.copy(d[key]) + # Taking one label to create the guidance + if key_label != "background": + tmp_label[tmp_label != float(d["label_names"][key_label])] = 0 + else: + tmp_label[tmp_label != float(d["label_names"][key_label])] = 1 + tmp_label = 1 - tmp_label + label_guidances[key_label] = json.dumps( + self._apply(tmp_label, self.sid.get(key_label), key_label).astype(int).tolist() + ) + d[self.guidance] = label_guidances + return d + 
else: + print("This transform only applies to label key") + return d + + +class FindDiscrepancyRegionsDeepEditd(MapTransform): + """ + Find discrepancy between prediction and actual during click interactions during training. + + Args: + pred: key to prediction source. + discrepancy: key to store discrepancies found between label and prediction. + """ + + def __init__( + self, + keys: KeysCollection, + pred: str = "pred", + discrepancy: str = "discrepancy", + allow_missing_keys: bool = False, + ): + super().__init__(keys, allow_missing_keys) + self.pred = pred + self.discrepancy = discrepancy + + @staticmethod + def disparity(label, pred): + disparity = label - pred + # Negative ONES mean predicted label is not part of the ground truth + # Positive ONES mean predicted label missed that region of the ground truth + pos_disparity = (disparity > 0).astype(np.float32) + neg_disparity = (disparity < 0).astype(np.float32) + return [pos_disparity, neg_disparity] + + def _apply(self, label, pred): + return self.disparity(label, pred) + + def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]: + d: dict = dict(data) + for key in self.key_iterator(d): + if key == "label": + all_discrepancies = {} + for _, (key_label, val_label) in enumerate(d["label_names"].items()): + if key_label != "background": + # Taking single label + label = np.copy(d[key]) + label[label != val_label] = 0 + # Label should be represented in 1 + label = (label > 0.5).astype(np.float32) + # Taking single prediction + pred = np.copy(d[self.pred]) + pred[pred != val_label] = 0 + # Prediction should be represented in one + pred = (pred > 0.5).astype(np.float32) + else: + # Taking single label + label = np.copy(d[key]) + label[label != val_label] = 1 + label = 1 - label + # Label should be represented in 1 + label = (label > 0.5).astype(np.float32) + # Taking single prediction + pred = np.copy(d[self.pred]) + pred[pred != val_label] = 1 + pred = 1 - pred + # Prediction should be represented in one + pred = (pred > 0.5).astype(np.float32) + all_discrepancies[key_label] = self._apply(label, pred) + d[self.discrepancy] = all_discrepancies + return d + else: + print("This transform only applies to 'label' key") + return d + + +class AddRandomGuidanceDeepEditd(Randomizable, MapTransform): + """ + Add random guidance based on discrepancies that were found between label and prediction. 
+ + Args: + guidance: key to guidance source, shape (2, N, # of dim) + discrepancy: key to discrepancy map between label and prediction shape (2, C, H, W, D) or (2, C, H, W) + probability: key to click/interaction probability, shape (1) + """ + + def __init__( + self, + keys: KeysCollection, + guidance: str = "guidance", + discrepancy: str = "discrepancy", + probability: str = "probability", + allow_missing_keys: bool = False, + ): + super().__init__(keys, allow_missing_keys) + self.guidance_key = guidance + self.discrepancy = discrepancy + self.probability = probability + self._will_interact = None + self.is_pos: bool | None = None + self.is_other: bool | None = None + self.default_guidance = None + self.guidance: dict[str, list[list[int]]] = {} + + def randomize(self, data=None): + probability = data[self.probability] + self._will_interact = self.R.choice([True, False], p=[probability, 1.0 - probability]) + + def find_guidance(self, discrepancy): + distance = distance_transform_cdt(discrepancy).flatten() + probability = np.exp(distance.flatten()) - 1.0 + idx = np.where(discrepancy.flatten() > 0)[0] + + if np.sum(discrepancy > 0) > 0: + seed = self.R.choice(idx, size=1, p=probability[idx] / np.sum(probability[idx])) + dst = distance[seed] + + g = np.asarray(np.unravel_index(seed, discrepancy.shape)).transpose().tolist()[0] + g[0] = dst[0] + return g + return None + + def add_guidance(self, guidance, discrepancy, label_names, labels): + # Positive clicks of the segment in the iteration + pos_discr = discrepancy[0] # idx 0 is positive discrepancy and idx 1 is negative discrepancy + + # Check the areas that belong to other segments + other_discrepancy_areas = {} + for _, (key_label, val_label) in enumerate(label_names.items()): + if key_label != "background": + tmp_label = np.copy(labels) + tmp_label[tmp_label != val_label] = 0 + tmp_label = (tmp_label > 0.5).astype(np.float32) + other_discrepancy_areas[key_label] = np.sum(discrepancy[1] * tmp_label) + else: + tmp_label = np.copy(labels) + tmp_label[tmp_label != val_label] = 1 + tmp_label = 1 - tmp_label + other_discrepancy_areas[key_label] = np.sum(discrepancy[1] * tmp_label) + + # Add guidance to the current key label + if np.sum(pos_discr) > 0: + guidance.append(self.find_guidance(pos_discr)) + self.is_pos = True + + # Add guidance to the other areas + for key_label in label_names.keys(): + # Areas that cover more than 50 voxels + if other_discrepancy_areas[key_label] > 50: + self.is_other = True + if key_label != "background": + tmp_label = np.copy(labels) + tmp_label[tmp_label != label_names[key_label]] = 0 + tmp_label = (tmp_label > 0.5).astype(np.float32) + self.guidance[key_label].append(self.find_guidance(discrepancy[1] * tmp_label)) + else: + tmp_label = np.copy(labels) + tmp_label[tmp_label != label_names[key_label]] = 1 + tmp_label = 1 - tmp_label + self.guidance[key_label].append(self.find_guidance(discrepancy[1] * tmp_label)) + + def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]: + d: dict = dict(data) + guidance = d[self.guidance_key] + discrepancy = d[self.discrepancy] + self.randomize(data) + if self._will_interact: + # Convert all guidance to lists so new guidance can be easily appended + for key_label in d["label_names"].keys(): + tmp_gui = guidance[key_label] + tmp_gui = tmp_gui.tolist() if isinstance(tmp_gui, np.ndarray) else tmp_gui + tmp_gui = json.loads(tmp_gui) if isinstance(tmp_gui, str) else tmp_gui + self.guidance[key_label] = [j for j in tmp_gui if -1 not in j] + + # Add 
+
+
+class AddGuidanceFromPointsDeepEditd(Transform):
+    """
+    Add guidance based on user clicks. ONLY WORKS FOR 3D.
+
+    We assume the input is loaded by LoadImaged and has the shape of (H, W, D) originally.
+    Clicks always specify the coordinates in (H, W, D).
+
+    Args:
+        ref_image: key to reference image to fetch current and original image details.
+        guidance: output key to store guidance.
+        meta_keys: explicitly indicate the key of the metadata dictionary of `ref_image`.
+            for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
+            the metadata is a dictionary object which contains: filename, original_shape, etc.
+            if None, will try to construct meta_keys by `{ref_image}_{meta_key_postfix}`.
+        meta_key_postfix: if meta_key is None, use `{ref_image}_{meta_key_postfix}` to fetch the metadata according
+            to the key data, default is `meta_dict`, the metadata is a dictionary object.
+            For example, to handle key `image`, read/write affine matrices from the
+            metadata `image_meta_dict` dictionary's `affine` field.
+
+    """
+
+    def __init__(
+        self,
+        ref_image: str,
+        guidance: str = "guidance",
+        label_names: dict | None = None,
+        meta_keys: str | None = None,
+        meta_key_postfix: str = "meta_dict",
+    ):
+        self.ref_image = ref_image
+        self.guidance = guidance
+        self.label_names = label_names or {}
+        self.meta_keys = meta_keys
+        self.meta_key_postfix = meta_key_postfix
+
+    @staticmethod
+    def _apply(clicks, factor):
+        if len(clicks):
+            guidance = np.multiply(clicks, factor).astype(int).tolist()
+            return guidance
+        else:
+            return []
+
+    def __call__(self, data):
+        d = dict(data)
+        meta_dict_key = self.meta_keys or f"{self.ref_image}_{self.meta_key_postfix}"
+        # extract affine matrix from metadata
+        if isinstance(d[self.ref_image], MetaTensor):
+            meta_dict = d[self.ref_image].meta
+        elif meta_dict_key in d:
+            meta_dict = d[meta_dict_key]
+        else:
+            raise ValueError(
+                f"{meta_dict_key} is not found. Please check whether it is the correct image meta key."
+            )
+
+        if "spatial_shape" not in meta_dict:
+            raise RuntimeError('Missing "spatial_shape" in meta_dict!')
+
+        # Assume channel is first and depth is last CHWD
+        original_shape = meta_dict["spatial_shape"]
+        current_shape = list(d[self.ref_image].shape)[1:]
+
+        # Here we assume the depth dimension is the last dimension of "original_shape" and "current_shape"
+        factor = np.array(current_shape) / original_shape
+
+        # Creating guidance for all clicks
+        all_guidances = {}
+        for key_label in self.label_names.keys():
+            clicks = d.get(key_label, [])
+            clicks = list(np.array(clicks).astype(int))
+            all_guidances[key_label] = self._apply(clicks, factor)
+        d[self.guidance] = all_guidances
+        return d
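Editor's note: a small sketch (not part of the patch) of the click-rescaling arithmetic performed by `AddGuidanceFromPointsDeepEditd`; the shapes and click coordinates below are invented for illustration.

# Editor's sketch: map clicks from the original (H, W, D) grid to the resampled grid.
import numpy as np

original_shape = np.array([240, 240, 155])  # (H, W, D) of the image on disk
current_shape = np.array([128, 128, 128])   # (H, W, D) after preprocessing

factor = current_shape / original_shape
clicks = np.array([[120, 60, 77], [30, 200, 10]])  # clicks in original (H, W, D) coordinates
rescaled = np.multiply(clicks, factor).astype(int)  # clicks in the current grid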
+
+
+class ResizeGuidanceMultipleLabelDeepEditd(Transform):
+    """
+    Resize the guidance based on cropped vs resized image.
+
+    """
+
+    def __init__(self, guidance: str, ref_image: str) -> None:
+        self.guidance = guidance
+        self.ref_image = ref_image
+
+    def __call__(self, data):
+        d = dict(data)
+        # Assume channel is first and depth is last CHWD
+        current_shape = d[self.ref_image].shape[1:]
+
+        meta_dict_key = "image_meta_dict"
+        # extract affine matrix from metadata
+        if isinstance(d[self.ref_image], MetaTensor):
+            meta_dict = d[self.ref_image].meta
+        elif meta_dict_key in d:
+            meta_dict = d[meta_dict_key]
+        else:
+            raise ValueError(
+                f"{meta_dict_key} is not found. Please check whether it is the correct image meta key."
+            )
+
+        original_shape = meta_dict["spatial_shape"]
+
+        factor = np.divide(current_shape, original_shape)
+        all_guidances = {}
+        for key_label in d[self.guidance].keys():
+            guidance = (
+                np.multiply(d[self.guidance][key_label], factor).astype(int).tolist()
+                if len(d[self.guidance][key_label])
+                else []
+            )
+            all_guidances[key_label] = guidance
+
+        d[self.guidance] = all_guidances
+        return d
+
+
+class SplitPredsLabeld(MapTransform):
+    """
+    Split preds and labels for individual evaluation.
+
+    """
+
+    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]:
+        d: dict = dict(data)
+        for key in self.key_iterator(d):
+            if key == "pred":
+                for idx, (key_label, _) in enumerate(d["label_names"].items()):
+                    if key_label != "background":
+                        d[f"pred_{key_label}"] = d[key][idx + 1, ...][None]
+                        d[f"label_{key_label}"] = d["label"][idx + 1, ...][None]
+            elif key != "pred":
+                logger.info("This is only for pred key")
+        return d
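Editor's note: a sketch (not part of the patch) of the channel slicing done by `SplitPredsLabeld`. The label dictionary and tensor shape are invented; the example assumes a one-hot style prediction with the background channel first and a label dictionary that lists background last, which is the ordering the `idx + 1` indexing relies on.

# Editor's sketch: split a (C, H, W, D) prediction into per-label single-channel arrays.
import numpy as np

label_names = {"spleen": 1, "liver": 2, "background": 0}  # background listed last (assumed convention)
pred = np.random.rand(3, 4, 4, 4)  # (C, H, W, D): channel 0 = background, 1 = spleen, 2 = liver

split = {}
for idx, (key_label, _) in enumerate(label_names.items()):
    if key_label != "background":
        # idx + 1 picks this label's channel; [None] restores a leading channel axis
        split[f"pred_{key_label}"] = pred[idx + 1, ...][None]
# split["pred_spleen"].shape == (1, 4, 4, 4)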
+
+
+class AddInitialSeedPointMissingLabelsd(Randomizable, MapTransform):
+    """
+    Add random guidance as initial seed point for a given label.
+
+    Note that the label is of size (C, D, H, W) or (C, H, W).
+    The guidance is of size (2, N, # of dims) where N is the number of guidance points added;
+    # of dims = 4 for (C, D, H, W) and # of dims = 3 for (C, H, W).
+
+    Args:
+        guidance: key to store guidance.
+        sids: key that represents lists of valid slice indices for the given label.
+        sid: key that represents the slice to add initial seed point. If not present, random sid will be chosen.
+        connected_regions: maximum connected regions to use for adding initial points.
+    """
+
+    def __init__(
+        self,
+        keys: KeysCollection,
+        guidance: str = "guidance",
+        sids: str = "sids",
+        sid: str = "sid",
+        connected_regions: int = 5,
+        allow_missing_keys: bool = False,
+    ):
+        super().__init__(keys, allow_missing_keys)
+        self.sids_key = sids
+        self.sid_key = sid
+        self.sid: dict[str, int] = dict()
+        self.guidance = guidance
+        self.connected_regions = connected_regions
+
+    def _apply(self, label, sid):
+        dimensions = 3 if len(label.shape) > 3 else 2
+        self.default_guidance = [-1] * (dimensions + 1)
+
+        dims = dimensions
+        if sid is not None and dimensions == 3:
+            dims = 2
+            label = label[0][..., sid][np.newaxis]  # Assume channel is first and depth is last CHWD
+
+        # THERE MAY BE MULTIPLE BLOBS FOR SINGLE LABEL IN THE SELECTED SLICE
+        label = (label > 0.5).astype(np.float32)
+        # measure.label: Label connected regions of an integer array - Two pixels are connected
+        # when they are neighbors and have the same value
+        blobs_labels = measure.label(label.astype(int), background=0) if dims == 2 else label
+
+        label_guidance = []
+        # If this label is not present in the selected slice, add the default guidance
+        if np.max(blobs_labels) <= 0:
+            label_guidance.append(self.default_guidance)
+        else:
+            for ridx in range(1, 2 if dims == 3 else self.connected_regions + 1):
+                if dims == 2:
+                    label = (blobs_labels == ridx).astype(np.float32)
+                    if np.sum(label) == 0:
+                        label_guidance.append(self.default_guidance)
+                        continue
+
+                # The distance transform provides a metric or measure of the separation of points in the image.
+                # This function calculates the distance between each pixel that is set to off (0) and
+                # the nearest nonzero pixel for binary images
+                # http://matlab.izmiran.ru/help/toolbox/images/morph14.html
+                distance = distance_transform_cdt(label).flatten()
+                probability = np.exp(distance) - 1.0
+
+                idx = np.where(label.flatten() > 0)[0]
+                seed = self.R.choice(idx, size=1, p=probability[idx] / np.sum(probability[idx]))
+                dst = distance[seed]
+
+                g = np.asarray(np.unravel_index(seed, label.shape)).transpose().tolist()[0]
+                g[0] = dst[0]  # for debug
+                if dimensions == 2 or dims == 3:
+                    label_guidance.append(g)
+                else:
+                    # Clicks are created using this convention Channel Height Width Depth (CHWD)
+                    label_guidance.append([g[0], g[-2], g[-1], sid])  # Assume channel is first and depth is last CHWD
+
+        return np.asarray(label_guidance)
+
+    def _randomize(self, d, key_label):
+        sids = d.get(self.sids_key).get(key_label) if d.get(self.sids_key) is not None else None
+        sid = d.get(self.sid_key).get(key_label) if d.get(self.sid_key) is not None else None
+        if sids is not None and sids:
+            if sid is None or sid not in sids:
+                sid = self.R.choice(sids, replace=False)
+        else:
+            logger.info(f"No slice IDs for label: {key_label}")
+            sid = None
+        self.sid[key_label] = sid
+
+    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]:
+        d: dict = dict(data)
+        for key in self.key_iterator(d):
+            if key == "label":
+                label_guidances = {}
+                for key_label in d["sids"].keys():
+                    # Randomize: Select a random slice
+                    self._randomize(d, key_label)
+                    # Generate guidance based on the selected slice
+                    tmp_label = np.copy(d[key])
+                    # Taking one label to create the guidance
+                    if key_label != "background":
+                        tmp_label[tmp_label != float(d["label_names"][key_label])] = 0
+                    else:
+                        tmp_label[tmp_label != float(d["label_names"][key_label])] = 1
+                        tmp_label = 1 - tmp_label
+                    label_guidances[key_label] = json.dumps(
+                        self._apply(tmp_label, self.sid.get(key_label)).astype(int).tolist()
+                    )
+                d[self.guidance] = label_guidances
+                return d
+            else:
+                print("This transform only applies to label key")
+                return d
+
+
+class FindAllValidSlicesMissingLabelsd(MapTransform):
+    """
+    Find/List all valid slices in the labels.
+    Label is assumed to be a 4D Volume with shape CHWD, where C=1.
+    Args:
+        sids: key to store slice indices having a valid label map.
+    """
+
+    def __init__(self, keys: KeysCollection, sids: Hashable = "sids", allow_missing_keys: bool = False):
+        super().__init__(keys, allow_missing_keys)
+        self.sids = sids
+
+    def _apply(self, label, d):
+        sids = {}
+        for key_label in d["label_names"].keys():
+            l_ids = []
+            for sid in range(label.shape[-1]):  # Assume channel is first and depth is last CHWD
+                if d["label_names"][key_label] in label[0][..., sid]:
+                    l_ids.append(sid)
+            # If there are no slices with the label
+            if l_ids == []:
+                l_ids = [-1] * 10
+            sids[key_label] = l_ids
+        return sids
+
+    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> dict[Hashable, np.ndarray]:
+        d: dict = dict(data)
+        for key in self.key_iterator(d):
+            if key == "label":
+                label = d[key]
+                if label.shape[0] != 1:
+                    raise ValueError("Only supports single channel labels!")
+
+                if len(label.shape) != 4:  # only for 3D
+                    raise ValueError("Only supports label with shape CHWD!")
+
+                sids = self._apply(label, d)
+                if sids is not None and len(sids.keys()):
+                    d[self.sids] = sids
+                return d
+            else:
+                print("This transform only applies to label key")
+                return d
diff --git a/SegMamba/monai/apps/deepgrow/__init__.py b/SegMamba/monai/apps/deepgrow/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e97f8940782e96a77c1c08483fc41da9a48ae22
--- /dev/null
+++ b/SegMamba/monai/apps/deepgrow/__init__.py
@@ -0,0 +1,10 @@
+# Copyright (c) MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/SegMamba/monai/apps/deepgrow/dataset.py b/SegMamba/monai/apps/deepgrow/dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..802d86e0c72d9ac9c6ebfce0ed4270801290f8a5
--- /dev/null
+++ b/SegMamba/monai/apps/deepgrow/dataset.py
@@ -0,0 +1,271 @@
+# Copyright (c) MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import logging
+import os
+from collections.abc import Sequence
+
+import numpy as np
+
+from monai.config import PathLike
+from monai.transforms import Compose, EnsureChannelFirstd, LoadImaged, Orientationd, Spacingd, SqueezeDimd, Transform
+from monai.utils import GridSampleMode
+
+
+def create_dataset(
+    datalist: list[dict],
+    output_dir: str,
+    dimension: int,
+    pixdim: Sequence[float] | float,
+    image_key: str = "image",
+    label_key: str = "label",
+    base_dir: PathLike | None = None,
+    limit: int = 0,
+    relative_path: bool = False,
+    transforms: Transform | None = None,
+) -> list[dict]:
+    """
+    Utility to pre-process and create a dataset list for Deepgrow training over an existing one.
+    The input data list is normally a list of images and labels (3D volume) that needs pre-processing
+    for the Deepgrow training pipeline.
+
+    Args:
+        datalist: A list of data dictionaries. Each entry should at least contain 'image_key': .
+            For example, typical input data can be a list of dictionaries::
+
+                [{'image': , 'label':