emad2001 committed commit b4d7ac8 (verified) · 1 parent: 783249a

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes.

Files changed (50):
  1. .gitattributes +17 -0
  2. SegMamba/.DS_Store +0 -0
  3. SegMamba/.gitignore +160 -0
  4. SegMamba/0_inference.py +20 -0
  5. SegMamba/1_rename_mri_data.py +26 -0
  6. SegMamba/2_preprocessing_mri.py +45 -0
  7. SegMamba/3_train.py +168 -0
  8. SegMamba/4_predict.py +139 -0
  9. SegMamba/5_compute_metrics.py +84 -0
  10. SegMamba/README.md +132 -0
  11. SegMamba/causal-conv1d/.DS_Store +0 -0
  12. SegMamba/causal-conv1d/AUTHORS +1 -0
  13. SegMamba/causal-conv1d/LICENSE +29 -0
  14. SegMamba/causal-conv1d/README.md +1 -0
  15. SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/__init__.py +3 -0
  16. SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/causal_conv1d_interface.py +104 -0
  17. SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d_cuda.cpython-312-x86_64-linux-gnu.so +3 -0
  18. SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d.o +3 -0
  19. SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_bwd.o +3 -0
  20. SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_fwd.o +3 -0
  21. SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_update.o +3 -0
  22. SegMamba/causal-conv1d/causal_conv1d.egg-info/PKG-INFO +29 -0
  23. SegMamba/causal-conv1d/causal_conv1d.egg-info/SOURCES.txt +16 -0
  24. SegMamba/causal-conv1d/causal_conv1d.egg-info/dependency_links.txt +1 -0
  25. SegMamba/causal-conv1d/causal_conv1d.egg-info/requires.txt +3 -0
  26. SegMamba/causal-conv1d/causal_conv1d.egg-info/top_level.txt +2 -0
  27. SegMamba/causal-conv1d/causal_conv1d/__init__.py +3 -0
  28. SegMamba/causal-conv1d/causal_conv1d/causal_conv1d_interface.py +104 -0
  29. SegMamba/causal-conv1d/csrc/causal_conv1d.cpp +333 -0
  30. SegMamba/causal-conv1d/csrc/causal_conv1d.h +53 -0
  31. SegMamba/causal-conv1d/csrc/causal_conv1d_bwd.cu +525 -0
  32. SegMamba/causal-conv1d/csrc/causal_conv1d_common.h +64 -0
  33. SegMamba/causal-conv1d/csrc/causal_conv1d_fwd.cu +350 -0
  34. SegMamba/causal-conv1d/csrc/causal_conv1d_update.cu +96 -0
  35. SegMamba/causal-conv1d/csrc/static_switch.h +25 -0
  36. SegMamba/causal-conv1d/setup.py +264 -0
  37. SegMamba/causal-conv1d/tests/test_causal_conv1d.py +173 -0
  38. SegMamba/images/data_structure.jpg +3 -0
  39. SegMamba/images/method_figure.jpg +3 -0
  40. SegMamba/images/modules.jpg +3 -0
  41. SegMamba/images/segmamba_ablation.jpg +3 -0
  42. SegMamba/light_training/.DS_Store +0 -0
  43. SegMamba/light_training/augment/multi_processor.py +10 -0
  44. SegMamba/light_training/augment/train_augment.py +279 -0
  45. SegMamba/light_training/dataloading/__init__.py +0 -0
  46. SegMamba/light_training/dataloading/base_data_loader.py +213 -0
  47. SegMamba/light_training/dataloading/dataset.py +319 -0
  48. SegMamba/light_training/dataloading/dataset_sdm_edge.py +331 -0
  49. SegMamba/light_training/dataloading/get_train_val_test_datalist.py +36 -0
  50. SegMamba/light_training/dataloading/utils.py +25 -0
.gitattributes CHANGED
@@ -57,3 +57,20 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d_cuda.cpython-312-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d.o filter=lfs diff=lfs merge=lfs -text
+SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_bwd.o filter=lfs diff=lfs merge=lfs -text
+SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_fwd.o filter=lfs diff=lfs merge=lfs -text
+SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_update.o filter=lfs diff=lfs merge=lfs -text
+SegMamba/mamba/build/lib.linux-x86_64-cpython-312/selective_scan_cuda.cpython-312-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+SegMamba/mamba/build/temp.linux-x86_64-cpython-312/.ninja_deps filter=lfs diff=lfs merge=lfs -text
+SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan.o filter=lfs diff=lfs merge=lfs -text
+SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_complex.o filter=lfs diff=lfs merge=lfs -text
+SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_real.o filter=lfs diff=lfs merge=lfs -text
+SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_complex.o filter=lfs diff=lfs merge=lfs -text
+SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_real.o filter=lfs diff=lfs merge=lfs -text
+SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_complex.o filter=lfs diff=lfs merge=lfs -text
+SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_real.o filter=lfs diff=lfs merge=lfs -text
+SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_bf16.o filter=lfs diff=lfs merge=lfs -text
+SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp16.o filter=lfs diff=lfs merge=lfs -text
+SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp32.o filter=lfs diff=lfs merge=lfs -text
SegMamba/.DS_Store ADDED
Binary file (8.2 kB).
 
SegMamba/.gitignore ADDED
@@ -0,0 +1,160 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
SegMamba/0_inference.py ADDED
@@ -0,0 +1,20 @@
import torch
from model_segmamba.segmamba import SegMamba

t1 = torch.rand(1, 4, 128, 128, 128).cuda()

model = SegMamba(in_chans=4,
                 out_chans=4,
                 depths=[2, 2, 2, 2],
                 feat_size=[48, 96, 192, 384]).cuda()

out = model(t1)

print(out.shape)
SegMamba/1_rename_mri_data.py ADDED
@@ -0,0 +1,26 @@
import os

data_dir = "./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/"

all_cases = os.listdir(data_dir)

for case_name in all_cases:
    case_dir = os.path.join(data_dir, case_name)

    for data_name in os.listdir(case_dir):

        if "-" not in data_name:
            continue
        new_name = data_name.split("-")[-1]

        new_path = os.path.join(case_dir, new_name)

        old_path = os.path.join(case_dir, data_name)

        os.rename(old_path, new_path)

        print(f"{new_path} renamed successfully")
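The renaming script above keeps only the modality suffix of each file name by splitting on "-" and taking the last piece. A minimal sketch of that logic on a hypothetical BraTS2023 file name (the exact naming below is an assumption for illustration, not taken from the dataset):

```python
# Illustration only: the original file name shown here is hypothetical.
data_name = "BraTS-GLI-00005-000-t1c.nii.gz"   # assumed original name
new_name = data_name.split("-")[-1]            # keep everything after the last "-"
print(new_name)                                # -> "t1c.nii.gz"
```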
SegMamba/2_preprocessing_mri.py ADDED
@@ -0,0 +1,45 @@
from light_training.preprocessing.preprocessors.preprocessor_mri import MultiModalityPreprocessor
import numpy as np
import pickle
import json

data_filename = ["t2w.nii.gz",
                 "t2f.nii.gz",
                 "t1n.nii.gz",
                 "t1c.nii.gz"]
seg_filename = "seg.nii.gz"

base_dir = "./data/raw_data/BraTS2023/"
image_dir = "ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData"

def process_train():
    preprocessor = MultiModalityPreprocessor(base_dir=base_dir,
                                             image_dir=image_dir,
                                             data_filenames=data_filename,
                                             seg_filename=seg_filename)

    out_spacing = [1.0, 1.0, 1.0]
    output_dir = "./data/fullres/train/"

    preprocessor.run(output_spacing=out_spacing,
                     output_dir=output_dir,
                     all_labels=[1, 2, 3],
                     )

def plan():
    preprocessor = MultiModalityPreprocessor(base_dir=base_dir,
                                             image_dir=image_dir,
                                             data_filenames=data_filename,
                                             seg_filename=seg_filename)

    preprocessor.run_plan()


if __name__ == "__main__":

    plan()
    process_train()
SegMamba/3_train.py ADDED
@@ -0,0 +1,168 @@
import numpy as np
from light_training.dataloading.dataset import get_train_val_test_loader_from_train
import torch
import torch.nn as nn
from monai.inferers import SlidingWindowInferer
from light_training.evaluation.metric import dice
from light_training.trainer import Trainer
from monai.utils import set_determinism
from light_training.utils.files_helper import save_new_model_and_delete_last
from monai.losses.dice import DiceLoss
set_determinism(123)
import os

data_dir = "./data/fullres/train"
logdir = f"./logs/segmamba"

model_save_path = os.path.join(logdir, "model")
# augmentation = "nomirror"
augmentation = True

env = "pytorch"
max_epoch = 1000
batch_size = 2
val_every = 2
num_gpus = 1
device = "cuda:0"
roi_size = [128, 128, 128]

def func(m, epochs):
    return np.exp(-10 * (1 - m / epochs) ** 2)

class BraTSTrainer(Trainer):
    def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"):
        super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script)
        self.window_infer = SlidingWindowInferer(roi_size=roi_size,
                                                 sw_batch_size=1,
                                                 overlap=0.5)
        self.augmentation = augmentation
        from model_segmamba.segmamba import SegMamba

        self.model = SegMamba(in_chans=4,
                              out_chans=4,
                              depths=[2, 2, 2, 2],
                              feat_size=[48, 96, 192, 384])

        self.patch_size = roi_size
        self.best_mean_dice = 0.0
        self.ce = nn.CrossEntropyLoss()
        self.mse = nn.MSELoss()
        self.train_process = 18
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=1e-2, weight_decay=3e-5,
                                         momentum=0.99, nesterov=True)

        self.scheduler_type = "poly"
        self.cross = nn.CrossEntropyLoss()

    def training_step(self, batch):
        image, label = self.get_input(batch)

        pred = self.model(image)

        loss = self.cross(pred, label)

        self.log("training_loss", loss, step=self.global_step)

        return loss

    def convert_labels(self, labels):
        ## TC, WT and ET
        result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3]

        return torch.cat(result, dim=1).float()

    def get_input(self, batch):
        image = batch["data"]
        label = batch["seg"]

        label = label[:, 0].long()
        return image, label

    def cal_metric(self, gt, pred, voxel_spacing=[1.0, 1.0, 1.0]):
        if pred.sum() > 0 and gt.sum() > 0:
            d = dice(pred, gt)
            return np.array([d, 50])

        elif gt.sum() == 0 and pred.sum() == 0:
            return np.array([1.0, 50])

        else:
            return np.array([0.0, 50])

    def validation_step(self, batch):
        image, label = self.get_input(batch)

        output = self.model(image)

        output = output.argmax(dim=1)

        output = output[:, None]
        output = self.convert_labels(output)

        label = label[:, None]
        label = self.convert_labels(label)

        output = output.cpu().numpy()
        target = label.cpu().numpy()

        dices = []

        c = 3
        for i in range(0, c):
            pred_c = output[:, i]
            target_c = target[:, i]

            cal_dice, _ = self.cal_metric(target_c, pred_c)
            dices.append(cal_dice)

        return dices

    def validation_end(self, val_outputs):
        dices = val_outputs

        tc, wt, et = dices[0].mean(), dices[1].mean(), dices[2].mean()

        print(f"dices is {tc, wt, et}")

        mean_dice = (tc + wt + et) / 3

        self.log("tc", tc, step=self.epoch)
        self.log("wt", wt, step=self.epoch)
        self.log("et", et, step=self.epoch)

        self.log("mean_dice", mean_dice, step=self.epoch)

        if mean_dice > self.best_mean_dice:
            self.best_mean_dice = mean_dice
            save_new_model_and_delete_last(self.model,
                                           os.path.join(model_save_path,
                                                        f"best_model_{mean_dice:.4f}.pt"),
                                           delete_symbol="best_model")

        save_new_model_and_delete_last(self.model,
                                       os.path.join(model_save_path,
                                                    f"final_model_{mean_dice:.4f}.pt"),
                                       delete_symbol="final_model")

        if (self.epoch + 1) % 100 == 0:
            torch.save(self.model.state_dict(), os.path.join(model_save_path, f"tmp_model_ep{self.epoch}_{mean_dice:.4f}.pt"))

        print(f"mean_dice is {mean_dice}")

if __name__ == "__main__":

    trainer = BraTSTrainer(env_type=env,
                           max_epochs=max_epoch,
                           batch_size=batch_size,
                           device=device,
                           logdir=logdir,
                           val_every=val_every,
                           num_gpus=num_gpus,
                           master_port=17759,
                           training_script=__file__)

    train_ds, val_ds, test_ds = get_train_val_test_loader_from_train(data_dir)

    trainer.train(train_dataset=train_ds, val_dataset=val_ds)
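The trainer's `convert_labels` maps the raw BraTS labels {1, 2, 3} to the three evaluation regions (TC = 1 or 3, WT = 1, 2 or 3, ET = 3) before Dice is computed. A minimal self-contained sketch of that mapping on a toy tensor, for illustration only (not part of the training script):

```python
# Standalone sketch of the label-to-region conversion used in BraTSTrainer.convert_labels.
import torch

labels = torch.tensor([[[0, 1, 2, 3]]])[:, None]          # shape (1, 1, 1, 4), toy "volume"
result = [(labels == 1) | (labels == 3),                   # tumor core (TC)
          (labels == 1) | (labels == 3) | (labels == 2),   # whole tumor (WT)
          labels == 3]                                      # enhancing tumor (ET)
regions = torch.cat(result, dim=1).float()                  # shape (1, 3, 1, 4)
print(regions[0, :, 0])                                     # TC/WT/ET masks for the 4 voxels
```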
SegMamba/4_predict.py ADDED
@@ -0,0 +1,139 @@
import numpy as np
from light_training.dataloading.dataset import get_train_val_test_loader_from_train
import torch
import torch.nn as nn
from monai.inferers import SlidingWindowInferer
from light_training.evaluation.metric import dice
from light_training.trainer import Trainer
from monai.utils import set_determinism
from light_training.evaluation.metric import dice
set_determinism(123)
import os
from light_training.prediction import Predictor

data_dir = "./data/fullres/train"
env = "pytorch"
max_epoch = 1000
batch_size = 2
val_every = 2
num_gpus = 1
device = "cuda:0"
patch_size = [128, 128, 128]

class BraTSTrainer(Trainer):
    def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"):
        super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script)

        self.patch_size = patch_size
        self.augmentation = False

    def convert_labels(self, labels):
        ## TC, WT and ET
        result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3]

        return torch.cat(result, dim=1).float()

    def get_input(self, batch):
        image = batch["data"]
        label = batch["seg"]
        properties = batch["properties"]
        label = self.convert_labels(label)

        return image, label, properties

    def define_model_segmamba(self):
        from model_segmamba.segmamba import SegMamba
        model = SegMamba(in_chans=4,
                         out_chans=4,
                         depths=[2, 2, 2, 2],
                         feat_size=[48, 96, 192, 384])

        model_path = "/home/xingzhaohu/dev/jiuding_code/brats23/logs/segmamba/model/final_model_0.9038.pt"
        new_sd = self.filte_state_dict(torch.load(model_path, map_location="cpu"))
        model.load_state_dict(new_sd)
        model.eval()
        window_infer = SlidingWindowInferer(roi_size=patch_size,
                                            sw_batch_size=2,
                                            overlap=0.5,
                                            progress=True,
                                            mode="gaussian")

        predictor = Predictor(window_infer=window_infer,
                              mirror_axes=[0, 1, 2])

        save_path = "./prediction_results/segmamba"
        os.makedirs(save_path, exist_ok=True)

        return model, predictor, save_path

    def validation_step(self, batch):
        image, label, properties = self.get_input(batch)
        ddim = False

        model, predictor, save_path = self.define_model_segmamba()

        model_output = predictor.maybe_mirror_and_predict(image, model, device=device)

        model_output = predictor.predict_raw_probability(model_output,
                                                         properties=properties)

        model_output = model_output.argmax(dim=0)[None]
        model_output = self.convert_labels_dim0(model_output)

        label = label[0]
        c = 3
        dices = []
        for i in range(0, c):
            output_i = model_output[i].cpu().numpy()
            label_i = label[i].cpu().numpy()
            d = dice(output_i, label_i)
            dices.append(d)

        print(dices)

        model_output = predictor.predict_noncrop_probability(model_output, properties)
        predictor.save_to_nii(model_output,
                              raw_spacing=[1, 1, 1],
                              case_name=properties['name'][0],
                              save_dir=save_path)

        return 0

    def convert_labels_dim0(self, labels):
        ## TC, WT and ET
        result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3]

        return torch.cat(result, dim=0).float()

    def filte_state_dict(self, sd):
        if "module" in sd:
            sd = sd["module"]
        new_sd = {}
        for k, v in sd.items():
            k = str(k)
            new_k = k[7:] if k.startswith("module") else k
            new_sd[new_k] = v
        del sd
        return new_sd

if __name__ == "__main__":

    trainer = BraTSTrainer(env_type=env,
                           max_epochs=max_epoch,
                           batch_size=batch_size,
                           device=device,
                           logdir="",
                           val_every=val_every,
                           num_gpus=num_gpus,
                           master_port=17751,
                           training_script=__file__)

    train_ds, val_ds, test_ds = get_train_val_test_loader_from_train(data_dir)

    trainer.validation_single_gpu(test_ds)

    # print(f"result is {v_mean}")
SegMamba/5_compute_metrics.py ADDED
@@ -0,0 +1,84 @@
from light_training.dataloading.dataset import get_train_val_test_loader_from_train
from monai.utils import set_determinism
import torch
import os
import numpy as np
import SimpleITK as sitk
from medpy import metric
import argparse
from tqdm import tqdm

import numpy as np

set_determinism(123)

parser = argparse.ArgumentParser()

parser.add_argument("--pred_name", required=True, type=str)

results_root = "prediction_results"
args = parser.parse_args()

pred_name = args.pred_name

def cal_metric(gt, pred, voxel_spacing):
    if pred.sum() > 0 and gt.sum() > 0:
        dice = metric.binary.dc(pred, gt)
        hd95 = metric.binary.hd95(pred, gt, voxelspacing=voxel_spacing)
        return np.array([dice, hd95])
    else:
        return np.array([0.0, 50])

def each_cases_metric(gt, pred, voxel_spacing):
    classes_num = 3
    class_wise_metric = np.zeros((classes_num, 2))
    for cls in range(0, classes_num):
        class_wise_metric[cls, ...] = cal_metric(pred[cls], gt[cls], voxel_spacing)
    print(class_wise_metric)
    return class_wise_metric

def convert_labels(labels):
    ## TC, WT and ET
    labels = labels.unsqueeze(dim=0)

    result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3]

    return torch.cat(result, dim=0).float()


if __name__ == "__main__":
    data_dir = "./data/fullres/train"
    raw_data_dir = "./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/"
    train_ds, val_ds, test_ds = get_train_val_test_loader_from_train(data_dir)
    print(len(test_ds))
    all_results = np.zeros((250, 3, 2))

    ind = 0
    for batch in tqdm(test_ds, total=len(test_ds)):
        properties = batch["properties"]
        case_name = properties["name"]
        gt_itk = os.path.join(raw_data_dir, case_name, f"seg.nii.gz")
        voxel_spacing = [1, 1, 1]
        gt_itk = sitk.ReadImage(gt_itk)
        gt_array = sitk.GetArrayFromImage(gt_itk).astype(np.int32)
        gt_array = torch.from_numpy(gt_array)
        gt_array = convert_labels(gt_array).numpy()
        pred_itk = sitk.ReadImage(f"./{results_root}/{pred_name}/{case_name}.nii.gz")
        pred_array = sitk.GetArrayFromImage(pred_itk)

        m = each_cases_metric(gt_array, pred_array, voxel_spacing)

        all_results[ind, ...] = m

        ind += 1

    os.makedirs(f"./{results_root}/result_metrics/", exist_ok=True)
    np.save(f"./{results_root}/result_metrics/{pred_name}.npy", all_results)

    result = np.load(f"./{results_root}/result_metrics/{pred_name}.npy")
    print(result.shape)
    print(result.mean(axis=0))
    print(result.std(axis=0))
SegMamba/README.md ADDED
@@ -0,0 +1,132 @@
# SegMamba

**Recent news: if you are interested in research on vision-language models, please refer to our latest work: https://github.com/MrGiovanni/RadGPT (ICCV 2025)**

**We have now open-sourced the pre-processing, training, inference, and metrics computation code.**

SegMamba: Long-range Sequential Modeling Mamba For 3D Medical Image Segmentation

[https://arxiv.org/abs/2401.13560](https://arxiv.org/abs/2401.13560)

![](images/method_figure.jpg)

![](images/modules.jpg)

Our advantage in speed and memory:
![](images/segmamba_ablation.jpg)

## Contact
If you have any questions about our project, please feel free to contact us by email at zxing565@connect.hkust-gz.edu.cn or via WeChat at 18340097191. Furthermore, the data underlying this article will be shared on reasonable request to gaof57@mail.sysu.edu.cn.

## Environment install
Clone this repository and navigate to the root directory of the project.

```bash
git clone https://github.com/ge-xing/SegMamba.git

cd SegMamba
```
### Install causal-conv1d

```bash
cd causal-conv1d

python setup.py install
```

### Install mamba

```bash
cd mamba

python setup.py install
```

### Install monai

```bash
pip install monai
```

## Simple test

```bash
python 0_inference.py
```

## Preprocessing, training, testing, inference, and metrics computation

### Data downloading

The data is from [https://arxiv.org/abs/2305.17033](https://arxiv.org/abs/2305.17033).

Download from Baidu Disk: [https://pan.baidu.com/s/1C0FUHdDtWNaYWLtDDP9TnA?pwd=ty22](https://pan.baidu.com/s/1C0FUHdDtWNaYWLtDDP9TnA?pwd=ty22) (extraction code: ty22)

Download from OneDrive: [https://hkustgz-my.sharepoint.com/:f:/g/personal/zxing565_connect_hkust-gz_edu_cn/EqqaINbHRxREuIj0XGicY2EBv8hjwEFKgFOhF_Ub0mvENw?e=yTpE9B](https://hkustgz-my.sharepoint.com/:f:/g/personal/zxing565_connect_hkust-gz_edu_cn/EqqaINbHRxREuIj0XGicY2EBv8hjwEFKgFOhF_Ub0mvENw?e=yTpE9B)

### Preprocessing
In my setting, the data directory of BraTS2023 is "./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/".

First, we need to run the renaming step.

```bash
python 1_rename_mri_data.py
```

Then, we run the pre-processing code to perform resampling, normalization, and cropping.

```bash
python 2_preprocessing_mri.py
```

After pre-processing, the data structure will be in this format:

![](images/data_structure.jpg)
### Training

When pre-processing is done, we can train our model.

We mainly use the pre-processed data from the last step: **data_dir = "./data/fullres/train"**

```bash
python 3_train.py
```

The training logs and checkpoints are saved in:
**logdir = f"./logs/segmamba"**

### Inference

Once we have trained our models, we can run inference on all the data in the testing set.

```bash
python 4_predict.py
```

When this process is done, the predicted cases will be saved in this path:
**save_path = "./prediction_results/segmamba"**

### Metrics computation
We can obtain the Dice score and HD95 for each segmentation target (WT, TC, ET for the BraTS2023 dataset) using this code:

```bash
python 5_compute_metrics.py --pred_name="segmamba"
```

## Acknowledgement
Many thanks to these repos for their great contributions!

[https://github.com/MIC-DKFZ/nnUNet](https://github.com/MIC-DKFZ/nnUNet)

[https://github.com/Project-MONAI/MONAI](https://github.com/Project-MONAI/MONAI)

[https://github.com/hustvl/Vim](https://github.com/hustvl/Vim)

[https://github.com/bowang-lab/U-Mamba](https://github.com/bowang-lab/U-Mamba)
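For reference, 5_compute_metrics.py stores the per-case results as a NumPy array of shape (num_cases, 3, 2), with regions ordered [TC, WT, ET] and metrics ordered [Dice, HD95]. A minimal sketch of how one might summarise that file afterwards (the path below assumes `--pred_name="segmamba"`, as in the command above):

```python
# Sketch only: summarises the array written by 5_compute_metrics.py.
import numpy as np

results = np.load("./prediction_results/result_metrics/segmamba.npy")  # (num_cases, 3, 2)
for i, region in enumerate(["TC", "WT", "ET"]):
    dice_mean = results[:, i, 0].mean()
    hd95_mean = results[:, i, 1].mean()
    print(f"{region}: Dice={dice_mean:.4f}, HD95={hd95_mean:.2f}")
```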
SegMamba/causal-conv1d/.DS_Store ADDED
Binary file (6.15 kB).
 
SegMamba/causal-conv1d/AUTHORS ADDED
@@ -0,0 +1 @@
Tri Dao, tri@tridao.me
SegMamba/causal-conv1d/LICENSE ADDED
@@ -0,0 +1,29 @@
BSD 3-Clause License

Copyright (c) 2022, the respective contributors, as shown by the AUTHORS file.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of the copyright holder nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
SegMamba/causal-conv1d/README.md ADDED
@@ -0,0 +1 @@
# Causal depthwise conv1d in CUDA with a PyTorch interface
SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/__init__.py ADDED
@@ -0,0 +1,3 @@
__version__ = "1.0.0"

from causal_conv1d.causal_conv1d_interface import causal_conv1d_fn, causal_conv1d_update
SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/causal_conv1d_interface.py ADDED
@@ -0,0 +1,104 @@
# Copyright (c) 2023, Tri Dao.

import torch
import torch.nn.functional as F


import causal_conv1d_cuda


class CausalConv1dFn(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, weight, bias=None, activation=None):
        if activation not in [None, "silu", "swish"]:
            raise NotImplementedError("activation must be None, silu, or swish")
        if x.stride(2) != 1 and x.stride(1) != 1:
            x = x.contiguous()
        bias = bias.contiguous() if bias is not None else None
        ctx.save_for_backward(x, weight, bias)
        ctx.activation = activation in ["silu", "swish"]
        out = causal_conv1d_cuda.causal_conv1d_fwd(x, weight, bias, ctx.activation)
        return out

    @staticmethod
    def backward(ctx, dout):
        x, weight, bias = ctx.saved_tensors
        if dout.stride(2) != 1 and dout.stride(1) != 1:
            dout = dout.contiguous()
        # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the
        # backward of conv1d with the backward of chunk).
        # Here we just pass in None and dx will be allocated in the C++ code.
        dx, dweight, dbias = causal_conv1d_cuda.causal_conv1d_bwd(
            x, weight, bias, dout, None, ctx.activation
        )
        return dx, dweight, dbias if bias is not None else None, None


def causal_conv1d_fn(x, weight, bias=None, activation=None):
    """
    x: (batch, dim, seqlen)
    weight: (dim, width)
    bias: (dim,)
    activation: either None or "silu" or "swish"

    out: (batch, dim, seqlen)
    """
    return CausalConv1dFn.apply(x, weight, bias, activation)


def causal_conv1d_ref(x, weight, bias=None, activation=None):
    """
    x: (batch, dim, seqlen)
    weight: (dim, width)
    bias: (dim,)

    out: (batch, dim, seqlen)
    """
    if activation not in [None, "silu", "swish"]:
        raise NotImplementedError("activation must be None, silu, or swish")
    dtype_in = x.dtype
    x = x.to(weight.dtype)
    seqlen = x.shape[-1]
    dim, width = weight.shape
    out = F.conv1d(x, weight.unsqueeze(1), bias, padding=width - 1, groups=dim)
    out = out[..., :seqlen]
    return (out if activation is None else F.silu(out)).to(dtype=dtype_in)


def causal_conv1d_update(x, conv_state, weight, bias=None, activation=None):
    """
    x: (batch, dim)
    conv_state: (batch, dim, width)
    weight: (dim, width)
    bias: (dim,)

    out: (batch, dim)
    """
    if activation not in [None, "silu", "swish"]:
        raise NotImplementedError("activation must be None, silu, or swish")
    activation = activation in ["silu", "swish"]
    return causal_conv1d_cuda.causal_conv1d_update(x, conv_state, weight, bias, activation)


def causal_conv1d_update_ref(x, conv_state, weight, bias=None, activation=None):
    """
    x: (batch, dim)
    conv_state: (batch, dim, width)
    weight: (dim, width)
    bias: (dim,)

    out: (batch, dim)
    """
    if activation not in [None, "silu", "swish"]:
        raise NotImplementedError("activation must be None, silu, or swish")
    dtype_in = x.dtype
    batch, dim = x.shape
    width = weight.shape[1]
    assert conv_state.shape == (batch, dim, width)
    assert weight.shape == (dim, width)
    conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1))  # Update state (B D W)
    conv_state[:, :, -1] = x
    out = torch.sum(conv_state * weight, dim=-1)  # (B D)
    if bias is not None:
        out += bias
    return (out if activation is None else F.silu(out)).to(dtype=dtype_in)
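The docstrings above define the expected tensor shapes. A minimal, CPU-only sketch that exercises the same logic via the pure-PyTorch reference path (re-implemented here so it runs without the compiled `causal_conv1d_cuda` extension; `causal_conv1d_fn` has the same signature but needs the built extension and a GPU):

```python
# Sketch only: mirrors causal_conv1d_ref from causal_conv1d_interface.py.
import torch
import torch.nn.functional as F

def causal_conv1d_ref(x, weight, bias=None, activation=None):
    # depthwise causal conv: left-pad by width-1, then trim to the original length
    seqlen = x.shape[-1]
    dim, width = weight.shape
    out = F.conv1d(x, weight.unsqueeze(1), bias, padding=width - 1, groups=dim)[..., :seqlen]
    return out if activation is None else F.silu(out)

x = torch.randn(2, 16, 64)     # (batch, dim, seqlen)
weight = torch.randn(16, 4)    # (dim, width); the CUDA kernel supports width 2..4
bias = torch.randn(16)         # (dim,)
out = causal_conv1d_ref(x, weight, bias, activation="silu")
print(out.shape)               # torch.Size([2, 16, 64])
```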
SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d_cuda.cpython-312-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dedd9d99881bf7f043ac14c79ad2b71fea8e93f166482597bfe5a3a09849b627
size 30227360
SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d.o ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ef366a1da5c5f81e3aa761f5cd37bc90c046b17068b504191043faaa162230e5
size 377648
SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_bwd.o ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e581d37f9a1a942c98bdb8b2986540fab333e42092bcdf19e211c12fcc347bdb
size 22535976
SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_fwd.o ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a0f5650bf1e870dbbcaafaa14acbde28f03014ccaf73d8137ae5fa2967807af7
size 6723096
SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_update.o ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1391cc2ab0159135d0887ac6d659fa2ec85466bb6c5978722d55868054b12726
size 910152
SegMamba/causal-conv1d/causal_conv1d.egg-info/PKG-INFO ADDED
@@ -0,0 +1,29 @@
Metadata-Version: 2.4
Name: causal_conv1d
Version: 1.0.0
Summary: Causal depthwise conv1d in CUDA, with a PyTorch interface
Home-page: https://github.com/Dao-AILab/causal-conv1d
Author: Tri Dao
Author-email: tri@tridao.me
Classifier: Programming Language :: Python :: 3
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: Unix
Requires-Python: >=3.7
Description-Content-Type: text/markdown
License-File: LICENSE
License-File: AUTHORS
Requires-Dist: torch
Requires-Dist: packaging
Requires-Dist: ninja
Dynamic: author
Dynamic: author-email
Dynamic: classifier
Dynamic: description
Dynamic: description-content-type
Dynamic: home-page
Dynamic: license-file
Dynamic: requires-dist
Dynamic: requires-python
Dynamic: summary

# Causal depthwise conv1d in CUDA with a PyTorch interface
SegMamba/causal-conv1d/causal_conv1d.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,16 @@
AUTHORS
LICENSE
README.md
setup.py
causal_conv1d/__init__.py
causal_conv1d/causal_conv1d_interface.py
causal_conv1d.egg-info/PKG-INFO
causal_conv1d.egg-info/SOURCES.txt
causal_conv1d.egg-info/dependency_links.txt
causal_conv1d.egg-info/requires.txt
causal_conv1d.egg-info/top_level.txt
csrc/causal_conv1d.cpp
csrc/causal_conv1d_bwd.cu
csrc/causal_conv1d_fwd.cu
csrc/causal_conv1d_update.cu
tests/test_causal_conv1d.py
SegMamba/causal-conv1d/causal_conv1d.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@

SegMamba/causal-conv1d/causal_conv1d.egg-info/requires.txt ADDED
@@ -0,0 +1,3 @@
torch
packaging
ninja
SegMamba/causal-conv1d/causal_conv1d.egg-info/top_level.txt ADDED
@@ -0,0 +1,2 @@
causal_conv1d
causal_conv1d_cuda
SegMamba/causal-conv1d/causal_conv1d/__init__.py ADDED
@@ -0,0 +1,3 @@
__version__ = "1.0.0"

from causal_conv1d.causal_conv1d_interface import causal_conv1d_fn, causal_conv1d_update
SegMamba/causal-conv1d/causal_conv1d/causal_conv1d_interface.py ADDED
@@ -0,0 +1,104 @@
# Copyright (c) 2023, Tri Dao.

import torch
import torch.nn.functional as F


import causal_conv1d_cuda


class CausalConv1dFn(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, weight, bias=None, activation=None):
        if activation not in [None, "silu", "swish"]:
            raise NotImplementedError("activation must be None, silu, or swish")
        if x.stride(2) != 1 and x.stride(1) != 1:
            x = x.contiguous()
        bias = bias.contiguous() if bias is not None else None
        ctx.save_for_backward(x, weight, bias)
        ctx.activation = activation in ["silu", "swish"]
        out = causal_conv1d_cuda.causal_conv1d_fwd(x, weight, bias, ctx.activation)
        return out

    @staticmethod
    def backward(ctx, dout):
        x, weight, bias = ctx.saved_tensors
        if dout.stride(2) != 1 and dout.stride(1) != 1:
            dout = dout.contiguous()
        # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the
        # backward of conv1d with the backward of chunk).
        # Here we just pass in None and dx will be allocated in the C++ code.
        dx, dweight, dbias = causal_conv1d_cuda.causal_conv1d_bwd(
            x, weight, bias, dout, None, ctx.activation
        )
        return dx, dweight, dbias if bias is not None else None, None


def causal_conv1d_fn(x, weight, bias=None, activation=None):
    """
    x: (batch, dim, seqlen)
    weight: (dim, width)
    bias: (dim,)
    activation: either None or "silu" or "swish"

    out: (batch, dim, seqlen)
    """
    return CausalConv1dFn.apply(x, weight, bias, activation)


def causal_conv1d_ref(x, weight, bias=None, activation=None):
    """
    x: (batch, dim, seqlen)
    weight: (dim, width)
    bias: (dim,)

    out: (batch, dim, seqlen)
    """
    if activation not in [None, "silu", "swish"]:
        raise NotImplementedError("activation must be None, silu, or swish")
    dtype_in = x.dtype
    x = x.to(weight.dtype)
    seqlen = x.shape[-1]
    dim, width = weight.shape
    out = F.conv1d(x, weight.unsqueeze(1), bias, padding=width - 1, groups=dim)
    out = out[..., :seqlen]
    return (out if activation is None else F.silu(out)).to(dtype=dtype_in)


def causal_conv1d_update(x, conv_state, weight, bias=None, activation=None):
    """
    x: (batch, dim)
    conv_state: (batch, dim, width)
    weight: (dim, width)
    bias: (dim,)

    out: (batch, dim)
    """
    if activation not in [None, "silu", "swish"]:
        raise NotImplementedError("activation must be None, silu, or swish")
    activation = activation in ["silu", "swish"]
    return causal_conv1d_cuda.causal_conv1d_update(x, conv_state, weight, bias, activation)


def causal_conv1d_update_ref(x, conv_state, weight, bias=None, activation=None):
    """
    x: (batch, dim)
    conv_state: (batch, dim, width)
    weight: (dim, width)
    bias: (dim,)

    out: (batch, dim)
    """
    if activation not in [None, "silu", "swish"]:
        raise NotImplementedError("activation must be None, silu, or swish")
    dtype_in = x.dtype
    batch, dim = x.shape
    width = weight.shape[1]
    assert conv_state.shape == (batch, dim, width)
    assert weight.shape == (dim, width)
    conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1))  # Update state (B D W)
    conv_state[:, :, -1] = x
    out = torch.sum(conv_state * weight, dim=-1)  # (B D)
    if bias is not None:
        out += bias
    return (out if activation is None else F.silu(out)).to(dtype=dtype_in)
SegMamba/causal-conv1d/csrc/causal_conv1d.cpp ADDED
@@ -0,0 +1,333 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2023, Tri Dao.
3
+ ******************************************************************************/
4
+
5
+ #include <ATen/cuda/CUDAContext.h>
6
+ #include <c10/cuda/CUDAGuard.h>
7
+ #include <torch/extension.h>
8
+ #include <vector>
9
+
10
+ #include "causal_conv1d.h"
11
+
12
+ #define CHECK_SHAPE(x, ...) TORCH_CHECK(x.sizes() == torch::IntArrayRef({__VA_ARGS__}), #x " must have shape (" #__VA_ARGS__ ")")
13
+
14
+ #define DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(ITYPE, NAME, ...) \
15
+ if (ITYPE == at::ScalarType::Half) { \
16
+ using input_t = at::Half; \
17
+ __VA_ARGS__(); \
18
+ } else if (ITYPE == at::ScalarType::BFloat16) { \
19
+ using input_t = at::BFloat16; \
20
+ __VA_ARGS__(); \
21
+ } else if (ITYPE == at::ScalarType::Float) { \
22
+ using input_t = float; \
23
+ __VA_ARGS__(); \
24
+ } else { \
25
+ AT_ERROR(#NAME, " not implemented for input type '", toString(ITYPE), "'"); \
26
+ }
27
+
28
+ #define DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(WTYPE, NAME, ...) \
29
+ if (WTYPE == at::ScalarType::Half) { \
30
+ using weight_t = at::Half; \
31
+ __VA_ARGS__(); \
32
+ } else if (WTYPE == at::ScalarType::BFloat16) { \
33
+ using weight_t = at::BFloat16; \
34
+ __VA_ARGS__(); \
35
+ } else if (WTYPE == at::ScalarType::Float) { \
36
+ using weight_t = float; \
37
+ __VA_ARGS__(); \
38
+ } else { \
39
+ AT_ERROR(#NAME, " not implemented for weight type '", toString(WTYPE), "'"); \
40
+ }
41
+
42
+ template<typename input_t, typename weight_t>
43
+ void causal_conv1d_fwd_cuda(ConvParamsBase &params, cudaStream_t stream);
44
+ template <typename input_t, typename weight_t>
45
+ void causal_conv1d_channellast_fwd_cuda(ConvParamsBase &params, cudaStream_t stream);
46
+
47
+ template<typename input_t, typename weight_t>
48
+ void causal_conv1d_bwd_cuda(ConvParamsBwd &params, cudaStream_t stream);
49
+ template<typename input_t, typename weight_t>
50
+ void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd &params, cudaStream_t stream);
51
+
52
+ template<typename input_t, typename weight_t>
53
+ void causal_conv1d_update_cuda(ConvParamsBase &params, cudaStream_t stream);
54
+
55
+ void set_conv_params_fwd(ConvParamsBase &params,
56
+ // sizes
57
+ const size_t batch,
58
+ const size_t dim,
59
+ const size_t seqlen,
60
+ const size_t width,
61
+ // device pointers
62
+ const at::Tensor x,
63
+ const at::Tensor weight,
64
+ const at::Tensor out,
65
+ void* bias_ptr,
66
+ bool silu_activation) {
67
+
68
+ // Reset the parameters
69
+ memset(&params, 0, sizeof(params));
70
+
71
+ params.batch = batch;
72
+ params.dim = dim;
73
+ params.seqlen = seqlen;
74
+ params.width = width;
75
+
76
+ params.silu_activation = silu_activation;
77
+
78
+ // Set the pointers and strides.
79
+ params.x_ptr = x.data_ptr();
80
+ params.weight_ptr = weight.data_ptr();
81
+ params.bias_ptr = bias_ptr;
82
+ params.out_ptr = out.data_ptr();
83
+ // All stride are in elements, not bytes.
84
+ params.x_batch_stride = x.stride(0);
85
+ params.x_c_stride = x.stride(1);
86
+ params.x_l_stride = x.stride(-1);
87
+ params.weight_c_stride = weight.stride(0);
88
+ params.weight_width_stride = weight.stride(1);
89
+ params.out_batch_stride = out.stride(0);
90
+ params.out_c_stride = out.stride(1);
91
+ params.out_l_stride = out.stride(-1);
92
+ }
93
+
94
+
95
+ void set_conv_params_bwd(ConvParamsBwd &params,
96
+ // sizes
97
+ const size_t batch,
98
+ const size_t dim,
99
+ const size_t seqlen,
100
+ const size_t width,
101
+ // device pointers
102
+ const at::Tensor x,
103
+ const at::Tensor weight,
104
+ void* bias_ptr,
105
+ const at::Tensor dout,
106
+ const at::Tensor dx,
107
+ const at::Tensor dweight,
108
+ void* dbias_ptr,
109
+ bool silu_activation) {
110
+ // Pass in "dout" instead of "out", we're not gonna use "out" at all.
111
+ set_conv_params_fwd(params, batch, dim, seqlen, width,
112
+ x, weight, dout, bias_ptr, silu_activation);
113
+
114
+ // Set the pointers and strides.
115
+ params.dout_ptr = dout.data_ptr();
116
+ params.dx_ptr = dx.data_ptr();
117
+ params.dweight_ptr = dweight.data_ptr();
118
+ params.dbias_ptr = dbias_ptr;
119
+ // All stride are in elements, not bytes.
120
+ params.dout_batch_stride = dout.stride(0);
121
+ params.dout_c_stride = dout.stride(1);
122
+ params.dout_l_stride = dout.stride(2);
123
+ params.dweight_c_stride = dweight.stride(0);
124
+ params.dweight_width_stride = dweight.stride(1);
125
+ params.dx_batch_stride = dx.stride(0);
126
+ params.dx_c_stride = dx.stride(1);
127
+ params.dx_l_stride = dx.stride(2);
128
+ }
129
+
130
+ at::Tensor
131
+ causal_conv1d_fwd(const at::Tensor &x, const at::Tensor &weight,
132
+ const c10::optional<at::Tensor> &bias_,
133
+ bool silu_activation) {
134
+ auto input_type = x.scalar_type();
135
+ auto weight_type = weight.scalar_type();
136
+ TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16);
137
+ TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::Half || weight_type == at::ScalarType::BFloat16);
138
+
139
+ TORCH_CHECK(x.is_cuda());
140
+ TORCH_CHECK(weight.is_cuda());
141
+
142
+ const auto sizes = x.sizes();
143
+ const int batch_size = sizes[0];
144
+ const int dim = sizes[1];
145
+ const int seqlen = sizes[2];
146
+ const int width = weight.size(-1);
147
+
148
+ CHECK_SHAPE(x, batch_size, dim, seqlen);
149
+ CHECK_SHAPE(weight, dim, width);
150
+
151
+ TORCH_CHECK(x.stride(2) == 1 || x.stride(1) == 1);
152
+ const bool is_channel_last = x.stride(1) == 1 && x.stride(2) > 1;
153
+
154
+ if (is_channel_last) {
155
+ TORCH_CHECK(dim % 8 == 0, "causal_conv1d only supports channel dimension divisible by 8 for now");
156
+ }
157
+ TORCH_CHECK(width >= 2 && width <= 4, "causal_conv1d only supports width between 2 and 4");
158
+
159
+
160
+ if (bias_.has_value()) {
161
+ auto bias = bias_.value();
162
+ TORCH_CHECK(bias.scalar_type() == weight_type);
163
+ TORCH_CHECK(bias.is_cuda());
164
+ TORCH_CHECK(bias.stride(-1) == 1);
165
+ CHECK_SHAPE(bias, dim);
166
+ }
167
+
168
+ at::Tensor out = torch::empty_like(x);
169
+
170
+ ConvParamsBase params;
171
+ set_conv_params_fwd(params, batch_size, dim, seqlen, width, x, weight, out,
172
+ bias_.has_value() ? bias_.value().data_ptr() : nullptr,
173
+ silu_activation);
174
+
175
+ // Otherwise the kernel will be launched from cuda:0 device
176
+ // Cast to char to avoid compiler warning about narrowing
177
+ at::cuda::CUDAGuard device_guard{(char)x.get_device()};
178
+ auto stream = at::cuda::getCurrentCUDAStream().stream();
179
+ DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(x.scalar_type(), "causal_conv1d_fwd", [&] {
180
+ DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(weight.scalar_type(), "causal_conv1d_fwd", [&] {
181
+ if (!is_channel_last) {
182
+ causal_conv1d_fwd_cuda<input_t, weight_t>(params, stream);
183
+ } else {
184
+ causal_conv1d_channellast_fwd_cuda<input_t, weight_t>(params, stream);
185
+ }
186
+ });
187
+ });
188
+ return out;
189
+ }
190
+
191
+ std::vector<at::Tensor>
192
+ causal_conv1d_bwd(const at::Tensor &x, const at::Tensor &weight,
193
+ const c10::optional<at::Tensor> &bias_,
194
+ at::Tensor &dout,
195
+ c10::optional<at::Tensor> &dx_,
196
+ bool silu_activation) {
197
+ auto input_type = x.scalar_type();
198
+ auto weight_type = weight.scalar_type();
199
+ TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16);
200
+ TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::Half || weight_type == at::ScalarType::BFloat16);
201
+
202
+ TORCH_CHECK(x.is_cuda());
203
+ TORCH_CHECK(weight.is_cuda());
204
+ TORCH_CHECK(dout.is_cuda());
205
+
206
+ const auto sizes = x.sizes();
207
+ const int batch_size = sizes[0];
208
+ const int dim = sizes[1];
209
+ const int seqlen = sizes[2];
210
+ const int width = weight.size(-1);
211
+
212
+ TORCH_CHECK(width >= 2 && width <= 4, "causal_conv1d only supports width between 2 and 4");
213
+
214
+ CHECK_SHAPE(x, batch_size, dim, seqlen);
215
+ CHECK_SHAPE(weight, dim, width);
216
+ CHECK_SHAPE(dout, batch_size, dim, seqlen);
217
+
218
+ TORCH_CHECK(x.stride(2) == 1 || x.stride(1) == 1);
219
+ const bool is_channel_last = x.stride(1) == 1 && x.stride(2) > 1;
220
+ if (!is_channel_last && dout.stride(2) != 1) { dout = dout.contiguous(); }
221
+ if (is_channel_last && dout.stride(1) != 1) { dout = dout.transpose(-1, -2).contiguous().transpose(-1, -2); }
222
+
223
+ if (bias_.has_value()) {
224
+ auto bias = bias_.value();
225
+ TORCH_CHECK(bias.scalar_type() == weight_type);
226
+ TORCH_CHECK(bias.is_cuda());
227
+ TORCH_CHECK(bias.stride(-1) == 1);
228
+ CHECK_SHAPE(bias, dim);
229
+ }
+
+ at::Tensor dx;
+ if (dx_.has_value()) {
+ dx = dx_.value();
+ TORCH_CHECK(dx.scalar_type() == input_type);
+ TORCH_CHECK(dx.is_cuda());
+ CHECK_SHAPE(dx, batch_size, dim, seqlen);
+ if (!is_channel_last) { TORCH_CHECK(dx.stride(2) == 1); }
+ if (is_channel_last) { TORCH_CHECK(dx.stride(1) == 1); }
+ } else {
+ dx = torch::empty_like(x);
+ }
+
+ // Otherwise the kernel will be launched from cuda:0 device
+ // Cast to char to avoid compiler warning about narrowing
+ at::cuda::CUDAGuard device_guard{(char)x.get_device()};
+
+ at::Tensor dweight = torch::zeros_like(weight, weight.options().dtype(at::kFloat));
+ at::Tensor dbias;
+ if (bias_.has_value()) { dbias = torch::zeros_like(bias_.value(), bias_.value().options().dtype(at::kFloat)); }
+
+ ConvParamsBwd params;
+ set_conv_params_bwd(params, batch_size, dim, seqlen, width,
+ x, weight, bias_.has_value() ? bias_.value().data_ptr() : nullptr,
+ dout, dx, dweight, bias_.has_value() ? dbias.data_ptr() : nullptr,
+ silu_activation);
+
+ auto stream = at::cuda::getCurrentCUDAStream().stream();
+ DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(x.scalar_type(), "causal_conv1d_bwd", [&] {
+ DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(weight.scalar_type(), "causal_conv1d_bwd", [&] {
+ if (!is_channel_last) {
+ causal_conv1d_bwd_cuda<input_t, weight_t>(params, stream);
+ } else {
+ causal_conv1d_channellast_bwd_cuda<input_t, weight_t>(params, stream);
+ }
+ });
+ });
+ return {dx, dweight.to(weight.dtype()), bias_.has_value() ? dbias.to(bias_.value().dtype()) : dbias};
+ }
+
+ at::Tensor
+ causal_conv1d_update(const at::Tensor &x,
+ const at::Tensor &conv_state,
+ const at::Tensor &weight,
+ const c10::optional<at::Tensor> &bias_,
+ bool silu_activation) {
+ auto input_type = x.scalar_type();
+ auto weight_type = weight.scalar_type();
+ TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16);
+ TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::Half || weight_type == at::ScalarType::BFloat16);
+ TORCH_CHECK(conv_state.scalar_type() == input_type);
+
+ TORCH_CHECK(x.is_cuda());
+ TORCH_CHECK(conv_state.is_cuda());
+ TORCH_CHECK(weight.is_cuda());
+
+ const auto sizes = x.sizes();
+ const int batch_size = sizes[0];
+ const int dim = sizes[1];
+ const int width = weight.size(-1);
+
+ CHECK_SHAPE(x, batch_size, dim);
+ CHECK_SHAPE(conv_state, batch_size, dim, width);
+ CHECK_SHAPE(weight, dim, width);
+
+ TORCH_CHECK(width >= 2 && width <= 4, "causal_conv1d only supports width between 2 and 4");
+
+ if (bias_.has_value()) {
+ auto bias = bias_.value();
+ TORCH_CHECK(bias.scalar_type() == weight_type);
+ TORCH_CHECK(bias.is_cuda());
+ TORCH_CHECK(bias.stride(-1) == 1);
+ CHECK_SHAPE(bias, dim);
+ }
+
+ at::Tensor out = torch::empty_like(x);
+
+ ConvParamsBase params;
+ set_conv_params_fwd(params, batch_size, dim, /*seqlen=*/1, width, x, weight, out,
+ bias_.has_value() ? bias_.value().data_ptr() : nullptr,
+ silu_activation);
+ params.conv_state_ptr = conv_state.data_ptr();
+ // All strides are in elements, not bytes.
+ params.conv_state_batch_stride = conv_state.stride(0);
+ params.conv_state_c_stride = conv_state.stride(1);
+ params.conv_state_l_stride = conv_state.stride(2);
+
+ // Otherwise the kernel will be launched from cuda:0 device
+ // Cast to char to avoid compiler warning about narrowing
+ at::cuda::CUDAGuard device_guard{(char)x.get_device()};
+ auto stream = at::cuda::getCurrentCUDAStream().stream();
+ DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(x.scalar_type(), "causal_conv1d_update", [&] {
+ DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(weight.scalar_type(), "causal_conv1d_update", [&] {
+ causal_conv1d_update_cuda<input_t, weight_t>(params, stream);
+ });
+ });
+ return out;
+ }
+
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+ m.def("causal_conv1d_fwd", &causal_conv1d_fwd, "Causal conv1d forward");
+ m.def("causal_conv1d_bwd", &causal_conv1d_bwd, "Causal conv1d backward");
+ m.def("causal_conv1d_update", &causal_conv1d_update, "Causal conv1d update");
+ }
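For orientation, the operation these bindings expose is a depthwise causal convolution over the last dimension: each channel is convolved with its own length-`width` filter, only current and past positions contribute, and an optional SiLU is applied. The following is a minimal CPU sketch of the same computation, not part of the extension; the function name and flat `std::vector` layout are assumptions for illustration.

    // Reference (CPU) sketch of the causal_conv1d_fwd semantics: x is (batch, dim, seqlen),
    // weight is (dim, width), bias is (dim). Positions before t=0 read as zero.
    #include <cmath>
    #include <vector>

    std::vector<float> causal_conv1d_ref(const std::vector<float> &x,
                                         const std::vector<float> &weight,
                                         const std::vector<float> &bias,
                                         int batch, int dim, int seqlen, int width,
                                         bool silu) {
        std::vector<float> out(x.size(), 0.f);
        for (int b = 0; b < batch; ++b) {
            for (int c = 0; c < dim; ++c) {
                for (int l = 0; l < seqlen; ++l) {
                    float acc = bias.empty() ? 0.f : bias[c];
                    for (int w = 0; w < width; ++w) {
                        int pos = l - (width - 1) + w;   // causal: only positions <= l
                        if (pos >= 0) {
                            acc += weight[c * width + w] * x[(b * dim + c) * seqlen + pos];
                        }
                    }
                    if (silu) { acc = acc / (1.f + std::exp(-acc)); }  // optional SiLU, as in the kernels
                    out[(b * dim + c) * seqlen + l] = acc;
                }
            }
        }
        return out;
    }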
SegMamba/causal-conv1d/csrc/causal_conv1d.h ADDED
@@ -0,0 +1,53 @@
+ /******************************************************************************
+ * Copyright (c) 2023, Tri Dao.
+ ******************************************************************************/
+
+ #pragma once
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ struct ConvParamsBase {
+ using index_t = uint32_t;
+
+ int batch, dim, seqlen, width;
+ bool silu_activation;
+
+ index_t x_batch_stride;
+ index_t x_c_stride;
+ index_t x_l_stride;
+ index_t weight_c_stride;
+ index_t weight_width_stride;
+ index_t out_batch_stride;
+ index_t out_c_stride;
+ index_t out_l_stride;
+
+ index_t conv_state_batch_stride;
+ index_t conv_state_c_stride;
+ index_t conv_state_l_stride;
+
+ // Common data pointers.
+ void *__restrict__ x_ptr;
+ void *__restrict__ weight_ptr;
+ void *__restrict__ bias_ptr;
+ void *__restrict__ out_ptr;
+
+ void *__restrict__ conv_state_ptr;
+ };
+
+ struct ConvParamsBwd: public ConvParamsBase {
+ index_t dx_batch_stride;
+ index_t dx_c_stride;
+ index_t dx_l_stride;
+ index_t dweight_c_stride;
+ index_t dweight_width_stride;
+ index_t dout_batch_stride;
+ index_t dout_c_stride;
+ index_t dout_l_stride;
+
+ // Common data pointers.
+ void *__restrict__ dx_ptr;
+ void *__restrict__ dweight_ptr;
+ void *__restrict__ dbias_ptr;
+ void *__restrict__ dout_ptr;
+ };
+
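The stride fields in ConvParamsBase mirror the element strides of the PyTorch tensors, which is how the same kernels serve both channel-first and channel-last layouts. A hedged sketch of how a setter along the lines of the `set_conv_params_fwd` helper in causal_conv1d.cpp could populate them for a plain contiguous (batch, dim, seqlen) tensor; the function name here is hypothetical and the real helper reads the actual tensor strides instead of deriving them.

    // Sketch only: fills the forward params assuming a contiguous (batch, dim, seqlen)
    // input, i.e. strides of dim*seqlen, seqlen, 1 elements. Not the extension's helper.
    inline void fill_fwd_params_contiguous(ConvParamsBase &p,
                                           int batch, int dim, int seqlen, int width,
                                           void *x, void *weight, void *bias, void *out,
                                           bool silu) {
        p.batch = batch; p.dim = dim; p.seqlen = seqlen; p.width = width;
        p.silu_activation = silu;
        p.x_ptr = x; p.weight_ptr = weight; p.bias_ptr = bias; p.out_ptr = out;
        p.x_batch_stride = static_cast<ConvParamsBase::index_t>(dim) * seqlen;
        p.x_c_stride = seqlen;
        p.x_l_stride = 1;
        p.weight_c_stride = width;        // weight is (dim, width), contiguous
        p.weight_width_stride = 1;
        p.out_batch_stride = static_cast<ConvParamsBase::index_t>(dim) * seqlen;
        p.out_c_stride = seqlen;
        p.out_l_stride = 1;
    }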
SegMamba/causal-conv1d/csrc/causal_conv1d_bwd.cu ADDED
@@ -0,0 +1,525 @@
1
+ /******************************************************************************
2
+ * Copyright (c) 2023, Tri Dao.
3
+ ******************************************************************************/
4
+
5
+ #include <c10/util/BFloat16.h>
6
+ #include <c10/util/Half.h>
7
+ #include <c10/cuda/CUDAException.h> // For C10_CUDA_CHECK and C10_CUDA_KERNEL_LAUNCH_CHECK
8
+
9
+ #include <cub/block/block_load.cuh>
10
+ #include <cub/block/block_store.cuh>
11
+ #include <cub/block/block_reduce.cuh>
12
+
13
+ #include "causal_conv1d.h"
14
+ #include "causal_conv1d_common.h"
15
+ #include "static_switch.h"
16
+
17
+ template<int kNThreads_, int kWidth_, bool kSiluAct_, bool kIsVecLoad_, typename input_t_, typename weight_t_>
18
+ struct Causal_conv1d_bwd_kernel_traits {
19
+ using input_t = input_t_;
20
+ using weight_t = weight_t_;
21
+ static constexpr int kNThreads = kNThreads_;
22
+ static constexpr int kWidth = kWidth_;
23
+ static constexpr bool kSiluAct = kSiluAct_;
24
+ static constexpr int kNBytes = sizeof(input_t);
25
+ static_assert(kNBytes == 2 || kNBytes == 4);
26
+ static constexpr int kNElts = kNBytes == 4 ? 4 : 8;
27
+ static_assert(kWidth <= kNElts);
28
+ // It's possible that we need to do 2 rounds of exchange if input_t is 16 bits
29
+ // (since then we'd have 8 values of float, and each round we can exchange 4 floats).
30
+ static constexpr int kNExchangeRounds = sizeof(float) / sizeof(input_t);
31
+ static constexpr bool kIsVecLoad = kIsVecLoad_;
32
+ using vec_t = typename BytesToType<kNBytes * kNElts>::Type;
33
+ using BlockLoadT = cub::BlockLoad<input_t, kNThreads, kNElts, cub::BLOCK_LOAD_WARP_TRANSPOSE>;
34
+ using BlockLoadVecT = cub::BlockLoad<vec_t, kNThreads, 1, cub::BLOCK_LOAD_DIRECT>;
35
+ using BlockStoreT = cub::BlockStore<input_t, kNThreads, kNElts, cub::BLOCK_STORE_WARP_TRANSPOSE>;
36
+ using BlockStoreVecT = cub::BlockStore<vec_t, kNThreads, 1, cub::BLOCK_STORE_DIRECT>;
37
+ using BlockReduceFloatT = cub::BlockReduce<float, kNThreads>;
38
+ static constexpr int kSmemIOSize = kIsVecLoad
39
+ ? 0
40
+ : std::max({sizeof(typename BlockLoadT::TempStorage), sizeof(typename BlockStoreT::TempStorage)});
41
+ static constexpr int kSmemExchangeSize = kNThreads * kNBytes * kNElts * (!kSiluAct ? 1 : kNExchangeRounds + 1);
42
+ static constexpr int kSmemSize = std::max({kSmemExchangeSize,
43
+ int(sizeof(typename BlockReduceFloatT::TempStorage))}) + (kIsVecLoad ? 0 : kSmemIOSize);
44
+ };
45
+
46
+ template<typename Ktraits>
47
+ __global__ __launch_bounds__(Ktraits::kNThreads)
48
+ void causal_conv1d_bwd_kernel(ConvParamsBwd params) {
49
+ constexpr int kWidth = Ktraits::kWidth;
50
+ constexpr int kNThreads = Ktraits::kNThreads;
51
+ constexpr bool kSiluAct = Ktraits::kSiluAct;
52
+ constexpr int kNElts = Ktraits::kNElts;
53
+ constexpr int kNExchangeRounds = Ktraits::kNExchangeRounds;
54
+ constexpr bool kIsVecLoad = Ktraits::kIsVecLoad;
55
+ using input_t = typename Ktraits::input_t;
56
+ using vec_t = typename Ktraits::vec_t;
57
+ using weight_t = typename Ktraits::weight_t;
58
+
59
+ // Shared memory.
60
+ extern __shared__ char smem_[];
61
+ auto& smem_load = reinterpret_cast<typename Ktraits::BlockLoadT::TempStorage&>(smem_);
62
+ auto& smem_load_vec = reinterpret_cast<typename Ktraits::BlockLoadVecT::TempStorage&>(smem_);
63
+ auto& smem_store = reinterpret_cast<typename Ktraits::BlockStoreT::TempStorage&>(smem_);
64
+ auto& smem_store_vec = reinterpret_cast<typename Ktraits::BlockStoreVecT::TempStorage&>(smem_);
65
+ vec_t *smem_exchange = reinterpret_cast<vec_t *>(smem_ + Ktraits::kSmemIOSize);
66
+ vec_t *smem_exchange_x = reinterpret_cast<vec_t *>(smem_ + Ktraits::kSmemIOSize) + kNThreads * kNExchangeRounds;
67
+ auto& smem_reduce_float = *reinterpret_cast<typename Ktraits::BlockReduceFloatT::TempStorage*>(smem_ + Ktraits::kSmemIOSize);
68
+
69
+ const int tidx = threadIdx.x;
70
+ const int batch_id = blockIdx.x;
71
+ const int dim_id = blockIdx.y;
72
+ input_t *x = reinterpret_cast<input_t *>(params.x_ptr) + batch_id * params.x_batch_stride
73
+ + dim_id * params.x_c_stride;
74
+ weight_t *weight = reinterpret_cast<weight_t *>(params.weight_ptr) + dim_id * params.weight_c_stride;
75
+ input_t *dout = reinterpret_cast<input_t *>(params.dout_ptr) + batch_id * params.dout_batch_stride
76
+ + dim_id * params.dout_c_stride;
77
+ input_t *dx = reinterpret_cast<input_t *>(params.dx_ptr) + batch_id * params.dx_batch_stride
78
+ + dim_id * params.dx_c_stride;
79
+ float *dweight = reinterpret_cast<float *>(params.dweight_ptr) + dim_id * params.dweight_c_stride;
80
+ float bias_val = params.bias_ptr == nullptr ? 0.f : float(reinterpret_cast<weight_t *>(params.bias_ptr)[dim_id]);
81
+
82
+ // Thread kNThreads - 1 will load the first elements of the next chunk so we initialize those to 0.
83
+ if (tidx == 0) {
84
+ if constexpr (!kSiluAct) {
85
+ input_t zeros[kNElts] = {0};
86
+ smem_exchange[0] = reinterpret_cast<vec_t *>(zeros)[0];
87
+ } else {
88
+ float zeros[kNElts] = {0};
89
+ #pragma unroll
90
+ for (int r = 0; r < kNExchangeRounds; ++r) {
91
+ smem_exchange[r * kNThreads] = reinterpret_cast<vec_t *>(zeros)[r];
92
+ }
93
+ }
94
+ }
95
+
96
+ float weight_vals[kWidth];
97
+ #pragma unroll
98
+ for (int i = 0; i < kWidth; ++i) { weight_vals[i] = weight[i * params.weight_width_stride]; }
99
+
100
+ float dweight_vals[kWidth] = {0};
101
+ float dbias_val = 0;
102
+
103
+ constexpr int kChunkSize = kNThreads * kNElts;
104
+ const int n_chunks = (params.seqlen + kChunkSize - 1) / kChunkSize;
105
+ x += (n_chunks - 1) * kChunkSize;
106
+ dout += (n_chunks - 1) * kChunkSize;
107
+ dx += (n_chunks - 1) * kChunkSize;
108
+ for (int chunk = n_chunks - 1; chunk >= 0; --chunk) {
109
+ input_t x_vals_load[2 * kNElts] = {0};
110
+ input_t dout_vals_load[2 * kNElts] = {0};
111
+ if constexpr(kIsVecLoad) {
112
+ Ktraits::BlockLoadVecT(smem_load_vec).Load(reinterpret_cast<vec_t*>(x), *reinterpret_cast<vec_t (*)[1]>(&x_vals_load[kNElts]), (params.seqlen - chunk * kChunkSize) / kNElts);
113
+ Ktraits::BlockLoadVecT(smem_load_vec).Load(reinterpret_cast<vec_t*>(dout), *reinterpret_cast<vec_t (*)[1]>(&dout_vals_load[0]), (params.seqlen - chunk * kChunkSize) / kNElts);
114
+ } else {
115
+ __syncthreads();
116
+ Ktraits::BlockLoadT(smem_load).Load(x, *reinterpret_cast<input_t (*)[kNElts]>(&x_vals_load[kNElts]), params.seqlen - chunk * kChunkSize);
117
+ __syncthreads();
118
+ Ktraits::BlockLoadT(smem_load).Load(dout, *reinterpret_cast<input_t (*)[kNElts]>(&dout_vals_load[0]), params.seqlen - chunk * kChunkSize);
119
+ }
120
+ float dout_vals[2 * kNElts], x_vals[2 * kNElts];
121
+ if constexpr (!kSiluAct) {
122
+ __syncthreads();
123
+ // Thread 0 doesn't write yet, so that thread kNThreads - 1 can read
124
+ // the first elements of the next chunk.
125
+ if (tidx > 0) { smem_exchange[tidx] = reinterpret_cast<vec_t *>(dout_vals_load)[0]; }
126
+ __syncthreads();
127
+ reinterpret_cast<vec_t *>(dout_vals_load)[1] = smem_exchange[tidx < kNThreads - 1 ? tidx + 1 : 0];
128
+ __syncthreads();
129
+ // Now thread 0 can write the first elements of the current chunk.
130
+ if (tidx == 0) { smem_exchange[tidx] = reinterpret_cast<vec_t *>(dout_vals_load)[0]; }
131
+ #pragma unroll
132
+ for (int i = 0; i < 2 * kNElts; ++i) {
133
+ dout_vals[i] = float(dout_vals_load[i]);
134
+ x_vals[i] = float(x_vals_load[i]);
135
+ }
136
+ } else {
137
+ if (tidx == 0 && chunk > 0) {
138
+ if constexpr(kIsVecLoad) {
139
+ reinterpret_cast<vec_t *>(x_vals_load)[0] = reinterpret_cast<vec_t *>(x)[-1];
140
+ } else {
141
+ #pragma unroll
142
+ for (int i = 0; i < kNElts; ++i) {
143
+ if (chunk * kChunkSize + i < params.seqlen) { x_vals_load[i] = x[-kNElts + i]; }
144
+ }
145
+ }
146
+ }
147
+ __syncthreads();
148
+ smem_exchange_x[tidx] = reinterpret_cast<vec_t *>(x_vals_load)[1];
149
+ __syncthreads();
150
+ if (tidx > 0) { reinterpret_cast<vec_t *>(x_vals_load)[0] = smem_exchange_x[tidx - 1]; }
151
+ #pragma unroll
152
+ for (int i = 0; i < 2 * kNElts; ++i) { x_vals[i] = float(x_vals_load[i]); }
153
+ // Recompute the output
154
+ #pragma unroll
155
+ for (int i = 0; i < kNElts; ++i) {
156
+ float out_val = bias_val;
157
+ #pragma unroll
158
+ for (int w = 0; w < kWidth; ++w) {
159
+ out_val += weight_vals[w] * x_vals[kNElts + i - (kWidth - w - 1)];
160
+ }
161
+ float out_sigmoid_val = 1.0f / (1.0f + expf(-out_val));
162
+ dout_vals[i] = float(dout_vals_load[i]) * out_sigmoid_val
163
+ * (1.0f + out_val * (1.0f - out_sigmoid_val));
164
+ }
165
+ // Exchange the dout_vals. It's possible that we need to do 2 rounds of exchange
166
+ // if input_t is 16 bits (since then we'd have 8 values of float)
167
+ __syncthreads();
168
+ // Thread 0 doesn't write yet, so that thread kNThreads - 1 can read
169
+ // the first elements of the next chunk.
170
+ if (tidx > 0) {
171
+ #pragma unroll
172
+ for (int r = 0; r < kNExchangeRounds; ++r) {
173
+ smem_exchange[r * kNThreads + tidx] = reinterpret_cast<vec_t *>(dout_vals)[r];
174
+ }
175
+ }
176
+ __syncthreads();
177
+ #pragma unroll
178
+ for (int r = 0; r < kNExchangeRounds; ++r) {
179
+ reinterpret_cast<vec_t *>(dout_vals)[kNExchangeRounds + r]
180
+ = smem_exchange[r * kNThreads + (tidx < kNThreads - 1 ? tidx + 1 : 0)];
181
+ }
182
+ __syncthreads();
183
+ // Now thread 0 can write the first elements of the current chunk.
184
+ if (tidx == 0) {
185
+ #pragma unroll
186
+ for (int r = 0; r < kNExchangeRounds; ++r) {
187
+ smem_exchange[r * kNThreads + tidx] = reinterpret_cast<vec_t *>(dout_vals)[r];
188
+ }
189
+ }
190
+ }
191
+ dout -= kChunkSize;
192
+ x -= kChunkSize;
193
+
194
+ #pragma unroll
195
+ for (int i = 0; i < kNElts; ++i) { dbias_val += dout_vals[i]; }
196
+
197
+ float dx_vals[kNElts] = {0};
198
+ #pragma unroll
199
+ for (int i = 0; i < kNElts; ++i) {
200
+ #pragma unroll
201
+ for (int w = 0; w < kWidth; ++w) {
202
+ dx_vals[i] += weight_vals[w] * dout_vals[i + kWidth - w - 1];
203
+ }
204
+ }
205
+
206
+ input_t dx_vals_store[kNElts];
207
+ #pragma unroll
208
+ for (int i = 0; i < kNElts; ++i) { dx_vals_store[i] = dx_vals[i]; }
209
+ if constexpr(kIsVecLoad) {
210
+ Ktraits::BlockStoreVecT(smem_store_vec).Store(reinterpret_cast<vec_t*>(dx), reinterpret_cast<vec_t (&)[1]>(dx_vals_store), (params.seqlen - chunk * kChunkSize) / kNElts);
211
+ } else {
212
+ Ktraits::BlockStoreT(smem_store).Store(dx, dx_vals_store, params.seqlen - chunk * kChunkSize);
213
+ }
214
+ dx -= kChunkSize;
215
+
216
+ #pragma unroll
217
+ for (int w = 0; w < kWidth; ++w) {
218
+ #pragma unroll
219
+ for (int i = 0; i < kNElts; ++i) {
220
+ dweight_vals[w] += x_vals[kNElts + i] * dout_vals[i + kWidth - w - 1];
221
+ }
222
+ }
223
+ }
224
+
225
+ #pragma unroll
226
+ for (int w = 0; w < kWidth; ++w) {
227
+ __syncthreads();
228
+ dweight_vals[w] = Ktraits::BlockReduceFloatT(smem_reduce_float).Sum(dweight_vals[w]);
229
+ if (tidx == 0) {
230
+ atomicAdd(&reinterpret_cast<float *>(dweight)[w * params.dweight_width_stride], dweight_vals[w]);
231
+ }
232
+ }
233
+ if (params.bias_ptr != nullptr) {
234
+ __syncthreads();
235
+ dbias_val = Ktraits::BlockReduceFloatT(smem_reduce_float).Sum(dbias_val);
236
+ if (tidx == 0) {
237
+ atomicAdd(&reinterpret_cast<float *>(params.dbias_ptr)[dim_id], dbias_val);
238
+ }
239
+ }
240
+ }
241
+
242
+ template<int kNThreads, int kWidth, typename input_t, typename weight_t>
243
+ void causal_conv1d_bwd_launch(ConvParamsBwd &params, cudaStream_t stream) {
244
+ static constexpr int kNElts = sizeof(input_t) == 4 ? 4 : 8;
245
+ BOOL_SWITCH(params.seqlen % kNElts == 0, kIsVecLoad, [&] {
246
+ BOOL_SWITCH(params.silu_activation, kSiluAct, [&] {
247
+ using Ktraits = Causal_conv1d_bwd_kernel_traits<kNThreads, kWidth, kSiluAct, kIsVecLoad, input_t, weight_t>;
248
+ constexpr int kSmemSize = Ktraits::kSmemSize;
249
+ dim3 grid(params.batch, params.dim);
250
+ auto kernel = &causal_conv1d_bwd_kernel<Ktraits>;
251
+ if (kSmemSize >= 48 * 1024) {
252
+ C10_CUDA_CHECK(cudaFuncSetAttribute(
253
+ kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize));
254
+ }
255
+ kernel<<<grid, Ktraits::kNThreads, kSmemSize, stream>>>(params);
256
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
257
+ });
258
+ });
259
+ }
260
+
261
+ template<typename input_t, typename weight_t>
262
+ void causal_conv1d_bwd_cuda(ConvParamsBwd &params, cudaStream_t stream) {
263
+ if (params.width == 2) {
264
+ causal_conv1d_bwd_launch<128, 2, input_t, weight_t>(params, stream);
265
+ } else if (params.width == 3) {
266
+ causal_conv1d_bwd_launch<128, 3, input_t, weight_t>(params, stream);
267
+ } else if (params.width == 4) {
268
+ causal_conv1d_bwd_launch<128, 4, input_t, weight_t>(params, stream);
269
+ }
270
+ }
271
+
272
+ template<int kNThreads_, int kWidth_, int kChunkSizeL_, bool kSiluAct_, bool kIsVecLoad_, typename input_t_, typename weight_t_>
273
+ struct Causal_conv1d_channellast_bwd_kernel_traits {
274
+ // The cache line is 128 bytes, and we try to read 16 bytes per thread.
275
+ // So we have 8 threads per "row", so 32 or 64 elements in the channel dimension.
276
+ // That leaves 4 columns per warp, and so 16 columns per block (assuming each block has 128
277
+ // threads). Each load is 16 x 32|64 elements in the L x C dimensions.
278
+ using input_t = input_t_;
279
+ using weight_t = weight_t_;
280
+ static constexpr bool kSiluAct = kSiluAct_;
281
+ static constexpr int kNThreads = kNThreads_;
282
+ static_assert(kNThreads % 32 == 0);
283
+ static constexpr int kNWarps = kNThreads / 32;
284
+ static constexpr int kWidth = kWidth_;
285
+ static constexpr int kChunkSizeL = kChunkSizeL_;
286
+ static constexpr int kNBytes = sizeof(input_t);
287
+ static_assert(kNBytes == 2 || kNBytes == 4);
288
+ static constexpr int kNElts = kNBytes == 4 ? 4 : 8;
289
+ static constexpr int kNEltsPerRow = 128 / kNBytes;
290
+ static constexpr int kNThreadsPerRow = kNEltsPerRow / kNElts; // Always 8 for now
291
+ static_assert(kNThreadsPerRow * kNBytes * kNElts == 128);
292
+ static constexpr int kNColsPerWarp = 32 / kNThreadsPerRow; // Always 4 for now
293
+ static_assert(kNColsPerWarp * kNThreadsPerRow == 32);
294
+ static constexpr int kNColsPerLoad = kNColsPerWarp * kNWarps;
295
+ static constexpr int kNLoads = kChunkSizeL / kNColsPerLoad;
296
+ static_assert(kNLoads * kNColsPerLoad == kChunkSizeL);
297
+ static constexpr bool kIsVecLoad = kIsVecLoad_;
298
+ using vec_t = typename BytesToType<kNBytes * kNElts>::Type;
299
+ // using BlockLoadT = cub::BlockLoad<input_t, kNThreads, kNItems, cub::BLOCK_LOAD_WARP_TRANSPOSE>;
300
+ // using BlockStoreT = cub::BlockStore<input_t, kNThreads, kNItems, cub::BLOCK_STORE_WARP_TRANSPOSE>;
301
+ // static constexpr int kSmemSize = std::max({sizeof(typename BlockLoadT::TempStorage),
302
+ // sizeof(typename BlockStoreT::TempStorage)});
303
+ // static constexpr int kSmemSize = kChunkSizeL * kNEltsPerRow * kNBytes;
304
+ };
305
+
306
+ template<typename Ktraits>
307
+ __global__ __launch_bounds__(Ktraits::kNThreads)
308
+ void causal_conv1d_channellast_bwd_kernel(ConvParamsBwd params) {
309
+ constexpr int kWidth = Ktraits::kWidth;
310
+ constexpr int kNThreads = Ktraits::kNThreads;
311
+ constexpr bool kSiluAct = Ktraits::kSiluAct;
312
+ constexpr int kNElts = Ktraits::kNElts;
313
+ constexpr int kNWarp = Ktraits::kNWarps;
314
+ constexpr int kNThreadsPerC = Ktraits::kNThreadsPerRow;
315
+ constexpr int kLPerLoad = Ktraits::kNColsPerLoad;
316
+ constexpr int kChunkSizeL = Ktraits::kChunkSizeL;
317
+ constexpr int kChunkSizeC = Ktraits::kNEltsPerRow;
318
+ using input_t = typename Ktraits::input_t;
319
+ using vec_t = typename Ktraits::vec_t;
320
+ using weight_t = typename Ktraits::weight_t;
321
+
322
+ // Shared memory.
323
+ __shared__ input_t dout_smem[kChunkSizeL + kWidth - 1][kChunkSizeC + kNElts];
324
+ __shared__ input_t x_smem[kWidth - 1 + kChunkSizeL + kWidth - 1][kChunkSizeC + kNElts];
325
+
326
+ const int tid = threadIdx.x;
327
+ const int l_idx = tid / kNThreadsPerC;
328
+ const int c_idx = tid % kNThreadsPerC;
329
+ const int batch_id = blockIdx.x;
330
+ const int chunk_l_id = blockIdx.y;
331
+ const int chunk_c_id = blockIdx.z;
332
+ input_t *x = reinterpret_cast<input_t *>(params.x_ptr) + batch_id * params.x_batch_stride
333
+ + (chunk_l_id * kChunkSizeL + l_idx) * params.x_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts;
334
+ weight_t *weight = reinterpret_cast<weight_t *>(params.weight_ptr)
335
+ + chunk_c_id * kChunkSizeC * params.weight_c_stride;
336
+ input_t *dout = reinterpret_cast<input_t *>(params.dout_ptr) + batch_id * params.dout_batch_stride
337
+ + (chunk_l_id * kChunkSizeL + l_idx) * params.dout_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts;
338
+ input_t *dx = reinterpret_cast<input_t *>(params.dx_ptr) + batch_id * params.dx_batch_stride
339
+ + (chunk_l_id * kChunkSizeL + l_idx) * params.dx_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts;
340
+ float *dweight = reinterpret_cast<float *>(params.dweight_ptr)
341
+ + chunk_c_id * kChunkSizeC * params.dweight_c_stride;
342
+
343
+ #pragma unroll
344
+ for (int l = 0; l < Ktraits::kNLoads; ++l) {
345
+ input_t dout_vals_load[kNElts] = {0};
346
+ input_t x_vals_load[kNElts] = {0};
347
+ if (chunk_l_id * kChunkSizeL + l * kLPerLoad + l_idx < params.seqlen
348
+ && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
349
+ reinterpret_cast<vec_t *>(dout_vals_load)[0] = *reinterpret_cast<vec_t *>(dout + l * kLPerLoad * params.dout_l_stride);
350
+ reinterpret_cast<vec_t *>(x_vals_load)[0] = *reinterpret_cast<vec_t *>(x + l * kLPerLoad * params.x_l_stride);
351
+ }
352
+ reinterpret_cast<vec_t *>(dout_smem[l * kLPerLoad + l_idx])[c_idx] = reinterpret_cast<vec_t *>(dout_vals_load)[0];
353
+ reinterpret_cast<vec_t *>(x_smem[kWidth - 1 + l * kLPerLoad + l_idx])[c_idx] = reinterpret_cast<vec_t *>(x_vals_load)[0];
354
+ }
355
+ // Load the elements from the previous chunk or next chunk that are needed for convolution.
356
+ if (l_idx < kWidth - 1) {
357
+ input_t dout_vals_load[kNElts] = {0};
358
+ input_t x_vals_load[kNElts] = {0};
359
+ if ((chunk_l_id + 1) * kChunkSizeL + l_idx < params.seqlen
360
+ && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
361
+ reinterpret_cast<vec_t *>(dout_vals_load)[0] = *reinterpret_cast<vec_t *>(dout + kChunkSizeL * params.dout_l_stride);
362
+ }
363
+ if (chunk_l_id * kChunkSizeL + l_idx - (kWidth - 1) >= 0
364
+ && chunk_l_id * kChunkSizeL + l_idx - (kWidth - 1) < params.seqlen
365
+ && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
366
+ reinterpret_cast<vec_t *>(x_vals_load)[0] = *reinterpret_cast<vec_t *>(x - (kWidth - 1) * params.x_l_stride);
367
+ }
368
+ reinterpret_cast<vec_t *>(dout_smem[kChunkSizeL + l_idx])[c_idx] = reinterpret_cast<vec_t *>(dout_vals_load)[0];
369
+ reinterpret_cast<vec_t *>(x_smem[l_idx])[c_idx] = reinterpret_cast<vec_t *>(x_vals_load)[0];
370
+ }
371
+ // Need to load (kWidth - 1) extra x's on the right to recompute the (kChunkSizeL + kWidth - 1) outputs
372
+ if constexpr (kSiluAct) {
373
+ if (l_idx < kWidth - 1) {
374
+ input_t x_vals_load[kNElts] = {0};
375
+ if ((chunk_l_id + 1) * kChunkSizeL + l_idx < params.seqlen
376
+ && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
377
+ reinterpret_cast<vec_t *>(x_vals_load)[0] = *reinterpret_cast<vec_t *>(x + kChunkSizeL * params.x_l_stride);
378
+ }
379
+ reinterpret_cast<vec_t *>(x_smem[kWidth - 1 + kChunkSizeL + l_idx])[c_idx] = reinterpret_cast<vec_t *>(x_vals_load)[0];
380
+ }
381
+ }
382
+
383
+ __syncthreads();
384
+
385
+ constexpr int kLPerThread = std::min(kChunkSizeL * kChunkSizeC / kNThreads, kChunkSizeL);
386
+ static_assert(kLPerThread * kNThreads == kChunkSizeL * kChunkSizeC);
387
+ constexpr int kNThreadsPerRow = kChunkSizeL / kLPerThread;
388
+ static_assert(kNThreadsPerRow * kLPerThread == kChunkSizeL);
389
+ // kChunkSizeL, kLPerThread, kNThreadsPerRow should be powers of 2 for simplicity
390
+ static_assert((kChunkSizeL & (kChunkSizeL - 1)) == 0);
391
+ static_assert((kLPerThread & (kLPerThread - 1)) == 0);
392
+ static_assert((kNThreadsPerRow & (kNThreadsPerRow - 1)) == 0);
393
+ static_assert(kNThreadsPerRow <= 32);
394
+
395
+ const int row_idx = tid / kNThreadsPerRow;
396
+ const int col_idx = tid % kNThreadsPerRow;
397
+
398
+ float bias_val = params.bias_ptr == nullptr || chunk_c_id * kChunkSizeC + row_idx >= params.dim ? 0.f : float(reinterpret_cast<weight_t *>(params.bias_ptr)[chunk_c_id * kChunkSizeC + row_idx]);
399
+ float weight_vals[kWidth] = {0};
400
+ if (chunk_c_id * kChunkSizeC + row_idx < params.dim) {
401
+ #pragma unroll
402
+ for (int w = 0; w < kWidth; ++w) {
403
+ weight_vals[w] = weight[row_idx * params.weight_c_stride + w * params.weight_width_stride];
404
+ }
405
+ }
406
+ float dout_vals[kLPerThread + kWidth - 1];
407
+ float x_vals[kWidth - 1 + kLPerThread + kWidth - 1];
408
+ #pragma unroll
409
+ for (int i = 0; i < kWidth - 1 + kLPerThread; ++i) {
410
+ dout_vals[i] = float(dout_smem[col_idx * kLPerThread + i][row_idx]);
411
+ x_vals[i] = float(x_smem[col_idx * kLPerThread + i][row_idx]);
412
+ }
413
+
414
+ if constexpr (kSiluAct) { // Recompute the output
415
+ #pragma unroll
416
+ for (int i = kWidth - 1 + kLPerThread; i < kWidth - 1 + kLPerThread + kWidth - 1; ++i) {
417
+ x_vals[i] = float(x_smem[col_idx * kLPerThread + i][row_idx]);
418
+ }
419
+ #pragma unroll
420
+ for (int i = 0; i < kLPerThread + kWidth - 1; ++i) {
421
+ float out_val = bias_val;
422
+ #pragma unroll
423
+ for (int w = 0; w < kWidth; ++w) { out_val += weight_vals[w] * x_vals[i + w]; }
424
+ float out_val_sigmoid = 1.f / (1.f + expf(-out_val));
425
+ dout_vals[i] *= out_val_sigmoid * (1 + out_val * (1 - out_val_sigmoid));
426
+ }
427
+ }
428
+
429
+ float dweight_vals[kWidth] = {0};
430
+ SumOp<float> sum_op;
431
+ #pragma unroll
432
+ for (int w = 0; w < kWidth; ++w) {
433
+ #pragma unroll
434
+ for (int i = 0; i < kLPerThread; ++i) { dweight_vals[w] += x_vals[i + w] * dout_vals[i]; }
435
+ dweight_vals[w] = Allreduce<kNThreadsPerRow>::run(dweight_vals[w], sum_op);
436
+ if (col_idx == 0 && chunk_c_id * kChunkSizeC + row_idx < params.dim) {
437
+ atomicAdd(&reinterpret_cast<float *>(dweight)[row_idx * params.dweight_c_stride + w * params.dweight_width_stride], dweight_vals[w]);
438
+ }
439
+ }
440
+
441
+ if (params.bias_ptr != nullptr) {
442
+ float dbias_val = 0.f;
443
+ for (int i = 0; i < kLPerThread; ++i) { dbias_val += dout_vals[i]; }
444
+ dbias_val = Allreduce<kNThreadsPerRow>::run(dbias_val, sum_op);
445
+ if (col_idx == 0 && chunk_c_id * kChunkSizeC + row_idx < params.dim) {
446
+ atomicAdd(&reinterpret_cast<float *>(params.dbias_ptr)[chunk_c_id * kChunkSizeC + row_idx], dbias_val);
447
+ }
448
+ }
449
+
450
+ float dx_vals[kLPerThread] = {0};
451
+ #pragma unroll
452
+ for (int i = 0; i < kLPerThread; ++i) {
453
+ #pragma unroll
454
+ for (int w = 0; w < kWidth; ++w) { dx_vals[i] += weight_vals[kWidth - 1 - w] * dout_vals[i + w]; }
455
+ }
456
+ // Since kNThreadsPerRow is a power of 2 and <= 32, we only need syncwarp and not syncthreads.
457
+ __syncwarp();
458
+ #pragma unroll
459
+ for (int i = 0; i < kLPerThread; ++i) { x_smem[col_idx * kLPerThread + i][row_idx] = dx_vals[i]; }
460
+ __syncthreads();
461
+
462
+ #pragma unroll
463
+ for (int l = 0; l < Ktraits::kNLoads; ++l) {
464
+ input_t dx_vals_store[kNElts];
465
+ reinterpret_cast<vec_t *>(dx_vals_store)[0] = reinterpret_cast<vec_t *>(x_smem[l * kLPerLoad + l_idx])[c_idx];
466
+ if (chunk_l_id * kChunkSizeL + l * kLPerLoad + l_idx < params.seqlen
467
+ && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
468
+ *reinterpret_cast<vec_t *>(dx + l * kLPerLoad * params.dx_l_stride) = reinterpret_cast<vec_t *>(dx_vals_store)[0];
469
+ }
470
+ }
471
+
472
+ }
473
+
474
+ template<int kNThreads, int kWidth, typename input_t, typename weight_t>
475
+ void causal_conv1d_channellast_bwd_launch(ConvParamsBwd &params, cudaStream_t stream) {
476
+ BOOL_SWITCH(params.silu_activation, kSiluAct, [&] {
477
+ using Ktraits = Causal_conv1d_channellast_bwd_kernel_traits<kNThreads, kWidth, 64, kSiluAct, true, input_t, weight_t>;
478
+ // constexpr int kSmemSize = Ktraits::kSmemSize;
479
+ constexpr int kChunkSizeL = Ktraits::kChunkSizeL;
480
+ constexpr int kChunkSizeC = Ktraits::kNEltsPerRow;
481
+ const int n_chunks_L = (params.seqlen + kChunkSizeL - 1) / kChunkSizeL;
482
+ const int n_chunks_C = (params.dim + kChunkSizeC - 1) / kChunkSizeC;
483
+ dim3 grid(params.batch, n_chunks_L, n_chunks_C);
484
+ dim3 block(Ktraits::kNThreads);
485
+ auto kernel = &causal_conv1d_channellast_bwd_kernel<Ktraits>;
486
+ // if (kSmemSize >= 48 * 1024) {
487
+ // C10_CUDA_CHECK(cudaFuncSetAttribute(
488
+ // kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize));
489
+ // }
490
+ // kernel<<<grid, Ktraits::kNThreads, kSmemSize, stream>>>(params);
491
+ kernel<<<grid, Ktraits::kNThreads, 0, stream>>>(params);
492
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
493
+ });
494
+ }
495
+
496
+ template<typename input_t, typename weight_t>
497
+ void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd &params, cudaStream_t stream) {
498
+ if (params.width == 2) {
499
+ causal_conv1d_channellast_bwd_launch<128, 2, input_t, weight_t>(params, stream);
500
+ } else if (params.width == 3) {
501
+ causal_conv1d_channellast_bwd_launch<128, 3, input_t, weight_t>(params, stream);
502
+ } else if (params.width == 4) {
503
+ causal_conv1d_channellast_bwd_launch<128, 4, input_t, weight_t>(params, stream);
504
+ }
505
+ }
506
+
507
+ template void causal_conv1d_bwd_cuda<float, float>(ConvParamsBwd &params, cudaStream_t stream);
508
+ template void causal_conv1d_bwd_cuda<at::Half, float>(ConvParamsBwd &params, cudaStream_t stream);
509
+ template void causal_conv1d_bwd_cuda<at::BFloat16, float>(ConvParamsBwd &params, cudaStream_t stream);
510
+ template void causal_conv1d_bwd_cuda<float, at::Half>(ConvParamsBwd &params, cudaStream_t stream);
511
+ template void causal_conv1d_bwd_cuda<at::Half, at::Half>(ConvParamsBwd &params, cudaStream_t stream);
512
+ template void causal_conv1d_bwd_cuda<at::BFloat16, at::Half>(ConvParamsBwd &params, cudaStream_t stream);
513
+ template void causal_conv1d_bwd_cuda<float, at::BFloat16>(ConvParamsBwd &params, cudaStream_t stream);
514
+ template void causal_conv1d_bwd_cuda<at::Half, at::BFloat16>(ConvParamsBwd &params, cudaStream_t stream);
515
+ template void causal_conv1d_bwd_cuda<at::BFloat16, at::BFloat16>(ConvParamsBwd &params, cudaStream_t stream);
516
+
517
+ template void causal_conv1d_channellast_bwd_cuda<float, float>(ConvParamsBwd &params, cudaStream_t stream);
518
+ template void causal_conv1d_channellast_bwd_cuda<at::Half, float>(ConvParamsBwd &params, cudaStream_t stream);
519
+ template void causal_conv1d_channellast_bwd_cuda<at::BFloat16, float>(ConvParamsBwd &params, cudaStream_t stream);
520
+ template void causal_conv1d_channellast_bwd_cuda<float, at::Half>(ConvParamsBwd &params, cudaStream_t stream);
521
+ template void causal_conv1d_channellast_bwd_cuda<at::Half, at::Half>(ConvParamsBwd &params, cudaStream_t stream);
522
+ template void causal_conv1d_channellast_bwd_cuda<at::BFloat16, at::Half>(ConvParamsBwd &params, cudaStream_t stream);
523
+ template void causal_conv1d_channellast_bwd_cuda<float, at::BFloat16>(ConvParamsBwd &params, cudaStream_t stream);
524
+ template void causal_conv1d_channellast_bwd_cuda<at::Half, at::BFloat16>(ConvParamsBwd &params, cudaStream_t stream);
525
+ template void causal_conv1d_channellast_bwd_cuda<at::BFloat16, at::BFloat16>(ConvParamsBwd &params, cudaStream_t stream);
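The factor applied to dout when recomputing through the SiLU, in both the channel-first and channel-last backward kernels above, is the derivative of z * sigmoid(z). A small standalone check of that identity (plain C++, illustrative only, not part of the extension):

    // d/dz [ z * sigmoid(z) ] = sigmoid(z) * (1 + z * (1 - sigmoid(z))),
    // i.e. the multiplier "out_sigmoid_val * (1 + out_val * (1 - out_sigmoid_val))"
    // used in the kernels. Verified here against a finite difference.
    #include <cmath>
    #include <cstdio>

    int main() {
        auto silu = [](float v) { return v / (1.f + std::exp(-v)); };
        for (float z : {-2.f, -0.5f, 0.f, 0.7f, 3.f}) {
            float s = 1.f / (1.f + std::exp(-z));
            float analytic = s * (1.f + z * (1.f - s));
            float eps = 1e-3f;
            float numeric = (silu(z + eps) - silu(z - eps)) / (2.f * eps);
            std::printf("z=%+.2f analytic=%.6f numeric=%.6f\n", z, analytic, numeric);
        }
        return 0;
    }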
SegMamba/causal-conv1d/csrc/causal_conv1d_common.h ADDED
@@ -0,0 +1,64 @@
+ /******************************************************************************
+ * Copyright (c) 2023, Tri Dao.
+ ******************************************************************************/
+
+ #pragma once
+
+ #include <cuda_bf16.h>
+ #include <cuda_fp16.h>
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ template<int BYTES> struct BytesToType {};
+
+ template<> struct BytesToType<16> {
+ using Type = uint4;
+ static_assert(sizeof(Type) == 16);
+ };
+
+ template<> struct BytesToType<8> {
+ using Type = uint64_t;
+ static_assert(sizeof(Type) == 8);
+ };
+
+ template<> struct BytesToType<4> {
+ using Type = uint32_t;
+ static_assert(sizeof(Type) == 4);
+ };
+
+ template<> struct BytesToType<2> {
+ using Type = uint16_t;
+ static_assert(sizeof(Type) == 2);
+ };
+
+ template<> struct BytesToType<1> {
+ using Type = uint8_t;
+ static_assert(sizeof(Type) == 1);
+ };
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ template<typename T>
+ struct SumOp {
+ __device__ inline T operator()(T const & x, T const & y) { return x + y; }
+ };
+
+ template<int THREADS>
+ struct Allreduce {
+ static_assert(THREADS == 32 || THREADS == 16 || THREADS == 8 || THREADS == 4);
+ template<typename T, typename Operator>
+ static __device__ inline T run(T x, Operator &op) {
+ constexpr int OFFSET = THREADS / 2;
+ x = op(x, __shfl_xor_sync(uint32_t(-1), x, OFFSET));
+ return Allreduce<OFFSET>::run(x, op);
+ }
+ };
+
+ template<>
+ struct Allreduce<2> {
+ template<typename T, typename Operator>
+ static __device__ inline T run(T x, Operator &op) {
+ x = op(x, __shfl_xor_sync(uint32_t(-1), x, 1));
+ return x;
+ }
+ };
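Allreduce performs a butterfly reduction over a group of lanes with __shfl_xor_sync: each recursion halves the exchange offset, so after log2(THREADS) rounds every lane holds the reduction of all lane values. An equivalent unrolled sketch for a full warp with a sum operator (CUDA, illustrative only; the helper name is an assumption):

    // Unrolled view of Allreduce<32> with SumOp<float>: after offsets
    // 16, 8, 4, 2, 1 every lane in the warp holds the sum of all 32 values.
    __device__ inline float warp_allreduce_sum(float x) {
        #pragma unroll
        for (int offset = 16; offset >= 1; offset /= 2) {
            x += __shfl_xor_sync(0xffffffffu, x, offset);
        }
        return x;
    }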
SegMamba/causal-conv1d/csrc/causal_conv1d_fwd.cu ADDED
@@ -0,0 +1,350 @@
1
+ /******************************************************************************
2
+ * Copyright (c) 2023, Tri Dao.
3
+ ******************************************************************************/
4
+
5
+ #include <c10/util/BFloat16.h>
6
+ #include <c10/util/Half.h>
7
+ #include <c10/cuda/CUDAException.h> // For C10_CUDA_CHECK and C10_CUDA_KERNEL_LAUNCH_CHECK
8
+
9
+ #include <cub/block/block_load.cuh>
10
+ #include <cub/block/block_store.cuh>
11
+
12
+ #include "causal_conv1d.h"
13
+ #include "causal_conv1d_common.h"
14
+ #include "static_switch.h"
15
+
16
+ template<int kNThreads_, int kWidth_, bool kIsVecLoad_, typename input_t_, typename weight_t_>
17
+ struct Causal_conv1d_fwd_kernel_traits {
18
+ using input_t = input_t_;
19
+ using weight_t = weight_t_;
20
+ static constexpr int kNThreads = kNThreads_;
21
+ static constexpr int kWidth = kWidth_;
22
+ static constexpr int kNBytes = sizeof(input_t);
23
+ static_assert(kNBytes == 2 || kNBytes == 4);
24
+ static constexpr int kNElts = kNBytes == 4 ? 4 : 8;
25
+ static_assert(kWidth <= kNElts);
26
+ static constexpr bool kIsVecLoad = kIsVecLoad_;
27
+ using vec_t = typename BytesToType<kNBytes * kNElts>::Type;
28
+ using BlockLoadT = cub::BlockLoad<input_t, kNThreads, kNElts, cub::BLOCK_LOAD_WARP_TRANSPOSE>;
29
+ using BlockLoadVecT = cub::BlockLoad<vec_t, kNThreads, 1, cub::BLOCK_LOAD_DIRECT>;
30
+ using BlockStoreT = cub::BlockStore<input_t, kNThreads, kNElts, cub::BLOCK_STORE_WARP_TRANSPOSE>;
31
+ using BlockStoreVecT = cub::BlockStore<vec_t, kNThreads, 1, cub::BLOCK_STORE_DIRECT>;
32
+ static constexpr int kSmemIOSize = kIsVecLoad
33
+ ? 0
34
+ : std::max({sizeof(typename BlockLoadT::TempStorage), sizeof(typename BlockStoreT::TempStorage)});
35
+ static constexpr int kSmemExchangeSize = kNThreads * kNBytes * kNElts;
36
+ static constexpr int kSmemSize = kSmemIOSize + kSmemExchangeSize;
37
+ };
38
+
39
+ template<typename Ktraits>
40
+ __global__ __launch_bounds__(Ktraits::kNThreads)
41
+ void causal_conv1d_fwd_kernel(ConvParamsBase params) {
42
+ constexpr int kWidth = Ktraits::kWidth;
43
+ constexpr int kNThreads = Ktraits::kNThreads;
44
+ constexpr int kNElts = Ktraits::kNElts;
45
+ constexpr bool kIsVecLoad = Ktraits::kIsVecLoad;
46
+ using input_t = typename Ktraits::input_t;
47
+ using vec_t = typename Ktraits::vec_t;
48
+ using weight_t = typename Ktraits::weight_t;
49
+
50
+ // Shared memory.
51
+ extern __shared__ char smem_[];
52
+ auto& smem_load = reinterpret_cast<typename Ktraits::BlockLoadT::TempStorage&>(smem_);
53
+ auto& smem_load_vec = reinterpret_cast<typename Ktraits::BlockLoadVecT::TempStorage&>(smem_);
54
+ auto& smem_store = reinterpret_cast<typename Ktraits::BlockStoreT::TempStorage&>(smem_);
55
+ auto& smem_store_vec = reinterpret_cast<typename Ktraits::BlockStoreVecT::TempStorage&>(smem_);
56
+ vec_t *smem_exchange = reinterpret_cast<vec_t *>(smem_ + Ktraits::kSmemIOSize);
57
+
58
+ const int tidx = threadIdx.x;
59
+ const int batch_id = blockIdx.x;
60
+ const int channel_id = blockIdx.y;
61
+ input_t *x = reinterpret_cast<input_t *>(params.x_ptr) + batch_id * params.x_batch_stride
62
+ + channel_id * params.x_c_stride;
63
+ weight_t *weight = reinterpret_cast<weight_t *>(params.weight_ptr) + channel_id * params.weight_c_stride;
64
+ input_t *out = reinterpret_cast<input_t *>(params.out_ptr) + batch_id * params.out_batch_stride
65
+ + channel_id * params.out_c_stride;
66
+ float bias_val = params.bias_ptr == nullptr ? 0.f : float(reinterpret_cast<weight_t *>(params.bias_ptr)[channel_id]);
67
+
68
+ // Thread 0 will load the last elements of the previous chunk, so we initialize those to 0.
69
+ if (tidx == 0) {
70
+ input_t zeros[kNElts] = {0};
71
+ smem_exchange[kNThreads - 1] = reinterpret_cast<vec_t *>(zeros)[0];
72
+ }
73
+
74
+ float weight_vals[kWidth];
75
+ #pragma unroll
76
+ for (int i = 0; i < kWidth; ++i) { weight_vals[i] = float(weight[i * params.weight_width_stride]); }
77
+
78
+ constexpr int kChunkSize = kNThreads * kNElts;
79
+ const int n_chunks = (params.seqlen + kChunkSize - 1) / kChunkSize;
80
+ for (int chunk = 0; chunk < n_chunks; ++chunk) {
81
+ input_t x_vals_load[2 * kNElts] = {0};
82
+ if constexpr(kIsVecLoad) {
83
+ Ktraits::BlockLoadVecT(smem_load_vec).Load(reinterpret_cast<vec_t*>(x), *reinterpret_cast<vec_t (*)[1]>(&x_vals_load[kNElts]), (params.seqlen - chunk * kChunkSize) / kNElts);
84
+ } else {
85
+ __syncthreads();
86
+ Ktraits::BlockLoadT(smem_load).Load(x, *reinterpret_cast<input_t (*)[kNElts]>(&x_vals_load[kNElts]), params.seqlen - chunk * kChunkSize);
87
+ }
88
+ x += kChunkSize;
89
+ __syncthreads();
90
+ // Thread kNThreads - 1 doesn't write yet, so that thread 0 can read
91
+ // the last elements of the previous chunk.
92
+ if (tidx < kNThreads - 1) { smem_exchange[tidx] = reinterpret_cast<vec_t *>(x_vals_load)[1]; }
93
+ __syncthreads();
94
+ reinterpret_cast<vec_t *>(x_vals_load)[0] = smem_exchange[tidx > 0 ? tidx - 1 : kNThreads - 1];
95
+ __syncthreads();
96
+ // Now thread kNThreads - 1 can write the last elements of the current chunk.
97
+ if (tidx == kNThreads - 1) { smem_exchange[tidx] = reinterpret_cast<vec_t *>(x_vals_load)[1]; }
98
+
99
+ float x_vals[2 * kNElts];
100
+ #pragma unroll
101
+ for (int i = 0; i < 2 * kNElts; ++i) { x_vals[i] = float(x_vals_load[i]); }
102
+
103
+ float out_vals[kNElts];
104
+ #pragma unroll
105
+ for (int i = 0; i < kNElts; ++i) {
106
+ out_vals[i] = bias_val;
107
+ #pragma unroll
108
+ for (int w = 0; w < kWidth; ++w) {
109
+ out_vals[i] += weight_vals[w] * x_vals[kNElts + i - (kWidth - w - 1)];
110
+ }
111
+ }
112
+
113
+ if (params.silu_activation) {
114
+ #pragma unroll
115
+ for (int i = 0; i < kNElts; ++i) {
116
+ out_vals[i] = out_vals[i] / (1 + expf(-out_vals[i]));
117
+ }
118
+ }
119
+
120
+ input_t out_vals_store[kNElts];
121
+ #pragma unroll
122
+ for (int i = 0; i < kNElts; ++i) { out_vals_store[i] = out_vals[i]; }
123
+ if constexpr(kIsVecLoad) {
124
+ Ktraits::BlockStoreVecT(smem_store_vec).Store(reinterpret_cast<vec_t*>(out), reinterpret_cast<vec_t (&)[1]>(out_vals_store), (params.seqlen - chunk * kChunkSize) / kNElts);
125
+ } else {
126
+ Ktraits::BlockStoreT(smem_store).Store(out, out_vals_store, params.seqlen - chunk * kChunkSize);
127
+ }
128
+ out += kChunkSize;
129
+ }
130
+ }
131
+
132
+ template<int kNThreads, int kWidth, typename input_t, typename weight_t>
133
+ void causal_conv1d_fwd_launch(ConvParamsBase &params, cudaStream_t stream) {
134
+ static constexpr int kNElts = sizeof(input_t) == 4 ? 4 : 8;
135
+ BOOL_SWITCH(params.seqlen % kNElts == 0, kIsVecLoad, [&] {
136
+ using Ktraits = Causal_conv1d_fwd_kernel_traits<kNThreads, kWidth, kIsVecLoad, input_t, weight_t>;
137
+ constexpr int kSmemSize = Ktraits::kSmemSize;
138
+ dim3 grid(params.batch, params.dim);
139
+ auto kernel = &causal_conv1d_fwd_kernel<Ktraits>;
140
+ if (kSmemSize >= 48 * 1024) {
141
+ C10_CUDA_CHECK(cudaFuncSetAttribute(
142
+ kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize));
143
+ }
144
+ kernel<<<grid, Ktraits::kNThreads, kSmemSize, stream>>>(params);
145
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
146
+ });
147
+ }
148
+
149
+ template<typename input_t, typename weight_t>
150
+ void causal_conv1d_fwd_cuda(ConvParamsBase &params, cudaStream_t stream) {
151
+ if (params.width == 2) {
152
+ causal_conv1d_fwd_launch<128, 2, input_t, weight_t>(params, stream);
153
+ } else if (params.width == 3) {
154
+ causal_conv1d_fwd_launch<128, 3, input_t, weight_t>(params, stream);
155
+ } else if (params.width == 4) {
156
+ causal_conv1d_fwd_launch<128, 4, input_t, weight_t>(params, stream);
157
+ }
158
+ }
159
+
160
+ template<int kNThreads_, int kWidth_, int kChunkSizeL_, bool kIsVecLoad_, typename input_t_, typename weight_t_>
161
+ struct Causal_conv1d_channellast_fwd_kernel_traits {
162
+ // The cache line is 128 bytes, and we try to read 16 bytes per thread.
163
+ // So we have 8 threads per "row", so 32 or 64 elements in the channel dimension.
164
+ // That leaves 4 columns per warp, and so 16 columns per block (assuming each block has 128
165
+ // threads). Each load is 16 x 32|64 elements in the L x C dimensions.
166
+ using input_t = input_t_;
167
+ using weight_t = weight_t_;
168
+ static constexpr int kNThreads = kNThreads_;
169
+ static_assert(kNThreads % 32 == 0);
170
+ static constexpr int kNWarps = kNThreads / 32;
171
+ static constexpr int kWidth = kWidth_;
172
+ static constexpr int kChunkSizeL = kChunkSizeL_;
173
+ static constexpr int kNBytes = sizeof(input_t);
174
+ static_assert(kNBytes == 2 || kNBytes == 4);
175
+ static constexpr int kNElts = kNBytes == 4 ? 4 : 8;
176
+ static constexpr int kNEltsPerRow = 128 / kNBytes;
177
+ static constexpr int kNThreadsPerRow = kNEltsPerRow / kNElts; // Always 8 for now
178
+ static_assert(kNThreadsPerRow * kNBytes * kNElts == 128);
179
+ static constexpr int kNColsPerWarp = 32 / kNThreadsPerRow; // Always 4 for now
180
+ static_assert(kNColsPerWarp * kNThreadsPerRow == 32);
181
+ static constexpr int kNColsPerLoad = kNColsPerWarp * kNWarps;
182
+ static constexpr int kNLoads = kChunkSizeL / kNColsPerLoad;
183
+ static_assert(kNLoads * kNColsPerLoad == kChunkSizeL);
184
+ static constexpr bool kIsVecLoad = kIsVecLoad_;
185
+ using vec_t = typename BytesToType<kNBytes * kNElts>::Type;
186
+ // using BlockLoadT = cub::BlockLoad<input_t, kNThreads, kNItems, cub::BLOCK_LOAD_WARP_TRANSPOSE>;
187
+ // using BlockStoreT = cub::BlockStore<input_t, kNThreads, kNItems, cub::BLOCK_STORE_WARP_TRANSPOSE>;
188
+ // static constexpr int kSmemSize = std::max({sizeof(typename BlockLoadT::TempStorage),
189
+ // sizeof(typename BlockStoreT::TempStorage)});
190
+ // static constexpr int kSmemSize = kChunkSizeL * kNEltsPerRow * kNBytes;
191
+ };
192
+
193
+ template<typename Ktraits>
194
+ __global__ __launch_bounds__(Ktraits::kNThreads)
195
+ void causal_conv1d_channellast_fwd_kernel(ConvParamsBase params) {
196
+ constexpr int kWidth = Ktraits::kWidth;
197
+ constexpr int kNThreads = Ktraits::kNThreads;
198
+ constexpr int kNElts = Ktraits::kNElts;
199
+ constexpr int kNWarp = Ktraits::kNWarps;
200
+ constexpr int kNThreadsPerC = Ktraits::kNThreadsPerRow;
201
+ constexpr int kLPerLoad = Ktraits::kNColsPerLoad;
202
+ constexpr int kChunkSizeL = Ktraits::kChunkSizeL;
203
+ constexpr int kChunkSizeC = Ktraits::kNEltsPerRow;
204
+ using input_t = typename Ktraits::input_t;
205
+ using vec_t = typename Ktraits::vec_t;
206
+ using weight_t = typename Ktraits::weight_t;
207
+
208
+ // Shared memory.
209
+ __shared__ input_t x_smem[kWidth - 1 + kChunkSizeL][kChunkSizeC + kNElts];
210
+
211
+ const int tid = threadIdx.x;
212
+ const int l_idx = tid / kNThreadsPerC;
213
+ const int c_idx = tid % kNThreadsPerC;
214
+ const int batch_id = blockIdx.x;
215
+ const int chunk_l_id = blockIdx.y;
216
+ const int chunk_c_id = blockIdx.z;
217
+ input_t *x = reinterpret_cast<input_t *>(params.x_ptr) + batch_id * params.x_batch_stride
218
+ + (chunk_l_id * kChunkSizeL + l_idx) * params.x_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts;
219
+ weight_t *weight = reinterpret_cast<weight_t *>(params.weight_ptr)
220
+ + chunk_c_id * kChunkSizeC * params.weight_c_stride;
221
+ input_t *out = reinterpret_cast<input_t *>(params.out_ptr) + batch_id * params.out_batch_stride
222
+ + (chunk_l_id * kChunkSizeL + l_idx) * params.out_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts;
223
+
224
+ #pragma unroll
225
+ for (int l = 0; l < Ktraits::kNLoads; ++l) {
226
+ input_t x_vals_load[kNElts] = {0};
227
+ if (chunk_l_id * kChunkSizeL + l * kLPerLoad + l_idx < params.seqlen
228
+ && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
229
+ reinterpret_cast<vec_t *>(x_vals_load)[0] = *reinterpret_cast<vec_t *>(x + l * kLPerLoad * params.x_l_stride);
230
+ }
231
+ reinterpret_cast<vec_t *>(x_smem[kWidth - 1 + l * kLPerLoad + l_idx])[c_idx] = reinterpret_cast<vec_t *>(x_vals_load)[0];
232
+ }
233
+ // Load the elements from the previous chunk that are needed for convolution.
234
+ if (l_idx < kWidth - 1) {
235
+ input_t x_vals_load[kNElts] = {0};
236
+ if (chunk_l_id * kChunkSizeL + l_idx - (kWidth - 1) >= 0
237
+ && chunk_l_id * kChunkSizeL + l_idx - (kWidth - 1) < params.seqlen
238
+ && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
239
+ reinterpret_cast<vec_t *>(x_vals_load)[0] = *reinterpret_cast<vec_t *>(x - (kWidth - 1) * params.x_l_stride);
240
+ }
241
+ reinterpret_cast<vec_t *>(x_smem[l_idx])[c_idx] = reinterpret_cast<vec_t *>(x_vals_load)[0];
242
+ }
243
+
244
+ __syncthreads();
245
+
246
+ constexpr int kLPerThread = std::min(kChunkSizeL * kChunkSizeC / kNThreads, kChunkSizeL);
247
+ static_assert(kLPerThread * kNThreads == kChunkSizeL * kChunkSizeC);
248
+ constexpr int kNThreadsPerRow = kChunkSizeL / kLPerThread;
249
+ static_assert(kNThreadsPerRow * kLPerThread == kChunkSizeL);
250
+ // kChunkSizeL, kLPerThread, kNThreadsPerRow should be powers of 2 for simplicity
251
+ static_assert((kChunkSizeL & (kChunkSizeL - 1)) == 0);
252
+ static_assert((kLPerThread & (kLPerThread - 1)) == 0);
253
+ static_assert((kNThreadsPerRow & (kNThreadsPerRow - 1)) == 0);
254
+ static_assert(kNThreadsPerRow <= 32);
255
+
256
+ const int row_idx = tid / kNThreadsPerRow;
257
+ const int col_idx = tid % kNThreadsPerRow;
258
+
259
+ float bias_val = params.bias_ptr == nullptr || chunk_c_id * kChunkSizeC + row_idx >= params.dim ? 0.f : float(reinterpret_cast<weight_t *>(params.bias_ptr)[chunk_c_id * kChunkSizeC + row_idx]);
260
+ float weight_vals[kWidth] = {0};
261
+ if (chunk_c_id * kChunkSizeC + row_idx < params.dim) {
262
+ #pragma unroll
263
+ for (int w = 0; w < kWidth; ++w) {
264
+ weight_vals[w] = weight[row_idx * params.weight_c_stride + w * params.weight_width_stride];
265
+ }
266
+ }
267
+ float x_vals[kWidth - 1 + kLPerThread];
268
+ #pragma unroll
269
+ for (int i = 0; i < kWidth - 1 + kLPerThread; ++i) {
270
+ x_vals[i] = float(x_smem[col_idx * kLPerThread + i][row_idx]);
271
+ }
272
+
273
+ float out_vals[kLPerThread];
274
+ #pragma unroll
275
+ for (int i = 0; i < kLPerThread; ++i) {
276
+ out_vals[i] = bias_val;
277
+ #pragma unroll
278
+ for (int w = 0; w < kWidth; ++w) { out_vals[i] += weight_vals[w] * x_vals[i + w]; }
279
+ if (params.silu_activation) {out_vals[i] = out_vals[i] / (1 + expf(-out_vals[i])); }
280
+ }
281
+
282
+ // Since kNThreadsPerRow is a power of 2 and <= 32, we only need syncwarp and not syncthreads.
283
+ __syncwarp();
284
+ #pragma unroll
285
+ for (int i = 0; i < kLPerThread; ++i) { x_smem[col_idx * kLPerThread + i][row_idx] = out_vals[i]; }
286
+ __syncthreads();
287
+
288
+ #pragma unroll
289
+ for (int l = 0; l < Ktraits::kNLoads; ++l) {
290
+ input_t out_vals_store[kNElts];
291
+ reinterpret_cast<vec_t *>(out_vals_store)[0] = reinterpret_cast<vec_t *>(x_smem[l * kLPerLoad + l_idx])[c_idx];
292
+ if (chunk_l_id * kChunkSizeL + l * kLPerLoad + l_idx < params.seqlen
293
+ && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) {
294
+ *reinterpret_cast<vec_t *>(out + l * kLPerLoad * params.out_l_stride) = reinterpret_cast<vec_t *>(out_vals_store)[0];
295
+ }
296
+ }
297
+
298
+ }
299
+
300
+ template<int kNThreads, int kWidth, typename input_t, typename weight_t>
301
+ void causal_conv1d_channellast_fwd_launch(ConvParamsBase &params, cudaStream_t stream) {
302
+ using Ktraits = Causal_conv1d_channellast_fwd_kernel_traits<kNThreads, kWidth, 64, true, input_t, weight_t>;
303
+ // constexpr int kSmemSize = Ktraits::kSmemSize;
304
+ constexpr int kChunkSizeL = Ktraits::kChunkSizeL;
305
+ constexpr int kChunkSizeC = Ktraits::kNEltsPerRow;
306
+ const int n_chunks_L = (params.seqlen + kChunkSizeL - 1) / kChunkSizeL;
307
+ const int n_chunks_C = (params.dim + kChunkSizeC - 1) / kChunkSizeC;
308
+ // printf("n_chunks_L: %d, n_chunks_C: %d\n", n_chunks_L, n_chunks_C);
309
+ dim3 grid(params.batch, n_chunks_L, n_chunks_C);
310
+ dim3 block(Ktraits::kNThreads);
311
+ auto kernel = &causal_conv1d_channellast_fwd_kernel<Ktraits>;
312
+ // if (kSmemSize >= 48 * 1024) {
313
+ // C10_CUDA_CHECK(cudaFuncSetAttribute(
314
+ // kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize));
315
+ // }
316
+ // kernel<<<grid, Ktraits::kNThreads, kSmemSize, stream>>>(params);
317
+ kernel<<<grid, Ktraits::kNThreads, 0, stream>>>(params);
318
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
319
+ }
320
+
321
+ template<typename input_t, typename weight_t>
322
+ void causal_conv1d_channellast_fwd_cuda(ConvParamsBase &params, cudaStream_t stream) {
323
+ if (params.width == 2) {
324
+ causal_conv1d_channellast_fwd_launch<128, 2, input_t, weight_t>(params, stream);
325
+ } else if (params.width == 3) {
326
+ causal_conv1d_channellast_fwd_launch<128, 3, input_t, weight_t>(params, stream);
327
+ } else if (params.width == 4) {
328
+ causal_conv1d_channellast_fwd_launch<128, 4, input_t, weight_t>(params, stream);
329
+ }
330
+ }
331
+
332
+ template void causal_conv1d_fwd_cuda<float, float>(ConvParamsBase &params, cudaStream_t stream);
333
+ template void causal_conv1d_fwd_cuda<at::Half, float>(ConvParamsBase &params, cudaStream_t stream);
334
+ template void causal_conv1d_fwd_cuda<at::BFloat16, float>(ConvParamsBase &params, cudaStream_t stream);
335
+ template void causal_conv1d_fwd_cuda<float, at::Half>(ConvParamsBase &params, cudaStream_t stream);
336
+ template void causal_conv1d_fwd_cuda<at::Half, at::Half>(ConvParamsBase &params, cudaStream_t stream);
337
+ template void causal_conv1d_fwd_cuda<at::BFloat16, at::Half>(ConvParamsBase &params, cudaStream_t stream);
338
+ template void causal_conv1d_fwd_cuda<float, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
339
+ template void causal_conv1d_fwd_cuda<at::Half, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
340
+ template void causal_conv1d_fwd_cuda<at::BFloat16, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
341
+
342
+ template void causal_conv1d_channellast_fwd_cuda<float, float>(ConvParamsBase &params, cudaStream_t stream);
343
+ template void causal_conv1d_channellast_fwd_cuda<at::Half, float>(ConvParamsBase &params, cudaStream_t stream);
344
+ template void causal_conv1d_channellast_fwd_cuda<at::BFloat16, float>(ConvParamsBase &params, cudaStream_t stream);
345
+ template void causal_conv1d_channellast_fwd_cuda<float, at::Half>(ConvParamsBase &params, cudaStream_t stream);
346
+ template void causal_conv1d_channellast_fwd_cuda<at::Half, at::Half>(ConvParamsBase &params, cudaStream_t stream);
347
+ template void causal_conv1d_channellast_fwd_cuda<at::BFloat16, at::Half>(ConvParamsBase &params, cudaStream_t stream);
348
+ template void causal_conv1d_channellast_fwd_cuda<float, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
349
+ template void causal_conv1d_channellast_fwd_cuda<at::Half, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
350
+ template void causal_conv1d_channellast_fwd_cuda<at::BFloat16, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
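To make the tiling comments in the channel-last traits concrete: with 16-bit inputs (kNBytes = 2) one gets kNElts = 8 and kNEltsPerRow = 64, so 8 threads cover one 128-byte row; with 128 threads and kChunkSizeL = 64 each block handles a 64 x 64 (L x C) tile in 4 loads. A compile-time recomputation of that arithmetic (plain C++, assuming those template arguments, purely illustrative):

    // Recomputes the channel-last trait constants for a 2-byte input type
    // (e.g. half) with kNThreads = 128 and kChunkSizeL = 64.
    constexpr int kNBytes         = 2;
    constexpr int kNThreads       = 128;
    constexpr int kChunkSizeL     = 64;
    constexpr int kNElts          = kNBytes == 4 ? 4 : 8;        // 8 elements per 16-byte load
    constexpr int kNEltsPerRow    = 128 / kNBytes;               // 64 channels per 128-byte row
    constexpr int kNThreadsPerRow = kNEltsPerRow / kNElts;       // 8 threads per row
    constexpr int kNWarps         = kNThreads / 32;              // 4 warps
    constexpr int kNColsPerWarp   = 32 / kNThreadsPerRow;        // 4 L positions per warp
    constexpr int kNColsPerLoad   = kNColsPerWarp * kNWarps;     // 16 L positions per load
    constexpr int kNLoads         = kChunkSizeL / kNColsPerLoad; // 4 loads per 64-long chunk
    static_assert(kNLoads * kNColsPerLoad == kChunkSizeL, "tile exactly covered");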
SegMamba/causal-conv1d/csrc/causal_conv1d_update.cu ADDED
@@ -0,0 +1,96 @@
1
+ /******************************************************************************
+  * Copyright (c) 2023, Tri Dao.
+  ******************************************************************************/
+
+ #include <c10/util/BFloat16.h>
+ #include <c10/util/Half.h>
+ #include <c10/cuda/CUDAException.h>  // For C10_CUDA_CHECK and C10_CUDA_KERNEL_LAUNCH_CHECK
+
+ #include <cub/block/block_load.cuh>
+ #include <cub/block/block_store.cuh>
+
+ #include "causal_conv1d.h"
+ #include "causal_conv1d_common.h"
+ #include "static_switch.h"
+
+ template<int kNThreads_, int kWidth_, typename input_t_, typename weight_t_>
+ struct Causal_conv1d_update_kernel_traits {
+     using input_t = input_t_;
+     using weight_t = weight_t_;
+     static constexpr int kNThreads = kNThreads_;
+     static constexpr int kWidth = kWidth_;
+     static constexpr int kNBytes = sizeof(input_t);
+     static_assert(kNBytes == 2 || kNBytes == 4);
+ };
+
+ template<typename Ktraits>
+ __global__ __launch_bounds__(Ktraits::kNThreads)
+ void causal_conv1d_update_kernel(ConvParamsBase params) {
+     constexpr int kWidth = Ktraits::kWidth;
+     constexpr int kNThreads = Ktraits::kNThreads;
+     using input_t = typename Ktraits::input_t;
+     using weight_t = typename Ktraits::weight_t;
+
+     const int tidx = threadIdx.x;
+     const int batch_id = blockIdx.x;
+     const int channel_id = blockIdx.y * kNThreads + tidx;
+     input_t *x = reinterpret_cast<input_t *>(params.x_ptr) + batch_id * params.x_batch_stride
+         + channel_id * params.x_c_stride;
+     input_t *conv_state = reinterpret_cast<input_t *>(params.conv_state_ptr) + batch_id * params.conv_state_batch_stride
+         + channel_id * params.conv_state_c_stride;
+     weight_t *weight = reinterpret_cast<weight_t *>(params.weight_ptr) + channel_id * params.weight_c_stride;
+     input_t *out = reinterpret_cast<input_t *>(params.out_ptr) + batch_id * params.out_batch_stride
+         + channel_id * params.out_c_stride;
+     float bias_val = params.bias_ptr == nullptr || channel_id >= params.dim ? 0.f : float(reinterpret_cast<weight_t *>(params.bias_ptr)[channel_id]);
+
+     float weight_vals[kWidth] = {0};
+     if (channel_id < params.dim) {
+         #pragma unroll
+         for (int i = 0; i < kWidth; ++i) { weight_vals[i] = float(weight[i * params.weight_width_stride]); }
+     }
+
+     float x_vals[kWidth] = {0};
+     if (channel_id < params.dim) {
+         #pragma unroll
+         for (int i = 0; i < kWidth - 1; ++i) { x_vals[i] = float(conv_state[(i + 1) * params.conv_state_l_stride]); }
+         x_vals[kWidth - 1] = float(x[0]);
+         #pragma unroll
+         for (int i = 0; i < kWidth; ++i) { conv_state[i * params.conv_state_l_stride] = input_t(x_vals[i]); }
+     }
+
+     float out_val = bias_val;
+     #pragma unroll
+     for (int i = 0; i < kWidth; ++i) { out_val += weight_vals[i] * x_vals[i]; }
+     if (params.silu_activation) { out_val = out_val / (1 + expf(-out_val)); }
+     if (channel_id < params.dim) { out[0] = input_t(out_val); }
+ }
+
+ template<int kNThreads, int kWidth, typename input_t, typename weight_t>
+ void causal_conv1d_update_launch(ConvParamsBase &params, cudaStream_t stream) {
+     using Ktraits = Causal_conv1d_update_kernel_traits<kNThreads, kWidth, input_t, weight_t>;
+     dim3 grid(params.batch, (params.dim + kNThreads - 1) / kNThreads);
+     auto kernel = &causal_conv1d_update_kernel<Ktraits>;
+     kernel<<<grid, Ktraits::kNThreads, 0, stream>>>(params);
+     C10_CUDA_KERNEL_LAUNCH_CHECK();
+ }
+
+ template<typename input_t, typename weight_t>
+ void causal_conv1d_update_cuda(ConvParamsBase &params, cudaStream_t stream) {
+     if (params.width == 2) {
+         causal_conv1d_update_launch<64, 2, input_t, weight_t>(params, stream);
+     } else if (params.width == 3) {
+         causal_conv1d_update_launch<64, 3, input_t, weight_t>(params, stream);
+     } else if (params.width == 4) {
+         causal_conv1d_update_launch<64, 4, input_t, weight_t>(params, stream);
+     }
+ }
+
+ template void causal_conv1d_update_cuda<float, float>(ConvParamsBase &params, cudaStream_t stream);
+ template void causal_conv1d_update_cuda<at::Half, float>(ConvParamsBase &params, cudaStream_t stream);
+ template void causal_conv1d_update_cuda<at::BFloat16, float>(ConvParamsBase &params, cudaStream_t stream);
+ template void causal_conv1d_update_cuda<float, at::Half>(ConvParamsBase &params, cudaStream_t stream);
+ template void causal_conv1d_update_cuda<at::Half, at::Half>(ConvParamsBase &params, cudaStream_t stream);
+ template void causal_conv1d_update_cuda<at::BFloat16, at::Half>(ConvParamsBase &params, cudaStream_t stream);
+ template void causal_conv1d_update_cuda<float, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
+ template void causal_conv1d_update_cuda<at::Half, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
+ template void causal_conv1d_update_cuda<at::BFloat16, at::BFloat16>(ConvParamsBase &params, cudaStream_t stream);
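For reference, the update kernel above computes one autoregressive decoding step per (batch, channel): it shifts the cached window one position to the left, writes the newest sample into the last slot, takes the dot product with the width-wide filter plus bias, and optionally applies SiLU. A minimal NumPy sketch of the same computation (the function and variable names here are illustrative, not part of the CUDA interface):

import numpy as np

def causal_conv1d_update_sketch(x, conv_state, weight, bias=None, silu=True):
    # x: (batch, dim) newest sample; conv_state: (batch, dim, width) rolling window
    # weight: (dim, width) depthwise filter; bias: (dim,) or None
    conv_state[:, :, :-1] = conv_state[:, :, 1:]        # shift the window left by one step
    conv_state[:, :, -1] = x                            # append the newest sample
    out = np.einsum("bdw,dw->bd", conv_state, weight)   # per-channel dot product
    if bias is not None:
        out = out + bias
    if silu:
        out = out / (1.0 + np.exp(-out))                # SiLU, i.e. out * sigmoid(out)
    return out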
SegMamba/causal-conv1d/csrc/static_switch.h ADDED
@@ -0,0 +1,25 @@
+ // Inspired by https://github.com/NVIDIA/DALI/blob/main/include/dali/core/static_switch.h
+ // and https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Dispatch.h
+
+ #pragma once
+
+ /// @param COND       - a boolean expression to switch by
+ /// @param CONST_NAME - a name given for the constexpr bool variable.
+ /// @param ...        - code to execute for true and false
+ ///
+ /// Usage:
+ /// ```
+ /// BOOL_SWITCH(flag, BoolConst, [&] {
+ ///     some_function<BoolConst>(...);
+ /// });
+ /// ```
+ #define BOOL_SWITCH(COND, CONST_NAME, ...)              \
+     [&] {                                               \
+         if (COND) {                                     \
+             static constexpr bool CONST_NAME = true;    \
+             return __VA_ARGS__();                       \
+         } else {                                        \
+             static constexpr bool CONST_NAME = false;   \
+             return __VA_ARGS__();                       \
+         }                                               \
+     }()
SegMamba/causal-conv1d/setup.py ADDED
@@ -0,0 +1,264 @@
1
+ # Copyright (c) 2023, Tri Dao.
2
+ import sys
3
+ import warnings
4
+ import os
5
+ import re
6
+ import ast
7
+ from pathlib import Path
8
+ from packaging.version import parse, Version
9
+ import platform
10
+
11
+ from setuptools import setup, find_packages
12
+ import subprocess
13
+
14
+ import urllib.request
15
+ import urllib.error
16
+ from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
17
+
18
+ import torch
19
+ from torch.utils.cpp_extension import (
20
+ BuildExtension,
21
+ CppExtension,
22
+ CUDAExtension,
23
+ CUDA_HOME,
24
+ )
25
+
26
+
27
+ with open("README.md", "r", encoding="utf-8") as fh:
28
+ long_description = fh.read()
29
+
30
+
31
+ # ninja build does not work unless include_dirs are abs path
32
+ this_dir = os.path.dirname(os.path.abspath(__file__))
33
+
34
+ PACKAGE_NAME = "causal_conv1d"
35
+
36
+ BASE_WHEEL_URL = "https://github.com/Dao-AILab/causal-conv1d/releases/download/{tag_name}/{wheel_name}"
37
+
38
+ # FORCE_BUILD: Force a fresh build locally, instead of attempting to find prebuilt wheels
39
+ # SKIP_CUDA_BUILD: Intended to allow CI to use a simple `python setup.py sdist` run to copy over raw files, without any cuda compilation
40
+ FORCE_BUILD = os.getenv("CAUSAL_CONV1D_FORCE_BUILD", "FALSE") == "TRUE"
41
+ SKIP_CUDA_BUILD = os.getenv("CAUSAL_CONV1D_SKIP_CUDA_BUILD", "FALSE") == "TRUE"
42
+ # For CI, we want the option to build with C++11 ABI since the nvcr images use C++11 ABI
43
+ FORCE_CXX11_ABI = os.getenv("CAUSAL_CONV1D_FORCE_CXX11_ABI", "FALSE") == "TRUE"
44
+
45
+
46
+ def get_platform():
47
+ """
48
+ Returns the platform name as used in wheel filenames.
49
+ """
50
+ if sys.platform.startswith("linux"):
51
+ return "linux_x86_64"
52
+ elif sys.platform == "darwin":
53
+ mac_version = ".".join(platform.mac_ver()[0].split(".")[:2])
54
+ return f"macosx_{mac_version}_x86_64"
55
+ elif sys.platform == "win32":
56
+ return "win_amd64"
57
+ else:
58
+ raise ValueError("Unsupported platform: {}".format(sys.platform))
59
+
60
+
61
+ def get_cuda_bare_metal_version(cuda_dir):
62
+ raw_output = subprocess.check_output(
63
+ [cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True
64
+ )
65
+ output = raw_output.split()
66
+ release_idx = output.index("release") + 1
67
+ bare_metal_version = parse(output[release_idx].split(",")[0])
68
+
69
+ return raw_output, bare_metal_version
70
+
71
+
72
+ def check_if_cuda_home_none(global_option: str) -> None:
73
+ if CUDA_HOME is not None:
74
+ return
75
+ # warn instead of error because user could be downloading prebuilt wheels, so nvcc won't be necessary
76
+ # in that case.
77
+ warnings.warn(
78
+ f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
79
+ "If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
80
+ "only images whose names contain 'devel' will provide nvcc."
81
+ )
82
+
83
+
84
+ def append_nvcc_threads(nvcc_extra_args):
85
+ return nvcc_extra_args + ["--threads", "4"]
86
+
87
+
88
+ cmdclass = {}
89
+ ext_modules = []
90
+
91
+ if not SKIP_CUDA_BUILD:
92
+ print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
93
+ TORCH_MAJOR = int(torch.__version__.split(".")[0])
94
+ TORCH_MINOR = int(torch.__version__.split(".")[1])
95
+
96
+ check_if_cuda_home_none("causal_conv1d")
97
+ # Check if CUDA 11 is installed for compute capability 8.0
98
+ cc_flag = []
99
+ if CUDA_HOME is not None:
100
+ _, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
101
+ if bare_metal_version < Version("11.6"):
102
+ raise RuntimeError(
103
+ "causal_conv1d is only supported on CUDA 11.6 and above. "
104
+ "Note: make sure nvcc has a supported version by running nvcc -V."
105
+ )
106
+
107
+ cc_flag.append("-gencode")
108
+ cc_flag.append("arch=compute_70,code=sm_70")
109
+ cc_flag.append("-gencode")
110
+ cc_flag.append("arch=compute_80,code=sm_80")
111
+ if bare_metal_version >= Version("11.8"):
112
+ cc_flag.append("-gencode")
113
+ cc_flag.append("arch=compute_90,code=sm_90")
114
+
115
+ # HACK: The compiler flag -D_GLIBCXX_USE_CXX11_ABI is set to be the same as
116
+ # torch._C._GLIBCXX_USE_CXX11_ABI
117
+ # https://github.com/pytorch/pytorch/blob/8472c24e3b5b60150096486616d98b7bea01500b/torch/utils/cpp_extension.py#L920
118
+ if FORCE_CXX11_ABI:
119
+ torch._C._GLIBCXX_USE_CXX11_ABI = True
120
+
121
+ ext_modules.append(
122
+ CUDAExtension(
123
+ name="causal_conv1d_cuda",
124
+ sources=[
125
+ "csrc/causal_conv1d.cpp",
126
+ "csrc/causal_conv1d_fwd.cu",
127
+ "csrc/causal_conv1d_bwd.cu",
128
+ "csrc/causal_conv1d_update.cu",
129
+ ],
130
+ extra_compile_args={
131
+ "cxx": ["-O3"],
132
+ "nvcc": append_nvcc_threads(
133
+ [
134
+ "-O3",
135
+ "-U__CUDA_NO_HALF_OPERATORS__",
136
+ "-U__CUDA_NO_HALF_CONVERSIONS__",
137
+ "-U__CUDA_NO_BFLOAT16_OPERATORS__",
138
+ "-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
139
+ "-U__CUDA_NO_BFLOAT162_OPERATORS__",
140
+ "-U__CUDA_NO_BFLOAT162_CONVERSIONS__",
141
+ "--expt-relaxed-constexpr",
142
+ "--expt-extended-lambda",
143
+ "--use_fast_math",
144
+ "--ptxas-options=-v",
145
+ "-lineinfo",
146
+ ]
147
+ + cc_flag
148
+ ),
149
+ },
150
+ include_dirs=[this_dir],
151
+ )
152
+ )
153
+
154
+
155
+ def get_package_version():
156
+ with open(Path(this_dir) / "causal_conv1d" / "__init__.py", "r") as f:
157
+ version_match = re.search(r"^__version__\s*=\s*(.*)$", f.read(), re.MULTILINE)
158
+ public_version = ast.literal_eval(version_match.group(1))
159
+ local_version = os.environ.get("CAUSAL_CONV1D_LOCAL_VERSION")
160
+ if local_version:
161
+ return f"{public_version}+{local_version}"
162
+ else:
163
+ return str(public_version)
164
+
165
+
166
+ def get_wheel_url():
167
+ # Determine the version numbers that will be used to determine the correct wheel
168
+ # We're using the CUDA version used to build torch, not the one currently installed
169
+ # _, cuda_version_raw = get_cuda_bare_metal_version(CUDA_HOME)
170
+ torch_cuda_version = parse(torch.version.cuda)
171
+ torch_version_raw = parse(torch.__version__)
172
+ # For CUDA 11, we only compile for CUDA 11.8, and for CUDA 12 we only compile for CUDA 12.2
173
+ # to save CI time. Minor versions should be compatible.
174
+ torch_cuda_version = parse("11.8") if torch_cuda_version.major == 11 else parse("12.2")
175
+ python_version = f"cp{sys.version_info.major}{sys.version_info.minor}"
176
+ platform_name = get_platform()
177
+ causal_conv1d_version = get_package_version()
178
+ # cuda_version = f"{cuda_version_raw.major}{cuda_version_raw.minor}"
179
+ cuda_version = f"{torch_cuda_version.major}{torch_cuda_version.minor}"
180
+ torch_version = f"{torch_version_raw.major}.{torch_version_raw.minor}"
181
+ cxx11_abi = str(torch._C._GLIBCXX_USE_CXX11_ABI).upper()
182
+
183
+ # Determine wheel URL based on CUDA version, torch version, python version and OS
184
+ wheel_filename = f"{PACKAGE_NAME}-{causal_conv1d_version}+cu{cuda_version}torch{torch_version}cxx11abi{cxx11_abi}-{python_version}-{python_version}-{platform_name}.whl"
185
+ wheel_url = BASE_WHEEL_URL.format(
186
+ tag_name=f"v{causal_conv1d_version}", wheel_name=wheel_filename
187
+ )
188
+ return wheel_url, wheel_filename
189
+
190
+
191
+ class CachedWheelsCommand(_bdist_wheel):
192
+ """
193
+ The CachedWheelsCommand plugs into the default bdist_wheel command, which is run by pip when it cannot
194
+ find an existing wheel (which is currently the case for all installs). We use
195
+ the environment parameters to detect whether there is already a pre-built version of a compatible
196
+ wheel available and short-circuits the standard full build pipeline.
197
+ """
198
+
199
+ def run(self):
200
+ if FORCE_BUILD:
201
+ return super().run()
202
+
203
+ wheel_url, wheel_filename = get_wheel_url()
204
+ print("Guessing wheel URL: ", wheel_url)
205
+ try:
206
+ urllib.request.urlretrieve(wheel_url, wheel_filename)
207
+
208
+ # Make the archive
209
+ # Lifted from the root wheel processing command
210
+ # https://github.com/pypa/wheel/blob/cf71108ff9f6ffc36978069acb28824b44ae028e/src/wheel/bdist_wheel.py#LL381C9-L381C85
211
+ if not os.path.exists(self.dist_dir):
212
+ os.makedirs(self.dist_dir)
213
+
214
+ impl_tag, abi_tag, plat_tag = self.get_tag()
215
+ archive_basename = f"{self.wheel_dist_name}-{impl_tag}-{abi_tag}-{plat_tag}"
216
+
217
+ wheel_path = os.path.join(self.dist_dir, archive_basename + ".whl")
218
+ print("Raw wheel path", wheel_path)
219
+ os.rename(wheel_filename, wheel_path)
220
+ except urllib.error.HTTPError:
221
+ print("Precompiled wheel not found. Building from source...")
222
+ # If the wheel could not be downloaded, build from source
223
+ super().run()
224
+
225
+
226
+ setup(
227
+ name=PACKAGE_NAME,
228
+ version=get_package_version(),
229
+ packages=find_packages(
230
+ exclude=(
231
+ "build",
232
+ "csrc",
233
+ "include",
234
+ "tests",
235
+ "dist",
236
+ "docs",
237
+ "benchmarks",
238
+ "causal_conv1d.egg-info",
239
+ )
240
+ ),
241
+ author="Tri Dao",
242
+ author_email="tri@tridao.me",
243
+ description="Causal depthwise conv1d in CUDA, with a PyTorch interface",
244
+ long_description=long_description,
245
+ long_description_content_type="text/markdown",
246
+ url="https://github.com/Dao-AILab/causal-conv1d",
247
+ classifiers=[
248
+ "Programming Language :: Python :: 3",
249
+ "License :: OSI Approved :: BSD License",
250
+ "Operating System :: Unix",
251
+ ],
252
+ ext_modules=ext_modules,
253
+ cmdclass={"bdist_wheel": CachedWheelsCommand, "build_ext": BuildExtension}
254
+ if ext_modules
255
+ else {
256
+ "bdist_wheel": CachedWheelsCommand,
257
+ },
258
+ python_requires=">=3.7",
259
+ install_requires=[
260
+ "torch",
261
+ "packaging",
262
+ "ninja",
263
+ ],
264
+ )
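The install logic above first tries to download a prebuilt wheel whose filename encodes the CUDA, torch, Python and C++ ABI tags, and only compiles the CUDA sources when that download fails or CAUSAL_CONV1D_FORCE_BUILD=TRUE is set. A small sketch of how the expected wheel name is assembled, mirroring get_wheel_url above (it assumes a CUDA build of torch; the version string and the linux_x86_64 platform tag are illustrative):

import sys
import torch
from packaging.version import parse

def expected_wheel_name(pkg_version="1.0.0"):  # version string is a placeholder
    cuda = parse(torch.version.cuda)
    cuda = parse("11.8") if cuda.major == 11 else parse("12.2")  # CI only ships cu118 / cu122 builds
    tv = parse(torch.__version__)
    py = f"cp{sys.version_info.major}{sys.version_info.minor}"
    abi = str(torch._C._GLIBCXX_USE_CXX11_ABI).upper()
    return (f"causal_conv1d-{pkg_version}+cu{cuda.major}{cuda.minor}"
            f"torch{tv.major}.{tv.minor}cxx11abi{abi}-{py}-{py}-linux_x86_64.whl")

print(expected_wheel_name())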
SegMamba/causal-conv1d/tests/test_causal_conv1d.py ADDED
@@ -0,0 +1,173 @@
1
+ # Copyright (C) 2023, Tri Dao.
2
+
3
+ import math
4
+
5
+ import torch
6
+ import pytest
7
+
8
+ from einops import rearrange
9
+
10
+ from causal_conv1d.causal_conv1d_interface import causal_conv1d_fn, causal_conv1d_ref
11
+ from causal_conv1d.causal_conv1d_interface import causal_conv1d_update, causal_conv1d_update_ref
12
+
13
+
14
+ @pytest.mark.parametrize("channel_last", [False, True])
15
+ # @pytest.mark.parametrize('channel_last', [True])
16
+ @pytest.mark.parametrize("itype", [torch.float32, torch.float16, torch.bfloat16])
17
+ # @pytest.mark.parametrize('itype', [torch.float16])
18
+ @pytest.mark.parametrize("silu_activation", [False, True])
19
+ # @pytest.mark.parametrize('silu_activation', [True])
20
+ @pytest.mark.parametrize("has_bias", [False, True])
21
+ # @pytest.mark.parametrize('has_bias', [True])
22
+ @pytest.mark.parametrize("width", [2, 3, 4])
23
+ # @pytest.mark.parametrize('width', [2])
24
+ @pytest.mark.parametrize(
25
+ "seqlen", [8, 16, 32, 64, 128, 151, 256, 372, 512, 784, 1024, 1134, 2048, 4096]
26
+ )
27
+ # @pytest.mark.parametrize('seqlen', [8, 16, 32, 64, 128, 256, 512, 784, 1024, 2048, 4096])
28
+ # @pytest.mark.parametrize('seqlen', [128])
29
+ def test_causal_conv1d(seqlen, width, has_bias, silu_activation, itype, channel_last):
30
+ device = "cuda"
31
+ rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (3e-3, 5e-3)
32
+ if itype == torch.bfloat16:
33
+ rtol, atol = 1e-2, 5e-2
34
+ rtolw, atolw = (1e-3, 1e-3)
35
+ # set seed
36
+ torch.random.manual_seed(0)
37
+ batch_size = 2
38
+ # batch_size = 1
39
+ dim = 4096 + 32 # Try dim not divisible by 64
40
+ # dim = 64
41
+ if not channel_last:
42
+ x = torch.randn(batch_size, 4096 + dim + 64, seqlen, device=device, dtype=itype)[:, 4096:4096 + dim, :].requires_grad_()
43
+ else:
44
+ x = rearrange(
45
+ torch.randn(batch_size, seqlen, 4096 + dim + 64, device=device, dtype=itype)[:, :, 4096:4096 + dim], "b s d -> b d s"
46
+ ).requires_grad_()
47
+ weight = torch.randn(dim, width, device=device, dtype=torch.float32, requires_grad=True)
48
+ if has_bias:
49
+ bias = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True)
50
+ else:
51
+ bias = None
52
+ x_ref = x.detach().clone().requires_grad_()
53
+ weight_ref = weight.detach().clone().requires_grad_()
54
+ bias_ref = bias.detach().clone().requires_grad_() if bias is not None else None
55
+ activation = None if not silu_activation else "silu"
56
+ out = causal_conv1d_fn(x, weight, bias, activation=activation)
57
+ out_ref = causal_conv1d_ref(x_ref, weight_ref, bias_ref, activation=activation)
58
+
59
+ print(f"Output max diff: {(out - out_ref).abs().max().item()}")
60
+ print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
61
+ assert torch.allclose(out, out_ref, rtol=rtol, atol=atol)
62
+
63
+ g = torch.randn_like(out)
64
+ out_ref.backward(g)
65
+ out.backward(g)
66
+
67
+ print(f"dx max diff: {(x.grad - x_ref.grad).abs().max().item()}")
68
+ print(f"dweight max diff: {(weight.grad - weight_ref.grad).abs().max().item()}")
69
+ if has_bias:
70
+ print(f"dbias max diff: {(bias.grad - bias_ref.grad).abs().max().item()}")
71
+
72
+ assert torch.allclose(x.grad, x_ref.grad.to(dtype=itype), rtol=rtol, atol=atol)
73
+ assert torch.allclose(weight.grad, weight_ref.grad, rtol=rtolw, atol=atolw)
74
+ if has_bias:
75
+ assert torch.allclose(bias.grad, bias_ref.grad, rtol=rtolw, atol=atolw)
76
+
77
+
78
+ @pytest.mark.parametrize("itype", [torch.float32, torch.float16, torch.bfloat16])
79
+ # @pytest.mark.parametrize('itype', [torch.float16])
80
+ @pytest.mark.parametrize("silu_activation", [False, True])
81
+ # @pytest.mark.parametrize('silu_activation', [False])
82
+ @pytest.mark.parametrize("has_bias", [False, True])
83
+ # @pytest.mark.parametrize('has_bias', [True])
84
+ @pytest.mark.parametrize("width", [2, 3, 4])
85
+ # @pytest.mark.parametrize('width', [2])
86
+ @pytest.mark.parametrize("dim", [2048, 2048 + 16, 4096])
87
+ # @pytest.mark.parametrize("dim", [2048])
88
+ def test_causal_conv1d_update(dim, width, has_bias, silu_activation, itype):
89
+ device = "cuda"
90
+ rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (3e-3, 5e-3)
91
+ if itype == torch.bfloat16:
92
+ rtol, atol = 1e-2, 5e-2
93
+ rtolw, atolw = (1e-3, 1e-3)
94
+ # set seed
95
+ torch.random.manual_seed(0)
96
+ batch_size = 2
97
+ # batch_size = 1
98
+ # dim = 64
99
+ x = torch.randn(batch_size, dim, device=device, dtype=itype)
100
+ conv_state = torch.randn(batch_size, dim, width, device=device, dtype=itype)
101
+ weight = torch.randn(dim, width, device=device, dtype=torch.float32, requires_grad=True)
102
+ if has_bias:
103
+ bias = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True)
104
+ else:
105
+ bias = None
106
+ conv_state_ref = conv_state.detach().clone()
107
+ activation = None if not silu_activation else "silu"
108
+ out = causal_conv1d_update(x, conv_state, weight, bias, activation=activation)
109
+ out_ref = causal_conv1d_update_ref(x, conv_state_ref, weight, bias, activation=activation)
110
+
111
+ print(f"Output max diff: {(out - out_ref).abs().max().item()}")
112
+ print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
113
+ assert torch.equal(conv_state, conv_state_ref)
114
+ assert torch.allclose(out, out_ref, rtol=rtol, atol=atol)
115
+
116
+
117
+ # @pytest.mark.parametrize("channel_last", [False, True])
118
+ @pytest.mark.parametrize('channel_last', [True])
119
+ # @pytest.mark.parametrize("itype", [torch.float32, torch.float16, torch.bfloat16])
120
+ @pytest.mark.parametrize('itype', [torch.bfloat16])
121
+ # @pytest.mark.parametrize("silu_activation", [False, True])
122
+ @pytest.mark.parametrize('silu_activation', [True])
123
+ # @pytest.mark.parametrize("has_bias", [False, True])
124
+ @pytest.mark.parametrize('has_bias', [True])
125
+ # @pytest.mark.parametrize("width", [2, 3, 4])
126
+ @pytest.mark.parametrize('width', [4])
127
+ @pytest.mark.parametrize(
128
+ # "seqlen", [8, 16, 32, 64, 128, 151, 256, 372, 512, 784, 1024, 1134, 2048, 4096]
129
+ "seqlen", [2048]
130
+ )
131
+ # @pytest.mark.parametrize('seqlen', [8, 16, 32, 64, 128, 256, 512, 784, 1024, 2048, 4096])
132
+ # @pytest.mark.parametrize('seqlen', [128])
133
+ def test_causal_conv1d_race_condition(seqlen, width, has_bias, silu_activation, itype, channel_last):
134
+ device = "cuda"
135
+ # set seed
136
+ torch.random.manual_seed(0)
137
+ batch_size = 2
138
+ # batch_size = 1
139
+ dim = 4096 + 32 # Try dim not divisible by 64
140
+ # dim = 64
141
+ if not channel_last:
142
+ x = torch.randn(batch_size, 4096 + dim + 64, seqlen, device=device, dtype=itype)[:, 4096:4096 + dim, :].requires_grad_()
143
+ else:
144
+ x = rearrange(
145
+ torch.randn(batch_size, seqlen, 4096 + dim + 64, device=device, dtype=itype)[:, :, 4096:4096 + dim], "b s d -> b d s"
146
+ ).requires_grad_()
147
+ weight = torch.randn(dim, width, device=device, dtype=torch.float32, requires_grad=True)
148
+ if has_bias:
149
+ bias = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True)
150
+ else:
151
+ bias = None
152
+ activation = None if not silu_activation else "silu"
153
+ out0 = causal_conv1d_fn(x, weight, bias, activation=activation)
154
+ g = torch.randn_like(out0)
155
+ dx0, dw0, db0 = torch.autograd.grad(out0, (x, weight, bias), g)
156
+ dw_atol = 1e-4
157
+ db_atol = 1e-4
158
+
159
+ for i in range(10000):
160
+ out = causal_conv1d_fn(x, weight, bias, activation=activation)
161
+ dx, dw, db = torch.autograd.grad(out, (x, weight, bias), g)
162
+ dw_equal = torch.allclose(dw, dw0, atol=dw_atol)
163
+ # if not dw_equal:
164
+ # breakpoint()
165
+ if has_bias:
166
+ db_equal = torch.allclose(db, db0, atol=db_atol)
167
+ # if not db_equal:
168
+ # breakpoint()
169
+ assert torch.equal(out, out0)
170
+ assert torch.equal(dx, dx0)
171
+ assert dw_equal
172
+ if has_bias:
173
+ assert db_equal
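The tests above also document the public call signature: causal_conv1d_fn takes a (batch, dim, seqlen) input, a (dim, width) depthwise filter kept in float32, an optional (dim,) bias and an optional "silu" activation, and returns a tensor with the same shape as the input. A minimal usage sketch on a CUDA device (the shapes are illustrative):

import torch
from causal_conv1d.causal_conv1d_interface import causal_conv1d_fn

x = torch.randn(2, 64, 128, device="cuda", dtype=torch.float16)  # (batch, dim, seqlen)
weight = torch.randn(64, 4, device="cuda", dtype=torch.float32)  # (dim, width)
bias = torch.randn(64, device="cuda", dtype=torch.float32)
y = causal_conv1d_fn(x, weight, bias, activation="silu")
print(y.shape)  # torch.Size([2, 64, 128])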
SegMamba/images/data_structure.jpg ADDED

Git LFS Details

  • SHA256: 811073efa09d8196f0b0dd9a37418025d5969f204f226b055be9349dec8117db
  • Pointer size: 130 Bytes
  • Size of remote file: 45 kB
SegMamba/images/method_figure.jpg ADDED

Git LFS Details

  • SHA256: 91fc1b545acec5d4f48af3a5d4498e25bc5c973639b7527076eebab46e78e2ba
  • Pointer size: 131 Bytes
  • Size of remote file: 215 kB
SegMamba/images/modules.jpg ADDED

Git LFS Details

  • SHA256: e511d5a09ef804053ea85d19075f7a8d922ea59aba475f8b223f2c9a6a15c23c
  • Pointer size: 131 Bytes
  • Size of remote file: 153 kB
SegMamba/images/segmamba_ablation.jpg ADDED

Git LFS Details

  • SHA256: 7bbaaa25995dd1f3b7a3b1c326dd939dce40c80722c74f4104ae787ed6d936db
  • Pointer size: 130 Bytes
  • Size of remote file: 100 kB
SegMamba/light_training/.DS_Store ADDED
Binary file (8.2 kB). View file
 
SegMamba/light_training/augment/multi_processor.py ADDED
@@ -0,0 +1,10 @@
+ from batchgenerators.dataloading.nondet_multi_threaded_augmenter import NonDetMultiThreadedAugmenter
+
+
+ class LimitedLenWrapper(NonDetMultiThreadedAugmenter):
+     def __init__(self, my_imaginary_length, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         self.len = my_imaginary_length
+
+     def __len__(self):
+         return self.len
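LimitedLenWrapper only overrides __len__, so a training loop that does for _ in range(len(loader)) sees a fixed number of iterations per epoch even though the underlying augmenter samples indefinitely. A hedged usage sketch; the keyword arguments forwarded to NonDetMultiThreadedAugmenter are assumptions based on the batchgenerators API and may differ across versions:

# Sketch only: my_batch_generator and train_transforms are placeholders,
# e.g. a DataLoaderMultiProcess instance and the Compose from train_augment.py.
train_loader = LimitedLenWrapper(
    250,                              # iterations reported per epoch
    data_loader=my_batch_generator,
    transform=train_transforms,
    num_processes=8,
    num_cached=6,
    seeds=None,
    pin_memory=True,
)
for _ in range(len(train_loader)):
    batch = next(train_loader)        # dict with 'data', 'seg', 'properties', 'keys'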
SegMamba/light_training/augment/train_augment.py ADDED
@@ -0,0 +1,279 @@
1
+ import inspect
2
+ import multiprocessing
3
+ import os
4
+ import shutil
5
+ import sys
6
+ import warnings
7
+ from copy import deepcopy
8
+ from datetime import datetime
9
+ from time import time, sleep
10
+ from typing import Union, Tuple, List
11
+ import numpy as np
12
+ import torch
13
+ from batchgenerators.dataloading.single_threaded_augmenter import SingleThreadedAugmenter
14
+ from batchgenerators.transforms.abstract_transforms import AbstractTransform, Compose
15
+ from batchgenerators.transforms.color_transforms import BrightnessMultiplicativeTransform, \
16
+ ContrastAugmentationTransform, GammaTransform
17
+ from batchgenerators.transforms.noise_transforms import GaussianNoiseTransform, GaussianBlurTransform
18
+ from batchgenerators.transforms.resample_transforms import SimulateLowResolutionTransform
19
+ from batchgenerators.transforms.spatial_transforms import SpatialTransform, MirrorTransform
20
+ from batchgenerators.transforms.utility_transforms import RemoveLabelTransform, RenameTransform, NumpyToTensor
21
+
22
+
23
+ def get_train_transforms(patch_size, mirror_axes=None):
24
+ tr_transforms = []
25
+ patch_size_spatial = patch_size
26
+ ignore_axes = None
27
+ angle = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
28
+
29
+ tr_transforms.append(SpatialTransform(
30
+ patch_size_spatial, patch_center_dist_from_border=None,
31
+ do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0),
32
+ do_rotation=True, angle_x=angle, angle_y=angle, angle_z=angle,
33
+ p_rot_per_axis=1, # todo experiment with this
34
+ do_scale=True, scale=(0.7, 1.4),
35
+ border_mode_data="constant", border_cval_data=0, order_data=3,
36
+ border_mode_seg="constant", border_cval_seg=-1, order_seg=1,
37
+ random_crop=False, # random cropping is part of our dataloaders
38
+ p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2,
39
+ independent_scale_for_each_axis=False # todo experiment with this
40
+ ))
41
+
42
+ tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1))
43
+ tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2,
44
+ p_per_channel=0.5))
45
+ tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15))
46
+ tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15))
47
+ tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,
48
+ p_per_channel=0.5,
49
+ order_downsample=0, order_upsample=3, p_per_sample=0.25,
50
+ ignore_axes=ignore_axes))
51
+ tr_transforms.append(GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1))
52
+ tr_transforms.append(GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3))
53
+
54
+ if mirror_axes is not None and len(mirror_axes) > 0:
55
+ tr_transforms.append(MirrorTransform(mirror_axes))
56
+
57
+ tr_transforms.append(RemoveLabelTransform(-1, 0))
58
+ tr_transforms.append(NumpyToTensor(['data', 'seg'], 'float'))
59
+
60
+ tr_transforms = Compose(tr_transforms)
61
+
62
+ return tr_transforms
63
+
64
+ def get_train_transforms_nomirror(patch_size, mirror_axes=None):
65
+ tr_transforms = []
66
+ patch_size_spatial = patch_size
67
+ ignore_axes = None
68
+ angle = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
69
+
70
+ tr_transforms.append(SpatialTransform(
71
+ patch_size_spatial, patch_center_dist_from_border=None,
72
+ do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0),
73
+ do_rotation=True, angle_x=angle, angle_y=angle, angle_z=angle,
74
+ p_rot_per_axis=1, # todo experiment with this
75
+ do_scale=True, scale=(0.7, 1.4),
76
+ border_mode_data="constant", border_cval_data=0, order_data=3,
77
+ border_mode_seg="constant", border_cval_seg=-1, order_seg=1,
78
+ random_crop=False, # random cropping is part of our dataloaders
79
+ p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2,
80
+ independent_scale_for_each_axis=False # todo experiment with this
81
+ ))
82
+
83
+ tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1))
84
+ tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2,
85
+ p_per_channel=0.5))
86
+ tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15))
87
+ tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15))
88
+ tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,
89
+ p_per_channel=0.5,
90
+ order_downsample=0, order_upsample=3, p_per_sample=0.25,
91
+ ignore_axes=ignore_axes))
92
+ tr_transforms.append(GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1))
93
+ tr_transforms.append(GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3))
94
+
95
+ # if mirror_axes is not None and len(mirror_axes) > 0:
96
+ # tr_transforms.append(MirrorTransform(mirror_axes))
97
+
98
+ tr_transforms.append(RemoveLabelTransform(-1, 0))
99
+ tr_transforms.append(NumpyToTensor(['data', 'seg'], 'float'))
100
+
101
+ tr_transforms = Compose(tr_transforms)
102
+
103
+ return tr_transforms
104
+
105
+ def get_train_transforms_onlymirror(patch_size, mirror_axes=None):
106
+ tr_transforms = []
107
+ patch_size_spatial = patch_size
108
+ ignore_axes = None
109
+ angle = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
110
+
111
+ # tr_transforms.append(SpatialTransform(
112
+ # patch_size_spatial, patch_center_dist_from_border=None,
113
+ # do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0),
114
+ # do_rotation=True, angle_x=angle, angle_y=angle, angle_z=angle,
115
+ # p_rot_per_axis=1, # todo experiment with this
116
+ # do_scale=True, scale=(0.7, 1.4),
117
+ # border_mode_data="constant", border_cval_data=0, order_data=3,
118
+ # border_mode_seg="constant", border_cval_seg=-1, order_seg=1,
119
+ # random_crop=False, # random cropping is part of our dataloaders
120
+ # p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2,
121
+ # independent_scale_for_each_axis=False # todo experiment with this
122
+ # ))
123
+
124
+ tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1))
125
+ tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2,
126
+ p_per_channel=0.5))
127
+ tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15))
128
+ tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15))
129
+ tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,
130
+ p_per_channel=0.5,
131
+ order_downsample=0, order_upsample=3, p_per_sample=0.25,
132
+ ignore_axes=ignore_axes))
133
+ tr_transforms.append(GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1))
134
+ tr_transforms.append(GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3))
135
+
136
+ if mirror_axes is not None and len(mirror_axes) > 0:
137
+ tr_transforms.append(MirrorTransform(mirror_axes))
138
+
139
+ tr_transforms.append(RemoveLabelTransform(-1, 0))
140
+ tr_transforms.append(NumpyToTensor(['data', 'seg'], 'float'))
141
+
142
+ tr_transforms = Compose(tr_transforms)
143
+
144
+ return tr_transforms
145
+
146
+ def get_train_transforms_onlyspatial(patch_size, mirror_axes=None):
147
+ tr_transforms = []
148
+ patch_size_spatial = patch_size
149
+ ignore_axes = None
150
+ angle = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
151
+
152
+ tr_transforms.append(SpatialTransform(
153
+ patch_size_spatial, patch_center_dist_from_border=None,
154
+ do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0),
155
+ do_rotation=True, angle_x=angle, angle_y=angle, angle_z=angle,
156
+ p_rot_per_axis=1, # todo experiment with this
157
+ do_scale=True, scale=(0.7, 1.4),
158
+ border_mode_data="constant", border_cval_data=0, order_data=3,
159
+ border_mode_seg="constant", border_cval_seg=-1, order_seg=1,
160
+ random_crop=False, # random cropping is part of our dataloaders
161
+ p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2,
162
+ independent_scale_for_each_axis=False # todo experiment with this
163
+ ))
164
+
165
+ # tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1))
166
+ # tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2,
167
+ # p_per_channel=0.5))
168
+ # tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15))
169
+ # tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15))
170
+ # tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,
171
+ # p_per_channel=0.5,
172
+ # order_downsample=0, order_upsample=3, p_per_sample=0.25,
173
+ # ignore_axes=ignore_axes))
174
+ # tr_transforms.append(GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1))
175
+ # tr_transforms.append(GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3))
176
+
177
+ if mirror_axes is not None and len(mirror_axes) > 0:
178
+ tr_transforms.append(MirrorTransform(mirror_axes))
179
+
180
+ tr_transforms.append(RemoveLabelTransform(-1, 0))
181
+ tr_transforms.append(NumpyToTensor(['data', 'seg'], 'float'))
182
+
183
+ tr_transforms = Compose(tr_transforms)
184
+
185
+ return tr_transforms
186
+
187
+ def get_train_transforms_noaug(patch_size, mirror_axes=None):
188
+ tr_transforms = []
189
+ # patch_size_spatial = patch_size
190
+ # ignore_axes = None
191
+ # angle = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
192
+
193
+ # tr_transforms.append(SpatialTransform(
194
+ # patch_size_spatial, patch_center_dist_from_border=None,
195
+ # do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0),
196
+ # do_rotation=True, angle_x=angle, angle_y=angle, angle_z=angle,
197
+ # p_rot_per_axis=1, # todo experiment with this
198
+ # do_scale=True, scale=(0.7, 1.4),
199
+ # border_mode_data="constant", border_cval_data=0, order_data=3,
200
+ # border_mode_seg="constant", border_cval_seg=-1, order_seg=1,
201
+ # random_crop=False, # random cropping is part of our dataloaders
202
+ # p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2,
203
+ # independent_scale_for_each_axis=False # todo experiment with this
204
+ # ))
205
+
206
+ # tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1))
207
+ # tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2,
208
+ # p_per_channel=0.5))
209
+ # tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15))
210
+ # tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15))
211
+ # tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,
212
+ # p_per_channel=0.5,
213
+ # order_downsample=0, order_upsample=3, p_per_sample=0.25,
214
+ # ignore_axes=ignore_axes))
215
+ # tr_transforms.append(GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1))
216
+ # tr_transforms.append(GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3))
217
+
218
+ # if mirror_axes is not None and len(mirror_axes) > 0:
219
+ # tr_transforms.append(MirrorTransform(mirror_axes))
220
+
221
+ tr_transforms.append(RemoveLabelTransform(-1, 0))
222
+ tr_transforms.append(NumpyToTensor(['data', 'seg'], 'float'))
223
+
224
+ tr_transforms = Compose(tr_transforms)
225
+
226
+ return tr_transforms
227
+
228
+ def get_validation_transforms() -> AbstractTransform:
229
+ val_transforms = []
230
+ val_transforms.append(RemoveLabelTransform(-1, 0))
231
+
232
+ # val_transforms.append(RenameTransform('seg', 'target', True))
233
+
234
+ val_transforms.append(NumpyToTensor(['data', 'seg'], 'float'))
235
+ val_transforms = Compose(val_transforms)
236
+ return val_transforms
237
+
238
+ # import SimpleITK as sitk
239
+ # import matplotlib.pyplot as plt
240
+
241
+ # image = sitk.ReadImage("/Users/xingzhaohu/Documents/工作/code/medical_image_processing/SSL/BraTS20_Training_365/BraTS20_Training_365_flair.nii.gz")
242
+ # label = sitk.ReadImage("/Users/xingzhaohu/Documents/工作/code/medical_image_processing/SSL/BraTS20_Training_365/BraTS20_Training_365_seg.nii.gz")
243
+
244
+ # # image = sitk.ReadImage("./AIIB/image/AIIB23_171.nii.gz")
245
+ # # label = sitk.ReadImage("./AIIB/gt/AIIB23_171.nii.gz")
246
+
247
+ # image_arr = sitk.GetArrayFromImage(image)
248
+ # label_arr = sitk.GetArrayFromImage(label)
249
+ # intensityproperties = {}
250
+
251
+ # norm = RescaleTo01Normalization(intensityproperties=intensityproperties)
252
+ # image_arr = image_arr[0:128, 0:128, 0:128][None, None]
253
+ # label_arr = label_arr[0:128, 0:128, 0:128][None, None]
254
+
255
+
256
+ # image_arr = norm.run(image_arr, label_arr)
257
+
258
+ # print(image_arr.shape, label_arr.shape)
259
+
260
+ # tr_transforms = Compose(tr_transforms)
261
+
262
+ # trans_out = tr_transforms(data=image_arr, seg=label_arr)
263
+
264
+ # image_arr_aug = trans_out["data"]
265
+ # label_arr_aug = trans_out["seg"]
266
+
267
+ # print(image_arr_aug.shape, label_arr_aug.shape)
268
+
269
+
270
+ # for i in range(40, 128):
271
+ # plt.subplot(1, 4, 1)
272
+ # plt.imshow(image_arr[0, 0, i], cmap="gray")
273
+ # plt.subplot(1, 4, 2)
274
+ # plt.imshow(label_arr[0, 0, i], cmap="gray")
275
+ # plt.subplot(1, 4, 3)
276
+ # plt.imshow(image_arr_aug[0, 0, i], cmap="gray")
277
+ # plt.subplot(1, 4, 4)
278
+ # plt.imshow(label_arr_aug[0, 0, i], cmap="gray")
279
+ # plt.show()
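Each get_train_transforms* variant returns a batchgenerators Compose that consumes numpy batches under the keys 'data' and 'seg' (shape (b, c, x, y, z)) and ends with NumpyToTensor, so the output dict already holds torch tensors. A small sketch of invoking the default pipeline (the patch size and batch shapes are illustrative):

import numpy as np

patch_size = (128, 128, 128)
transforms = get_train_transforms(patch_size, mirror_axes=(0, 1, 2))

data = np.random.randn(2, 1, *patch_size).astype(np.float32)                # (b, c, x, y, z)
seg = np.random.randint(0, 3, size=(2, 1, *patch_size)).astype(np.float32)

out = transforms(data=data, seg=seg)   # Compose is called with keyword batches
print(out["data"].shape, out["seg"].shape)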
SegMamba/light_training/dataloading/__init__.py ADDED
File without changes
SegMamba/light_training/dataloading/base_data_loader.py ADDED
@@ -0,0 +1,213 @@
1
+ import numpy as np
2
+ from typing import Union, Tuple
3
+ import time
4
+
5
+ class DataLoaderMultiProcess:
6
+ def __init__(self, dataset,
7
+ patch_size,
8
+ batch_size=2,
9
+ oversample_foreground_percent=0.33,
10
+ probabilistic_oversampling=False,
11
+ print_time=False):
12
+ pass
13
+ self.dataset = dataset
14
+ self.patch_size = patch_size
15
+ # self.annotated_classes_key = annotated_classes_key ## (1, 2, 3 ..)
16
+ self.batch_size = batch_size
17
+ self.keys = [i for i in range(len(dataset))]
18
+ self.thread_id = 0
19
+ self.oversample_foreground_percent = oversample_foreground_percent
20
+ self.need_to_pad = (np.array([0, 0, 0])).astype(int)
21
+
22
+ self.get_do_oversample = self._oversample_last_XX_percent if not probabilistic_oversampling \
23
+ else self._probabilistic_oversampling
24
+ self.data_shape = None
25
+ self.seg_shape = None
26
+ self.print_time = print_time
27
+
28
+ def determine_shapes(self):
29
+ # load one case
30
+ item = self.dataset.__getitem__(0)
31
+ data, seg, properties = item["data"], item["seg"], item["properties"]
32
+ num_color_channels = data.shape[0]
33
+ num_output_channels = seg.shape[0]
34
+ patch_size = self.patch_size
35
+ data_shape = (self.batch_size, num_color_channels, patch_size[0], patch_size[1], patch_size[2])
36
+ seg_shape = (self.batch_size, num_output_channels, patch_size[0], patch_size[1], patch_size[2])
37
+ return data_shape, seg_shape
38
+
39
+ def generate_train_batch(self):
40
+
41
+ selected_keys = np.random.choice(self.keys, self.batch_size, True, None)
42
+ if self.data_shape is None:
43
+ self.data_shape, self.seg_shape = self.determine_shapes()
44
+
45
+ data_all = np.zeros(self.data_shape, dtype=np.float32)
46
+ data_all_global = np.zeros(self.data_shape, dtype=np.float32)
47
+ seg_all_global = np.zeros(self.seg_shape, dtype=np.float32)
48
+ data_global = None
49
+ seg_global = None
50
+ seg_all = np.zeros(self.seg_shape, dtype=np.float32)
51
+
52
+ case_properties = []
53
+
54
+ index = 0
55
+ for j, key in enumerate(selected_keys):
56
+
57
+ force_fg = self.get_do_oversample(j)
58
+ s = time.time()
59
+ item = self.dataset.__getitem__(key)
60
+ e = time.time()
61
+ if self.print_time:
62
+ print(f"read single data time is {e - s}")
63
+ # print(f"read data time is {e - s}")
64
+ data, seg, properties = item["data"], item["seg"], item["properties"]
65
+
66
+ if "data_global" in item:
67
+ data_global = item["data_global"]
68
+
69
+ if "seg_global" in item:
70
+ seg_global = item["seg_global"]
71
+
72
+ case_properties.append(properties)
73
+ # If we are doing the cascade then the segmentation from the previous stage will already have been loaded by
74
+ # self._data.load_case(i) (see nnUNetDataset.load_case)
75
+ shape = data.shape[1:]
76
+ dim = len(shape)
77
+
78
+ s = time.time()
79
+ bbox_lbs, bbox_ubs = self.get_bbox(shape, force_fg, properties['class_locations'])
80
+ e = time.time()
81
+ if self.print_time:
82
+ print(f"get bbox time is {e - s}")
83
+ # whoever wrote this knew what he was doing (hint: it was me). We first crop the data to the region of the
84
+ # bbox that actually lies within the data. This will result in a smaller array which is then faster to pad.
85
+ # valid_bbox is just the coord that lied within the data cube. It will be padded to match the patch size
86
+ # later
87
+ valid_bbox_lbs = [max(0, bbox_lbs[i]) for i in range(dim)]
88
+ valid_bbox_ubs = [min(shape[i], bbox_ubs[i]) for i in range(dim)]
89
+
90
+ # At this point you might ask yourself why we would treat seg differently from seg_from_previous_stage.
91
+ # Why not just concatenate them here and forget about the if statements? Well that's because seg needs to
92
+ # be padded with -1 constant whereas seg_from_previous_stage needs to be padded with 0s (we could also
93
+ # remove label -1 in the data augmentation but this way it is less error prone)
94
+ this_slice = tuple([slice(0, data.shape[0])] + [slice(i, j) for i, j in zip(valid_bbox_lbs, valid_bbox_ubs)])
95
+ data = data[this_slice]
96
+
97
+ this_slice = tuple([slice(0, seg.shape[0])] + [slice(i, j) for i, j in zip(valid_bbox_lbs, valid_bbox_ubs)])
98
+ seg = seg[this_slice]
99
+
100
+
101
+ s = time.time()
102
+ padding = [(-min(0, bbox_lbs[i]), max(bbox_ubs[i] - shape[i], 0)) for i in range(dim)]
103
+ # print(f"box is {bbox_lbs, bbox_ubs}, padding is {padding}")
104
+ data_all[j] = np.pad(data, ((0, 0), *padding), 'constant', constant_values=0)
105
+ seg_all[j] = np.pad(seg, ((0, 0), *padding), 'constant', constant_values=0)
106
+
107
+ if data_global is not None :
108
+ data_all_global[j] = data_global
109
+
110
+ if seg_global is not None :
111
+ seg_all_global[j] = seg_global
112
+
113
+
114
+ e = time.time()
115
+ if self.print_time:
116
+ print(f"box is {bbox_lbs, bbox_ubs}, padding is {padding}")
117
+ print(f"setting data value time is {e - s}")
118
+
119
+
120
+ if data_global is None:
121
+ return {'data': data_all,
122
+ 'seg': seg_all, 'properties': case_properties,
123
+ 'keys': selected_keys}
124
+
125
+ return {'data': data_all, "data_global": data_all_global,
126
+ "seg_global": seg_all_global,
127
+ 'seg': seg_all, 'properties': case_properties,
128
+ 'keys': selected_keys}
129
+
130
+ def __next__(self):
131
+
132
+ return self.generate_train_batch()
133
+
134
+ def set_thread_id(self, thread_id):
135
+ self.thread_id = thread_id
136
+
137
+ def _oversample_last_XX_percent(self, sample_idx: int) -> bool:
138
+ """
139
+ determines whether sample sample_idx in a minibatch needs to be guaranteed foreground
140
+ """
141
+ return not sample_idx < round(self.batch_size * (1 - self.oversample_foreground_percent))
142
+
143
+ def _probabilistic_oversampling(self, sample_idx: int) -> bool:
144
+ # print('YEAH BOIIIIII')
145
+ return np.random.uniform() < self.oversample_foreground_percent
146
+
147
+ def get_bbox(self, data_shape: np.ndarray, force_fg: bool, class_locations: Union[dict, None],
148
+ overwrite_class: Union[int, Tuple[int, ...]] = None, verbose: bool = False):
149
+ # in dataloader 2d we need to select the slice prior to this and also modify the class_locations to only have
150
+ # locations for the given slice
151
+ need_to_pad = self.need_to_pad.copy()
152
+ dim = len(data_shape)
153
+
154
+ for d in range(dim):
155
+ # if case_all_data.shape + need_to_pad is still < patch size we need to pad more! We pad on both sides
156
+ # always
157
+ if need_to_pad[d] + data_shape[d] < self.patch_size[d]:
158
+ need_to_pad[d] = self.patch_size[d] - data_shape[d]
159
+
160
+ # we can now choose the bbox from -need_to_pad // 2 to shape - patch_size + need_to_pad // 2. Here we
161
+ # define what the upper and lower bound can be to then sample from them with np.random.randint
162
+ lbs = [- need_to_pad[i] // 2 for i in range(dim)]
163
+ ubs = [data_shape[i] + need_to_pad[i] // 2 + need_to_pad[i] % 2 - self.patch_size[i] for i in range(dim)]
164
+
165
+ # if not force_fg then we can just sample the bbox randomly from lb and ub. Else we need to make sure we get
166
+ # at least one of the foreground classes in the patch
167
+ if not force_fg:
168
+ bbox_lbs = [np.random.randint(lbs[i], ubs[i] + 1) for i in range(dim)]
169
+ # print('I want a random location')
170
+ else:
171
+ assert class_locations is not None, 'if force_fg is set class_locations cannot be None'
172
+ if overwrite_class is not None:
173
+ assert overwrite_class in class_locations.keys(), 'desired class ("overwrite_class") does not ' \
174
+ 'have class_locations (missing key)'
175
+ # this saves us a np.unique. Preprocessing already did that for all cases. Neat.
176
+ # class_locations keys can also be tuple
177
+ eligible_classes_or_regions = [i for i in class_locations.keys() if len(class_locations[i]) > 0]
178
+
179
+ # if we have annotated_classes_key locations and other classes are present, remove the annotated_classes_key from the list
180
+ # strange formulation needed to circumvent
181
+ # ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
182
+ # tmp = [i == self.annotated_classes_key if isinstance(i, tuple) else False for i in eligible_classes_or_regions]
183
+ # if any(tmp):
184
+ # if len(eligible_classes_or_regions) > 1:
185
+ # eligible_classes_or_regions.pop(np.where(tmp)[0][0])
186
+
187
+ if len(eligible_classes_or_regions) == 0:
188
+ # this only happens if some image does not contain foreground voxels at all
189
+ selected_class = None
190
+ if verbose:
191
+ print('case does not contain any foreground classes')
192
+ else:
193
+ # I hate myself. Future me aint gonna be happy to read this
194
+ # 2022_11_25: had to read it today. Wasn't too bad
195
+ selected_class = eligible_classes_or_regions[np.random.choice(len(eligible_classes_or_regions))] if \
196
+ (overwrite_class is None or (overwrite_class not in eligible_classes_or_regions)) else overwrite_class
197
+ # print(f'I want to have foreground, selected class: {selected_class}')
198
+
199
+ voxels_of_that_class = class_locations[selected_class] if selected_class is not None else None
200
+
201
+ if voxels_of_that_class is not None and len(voxels_of_that_class) > 0:
202
+ selected_voxel = voxels_of_that_class[np.random.choice(len(voxels_of_that_class))]
203
+ # selected voxel is center voxel. Subtract half the patch size to get lower bbox voxel.
204
+ # Make sure it is within the bounds of lb and ub
205
+ # i + 1 because we have first dimension 0!
206
+ bbox_lbs = [max(lbs[i], selected_voxel[i + 1] - self.patch_size[i] // 2) for i in range(dim)]
207
+ else:
208
+ # If the image does not contain any foreground classes, we fall back to random cropping
209
+ bbox_lbs = [np.random.randint(lbs[i], ubs[i] + 1) for i in range(dim)]
210
+
211
+ bbox_ubs = [bbox_lbs[i] + self.patch_size[i] for i in range(dim)]
212
+
213
+ return bbox_lbs, bbox_ubs
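With the default (non-probabilistic) setting, oversampling is deterministic within a minibatch: only the trailing samples are forced to contain foreground, so with batch_size=2 and oversample_foreground_percent=0.33 the first sample is cropped randomly and the second is foreground-forced. A tiny sketch of the rule in isolation (the values are illustrative):

batch_size = 2
oversample_foreground_percent = 0.33

def force_fg(sample_idx):
    # same expression as _oversample_last_XX_percent above
    return not sample_idx < round(batch_size * (1 - oversample_foreground_percent))

print([force_fg(i) for i in range(batch_size)])  # [False, True]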
SegMamba/light_training/dataloading/dataset.py ADDED
@@ -0,0 +1,319 @@
1
+
2
+ # Copyright 2020 - 2022 MONAI Consortium
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ # Unless required by applicable law or agreed to in writing, software
8
+ # distributed under the License is distributed on an "AS IS" BASIS,
9
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ # See the License for the specific language governing permissions and
11
+ # limitations under the License.
12
+ from sklearn.model_selection import KFold  ## K-fold cross-validation
13
+ import pickle
14
+ import os
15
+ import json
16
+ import math
17
+ import numpy as np
18
+ import torch
19
+ from monai import transforms
20
+ import SimpleITK as sitk
21
+ from tqdm import tqdm
22
+ from torch.utils.data import Dataset
23
+ import glob
24
+ from light_training.dataloading.utils import unpack_dataset
25
+ import random
26
+
27
+ class MedicalDataset(Dataset):
28
+ def __init__(self, datalist, test=False) -> None:
29
+ super().__init__()
30
+
31
+ self.datalist = datalist
32
+ self.test = test
33
+
34
+ self.data_cached = []
35
+ for p in tqdm(self.datalist, total=len(self.datalist)):
36
+ info = self.load_pkl(p)
37
+
38
+ self.data_cached.append(info)
39
+
40
+ ## unpacking
41
+ print(f"unpacking data ....")
42
+ # for
43
+ folder = []
44
+ for p in self.datalist:
45
+ f = os.path.dirname(p)
46
+ if f not in folder:
47
+ folder.append(f)
48
+ for f in folder:
49
+ unpack_dataset(f,
50
+ unpack_segmentation=True,
51
+ overwrite_existing=False,
52
+ num_processes=8)
53
+
54
+
55
+ print(f"data length is {len(self.datalist)}")
56
+
57
+ def load_pkl(self, data_path):
58
+ pass
59
+ properties_path = f"{data_path[:-4]}.pkl"
60
+ df = open(properties_path, "rb")
61
+ info = pickle.load(df)
62
+
63
+ return info
64
+
65
+ def post(self, batch_data):
66
+ return batch_data
67
+
68
+ def read_data(self, data_path):
69
+
70
+ image_path = data_path.replace(".npz", ".npy")
71
+ seg_path = data_path.replace(".npz", "_seg.npy")
72
+ image_data = np.load(image_path, "r+")
73
+
74
+ seg_data = None
75
+ if not self.test:
76
+ seg_data = np.load(seg_path, "r+")
77
+ return image_data, seg_data
78
+
79
+ def __getitem__(self, i):
80
+
81
+ image, seg = self.read_data(self.datalist[i])
82
+
83
+ properties = self.data_cached[i]
84
+
85
+ if seg is None:
86
+ return {
87
+ "data": image,
88
+ "properties": properties
89
+ }
90
+ else :
91
+ return {
92
+ "data": image,
93
+ "seg": seg,
94
+ "properties": properties
95
+ }
96
+
97
+ def __len__(self):
98
+ return len(self.datalist)
99
+
100
+ def get_train_test_loader_from_test_list(data_dir, test_list):
101
+ all_paths = glob.glob(f"{data_dir}/*.npz")
102
+
103
+ test_datalist = []
104
+ train_datalist = []
105
+
106
+ test_list_1 = []
107
+ for t in test_list:
108
+ test_list_1.append(t.replace(".nii.gz", ""))
109
+
110
+ test_list = test_list_1
111
+ for p in all_paths:
112
+ p2 = p.split("/")[-1].split(".")[0]
113
+ if p2 in test_list:
114
+ test_datalist.append(p)
115
+ else :
116
+ train_datalist.append(p)
117
+
118
+ print(f"training data is {len(train_datalist)}")
119
+ print(f"test data is {len(test_datalist)}", test_datalist)
120
+
121
+ train_ds = MedicalDataset(train_datalist)
122
+ test_ds = MedicalDataset(test_datalist)
123
+
124
+ loader = [train_ds, test_ds]
125
+
126
+ return loader
127
+
128
+ def get_kfold_data(data_paths, n_splits, shuffle=False):
129
+ X = np.arange(len(data_paths))
130
+ kfold = KFold(n_splits=n_splits, shuffle=shuffle)  ## kfold is an instance of the KFold class
131
+ return_res = []
132
+ for a, b in kfold.split(X):
133
+ fold_train = []
134
+ fold_val = []
135
+ for i in a:
136
+ fold_train.append(data_paths[i])
137
+ for j in b:
138
+ fold_val.append(data_paths[j])
139
+ return_res.append({"train_data": fold_train, "val_data": fold_val})
140
+
141
+ return return_res
142
+
143
+ def get_kfold_loader(data_dir, fold=0, test_dir=None):
144
+
145
+ all_paths = glob.glob(f"{data_dir}/*.npz")
146
+ fold_data = get_kfold_data(all_paths, 5)[fold]
147
+
148
+ train_datalist = fold_data["train_data"]
149
+ val_datalist = fold_data["val_data"]
150
+
151
+ print(f"training data is {len(train_datalist)}")
152
+ print(f"validation data is {len(val_datalist)}")
153
+ train_ds = MedicalDataset(train_datalist)
154
+
155
+ val_ds = MedicalDataset(val_datalist)
156
+
157
+ if test_dir is not None:
158
+ test_paths = glob.glob(f"{test_dir}/*.npz")
159
+ test_ds = MedicalDataset(test_paths, test=True)
160
+ else:
161
+ test_ds = None
162
+
163
+ loader = [train_ds, val_ds, test_ds]
164
+
165
+ return loader
166
+
167
+ def get_all_training_loader(data_dir, fold=0, test_dir=None):
168
+ ## train all labeled data
169
+ ## fold denote the validation data in training data
170
+ all_paths = glob.glob(f"{data_dir}/*.npz")
171
+ fold_data = get_kfold_data(all_paths, 5)[fold]
172
+
173
+ train_datalist = all_paths
174
+ val_datalist = fold_data["val_data"]
175
+
176
+ print(f"training data is {len(train_datalist)}")
177
+ print(f"validation data is {len(val_datalist)}")
178
+ train_ds = MedicalDataset(train_datalist)
179
+
180
+ val_ds = MedicalDataset(val_datalist)
181
+
182
+ if test_dir is not None:
183
+ test_paths = glob.glob(f"{test_dir}/*.npz")
184
+ test_ds = MedicalDataset(test_paths, test=True)
185
+ else:
186
+ test_ds = None
187
+
188
+ loader = [train_ds, val_ds, test_ds]
189
+
190
+ return loader
191
+
192
+ def get_train_val_test_loader_seperate(train_dir, val_dir, test_dir=None):
193
+ train_datalist = glob.glob(f"{train_dir}/*.npz")
194
+ val_datalist = glob.glob(f"{val_dir}/*.npz")
195
+
196
+ print(f"training data is {len(train_datalist)}")
197
+ print(f"validation data is {len(val_datalist)}")
198
+
199
+ if test_dir is not None:
200
+ test_datalist = glob.glob(f"{test_dir}/*.npz")
201
+ print(f"test data is {len(test_datalist)}")
202
+ test_ds = MedicalDataset(test_datalist, test=True)
203
+ else :
204
+ test_ds = None
205
+
206
+ train_ds = MedicalDataset(train_datalist)
207
+ val_ds = MedicalDataset(val_datalist)
208
+
209
+ loader = [train_ds, val_ds, test_ds]
210
+
211
+ return loader
212
+
213
+ def get_train_val_test_loader_from_split_json(data_dir, split_json_file):
214
+ import json
215
+
216
+ with open(split_json_file, "r") as f:
217
+
218
+ datalist = json.loads(f.read())
219
+
220
+ train_datalist = datalist["train"]
221
+ val_datalist = datalist["validation"]
222
+ test_datalist = datalist["test"]
223
+
224
+ def add_pre(datalist):
225
+ for i in range(len(datalist)):
226
+ datalist[i] = os.path.join(data_dir, datalist[i])
227
+
228
+ add_pre(train_datalist)
229
+ add_pre(val_datalist)
230
+ add_pre(test_datalist)
231
+ print(f"training data is {len(train_datalist)}")
232
+ print(f"validation data is {len(val_datalist)}")
233
+ print(f"test data is {len(test_datalist)}", sorted(test_datalist))
234
+
235
+ train_ds = MedicalDataset(train_datalist)
236
+ val_ds = MedicalDataset(val_datalist)
237
+ test_ds = MedicalDataset(test_datalist)
238
+
239
+ loader = [train_ds, val_ds, test_ds]
240
+
241
+ return loader
242
+
243
+
244
+ def get_train_val_test_loader_from_train(data_dir, train_rate=0.7, val_rate=0.1, test_rate=0.2, seed=42):
245
+ ## train all labeled data
246
+ ## fold denote the validation data in training data
247
+ all_paths = glob.glob(f"{data_dir}/*.npz")
248
+ # fold_data = get_kfold_data(all_paths, 5)[fold]
249
+
250
+ train_number = int(len(all_paths) * train_rate)
251
+ val_number = int(len(all_paths) * val_rate)
252
+ test_number = int(len(all_paths) * test_rate)
253
+ random.seed(seed)
254
+ # random_state = random.random
255
+ random.shuffle(all_paths)
256
+
257
+ train_datalist = all_paths[:train_number]
258
+ val_datalist = all_paths[train_number: train_number + val_number]
259
+ test_datalist = all_paths[-test_number:]
260
+
261
+ print(f"training data is {len(train_datalist)}")
262
+ print(f"validation data is {len(val_datalist)}")
263
+ print(f"test data is {len(test_datalist)}", sorted(test_datalist))
264
+
265
+ train_ds = MedicalDataset(train_datalist)
266
+ val_ds = MedicalDataset(val_datalist)
267
+ test_ds = MedicalDataset(test_datalist)
268
+
269
+ loader = [train_ds, val_ds, test_ds]
270
+
271
+ return loader
272
+
273
+ def get_train_loader_from_train(data_dir):
274
+ ## train all labeled data
275
+ ## fold denote the validation data in training data
276
+ all_paths = glob.glob(f"{data_dir}/*.npz")
277
+ # fold_data = get_kfold_data(all_paths, 5)[fold]
278
+
279
+ train_ds = MedicalDataset(all_paths)
280
+
281
+ return train_ds
282
+
283
+ def get_test_loader_from_test(data_dir):
284
+ all_paths = glob.glob(f"{data_dir}/*.npz")
285
+
286
+ test_ds = MedicalDataset(all_paths)
287
+
288
+ return test_ds
289
+
290
+ def get_multi_dir_training_loader(data_dir, fold=0, test_dir=None):
291
+ ## train all labeled data
292
+ ## fold denote the validation data in training data
293
+ all_paths = []
294
+ for p in data_dir:
295
+ paths = glob.glob(f"{p}/*.npz")
296
+ for pp in paths:
297
+ all_paths.append(pp)
298
+
299
+ # print(all_paths)
300
+ fold_data = get_kfold_data(all_paths, 5)[fold]
301
+
302
+ train_datalist = all_paths
303
+ val_datalist = fold_data["val_data"]
304
+
305
+ print(f"training data is {len(train_datalist)}")
306
+ print(f"validation data is {len(val_datalist)}")
307
+ train_ds = MedicalDataset(train_datalist)
308
+
309
+ val_ds = MedicalDataset(val_datalist)
310
+
311
+ if test_dir is not None:
312
+ test_paths = glob.glob(f"{test_dir}/*.npz")
313
+ test_ds = MedicalDataset(test_paths, test=True)
314
+ else:
315
+ test_ds = None
316
+
317
+ loader = [train_ds, val_ds, test_ds]
318
+
319
+ return loader
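For orientation, a minimal usage sketch of the ratio-based split helper above (not part of the commit). The data directory `./data/fullres/train` and the import path `light_training.dataloading.dataset` are assumptions inferred from the repository layout, and the `__main__` guard is included because the datasets may spawn worker processes while unpacking.

if __name__ == "__main__":
    from light_training.dataloading.dataset import get_train_val_test_loader_from_train

    # build the three MedicalDataset objects from the preprocessed .npz/.pkl pairs
    train_ds, val_ds, test_ds = get_train_val_test_loader_from_train(
        "./data/fullres/train", train_rate=0.7, val_rate=0.1, test_rate=0.2, seed=42)

    print(len(train_ds), len(val_ds), len(test_ds))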
SegMamba/light_training/dataloading/dataset_sdm_edge.py ADDED
@@ -0,0 +1,331 @@
+
+ # Copyright 2020 - 2022 MONAI Consortium
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ # http://www.apache.org/licenses/LICENSE-2.0
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from sklearn.model_selection import KFold  ## k-fold cross-validation
+ import pickle
+ import os
+ import json
+ import math
+ import numpy as np
+ import torch
+ from monai import transforms
+ import SimpleITK as sitk
+ from tqdm import tqdm
+ from torch.utils.data import Dataset
+ import glob
+ from light_training.dataloading.utils import unpack_dataset
+ import random
+ from scipy.ndimage import distance_transform_edt as distance
+ from skimage import segmentation as skimage_seg
+ from skimage.morphology import dilation, disk
+ import scipy.ndimage as ndimage
+
+ def get_edge_points(img):
+     """
+     get the edge points of a binary segmentation result
+     """
+     dim = len(img.shape)
+     if dim == 2:
+         strt = ndimage.generate_binary_structure(2, 1)
+     else:
+         strt = ndimage.generate_binary_structure(3, 1)
+     ero = ndimage.binary_erosion(img, strt)
+     edge = np.asarray(img, np.uint8) - np.asarray(ero, np.uint8)
+     return edge
+
+ def edge_3d(image_3d):
+     # image_3d = torch.from_numpy(image_3d)
+     return_edge = np.zeros_like(image_3d)
+
+     for i in range(image_3d.shape[0]):
+         for j in range(image_3d.shape[1]):
+             return_edge[i, j] = get_edge_points(image_3d[i, j])
+
+     return return_edge
+
+ def compute_sdf(img_gt, out_shape):
+     """
+     compute the signed distance map of a binary mask
+     input: segmentation, shape = (batch_size, c, x, y, z)
+     output: the Signed Distance Map (SDM)
+         sdf(x) =  0                           if x is on the segmentation boundary
+                  -min_{y in boundary}|x - y|  if x is inside the segmentation
+                  +min_{y in boundary}|x - y|  if x is outside the segmentation
+     the sdf is normalized to [-1, 1]
+     """
+
+     img_gt = img_gt.astype(np.uint8)
+     normalized_sdf = np.zeros(out_shape)
+
+     for b in range(out_shape[0]):  # batch size
+         for c in range(out_shape[1]):
+             posmask = img_gt[b, c].astype(np.bool_)
+             if posmask.any():
+                 negmask = ~posmask
+                 posdis = distance(posmask)
+                 negdis = distance(negmask)
+                 boundary = skimage_seg.find_boundaries(posmask, mode='inner').astype(np.uint8)
+                 sdf = (negdis - np.min(negdis)) / (np.max(negdis) - np.min(negdis)) - (posdis - np.min(posdis)) / (np.max(posdis) - np.min(posdis))
+                 sdf[boundary == 1] = 0
+                 normalized_sdf[b][c] = sdf
+                 assert np.min(sdf) == -1.0, f"sdf min is {np.min(sdf)}; posdis in [{np.min(posdis)}, {np.max(posdis)}], negdis in [{np.min(negdis)}, {np.max(negdis)}]"
+                 assert np.max(sdf) == 1.0, f"sdf max is {np.max(sdf)}; posdis in [{np.min(posdis)}, {np.max(posdis)}], negdis in [{np.min(negdis)}, {np.max(negdis)}]"
+
+     return normalized_sdf
+
+ def convert_labels(labels):
+     ## BraTS regions: TC (tumor core), WT (whole tumor) and ET (enhancing tumor)
+     labels = labels[None, None]
+     result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3]
+
+     return torch.cat(result, dim=1).float()
+
+ class MedicalDataset(Dataset):
+     def __init__(self, datalist, test=False) -> None:
+         super().__init__()
+
+         self.datalist = datalist
+         self.test = test
+
+         self.data_cached = []
+         for p in tqdm(self.datalist, total=len(self.datalist)):
+             info = self.load_pkl(p)
+
+             self.data_cached.append(info)
+
+         ## unpack every folder that contributes .npz files so they can be memory-mapped as .npy
+         print(f"unpacking data ....")
+         folder = []
+         for p in self.datalist:
+             f = os.path.dirname(p)
+             if f not in folder:
+                 folder.append(f)
+         for f in folder:
+             unpack_dataset(f,
+                            unpack_segmentation=True,
+                            overwrite_existing=False,
+                            num_processes=8)
+
+         print(f"data length is {len(self.datalist)}")
+
+     def load_pkl(self, data_path):
+         properties_path = f"{data_path[:-4]}.pkl"
+         with open(properties_path, "rb") as df:
+             info = pickle.load(df)
+
+         return info
+
+     def read_data(self, data_path):
+
+         image_path = data_path.replace(".npz", ".npy")
+         seg_path = data_path.replace(".npz", "_seg.npy")
+         image_data = np.load(image_path, "r")
+
+         seg_data = None
+         if not self.test:
+             seg_data = np.load(seg_path, "r")
+         return image_data, seg_data
+
+     # def post(self, batch_data):
+     #     seg = convert_labels(batch_data["seg"]).numpy()
+     #     seg_shape = seg.shape
+     #     seg_edge = edge_3d(seg)
+     #     seg_sdm = 1 - compute_sdf(seg, out_shape=seg_shape)
+     #     seg_sdm = seg_sdm + seg_edge
+
+     #     seg_edge = torch.from_numpy(seg_edge)
+     #     seg_sdm = torch.from_numpy(seg_sdm)
+
+     #     batch_data["seg_edge"] = seg_edge
+     #     batch_data["seg_sdm"] = seg_sdm
+
+     #     print(f"post!!!!!!!!!")
+     #     return batch_data
+
+     def __getitem__(self, i):
+
+         image, seg = self.read_data(self.datalist[i])
+
+         properties = self.data_cached[i]
+         case_name = properties["name"]
+
+         if seg is not None:
+             ## append the precomputed signed distance map as extra target channels
+             sdm = np.load(os.path.join("./data/fullres/train_sdm/", f"{case_name}_seg_sdm.npy"), "r")
+
+             # print(seg.shape, sdm.shape)
+             sdm = sdm[0]
+             seg = np.concatenate([seg, sdm], axis=0)
+
+             # print(f"sdm sum is {sdm.sum()}")
+         if seg is None:
+             return {
+                 "data": image,
+                 "properties": properties
+             }
+         else:
+             return {
+                 "data": image,
+                 "seg": seg,
+                 "properties": properties
+             }
+
+     def __len__(self):
+         return len(self.datalist)
+
+ def get_kfold_data(data_paths, n_splits, shuffle=False):
+     X = np.arange(len(data_paths))
+     kfold = KFold(n_splits=n_splits, shuffle=shuffle)  ## kfold is an instance of the KFold class
+     return_res = []
+     for a, b in kfold.split(X):
+         fold_train = []
+         fold_val = []
+         for i in a:
+             fold_train.append(data_paths[i])
+         for j in b:
+             fold_val.append(data_paths[j])
+         return_res.append({"train_data": fold_train, "val_data": fold_val})
+
+     return return_res
+
+ def get_kfold_loader(data_dir, fold=0, test_dir=None):
+
+     all_paths = glob.glob(f"{data_dir}/*.npz")
+     fold_data = get_kfold_data(all_paths, 5)[fold]
+
+     train_datalist = fold_data["train_data"]
+     val_datalist = fold_data["val_data"]
+
+     print(f"training data is {len(train_datalist)}")
+     print(f"validation data is {len(val_datalist)}")
+     train_ds = MedicalDataset(train_datalist)
+
+     val_ds = MedicalDataset(val_datalist)
+
+     if test_dir is not None:
+         test_paths = glob.glob(f"{test_dir}/*.npz")
+         test_ds = MedicalDataset(test_paths, test=True)
+     else:
+         test_ds = None
+
+     loader = [train_ds, val_ds, test_ds]
+
+     return loader
+
+ def get_all_training_loader(data_dir, fold=0, test_dir=None):
+     ## train on all labeled data
+     ## `fold` selects the validation fold within the training data
+     all_paths = glob.glob(f"{data_dir}/*.npz")
+     fold_data = get_kfold_data(all_paths, 5)[fold]
+
+     train_datalist = all_paths
+     val_datalist = fold_data["val_data"]
+
+     print(f"training data is {len(train_datalist)}")
+     print(f"validation data is {len(val_datalist)}")
+     train_ds = MedicalDataset(train_datalist)
+
+     val_ds = MedicalDataset(val_datalist)
+
+     if test_dir is not None:
+         test_paths = glob.glob(f"{test_dir}/*.npz")
+         test_ds = MedicalDataset(test_paths, test=True)
+     else:
+         test_ds = None
+
+     loader = [train_ds, val_ds, test_ds]
+
+     return loader
+
+ def get_train_val_test_loader_seperate(train_dir, val_dir, test_dir=None):
+     train_datalist = glob.glob(f"{train_dir}/*.npz")
+     val_datalist = glob.glob(f"{val_dir}/*.npz")
+
+     print(f"training data is {len(train_datalist)}")
+     print(f"validation data is {len(val_datalist)}")
+
+     if test_dir is not None:
+         test_datalist = glob.glob(f"{test_dir}/*.npz")
+         print(f"test data is {len(test_datalist)}")
+         test_ds = MedicalDataset(test_datalist, test=True)
+     else:
+         test_ds = None
+
+     train_ds = MedicalDataset(train_datalist)
+     val_ds = MedicalDataset(val_datalist)
+
+     loader = [train_ds, val_ds, test_ds]
+
+     return loader
+
+ def get_train_val_test_loader_from_train(data_dir, train_rate=0.7, val_rate=0.1, test_rate=0.2):
+     ## split all labeled data into train/val/test by ratio
+     ## the rates are assumed to sum to 1 so the test slice does not overlap the validation slice
+     all_paths = glob.glob(f"{data_dir}/*.npz")
+     # fold_data = get_kfold_data(all_paths, 5)[fold]
+
+     train_number = int(len(all_paths) * train_rate)
+     val_number = int(len(all_paths) * val_rate)
+     test_number = int(len(all_paths) * test_rate)
+
+     random.shuffle(all_paths)
+
+     train_datalist = all_paths[:train_number]
+     val_datalist = all_paths[train_number: train_number + val_number]
+     test_datalist = all_paths[-test_number:]
+
+     print(f"training data is {len(train_datalist)}")
+     print(f"validation data is {len(val_datalist)}")
+     print(f"test data is {len(test_datalist)}")
+
+     train_ds = MedicalDataset(train_datalist)
+     val_ds = MedicalDataset(val_datalist)
+     test_ds = MedicalDataset(test_datalist)
+
+     loader = [train_ds, val_ds, test_ds]
+
+     return loader
+
+ def get_multi_dir_training_loader(data_dir, fold=0, test_dir=None):
+     ## train on all labeled data gathered from several directories
+     ## `fold` selects the validation fold within the training data
+     all_paths = []
+     for p in data_dir:
+         paths = glob.glob(f"{p}/*.npz")
+         for pp in paths:
+             all_paths.append(pp)
+
+     # print(all_paths)
+     fold_data = get_kfold_data(all_paths, 5)[fold]
+
+     train_datalist = all_paths
+     val_datalist = fold_data["val_data"]
+
+     print(f"training data is {len(train_datalist)}")
+     print(f"validation data is {len(val_datalist)}")
+     train_ds = MedicalDataset(train_datalist)
+
+     val_ds = MedicalDataset(val_datalist)
+
+     if test_dir is not None:
+         test_paths = glob.glob(f"{test_dir}/*.npz")
+         test_ds = MedicalDataset(test_paths, test=True)
+     else:
+         test_ds = None
+
+     loader = [train_ds, val_ds, test_ds]
+
+     return loader
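A hedged toy check of the SDM and edge helpers defined above (not part of the commit). It builds a synthetic label volume, converts it to the three BraTS-style channels, and verifies that the signed distance map lies in [-1, 1] with zeros on each region boundary. The import path and the label values used here are assumptions; the module's own dependencies (MONAI, SimpleITK, scikit-image, etc.) must be installed.

import numpy as np
import torch
from light_training.dataloading.dataset_sdm_edge import convert_labels, compute_sdf, edge_3d

# synthetic label volume: label 1 on an outer cube, label 3 on an inner cube
labels = torch.zeros(32, 32, 32, dtype=torch.long)
labels[8:24, 8:24, 8:24] = 1
labels[12:20, 12:20, 12:20] = 3

onehot = convert_labels(labels)                    # (1, 3, 32, 32, 32): TC, WT, ET channels
sdm = compute_sdf(onehot.numpy(), onehot.shape)    # normalized SDM in [-1, 1], 0 on each boundary
edge = edge_3d(onehot.numpy())                     # 1 on boundary voxels, 0 elsewhere

print(sdm.min(), sdm.max(), edge.sum())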
SegMamba/light_training/dataloading/get_train_val_test_datalist.py ADDED
@@ -0,0 +1,36 @@
+
+ import glob
+ import random
+ import json
+
+ def get_train_val_test_list_from_fulldata(data_dir, train_rate=0.7, val_rate=0.1, test_rate=0.2, seed=42):
+     all_paths = glob.glob(f"{data_dir}/*.npz")
+
+     ## keep only the file names, dropping the directory prefix
+     all_paths_save = []
+     for p in all_paths:
+         all_paths_save.append(p.split("/")[-1])
+     all_paths = all_paths_save
+     train_number = int(len(all_paths) * train_rate)
+     val_number = int(len(all_paths) * val_rate)
+     test_number = int(len(all_paths) * test_rate)
+     random.seed(seed)
+     random.shuffle(all_paths)
+     train_datalist = all_paths[:train_number]
+     val_datalist = all_paths[train_number: train_number + val_number]
+     test_datalist = all_paths[-test_number:]
+
+     print(f"training data is {len(train_datalist)}")
+     print(f"validation data is {len(val_datalist)}")
+     print(f"test data is {len(test_datalist)}", sorted(test_datalist))
+
+     datalist = {
+         "train": train_datalist,
+         "validation": val_datalist,
+         "test": test_datalist
+     }
+
+     datalist = json.dumps(datalist)
+
+     with open("./data_split.json", "w") as f:
+         f.write(datalist)
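A hedged sketch of the intended round trip (not part of the commit): write a reproducible split once with the helper above, then load it with the split-json loader. The data directory and the import paths are assumptions inferred from the repository layout.

if __name__ == "__main__":
    from light_training.dataloading.get_train_val_test_datalist import get_train_val_test_list_from_fulldata
    from light_training.dataloading.dataset import get_train_val_test_loader_from_split_json

    data_dir = "./data/fullres/train"
    # writes ./data_split.json with bare file names under "train"/"validation"/"test"
    get_train_val_test_list_from_fulldata(data_dir, train_rate=0.7, val_rate=0.1, test_rate=0.2, seed=42)

    train_ds, val_ds, test_ds = get_train_val_test_loader_from_split_json(data_dir, "./data_split.json")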
SegMamba/light_training/dataloading/utils.py ADDED
@@ -0,0 +1,25 @@
+ import numpy as np
+ import os
+ from batchgenerators.utilities.file_and_folder_operations import isfile, subfiles
+ import multiprocessing
+
+ def _convert_to_npy(npz_file: str, unpack_segmentation: bool = True, overwrite_existing: bool = False) -> None:
+     a = np.load(npz_file)  # inexpensive, no decompression is done here, this just opens the archive
+     if overwrite_existing or not isfile(npz_file[:-3] + "npy"):
+         np.save(npz_file[:-3] + "npy", a['data'])
+
+     if unpack_segmentation and (overwrite_existing or not isfile(npz_file[:-4] + "_seg.npy")):
+         np.save(npz_file[:-4] + "_seg.npy", a['seg'])
+
+ def unpack_dataset(folder: str, unpack_segmentation: bool = True, overwrite_existing: bool = False,
+                    num_processes: int = 8):
+     """
+     all npz files in this folder belong to the dataset, unpack them all
+     """
+     with multiprocessing.get_context("spawn").Pool(num_processes) as p:
+         npz_files = subfiles(folder, True, None, ".npz", True)
+         p.starmap(_convert_to_npy, zip(npz_files,
+                                        [unpack_segmentation] * len(npz_files),
+                                        [overwrite_existing] * len(npz_files)))
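Because unpack_dataset builds its worker pool with the spawn context, it should be invoked from under an `if __name__ == "__main__":` guard so the module can be safely re-imported by the workers. A minimal hedged sketch (the folder path is an assumption):

if __name__ == "__main__":
    from light_training.dataloading.utils import unpack_dataset

    # converts every case.npz in the folder into case.npy (image) and case_seg.npy (mask)
    unpack_dataset("./data/fullres/train",
                   unpack_segmentation=True,
                   overwrite_existing=False,
                   num_processes=8)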