import inspect

import torch.nn as nn

try:
    from mamba_ssm import Mamba
except ImportError as exc:
    raise ImportError(
        "mamba_ssm is required for SegMamba. Install it via the SegMamba repo "
        "(mamba/ setup.py install) or `pip install mamba-ssm`."
    ) from exc

from .unet import TwoConv

class MambaLayer(nn.Module):
    """LayerNorm + Mamba over flattened spatial tokens, with a residual skip."""

    def __init__(self, dim, d_state=16, d_conv=4, expand=2, num_slices=None):
        super().__init__()
        self.dim = dim
        self.norm = nn.LayerNorm(dim)
        kwargs = {
            "d_model": dim,
            "d_state": d_state,
            "d_conv": d_conv,
            "expand": expand,
        }
        # The SegMamba fork of mamba_ssm adds `bimamba_type` and `nslices`;
        # probe the signature so this layer also works with vanilla mamba-ssm.
        sig = inspect.signature(Mamba.__init__)
        if "bimamba_type" in sig.parameters:
            kwargs["bimamba_type"] = "v3"
        if num_slices is not None and "nslices" in sig.parameters:
            kwargs["nslices"] = num_slices
        self.mamba = Mamba(**kwargs)

    def forward(self, x):
        b, c = x.shape[:2]
        x_skip = x
        if c != self.dim:
            raise ValueError(f"Expected {self.dim} channels, got {c}")
        # Flatten (B, C, D, H, W) -> (B, D*H*W, C) so Mamba sees a token sequence.
        n_tokens = x.shape[2:].numel()
        img_dims = x.shape[2:]
        x_flat = x.reshape(b, c, n_tokens).transpose(-1, -2)
        x_norm = self.norm(x_flat)
        x_mamba = self.mamba(x_norm)
        out = x_mamba.transpose(-1, -2).reshape(b, c, *img_dims)
        return out + x_skip

class MlpChannel(nn.Module):
    """Channel-wise MLP implemented with 1x1x1 convolutions."""

    def __init__(self, hidden_size, mlp_dim):
        super().__init__()
        self.fc1 = nn.Conv3d(hidden_size, mlp_dim, 1)
        self.act = nn.GELU()
        self.fc2 = nn.Conv3d(mlp_dim, hidden_size, 1)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.fc2(x)
        return x

class GSC(nn.Module):
    """Gated spatial convolution (GSC) block: a two-conv 3x3x3 branch and a
    1x1x1 branch are fused, projected, and added to a residual connection."""

    def __init__(self, in_channels):
        super().__init__()
        self.proj = nn.Conv3d(in_channels, in_channels, 3, 1, 1)
        self.norm = nn.InstanceNorm3d(in_channels)
        self.nonlinear = nn.ReLU()
        self.proj2 = nn.Conv3d(in_channels, in_channels, 3, 1, 1)
        self.norm2 = nn.InstanceNorm3d(in_channels)
        self.nonlinear2 = nn.ReLU()
        self.proj3 = nn.Conv3d(in_channels, in_channels, 1, 1, 0)
        self.norm3 = nn.InstanceNorm3d(in_channels)
        self.nonlinear3 = nn.ReLU()
        self.proj4 = nn.Conv3d(in_channels, in_channels, 1, 1, 0)
        self.norm4 = nn.InstanceNorm3d(in_channels)
        self.nonlinear4 = nn.ReLU()

    def forward(self, x):
        x_residual = x
        # 3x3x3 branch
        x1 = self.proj(x)
        x1 = self.norm(x1)
        x1 = self.nonlinear(x1)
        x1 = self.proj2(x1)
        x1 = self.norm2(x1)
        x1 = self.nonlinear2(x1)
        # 1x1x1 branch
        x2 = self.proj3(x)
        x2 = self.norm3(x2)
        x2 = self.nonlinear3(x2)
        # Fuse branches, project, and add the residual.
        x = x1 + x2
        x = self.proj4(x)
        x = self.norm4(x)
        x = self.nonlinear4(x)
        return x + x_residual

class MambaEncoder(nn.Module):
    """Four-stage 3D encoder: each stage downsamples, applies a GSC block, then
    a stack of MambaLayers; stages in `out_indices` emit normalized features."""

    def __init__(
        self,
        in_chans=1,
        depths=(2, 2, 2, 2),
        dims=(48, 96, 192, 384),
        drop_path_rate=0.0,
        layer_scale_init_value=1e-6,
        out_indices=(0, 1, 2, 3),
        img_size=128,
    ):
        super().__init__()
        # drop_path_rate and layer_scale_init_value are accepted for interface
        # compatibility but are not used by this encoder.
        self.downsample_layers = nn.ModuleList()
        stem = nn.Sequential(
            nn.Conv3d(in_chans, dims[0], kernel_size=7, stride=2, padding=3),
        )
        self.downsample_layers.append(stem)
        for i in range(3):
            downsample_layer = nn.Sequential(
                nn.InstanceNorm3d(dims[i]),
                nn.Conv3d(dims[i], dims[i + 1], kernel_size=2, stride=2),
            )
            self.downsample_layers.append(downsample_layer)

        self.stages = nn.ModuleList()
        self.gscs = nn.ModuleList()
        # Spatial extent of the feature map at each stage (stride 2^(i+1)),
        # passed to MambaLayer as the slice count.
        num_slices_list = [max(1, img_size // (2 ** (i + 1))) for i in range(4)]
        for i in range(4):
            gsc = GSC(dims[i])
            stage = nn.Sequential(
                *[MambaLayer(dim=dims[i], num_slices=num_slices_list[i]) for _ in range(depths[i])]
            )
            self.stages.append(stage)
            self.gscs.append(gsc)

        self.out_indices = out_indices
        self.mlps = nn.ModuleList()
        for i_layer in range(4):
            layer = nn.InstanceNorm3d(dims[i_layer])
            self.add_module(f"norm{i_layer}", layer)
            self.mlps.append(MlpChannel(dims[i_layer], 2 * dims[i_layer]))

    def forward_features(self, x):
        outs = []
        for i in range(4):
            x = self.downsample_layers[i](x)
            x = self.gscs[i](x)
            x = self.stages[i](x)
            if i in self.out_indices:
                norm_layer = getattr(self, f"norm{i}")
                x_out = norm_layer(x)
                x_out = self.mlps[i](x_out)
                outs.append(x_out)
        return tuple(outs)

    def forward(self, x):
        return self.forward_features(x)

class ImageEncoderSegMamba(nn.Module):
    """SegMamba image encoder: a TwoConv stem at full resolution plus the
    MambaEncoder backbone, with 1x1x1 projections onto the decoder widths."""

    def __init__(
        self,
        args,
        img_size=128,
        in_chans=1,
        embed_dim=384,
        depths=(2, 2, 2, 2),
        dims=(48, 96, 192, 384),
    ):
        super().__init__()
        self.args = args
        self.img_size = img_size
        self.backbone = MambaEncoder(
            in_chans=in_chans,
            depths=depths,
            dims=dims,
            img_size=img_size,
        )
        act = ("LeakyReLU", {"negative_slope": 0.1, "inplace": True})
        norm = ("instance", {"affine": True})
        self.stem = TwoConv(3, in_chans, 32, act, norm, True, 0.0)
        self.proj1 = self._proj_block(dims[0], 32)
        self.proj2 = self._proj_block(dims[1], 64)
        self.proj3 = self._proj_block(dims[2], 128)
        # Only project the bottleneck when the backbone width differs from embed_dim.
        self.out_proj = None
        if dims[3] != embed_dim:
            self.out_proj = nn.Conv3d(dims[3], embed_dim, kernel_size=1, bias=False)
            self.out_norm = nn.InstanceNorm3d(embed_dim)

    def _proj_block(self, in_ch, out_ch):
        return nn.Sequential(
            nn.Conv3d(in_ch, out_ch, kernel_size=1, bias=False),
            nn.InstanceNorm3d(out_ch),
            nn.GELU(),
        )

    def forward(self, x):
        x0 = self.stem(x)  # full-resolution skip features
        x1, x2, x3, x4 = self.backbone(x)  # strides 2, 4, 8, 16
        f1 = self.proj1(x1)
        f2 = self.proj2(x2)
        f3 = self.proj3(x3)
        if self.out_proj is not None:
            x4 = self.out_norm(self.out_proj(x4))
        feature_list = [x0, f1, f2, f3]
        return x4, feature_list
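

# --- Minimal smoke test (an illustrative sketch, not part of the SegMamba API). ---
# Assumes a CUDA device, since mamba_ssm's selective-scan kernels are GPU-only,
# and passes args=None because this encoder only stores `args` without using it.
# Run as a module (e.g. `python -m <package>.<this_module>`) so the relative
# `.unet` import resolves.
if __name__ == "__main__":
    import torch

    if torch.cuda.is_available():
        model = ImageEncoderSegMamba(args=None, img_size=64).cuda().eval()
        x = torch.randn(1, 1, 64, 64, 64, device="cuda")
        with torch.no_grad():
            bottleneck, skips = model(x)
        print(tuple(bottleneck.shape))          # expected: (1, 384, 4, 4, 4)
        print([tuple(f.shape) for f in skips])  # skip features at strides 1, 2, 4, 8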