Yannick Kirchhoff committed
Commit e02a0ad · 1 Parent(s): bb7888d

add trained model
Files changed (4):
  1. config.json +7 -0
  2. dataset.json +16 -0
  3. fold_all/checkpoint_final.pth +3 -0
  4. plans.json +200 -0
config.json ADDED
@@ -0,0 +1,7 @@
+{
+    "model_type": "nnUNet",
+    "modality": "MRI",
+    "task": "Breast Segmentation",
+    "reference": "Rokuss, M., Hamm, B., Kirchhoff, Y., & Maier-Hein, K. (2025). Divide and Conquer: A Large-Scale Dataset and Model for Left-Right Breast MRI Segmentation. arXiv preprint arXiv:2507.13830.",
+    "license": "CC BY-NC-SA 4.0"
+}
dataset.json ADDED
@@ -0,0 +1,16 @@
+{
+    "channel_names": {
+        "0": "MR"
+    },
+    "description": "BreastDivider: A Large-Scale Dataset for Left–Right Breast MRI Segmentation",
+    "file_ending": ".nii.gz",
+    "labels": {
+        "background": 0,
+        "left": 1,
+        "right": 2
+    },
+    "licence": "CC BY-NC-SA 4.0",
+    "numTraining": 13752,
+    "reference": "Rokuss, M., Hamm, B., Kirchhoff, Y., & Maier-Hein, K. (2025). Divide and Conquer: A Large-Scale Dataset and Model for Left-Right Breast MRI Segmentation. arXiv preprint arXiv:2507.13830.",
+    "release": "July 2025"
+}
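
dataset.json above fixes the label convention for this model: 0 = background, 1 = left breast, 2 = right breast, stored as .nii.gz. Below is a minimal sketch of splitting a prediction into per-side masks, assuming SimpleITK is available; the file name breast_pred.nii.gz is hypothetical.

# Split a predicted label map into separate left/right masks
# (label values follow dataset.json: 1 = left, 2 = right).
import SimpleITK as sitk

pred = sitk.ReadImage("breast_pred.nii.gz")   # hypothetical model output
arr = sitk.GetArrayFromImage(pred)

for label, side in ((1, "left"), (2, "right")):
    mask = (arr == label).astype("uint8")
    out = sitk.GetImageFromArray(mask)
    out.CopyInformation(pred)                 # keep spacing/origin/direction
    sitk.WriteImage(out, f"breast_pred_{side}.nii.gz")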
fold_all/checkpoint_final.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18441fe5536ce9209d216f8a9ba6e6331c69f54a0acb3fa2fca6428214b92d18
+size 104387177
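
checkpoint_final.pth is stored as a Git LFS pointer, so the actual weights must be fetched with git lfs pull after cloning. As a minimal sketch, the download can be checked against the pointer's size and SHA-256 shown above:

# Verify the pulled checkpoint against the LFS pointer metadata.
import hashlib
from pathlib import Path

path = Path("fold_all/checkpoint_final.pth")
expected_oid = "18441fe5536ce9209d216f8a9ba6e6331c69f54a0acb3fa2fca6428214b92d18"
expected_size = 104387177  # bytes, from the pointer file

assert path.stat().st_size == expected_size, "size mismatch -- run git lfs pull"
digest = hashlib.sha256(path.read_bytes()).hexdigest()
assert digest == expected_oid, "checksum mismatch"
print("checkpoint_final.pth verified")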
plans.json ADDED
@@ -0,0 +1,200 @@
+{
+    "dataset_name": "Dataset203_BreastDivider",
+    "plans_name": "nnUNetPlans",
+    "original_median_spacing_after_transp": [
+        1.9861830472946167,
+        0.7031000256538391,
+        0.7031000256538391
+    ],
+    "original_median_shape_after_transp": [
+        116,
+        489,
+        510
+    ],
+    "image_reader_writer": "SimpleITKIOWithReorient",
+    "transpose_forward": [
+        0,
+        1,
+        2
+    ],
+    "transpose_backward": [
+        0,
+        1,
+        2
+    ],
+    "configurations": {
+        "3d_dac": {
+            "data_identifier": "nnUNetPlans_3d_dac",
+            "preprocessor_name": "DefaultPreprocessor",
+            "batch_size": 3,
+            "patch_size": [
+                128,
+                128,
+                128
+            ],
+            "median_image_size_in_voxels": [
+                106.0,
+                477.0,
+                484.0
+            ],
+            "spacing": [
+                1.6448078360408545,
+                2.620146189350635,
+                2.658596972003579
+            ],
+            "normalization_schemes": [
+                "ZScoreNormalization"
+            ],
+            "use_mask_for_norm": [
+                false
+            ],
+            "resampling_fn_data": "resample_data_or_seg_to_shape",
+            "resampling_fn_seg": "resample_data_or_seg_to_shape",
+            "resampling_fn_data_kwargs": {
+                "is_seg": false,
+                "order": 3,
+                "order_z": 0,
+                "force_separate_z": null
+            },
+            "resampling_fn_seg_kwargs": {
+                "is_seg": true,
+                "order": 1,
+                "order_z": 0,
+                "force_separate_z": null
+            },
+            "resampling_fn_probabilities": "resample_data_or_seg_to_shape",
+            "resampling_fn_probabilities_kwargs": {
+                "is_seg": false,
+                "order": 1,
+                "order_z": 0,
+                "force_separate_z": null
+            },
+            "architecture": {
+                "network_class_name": "dynamic_network_architectures.architectures.unet.PlainConvUNet",
+                "arch_kwargs": {
+                    "n_stages": 6,
+                    "features_per_stage": [
+                        32,
+                        64,
+                        128,
+                        256,
+                        320,
+                        320
+                    ],
+                    "conv_op": "torch.nn.modules.conv.Conv3d",
+                    "kernel_sizes": [
+                        [
+                            3,
+                            3,
+                            3
+                        ],
+                        [
+                            3,
+                            3,
+                            3
+                        ],
+                        [
+                            3,
+                            3,
+                            3
+                        ],
+                        [
+                            3,
+                            3,
+                            3
+                        ],
+                        [
+                            3,
+                            3,
+                            3
+                        ],
+                        [
+                            3,
+                            3,
+                            3
+                        ]
+                    ],
+                    "strides": [
+                        [
+                            1,
+                            1,
+                            1
+                        ],
+                        [
+                            2,
+                            2,
+                            2
+                        ],
+                        [
+                            2,
+                            2,
+                            2
+                        ],
+                        [
+                            2,
+                            2,
+                            2
+                        ],
+                        [
+                            2,
+                            2,
+                            2
+                        ],
+                        [
+                            2,
+                            2,
+                            2
+                        ]
+                    ],
+                    "n_conv_per_stage": [
+                        2,
+                        2,
+                        2,
+                        2,
+                        2,
+                        2
+                    ],
+                    "n_conv_per_stage_decoder": [
+                        1,
+                        1,
+                        1,
+                        1,
+                        1
+                    ],
+                    "conv_bias": true,
+                    "norm_op": "torch.nn.modules.instancenorm.InstanceNorm3d",
+                    "norm_op_kwargs": {
+                        "eps": 1e-05,
+                        "affine": true
+                    },
+                    "dropout_op": null,
+                    "dropout_op_kwargs": null,
+                    "nonlin": "torch.nn.LeakyReLU",
+                    "nonlin_kwargs": {
+                        "inplace": true
+                    }
+                },
+                "_kw_requires_import": [
+                    "conv_op",
+                    "norm_op",
+                    "dropout_op",
+                    "nonlin"
+                ]
+            },
+            "batch_dice": false
+        }
+    },
+    "experiment_planner_used": "ExperimentPlanner",
+    "label_manager": "LabelManager",
+    "foreground_intensity_properties_per_channel": {
+        "0": {
+            "max": 32760.0,
+            "mean": 341.6798400878906,
+            "median": 139.00372314453125,
+            "min": -737.0,
+            "percentile_00_5": 0.0,
+            "percentile_99_5": 3482.0,
+            "std": 559.0700073242188
+        }
+    }
+}
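
plans.json defines the single "3d_dac" configuration this checkpoint belongs to (trained on fold "all", per fold_all/checkpoint_final.pth): a 6-stage PlainConvUNet operating on 128³ patches at roughly 1.64 × 2.62 × 2.66 mm target spacing with z-score normalization. Inference is expected to go through the standard nnU-Net v2 pipeline using this configuration; the sketch below only inspects the shipped plans file and derives the physical patch field of view, assuming plans.json sits in the working directory.

# Summarize the "3d_dac" configuration from the shipped plans.json.
import json

with open("plans.json") as f:
    plans = json.load(f)

cfg = plans["configurations"]["3d_dac"]
patch = cfg["patch_size"]      # [128, 128, 128] voxels
spacing = cfg["spacing"]       # target spacing in mm, same axis order
fov_mm = [round(p * s, 1) for p, s in zip(patch, spacing)]

print("dataset:   ", plans["dataset_name"])
print("network:   ", cfg["architecture"]["network_class_name"])
print("patch size:", patch, "voxels")
print("spacing:   ", spacing, "mm")
print("patch FOV: ", fov_mm, "mm")  # ~[210.5, 335.4, 340.3] mm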