realzL committed (verified)
Commit fabc231 · Parent: 19edae7

Add files using upload-large-folder tool

MaskTokenizer/projects/vlm/vq_sam2/configs/a100_qwen25vl/qwen25vl-7b_vqsam2_refseg.py ADDED
@@ -0,0 +1,254 @@
from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
                            LoggerHook, ParamSchedulerHook)
from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR
from torch.optim import AdamW
from peft import LoraConfig

from xtuner.dataset import ConcatDataset
from xtuner.dataset.samplers import LengthGroupedSampler
from xtuner.engine.runner import TrainLoop

import torch
from transformers import (
    Qwen2_5_VLForConditionalGeneration,
    AutoTokenizer,
    AutoProcessor,
)

from projects.transformers.vq_sam2 import VQ_SAM2, VQ_SAM2Config, SAM2Config
from projects.vlm.vq_sam2.models import QWEN25VL_VQSAM2Model, DirectResize

from projects.vlm.vq_sam2.datasets_qwen25vl import LazySupervisedDataset, qwen25vl_vqsam2_collate_fn

#######################################################################
#                          PART 1  Settings                           #
#######################################################################
qwen25vl_path = "./work_dirs/qwen25vl_7b_mask_loss_refseg_only_v2/hf_ckpt"
vqsam2_pretrained_pth = "pretrained_weights/iter_17923_resampled_256x4.pth"
sam2_path = "pretrained_weights/sam2.1_hiera_large.pt"
pretrained_pth = None

work_dir = "work_dirs/qwen25vl_7b_mask_loss_refseg_only_v3"

# Scheduler & Optimizer
batch_size = 4  # per device
accumulative_counts = 2
dataloader_num_workers = 4
max_epochs = 1
optim_type = AdamW
lr = 4e-5
betas = (0.9, 0.999)
weight_decay = 0.05
max_norm = 1  # grad clip
warmup_ratio = 0.05

# Save
save_steps = 1000
save_total_limit = 2  # maximum checkpoints to keep (-1 means unlimited)

model_max_length = 8192


#######################################################################
#            PART 2  Model & Tokenizer & Image Processor              #
#######################################################################
sam2_config = dict(
    type=SAM2Config,
    cfg_path="sam2.1_hiera_l.yaml",
    ckpt_path=sam2_path,
)

vq_sam2_config = dict(
    type=VQ_SAM2Config,
    sam2_config=sam2_config,
    codebook_size=256,
    codebook_depth=4,
    shared_codebook=False,
    latent_dim=256,
    loss_sample_points=True,
    vq_loss_weight=1.0,
)

model = dict(
    type=QWEN25VL_VQSAM2Model,
    qwen25vl_hf_model=dict(
        type=Qwen2_5_VLForConditionalGeneration.from_pretrained,
        pretrained_model_name_or_path=qwen25vl_path,
        attn_implementation="flash_attention_2",
        torch_dtype=torch.bfloat16,
        trust_remote_code=True,
    ),
    vqsam2_hf_model=dict(
        type=VQ_SAM2,
        config=vq_sam2_config,
    ),
    tokenizer=dict(
        type=AutoTokenizer.from_pretrained,
        pretrained_model_name_or_path=qwen25vl_path,
        cache_dir="./cache",
        model_max_length=model_max_length,
        padding_side="right",
        use_fast=False,
    ),
    preprocessor=dict(
        type=AutoProcessor.from_pretrained,
        pretrained_model_name_or_path=qwen25vl_path,
    ),
    llm_lora=dict(
        type=LoraConfig,
        r=128,
        lora_alpha=256,
        lora_dropout=0.05,
        bias='none',
        task_type='CAUSAL_LM',
    ),
    vqsam2_pretrained_weights=vqsam2_pretrained_pth,
    pretrained_pth=pretrained_pth,
    freeze_sam2_decoder=True,
    repa_loss=False,
)

#######################################################################
#                   PART 3  Dataset & Dataloader                      #
#######################################################################

sam2_image_processor = dict(
    type=DirectResize,
    target_length=1024,
)

standard_dataset = dict(
    type=LazySupervisedDataset,
    tokenizer=dict(
        type=AutoTokenizer.from_pretrained,
        pretrained_model_name_or_path=qwen25vl_path,
        cache_dir="./cache",
        model_max_length=model_max_length,
        padding_side="right",
        use_fast=False,
    ),
    data_args=dict(
        model_type="qwen2.5vl",
        dataset_use="mask_generation_refseg%500",
        max_pixels=2048 * 28 * 28,
        min_pixels=4 * 28 * 28,
        video_max_total_pixels=576 * 28 * 28,
        video_min_total_pixels=144 * 28 * 28,
        image_processor=dict(
            type=AutoProcessor.from_pretrained,
            pretrained_model_name_or_path=qwen25vl_path,
        ),
    ),
    sam_preprocessor=sam2_image_processor,
)


train_dataset = dict(
    type=ConcatDataset, datasets=[
        standard_dataset,
    ]
)
train_dataloader = dict(
    batch_size=batch_size,
    num_workers=dataloader_num_workers,
    dataset=train_dataset,
    sampler=dict(
        type=LengthGroupedSampler,
        length_property='modality_length',
        per_device_batch_size=batch_size * accumulative_counts),
    collate_fn=dict(type=qwen25vl_vqsam2_collate_fn),
)

#######################################################################
#                   PART 4  Scheduler & Optimizer                     #
#######################################################################
# optimizer
optim_wrapper = dict(
    type=AmpOptimWrapper,
    optimizer=dict(
        type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
    clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
    accumulative_counts=accumulative_counts,
    loss_scale='dynamic',
    dtype='bfloat16',
)


# learning policy
# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md  # noqa: E501
param_scheduler = [
    dict(
        type=LinearLR,
        start_factor=1e-5,
        by_epoch=True,
        begin=0,
        end=warmup_ratio * max_epochs,
        convert_to_iter_based=True),
    dict(
        type=CosineAnnealingLR,
        eta_min=0.0,
        by_epoch=True,
        begin=warmup_ratio * max_epochs,
        end=max_epochs,
        convert_to_iter_based=True)
]

# train, val, test setting
train_cfg = dict(type=TrainLoop, max_epochs=max_epochs)

#######################################################################
#                           PART 5  Runtime                           #
#######################################################################
# Log the dialogue periodically during training (optional).
custom_hooks = [
    # dict(type=DatasetInfoHook, tokenizer=tokenizer),
]

# configure default hooks
default_hooks = dict(
    # record the time of every iteration.
    timer=dict(type=IterTimerHook),
    # print a log every 10 iterations.
    logger=dict(type=LoggerHook, log_metric_by_epoch=False, interval=10),
    # enable the parameter scheduler.
    param_scheduler=dict(type=ParamSchedulerHook),
    # save a checkpoint every `save_steps` iterations.
    checkpoint=dict(
        type=CheckpointHook,
        save_optimizer=False,
        by_epoch=False,
        interval=save_steps,
        max_keep_ckpts=save_total_limit),
    # set the sampler seed in the distributed environment.
    sampler_seed=dict(type=DistSamplerSeedHook),
)

# configure environment
env_cfg = dict(
    # whether to enable cudnn benchmark
    cudnn_benchmark=False,
    # set multi-process parameters
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    # set distributed parameters
    dist_cfg=dict(backend='nccl'),
)

# set visualizer
# visualizer = None
from mmengine.visualization import Visualizer, TensorboardVisBackend
visualizer = dict(type=Visualizer, vis_backends=[dict(type=TensorboardVisBackend)])

# set log level
log_level = 'INFO'

# load from which checkpoint
load_from = None

# whether to resume training from the loaded checkpoint
resume = False

# default to a random seed with `deterministic` disabled
randomness = dict(seed=None, deterministic=False)

# set log processor
log_processor = dict(by_epoch=False)
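
A quick smoke test for a config like this is to build the mmengine runner from it directly, which is roughly what xtuner's `xtuner train <config>` entry point does. A minimal sketch, assuming the MaskTokenizer project root is on PYTHONPATH and the checkpoint paths above exist:

from mmengine.config import Config
from mmengine.runner import Runner

# Parse the config; the dicts above stay lazy until the runner builds them.
cfg = Config.fromfile(
    "MaskTokenizer/projects/vlm/vq_sam2/configs/a100_qwen25vl/"
    "qwen25vl-7b_vqsam2_refseg.py")
cfg.launcher = "pytorch"  # use "none" for a single-process debug run

runner = Runner.from_cfg(cfg)  # instantiates model, dataloader, hooks, scheduler
runner.train()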
MaskTokenizer/projects/vlm/vq_sam2/configs/a100_qwen25vl/qwen25vl_vqsam2_refseg_debug.py ADDED
@@ -0,0 +1,252 @@
from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
                            LoggerHook, ParamSchedulerHook)
from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR
from torch.optim import AdamW
from peft import LoraConfig

from xtuner.dataset import ConcatDataset
from xtuner.dataset.samplers import LengthGroupedSampler
from xtuner.engine.runner import TrainLoop

import torch
from transformers import (
    Qwen2_5_VLForConditionalGeneration,
    AutoTokenizer,
    AutoProcessor,
)

from projects.transformers.vq_sam2 import VQ_SAM2, VQ_SAM2Config, SAM2Config
from projects.vlm.vq_sam2.models import QWEN25VL_VQSAM2Model, DirectResize

from projects.vlm.vq_sam2.datasets_qwen25vl import LazySupervisedDataset, qwen25vl_vqsam2_collate_fn

#######################################################################
#                          PART 1  Settings                           #
#######################################################################
qwen25vl_path = "pretrained_weights/qwen2_5_vl_vq_sam2_3b_256x4"
vqsam2_pretrained_pth = "pretrained_weights/iter_17923_resampled_256x4.pth"
sam2_path = "pretrained_weights/sam2.1_hiera_large.pt"
pretrained_pth = "./work_dirs/qwen25vl_3b_mask_loss_refseg_only_v4/iter_10000.pth"

work_dir = "work_dirs/qwen25vl_3b_mask_loss_refseg_only_v4_debug"

# Scheduler & Optimizer
batch_size = 4  # per device
accumulative_counts = 2
dataloader_num_workers = 4
max_epochs = 1
optim_type = AdamW
lr = 4e-5
betas = (0.9, 0.999)
weight_decay = 0.05
max_norm = 1  # grad clip
warmup_ratio = 0.05

# Save
save_steps = 1000
save_total_limit = -1  # maximum checkpoints to keep (-1 means unlimited)

model_max_length = 8192


#######################################################################
#            PART 2  Model & Tokenizer & Image Processor              #
#######################################################################
sam2_config = dict(
    type=SAM2Config,
    cfg_path="sam2.1_hiera_l.yaml",
    ckpt_path=sam2_path,
)

vq_sam2_config = dict(
    type=VQ_SAM2Config,
    sam2_config=sam2_config,
    codebook_size=256,
    codebook_depth=4,
    shared_codebook=False,
    latent_dim=256,
    loss_sample_points=True,
    vq_loss_weight=1.0,
)

model = dict(
    type=QWEN25VL_VQSAM2Model,
    qwen25vl_hf_model=dict(
        type=Qwen2_5_VLForConditionalGeneration.from_pretrained,
        pretrained_model_name_or_path=qwen25vl_path,
        attn_implementation="flash_attention_2",
        torch_dtype=torch.bfloat16,
        trust_remote_code=True,
    ),
    vqsam2_hf_model=dict(
        type=VQ_SAM2,
        config=vq_sam2_config,
    ),
    tokenizer=dict(
        type=AutoTokenizer.from_pretrained,
        pretrained_model_name_or_path=qwen25vl_path,
        cache_dir="./cache",
        model_max_length=model_max_length,
        padding_side="right",
        use_fast=False,
    ),
    preprocessor=dict(
        type=AutoProcessor.from_pretrained,
        pretrained_model_name_or_path=qwen25vl_path,
    ),
    llm_lora=dict(
        type=LoraConfig,
        r=128,
        lora_alpha=256,
        lora_dropout=0.05,
        bias='none',
        task_type='CAUSAL_LM',
    ),
    vqsam2_pretrained_weights=vqsam2_pretrained_pth,
    pretrained_pth=pretrained_pth,
    freeze_sam2_decoder=True,
)

#######################################################################
#                   PART 3  Dataset & Dataloader                      #
#######################################################################

sam2_image_processor = dict(
    type=DirectResize,
    target_length=1024,
)

standard_dataset = dict(
    type=LazySupervisedDataset,
    tokenizer=dict(
        type=AutoTokenizer.from_pretrained,
        pretrained_model_name_or_path=qwen25vl_path,
        cache_dir="./cache",
        model_max_length=model_max_length,
        padding_side="right",
        use_fast=False,
    ),
    data_args=dict(
        model_type="qwen2.5vl",
        dataset_use="mask_generation_visual7w%500",
        max_pixels=2048 * 28 * 28,
        min_pixels=4 * 28 * 28,
        video_max_total_pixels=576 * 28 * 28,
        video_min_total_pixels=144 * 28 * 28,
        image_processor=dict(
            type=AutoProcessor.from_pretrained,
            pretrained_model_name_or_path=qwen25vl_path,
        ),
    ),
    sam_preprocessor=sam2_image_processor,
)


train_dataset = dict(
    type=ConcatDataset, datasets=[
        standard_dataset,
    ]
)
train_dataloader = dict(
    batch_size=batch_size,
    num_workers=dataloader_num_workers,
    dataset=train_dataset,
    sampler=dict(
        type=LengthGroupedSampler,
        length_property='modality_length',
        per_device_batch_size=batch_size * accumulative_counts),
    collate_fn=dict(type=qwen25vl_vqsam2_collate_fn),
)

#######################################################################
#                   PART 4  Scheduler & Optimizer                     #
#######################################################################
# optimizer
optim_wrapper = dict(
    type=AmpOptimWrapper,
    optimizer=dict(
        type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
    clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
    accumulative_counts=accumulative_counts,
    loss_scale='dynamic',
    dtype='bfloat16',
)

# learning policy
# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md  # noqa: E501
param_scheduler = [
    dict(
        type=LinearLR,
        start_factor=1e-5,
        by_epoch=True,
        begin=0,
        end=warmup_ratio * max_epochs,
        convert_to_iter_based=True),
    dict(
        type=CosineAnnealingLR,
        eta_min=0.0,
        by_epoch=True,
        begin=warmup_ratio * max_epochs,
        end=max_epochs,
        convert_to_iter_based=True)
]

# train, val, test setting
train_cfg = dict(type=TrainLoop, max_epochs=max_epochs)

#######################################################################
#                           PART 5  Runtime                           #
#######################################################################
# Log the dialogue periodically during training (optional).
custom_hooks = [
    # dict(type=DatasetInfoHook, tokenizer=tokenizer),
]

# configure default hooks
default_hooks = dict(
    # record the time of every iteration.
    timer=dict(type=IterTimerHook),
    # print a log every 10 iterations.
    logger=dict(type=LoggerHook, log_metric_by_epoch=False, interval=10),
    # enable the parameter scheduler.
    param_scheduler=dict(type=ParamSchedulerHook),
    # save a checkpoint every `save_steps` iterations.
    checkpoint=dict(
        type=CheckpointHook,
        save_optimizer=False,
        by_epoch=False,
        interval=save_steps,
        max_keep_ckpts=save_total_limit),
    # set the sampler seed in the distributed environment.
    sampler_seed=dict(type=DistSamplerSeedHook),
)

# configure environment
env_cfg = dict(
    # whether to enable cudnn benchmark
    cudnn_benchmark=False,
    # set multi-process parameters
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    # set distributed parameters
    dist_cfg=dict(backend='nccl'),
)

# set visualizer
# visualizer = None
from mmengine.visualization import Visualizer, TensorboardVisBackend
visualizer = dict(type=Visualizer, vis_backends=[dict(type=TensorboardVisBackend)])

# set log level
log_level = 'INFO'

# load from which checkpoint
load_from = None

# whether to resume training from the loaded checkpoint
resume = False

# default to a random seed with `deterministic` disabled
randomness = dict(seed=None, deterministic=False)

# set log processor
log_processor = dict(by_epoch=False)
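
The schedule implied by these settings is easy to sanity-check by hand; a small sketch (the GPU count is an assumption, not part of the config):

num_gpus = 8  # assumption: one 8-GPU A100 node; the config does not fix this
batch_size, accumulative_counts = 4, 2
effective_batch = batch_size * accumulative_counts * num_gpus
print(effective_batch)  # 64 samples per optimizer step

warmup_ratio, max_epochs = 0.05, 1
# With convert_to_iter_based=True, the LinearLR stage ramps the lr from
# lr * start_factor up to lr over the first warmup_ratio * max_epochs = 0.05
# epochs (5% of all iterations); CosineAnnealingLR then decays it to eta_min.
print(warmup_ratio * max_epochs)  # 0.05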
MaskTokenizer/projects/vlm/vq_sam2/configs/a100_train/vq_sam2_codebookx4depthx1024sizex256dimxunsharex1MT_datasetxsa1bxcoconutx1.py ADDED
@@ -0,0 +1,212 @@
from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
                            LoggerHook, ParamSchedulerHook)
from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR
from torch.optim import AdamW

from xtuner.dataset import ConcatDataset
from xtuner.dataset.samplers import LengthGroupedSampler
from xtuner.engine.runner import TrainLoop

from projects.transformers.vq_sam2 import VQ_SAM2, VQ_SAM2Config, SAM2Config
from projects.vlm.vq_sam2.models import VQ_SAM2Model, DirectResize

from projects.vlm.vq_sam2.datasets import CoCoPanoSegDataset, SA1BDataset, COCONUTDataset
from projects.vlm.vq_sam2.datasets import vq_sam2_collate_fn

#######################################################################
#                          PART 1  Settings                           #
#######################################################################
sam2_path = "pretrained_weights/sam2.1_hiera_large.pt"
pretrained_pth = None

# Scheduler & Optimizer
batch_size = 16  # per device
accumulative_counts = 1
dataloader_num_workers = 16
max_epochs = 1
optim_type = AdamW
lr = 2e-5
betas = (0.9, 0.999)
weight_decay = 0.05
max_norm = 1  # grad clip
warmup_ratio = 0.05

# Save
save_steps = 3000
save_total_limit = 2  # maximum checkpoints to keep (-1 means unlimited)


#######################################################################
#            PART 2  Model & Tokenizer & Image Processor              #
#######################################################################
sam2_config = dict(
    type=SAM2Config,
    cfg_path="sam2.1_hiera_l.yaml",
    ckpt_path=sam2_path,
)

vq_sam2_config = dict(
    type=VQ_SAM2Config,
    sam2_config=sam2_config,
    codebook_size=1024,
    codebook_depth=4,
    shared_codebook=False,
    latent_dim=256,
    loss_sample_points=True,
    vq_loss_weight=1.0,
)

model = dict(
    type=VQ_SAM2Model,
    hf_model=dict(
        type=VQ_SAM2,
        config=vq_sam2_config,
    ),
    sam2_pretrained_weights=sam2_path,
    pretrained_pth=pretrained_pth,
    freeze_sam2_decoder=False,
)

#######################################################################
#                   PART 3  Dataset & Dataloader                      #
#######################################################################

sam2_image_processor = dict(
    type=DirectResize,
    target_length=1024,
)

DATA_ROOT = ''
sam_info_json = "./data/sam_info.json"
coconut_info_json = "./data/coconut_segments.json"

sa1b_dataset = dict(
    type=SA1BDataset,
    image_folder=DATA_ROOT,
    preprocessor=sam2_image_processor,
    multi_targets=False,
    repeats=1.0,
    fast_load=True,
    sam_info_json=sam_info_json,
    scan_record_folder='./left_sa1b_indices/vq_sam2_codebookx4depthx1024sizex256dimxunsharex1MT_datasetxsa1bxcoconutx1/',
)

coconut_dataset = dict(
    type=COCONUTDataset,
    image_folder=DATA_ROOT,
    preprocessor=sam2_image_processor,
    repeats=1.0,
    coconut_info_json=coconut_info_json,
)

train_dataset = dict(
    type=ConcatDataset, datasets=[
        sa1b_dataset, coconut_dataset,
    ]
)
train_dataloader = dict(
    batch_size=batch_size,
    num_workers=dataloader_num_workers,
    dataset=train_dataset,
    sampler=dict(
        type=LengthGroupedSampler,
        length_property='modality_length',
        per_device_batch_size=batch_size * accumulative_counts),
    collate_fn=dict(type=vq_sam2_collate_fn),
)

#######################################################################
#                   PART 4  Scheduler & Optimizer                     #
#######################################################################
# optimizer
optim_wrapper = dict(
    type=AmpOptimWrapper,
    optimizer=dict(
        type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
    clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
    accumulative_counts=accumulative_counts,
    loss_scale='dynamic',
    dtype='float32',
)

# learning policy
# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md  # noqa: E501
param_scheduler = [
    dict(
        type=LinearLR,
        start_factor=1e-5,
        by_epoch=True,
        begin=0,
        end=warmup_ratio * max_epochs,
        convert_to_iter_based=True),
    dict(
        type=CosineAnnealingLR,
        eta_min=0.0,
        by_epoch=True,
        begin=warmup_ratio * max_epochs,
        end=max_epochs,
        convert_to_iter_based=True)
]

# train, val, test setting
train_cfg = dict(type=TrainLoop, max_epochs=max_epochs)

#######################################################################
#                           PART 5  Runtime                           #
#######################################################################
# Log the dialogue periodically during training (optional).
custom_hooks = [
    # dict(type=DatasetInfoHook, tokenizer=tokenizer),
]

# configure default hooks
default_hooks = dict(
    # record the time of every iteration.
    timer=dict(type=IterTimerHook),
    # print a log every 10 iterations.
    logger=dict(type=LoggerHook, log_metric_by_epoch=False, interval=10),
    # enable the parameter scheduler.
    param_scheduler=dict(type=ParamSchedulerHook),
    # save a checkpoint every `save_steps` iterations.
    checkpoint=dict(
        type=CheckpointHook,
        save_optimizer=False,
        by_epoch=False,
        interval=save_steps,
        max_keep_ckpts=save_total_limit),
    # set the sampler seed in the distributed environment.
    sampler_seed=dict(type=DistSamplerSeedHook),
)

# configure environment
env_cfg = dict(
    # whether to enable cudnn benchmark
    cudnn_benchmark=False,
    # set multi-process parameters
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    # set distributed parameters
    dist_cfg=dict(backend='nccl'),
)

# set visualizer
# visualizer = None
from mmengine.visualization import Visualizer, TensorboardVisBackend
visualizer = dict(type=Visualizer, vis_backends=[dict(type=TensorboardVisBackend)])

# set log level
log_level = 'INFO'

# load from which checkpoint
load_from = None

# whether to resume training from the loaded checkpoint
resume = False

# default to a random seed with `deterministic` disabled
randomness = dict(seed=None, deterministic=False)

# set log processor
log_processor = dict(by_epoch=False)
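
Because this config trains the mask tokenizer alone (no LLM attached), the model can be instantiated in isolation for inspection. A minimal sketch, assuming the project packages are importable and the SAM2 checkpoint exists; `BUILDER` is xtuner's mmengine registry:

from mmengine.config import Config
from xtuner.registry import BUILDER

cfg = Config.fromfile(
    "MaskTokenizer/projects/vlm/vq_sam2/configs/a100_train/"
    "vq_sam2_codebookx4depthx1024sizex256dimxunsharex1MT_datasetxsa1bxcoconutx1.py")
model = BUILDER.build(cfg.model)  # VQ_SAM2Model wrapping VQ_SAM2(vq_sam2_config)
print(f"{sum(p.numel() for p in model.parameters()) / 1e6:.1f}M parameters")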