yuanxuewei committed
Commit 79e7168 · verified · 1 parent: 821a145

Add files using upload-large-folder tool
checkpoints/steps_30000_pytorch_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee2ae998dba854f33a43c7940ee8cf0addb0fda11bd9392574400b6614be0e9b
+ size 8935435793
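
The checkpoint is stored as a Git LFS pointer: the three lines above record only the LFS spec version, the SHA-256 of the real object, and its size (~8.9 GB). A minimal loading sketch, assuming the LFS object has been fetched (e.g. via `git lfs pull`) and that the file is a plain `torch.save` artifact; its internal key layout is not documented in this commit:

```python
import torch

# Load on CPU; the blob is ~8.9 GB, so this needs commensurate RAM.
state = torch.load(
    "checkpoints/steps_30000_pytorch_model.pt", map_location="cpu"
)

# If the checkpoint is a dict (a state_dict plus training metadata is
# common), list its top-level entries to see what was saved.
if isinstance(state, dict):
    print(list(state.keys())[:20])
```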
config.json ADDED
@@ -0,0 +1,115 @@
+ {
+   "run_id": "0903_libero_10_augsteps_0_wo_flash_attention_wo_augsteps_two_view_action_chunk_16_pretrained_vlm",
+   "run_root_dir": "./playground/Checkpoints",
+   "seed": 42,
+   "trackers": [
+     "jsonl",
+     "wandb"
+   ],
+   "wandb_entity": "michaelyu-1101-fudanuniversity",
+   "wandb_project": "Internvla",
+   "is_debug": false,
+   "framework": {
+     "framework_py": "DinoQFormerACT",
+     "qwenvl": {
+       "base_vlm": "/mnt/phwfile/efm_t/zhuyangkun_tmp_need_del/exp/exp_08_09/manip_sys2_qwen25_3b_onevision_molmo_a0all_refsp20/checkpoint-20000",
+       "attn_implementation": "flash_attention_2",
+       "vl_hidden_dim": 2048
+     },
+     "dino": {
+       "dino_backbone": "dinov2_vitl14"
+     },
+     "layer_qformer": {
+       "qformer_end_layer": 37,
+       "qformer_start_layer": 36,
+       "num_query_tokens": 64,
+       "grad_scale": 0.5
+     },
+     "action_model": {
+       "action_model_type": "DiT-B",
+       "action_hidden_dim": 768,
+       "action_dim": 7,
+       "use_ema": false,
+       "future_action_window_size": 7,
+       "past_action_window_size": 0,
+       "repeated_diffusion_steps": 8
+     },
+     "reduce_in_full_precision": true
+   },
+   "datasets": {
+     "vlm_data": {
+       "dataformat": "llava_json",
+       "dataset_use": "asv2_conversation_en,asv2_detailed_description_en,asv2_region_captioning_en,coco_internvl_longcap_en,coco_karpathy_train_567_en,coco_negative_gpt4o_en,coco_poetry_zh,coco_rem_en_zh,cocorem_exist_yorn_en,cocotextv2_en,cocotextv2_gpt4o_en,okvqa_en,refcoco_grounding_aug_en,refcoco_grounding_en,tallyqa_coco_en,toloka_grounding_aug_en,vqav2_en,vsr_en",
+       "eval_dataset": "aokvqa_cauldron_llava_format",
+       "data_flatten": false,
+       "base_interval": 2,
+       "max_pixels": 50176,
+       "min_pixels": 784,
+       "fix_image_size": [
+         224,
+         224
+       ],
+       "model_max_length": 1024,
+       "model_type": "qwen2.5vl",
+       "per_device_batch_size": 4
+     },
+     "vla_data": {
+       "dataset_py": "lerobot_libero",
+       "data_root_dir": "playground/Datasets/LEROBOT_LIBERO_DATA",
+       "data_mix": "libero_10",
+       "action_type": "delta_qpos",
+       "CoT_prompt": "Your task is {instruction}. To identify the key objects for your task. Locate their bounding boxes in [x1,y1,x2,y2] format.",
+       "CoT_answer": "bbox",
+       "default_image_resolution": [
+         3,
+         224,
+         224
+       ],
+       "per_device_batch_size": 16,
+       "load_all_data_for_training": true,
+       "obs": [
+         "image_0"
+       ]
+     }
+   },
+   "trainer": {
+     "epochs": 100,
+     "max_train_steps": 100000,
+     "num_warmup_steps": 5000,
+     "save_interval": 10000,
+     "eval_interval": 1000,
+     "learning_rate": {
+       "base": 2.5e-05
+     },
+     "lr_scheduler_type": "cosine_with_min_lr",
+     "scheduler_specific_kwargs": {
+       "min_lr": 1e-06
+     },
+     "freeze_modules": "",
+     "loss_scale": {
+       "vla": 1.0,
+       "vlm": 0.1
+     },
+     "max_grad_norm": 1.0,
+     "warmup_ratio": 0.1,
+     "weight_decay": 0.0,
+     "logging_frequency": 10,
+     "gradient_clipping": 1.0,
+     "gradient_accumulation_steps": 1,
+     "optimizer": {
+       "name": "AdamW",
+       "betas": [
+         0.9,
+         0.95
+       ],
+       "eps": 1e-08,
+       "weight_decay": 1e-08
+     },
+     "is_resume": false,
+     "resume_epoch": null,
+     "resume_step": null,
+     "enable_gradient_checkpointing": true,
+     "enable_mixed_precision_training": true
+   },
+   "output_dir": "./playground/Checkpoints/0903_libero_10_augsteps_0_wo_flash_attention_wo_augsteps_two_view_action_chunk_16_pretrained_vlm"
+ }
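
config.json records the full training setup: a DinoQFormerACT framework pairing a Qwen2.5-VL backbone (vl_hidden_dim 2048) with a DINOv2 ViT-L/14 encoder and a DiT-B diffusion action head (7-D actions, a 7-step future action window, 8 repeated diffusion steps), co-trained on LLaVA-format VLM data at loss weight 0.1 and LIBERO-10 VLA data at loss weight 1.0. A minimal inspection sketch, reading only fields shown above:

```python
import json

with open("config.json") as f:
    cfg = json.load(f)

print(cfg["framework"]["framework_py"])                       # DinoQFormerACT
print(cfg["framework"]["action_model"]["action_model_type"])  # DiT-B
print(cfg["trainer"]["loss_scale"])                           # {'vla': 1.0, 'vlm': 0.1}
```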
config.yaml ADDED
@@ -0,0 +1,96 @@
+ run_id: 0903_libero_10_augsteps_0_wo_flash_attention_wo_augsteps_two_view_action_chunk_16_pretrained_vlm
+ run_root_dir: ./playground/Checkpoints
+ seed: 42
+ trackers:
+ - jsonl
+ - wandb
+ wandb_entity: michaelyu-1101-fudanuniversity
+ wandb_project: Internvla
+ is_debug: false
+ framework:
+   framework_py: DinoQFormerACT
+   qwenvl:
+     base_vlm: /mnt/phwfile/efm_t/zhuyangkun_tmp_need_del/exp/exp_08_09/manip_sys2_qwen25_3b_onevision_molmo_a0all_refsp20/checkpoint-20000
+     attn_implementation: flash_attention_2
+     vl_hidden_dim: 2048
+   dino:
+     dino_backbone: dinov2_vitl14
+   layer_qformer:
+     qformer_end_layer: 37
+     qformer_start_layer: 36
+     num_query_tokens: 64
+     grad_scale: 0.5
+   action_model:
+     action_model_type: DiT-B
+     action_hidden_dim: 768
+     action_dim: 7
+     use_ema: false
+     future_action_window_size: 7
+     past_action_window_size: 0
+     repeated_diffusion_steps: 8
+   reduce_in_full_precision: true
+ datasets:
+   vlm_data:
+     dataformat: llava_json
+     dataset_use: asv2_conversation_en,asv2_detailed_description_en,asv2_region_captioning_en,coco_internvl_longcap_en,coco_karpathy_train_567_en,coco_negative_gpt4o_en,coco_poetry_zh,coco_rem_en_zh,cocorem_exist_yorn_en,cocotextv2_en,cocotextv2_gpt4o_en,okvqa_en,refcoco_grounding_aug_en,refcoco_grounding_en,tallyqa_coco_en,toloka_grounding_aug_en,vqav2_en,vsr_en
+     eval_dataset: aokvqa_cauldron_llava_format
+     data_flatten: false
+     base_interval: 2
+     max_pixels: 50176
+     min_pixels: 784
+     fix_image_size:
+     - 224
+     - 224
+     model_max_length: 1024
+     model_type: qwen2.5vl
+     per_device_batch_size: 4
+   vla_data:
+     dataset_py: lerobot_libero
+     data_root_dir: playground/Datasets/LEROBOT_LIBERO_DATA
+     data_mix: libero_10
+     action_type: delta_qpos
+     CoT_prompt: Your task is {instruction}. To identify the key objects for your task.
+       Locate their bounding boxes in [x1,y1,x2,y2] format.
+     CoT_answer: bbox
+     default_image_resolution:
+     - 3
+     - 224
+     - 224
+     per_device_batch_size: 16
+     load_all_data_for_training: true
+     obs:
+     - image_0
+ trainer:
+   epochs: 100
+   max_train_steps: 100000
+   num_warmup_steps: 5000
+   save_interval: 10000
+   eval_interval: 1000
+   learning_rate:
+     base: 2.5e-05
+   lr_scheduler_type: cosine_with_min_lr
+   scheduler_specific_kwargs:
+     min_lr: 1.0e-06
+   freeze_modules: ''
+   loss_scale:
+     vla: 1.0
+     vlm: 0.1
+   max_grad_norm: 1.0
+   warmup_ratio: 0.1
+   weight_decay: 0.0
+   logging_frequency: 10
+   gradient_clipping: 1.0
+   gradient_accumulation_steps: 1
+   optimizer:
+     name: AdamW
+     betas:
+     - 0.9
+     - 0.95
+     eps: 1.0e-08
+     weight_decay: 1.0e-08
+   is_resume: false
+   resume_epoch: null
+   resume_step: null
+   enable_gradient_checkpointing: true
+   enable_mixed_precision_training: true
+ output_dir: ./playground/Checkpoints/0903_libero_10_augsteps_0_wo_flash_attention_wo_augsteps_two_view_action_chunk_16_pretrained_vlm
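
config.yaml appears to be the same configuration serialized as YAML (note the yaml.dump-style line folding of CoT_prompt). A quick consistency check, assuming PyYAML is installed and the two files are kept in sync:

```python
import json

import yaml  # PyYAML

with open("config.yaml") as f:
    cfg_yaml = yaml.safe_load(f)
with open("config.json") as f:
    cfg_json = json.load(f)

# YAML folds the wrapped CoT_prompt plain scalar back into a single
# space-joined string, so both documents should parse to the same dict.
print(cfg_yaml == cfg_json)  # expected: True
```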
dataset_statistics.json ADDED
@@ -0,0 +1,133 @@
+ {
+   "franka": {
+     "action": {
+       "mean": [
+         0.01820324920117855,
+         0.05858374014496803,
+         -0.05592384561896324,
+         0.004626928828656673,
+         0.00289608770981431,
+         -0.007673131301999092,
+         0.5457824468612671
+       ],
+       "std": [
+         0.2825464606285095,
+         0.35904666781425476,
+         0.3673802614212036,
+         0.03770702704787254,
+         0.05429719388484955,
+         0.08725254982709885,
+         0.49815231561660767
+       ],
+       "max": [
+         0.9375,
+         0.9375,
+         0.9375,
+         0.30000001192092896,
+         0.29357144236564636,
+         0.375,
+         1.0
+       ],
+       "min": [
+         -0.9375,
+         -0.9375,
+         -0.9375,
+         -0.23642857372760773,
+         -0.3053571283817291,
+         -0.3675000071525574,
+         0.0
+       ],
+       "q01": [
+         -0.6348214149475098,
+         -0.7741071581840515,
+         -0.7633928656578064,
+         -0.09749999642372131,
+         -0.14819999992847435,
+         -0.2742857038974762,
+         0.0
+       ],
+       "q99": [
+         0.7714285850524902,
+         0.8464285731315613,
+         0.9375,
+         0.13928571343421936,
+         0.15964286029338837,
+         0.3246428668498993,
+         1.0
+       ],
+       "mask": [
+         true,
+         true,
+         true,
+         true,
+         true,
+         true,
+         false
+       ]
+     },
+     "state": {
+       "mean": [
+         -0.04190658777952194,
+         0.03539430722594261,
+         0.8257141709327698,
+         2.908308267593384,
+         -0.5562185049057007,
+         -0.16649018228054047,
+         0.028316624462604523,
+         -0.028561657294631004
+       ],
+       "std": [
+         0.10743364691734314,
+         0.14424669742584229,
+         0.2572328448295593,
+         0.3441362977027893,
+         1.234421730041504,
+         0.3579835891723633,
+         0.013308707624673843,
+         0.013174631632864475
+       ],
+       "max": [
+         0.21031762659549713,
+         0.39128610491752625,
+         1.3332009315490723,
+         3.6714255809783936,
+         3.560650587081909,
+         1.386339545249939,
+         0.04160946607589722,
+         0.0013633022317662835
+       ],
+       "min": [
+         -0.4828203022480011,
+         -0.3255046010017395,
+         0.445506751537323,
+         1.1321442127227783,
+         -3.641430377960205,
+         -1.842738389968872,
+         -0.0010040868073701859,
+         -0.04111652821302414
+       ],
+       "q01": [
+         -0.3899900782108307,
+         -0.2838300323486328,
+         0.44795057058334353,
+         1.8810229921340942,
+         -2.886677579879761,
+         -1.1599004411697387,
+         0.002066459748893976,
+         -0.04001387819647789
+       ],
+       "q99": [
+         0.1530261474847791,
+         0.32915401458740223,
+         1.2546923208236693,
+         3.303542451858519,
+         2.7496529006957933,
+         0.6893712210655194,
+         0.040048558115959164,
+         -0.0017598449345678235
+       ]
+     },
+     "num_transitions": 101469,
+     "num_trajectories": 379
+   }
+ }
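
dataset_statistics.json carries per-dimension statistics for the "franka" embodiment of LIBERO-10: 7-D actions (delta_qpos plus a gripper channel) and 8-D states over 379 trajectories / 101,469 transitions. How these statistics are consumed is not shown in this commit; a common convention in OpenVLA-style pipelines is to rescale each masked action dimension to [-1, 1] by its 1st/99th percentiles and pass unmasked dimensions (here the gripper) through unchanged. A hedged sketch under that assumption:

```python
import json

import numpy as np

with open("dataset_statistics.json") as f:
    stats = json.load(f)["franka"]["action"]

q01 = np.asarray(stats["q01"])
q99 = np.asarray(stats["q99"])
mask = np.asarray(stats["mask"])  # last dim (gripper) is False

def normalize_action(action: np.ndarray) -> np.ndarray:
    """Map masked dims to [-1, 1] via q01/q99; pass unmasked dims through."""
    scaled = 2.0 * (action - q01) / np.maximum(q99 - q01, 1e-8) - 1.0
    return np.where(mask, np.clip(scaled, -1.0, 1.0), action)
```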
run_lerobot_datasets.sh ADDED
@@ -0,0 +1,64 @@
+ export HF_HOME=/mnt/petrelfs/share/yejinhui/Models/huggingface_cache
+
+ export NCCL_SOCKET_IFNAME=bond0
+ export NCCL_IB_HCA=mlx5_2,mlx5_3
+
+ # For checking communication during checkpoint saving
+ export NCCL_BLOCKING_WAIT=1
+ export NCCL_ASYNC_ERROR_HANDLING=1
+ export NCCL_TIMEOUT=1000 # NCCL timeout in seconds
+
+ cd /mnt/petrelfs/yujunqiu/code/vla-baseline/llavavla-00hf1
+
+ # MODEL_PATH=/mnt/petrelfs/yejinhui/Projects/llavavla/playground/Pretrained_models/Qwen2.5-VL-3B-Instruct # must be a local path, since the simpler eval may run elsewhere
+ # data_root_dir=./playground/Datasets/OXE_LEROBOT_DATASET
+ run_root_dir=./playground/Checkpoints
+ task_name=libero_10
+ run_id=0903_${task_name}_augsteps_0_wo_flash_attention_wo_augsteps_two_view_action_chunk_16_pretrained_vlm
+
+
+ export WANDB_MODE=disabled
+
+ output_dir=${run_root_dir}/${run_id}
+ mkdir -p ${output_dir}
+ # copy this script to the output dir
+ cp $0 ${output_dir}/
+
+ # --pretrained_checkpoint ${MODEL_PATH} \
+ # export CUDA_VISIBLE_DEVICES=4,5,6,7
+
+ # --datasets.vla_data.data_mix libero_goal \
+ # --framework.framework_py qwenpi \
+
+ DEBUG=False
+ # DEBUG=True
+
+ if [ "$DEBUG" = True ]; then
+     num_processes=1
+     run_id=debug
+ else
+     num_processes=8
+ fi
+
+
+ accelerate launch \
+     --config_file scripts/run_scripts/deepspeed_zero2.yaml \
+     --num_processes ${num_processes} \
+     llavavla/training/train_qwenvla.py \
+     --config_yaml ./llavavla/config/lerobot_data/qwenvla_cotrain_libero.yaml \
+     --datasets.vla_data.per_device_batch_size 16 \
+     --datasets.vla_data.data_mix ${task_name} \
+     --framework.action_model.future_action_window_size 7 \
+     --trainer.max_train_steps 100_000 \
+     --trainer.save_interval 10_000 \
+     --run_root_dir ${run_root_dir} \
+     --run_id ${run_id} \
+     --wandb_project Internvla \
+     --wandb_entity michaelyu-1101-fudanuniversity \
+     --is_debug ${DEBUG} \
+     --framework.qwenvl.base_vlm /mnt/phwfile/efm_t/zhuyangkun_tmp_need_del/exp/exp_08_09/manip_sys2_qwen25_3b_onevision_molmo_a0all_refsp20/checkpoint-20000

+ # --framework.qwenvl.base_vlm ${MODEL_PATH} \
+ # --data_root_dir ${data_root_dir} \
+
+ # --is_debug True
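
run_lerobot_datasets.sh is the launch script for this run (`bash run_lerobot_datasets.sh` from the repo root): it takes the base YAML config and overrides individual fields with dotted CLI flags (e.g. `--datasets.vla_data.data_mix`), which is how the values in config.json above were produced. The hard-coded cluster paths (HF_HOME, the `cd` target, the base_vlm checkpoint) must be adapted before reuse. Note also that `output_dir` is derived from `run_id` before the DEBUG branch may reset `run_id=debug`, so a debug run launches with `--run_id debug` while the script copy lands in the original run directory.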
summary.jsonl ADDED
@@ -0,0 +1,3 @@
+ {"steps": 10000}
+ {"steps": 20000}
+ {"steps": 30000}