{
  "data_root_dir": "/mnt/blob/open_x",
  "diffusion_model_type": "DiT-B",
  "future_action_window_size": 15,
  "hf_token": "HF_TOKEN",
  "image_aug": true,
  "is_resume": false,
  "load_all_data_for_training": true,
  "past_action_window_size": 0,
  "pretrained_checkpoint": "/mnt/blob/vla_model/openvla-7b-prismatic/checkpoints/step-295000-epoch-40-loss=0.2200.pt",
  "repeated_diffusion_steps": 8,
  "resume_epoch": null,
  "resume_step": null,
  "run_id": "prism-dinosiglip-224px+oxe+diffusion+n2+b16+x42--image_aug",
  "run_id_note": null,
  "run_root_dir": "/mnt/blob/cogact/ditb_8_lr2e-5_b16_fa15_pa0_shuffle_oxe",
  "save_interval": 5000,
  "seed": 42,
  "trackers": [
    "jsonl",
    "wandb"
  ],
  "vla": {
    "base_vlm": "prism-dinosiglip-224px+7b",
    "data_mix": "oxe_magic_soup_plus_minus",
    "enable_gradient_checkpointing": true,
    "enable_mixed_precision_training": true,
    "epochs": 100,
    "expected_world_size": 16,
    "freeze_llm_backbone": false,
    "freeze_vision_backbone": false,
    "global_batch_size": 256,
    "learning_rate": 2e-05,
    "lr_scheduler_type": "constant",
    "max_grad_norm": 1.0,
    "max_steps": null,
    "per_device_batch_size": 16,
    "reduce_in_full_precision": true,
    "shuffle_buffer_size": 250000,
    "train_strategy": "fsdp-full-shard",
    "type": "prism-dinosiglip-224px+oxe+diffusion",
    "unfreeze_last_llm_layer": false,
    "vla_id": "prism-dinosiglip-224px+oxe+diffusion",
    "warmup_ratio": 0.0,
    "weight_decay": 0.0
  },
  "wandb_entity": null,
  "wandb_project": null
}
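The batch-size fields in the "vla" block are internally consistent: 16 devices ("expected_world_size") times a per-device batch of 16 gives the global batch of 256, i.e. no gradient accumulation is implied. Below is a minimal sketch that loads such a config and checks this arithmetic; the config file name and path are hypothetical assumptions for illustration, not part of the training codebase.

```python
import json

# Hypothetical location: assumes the config above is saved as config.json
# under the run directory given by "run_root_dir".
CONFIG_PATH = "/mnt/blob/cogact/ditb_8_lr2e-5_b16_fa15_pa0_shuffle_oxe/config.json"

with open(CONFIG_PATH) as f:
    cfg = json.load(f)

vla = cfg["vla"]

world_size = vla["expected_world_size"]    # 16 GPUs
per_device = vla["per_device_batch_size"]  # 16 samples per GPU per step
global_bs = vla["global_batch_size"]       # 256 samples per optimizer step

# 16 * 16 = 256, so each optimizer step uses exactly one forward/backward
# pass per device (gradient accumulation factor of 1).
grad_accum_steps = global_bs // (per_device * world_size)
assert per_device * world_size * grad_accum_steps == global_bs
print(f"gradient accumulation steps: {grad_accum_steps}")  # -> 1
```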