Robotics · LeRobot · Safetensors · act
swpark5 committed 90d4912 (verified) · 1 parent: 7323a07

Upload policy weights, train config and readme

Files changed (4)
  1. README.md +62 -0
  2. config.json +62 -0
  3. model.safetensors +3 -0
  4. train_config.json +224 -0
README.md ADDED
@@ -0,0 +1,62 @@
---
datasets: lerobot/aloha_sim_transfer_cube_human
library_name: lerobot
license: apache-2.0
model_name: act
pipeline_tag: robotics
tags:
- act
- lerobot
- robotics
---

# Model Card for act

<!-- Provide a quick summary of what the model is/does. -->

[Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high success rates.

This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot).
See the full documentation at [LeRobot Docs](https://huggingface.co/docs/lerobot/index).

---

## How to Get Started with the Model

For a complete walkthrough, see the [training guide](https://huggingface.co/docs/lerobot/il_robots#train-a-policy).
Below is a short version of how to train and run inference/evaluation:

### Train from scratch

```bash
lerobot-train \
--dataset.repo_id=${HF_USER}/<dataset> \
--policy.type=act \
--output_dir=outputs/train/<desired_policy_repo_id> \
--job_name=lerobot_training \
--policy.device=cuda \
--policy.repo_id=${HF_USER}/<desired_policy_repo_id> \
--wandb.enable=true
```

_Writes checkpoints to `outputs/train/<desired_policy_repo_id>/checkpoints/`._

### Evaluate the policy/run inference

```bash
lerobot-record \
--robot.type=so100_follower \
--dataset.repo_id=<hf_user>/eval_<dataset> \
--policy.path=<hf_user>/<desired_policy_repo_id> \
--episodes=10
```

Prefix the dataset repo id with **eval\_** and point `--policy.path` at a local or Hub checkpoint.
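
To run the policy from Python instead of through `lerobot-record`, the sketch below loads this checkpoint and queries one action on a dummy observation. It is a minimal sketch: the `ACTPolicy` import path follows recent LeRobot releases and may differ in yours, while the observation keys and shapes are taken from this repo's `config.json`.

```python
# Minimal inference sketch -- the import path is an assumption (it varies across
# LeRobot versions); observation keys and shapes match this repo's config.json.
import torch

from lerobot.policies.act.modeling_act import ACTPolicy

policy = ACTPolicy.from_pretrained("swpark5/baseline_act_aloha_sim_transfer_cube")
policy.eval()
policy.reset()  # clear the internal action queue at the start of an episode

# Dummy observation: one 480x640 RGB top camera and a 14-dim joint state.
batch = {
    "observation.images.top": torch.rand(1, 3, 480, 640),  # float32 in [0, 1]
    "observation.state": torch.zeros(1, 14),
}

with torch.inference_mode():
    # Each call returns one 14-dim action; internally the policy predicts a
    # chunk of `chunk_size` (here 100) actions and steps through it.
    action = policy.select_action(batch)

print(action.shape)  # torch.Size([1, 14])
```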

---

## Model Details

- **License:** apache-2.0
config.json ADDED
@@ -0,0 +1,62 @@
{
    "type": "act",
    "n_obs_steps": 1,
    "input_features": {
        "observation.images.top": { "type": "VISUAL", "shape": [3, 480, 640] },
        "observation.state": { "type": "STATE", "shape": [14] }
    },
    "output_features": {
        "action": { "type": "ACTION", "shape": [14] }
    },
    "device": "cuda",
    "use_amp": false,
    "push_to_hub": true,
    "repo_id": "swpark5/baseline_act_aloha_sim_transfer_cube",
    "private": null,
    "tags": null,
    "license": null,
    "pretrained_path": "outputs/train/2025-11-17/22-38-12_baseline_act_aloha_sim_transfer_cube/checkpoints/last/pretrained_model",
    "chunk_size": 100,
    "n_action_steps": 100,
    "normalization_mapping": { "VISUAL": "MEAN_STD", "STATE": "MEAN_STD", "ACTION": "MEAN_STD" },
    "vision_backbone": "resnet18",
    "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
    "replace_final_stride_with_dilation": 0,
    "pre_norm": false,
    "dim_model": 512,
    "n_heads": 8,
    "dim_feedforward": 3200,
    "feedforward_activation": "relu",
    "n_encoder_layers": 4,
    "n_decoder_layers": 1,
    "use_vae": true,
    "latent_dim": 32,
    "n_vae_encoder_layers": 4,
    "temporal_ensemble_coeff": null,
    "dropout": 0.1,
    "kl_weight": 10.0,
    "optimizer_lr": 1e-05,
    "optimizer_weight_decay": 0.0001,
    "optimizer_lr_backbone": 1e-05
}
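
These hyperparameters fully describe the policy: a ResNet-18 visual backbone feeding a 512-dimensional transformer that decodes 100-step action chunks. A minimal sketch for downloading and inspecting this file (assumes `huggingface_hub` is installed; the repo id and filename are those uploaded in this commit):

```python
# Minimal sketch: fetch and inspect the policy config from the Hub
# (assumes huggingface_hub is installed).
import json

from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="swpark5/baseline_act_aloha_sim_transfer_cube",
    filename="config.json",
)
with open(path) as f:
    cfg = json.load(f)

print(cfg["chunk_size"], cfg["n_action_steps"])   # 100 100
print(cfg["vision_backbone"], cfg["dim_model"])   # resnet18 512
```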
model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d161c643dabb2b867da5da2db8d613a7b5fa5b44674d447c95c3cb7ac816ff49
size 206765304
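
The three lines above are a Git LFS pointer, not the weights themselves; the actual file is roughly 207 MB. A minimal sketch for fetching the real file and counting its parameters (assumes `huggingface_hub` and `safetensors` are installed):

```python
# Minimal sketch: download the weight file and count parameters
# (assumes huggingface_hub and safetensors are installed).
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

path = hf_hub_download(
    repo_id="swpark5/baseline_act_aloha_sim_transfer_cube",
    filename="model.safetensors",
)
state_dict = load_file(path)  # dict of parameter name -> torch.Tensor
print(sum(t.numel() for t in state_dict.values()), "parameters")
```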
train_config.json ADDED
@@ -0,0 +1,224 @@
{
    "dataset": {
        "repo_id": "lerobot/aloha_sim_transfer_cube_human",
        "root": null,
        "episodes": null,
        "image_transforms": {
            "enable": false,
            "max_num_transforms": 3,
            "random_order": false,
            "tfs": {
                "brightness": { "weight": 1.0, "type": "ColorJitter", "kwargs": { "brightness": [0.8, 1.2] } },
                "contrast": { "weight": 1.0, "type": "ColorJitter", "kwargs": { "contrast": [0.8, 1.2] } },
                "saturation": { "weight": 1.0, "type": "ColorJitter", "kwargs": { "saturation": [0.5, 1.5] } },
                "hue": { "weight": 1.0, "type": "ColorJitter", "kwargs": { "hue": [-0.05, 0.05] } },
                "sharpness": { "weight": 1.0, "type": "SharpnessJitter", "kwargs": { "sharpness": [0.5, 1.5] } },
                "affine": { "weight": 1.0, "type": "RandomAffine", "kwargs": { "degrees": [-5.0, 5.0], "translate": [0.05, 0.05] } }
            }
        },
        "revision": null,
        "use_imagenet_stats": true,
        "video_backend": "torchcodec",
        "streaming": false
    },
    "env": {
        "type": "aloha",
        "task": "AlohaTransferCube-v0",
        "fps": 50,
        "features": {
            "action": { "type": "ACTION", "shape": [14] },
            "agent_pos": { "type": "STATE", "shape": [14] },
            "pixels/top": { "type": "VISUAL", "shape": [480, 640, 3] }
        },
        "features_map": {
            "action": "action",
            "agent_pos": "observation.state",
            "top": "observation.image.top",
            "pixels/top": "observation.images.top"
        },
        "max_parallel_tasks": 1,
        "disable_env_checker": true,
        "episode_length": 400,
        "obs_type": "pixels_agent_pos",
        "observation_height": 480,
        "observation_width": 640,
        "render_mode": "rgb_array"
    },
    "policy": {
        "type": "act",
        "n_obs_steps": 1,
        "input_features": {
            "observation.images.top": { "type": "VISUAL", "shape": [3, 480, 640] },
            "observation.state": { "type": "STATE", "shape": [14] }
        },
        "output_features": {
            "action": { "type": "ACTION", "shape": [14] }
        },
        "device": "cuda",
        "use_amp": false,
        "push_to_hub": true,
        "repo_id": "swpark5/baseline_act_aloha_sim_transfer_cube",
        "private": null,
        "tags": null,
        "license": null,
        "pretrained_path": "outputs/train/2025-11-17/22-38-12_baseline_act_aloha_sim_transfer_cube/checkpoints/last/pretrained_model",
        "chunk_size": 100,
        "n_action_steps": 100,
        "normalization_mapping": { "VISUAL": "MEAN_STD", "STATE": "MEAN_STD", "ACTION": "MEAN_STD" },
        "vision_backbone": "resnet18",
        "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
        "replace_final_stride_with_dilation": 0,
        "pre_norm": false,
        "dim_model": 512,
        "n_heads": 8,
        "dim_feedforward": 3200,
        "feedforward_activation": "relu",
        "n_encoder_layers": 4,
        "n_decoder_layers": 1,
        "use_vae": true,
        "latent_dim": 32,
        "n_vae_encoder_layers": 4,
        "temporal_ensemble_coeff": null,
        "dropout": 0.1,
        "kl_weight": 10.0,
        "optimizer_lr": 1e-05,
        "optimizer_weight_decay": 0.0001,
        "optimizer_lr_backbone": 1e-05
    },
    "output_dir": "outputs/train/2025-11-17/22-38-12_baseline_act_aloha_sim_transfer_cube",
    "job_name": "baseline_act_aloha_sim_transfer_cube",
    "resume": true,
    "seed": 1000,
    "num_workers": 4,
    "batch_size": 8,
    "steps": 100000,
    "eval_freq": 10000,
    "log_freq": 100,
    "save_checkpoint": true,
    "save_freq": 10000,
    "use_policy_training_preset": true,
    "optimizer": {
        "type": "adamw",
        "lr": 1e-05,
        "weight_decay": 0.0001,
        "grad_clip_norm": 10.0,
        "betas": [0.9, 0.999],
        "eps": 1e-08
    },
    "scheduler": null,
    "eval": { "n_episodes": 10, "batch_size": 5, "use_async_envs": false },
    "wandb": {
        "enable": true,
        "disable_artifact": false,
        "project": "lerobot",
        "entity": null,
        "notes": null,
        "run_id": "ed84wjdd",
        "mode": null
    },
    "checkpoint_path": "outputs/train/2025-11-17/22-38-12_baseline_act_aloha_sim_transfer_cube/checkpoints/last",
    "rename_map": {}
}