jadechoghari committed on
Commit
6abd04f
·
verified ·
1 Parent(s): 83f42ad

add initial files

Browse files
Files changed (7) hide show
  1. .gitattributes +1 -0
  2. README.md +71 -0
  3. config.json +124 -0
  4. config.yaml +322 -0
  5. model.safetensors +3 -0
  6. replay.mp4 +3 -0
  7. train_config.json +282 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ replay.mp4 filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: lerobot
3
+ tags:
4
+ - model_hub_mixin
5
+ - pytorch_model_hub_mixin
6
+ - robotics
7
+ - dot
8
+ license: apache-2.0
9
+ datasets:
10
+ - lerobot/aloha_sim_insertion_human
11
+ pipeline_tag: robotics
12
+ ---
13
+
14
+ # Model Card for "Decoder Only Transformer (DOT) Policy" for ALOHA bimanual insert problem
15
+
16
+ Read more about the model and implementation details in the [DOT Policy repository](https://github.com/IliaLarchenko/dot_policy).
17
+
18
+ This model is trained using the [LeRobot library](https://huggingface.co/lerobot) and achieves state-of-the-art results on behavior cloning on ALOHA bimanual insert dataset. It achieves 29.6% success rate vs. 21% for the previous state-of-the-art model (ACT).
19
+
20
+ This result is achieved without the checkpoint selection and is easy to reproduce.
21
+
22
+ You can use this model by installing LeRobot from [this branch](https://github.com/IliaLarchenko/lerobot/tree/dot_new_config).
23
+
24
+ To train the model:
25
+
26
+ ```bash
27
+ python lerobot/scripts/train.py \
28
+ --policy.type=dot \
29
+ --dataset.repo_id=lerobot/aloha_sim_insertion_human \
30
+ --env.type=aloha \
31
+ --env.task=AlohaInsertion-v0 \
32
+ --env.episode_length=500 \
33
+ --output_dir=outputs/train/pusht_aloha_insert \
34
+ --batch_size=24 \
35
+ --log_freq=1000 \
36
+ --eval_freq=10000 \
37
+ --save_freq=10000 \
38
+ --offline.steps=100000 \
39
+ --seed=100000 \
40
+ --wandb.enable=true \
41
+ --num_workers=24 \
42
+ --use_amp=true \
43
+ --device=cuda \
44
+ --policy.optimizer_lr=0.00003 \
45
+ --policy.optimizer_min_lr=0.00001 \
46
+ --policy.optimizer_lr_cycle_steps=100000 \
47
+ --policy.train_horizon=150 \
48
+ --policy.inference_horizon=100 \
49
+ --policy.lookback_obs_steps=30 \
50
+ --policy.lookback_aug=5 \
51
+ --policy.rescale_shape="[480,640]" \
52
+ --policy.alpha=0.98 \
53
+ --policy.train_alpha=0.99
54
+ ```
55
+
56
+ To evaluate the model:
57
+
58
+ ```bash
59
+ python lerobot/scripts/eval.py \
60
+ --policy.path=IliaLarchenko/dot_bimanual_insert \
61
+ --env.type=aloha \
62
+ --env.task=AlohaInsertion-v0 \
63
+ --env.episode_length=500 \
64
+ --eval.n_episodes=1000 \
65
+ --eval.batch_size=100 \
66
+ --seed=1000000
67
+ ```
68
+
69
+ Model size:
70
+ - Total parameters: 14.1M
71
+ - Trainable parameters: 2.9M
config.json ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "type": "dot",
3
+ "n_obs_steps": 3,
4
+ "normalization_mapping": {
5
+ "VISUAL": "MEAN_STD",
6
+ "STATE": "MIN_MAX",
7
+ "ENV": "MIN_MAX",
8
+ "ACTION": "MIN_MAX"
9
+ },
10
+ "input_features": {
11
+ "observation.images.top": {
12
+ "type": "VISUAL",
13
+ "shape": [
14
+ 3,
15
+ 480,
16
+ 640
17
+ ]
18
+ },
19
+ "observation.state": {
20
+ "type": "STATE",
21
+ "shape": [
22
+ 14
23
+ ]
24
+ }
25
+ },
26
+ "output_features": {
27
+ "action": {
28
+ "type": "ACTION",
29
+ "shape": [
30
+ 14
31
+ ]
32
+ }
33
+ },
34
+ "train_horizon": 150,
35
+ "inference_horizon": 100,
36
+ "lookback_obs_steps": 30,
37
+ "lookback_aug": 5,
38
+ "override_dataset_stats": false,
39
+ "new_dataset_stats": {
40
+ "action": {
41
+ "max": [
42
+ 512.0,
43
+ 512.0
44
+ ],
45
+ "min": [
46
+ 0.0,
47
+ 0.0
48
+ ]
49
+ },
50
+ "observation.environment_state": {
51
+ "max": [
52
+ 512.0,
53
+ 512.0,
54
+ 512.0,
55
+ 512.0,
56
+ 512.0,
57
+ 512.0,
58
+ 512.0,
59
+ 512.0,
60
+ 512.0,
61
+ 512.0,
62
+ 512.0,
63
+ 512.0,
64
+ 512.0,
65
+ 512.0,
66
+ 512.0,
67
+ 512.0
68
+ ],
69
+ "min": [
70
+ 0.0,
71
+ 0.0,
72
+ 0.0,
73
+ 0.0,
74
+ 0.0,
75
+ 0.0,
76
+ 0.0,
77
+ 0.0,
78
+ 0.0,
79
+ 0.0,
80
+ 0.0,
81
+ 0.0,
82
+ 0.0,
83
+ 0.0,
84
+ 0.0,
85
+ 0.0
86
+ ]
87
+ },
88
+ "observation.state": {
89
+ "max": [
90
+ 512.0,
91
+ 512.0
92
+ ],
93
+ "min": [
94
+ 0.0,
95
+ 0.0
96
+ ]
97
+ }
98
+ },
99
+ "vision_backbone": "resnet18",
100
+ "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
101
+ "pre_norm": true,
102
+ "lora_rank": 20,
103
+ "merge_lora": false,
104
+ "dim_model": 128,
105
+ "n_heads": 8,
106
+ "dim_feedforward": 512,
107
+ "n_decoder_layers": 8,
108
+ "rescale_shape": [
109
+ 480,
110
+ 640
111
+ ],
112
+ "crop_scale": 1.0,
113
+ "state_noise": 0.01,
114
+ "noise_decay": 0.999995,
115
+ "dropout": 0.1,
116
+ "alpha": 0.98,
117
+ "train_alpha": 0.99,
118
+ "predict_every_n": 1,
119
+ "return_every_n": 1,
120
+ "optimizer_lr": 3e-05,
121
+ "optimizer_min_lr": 1e-05,
122
+ "optimizer_lr_cycle_steps": 100000,
123
+ "optimizer_weight_decay": 1e-05
124
+ }
config.yaml ADDED
@@ -0,0 +1,322 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ resume: false
2
+ device: cuda
3
+ use_amp: true
4
+ seed: 100000
5
+ dataset_repo_id: lerobot/aloha_sim_insertion_human
6
+ video_backend: pyav
7
+ training:
8
+ offline_steps: 100000
9
+ num_workers: 12
10
+ batch_size: 12
11
+ eval_freq: 10000
12
+ log_freq: 1000
13
+ save_checkpoint: true
14
+ save_freq: 10000
15
+ online_steps: 0
16
+ online_rollout_n_episodes: 1
17
+ online_rollout_batch_size: 1
18
+ online_steps_between_rollouts: 1
19
+ online_sampling_ratio: 0.5
20
+ online_env_seed: null
21
+ online_buffer_capacity: null
22
+ online_buffer_seed_size: 0
23
+ do_online_rollout_async: false
24
+ image_transforms:
25
+ enable: false
26
+ max_num_transforms: 3
27
+ random_order: false
28
+ brightness:
29
+ weight: 1
30
+ min_max:
31
+ - 0.8
32
+ - 1.2
33
+ contrast:
34
+ weight: 1
35
+ min_max:
36
+ - 0.8
37
+ - 1.2
38
+ saturation:
39
+ weight: 1
40
+ min_max:
41
+ - 0.5
42
+ - 1.5
43
+ hue:
44
+ weight: 1
45
+ min_max:
46
+ - -0.05
47
+ - 0.05
48
+ sharpness:
49
+ weight: 1
50
+ min_max:
51
+ - 0.8
52
+ - 1.2
53
+ save_model: true
54
+ grad_clip_norm: 50
55
+ lr: 3.0e-05
56
+ min_lr: 1.0e-05
57
+ lr_cycle_steps: 100000
58
+ weight_decay: 1.0e-05
59
+ delta_timestamps:
60
+ observation.images.top:
61
+ - -0.7
62
+ - -0.68
63
+ - -0.66
64
+ - -0.64
65
+ - -0.62
66
+ - -0.6
67
+ - -0.58
68
+ - -0.56
69
+ - -0.54
70
+ - -0.52
71
+ - -0.5
72
+ - -0.02
73
+ - 0.0
74
+ observation.state:
75
+ - -0.7
76
+ - -0.68
77
+ - -0.66
78
+ - -0.64
79
+ - -0.62
80
+ - -0.6
81
+ - -0.58
82
+ - -0.56
83
+ - -0.54
84
+ - -0.52
85
+ - -0.5
86
+ - -0.02
87
+ - 0.0
88
+ action:
89
+ - -0.7
90
+ - -0.68
91
+ - -0.66
92
+ - -0.64
93
+ - -0.62
94
+ - -0.6
95
+ - -0.58
96
+ - -0.56
97
+ - -0.54
98
+ - -0.52
99
+ - -0.5
100
+ - -0.02
101
+ - 0.0
102
+ - 0.02
103
+ - 0.04
104
+ - 0.06
105
+ - 0.08
106
+ - 0.1
107
+ - 0.12
108
+ - 0.14
109
+ - 0.16
110
+ - 0.18
111
+ - 0.2
112
+ - 0.22
113
+ - 0.24
114
+ - 0.26
115
+ - 0.28
116
+ - 0.3
117
+ - 0.32
118
+ - 0.34
119
+ - 0.36
120
+ - 0.38
121
+ - 0.4
122
+ - 0.42
123
+ - 0.44
124
+ - 0.46
125
+ - 0.48
126
+ - 0.5
127
+ - 0.52
128
+ - 0.54
129
+ - 0.56
130
+ - 0.58
131
+ - 0.6
132
+ - 0.62
133
+ - 0.64
134
+ - 0.66
135
+ - 0.68
136
+ - 0.7
137
+ - 0.72
138
+ - 0.74
139
+ - 0.76
140
+ - 0.78
141
+ - 0.8
142
+ - 0.82
143
+ - 0.84
144
+ - 0.86
145
+ - 0.88
146
+ - 0.9
147
+ - 0.92
148
+ - 0.94
149
+ - 0.96
150
+ - 0.98
151
+ - 1.0
152
+ - 1.02
153
+ - 1.04
154
+ - 1.06
155
+ - 1.08
156
+ - 1.1
157
+ - 1.12
158
+ - 1.14
159
+ - 1.16
160
+ - 1.18
161
+ - 1.2
162
+ - 1.22
163
+ - 1.24
164
+ - 1.26
165
+ - 1.28
166
+ - 1.3
167
+ - 1.32
168
+ - 1.34
169
+ - 1.36
170
+ - 1.38
171
+ - 1.4
172
+ - 1.42
173
+ - 1.44
174
+ - 1.46
175
+ - 1.48
176
+ - 1.5
177
+ - 1.52
178
+ - 1.54
179
+ - 1.56
180
+ - 1.58
181
+ - 1.6
182
+ - 1.62
183
+ - 1.64
184
+ - 1.66
185
+ - 1.68
186
+ - 1.7
187
+ - 1.72
188
+ - 1.74
189
+ - 1.76
190
+ - 1.78
191
+ - 1.8
192
+ - 1.82
193
+ - 1.84
194
+ - 1.86
195
+ - 1.88
196
+ - 1.9
197
+ - 1.92
198
+ - 1.94
199
+ - 1.96
200
+ - 1.98
201
+ - 2.0
202
+ - 2.02
203
+ - 2.04
204
+ - 2.06
205
+ - 2.08
206
+ - 2.1
207
+ - 2.12
208
+ - 2.14
209
+ - 2.16
210
+ - 2.18
211
+ - 2.2
212
+ - 2.22
213
+ - 2.24
214
+ - 2.26
215
+ - 2.28
216
+ - 2.3
217
+ - 2.32
218
+ - 2.34
219
+ - 2.36
220
+ - 2.38
221
+ - 2.4
222
+ - 2.42
223
+ - 2.44
224
+ - 2.46
225
+ - 2.48
226
+ - 2.5
227
+ - 2.52
228
+ - 2.54
229
+ - 2.56
230
+ - 2.58
231
+ - 2.6
232
+ - 2.62
233
+ - 2.64
234
+ - 2.66
235
+ - 2.68
236
+ - 2.7
237
+ - 2.72
238
+ - 2.74
239
+ - 2.76
240
+ - 2.78
241
+ - 2.8
242
+ - 2.82
243
+ - 2.84
244
+ - 2.86
245
+ - 2.88
246
+ - 2.9
247
+ - 2.92
248
+ - 2.94
249
+ - 2.96
250
+ - 2.98
251
+ eval:
252
+ n_episodes: 50
253
+ batch_size: 10
254
+ use_async_envs: false
255
+ wandb:
256
+ enable: true
257
+ disable_artifact: false
258
+ project: insert
259
+ notes: ''
260
+ fps: 50
261
+ env:
262
+ name: aloha
263
+ task: AlohaInsertion-v0
264
+ state_dim: 14
265
+ action_dim: 14
266
+ fps: ${fps}
267
+ episode_length: 500
268
+ gym:
269
+ obs_type: pixels_agent_pos
270
+ render_mode: rgb_array
271
+ override_dataset_stats:
272
+ observation.images.top:
273
+ mean:
274
+ - - - 0.485
275
+ - - - 0.456
276
+ - - - 0.406
277
+ std:
278
+ - - - 0.229
279
+ - - - 0.224
280
+ - - - 0.225
281
+ policy:
282
+ name: dot
283
+ n_obs_steps: 3
284
+ train_horizon: 150
285
+ inference_horizon: 100
286
+ lookback_obs_steps: 30
287
+ lookback_aug: 5
288
+ input_shapes:
289
+ observation.images.top:
290
+ - 3
291
+ - 480
292
+ - 640
293
+ observation.state:
294
+ - ${env.state_dim}
295
+ output_shapes:
296
+ action:
297
+ - ${env.action_dim}
298
+ input_normalization_modes:
299
+ observation.images.top: mean_std
300
+ observation.state: min_max
301
+ output_normalization_modes:
302
+ action: min_max
303
+ vision_backbone: resnet18
304
+ pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1
305
+ rescale_shape:
306
+ - 480
307
+ - 640
308
+ lora_rank: 20
309
+ merge_lora: true
310
+ crop_scale: 0.8
311
+ state_noise: 0.01
312
+ noise_decay: 0.999995
313
+ pre_norm: true
314
+ dim_model: 128
315
+ n_heads: 8
316
+ dim_feedforward: 512
317
+ n_decoder_layers: 8
318
+ dropout: 0.1
319
+ alpha: 0.98
320
+ train_alpha: 0.99
321
+ predict_every_n: 1
322
+ return_every_n: 1
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:706683a6b1c1c69f0b5cc577c9dcf08a8761ff30b1b25ab3511f7a0ab050ae5e
3
+ size 56555664
replay.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ff0d0e1c523e870ff2a57f9cd4823d07335973367c2e9e0ee71913b5894234e9
3
+ size 202117
train_config.json ADDED
@@ -0,0 +1,282 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "dataset": {
3
+ "repo_id": "lerobot/aloha_sim_insertion_human",
4
+ "episodes": null,
5
+ "image_transforms": {
6
+ "enable": false,
7
+ "max_num_transforms": 3,
8
+ "random_order": false,
9
+ "tfs": {
10
+ "brightness": {
11
+ "weight": 1.0,
12
+ "type": "ColorJitter",
13
+ "kwargs": {
14
+ "brightness": [
15
+ 0.8,
16
+ 1.2
17
+ ]
18
+ }
19
+ },
20
+ "contrast": {
21
+ "weight": 1.0,
22
+ "type": "ColorJitter",
23
+ "kwargs": {
24
+ "contrast": [
25
+ 0.8,
26
+ 1.2
27
+ ]
28
+ }
29
+ },
30
+ "saturation": {
31
+ "weight": 1.0,
32
+ "type": "ColorJitter",
33
+ "kwargs": {
34
+ "saturation": [
35
+ 0.5,
36
+ 1.5
37
+ ]
38
+ }
39
+ },
40
+ "hue": {
41
+ "weight": 1.0,
42
+ "type": "ColorJitter",
43
+ "kwargs": {
44
+ "hue": [
45
+ -0.05,
46
+ 0.05
47
+ ]
48
+ }
49
+ },
50
+ "sharpness": {
51
+ "weight": 1.0,
52
+ "type": "SharpnessJitter",
53
+ "kwargs": {
54
+ "sharpness": [
55
+ 0.5,
56
+ 1.5
57
+ ]
58
+ }
59
+ }
60
+ }
61
+ },
62
+ "local_files_only": false,
63
+ "use_imagenet_stats": true,
64
+ "video_backend": "pyav"
65
+ },
66
+ "env": {
67
+ "type": "aloha",
68
+ "task": "AlohaInsertion-v0",
69
+ "fps": 50,
70
+ "features": {
71
+ "action": {
72
+ "type": "ACTION",
73
+ "shape": [
74
+ 14
75
+ ]
76
+ },
77
+ "agent_pos": {
78
+ "type": "STATE",
79
+ "shape": [
80
+ 14
81
+ ]
82
+ },
83
+ "pixels/top": {
84
+ "type": "VISUAL",
85
+ "shape": [
86
+ 480,
87
+ 640,
88
+ 3
89
+ ]
90
+ }
91
+ },
92
+ "features_map": {
93
+ "action": "action",
94
+ "agent_pos": "observation.state",
95
+ "top": "observation.image.top",
96
+ "pixels/top": "observation.images.top"
97
+ },
98
+ "episode_length": 500,
99
+ "obs_type": "pixels_agent_pos",
100
+ "render_mode": "rgb_array"
101
+ },
102
+ "policy": {
103
+ "type": "dot",
104
+ "n_obs_steps": 3,
105
+ "normalization_mapping": {
106
+ "VISUAL": "MEAN_STD",
107
+ "STATE": "MIN_MAX",
108
+ "ENV": "MIN_MAX",
109
+ "ACTION": "MIN_MAX"
110
+ },
111
+ "input_features": {
112
+ "observation.images.top": {
113
+ "type": "VISUAL",
114
+ "shape": [
115
+ 3,
116
+ 480,
117
+ 640
118
+ ]
119
+ },
120
+ "observation.state": {
121
+ "type": "STATE",
122
+ "shape": [
123
+ 14
124
+ ]
125
+ }
126
+ },
127
+ "output_features": {
128
+ "action": {
129
+ "type": "ACTION",
130
+ "shape": [
131
+ 14
132
+ ]
133
+ }
134
+ },
135
+ "train_horizon": 150,
136
+ "inference_horizon": 100,
137
+ "lookback_obs_steps": 30,
138
+ "lookback_aug": 5,
139
+ "override_dataset_stats": false,
140
+ "new_dataset_stats": {
141
+ "action": {
142
+ "max": [
143
+ 512.0,
144
+ 512.0
145
+ ],
146
+ "min": [
147
+ 0.0,
148
+ 0.0
149
+ ]
150
+ },
151
+ "observation.environment_state": {
152
+ "max": [
153
+ 512.0,
154
+ 512.0,
155
+ 512.0,
156
+ 512.0,
157
+ 512.0,
158
+ 512.0,
159
+ 512.0,
160
+ 512.0,
161
+ 512.0,
162
+ 512.0,
163
+ 512.0,
164
+ 512.0,
165
+ 512.0,
166
+ 512.0,
167
+ 512.0,
168
+ 512.0
169
+ ],
170
+ "min": [
171
+ 0.0,
172
+ 0.0,
173
+ 0.0,
174
+ 0.0,
175
+ 0.0,
176
+ 0.0,
177
+ 0.0,
178
+ 0.0,
179
+ 0.0,
180
+ 0.0,
181
+ 0.0,
182
+ 0.0,
183
+ 0.0,
184
+ 0.0,
185
+ 0.0,
186
+ 0.0
187
+ ]
188
+ },
189
+ "observation.state": {
190
+ "max": [
191
+ 512.0,
192
+ 512.0
193
+ ],
194
+ "min": [
195
+ 0.0,
196
+ 0.0
197
+ ]
198
+ }
199
+ },
200
+ "vision_backbone": "resnet18",
201
+ "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
202
+ "pre_norm": true,
203
+ "lora_rank": 20,
204
+ "merge_lora": false,
205
+ "dim_model": 128,
206
+ "n_heads": 8,
207
+ "dim_feedforward": 512,
208
+ "n_decoder_layers": 8,
209
+ "rescale_shape": [
210
+ 480,
211
+ 640
212
+ ],
213
+ "crop_scale": 1.0,
214
+ "state_noise": 0.01,
215
+ "noise_decay": 0.999995,
216
+ "dropout": 0.1,
217
+ "alpha": 0.98,
218
+ "train_alpha": 0.99,
219
+ "predict_every_n": 1,
220
+ "return_every_n": 1,
221
+ "optimizer_lr": 3e-05,
222
+ "optimizer_min_lr": 1e-05,
223
+ "optimizer_lr_cycle_steps": 100000,
224
+ "optimizer_weight_decay": 1e-05
225
+ },
226
+ "output_dir": "outputs/train/pusht_aloha_insert",
227
+ "job_name": "aloha_dot",
228
+ "resume": false,
229
+ "device": "cuda",
230
+ "use_amp": true,
231
+ "seed": 100000,
232
+ "num_workers": 24,
233
+ "batch_size": 24,
234
+ "eval_freq": 10000,
235
+ "log_freq": 1000,
236
+ "save_checkpoint": true,
237
+ "save_freq": 10000,
238
+ "offline": {
239
+ "steps": 100000
240
+ },
241
+ "online": {
242
+ "steps": 0,
243
+ "rollout_n_episodes": 1,
244
+ "rollout_batch_size": 1,
245
+ "steps_between_rollouts": null,
246
+ "sampling_ratio": 0.5,
247
+ "env_seed": null,
248
+ "buffer_capacity": null,
249
+ "buffer_seed_size": 0,
250
+ "do_rollout_async": false
251
+ },
252
+ "use_policy_training_preset": true,
253
+ "optimizer": {
254
+ "type": "adamw",
255
+ "lr": 3e-05,
256
+ "weight_decay": 1e-05,
257
+ "grad_clip_norm": 10.0,
258
+ "betas": [
259
+ 0.9,
260
+ 0.999
261
+ ],
262
+ "eps": 1e-08
263
+ },
264
+ "scheduler": {
265
+ "type": "cosine_annealing",
266
+ "num_warmup_steps": 0,
267
+ "min_lr": 1e-05,
268
+ "T_max": 100000
269
+ },
270
+ "eval": {
271
+ "n_episodes": 50,
272
+ "batch_size": 50,
273
+ "use_async_envs": false
274
+ },
275
+ "wandb": {
276
+ "enable": true,
277
+ "disable_artifact": false,
278
+ "project": "insert",
279
+ "entity": null,
280
+ "notes": null
281
+ }
282
+ }