AdamLucek committed on
Commit 5f55ee9 · Parent: f1af368

Upload 2 files


lerobot yaml files

lerobot_configs/act_koch_real.yaml ADDED
@@ -0,0 +1,102 @@
+ # @package _global_
+
+ # Use `act_koch_real.yaml` to train on real-world datasets collected on Alexander Koch's robots.
+ # Compared to `act.yaml`, it contains 2 cameras (i.e. laptop, logitech) instead of 1 camera (i.e. top).
+ # Also, `training.eval_freq` is set to -1. This option controls how often checkpoints are evaluated
+ # during training; setting it to -1 deactivates evaluation, because real-world evaluation is done
+ # through our `control_robot.py` script instead.
+ # Look at the documentation in the header of `control_robot.py` for more information on how to
+ # collect data, train, and evaluate a policy.
+ #
+ # Example of usage for training:
+ # ```bash
+ # python lerobot/scripts/train.py \
+ #   policy=act_koch_real \
+ #   env=koch_real
+ # ```
+
+ seed: 1000
+ dataset_repo_id: lerobot/koch_pick_place_lego
+
+ override_dataset_stats:
+   observation.images.laptop:
+     # stats from ImageNet, since we use a pretrained vision model
+     mean: [[[0.485]], [[0.456]], [[0.406]]]  # (c,1,1)
+     std: [[[0.229]], [[0.224]], [[0.225]]]  # (c,1,1)
+   observation.images.logitech:
+     # stats from ImageNet, since we use a pretrained vision model
+     mean: [[[0.485]], [[0.456]], [[0.406]]]  # (c,1,1)
+     std: [[[0.229]], [[0.224]], [[0.225]]]  # (c,1,1)
+
+ training:
+   offline_steps: 80000
+   online_steps: 0
+   eval_freq: -1
+   save_freq: 10000
+   log_freq: 100
+   save_checkpoint: true
+
+   batch_size: 8
+   lr: 1e-5
+   lr_backbone: 1e-5
+   weight_decay: 1e-4
+   grad_clip_norm: 10
+   online_steps_between_rollouts: 1
+
+   delta_timestamps:
+     action: "[i / ${fps} for i in range(${policy.chunk_size})]"
+
+ eval:
+   n_episodes: 50
+   batch_size: 50
+
+ # See `configuration_act.py` for more details.
+ policy:
+   name: act
+
+   # Input / output structure.
+   n_obs_steps: 1
+   chunk_size: 100
+   n_action_steps: 100
+
+   input_shapes:
+     # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env?
+     observation.images.laptop: [3, 480, 640]
+     observation.images.logitech: [3, 480, 640]
+     observation.state: ["${env.state_dim}"]
+   output_shapes:
+     action: ["${env.action_dim}"]
+
+   # Normalization / Unnormalization
+   input_normalization_modes:
+     observation.images.laptop: mean_std
+     observation.images.logitech: mean_std
+     observation.state: mean_std
+   output_normalization_modes:
+     action: mean_std
+
+   # Architecture.
+   # Vision backbone.
+   vision_backbone: resnet18
+   pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1
+   replace_final_stride_with_dilation: false
+   # Transformer layers.
+   pre_norm: false
+   dim_model: 512
+   n_heads: 8
+   dim_feedforward: 3200
+   feedforward_activation: relu
+   n_encoder_layers: 4
+   # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in
+   # the code that means only the first layer is used. Here we match the original implementation by
+   # setting this to 1. See https://github.com/tonyzhaozh/act/issues/25#issue-2258740521.
+   n_decoder_layers: 1
+   # VAE.
+   use_vae: true
+   latent_dim: 32
+   n_vae_encoder_layers: 4
+
+   # Inference.
+   temporal_ensemble_momentum: null
+
+   # Training and loss computation.
+   dropout: 0.1
+   kl_weight: 10.0
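The `delta_timestamps` entry above is stored as a string that lerobot evaluates after Hydra has interpolated `${fps}` and `${policy.chunk_size}`. A minimal sketch of what it expands to, assuming `fps: 30` from the `koch_real` env config (that file is not part of this commit):

```python
# Hypothetical expansion of the delta_timestamps string in act_koch_real.yaml.
# fps=30 is an assumption: the real value is interpolated from the env config.
fps = 30
chunk_size = 100  # matches policy.chunk_size above

# The interpolated string "[i / 30 for i in range(100)]" evaluates to a list of
# offsets (in seconds) relative to the current frame, so each training sample
# pairs one observation with the next chunk_size actions.
action_delta_timestamps = [i / fps for i in range(chunk_size)]

print(action_delta_timestamps[:4])  # [0.0, 0.0333..., 0.0666..., 0.1]
```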
lerobot_configs/koch.yaml ADDED
@@ -0,0 +1,46 @@
+ _target_: lerobot.common.robot_devices.robots.koch.KochRobot
+ calibration_path: .cache/calibration/koch.pkl
+ leader_arms:
+   main:
+     _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus
+     port: /dev/tty.usbmodem585A0085151
+     motors:
+       # name: (index, model)
+       shoulder_pan: [1, "xl330-m077"]
+       shoulder_lift: [2, "xl330-m077"]
+       elbow_flex: [3, "xl330-m077"]
+       wrist_flex: [4, "xl330-m077"]
+       wrist_roll: [5, "xl330-m077"]
+       gripper: [6, "xl330-m077"]
+ follower_arms:
+   main:
+     _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus
+     port: /dev/tty.usbmodem585A0081771
+     motors:
+       # name: (index, model)
+       shoulder_pan: [1, "xl430-w250"]
+       shoulder_lift: [2, "xl430-w250"]
+       elbow_flex: [3, "xl330-m288"]
+       wrist_flex: [4, "xl330-m288"]
+       wrist_roll: [5, "xl330-m288"]
+       gripper: [6, "xl330-m288"]
+ cameras:
+   logitech:
+     _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera
+     camera_index: 0
+     fps: 30
+     width: 640
+     height: 480
+   laptop:
+     _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera
+     camera_index: 3
+     fps: 30
+     width: 640
+     height: 480
+ # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
+ # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
+ # the number of motors in your follower arms.
+ max_relative_target: null
+ # Sets the leader arm in torque mode with the gripper motor set to this angle. This makes it possible
+ # to squeeze the gripper and have it spring back to an open position on its own.
+ gripper_open_degree: 35.156
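Both files use Hydra's `_target_` convention: each node names a class that is constructed with its sibling keys as arguments. A minimal sketch of turning `koch.yaml` into a live robot object, assuming `hydra-core` and `omegaconf` are installed; the `connect()` call is an assumed setup step, and lerobot's own scripts (e.g. `control_robot.py`) normally handle instantiation for you:

```python
# Hypothetical sketch: instantiating koch.yaml through Hydra's _target_ mechanism.
# lerobot's scripts normally do this internally; connect() is an assumed API.
from omegaconf import OmegaConf
from hydra.utils import instantiate

cfg = OmegaConf.load("lerobot_configs/koch.yaml")

# Recursively builds the KochRobot, its leader/follower DynamixelMotorsBus
# instances, and both OpenCVCamera objects from their _target_ entries.
robot = instantiate(cfg)
robot.connect()  # assumed: opens the serial ports and camera streams
```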