liruiw committed
Commit d9b8fd5
1 Parent(s): a880d09

Upload 2 files

Files changed (2)
  1. config.yaml +168 -0
  2. trunk.pth +3 -0
config.yaml ADDED
@@ -0,0 +1,168 @@
+ seed: 0
+ output_dir: output/08_04_2024_18_13_30_2327515
+ domains: austin_sailor_dataset_converted_externally_to_rlds, stanford_hydra_dataset_converted_externally_to_rlds,
+   austin_buds_dataset_converted_externally_to_rlds, austin_sirius_dataset_converted_externally_to_rlds,
+   berkeley_mvp_converted_externally_to_rlds, berkeley_rpt_converted_externally_to_rlds,
+   iamlab_cmu_pickup_insert_converted_externally_to_rlds, utaustin_mutex, imperialcollege_sawyer_wrist_cam,
+   stanford_mask_vit_converted_externally_to_rlds, language_table, kuka, bc_z, robo_net,
+   dlr_sara_pour_converted_externally_to_rlds, stanford_robocook_converted_externally_to_rlds,
+   cmu_play_fusion, bridge, furniture_bench_dataset_converted_externally_to_rlds, ucsd_pick_and_place_dataset_converted_externally_to_rlds,
+   usc_cloth_sim_converted_externally_to_rlds, stanford_kuka_multimodal_dataset_converted_externally_to_rlds,
+   roboturk, kaist_nonprehensile_converted_externally_to_rlds, asu_table_top_converted_externally_to_rlds,
+   utokyo_xarm_pick_and_place_converted_externally_to_rlds, berkeley_cable_routing
+ log_dir: output/08_04_2024_18_13_30_2327515
+ debug_distributed: false
+ wb_tag: default
+ wb_cont_run: 24yg5gb8
+ log_interval: 10
+ script_name: run_resnet_30dataset_traj10000_embed256_batch2048
+ save_wb_checkpoint: true
+ slurm_job_id: '26140239'
+ effective_total_epochs: 100
+ effective_batch_size: 256
+ epoch_size: 10
+ total_num_traj: 0
+ total_num_sample: 0
+ rank: 0
+ gpu: 0
+ task_per_gpu: 1
+ world_size: 64
+ debug_submitit: false
+ ngpus: 8
+ nodes: 8
+ timeout: 4320
+ job_dir: logs/
+ partition: learnlab
+ use_volta32: true
+ comment: ''
+ resume: logs/
+ dist_url: file:///checkpoint/xinleic/experiments/0a3d948fc6f644428ef132eb4f3a0d15_init
+ dist_on_itp: false
+ local_rank: 1
+ distributed: true
+ dist_backend: nccl
+ dset_w_temperature: 2.0
+ dataset_shuffle: true
+ dataset_groups: ''
+ nodelist: learnlab,learnfair,scavenge
+ fsdp: false
+ dataset:
+   _target_: hpt_pretrain.dataset.traj_dataset.TrajDataset
+   horizon: 5
+   val_ratio: 0.1
+   pad_after: 0
+   precompute_feat: true
+   image_encoder: resnet
+   episode_cnt: 10000
+   step_cnt: 10000000
+   data_augmentation: false
+   use_disk: true
+   pad_before: 0
+   data_ratio: 1
+   action_horizon: 8
+   observation_horizon: 4
+   dataset_postfix: _traj100000
+   dataset_encoder_postfix: _resnet
+   use_multiview: false
+   normalize_state: true
+   use_heldout_dataset: true
+   heldout_dataset: false
+   regenerate: false
+   continue_generate: false
+ network:
+   _target_: hpt_pretrain.models.policy.Policy
+   embed_dim: 256
+   num_blocks: 16
+   num_heads: 8
+   use_modality_embedding: true
+   use_domain_embedding: false
+   token_postprocessing: mean
+   weight_init_style: pytorch
+   drop_path: 0.1
+   mae_loss_scale: 0.0
+   masked_autoencoding: false
+ stem:
+   modalities:
+   - image
+   - state
+   modality_embed_dim: 256
+   normalize_state: ${dataset.normalize_state}
+   state_embedding_dim: 1
+   image_encoder: ${dataset.image_encoder}
+   crossattn_dim_head: 64
+   crossattn_heads: 8
+   crossattn_modality_dropout: 0.1
+   observation_horizon: ${dataset.observation_horizon}
+   random_horizon_masking: true
+   add_pos_embedding_to_state: false
+   num_blocks: 1
+   crossattn_latent:
+     image: 16
+     state: 16
+   image:
+     _target_: hpt_pretrain.models.policy_stem.MLP
+     input_dim: 512
+     output_dim: 256
+     widths:
+     - 128
+     num_of_copy: 1
+   state:
+     _target_: hpt_pretrain.models.policy_stem.MLP
+     input_dim: 7
+     output_dim: 256
+     widths:
+     - 128
+ head:
+   _target_: hpt_pretrain.models.policy_head.MLP
+   input_dim: 256
+   tanh_end: true
+   output_dim: 48
+   dropout: true
+   widths:
+   - 256
+   - 128
+ dataloader:
+   batch_size: 32
+   num_workers: 1
+   pin_memory: false
+   persistent_workers: false
+   drop_last: true
+ val_dataloader:
+   num_workers: 1
+   pin_memory: false
+   persistent_workers: false
+ ddp_dataloader:
+   num_workers: 16
+   pin_memory: false
+   persistent_workers: false
+   drop_last: false
+   prefetch_factor: 2
+ ddp_val_dataloader:
+   num_workers: 8
+   pin_memory: false
+   persistent_workers: false
+   drop_last: false
+   prefetch_factor: 2
+ optimizer:
+   _target_: torch.optim.AdamW
+   lr: 0.001
+   eps: 1.0e-06
+   weight_decay: 0.05
+ optimizer_misc:
+   nontrunk_lr_scale: 0.5
+ warmup_lr:
+   lr: 1.0e-10
+   step: 1000
+ train:
+   total_epochs: 3000
+   total_iters: 80000
+   epoch_iters: 1000
+   validation_iters: 100
+   use_accumulation: false
+   pretrained_dir: ''
+   max_validation_size: 10
+   accumulate_batch_step: 1
+ lr_scheduler:
+   _target_: torch.optim.lr_scheduler.CosineAnnealingLR
+   T_max: 80000
+   eta_min: 1.0e-06
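
The config follows the Hydra/OmegaConf convention: each `_target_` entry names a class to instantiate, and the sibling keys become its constructor arguments. Below is a minimal sketch of consuming the file, assuming it is loaded with OmegaConf, the `hpt_pretrain` package is importable, and `Policy` accepts the keys under `network` as keyword arguments (the repo's actual entry point may differ).

    import hydra.utils
    from omegaconf import OmegaConf

    # Load the uploaded config and resolve interpolations such as
    # ${dataset.observation_horizon} and ${dataset.normalize_state}.
    cfg = OmegaConf.load("config.yaml")
    OmegaConf.resolve(cfg)

    # Instantiate the trunk policy from its _target_
    # (hpt_pretrain.models.policy.Policy); the remaining keys under
    # `network` are passed through as keyword arguments.
    policy = hydra.utils.instantiate(cfg.network)

    print(cfg.network.embed_dim, cfg.train.total_iters)  # 256 80000
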
trunk.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe69015f0e7456423d8db2f2a8a32be0399487277fb1253e30b3959de8119110
+ size 50642858
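
trunk.pth is stored via Git LFS, so the three lines above are only the pointer file recording the object id and size, not the weights themselves. A minimal sketch of fetching and checking the real checkpoint, assuming access through the `huggingface_hub` client; the repo id below is a placeholder, not taken from this commit.

    import hashlib
    import torch
    from huggingface_hub import hf_hub_download

    # Placeholder repo id -- replace with the repository this commit belongs to.
    path = hf_hub_download(repo_id="liruiw/<repo-id>", filename="trunk.pth")

    # Optionally verify the download against the pointer's oid and size.
    blob = open(path, "rb").read()
    assert len(blob) == 50642858
    assert hashlib.sha256(blob).hexdigest() == (
        "fe69015f0e7456423d8db2f2a8a32be0399487277fb1253e30b3959de8119110"
    )

    # The file holds the pretrained trunk weights; load on CPU for inspection.
    state_dict = torch.load(path, map_location="cpu")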