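# Run configuration (Hydra/OmegaConf-style YAML dump) for multi-dataset trajectory
# pretraining of a transformer policy. Groups with a `_target_` key name the Python
# class to instantiate; `${...}` entries are OmegaConf interpolations.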
seed: 0
output_dir: output/08_04_2024_18_13_30_2327515
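# Pretraining data mixture: RLDS-style robot manipulation datasets (the names match
# the Open X-Embodiment collection); dset_w_temperature below presumably controls the
# sampling temperature across these domains.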
domains: austin_sailor_dataset_converted_externally_to_rlds, stanford_hydra_dataset_converted_externally_to_rlds,
  austin_buds_dataset_converted_externally_to_rlds, austin_sirius_dataset_converted_externally_to_rlds,
  berkeley_mvp_converted_externally_to_rlds, berkeley_rpt_converted_externally_to_rlds,
  iamlab_cmu_pickup_insert_converted_externally_to_rlds, utaustin_mutex, imperialcollege_sawyer_wrist_cam,
  stanford_mask_vit_converted_externally_to_rlds, language_table, kuka, bc_z, robo_net,
  dlr_sara_pour_converted_externally_to_rlds, stanford_robocook_converted_externally_to_rlds,
  cmu_play_fusion, bridge, furniture_bench_dataset_converted_externally_to_rlds, ucsd_pick_and_place_dataset_converted_externally_to_rlds,
  usc_cloth_sim_converted_externally_to_rlds, stanford_kuka_multimodal_dataset_converted_externally_to_rlds,
  roboturk, kaist_nonprehensile_converted_externally_to_rlds, asu_table_top_converted_externally_to_rlds,
  utokyo_xarm_pick_and_place_converted_externally_to_rlds, berkeley_cable_routing
log_dir: output/08_04_2024_18_13_30_2327515
debug_distributed: false
wb_tag: default
wb_cont_run: 24yg5gb8
log_interval: 10
script_name: run_resnet_30dataset_traj10000_embed256_batch2048
save_wb_checkpoint: true
slurm_job_id: '26140239'
effective_total_epochs: 100
effective_batch_size: 256
epoch_size: 10
total_num_traj: 0
total_num_sample: 0
rank: 0
gpu: 0
task_per_gpu: 1
world_size: 64
debug_submitit: false
ngpus: 8
nodes: 8
timeout: 4320
job_dir: logs/
partition: learnlab
use_volta32: true
comment: ''
resume: logs/
dist_url: file:///checkpoint/xinleic/experiments/0a3d948fc6f644428ef132eb4f3a0d15_init
dist_on_itp: false
local_rank: 1
distributed: true
dist_backend: nccl
dset_w_temperature: 2.0
dataset_shuffle: true
dataset_groups: ''
nodelist: learnlab,learnfair,scavenge
fsdp: false
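# Per-trajectory dataset settings shared across domains: observation/action horizons,
# train/val split, and precomputed ResNet image features (precompute_feat).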
dataset:
  _target_: hpt_pretrain.dataset.traj_dataset.TrajDataset
  horizon: 5
  val_ratio: 0.1
  pad_after: 0
  precompute_feat: true
  image_encoder: resnet
  episode_cnt: 10000
  step_cnt: 10000000
  data_augmentation: false
  use_disk: true
  pad_before: 0
  data_ratio: 1
  action_horizon: 8
  observation_horizon: 4
  dataset_postfix: _traj100000
  dataset_encoder_postfix: _resnet
  use_multiview: false
  normalize_state: true
  use_heldout_dataset: true
  heldout_dataset: false
  regenerate: false
  continue_generate: false
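# Model definition: a shared transformer trunk (`network`), per-modality input stems
# (`stem`, image and state, with cross-attention settings), and an MLP action head
# (`head`). Stem and head reference dataset options via `${...}` interpolation.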
network:
  _target_: hpt_pretrain.models.policy.Policy
  embed_dim: 256
  num_blocks: 16
  num_heads: 8
  use_modality_embedding: true
  use_domain_embedding: false
  token_postprocessing: mean
  weight_init_style: pytorch
  drop_path: 0.1
  mae_loss_scale: 0.0
  masked_autoencoding: false
stem:
  modalities:
  - image
  - state
  modality_embed_dim: 256
  normalize_state: ${dataset.normalize_state}
  state_embedding_dim: 1
  image_encoder: ${dataset.image_encoder}
  crossattn_dim_head: 64
  crossattn_heads: 8
  crossattn_modality_dropout: 0.1
  observation_horizon: ${dataset.observation_horizon}
  random_horizon_masking: true
  add_pos_embedding_to_state: false
  num_blocks: 1
  crossattn_latent:
    image: 16
    state: 16
  image:
    _target_: hpt_pretrain.models.policy_stem.MLP
    input_dim: 512
    output_dim: 256
    widths:
    - 128
    num_of_copy: 1
  state:
    _target_: hpt_pretrain.models.policy_stem.MLP
    input_dim: 7
    output_dim: 256
    widths:
    - 128
head:
  _target_: hpt_pretrain.models.policy_head.MLP
  input_dim: 256
  tanh_end: true
  output_dim: 48
  dropout: true
  widths:
  - 256
  - 128
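# DataLoader settings: `dataloader`/`val_dataloader` for single-process runs;
# `ddp_dataloader`/`ddp_val_dataloader` presumably used when distributed training is enabled.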
dataloader:
  batch_size: 32
  num_workers: 1
  pin_memory: false
  persistent_workers: false
  drop_last: true
val_dataloader:
  num_workers: 1
  pin_memory: false
  persistent_workers: false
ddp_dataloader:
  num_workers: 16
  pin_memory: false
  persistent_workers: false
  drop_last: false
  prefetch_factor: 2
ddp_val_dataloader:
  num_workers: 8
  pin_memory: false
  persistent_workers: false
  drop_last: false
  prefetch_factor: 2
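# AdamW optimizer with a brief warmup phase (`warmup_lr`: 1000 steps starting from lr 1e-10);
# `nontrunk_lr_scale` presumably scales the learning rate of non-trunk (stem/head) parameters.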
optimizer:
  _target_: torch.optim.AdamW
  lr: 0.001
  eps: 1.0e-06
  weight_decay: 0.05
optimizer_misc:
  nontrunk_lr_scale: 0.5
warmup_lr:
  lr: 1.0e-10
  step: 1000
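# Training schedule; train.total_iters matches lr_scheduler.T_max below.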
train:
  total_epochs: 3000
  total_iters: 80000
  epoch_iters: 1000
  validation_iters: 100
  use_accumulation: false
  pretrained_dir: ''
  max_validation_size: 10
  accumulate_batch_step: 1
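# Cosine learning-rate decay to eta_min; T_max equals train.total_iters, i.e. one full
# cosine cycle over the run, assuming the scheduler is stepped once per iteration.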
lr_scheduler:
  _target_: torch.optim.lr_scheduler.CosineAnnealingLR
  T_max: 80000
  eta_min: 1.0e-06
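# A minimal sketch (an illustration, not the repository's actual training code) of how
# `_target_`-style groups such as `optimizer` and `lr_scheduler` are typically consumed
# with Hydra/OmegaConf; the file name "config.yaml" and the bare Policy constructor call
# are assumptions:
#
#   import hydra.utils
#   from omegaconf import OmegaConf
#
#   cfg = OmegaConf.load("config.yaml")            # this dump
#   model = hydra.utils.instantiate(cfg.network)   # assumes Policy takes only these kwargs
#   optimizer = hydra.utils.instantiate(cfg.optimizer, params=model.parameters())
#   scheduler = hydra.utils.instantiate(cfg.lr_scheduler, optimizer=optimizer)
#
#   for step in range(cfg.train.total_iters):
#       ...                                        # forward / backward / optimizer.step()
#       scheduler.step()                           # cosine decay over T_max iterations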