# The learning rate below assumes a total training batch size of 2048:
# lr = 5e-4 * 2048 / 512 = 0.002

# schedule settings
optim_wrapper = dict(
    optimizer=dict(
        type='AdamW',
        lr=5e-4 * 2048 / 512,
        weight_decay=0.05,
        eps=1e-8,
        betas=(0.9, 0.999)),
    paramwise_cfg=dict(
        norm_decay_mult=0.0,
        bias_decay_mult=0.0,
        custom_keys={
            '.cls_token': dict(decay_mult=0.0),
            '.pos_embed': dict(decay_mult=0.0)
        }),
    clip_grad=dict(max_norm=1.0),
)

# learning policy
param_scheduler = [
    # warm-up learning rate scheduler
    dict(
        type='LinearLR',
        start_factor=1e-8 / 2e-3,
        by_epoch=True,
        end=70,
        # update by iteration
        convert_to_iter_based=True),
    # main learning rate scheduler
    dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True, begin=70)
]

# train, val, test settings
train_cfg = dict(by_epoch=True, max_epochs=300, val_interval=1)
val_cfg = dict()
test_cfg = dict()

# NOTE: `auto_scale_lr` automatically scales the LR
# based on the actual training batch size.
auto_scale_lr = dict(base_batch_size=1024)
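
# Illustrative sketch (an assumption, not part of the original schedule
# settings): approximate per-epoch learning rate implied by the two
# schedulers above, i.e. a linear warm-up from `start_factor * lr` to `lr`
# over the first 70 epochs, followed by cosine annealing down to `eta_min`
# until epoch 300. It ignores the finer per-iteration updates enabled by
# `convert_to_iter_based=True` and is meant only as a sanity check run in a
# separate script, e.g. `_expected_lr(0)`, `_expected_lr(70)`,
# `_expected_lr(300)`.
import math


def _expected_lr(epoch,
                 base_lr=5e-4 * 2048 / 512,
                 warmup_end=70,
                 max_epochs=300,
                 start_factor=1e-8 / 2e-3,
                 eta_min=1e-5):
    if epoch < warmup_end:
        # LinearLR: the multiplicative factor grows linearly from
        # `start_factor` to 1.0 over the warm-up epochs.
        factor = start_factor + (1.0 - start_factor) * epoch / warmup_end
        return base_lr * factor
    # CosineAnnealingLR: decays from `base_lr` to `eta_min` over the
    # remaining (max_epochs - warmup_end) epochs.
    progress = (epoch - warmup_end) / (max_epochs - warmup_end)
    return eta_min + (base_lr - eta_min) * 0.5 * (1 + math.cos(math.pi * progress))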