After DCP training, distill the Previewer with DCP using `train_previewer_lora.py`:

```shell
accelerate launch --num_processes <num_of_gpus> train_previewer_lora.py \
    --output_dir <your/output/path> \
    --train_data_dir <your/data/path> \
    --logging_dir <your/logging/path> \
    --pretrained_model_name_or_path <path/to/pretrained/model> \
    --feature_extractor_path <path/to/feature/extractor> \
    --pretrained_adapter_model_path <path/to/dcp/adapter> \
    --losses_config_path config_files/losses.yaml \
    --data_config_path config_files/IR_dataset.yaml \
    --save_only_adapter \
    --gradient_checkpointing \
    --num_train_timesteps 1000 \
    --num_ddim_timesteps 50 \
    --lora_alpha 1 \
    --mixed_precision fp16 \
    --train_batch_size 32 \
    --vae_encode_batch_size 16 \
    --gradient_accumulation_steps 1 \
    --learning_rate 1e-4 \
    --lr_warmup_steps 1000 \
    --lr_scheduler cosine \
    --lr_num_cycles 1 \
    --resume_from_checkpoint latest
```
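For concreteness, below is a hypothetical filled-in invocation. The GPU count, output/data/logging directories, base model, feature extractor, and adapter paths are illustrative assumptions, not values prescribed by the repository; substitute your own. Only the two `config_files/*.yaml` paths and the hyperparameters are taken from the command above.

```shell
# Hypothetical example run: 8 GPUs, local experiment directories, and
# assumed model identifiers -- replace every path below with your own setup.
accelerate launch --num_processes 8 train_previewer_lora.py \
    --output_dir ./experiments/previewer_lora \
    --train_data_dir ./data/IR_train \
    --logging_dir ./experiments/previewer_lora/logs \
    --pretrained_model_name_or_path ./checkpoints/base_model \
    --feature_extractor_path ./checkpoints/feature_extractor \
    --pretrained_adapter_model_path ./experiments/dcp_adapter \
    --losses_config_path config_files/losses.yaml \
    --data_config_path config_files/IR_dataset.yaml \
    --save_only_adapter \
    --gradient_checkpointing \
    --num_train_timesteps 1000 \
    --num_ddim_timesteps 50 \
    --lora_alpha 1 \
    --mixed_precision fp16 \
    --train_batch_size 32 \
    --vae_encode_batch_size 16 \
    --gradient_accumulation_steps 1 \
    --learning_rate 1e-4 \
    --lr_warmup_steps 1000 \
    --lr_scheduler cosine \
    --lr_num_cycles 1 \
    --resume_from_checkpoint latest
```

With `--resume_from_checkpoint latest`, re-running the same command after an interruption picks up from the most recent checkpoint in `--output_dir`, so the launch line can be reused verbatim for both fresh and resumed runs.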