top.booster: auto
top.checkpoint_path: []
top.finetuning_type: lora
top.model_name: Qwen2-7B-Chat
top.quantization_bit: none
top.quantization_method: bitsandbytes
top.rope_scaling: none
top.template: qwen
top.visual_inputs: false
train.additional_target: ''
train.badam_mode: layer
train.badam_switch_interval: 50
train.badam_switch_mode: ascending
train.badam_update_ratio: 0.05
train.batch_size: 4
train.compute_type: bf16
train.create_new_adapter: false
train.cutoff_len: 1024
train.dataset:
- cangjie
train.dataset_dir: data
train.ds_offload: false
train.ds_stage: none
train.freeze_extra_modules: ''
train.freeze_trainable_layers: 2
train.freeze_trainable_modules: all
train.galore_rank: 16
train.galore_scale: 0.25
train.galore_target: all
train.galore_update_interval: 200
train.gradient_accumulation_steps: 8
train.learning_rate: '1.0e-4'
train.logging_steps: 2
train.lora_alpha: 32
train.lora_dropout: 0.1
train.lora_rank: 8
train.lora_target: q_proj, k_proj, v_proj, o_proj, gate_proj, up_proj, down_proj
train.loraplus_lr_ratio: 16
train.lr_scheduler_type: cosine
train.mask_history: false
train.max_grad_norm: '1.0'
train.max_samples: '100000'
train.neat_packing: false
train.neftune_alpha: 0
train.num_train_epochs: '3.0'
train.optim: adamw_torch
train.packing: true
train.ppo_score_norm: false
train.ppo_whiten_rewards: false
train.pref_beta: 0.1
train.pref_ftx: 0
train.pref_loss: sigmoid
train.report_to: false
train.resize_vocab: false
train.reward_model: null
train.save_steps: 5000
train.shift_attn: false
train.train_on_prompt: false
train.training_stage: Pre-Training
train.use_badam: false
train.use_dora: false
train.use_galore: false
train.use_llama_pro: false
train.use_pissa: false
train.use_rslora: false
train.val_size: 0.05
train.warmup_steps: 2
|
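These are the hyperparameters as saved by LLaMA-Factory's web UI (LLaMA Board); the `top.*`/`train.*` prefixes are the UI's internal keys, not arguments you pass on the command line. With `batch_size: 4` and `gradient_accumulation_steps: 8`, the effective batch size is 4 × 8 = 32 samples per optimizer step per device. Below is a minimal sketch of what an equivalent flat config for `llamafactory-cli train` might look like; the `model_name_or_path` and `output_dir` values are assumptions, so adjust them to your checkpoint and save location.

```yaml
### model -- Hub path is an assumption: the web UI entry reads Qwen2-7B-Chat,
### while the official Hugging Face release is Qwen/Qwen2-7B-Instruct
model_name_or_path: Qwen/Qwen2-7B-Instruct
template: qwen

### method: continued pre-training (stage "pt") with a LoRA adapter
stage: pt
do_train: true
finetuning_type: lora
lora_rank: 8
lora_alpha: 32
lora_dropout: 0.1
lora_target: q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj
loraplus_lr_ratio: 16

### dataset
dataset: cangjie
dataset_dir: data
cutoff_len: 1024
max_samples: 100000
packing: true
val_size: 0.05

### training: effective batch size is 4 x 8 = 32 per device
per_device_train_batch_size: 4
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_steps: 2
max_grad_norm: 1.0
bf16: true
optim: adamw_torch
logging_steps: 2
save_steps: 5000

### output -- assumed path
output_dir: saves/Qwen2-7B-Chat/lora/pretrain
```

Launched with `llamafactory-cli train <path-to-this-yaml>`, this should reproduce the same run outside the web UI.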
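For `dataset: cangjie` with `dataset_dir: data` to resolve, LLaMA-Factory expects the dataset to be registered in `data/dataset_info.json`. A minimal sketch for a plain-text pre-training corpus, assuming (hypothetically) that the file is named `cangjie.json` and holds a list of objects with a `text` field:

```json
{
  "cangjie": {
    "file_name": "cangjie.json",
    "columns": {
      "prompt": "text"
    }
  }
}
```

Mapping `prompt` to the `text` column is the convention LLaMA-Factory's bundled pre-training corpora (e.g. `c4_demo`) use for the `pt` stage.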