dh-mc committed on
Commit b2d230a · 1 Parent(s): 3a63ec4

Update llama3-8b_lora_sft_bf16-p1.yaml

llama-factory/config/llama3-8b_lora_sft_bf16-p1.yaml CHANGED
@@ -21,17 +21,17 @@ preprocessing_num_workers: 16
 
 ### output
 output_dir: saves/llama3-8b/lora/sft_bf16_p1_full
-logging_steps: 100
-save_steps: 2109
+logging_steps: 10
+save_steps: 88
 plot_loss: true
 overwrite_output_dir: true
 # resume_from_checkpoint: true
 
 ### train
-per_device_train_batch_size: 16
+per_device_train_batch_size: 32
 gradient_accumulation_steps: 8
 learning_rate: 1.0e-4
-num_train_epochs: 3.0
+num_train_epochs: 6.0
 lr_scheduler_type: cosine
 warmup_ratio: 0.1
 bf16: true
@@ -41,7 +41,7 @@ ddp_timeout: 180000000
 val_size: 0.1
 per_device_eval_batch_size: 1
 eval_strategy: steps
-eval_steps: 2109
+eval_steps: 88
 
 report_to: wandb
 run_name: llama3_8b_p1_full # optional
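
For context on the new step counts: save_steps and eval_steps count optimizer steps, and the number of optimizer steps per epoch scales inversely with the effective batch size (per_device_train_batch_size × gradient_accumulation_steps × number of GPUs). A minimal sketch of that arithmetic in Python, with the training-split size and GPU count as hypothetical placeholders since neither appears in this diff:

import math

# Hypothetical inputs -- the dataset size and GPU count are NOT shown in this commit.
num_train_samples = 90_000           # placeholder training-split size (after val_size: 0.1 is held out)
num_gpus = 4                         # placeholder

per_device_train_batch_size = 32     # new value in this commit (was 16)
gradient_accumulation_steps = 8      # unchanged

effective_batch = per_device_train_batch_size * gradient_accumulation_steps * num_gpus
steps_per_epoch = math.ceil(num_train_samples / effective_batch)

print(f"effective batch size: {effective_batch}")       # 1024 under these assumptions
print(f"optimizer steps per epoch: {steps_per_epoch}")  # ~88 under these assumptions

Under these assumed numbers, save_steps: 88 and eval_steps: 88 would checkpoint and evaluate roughly once per epoch; the actual cadence depends on the real dataset size and GPU count.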