Femboyuwu2000 committed
Commit efae3bf
1 Parent(s): dee540b

Training in progress, step 2640, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:102a934b9c2b16bd152b8fb45fb756ed8a39ee8f7db8b00ff347bd79c86fbef6
+oid sha256:54c37478ba6579cff2d0646434ea7d1892cb48a4219f90126afbdf7bde329abb
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:352473e3f8f3dae7afcda846881ada181c756f061e5a3d9beb1d4a3e068eeb5d
+oid sha256:8b0e5872ff2188ce6710b4b1015cc3fcaf69768dd90c4fa6ab9ac4aecfb9a52a
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fcc2667f22c446f99eb24a0261709aeedb26c632fa56415781522ee28f079a36
+oid sha256:27177ac056c4d9dbf19f5cb10b2bae9ee9392da9370d73a04aa8b3b9bc1e770d
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6b7e6b2fa8982dc42ac672b8fc6e736397d0e1ef6af55fabc97bdaa24968e490
+oid sha256:ea196c4dba55ede2883451624fdfc54856529c0e1f9c30be6a7f36bcba4e1179
 size 1064
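
Each checkpoint binary above is stored as a Git LFS pointer, so the diff only touches the three-line pointer (version, sha256 oid, size in bytes); the actual blob lives in LFS storage and is fetched separately (e.g. with git lfs pull). Below is a minimal sketch of checking a downloaded file against its pointer; the helper names are hypothetical and not part of this repo.

# verify_lfs_pointer.py -- hypothetical helper, not part of this repository.
# Checks that a downloaded file matches the oid/size recorded in its
# Git LFS pointer (the three-line "version / oid / size" format above).
import hashlib
from pathlib import Path

def parse_pointer(pointer_text: str) -> dict:
    # Pointer lines look like "oid sha256:<hex>" and "size <bytes>".
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    return {
        "oid": fields["oid"].split(":", 1)[1],
        "size": int(fields["size"]),
    }

def verify(blob_path: str, pointer_text: str) -> bool:
    expected = parse_pointer(pointer_text)
    data = Path(blob_path).read_bytes()
    return (
        len(data) == expected["size"]
        and hashlib.sha256(data).hexdigest() == expected["oid"]
    )

# Example with the new adapter weights from this commit:
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:54c37478ba6579cff2d0646434ea7d1892cb48a4219f90126afbdf7bde329abb
size 13982248
"""
print(verify("last-checkpoint/adapter_model.safetensors", pointer))
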
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.2096,
+  "epoch": 0.2112,
   "eval_steps": 500,
-  "global_step": 2620,
+  "global_step": 2640,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -924,6 +924,13 @@
       "learning_rate": 2.920460247795056e-05,
       "loss": 3.621,
       "step": 2620
+    },
+    {
+      "epoch": 0.21,
+      "grad_norm": 29.75731086730957,
+      "learning_rate": 2.918891825699857e-05,
+      "loss": 3.6067,
+      "step": 2640
     }
   ],
   "logging_steps": 20,
@@ -931,7 +938,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 6207501971718144.0,
+  "total_flos": 6257137317445632.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null