Femboyuwu2000 committed
Commit: b3ee632
1 Parent(s): d41377e

Training in progress, step 3560, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e2eaaf1a4b9929034d74b8c50d19a6d8d53c2d26a4f6d02da1080dda4df0a360
+oid sha256:a279bb382c19f7087584582ba555522cb002ccf80a44bbe5e3c597b684301c82
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e4d9e80974a7703d029c54544f000cb77fd2fc62ef17eaadf206357849796c63
+oid sha256:d8775de7669719bfc6cda900db4b80facb296a4bc38c35ee1dd063ca4e74c637
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6dcf5824f0c632f86069df2e9f17325eeacf000d5c2583772ef40a9c05221767
+oid sha256:6a23498417c752807362f2bdcefaa146d86c23127f42029519379996e216d6fc
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:09908f90e07d4149404a47b02e836c6f4c444f056f9b8d3bbb5b2be38110c203
+oid sha256:a50c3936e13aaa4712fbbe35f036d9dd60837f2dcabd664a11c642adeb650ebf
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.2832,
+  "epoch": 0.2848,
   "eval_steps": 500,
-  "global_step": 3540,
+  "global_step": 3560,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1246,6 +1246,13 @@
       "learning_rate": 2.833185708216743e-05,
       "loss": 3.53,
       "step": 3540
+    },
+    {
+      "epoch": 0.28,
+      "grad_norm": 27.148874282836914,
+      "learning_rate": 2.8309521707095835e-05,
+      "loss": 3.4933,
+      "step": 3560
     }
   ],
   "logging_steps": 20,
@@ -1253,7 +1260,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 8383944629747712.0,
+  "total_flos": 8430233772392448.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null