Femboyuwu2000 committed
Commit 0bff9b8
Parent: 994244d

Training in progress, step 7360, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:00ce3f623d20bb63874635e9ab5549586003cb7bbe97de574a4d3b26fcade91b
+oid sha256:d43dac100a478777d7327b0006c122763d2abb80886baee0035eb613a9d90058
 size 13982248
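adapter_model.safetensors is the adapter weights file written at each checkpoint, which is consistent with a parameter-efficient (PEFT/LoRA-style) fine-tune. A minimal sketch for loading such an adapter for inference, under the assumption that it really is a PEFT adapter and with the base model name as a hypothetical placeholder (the base model is not shown in this diff):

from peft import PeftModel
from transformers import AutoModelForCausalLM

BASE_MODEL = "base-model-id-goes-here"  # placeholder: the base model used for this run is not recorded in this diff

# PeftModel.from_pretrained expects adapter_config.json next to adapter_model.safetensors
# in the checkpoint directory (assumed here to be a local copy of last-checkpoint/).
base = AutoModelForCausalLM.from_pretrained(BASE_MODEL)
model = PeftModel.from_pretrained(base, "last-checkpoint")
model.eval()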
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fbff368016dd31d4c5854dfe282709681d4bc7645f54c79d0965534024463b19
+oid sha256:240d60de975c0945aa74cf4143609d1521ffdbf8a677ed16031436a2753ea85a
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:89e0026633dcf0577197d5da032bd4f6f6125e7e640e504383869bec01d526ec
+oid sha256:cde4bd1f352df2f587f030606a2937e2e4a2a1f27843acc3a420d91ed9691940
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:325806e4aae5b821ce1ac956faabdb099b3ea0e4420f8dfcf52b72adab91eb65
+oid sha256:7e5b70b0bfaafc0eb8410fbf7357e984fbd2946bce340f9b24d9e51bab6242d5
 size 1064
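Each file above is stored via Git LFS, so the diff only shows its pointer changing: the pointer records the spec version, the sha256 oid of the real payload, and its byte size. As a rough illustration (assuming the repository has been cloned and the payloads fetched with git lfs pull), a local file can be checked against its pointer like this, using the new adapter pointer values from above:

import hashlib
import os

PATH = "last-checkpoint/adapter_model.safetensors"
EXPECTED_OID = "d43dac100a478777d7327b0006c122763d2abb80886baee0035eb613a9d90058"  # from the updated pointer
EXPECTED_SIZE = 13982248  # bytes, from the pointer's size field

def sha256_of(path, chunk_size=1 << 20):
    # Stream in chunks so large checkpoint files never have to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

assert os.path.getsize(PATH) == EXPECTED_SIZE, "size does not match the LFS pointer"
assert sha256_of(PATH) == EXPECTED_OID, "sha256 does not match the LFS pointer"
print("adapter_model.safetensors matches its Git LFS pointer")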
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.5872,
+  "epoch": 0.5888,
   "eval_steps": 500,
-  "global_step": 7340,
+  "global_step": 7360,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2576,6 +2576,13 @@
       "learning_rate": 2.1917848793399926e-05,
       "loss": 3.395,
       "step": 7340
+    },
+    {
+      "epoch": 0.59,
+      "grad_norm": 25.45415687561035,
+      "learning_rate": 2.187470630213845e-05,
+      "loss": 3.4692,
+      "step": 7360
     }
   ],
   "logging_steps": 20,
@@ -2583,7 +2590,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.7355049482780672e+16,
+  "total_flos": 1.7411508458323968e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null