Femboyuwu2000 committed
Commit 31ca753
1 Parent(s): 3afe9cf

Training in progress, step 7340, checkpoint

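For context: this checkpoint layout (adapter weights plus optimizer, scheduler, RNG and trainer state) is what the Hugging Face Trainer writes when it saves periodically during training. The sketch below only mirrors the saving cadence visible in trainer_state.json further down; the output path and everything else are placeholder assumptions, not the uploader's actual script.

```python
from transformers import TrainingArguments

# Hypothetical TrainingArguments reconstructed from the bookkeeping in
# last-checkpoint/trainer_state.json; anything not listed there
# (output_dir in particular) is an assumption.
args = TrainingArguments(
    output_dir="output",              # placeholder
    per_device_train_batch_size=8,    # "train_batch_size": 8
    num_train_epochs=2,               # "num_train_epochs": 2
    logging_steps=20,                 # "logging_steps": 20
    save_steps=20,                    # "save_steps": 20 -> a checkpoint every 20 optimizer steps
    eval_steps=500,                   # "eval_steps": 500
)
print(args.save_steps, args.num_train_epochs)
```

With save_steps=20, a step-7340 checkpoint like this one is simply the latest in a long series of periodic saves.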
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9207586f7a1d2ba4556ac81bf96016c6a6bdc9668c406e8e8639d19d9692cdd0
+oid sha256:00ce3f623d20bb63874635e9ab5549586003cb7bbe97de574a4d3b26fcade91b
 size 13982248
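Each tracked file here is stored through Git LFS, so the diff only swaps the pointer's sha256 oid while the size stays the same. To confirm that a locally downloaded adapter_model.safetensors matches the new pointer, a minimal check (the local path is an assumption) is:

```python
import hashlib

def lfs_oid(path: str, chunk_size: int = 1 << 20) -> str:
    """Sha256 digest of a file, i.e. the value Git LFS stores as the pointer oid."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical local copy of the updated adapter weights.
assert lfs_oid("last-checkpoint/adapter_model.safetensors") == (
    "00ce3f623d20bb63874635e9ab5549586003cb7bbe97de574a4d3b26fcade91b"
)
```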
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4ed2a524baac54ff9e8afed21d2b42af94fa038aef5ded6b599c9e972dd0477d
+oid sha256:fbff368016dd31d4c5854dfe282709681d4bc7645f54c79d0965534024463b19
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4e665c9777fc69706bdf0b3707a06c85d4cb2b43fbb10521c503874813f13d69
+oid sha256:89e0026633dcf0577197d5da032bd4f6f6125e7e640e504383869bec01d526ec
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2e2282342021370a0027cf32dfaa491c0a05194df75cc08c2cc5453789cc6933
+oid sha256:325806e4aae5b821ce1ac956faabdb099b3ea0e4420f8dfcf52b72adab91eb65
 size 1064
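The three PyTorch state files updated above (optimizer.pt, scheduler.pt, rng_state.pth) are what let training resume exactly at step 7340 instead of restarting the epoch. A quick way to peek at them from a local clone of the repository (paths assumed; in practice you would pass resume_from_checkpoint="last-checkpoint" to Trainer.train):

```python
import torch

# Inspect the serialized training state from a local clone (paths assumed).
# weights_only=False because these files hold plain Python dicts, not just tensors.
optimizer_state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu", weights_only=False)
scheduler_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load("last-checkpoint/rng_state.pth", map_location="cpu", weights_only=False)

print(sorted(optimizer_state.keys()))   # typically "state" and "param_groups"
print(scheduler_state)                  # LR scheduler state dict
print(type(rng_state))                  # RNG snapshots used to make resumption deterministic
```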
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.5856,
+  "epoch": 0.5872,
   "eval_steps": 500,
-  "global_step": 7320,
+  "global_step": 7340,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2569,6 +2569,13 @@
       "learning_rate": 2.196091871970103e-05,
       "loss": 3.5508,
       "step": 7320
+    },
+    {
+      "epoch": 0.59,
+      "grad_norm": 25.02892303466797,
+      "learning_rate": 2.1917848793399926e-05,
+      "loss": 3.395,
+      "step": 7340
     }
   ],
   "logging_steps": 20,
@@ -2576,7 +2583,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.7308825951961088e+16,
+  "total_flos": 1.7355049482780672e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null