Femboyuwu2000 committed on
Commit 6fe79b7
1 Parent(s): fb44229

Training in progress, step 18580, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f3ead06ca64c097b468e4848a48dfe313e10e066882daadec4733b392f7d027f
+oid sha256:97e8843eb054c3ad74976f86442c8477d9387361a93f5b5aa761f197a5bc9063
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7406298021aee6db4278f2ee9543df6b8e99529b9e938b2035da9248006c804c
+oid sha256:8c96818bbdf189c58ef600d3a975e4800cffb8d3a7249ab57a213e50b7404224
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:47f34dc38cec7b95d2822f7dac5953198d47e6fd7a3a22a93769d88071de8848
+oid sha256:5412b1babcd6e1b6448c556f65ec1c55adcb2b9230a5765d4a15f21fea9e3a6f
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b581f3feb3bd9bc8f3fdf9e12a5bd849da1459f5b42fe0464a7236b0311e0814
+oid sha256:0c99a29b95a012bd096c718ca62d3947568602c45a86a3f3f0f2a57277532305
 size 1064
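
The four files above are stored as Git LFS pointers, so each hunk only swaps the sha256 oid of the underlying blob while the reported size stays the same. Below is a minimal sketch for inspecting such a pointer from a local checkout, assuming the file on disk is still the pointer text (not the resolved binary) and using an illustrative path:

# Minimal sketch: parse a Git LFS pointer file ("key value" lines) into a dict.
# Assumes the file has not been smudged into its binary form; the path below
# is illustrative.
from pathlib import Path


def read_lfs_pointer(path: str) -> dict:
    """Return the pointer fields, e.g. {'version': ..., 'oid': ..., 'size': ...}."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        if line.strip():
            key, _, value = line.partition(" ")
            fields[key] = value
    return fields


pointer = read_lfs_pointer("last-checkpoint/adapter_model.safetensors")
print(pointer["oid"], pointer["size"])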
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.2626551377665822,
+  "epoch": 0.26293817132021,
   "eval_steps": 500,
-  "global_step": 18560,
+  "global_step": 18580,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -6503,6 +6503,13 @@
       "learning_rate": 6.766473629355452e-07,
       "loss": 3.5451,
       "step": 18560
+    },
+    {
+      "epoch": 0.26,
+      "grad_norm": 19.47591209411621,
+      "learning_rate": 6.580644556884702e-07,
+      "loss": 3.5458,
+      "step": 18580
     }
   ],
   "logging_steps": 20,
@@ -6510,7 +6517,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 20,
-  "total_flos": 4.033483670627942e+16,
+  "total_flos": 4.037882943504384e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null