Femboyuwu2000 committed
Commit 7cc5b70 (1 parent: 4a646ea)

Training in progress, step 880, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a7261173c6cc37f0af738ac4f62cf733cb9f189924cfe1daf21367523618d8ed
+oid sha256:122f732830a82f1a7a3a7425537e862e7ee1d697cd8187d0044646345b434143
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cac75987c36eb3aed135202de0ad0e205dc153c722e06aec3f39d75285a9d2aa
+oid sha256:aa28364534d96be46cefdbe1b76cf7a40940e1cf04c4f5a6cf3ccbb915f922bc
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9216bd0d63331af67d42e3e886e98fe378c7f9e63ccf40fc65dc43479bba4269
+oid sha256:dea0e7f5f0c188c2ee6e729843388c15e47a905456029356105d7ca29f2d0fef
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:edcbda48b98381d2804df29839fc6b7baea1aebc4a5832ba4c600db41135c488
+oid sha256:52aa474262a07e7da5493acb71323699d3a95e420cf1220730538dffbf9e1ae4
 size 1064
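The four files above are Git LFS pointer files (spec v1): each records only the SHA-256 of the real object and its byte size, so this commit swaps the checkpoint contents while every pointer stays three lines long. Below is a minimal sketch of how a locally fetched checkpoint file could be checked against its pointer; `verify_lfs_pointer` is an illustrative helper, not part of this repo, and it assumes the file was materialized with `git lfs pull` rather than left as pointer text:

```python
import hashlib

def verify_lfs_pointer(local_path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the local file's SHA-256 and size match the LFS pointer."""
    sha = hashlib.sha256()
    size = 0
    with open(local_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            sha.update(chunk)
            size += len(chunk)
    return sha.hexdigest() == expected_oid and size == expected_size

# Values copied from the updated adapter_model.safetensors pointer above.
print(verify_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "122f732830a82f1a7a3a7425537e862e7ee1d697cd8187d0044646345b434143",
    13982248,
))
```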
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0688,
+  "epoch": 0.0704,
   "eval_steps": 500,
-  "global_step": 860,
+  "global_step": 880,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -308,6 +308,13 @@
       "learning_rate": 2.9986706477143436e-05,
       "loss": 3.6594,
       "step": 860
+    },
+    {
+      "epoch": 0.07,
+      "grad_norm": 49.44480895996094,
+      "learning_rate": 2.9984583023761318e-05,
+      "loss": 3.7271,
+      "step": 880
     }
   ],
   "logging_steps": 20,
@@ -315,7 +322,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 2034589892050944.0,
+  "total_flos": 2079501186367488.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null