rizkyjun committed
Commit c74b0c3
1 Parent(s): a24a71f

Training in progress, step 2200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:320fcaa4881a085e08afecc439e34ab02fb9b08efb7234483ae7b7879b113f01
+oid sha256:4b7dab2d2546bc9b5584cd70aecf27fd01c0ec429f54ed0da1544788df34146a
 size 9444296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ed961bcbe143c3a1b8c5c2afd53aa8d6ea3586ad407056e24c2504caefa331cf
+oid sha256:2a7ae030246005df73d9ebd1f4db1803862407102f7541754e56bc9aba74e4b6
 size 18902665
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:37d52c1bc6ea42c69efec50a9fdb93109c150d0f5358a667cf5ce03d6e6f892e
+oid sha256:591b68cb553c7eec0aa1f6e0e433f89cdb637b97e8f51bc7213f85b31dd88afe
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b1238b3bfa75e49a19396161e9e7b72ab89cdd1a3f63b51c0ab4d6e8d216c5a5
+oid sha256:f10a983aa914555fea6e5c0db8d7ddbaebbe7e28546c78ee0e93ac76cbc28436
 size 627
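
The four files above are stored with Git LFS, so this commit only rewrites their small pointer files: the oid sha256 line identifies the new checkpoint blob and the size line its byte count, while the blobs themselves live on the LFS server. As a hedged illustration (not code from this repository, with a hypothetical helper name), a pulled blob can be checked against its pointer text like so:

import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_text: str, blob_path: str) -> bool:
    # Parse the three pointer lines shown in the diffs above:
    # "version ...", "oid sha256:<hex>", "size <bytes>".
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].split(":", 1)[1]
    expected_size = int(fields["size"])
    blob = Path(blob_path)
    actual_oid = hashlib.sha256(blob.read_bytes()).hexdigest()
    return actual_oid == expected_oid and blob.stat().st_size == expected_size

# Hypothetical usage with the new adapter pointer from this commit:
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:4b7dab2d2546bc9b5584cd70aecf27fd01c0ec429f54ed0da1544788df34146a
size 9444296"""
print(verify_lfs_pointer(pointer, "last-checkpoint/adapter_model.safetensors"))
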
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 2.201956272125244,
-  "best_model_checkpoint": "./outputs/checkpoint-2100",
-  "epoch": 1.530054644808743,
+  "best_metric": 2.1847541332244873,
+  "best_model_checkpoint": "./outputs/checkpoint-2200",
+  "epoch": 1.6029143897996359,
   "eval_steps": 100,
-  "global_step": 2100,
+  "global_step": 2200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -301,13 +301,27 @@
       "eval_samples_per_second": 30.18,
       "eval_steps_per_second": 3.776,
       "step": 2100
+    },
+    {
+      "epoch": 1.6,
+      "learning_rate": 0.0002,
+      "loss": 2.1611,
+      "step": 2200
+    },
+    {
+      "epoch": 1.6,
+      "eval_loss": 2.1847541332244873,
+      "eval_runtime": 207.4083,
+      "eval_samples_per_second": 30.25,
+      "eval_steps_per_second": 3.785,
+      "step": 2200
     }
   ],
   "logging_steps": 100,
   "max_steps": 4116,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 6.134610229364736e+16,
+  "total_flos": 6.427567056218112e+16,
   "trial_name": null,
   "trial_params": null
 }
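
After this change, last-checkpoint/trainer_state.json records checkpoint-2200 as the new best (eval_loss 2.1848 versus 2.2020 at step 2100), with matching updates to epoch, global_step, and total_flos. A minimal, hedged sketch for inspecting those fields locally; the "log_history" key is the usual Hugging Face Trainer layout and is an assumption here, since the diff shows only fragments of the file:

import json
from pathlib import Path

# Path taken from this commit; run from the repository root.
state = json.loads(Path("last-checkpoint/trainer_state.json").read_text())

print(state["best_metric"])            # 2.1847541332244873 after this commit
print(state["best_model_checkpoint"])  # ./outputs/checkpoint-2200
print(state["global_step"])            # 2200

# The two records appended at step 2200: a training log (loss 2.1611) and an
# eval log (eval_loss 2.1848). "log_history" is assumed, not visible above.
for record in state["log_history"][-2:]:
    print(record)
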