Commit 918028f by rizkyjun
Parent: 8039260

Training in progress, step 200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4d860f5aab5a0c5ca3d790df834172af4546e475481d00f672ca6c401e66002c
+oid sha256:59d777d2ee7103580893746a589b3d1e9c7ef5c56f8d9be87d7d8ed400ceede2
 size 9444296
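The adapter weights above are stored as a Git LFS pointer (sha256 oid plus size) rather than the binary itself. A minimal sketch of loading the updated adapter with PEFT, assuming the checkpoint folder also contains an adapter_config.json and that the unnamed base model (placeholder id below) is a causal LM:

```python
# Minimal sketch, not this repo's documented usage: load the LoRA adapter
# saved in last-checkpoint/ on top of its base model.
# "base-model-id" is a placeholder -- this commit does not name the base model.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("base-model-id")

# Reads adapter_config.json and adapter_model.safetensors from the local folder.
model = PeftModel.from_pretrained(base, "last-checkpoint")
model.eval()
```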
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5cbfbb7daacc1d53a9486675dd4ac1a7b806604cb6ae9f78569d831bc8c1926d
+oid sha256:02e35c8c44e73996db2eade8af211bbc06db0bb369e6d88cecf3e8d49d2c3f63
 size 18902665
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7393dbde4bffa4ea759a39a2e6dd5d0164b7e91c9e8ab3bfffc0ca38d5daac71
+oid sha256:bb3d217570474ed44ac994697c08a6231247f4bb9ecce7bb8af4dbe390e8d62a
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:216f76b8039f833c337db298c81f13b12082d5fd4f9d866cecd34b2ca7550b37
+oid sha256:7aa2c8b84e17817e6a4dcba5955fca913e266fdcd47f5594a29933ebd4972a01
 size 627
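In each of the four files above only the sha256 oid in the LFS pointer changes; the sizes are unchanged between step 100 and step 200. A small local check (a sketch, not part of this repo) that a pulled file matches the oid recorded in its pointer:

```python
# Minimal sketch: confirm a pulled LFS file matches the sha256 oid in its pointer.
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected value taken from the new pointer for optimizer.pt above.
expected = "02e35c8c44e73996db2eade8af211bbc06db0bb369e6d88cecf3e8d49d2c3f63"
print(sha256_of("last-checkpoint/optimizer.pt") == expected)
```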
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 2.6427574157714844,
-  "best_model_checkpoint": "./outputs/checkpoint-100",
-  "epoch": 0.07285974499089254,
+  "best_metric": 2.591986656188965,
+  "best_model_checkpoint": "./outputs/checkpoint-200",
+  "epoch": 0.14571948998178508,
   "eval_steps": 100,
-  "global_step": 100,
+  "global_step": 200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -21,13 +21,27 @@
       "eval_samples_per_second": 30.603,
       "eval_steps_per_second": 3.829,
       "step": 100
+    },
+    {
+      "epoch": 0.15,
+      "learning_rate": 0.0002,
+      "loss": 2.6057,
+      "step": 200
+    },
+    {
+      "epoch": 0.15,
+      "eval_loss": 2.591986656188965,
+      "eval_runtime": 205.1897,
+      "eval_samples_per_second": 30.577,
+      "eval_steps_per_second": 3.826,
+      "step": 200
     }
   ],
   "logging_steps": 100,
   "max_steps": 4116,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 2917794121482240.0,
+  "total_flos": 5821275858370560.0,
   "trial_name": null,
   "trial_params": null
 }
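The updated trainer state shows the eval loss improving from 2.6428 at step 100 to 2.5920 at step 200, so checkpoint-200 becomes the new best model. A minimal sketch of resuming from this checkpoint with the Hugging Face Trainer, assuming the model and datasets are configured elsewhere (placeholders below; only the values mirrored from trainer_state.json are taken from this commit):

```python
# Minimal sketch: continue training from the saved checkpoint directory.
# `model`, `train_ds`, and `eval_ds` are placeholders assumed to be defined
# by the training script, which is not part of this commit.
from transformers import Trainer, TrainingArguments

args = TrainingArguments(
    output_dir="./outputs",
    max_steps=4116,               # matches trainer_state.json
    num_train_epochs=3,
    evaluation_strategy="steps",
    eval_steps=100,
    save_steps=100,
    logging_steps=100,
    learning_rate=2e-4,           # matches the logged learning_rate
)

trainer = Trainer(model=model, args=args, train_dataset=train_ds, eval_dataset=eval_ds)

# Restores optimizer.pt, scheduler.pt, rng_state.pth, and trainer_state.json,
# then continues from global_step 200.
trainer.train(resume_from_checkpoint="./outputs/checkpoint-200")
```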