smilemikan committed
Commit a793eaa
1 Parent(s): 269cfe1

Training in progress, step 28500, checkpoint

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1e656d6d29e5bdf4184675628e325ee8bf61b1421375059bb8a61ac1fe851baa
+ oid sha256:5ba925de0a60de0c05c86f535773334f6736578470a60abf3389a77a7d18f322
  size 5125261
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:74329bbfdf4b3c0badecf4ec527037112c259541577b466c1c5dfb5166ef80ef
+ oid sha256:3d30ee4008301e76e80d8a8eeac8dc73fc5fc2e453c877ab66a3287ecf88f8dd
  size 2460465086
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2723958486f3c31644c2e93836f066ed3efc20b43c147cbdcfa43e5d91d46d82
+ oid sha256:526086536729fe0981b2912eea3c7882387a680e14d15799a8edd01ced16e4dd
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dba7fc5c0b02e717e9f7e8532434753e0f189a8ecfaf34de78f9861ef1481f15
+ oid sha256:71acf6119c8fd2709bf12b059c3dbedc5a8c189d0b1312bdc0c5e6d36b2d6949
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 1.4157196283340454,
- "best_model_checkpoint": "smilemikan/nllb-finetuned-jpn-to-ain-2/checkpoint-28000",
- "epoch": 6.168759638686936,
+ "best_metric": 1.41006338596344,
+ "best_model_checkpoint": "smilemikan/nllb-finetuned-jpn-to-ain-2/checkpoint-28500",
+ "epoch": 6.2789160608063455,
  "eval_steps": 500,
- "global_step": 28000,
+ "global_step": 28500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -791,13 +791,27 @@
  "eval_samples_per_second": 203.197,
  "eval_steps_per_second": 12.714,
  "step": 28000
+ },
+ {
+ "epoch": 6.28,
+ "learning_rate": 1.0503333333333335e-05,
+ "loss": 1.2863,
+ "step": 28500
+ },
+ {
+ "epoch": 6.28,
+ "eval_loss": 1.41006338596344,
+ "eval_runtime": 44.666,
+ "eval_samples_per_second": 203.242,
+ "eval_steps_per_second": 12.717,
+ "step": 28500
  }
  ],
  "logging_steps": 500,
  "max_steps": 60000,
  "num_train_epochs": 14,
  "save_steps": 500,
- "total_flos": 2.840078619692237e+16,
+ "total_flos": 2.891032666649395e+16,
  "trial_name": null,
  "trial_params": null
  }