ncbateman committed
Commit 742f49b · verified · 1 Parent(s): 762daca

Training in progress, step 430, checkpoint

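The files below are the usual artifacts of a Hugging Face `Trainer` checkpoint: PEFT adapter weights (`adapter_model.safetensors`), optimizer and scheduler state, per-rank RNG state, and `trainer_state.json`. As a minimal sketch only (not taken from this repo's training script), a run could be resumed from this directory via the `resume_from_checkpoint` argument of `Trainer.train`; the `trainer` object and everything except the `last-checkpoint` path are assumptions here.

```python
# Minimal sketch: resuming from the committed checkpoint directory.
# Assumes `trainer` is an already-configured transformers.Trainer
# (model, datasets, TrainingArguments); only the "last-checkpoint"
# directory name is taken from this repository's layout.

def resume_from_last_checkpoint(trainer):
    # Trainer.train(resume_from_checkpoint=...) reloads optimizer.pt,
    # scheduler.pt, the per-rank rng_state_*.pth files and
    # trainer_state.json, so training continues from global_step 430
    # instead of restarting at step 0.
    trainer.train(resume_from_checkpoint="last-checkpoint")
```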
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e25778c7c6ed07405c2bed1af8854d32371e125c8cab716efa285d57bb112666
+ oid sha256:a99c5b789b7a06f40811d3146f888da339adf9e702f36969d5b6a7ac3ab82ab6
  size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1fbefd644422725fa55cf6197ec390b7bccedb73df3f6773b25ab8cc753c8ae0
+ oid sha256:7081fa4644e6edc9ed10c9a37bfc9b2e3b4dd975799975829b4c606230bf5f36
  size 85723732
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7dbf84d8d54c89546067af2b8c7960a09cd63b14b8c58a6c2f45d3a1805c2671
+ oid sha256:4f7bcb69931d483e25b0c94c58fcc965a98e3bae3e6c3bf085e4edd6e43f5a40
  size 14960
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:af347bd6e9a64e9d103a4bd43a4757792a255b273b942d70aeddb5b7a243efa6
+ oid sha256:87bfdda30185873c0a9cb229d25fc230d1146196d05e81747db223517c4eafcf
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7a821a31a15e8c39e25415cc4954957cfbae7d4d73e706211955c475fdac1633
+ oid sha256:2cd61f66c9dd6299c5f3f99f7ceb5a152a1d64c4909020bec811205c4243a070
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3bcfbc84a1d987ab5465db7241354afa4d4494c36e9c1879e23aee4df617417f
+ oid sha256:66172525291e0f467bfc09091521e14e32a3eaaef79955a2826a4348133fc8e0
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d7957f0bede386b9e7dfa8afbce84db80049c290d0482836d6c77ca519d043dd
+ oid sha256:79b607b3724fadd616761069f9d370b3b216d512a0fbae133eb5c1bd8e06f7d6
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.9486166007905138,
+ "epoch": 0.9712027103331451,
  "eval_steps": 222,
- "global_step": 420,
+ "global_step": 430,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -2963,6 +2963,76 @@
  "learning_rate": 7.63285913778733e-07,
  "loss": 1.3464,
  "step": 420
+ },
+ {
+ "epoch": 0.950875211744777,
+ "grad_norm": 0.5072855949401855,
+ "learning_rate": 6.985077338944657e-07,
+ "loss": 1.0404,
+ "step": 421
+ },
+ {
+ "epoch": 0.9531338226990401,
+ "grad_norm": 0.5355942845344543,
+ "learning_rate": 6.365822639327723e-07,
+ "loss": 1.2642,
+ "step": 422
+ },
+ {
+ "epoch": 0.9553924336533032,
+ "grad_norm": 0.5786038041114807,
+ "learning_rate": 5.775130870590783e-07,
+ "loss": 1.3897,
+ "step": 423
+ },
+ {
+ "epoch": 0.9576510446075663,
+ "grad_norm": 0.564463198184967,
+ "learning_rate": 5.213036211664191e-07,
+ "loss": 1.4783,
+ "step": 424
+ },
+ {
+ "epoch": 0.9599096555618295,
+ "grad_norm": 0.5604745745658875,
+ "learning_rate": 4.6795711867766436e-07,
+ "loss": 1.6895,
+ "step": 425
+ },
+ {
+ "epoch": 0.9621682665160926,
+ "grad_norm": 0.7266672849655151,
+ "learning_rate": 4.1747666635733597e-07,
+ "loss": 1.3143,
+ "step": 426
+ },
+ {
+ "epoch": 0.9644268774703557,
+ "grad_norm": 0.7507563829421997,
+ "learning_rate": 3.698651851329837e-07,
+ "loss": 1.4809,
+ "step": 427
+ },
+ {
+ "epoch": 0.9666854884246189,
+ "grad_norm": 0.5136380195617676,
+ "learning_rate": 3.251254299261874e-07,
+ "loss": 1.6922,
+ "step": 428
+ },
+ {
+ "epoch": 0.968944099378882,
+ "grad_norm": 0.7942318916320801,
+ "learning_rate": 2.8325998949314536e-07,
+ "loss": 1.5874,
+ "step": 429
+ },
+ {
+ "epoch": 0.9712027103331451,
+ "grad_norm": 0.46300673484802246,
+ "learning_rate": 2.442712862748775e-07,
+ "loss": 1.7898,
+ "step": 430
  }
  ],
  "logging_steps": 1,
@@ -2982,7 +3052,7 @@
  "attributes": {}
  }
  },
- "total_flos": 2.4934831596440125e+18,
+ "total_flos": 2.5528341423288484e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null