plip committed on
Commit 0c017ce
1 Parent(s): baf5eda

Training in progress, step 50000

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:61231d17702dfc54da0df39c22bae2d1eeca50c901ef6c70c2d09f7f924d00c7
+ oid sha256:6640249a1c6041c32dfd1464a658338887363ff5085678afe917eb168bf7a616
  size 202193937
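
This commit only rewrites Git LFS pointer files: each `oid sha256` changes while the recorded size stays the same, so every checkpoint binary was replaced by new contents of identical byte length. As a minimal sketch (not part of this repo), a pulled LFS object can be checked against the oid recorded in its pointer; the expected hash below is the new last-checkpoint/optimizer.pt oid from the hunk above.

```python
import hashlib

# Sketch: verify a pulled LFS object against the `oid sha256:` in its pointer.
# EXPECTED_OID is the new last-checkpoint/optimizer.pt hash shown in this diff.
EXPECTED_OID = "6640249a1c6041c32dfd1464a658338887363ff5085678afe917eb168bf7a616"

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so the ~200 MB optimizer state never sits fully in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

print(sha256_of("last-checkpoint/optimizer.pt") == EXPECTED_OID)
```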
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6491475de43c8b876f271481f40ac671b352de875d87744fae94f453e73e7076
+ oid sha256:5b9279029389011e28791bcbea261efc23528afcc9ebee41b4e9cca6964cc5f3
  size 102501541
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:040772ef0e121e2fe79b90f7b74d0118c9dffb2edec04e04df9f9cfb55a592d2
+ oid sha256:4bfa0b7d5c3d248e8db4db63af9bfcb5ccbe3aa3f4c28b0b4e1a7ce6f0017bb0
  size 14503
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:040772ef0e121e2fe79b90f7b74d0118c9dffb2edec04e04df9f9cfb55a592d2
+ oid sha256:4bfa0b7d5c3d248e8db4db63af9bfcb5ccbe3aa3f4c28b0b4e1a7ce6f0017bb0
  size 14503
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:040772ef0e121e2fe79b90f7b74d0118c9dffb2edec04e04df9f9cfb55a592d2
+ oid sha256:4bfa0b7d5c3d248e8db4db63af9bfcb5ccbe3aa3f4c28b0b4e1a7ce6f0017bb0
  size 14503
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:040772ef0e121e2fe79b90f7b74d0118c9dffb2edec04e04df9f9cfb55a592d2
+ oid sha256:4bfa0b7d5c3d248e8db4db63af9bfcb5ccbe3aa3f4c28b0b4e1a7ce6f0017bb0
  size 14503
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:040772ef0e121e2fe79b90f7b74d0118c9dffb2edec04e04df9f9cfb55a592d2
+ oid sha256:4bfa0b7d5c3d248e8db4db63af9bfcb5ccbe3aa3f4c28b0b4e1a7ce6f0017bb0
  size 14503
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:040772ef0e121e2fe79b90f7b74d0118c9dffb2edec04e04df9f9cfb55a592d2
+ oid sha256:4bfa0b7d5c3d248e8db4db63af9bfcb5ccbe3aa3f4c28b0b4e1a7ce6f0017bb0
  size 14503
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:040772ef0e121e2fe79b90f7b74d0118c9dffb2edec04e04df9f9cfb55a592d2
+ oid sha256:4bfa0b7d5c3d248e8db4db63af9bfcb5ccbe3aa3f4c28b0b4e1a7ce6f0017bb0
  size 14503
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:040772ef0e121e2fe79b90f7b74d0118c9dffb2edec04e04df9f9cfb55a592d2
+ oid sha256:4bfa0b7d5c3d248e8db4db63af9bfcb5ccbe3aa3f4c28b0b4e1a7ce6f0017bb0
  size 14503
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a74054da3729955ad5eacae83f875e2df84ec7d4ca3ed3437d6f2cf84557171f
+ oid sha256:8ec5c60f6b831a1ad5ababd554115ca1132f641d196d63d42183ef95c8827963
  size 623
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 1.0194199500484225,
- "global_step": 40000,
+ "epoch": 1.274274937560528,
+ "global_step": 50000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -806,11 +806,211 @@
  "eval_samples_per_second": 711.674,
  "eval_steps_per_second": 11.387,
  "step": 40000
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 0.00029923873905521244,
+ "loss": 0.3545,
+ "step": 40500
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 0.000299188879869657,
+ "loss": 0.3535,
+ "step": 41000
+ },
+ {
+ "epoch": 1.04,
+ "eval_loss": 0.8638287782669067,
+ "eval_runtime": 1.2182,
+ "eval_samples_per_second": 820.876,
+ "eval_steps_per_second": 13.134,
+ "step": 41000
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 0.00029913744385761244,
+ "loss": 0.3524,
+ "step": 41500
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 0.00029908443158157465,
+ "loss": 0.3518,
+ "step": 42000
+ },
+ {
+ "epoch": 1.07,
+ "eval_loss": 0.8664878606796265,
+ "eval_runtime": 1.2547,
+ "eval_samples_per_second": 796.987,
+ "eval_steps_per_second": 12.752,
+ "step": 42000
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 0.0002990298436212775,
+ "loss": 0.3511,
+ "step": 42500
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 0.0002989736805736861,
+ "loss": 0.3502,
+ "step": 43000
+ },
+ {
+ "epoch": 1.1,
+ "eval_loss": 0.8559480905532837,
+ "eval_runtime": 1.2272,
+ "eval_samples_per_second": 814.861,
+ "eval_steps_per_second": 13.038,
+ "step": 43000
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 0.00029891594305299065,
+ "loss": 0.3494,
+ "step": 43500
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 0.00029885663169059926,
+ "loss": 0.3488,
+ "step": 44000
+ },
+ {
+ "epoch": 1.12,
+ "eval_loss": 0.859957754611969,
+ "eval_runtime": 1.2944,
+ "eval_samples_per_second": 772.587,
+ "eval_steps_per_second": 12.361,
+ "step": 44000
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 0.0002987957471351316,
+ "loss": 0.3478,
+ "step": 44500
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 0.00029873329005241137,
+ "loss": 0.3469,
+ "step": 45000
+ },
+ {
+ "epoch": 1.15,
+ "eval_loss": 0.852756917476654,
+ "eval_runtime": 1.2478,
+ "eval_samples_per_second": 801.439,
+ "eval_steps_per_second": 12.823,
+ "step": 45000
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 0.00029866926112545925,
+ "loss": 0.3464,
+ "step": 45500
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 0.00029860366105448534,
+ "loss": 0.3459,
+ "step": 46000
+ },
+ {
+ "epoch": 1.17,
+ "eval_loss": 0.8597527146339417,
+ "eval_runtime": 1.1814,
+ "eval_samples_per_second": 846.435,
+ "eval_steps_per_second": 13.543,
+ "step": 46000
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 0.00029853649055688143,
+ "loss": 0.3451,
+ "step": 46500
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 0.00029846775036721337,
+ "loss": 0.3444,
+ "step": 47000
+ },
+ {
+ "epoch": 1.2,
+ "eval_loss": 0.860701322555542,
+ "eval_runtime": 1.2083,
+ "eval_samples_per_second": 827.633,
+ "eval_steps_per_second": 13.242,
+ "step": 47000
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 0.0002983974412372129,
+ "loss": 0.3438,
+ "step": 47500
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 0.00029832556393576934,
+ "loss": 0.3428,
+ "step": 48000
+ },
+ {
+ "epoch": 1.22,
+ "eval_loss": 0.865045964717865,
+ "eval_runtime": 1.2493,
+ "eval_samples_per_second": 800.445,
+ "eval_steps_per_second": 12.807,
+ "step": 48000
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 0.0002982521192489214,
+ "loss": 0.3425,
+ "step": 48500
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 0.0002981771079798483,
+ "loss": 0.342,
+ "step": 49000
+ },
+ {
+ "epoch": 1.25,
+ "eval_loss": 0.8640099167823792,
+ "eval_runtime": 1.2369,
+ "eval_samples_per_second": 808.489,
+ "eval_steps_per_second": 12.936,
+ "step": 49000
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 0.00029810053094886136,
+ "loss": 0.3417,
+ "step": 49500
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 0.00029802238899339473,
+ "loss": 0.3408,
+ "step": 50000
+ },
+ {
+ "epoch": 1.27,
+ "eval_loss": 0.8549481630325317,
+ "eval_runtime": 1.3108,
+ "eval_samples_per_second": 762.871,
+ "eval_steps_per_second": 12.206,
+ "step": 50000
  }
  ],
  "max_steps": 500000,
  "num_train_epochs": 13,
- "total_flos": 1.277935076184955e+21,
+ "total_flos": 1.597422214959455e+21,
  "trial_name": null,
  "trial_params": null
  }
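
The trainer_state.json hunk above records the metrics logged between steps 40500 and 50000: training loss drifts down from 0.3545 to 0.3408 while eval_loss hovers around 0.85-0.87. A minimal sketch for reading those entries back out of the checkpoint, assuming the usual Hugging Face Trainer layout in which the log entries live under a "log_history" key:

```python
import json

# Sketch: print the train/eval losses added in this commit (steps 40500-50000).
# Assumes trainer_state.json follows the usual Trainer layout with "log_history".
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    step = entry.get("step", 0)
    if step <= 40000:
        continue  # keep only the entries introduced by this commit
    if "loss" in entry:
        print(f"step {step:>6}  train_loss {entry['loss']:.4f}")
    elif "eval_loss" in entry:
        print(f"step {step:>6}  eval_loss  {entry['eval_loss']:.4f}")
```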
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6491475de43c8b876f271481f40ac671b352de875d87744fae94f453e73e7076
+ oid sha256:5b9279029389011e28791bcbea261efc23528afcc9ebee41b4e9cca6964cc5f3
  size 102501541
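
The pytorch_model.bin at the repository root carries the same oid as last-checkpoint/pytorch_model.bin, i.e. the step-50000 weights. A minimal sketch for inspecting it once the ~102 MB binary has been fetched (for example via `git lfs pull`); until then only the pointer stub shown above exists on disk and loading would fail:

```python
import torch

# Sketch: peek at the tensors inside the step-50000 weights.
state_dict = torch.load("pytorch_model.bin", map_location="cpu")
print(f"{len(state_dict)} tensors")
for name, tensor in list(state_dict.items())[:5]:
    print(f"{name}: {tuple(tensor.shape)}, {tensor.dtype}")
```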