Femboyuwu2000 committed
Commit 71f7e6d
1 Parent(s): d71f063

Training in progress, step 240, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3078213dcd15bc291dc37ca3077f9efb155cb015e2ecfc039f1079c016cd9a50
+oid sha256:33ef7c866d7d18cd44bc9d12177a548794d5c309ba0e4eedfd17449167d11b3b
 size 4725640
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:684dc57c1e94460d88f7247e65733810667abcc8211761ae7f175a94fb2a5c38
-size 2423738
+oid sha256:1a46c69917afb0de58ee2291c88b716000c1ddad671a41491530dcaa6e488cfd
+size 2423802
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:70a160794c42b620b052ded8acee8ed084ab34b149aee53004744c1a7084b810
+oid sha256:1e998aeb067d30d8950f7348a4d890bfd1791599ad6f3d5db56952d64809af4d
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a258940d20544cb994c096c67b2810560f36bc1c5c956be9ea4d5468f6460551
+oid sha256:549529bf2ef6166170cc78b313c10abe92d8424e01d3dbfc9bb2a483a33c5b1f
 size 1064
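The four files above are stored through Git LFS, so each diff only touches the three-line pointer stub (spec version, sha256 oid, byte size); the checkpoint blobs themselves live in the LFS object store. As a rough illustration only (not part of this commit), a downloaded payload can be checked against the oid and size fields of its pointer; the file path, hash, and size below are copied from the new adapter_model.safetensors pointer.

import hashlib
from pathlib import Path

def lfs_pointer_matches(payload_path: str, oid_hex: str, expected_size: int) -> bool:
    """Compare a local file against the oid/size fields of a Git LFS pointer."""
    data = Path(payload_path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == oid_hex

# Values taken from the updated adapter_model.safetensors pointer above.
print(lfs_pointer_matches(
    "last-checkpoint/adapter_model.safetensors",
    "33ef7c866d7d18cd44bc9d12177a548794d5c309ba0e4eedfd17449167d11b3b",
    4725640,
))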
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.00017121555259819602,
+  "epoch": 0.00018678060283439565,
   "eval_steps": 500,
-  "global_step": 220,
+  "global_step": 240,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -84,6 +84,13 @@
       "learning_rate": 3.65e-05,
       "loss": 2.8568,
       "step": 220
+    },
+    {
+      "epoch": 0.0,
+      "grad_norm": 1.0859078168869019,
+      "learning_rate": 3.983333333333333e-05,
+      "loss": 2.8566,
+      "step": 240
     }
   ],
   "logging_steps": 20,
@@ -91,7 +98,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 20,
-  "total_flos": 1070150797246464.0,
+  "total_flos": 1154797709672448.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null