Femboyuwu2000 committed on
Commit
54ae7a4
1 Parent(s): 2671ab4

Training in progress, step 320, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:29dd7ebae639bd2d9820d94f55a7d57ed7d7e651db6caee9214dbc394d6d8a52
+ oid sha256:dfd12fc64f06bad2fa92746516b8664309c4466b97e18621e506d23fda100aa8
  size 4725640
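
Each checkpoint binary in this commit is stored as a Git LFS pointer: the repository only records the version, oid sha256, and size fields, while the actual bytes live in LFS storage. A minimal sketch (not part of this repo), assuming the checkpoint has been pulled locally into last-checkpoint/, of checking a downloaded file against the pointer's oid:

```python
import hashlib

# Recompute the sha256 of the local file and compare it with the oid
# recorded in the LFS pointer above.
def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "dfd12fc64f06bad2fa92746516b8664309c4466b97e18621e506d23fda100aa8"
print(sha256_of("last-checkpoint/adapter_model.safetensors") == expected)
```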
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:69d66ca304d85c0c98fb0bf09060960ca796eb9eb4224559d90c4ff9e6ce0ba7
+ oid sha256:55c188f05db7cbc47d6d06e7ddb60737fba5819ce8ee84d17d80c52a5a576f38
  size 2423802
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:23cf2bfaf6a90fad15e6cbfeee3986b0a34e7580afbe7ec2acc0dc8448accfb4
+ oid sha256:04fce672895616ff879d43f4759ee203689dab1d6c33fbbeeae559d971ee1020
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bac6da347960aa710f0cf30015122a382ce8a8ef99042818f6297fe7974a65f2
+ oid sha256:3572c26d973ddba0178cecd4abd00ac7ce2546e793a6c420879267916425a660
  size 1064
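
The small adapter_model.safetensors (about 4.7 MB) indicates a parameter-efficient (PEFT/LoRA-style) adapter rather than full model weights. A hedged sketch of attaching it to its base model; the base model name below is a placeholder, since the diff does not state it (it is normally recorded in the checkpoint's adapter_config.json), and AutoModelForCausalLM is an assumption about the task type:

```python
from transformers import AutoModelForCausalLM
from peft import PeftModel

# Placeholder: the actual base model is not named in this diff.
base = AutoModelForCausalLM.from_pretrained("base-model-name")

# Loads the adapter weights (adapter_model.safetensors) from the local
# checkpoint directory and wraps the base model with them.
model = PeftModel.from_pretrained(base, "last-checkpoint")
model.eval()
```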
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
    "best_metric": null,
    "best_model_checkpoint": null,
-   "epoch": 0.00023347575354299455,
+   "epoch": 0.0002490408037791942,
    "eval_steps": 500,
-   "global_step": 300,
+   "global_step": 320,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -112,6 +112,13 @@
      "learning_rate": 4.9833333333333336e-05,
      "loss": 2.7932,
      "step": 300
+   },
+   {
+     "epoch": 0.0,
+     "grad_norm": 4.788575172424316,
+     "learning_rate": 4.9999526661182696e-05,
+     "loss": 2.9341,
+     "step": 320
    }
  ],
  "logging_steps": 20,
@@ -119,7 +126,7 @@
    "num_input_tokens_seen": 0,
    "num_train_epochs": 1,
    "save_steps": 20,
-   "total_flos": 1405893996822528.0,
+   "total_flos": 1489592759205888.0,
    "train_batch_size": 1,
    "trial_name": null,
    "trial_params": null