Femboyuwu2000 committed
Commit 69af666
Parent: 50857db

Training in progress, step 140, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:27c29bc43948c63a0f226cc87ab0660cea4ab6cd6ae33f4b3a1386449f13e33c
+oid sha256:0e959f2bedd4d1c06f7560c91048e25cb09d9cd0bf6f1c8ec525e1b2a50b2be5
 size 4725640
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ac748cfb6f2f2d9146a22f9e90943f89a69ac611b4ace36f0cd0825509a80ec0
+oid sha256:7ea726ba9fc9c9eb1066b46bde05b501eb2ebeaedc9625cd740c2148848db790
 size 2423738
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bd836bde9e7de00360c353ed3595fa24ed6d63347354818295fc73e9837134fc
+oid sha256:840146a001ff48fd14f9176d2551e18a68e4936bbf1b797f6c21c89adb36c9a6
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6977daaeb422e8aea5fd23309b89c0087d980bd2e3b8f44821aa562f12801c8d
+oid sha256:7d82c151815973da3ac2e374434a38a206899c333d88de9e900733b7877b8460
 size 1064
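The four files above are tracked with Git LFS, so the repository only stores small pointer files (spec version, sha256 oid, byte size); this commit swaps the oid recorded for each checkpoint artifact while the sizes stay the same. As a minimal sketch (assuming the large files have already been pulled locally, e.g. with `git lfs pull`, and the paths below match the repo layout), the recorded oids can be checked against the actual file contents:

```python
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file and return its hex SHA-256 digest (the LFS 'oid')."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected values taken from the new LFS pointers in this commit.
expected = {
    "last-checkpoint/adapter_model.safetensors":
        "0e959f2bedd4d1c06f7560c91048e25cb09d9cd0bf6f1c8ec525e1b2a50b2be5",
    "last-checkpoint/optimizer.pt":
        "7ea726ba9fc9c9eb1066b46bde05b501eb2ebeaedc9625cd740c2148848db790",
    "last-checkpoint/rng_state.pth":
        "840146a001ff48fd14f9176d2551e18a68e4936bbf1b797f6c21c89adb36c9a6",
    "last-checkpoint/scheduler.pt":
        "7d82c151815973da3ac2e374434a38a206899c333d88de9e900733b7877b8460",
}

for name, oid in expected.items():
    actual = sha256_of(Path(name))
    status = "OK" if actual == oid else "MISMATCH"
    print(f"{status}  {name}")
```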
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 9.339030141719783e-05,
+  "epoch": 0.00010895535165339746,
   "eval_steps": 500,
-  "global_step": 120,
+  "global_step": 140,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -49,6 +49,13 @@
       "learning_rate": 1.9833333333333335e-05,
       "loss": 3.1903,
       "step": 120
+    },
+    {
+      "epoch": 0.0,
+      "grad_norm": 3.08722186088562,
+      "learning_rate": 2.3166666666666666e-05,
+      "loss": 3.5379,
+      "step": 140
     }
   ],
   "logging_steps": 20,
@@ -56,7 +63,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 20,
-  "total_flos": 637410213568512.0,
+  "total_flos": 721108975951872.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null