Femboyuwu2000 committed
Commit 2051b35
1 Parent(s): 9208ac0

Training in progress, step 180, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8e7a69d1ce9bf6667ee18a34b80700a6f021f75a39b502df24c940c33a248326
+oid sha256:7c866b681847079f3b9029f07492db9880da802a14740ef308276cbc48af7029
 size 4725640
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7ffb5918dd4da0749e42b25da719356fb52e940925d48e3da3d388b6dd555958
+oid sha256:48b0feae9377bfd2ee4adc15c04b256d703f0f8cfe89d645ffa2f876e8acdd71
 size 2423738
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3a31f9a55784a31138651f313f71945c76da95fef110e5c73e597379ef000db0
+oid sha256:5818eed0ee5844cae7078ebbf0cd36b392212601ec7951f21f30f58ec6711de0
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1a68cf1fb54f09702759cafde2c8a6d257515638ebe6138585b95a632f1a49e6
+oid sha256:10bc8edf4e1fadab9976453bf48d4affbe0fded7ba378b722041dd8223dc111e
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0001245204018895971,
+  "epoch": 0.00014008545212579673,
   "eval_steps": 500,
-  "global_step": 160,
+  "global_step": 180,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -63,6 +63,13 @@
       "learning_rate": 2.6500000000000004e-05,
       "loss": 3.16,
       "step": 160
+    },
+    {
+      "epoch": 0.0,
+      "grad_norm": 5.391817092895508,
+      "learning_rate": 2.9833333333333335e-05,
+      "loss": 3.2489,
+      "step": 180
     }
   ],
   "logging_steps": 20,
@@ -70,7 +77,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 20,
-  "total_flos": 817767151202304.0,
+  "total_flos": 901465913585664.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
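For context, the values recorded in trainer_state.json ("save_steps": 20, "logging_steps": 20, "num_train_epochs": 1, "train_batch_size": 1) map directly onto Hugging Face TrainingArguments, and the presence of adapter_model.safetensors suggests a PEFT adapter is being trained. The sketch below is one way such a run could be set up; it is an illustration, not this repo's actual training script, and the model id, dataset, tokenization, LoRA settings, and output directory are placeholders.

# Illustrative sketch only (assumed setup, not taken from this repo):
# a PEFT/LoRA causal-LM fine-tune whose Trainer logs and saves a
# checkpoint every 20 steps, matching trainer_state.json above.
from datasets import load_dataset
from peft import LoraConfig, get_peft_model
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)

base_model = "placeholder/base-model"                # placeholder model id
tokenizer = AutoTokenizer.from_pretrained(base_model)
model = AutoModelForCausalLM.from_pretrained(base_model)
# Wrapping with PEFT is what makes Trainer save adapter_model.safetensors
# instead of the full model weights (LoRA settings here are arbitrary).
model = get_peft_model(model, LoraConfig(task_type="CAUSAL_LM"))

dataset = load_dataset("placeholder/dataset", split="train")  # placeholder dataset

def tokenize(batch):
    return tokenizer(batch["text"], truncation=True, max_length=512)

tokenized = dataset.map(tokenize, batched=True, remove_columns=dataset.column_names)

args = TrainingArguments(
    output_dir="outputs",              # Trainer writes outputs/checkpoint-<step>/
    per_device_train_batch_size=1,     # "train_batch_size": 1
    num_train_epochs=1,                # "num_train_epochs": 1
    logging_steps=20,                  # "logging_steps": 20 (log_history entries at 160, 180, ...)
    save_steps=20,                     # "save_steps": 20 (checkpoint at step 180)
    save_total_limit=1,                # keep only the most recent checkpoint
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=tokenized,
    data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
trainer.train()

Each save produces exactly the files changed in this commit (adapter_model.safetensors, optimizer.pt, rng_state.pth, scheduler.pt, trainer_state.json); how the latest checkpoint ends up under last-checkpoint/ on the Hub is not shown in this diff.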