Femboyuwu2000 committed
Commit a6cab9b
1 Parent(s): f7f01bd

Training in progress, step 280, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4c74abcbc22a956d2c4927f3cd90c498db1401577c1a95edd5aa2828ae826f3e
+oid sha256:3b69e1ece531ae04226ab8e666c97b14f0d7a11afc5625e4c77a464e4ce29496
 size 4725640
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0405dd9af2f32c8a1a022354904e4a96c6491985948aa4e13d81d2e6b0537b10
+oid sha256:dbd9ad863a348d05efe28bd6743c02865fa3ef1f49db9e787ff7a2fb7012c27d
 size 2423802
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1257d052b4a8dc172dc42140b2ee6a8793b59746e8cf14885edb0b6f2889582d
+oid sha256:b08248ed26b28c1a0bd4b42e309ef4427a1e5fb6da926b6c61e6c6e7cc3b09fb
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1b9b1b69a7ab8ccbf3078c429a8961ade0793a75661dbafee0da970c8ab67482
+oid sha256:c08479d3d6dabb601d418ce802b775aad13d7bf7b26fde0b24c32b2a8f36ef6e
 size 1064
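Each pointer diff above swaps only the Git LFS oid: the pointer records the SHA-256 of the actual file contents plus its byte size, so a new oid with an unchanged size means the checkpoint file was rewritten with different contents of the same length. A minimal sketch (not part of the commit) for checking a locally downloaded file against the new oid; the path is hypothetical:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file and return its hex SHA-256, i.e. the Git LFS oid."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Should print the value on the "+oid sha256:..." line of the matching pointer diff.
print(sha256_of("last-checkpoint/adapter_model.safetensors"))
```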
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.00020234565307059528,
+  "epoch": 0.00021791070330679492,
   "eval_steps": 500,
-  "global_step": 260,
+  "global_step": 280,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -98,6 +98,13 @@
       "learning_rate": 4.316666666666667e-05,
       "loss": 3.0079,
       "step": 260
+    },
+    {
+      "epoch": 0.0,
+      "grad_norm": 4.502331256866455,
+      "learning_rate": 4.6500000000000005e-05,
+      "loss": 2.6839,
+      "step": 280
     }
   ],
   "logging_steps": 20,
@@ -105,7 +112,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 20,
-  "total_flos": 1238496472055808.0,
+  "total_flos": 1322195234439168.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null