fay-ong committed on
Commit
d2d9259
1 Parent(s): 31d1e07

End of training

Browse files
Files changed (4) hide show
  1. README.md +3 -2
  2. all_results.json +6 -6
  3. train_results.json +6 -6
  4. trainer_state.json +139 -48
README.md CHANGED
@@ -2,8 +2,9 @@
2
  license: apache-2.0
3
  library_name: peft
4
  tags:
5
- - unsloth
6
  - llama-factory
 
 
7
  - generated_from_trainer
8
  base_model: unsloth/llama-3-8b-Instruct-bnb-4bit
9
  model-index:
@@ -16,7 +17,7 @@ should probably proofread and complete it, then remove this comment. -->
16
 
17
  # llama-3-8b-finetuned
18
 
19
- This model is a fine-tuned version of [unsloth/llama-3-8b-Instruct-bnb-4bit](https://huggingface.co/unsloth/llama-3-8b-Instruct-bnb-4bit) on an unknown dataset.
20
 
21
  ## Model description
22
 
 
2
  license: apache-2.0
3
  library_name: peft
4
  tags:
 
5
  - llama-factory
6
+ - lora
7
+ - unsloth
8
  - generated_from_trainer
9
  base_model: unsloth/llama-3-8b-Instruct-bnb-4bit
10
  model-index:
 
17
 
18
  # llama-3-8b-finetuned
19
 
20
+ This model is a fine-tuned version of [unsloth/llama-3-8b-Instruct-bnb-4bit](https://huggingface.co/unsloth/llama-3-8b-Instruct-bnb-4bit) on the formatted_data dataset.
21
 
22
  ## Model description
23
 
all_results.json CHANGED
@@ -1,8 +1,8 @@
1
  {
2
- "epoch": 1.0,
3
- "total_flos": 7074583249158144.0,
4
- "train_loss": 0.2772752811908722,
5
- "train_runtime": 751.7766,
6
- "train_samples_per_second": 0.665,
7
- "train_steps_per_second": 0.166
8
  }
 
1
  {
2
+ "epoch": 2.0,
3
+ "total_flos": 3.846009907091866e+16,
4
+ "train_loss": 0.16032438999414445,
5
+ "train_runtime": 2680.6813,
6
+ "train_samples_per_second": 0.373,
7
+ "train_steps_per_second": 0.093
8
  }
train_results.json CHANGED
@@ -1,8 +1,8 @@
1
  {
2
- "epoch": 1.0,
3
- "total_flos": 7074583249158144.0,
4
- "train_loss": 0.2772752811908722,
5
- "train_runtime": 751.7766,
6
- "train_samples_per_second": 0.665,
7
- "train_steps_per_second": 0.166
8
  }
 
1
  {
2
+ "epoch": 2.0,
3
+ "total_flos": 3.846009907091866e+16,
4
+ "train_loss": 0.16032438999414445,
5
+ "train_runtime": 2680.6813,
6
+ "train_samples_per_second": 0.373,
7
+ "train_steps_per_second": 0.093
8
  }
trainer_state.json CHANGED
@@ -1,111 +1,202 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 1.0,
5
  "eval_steps": 500,
6
- "global_step": 125,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.08,
13
- "grad_norm": 7.301303386688232,
14
- "learning_rate": 1.3846153846153847e-05,
15
- "loss": 1.5694,
16
  "step": 10
17
  },
18
  {
19
  "epoch": 0.16,
20
- "grad_norm": 3.652470827102661,
21
- "learning_rate": 1.985871018518236e-05,
22
- "loss": 0.3644,
23
  "step": 20
24
  },
25
  {
26
  "epoch": 0.24,
27
- "grad_norm": 5.326970100402832,
28
- "learning_rate": 1.912783265061319e-05,
29
- "loss": 0.374,
30
  "step": 30
31
  },
32
  {
33
  "epoch": 0.32,
34
- "grad_norm": 0.5815876126289368,
35
- "learning_rate": 1.7640373758216075e-05,
36
- "loss": 0.1797,
37
  "step": 40
38
  },
39
  {
40
  "epoch": 0.4,
41
- "grad_norm": 1.439568281173706,
42
- "learning_rate": 1.5555702330196024e-05,
43
- "loss": 0.0685,
44
  "step": 50
45
  },
46
  {
47
  "epoch": 0.48,
48
- "grad_norm": 0.09065556526184082,
49
- "learning_rate": 1.3036767451096148e-05,
50
- "loss": 0.1244,
51
  "step": 60
52
  },
53
  {
54
  "epoch": 0.56,
55
- "grad_norm": 2.369553327560425,
56
- "learning_rate": 1.028046256275869e-05,
57
- "loss": 0.1301,
58
  "step": 70
59
  },
60
  {
61
  "epoch": 0.64,
62
- "grad_norm": 2.2806155681610107,
63
- "learning_rate": 7.50223521832773e-06,
64
- "loss": 0.2033,
65
  "step": 80
66
  },
67
  {
68
  "epoch": 0.72,
69
- "grad_norm": 1.1145380735397339,
70
- "learning_rate": 4.919246547534709e-06,
71
- "loss": 0.1496,
72
  "step": 90
73
  },
74
  {
75
  "epoch": 0.8,
76
- "grad_norm": 1.7836447954177856,
77
- "learning_rate": 2.7333967796597317e-06,
78
- "loss": 0.0447,
79
  "step": 100
80
  },
81
  {
82
  "epoch": 0.88,
83
- "grad_norm": 0.2162286639213562,
84
- "learning_rate": 1.1155436402112785e-06,
85
- "loss": 0.1065,
86
  "step": 110
87
  },
88
  {
89
  "epoch": 0.96,
90
- "grad_norm": 1.168771505355835,
91
- "learning_rate": 1.921471959676957e-07,
92
- "loss": 0.1084,
93
  "step": 120
94
  },
95
  {
96
- "epoch": 1.0,
97
- "step": 125,
98
- "total_flos": 7074583249158144.0,
99
- "train_loss": 0.2772752811908722,
100
- "train_runtime": 751.7766,
101
- "train_samples_per_second": 0.665,
102
- "train_steps_per_second": 0.166
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
103
  }
104
  ],
105
  "logging_steps": 10,
106
- "max_steps": 125,
107
  "num_input_tokens_seen": 0,
108
- "num_train_epochs": 1,
109
  "save_steps": 1000,
110
  "stateful_callbacks": {
111
  "TrainerControl": {
@@ -119,7 +210,7 @@
119
  "attributes": {}
120
  }
121
  },
122
- "total_flos": 7074583249158144.0,
123
  "train_batch_size": 1,
124
  "trial_name": null,
125
  "trial_params": null
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 2.0,
5
  "eval_steps": 500,
6
+ "global_step": 250,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.08,
13
+ "grad_norm": 6.857266902923584,
14
+ "learning_rate": 7.2000000000000005e-06,
15
+ "loss": 1.585,
16
  "step": 10
17
  },
18
  {
19
  "epoch": 0.16,
20
+ "grad_norm": 2.489650011062622,
21
+ "learning_rate": 1.5200000000000002e-05,
22
+ "loss": 0.268,
23
  "step": 20
24
  },
25
  {
26
  "epoch": 0.24,
27
+ "grad_norm": 6.398056507110596,
28
+ "learning_rate": 1.9984407641819812e-05,
29
+ "loss": 0.2895,
30
  "step": 30
31
  },
32
  {
33
  "epoch": 0.32,
34
+ "grad_norm": 0.1504039317369461,
35
+ "learning_rate": 1.9809551553491918e-05,
36
+ "loss": 0.1859,
37
  "step": 40
38
  },
39
  {
40
  "epoch": 0.4,
41
+ "grad_norm": 1.2296534776687622,
42
+ "learning_rate": 1.944376370237481e-05,
43
+ "loss": 0.0984,
44
  "step": 50
45
  },
46
  {
47
  "epoch": 0.48,
48
+ "grad_norm": 0.012895800173282623,
49
+ "learning_rate": 1.889416373291298e-05,
50
+ "loss": 0.0668,
51
  "step": 60
52
  },
53
  {
54
  "epoch": 0.56,
55
+ "grad_norm": 2.1277153491973877,
56
+ "learning_rate": 1.8171448983351284e-05,
57
+ "loss": 0.1228,
58
  "step": 70
59
  },
60
  {
61
  "epoch": 0.64,
62
+ "grad_norm": 1.6015583276748657,
63
+ "learning_rate": 1.7289686274214116e-05,
64
+ "loss": 0.193,
65
  "step": 80
66
  },
67
  {
68
  "epoch": 0.72,
69
+ "grad_norm": 0.42995280027389526,
70
+ "learning_rate": 1.6266038113644605e-05,
71
+ "loss": 0.1573,
72
  "step": 90
73
  },
74
  {
75
  "epoch": 0.8,
76
+ "grad_norm": 0.2124607264995575,
77
+ "learning_rate": 1.5120428648705716e-05,
78
+ "loss": 0.0905,
79
  "step": 100
80
  },
81
  {
82
  "epoch": 0.88,
83
+ "grad_norm": 0.20811651647090912,
84
+ "learning_rate": 1.3875155864521031e-05,
85
+ "loss": 0.1169,
86
  "step": 110
87
  },
88
  {
89
  "epoch": 0.96,
90
+ "grad_norm": 0.7650100588798523,
91
+ "learning_rate": 1.2554457579357906e-05,
92
+ "loss": 0.125,
93
  "step": 120
94
  },
95
  {
96
+ "epoch": 1.04,
97
+ "grad_norm": 0.23556451499462128,
98
+ "learning_rate": 1.1184039683065014e-05,
99
+ "loss": 0.0576,
100
+ "step": 130
101
+ },
102
+ {
103
+ "epoch": 1.12,
104
+ "grad_norm": 0.6805216073989868,
105
+ "learning_rate": 9.790575801166432e-06,
106
+ "loss": 0.0703,
107
+ "step": 140
108
+ },
109
+ {
110
+ "epoch": 1.2,
111
+ "grad_norm": 0.023868851363658905,
112
+ "learning_rate": 8.401188123081653e-06,
113
+ "loss": 0.049,
114
+ "step": 150
115
+ },
116
+ {
117
+ "epoch": 1.28,
118
+ "grad_norm": 1.3114885091781616,
119
+ "learning_rate": 7.042919499559538e-06,
120
+ "loss": 0.129,
121
+ "step": 160
122
+ },
123
+ {
124
+ "epoch": 1.3599999999999999,
125
+ "grad_norm": 0.2610754668712616,
126
+ "learning_rate": 5.742207084349274e-06,
127
+ "loss": 0.0652,
128
+ "step": 170
129
+ },
130
+ {
131
+ "epoch": 1.44,
132
+ "grad_norm": 2.7140579223632812,
133
+ "learning_rate": 4.524367765074499e-06,
134
+ "loss": 0.0147,
135
+ "step": 180
136
+ },
137
+ {
138
+ "epoch": 1.52,
139
+ "grad_norm": 1.237959384918213,
140
+ "learning_rate": 3.4131053988131947e-06,
141
+ "loss": 0.0694,
142
+ "step": 190
143
+ },
144
+ {
145
+ "epoch": 1.6,
146
+ "grad_norm": 0.09863968938589096,
147
+ "learning_rate": 2.4300494434824373e-06,
148
+ "loss": 0.0786,
149
+ "step": 200
150
+ },
151
+ {
152
+ "epoch": 1.6800000000000002,
153
+ "grad_norm": 0.009426446631550789,
154
+ "learning_rate": 1.5943339650431578e-06,
155
+ "loss": 0.0301,
156
+ "step": 210
157
+ },
158
+ {
159
+ "epoch": 1.76,
160
+ "grad_norm": 1.9068654775619507,
161
+ "learning_rate": 9.222252146709143e-07,
162
+ "loss": 0.025,
163
+ "step": 220
164
+ },
165
+ {
166
+ "epoch": 1.8399999999999999,
167
+ "grad_norm": 0.03250988572835922,
168
+ "learning_rate": 4.268050246793276e-07,
169
+ "loss": 0.0093,
170
+ "step": 230
171
+ },
172
+ {
173
+ "epoch": 1.92,
174
+ "grad_norm": 0.29542025923728943,
175
+ "learning_rate": 1.1771618553447217e-07,
176
+ "loss": 0.0756,
177
+ "step": 240
178
+ },
179
+ {
180
+ "epoch": 2.0,
181
+ "grad_norm": 0.12926587462425232,
182
+ "learning_rate": 9.74759906957612e-10,
183
+ "loss": 0.0351,
184
+ "step": 250
185
+ },
186
+ {
187
+ "epoch": 2.0,
188
+ "step": 250,
189
+ "total_flos": 3.846009907091866e+16,
190
+ "train_loss": 0.16032438999414445,
191
+ "train_runtime": 2680.6813,
192
+ "train_samples_per_second": 0.373,
193
+ "train_steps_per_second": 0.093
194
  }
195
  ],
196
  "logging_steps": 10,
197
+ "max_steps": 250,
198
  "num_input_tokens_seen": 0,
199
+ "num_train_epochs": 2,
200
  "save_steps": 1000,
201
  "stateful_callbacks": {
202
  "TrainerControl": {
 
210
  "attributes": {}
211
  }
212
  },
213
+ "total_flos": 3.846009907091866e+16,
214
  "train_batch_size": 1,
215
  "trial_name": null,
216
  "trial_params": null