End of training
Browse files
- README.md +4 -1
- all_results.json +12 -0
- eval_results.json +7 -0
- train_results.json +8 -0
- trainer_state.json +142 -0
- training_loss.png +0 -0
README.md
CHANGED
@@ -3,6 +3,7 @@ license: apache-2.0
 library_name: peft
 tags:
 - llama-factory
+- lora
 - generated_from_trainer
 base_model: mistralai/Mistral-7B-Instruct-v0.3
 model-index:
@@ -15,7 +16,9 @@ should probably proofread and complete it, then remove this comment. -->

 # Mistral-7B-Instruct-v0.3-ORPO-SFT

-This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.3](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3) on
+This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.3](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3) on the bct_non_cot_sft_1000 dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.0563

 ## Model description
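The card metadata above (library_name: peft, the lora tag, and base_model: mistralai/Mistral-7B-Instruct-v0.3) indicates this repo holds a LoRA adapter rather than full model weights. A minimal loading sketch, assuming transformers, peft, and accelerate are installed; the adapter repo id below is a placeholder for wherever this adapter is published:

```python
# Sketch: attach the LoRA adapter to the base Mistral model with PEFT.
# "your-org/Mistral-7B-Instruct-v0.3-ORPO-SFT" is a hypothetical repo id.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "mistralai/Mistral-7B-Instruct-v0.3"
adapter_id = "your-org/Mistral-7B-Instruct-v0.3-ORPO-SFT"  # placeholder

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)

# Quick generation check with the Mistral instruct prompt format.
prompt = "[INST] Hello! [/INST]"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```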
all_results.json
ADDED
@@ -0,0 +1,12 @@
+{
+    "epoch": 2.986666666666667,
+    "eval_loss": 0.05633977800607681,
+    "eval_runtime": 3.1751,
+    "eval_samples_per_second": 31.495,
+    "eval_steps_per_second": 15.748,
+    "total_flos": 1.5795369631678464e+16,
+    "train_loss": 0.11316087664592833,
+    "train_runtime": 355.6972,
+    "train_samples_per_second": 7.591,
+    "train_steps_per_second": 0.472
+}
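For reference, the throughput figures above are consistent with the 168 optimizer steps recorded in trainer_state.json over the 355.7 s runtime. A small sketch that rechecks the reported numbers, assuming all_results.json sits in the working directory:

```python
# Sketch: recompute steps/sec from the recorded runtime and compare with
# the reported value. The step count (168) is taken from trainer_state.json.
import json

with open("all_results.json") as f:
    results = json.load(f)

global_step = 168
runtime = results["train_runtime"]

print("steps/sec (recomputed):", round(global_step / runtime, 3))  # ~0.472
print("steps/sec (reported)  :", results["train_steps_per_second"])
print("samples/sec (reported):", results["train_samples_per_second"])
```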
eval_results.json
ADDED
@@ -0,0 +1,7 @@
+{
+    "epoch": 2.986666666666667,
+    "eval_loss": 0.05633977800607681,
+    "eval_runtime": 3.1751,
+    "eval_samples_per_second": 31.495,
+    "eval_steps_per_second": 15.748
+}
train_results.json
ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 2.986666666666667,
+    "total_flos": 1.5795369631678464e+16,
+    "train_loss": 0.11316087664592833,
+    "train_runtime": 355.6972,
+    "train_samples_per_second": 7.591,
+    "train_steps_per_second": 0.472
+}
trainer_state.json
ADDED
@@ -0,0 +1,142 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 2.986666666666667,
+  "eval_steps": 500,
+  "global_step": 168,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.17777777777777778,
+      "grad_norm": 7.053971290588379,
+      "learning_rate": 4.97273712672844e-06,
+      "loss": 0.5704,
+      "step": 10
+    },
+    {
+      "epoch": 0.35555555555555557,
+      "grad_norm": 5.397775650024414,
+      "learning_rate": 4.861084470200228e-06,
+      "loss": 0.2394,
+      "step": 20
+    },
+    {
+      "epoch": 0.5333333333333333,
+      "grad_norm": 2.6659767627716064,
+      "learning_rate": 4.667009949002349e-06,
+      "loss": 0.1046,
+      "step": 30
+    },
+    {
+      "epoch": 0.7111111111111111,
+      "grad_norm": 3.866196870803833,
+      "learning_rate": 4.397288409237892e-06,
+      "loss": 0.0639,
+      "step": 40
+    },
+    {
+      "epoch": 0.8888888888888888,
+      "grad_norm": 3.2750980854034424,
+      "learning_rate": 4.061335419273658e-06,
+      "loss": 0.0841,
+      "step": 50
+    },
+    {
+      "epoch": 1.0666666666666667,
+      "grad_norm": 2.892080783843994,
+      "learning_rate": 3.6708785865814186e-06,
+      "loss": 0.1054,
+      "step": 60
+    },
+    {
+      "epoch": 1.2444444444444445,
+      "grad_norm": 3.0646233558654785,
+      "learning_rate": 3.239548164813544e-06,
+      "loss": 0.0709,
+      "step": 70
+    },
+    {
+      "epoch": 1.4222222222222223,
+      "grad_norm": 3.744271755218506,
+      "learning_rate": 2.782401242396799e-06,
+      "loss": 0.0684,
+      "step": 80
+    },
+    {
+      "epoch": 1.6,
+      "grad_norm": 3.9203922748565674,
+      "learning_rate": 2.3153961224961665e-06,
+      "loss": 0.082,
+      "step": 90
+    },
+    {
+      "epoch": 1.7777777777777777,
+      "grad_norm": 2.8430514335632324,
+      "learning_rate": 1.854835242944048e-06,
+      "loss": 0.0505,
+      "step": 100
+    },
+    {
+      "epoch": 1.9555555555555557,
+      "grad_norm": 1.6261467933654785,
+      "learning_rate": 1.4167960829520933e-06,
+      "loss": 0.0843,
+      "step": 110
+    },
+    {
+      "epoch": 2.1333333333333333,
+      "grad_norm": 2.9942877292633057,
+      "learning_rate": 1.0165699227860215e-06,
+      "loss": 0.0642,
+      "step": 120
+    },
+    {
+      "epoch": 2.311111111111111,
+      "grad_norm": 1.675881266593933,
+      "learning_rate": 6.681280484488576e-07,
+      "loss": 0.0684,
+      "step": 130
+    },
+    {
+      "epoch": 2.488888888888889,
+      "grad_norm": 5.115616321563721,
+      "learning_rate": 3.8363403535449846e-07,
+      "loss": 0.0837,
+      "step": 140
+    },
+    {
+      "epoch": 2.6666666666666665,
+      "grad_norm": 2.429882287979126,
+      "learning_rate": 1.7301913642614382e-07,
+      "loss": 0.0603,
+      "step": 150
+    },
+    {
+      "epoch": 2.8444444444444446,
+      "grad_norm": 3.372321128845215,
+      "learning_rate": 4.3635597174694347e-08,
+      "loss": 0.06,
+      "step": 160
+    },
+    {
+      "epoch": 2.986666666666667,
+      "step": 168,
+      "total_flos": 1.5795369631678464e+16,
+      "train_loss": 0.11316087664592833,
+      "train_runtime": 355.6972,
+      "train_samples_per_second": 7.591,
+      "train_steps_per_second": 0.472
+    }
+  ],
+  "logging_steps": 10,
+  "max_steps": 168,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 500,
+  "total_flos": 1.5795369631678464e+16,
+  "train_batch_size": 2,
+  "trial_name": null,
+  "trial_params": null
+}
training_loss.png
ADDED
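The training_loss.png added here can in principle be regenerated from the log_history entries in trainer_state.json above. A minimal plotting sketch, assuming matplotlib is available and trainer_state.json sits in the working directory:

```python
# Sketch: re-plot the training loss curve from trainer_state.json.
# Only log_history entries carrying a per-step "loss" key are used; the
# final summary entry (train_loss, runtime, etc.) is skipped.
import json
import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

steps = [e["step"] for e in state["log_history"] if "loss" in e]
losses = [e["loss"] for e in state["log_history"] if "loss" in e]

plt.plot(steps, losses, marker="o")
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("Mistral-7B-Instruct-v0.3-ORPO-SFT training loss")
plt.savefig("training_loss.png", dpi=150)
```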