End of training
Files changed:
- README.md +6 -2
- all_results.json +5 -0
- config.json +1 -1
- eval_results.json +8 -0
README.md
CHANGED
@@ -2,11 +2,15 @@
 license: gemma
 base_model: google/gemma-7b
 tags:
+- alignment-handbook
+- trl
+- sft
+- generated_from_trainer
 - trl
 - sft
 - generated_from_trainer
 datasets:
--
+- HuggingFaceH4/deita-10k-v0-sft
 model-index:
 - name: zephyr-7b-gemma-sft
   results: []
@@ -17,7 +21,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # zephyr-7b-gemma-sft
 
-This model is a fine-tuned version of [google/gemma-7b](https://huggingface.co/google/gemma-7b) on the
+This model is a fine-tuned version of [google/gemma-7b](https://huggingface.co/google/gemma-7b) on the HuggingFaceH4/deita-10k-v0-sft dataset.
 It achieves the following results on the evaluation set:
 - Loss: 1.0814
 
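The updated card now names HuggingFaceH4/deita-10k-v0-sft as the SFT dataset and reports an eval loss of 1.0814. As a quick usage illustration, here is a minimal sketch of loading this checkpoint with transformers; the repo id below is only an assumption taken from the model-index name, so substitute the actual Hub path or a local checkpoint directory:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed repo id (from the model-index name); replace with the real Hub
# path or a local checkpoint directory if it differs.
model_id = "zephyr-7b-gemma-sft"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
    device_map="auto",
)

prompt = "Explain supervised fine-tuning (SFT) in one sentence."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```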
all_results.json
CHANGED
@@ -1,5 +1,10 @@
 {
     "epoch": 2.994991652754591,
+    "eval_loss": 1.0814331769943237,
+    "eval_runtime": 50.5833,
+    "eval_samples": 500,
+    "eval_samples_per_second": 37.819,
+    "eval_steps_per_second": 1.186,
     "total_flos": 246978202042368.0,
     "train_loss": 0.8794620578637224,
     "train_runtime": 12081.5129,
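The new eval_* keys come from the final evaluation pass (500 samples in about 50.6 s). A small sketch of reading the committed metrics and converting the mean eval loss into perplexity via exp(loss); the file name is the one added in this commit:

```python
import json
import math

# all_results.json is committed at the repo root alongside the checkpoint.
with open("all_results.json") as f:
    results = json.load(f)

eval_loss = results["eval_loss"]      # 1.0814331769943237
perplexity = math.exp(eval_loss)      # exp(1.0814) ≈ 2.95
print(f"eval_loss={eval_loss:.4f}, perplexity={perplexity:.2f}")
```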
config.json
CHANGED
@@ -24,6 +24,6 @@
     "rope_theta": 10000.0,
     "torch_dtype": "bfloat16",
     "transformers_version": "4.40.2",
-    "use_cache": false,
+    "use_cache": true,
     "vocab_size": 256000
 }
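Flipping `use_cache` back to `true` follows the usual pattern after training (caching is typically disabled while gradient checkpointing is active and re-enabled for inference); that motivation is an assumption, since the commit itself only records the value change. A minimal check of the committed config, with the repo id assumed as above:

```python
from transformers import AutoConfig

# Assumed repo id; point at the actual Hub path or local checkpoint directory.
config = AutoConfig.from_pretrained("zephyr-7b-gemma-sft")

print(config.use_cache)     # True after this commit
print(config.torch_dtype)   # bfloat16
print(config.vocab_size)    # 256000
```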
eval_results.json
ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 2.994991652754591,
+    "eval_loss": 1.0814331769943237,
+    "eval_runtime": 50.5833,
+    "eval_samples": 500,
+    "eval_samples_per_second": 37.819,
+    "eval_steps_per_second": 1.186
+}