ale-bay committed
Commit 46c8deb
1 Parent(s): 6d8ad5c

End of training

README.md CHANGED
@@ -2,9 +2,15 @@
 license: gemma
 base_model: google/gemma-2b
 tags:
+- alignment-handbook
 - trl
 - dpo
 - generated_from_trainer
+- trl
+- dpo
+- generated_from_trainer
+datasets:
+- argilla/dpo-mix-7k
 model-index:
 - name: zephyr-2b-gemma-dpo
   results: []
@@ -16,17 +22,17 @@ should probably proofread and complete it, then remove this comment. -->
 [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://zebra.wandb.io/cto/distillm/runs/vnc8ka21)
 # zephyr-2b-gemma-dpo
 
-This model is a fine-tuned version of [google/gemma-2b](https://huggingface.co/google/gemma-2b) on an unknown dataset.
+This model is a fine-tuned version of [google/gemma-2b](https://huggingface.co/google/gemma-2b) on the argilla/dpo-mix-7k dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.6497
-- Rewards/chosen: -0.0395
-- Rewards/rejected: -0.1328
-- Rewards/accuracies: 0.6354
-- Rewards/margins: 0.0933
-- Logps/rejected: -378.4776
-- Logps/chosen: -386.1444
-- Logits/rejected: -25.9802
-- Logits/chosen: -26.9529
+- Loss: 0.6493
+- Rewards/chosen: -0.0415
+- Rewards/rejected: -0.1402
+- Rewards/accuracies: 0.6875
+- Rewards/margins: 0.0986
+- Logps/rejected: -378.6258
+- Logps/chosen: -386.1853
+- Logits/rejected: -25.9826
+- Logits/chosen: -26.9604
 
 ## Model description
 
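For orientation, a minimal inference sketch against the card above. The repo id `ale-bay/zephyr-2b-gemma-dpo` is an assumption inferred from the committer and model name and may not match the actual repository.

```python
# Minimal sketch: load the DPO-tuned checkpoint described by the card and generate.
# The repo id below is hypothetical (inferred from committer + model name).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "ale-bay/zephyr-2b-gemma-dpo"  # hypothetical; adjust to the real repo
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)

inputs = tokenizer("Explain DPO in one sentence.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```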
all_results.json CHANGED
@@ -1,5 +1,18 @@
 {
     "epoch": 1.971563981042654,
+    "eval_logits/chosen": -26.960426330566406,
+    "eval_logits/rejected": -25.98261260986328,
+    "eval_logps/chosen": -386.185302734375,
+    "eval_logps/rejected": -378.6257629394531,
+    "eval_loss": 0.6492888331413269,
+    "eval_rewards/accuracies": 0.6875,
+    "eval_rewards/chosen": -0.04152223840355873,
+    "eval_rewards/margins": 0.09864477068185806,
+    "eval_rewards/rejected": -0.1401670128107071,
+    "eval_runtime": 6.8392,
+    "eval_samples": 750,
+    "eval_samples_per_second": 109.662,
+    "eval_steps_per_second": 3.509,
     "total_flos": 0.0,
     "train_loss": 0.661209049133154,
     "train_runtime": 432.2704,
config.json CHANGED
@@ -24,6 +24,6 @@
   "rope_theta": 10000.0,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.42.4",
-  "use_cache": false,
+  "use_cache": true,
   "vocab_size": 256000
 }
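The `use_cache` flip from `false` to `true` is the usual end-of-training change: the KV cache is kept off during training (e.g. with gradient checkpointing) and re-enabled for inference. A sketch of inspecting and overriding the flag with `transformers`, using the same hypothetical repo id as above:

```python
# Sketch: read the saved config and toggle the KV-cache flag (repo id is an assumption).
from transformers import AutoConfig

config = AutoConfig.from_pretrained("ale-bay/zephyr-2b-gemma-dpo")  # hypothetical
print(config.use_cache)  # True after this commit

# If resuming training with gradient checkpointing, the cache is typically disabled again:
config.use_cache = False
```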
eval_results.json ADDED
@@ -0,0 +1,16 @@
+{
+    "epoch": 1.971563981042654,
+    "eval_logits/chosen": -26.960426330566406,
+    "eval_logits/rejected": -25.98261260986328,
+    "eval_logps/chosen": -386.185302734375,
+    "eval_logps/rejected": -378.6257629394531,
+    "eval_loss": 0.6492888331413269,
+    "eval_rewards/accuracies": 0.6875,
+    "eval_rewards/chosen": -0.04152223840355873,
+    "eval_rewards/margins": 0.09864477068185806,
+    "eval_rewards/rejected": -0.1401670128107071,
+    "eval_runtime": 6.8392,
+    "eval_samples": 750,
+    "eval_samples_per_second": 109.662,
+    "eval_steps_per_second": 3.509
+}
runs/Jul12_09-48-54_ale-distillm-8-0-0/events.out.tfevents.1720774969.ale-distillm-8-0-0.1202.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9d915ada9c410742f74fb7bf0195b865f90ed07b43ea56d354150997db6a4f3
+size 815
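The added file is a Git LFS pointer (note the `oid`/`size` fields), not the TensorBoard log itself. A sketch of reading the logged scalars once the blob has been fetched with `git lfs pull`, assuming the `tensorboard` package is installed:

```python
# Sketch: read scalar summaries from the fetched tfevents file.
# Requires `git lfs pull` first, since this commit only adds the LFS pointer.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Jul12_09-48-54_ale-distillm-8-0-0")
acc.Reload()
for tag in acc.Tags()["scalars"]:
    values = [event.value for event in acc.Scalars(tag)]
    print(tag, values[-5:])  # last few logged values per scalar tag
```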