just1nseo committed on
Commit
6f4d452
1 Parent(s): 7d77b52

Model save

README.md ADDED
@@ -0,0 +1,86 @@
+ ---
+ license: apache-2.0
+ library_name: peft
+ tags:
+ - trl
+ - dpo
+ - generated_from_trainer
+ base_model: alignment-handbook/zephyr-7b-sft-full
+ model-index:
+ - name: zephyr-dpo-qlora-gpt4-5e-6-epoch3
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # zephyr-dpo-qlora-gpt4-5e-6-epoch3
+
+ This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
21
+ - Loss: 1.8351
22
+ - Rewards/chosen: -16.6710
23
+ - Rewards/rejected: -19.6029
24
+ - Rewards/accuracies: 0.6825
25
+ - Rewards/margins: 2.9319
26
+ - Rewards/margins Max: 11.9629
27
+ - Rewards/margins Min: -5.2899
28
+ - Rewards/margins Std: 7.7662
29
+ - Logps/rejected: -2219.4746
30
+ - Logps/chosen: -1952.3245
31
+ - Logits/rejected: -1.4296
32
+ - Logits/chosen: -1.5156
33
+
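The reward metrics above follow the TRL DPO convention: each reward is the implicit DPO reward, i.e. the policy-to-reference log-probability ratio scaled by the DPO beta, and the margin is the chosen reward minus the rejected reward; the Max/Min/Std columns presumably summarize the per-example margins. A minimal sketch of how such statistics can be computed from per-sequence log-probabilities (the beta value is illustrative, since the card does not record it):

```python
import torch

def dpo_reward_stats(policy_chosen_logps, policy_rejected_logps,
                     ref_chosen_logps, ref_rejected_logps, beta=0.1):
    """Implicit DPO rewards and margin statistics from summed per-sequence
    log-probabilities (1-D tensors of equal length). beta=0.1 is illustrative."""
    chosen_rewards = beta * (policy_chosen_logps - ref_chosen_logps)
    rejected_rewards = beta * (policy_rejected_logps - ref_rejected_logps)
    margins = chosen_rewards - rejected_rewards
    return {
        "rewards/chosen": chosen_rewards.mean().item(),
        "rewards/rejected": rejected_rewards.mean().item(),
        "rewards/accuracies": (margins > 0).float().mean().item(),
        "rewards/margins": margins.mean().item(),
        "rewards/margins_max": margins.max().item(),
        "rewards/margins_min": margins.min().item(),
        "rewards/margins_std": margins.std().item(),
    }
```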
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-06
+ - train_batch_size: 2
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - total_train_batch_size: 16
+ - total_eval_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 3
+
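For orientation only (the training script itself is not part of this commit), the hyperparameters listed above map roughly onto a standard `transformers.TrainingArguments` configuration; the effective batch sizes of 16 and 32 come from 2 and 4 per device across 8 GPUs. The output directory and the bf16 flag are assumptions:

```python
from transformers import TrainingArguments

# Illustrative mapping of the listed hyperparameters; not the repository's
# actual training configuration.
training_args = TrainingArguments(
    output_dir="zephyr-dpo-qlora-gpt4-5e-6-epoch3",  # hypothetical
    learning_rate=5e-6,
    per_device_train_batch_size=2,   # x 8 GPUs -> total_train_batch_size 16
    per_device_eval_batch_size=4,    # x 8 GPUs -> total_eval_batch_size 32
    num_train_epochs=3,
    seed=42,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    evaluation_strategy="steps",     # evaluation every 100 steps per the log
    eval_steps=100,
    bf16=True,                       # assumption; not stated in the card
)
```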
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Rewards/margins Max | Rewards/margins Min | Rewards/margins Std | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:-------------------:|:-------------------:|:-------------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.4777 | 0.28 | 100 | 0.6755 | -0.3600 | -0.4244 | 0.6032 | 0.0644 | 0.3559 | -0.2210 | 0.2529 | -301.6254 | -321.2222 | -2.6312 | -2.6710 |
+ | 0.1416 | 0.56 | 200 | 0.9053 | -6.7040 | -7.2161 | 0.6270 | 0.5121 | 2.7698 | -1.7984 | 2.0239 | -980.7882 | -955.6170 | -1.4055 | -1.4608 |
+ | 0.0426 | 0.85 | 300 | 0.9213 | -7.5636 | -8.6200 | 0.6786 | 1.0563 | 4.2652 | -2.1614 | 2.8565 | -1121.1776 | -1041.5824 | -1.6508 | -1.7101 |
+ | 0.0537 | 1.13 | 400 | 1.1419 | -12.1996 | -13.1820 | 0.6468 | 0.9824 | 5.4879 | -3.0621 | 3.7889 | -1577.3877 | -1505.1829 | -1.5926 | -1.6576 |
+ | 0.0197 | 1.41 | 500 | 1.6844 | -17.1495 | -18.8730 | 0.6667 | 1.7235 | 9.4195 | -5.1462 | 6.5774 | -2146.4797 | -2000.1663 | -1.4330 | -1.5026 |
+ | 0.0029 | 1.69 | 600 | 1.9743 | -14.5461 | -17.4661 | 0.6865 | 2.9200 | 12.4008 | -5.7167 | 8.1643 | -2005.7900 | -1739.8331 | -1.4547 | -1.5331 |
+ | 0.018 | 1.97 | 700 | 1.8030 | -16.5306 | -19.1782 | 0.6786 | 2.6476 | 11.2308 | -5.2715 | 7.4338 | -2177.0017 | -1938.2783 | -1.4133 | -1.4978 |
+ | 0.0014 | 2.25 | 800 | 1.8519 | -16.7236 | -19.4930 | 0.6746 | 2.7694 | 11.6630 | -5.3047 | 7.6237 | -2208.4844 | -1957.5789 | -1.4433 | -1.5266 |
+ | 0.0034 | 2.54 | 900 | 1.6799 | -16.1476 | -18.7797 | 0.6865 | 2.6322 | 10.7631 | -4.8758 | 7.0339 | -2137.1570 | -1899.9781 | -1.4489 | -1.5324 |
+ | 0.0118 | 2.82 | 1000 | 1.8351 | -16.6710 | -19.6029 | 0.6825 | 2.9319 | 11.9629 | -5.2899 | 7.7662 | -2219.4746 | -1952.3245 | -1.4296 | -1.5156 |
+
+
+ ### Framework versions
+
+ - PEFT 0.7.1
+ - Transformers 4.39.0.dev0
+ - Pytorch 2.1.2+cu121
+ - Datasets 2.14.6
+ - Tokenizers 0.15.2
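A minimal usage sketch for loading this LoRA adapter on top of the base model with the framework versions above; the adapter repository id is assumed from the committer and model name:

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "alignment-handbook/zephyr-7b-sft-full"
adapter_id = "just1nseo/zephyr-dpo-qlora-gpt4-5e-6-epoch3"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype="auto")
model = PeftModel.from_pretrained(base_model, adapter_id)  # attach the DPO adapter

prompt = "Explain what a LoRA adapter is in one sentence."
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```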
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9fed038970acfd1d3cb93654197ff2ca239bc8ce56496886fe6ee34a8126b7b1
+ oid sha256:06e2f84004c2c3a6c5466a3646a926d3b628fdf048b091676f66c5a9e8c73eca
  size 671150064
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 3.0,
+     "train_loss": 0.1103198329137612,
+     "train_runtime": 9245.0119,
+     "train_samples": 5678,
+     "train_samples_per_second": 1.843,
+     "train_steps_per_second": 0.115
+ }
runs/Jul29_11-56-09_node12/events.out.tfevents.1722222188.node12.2246976.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:64887f3f7a5b3a437f5458fd441d2fdd3a5d39afdd242003bb2e364896c235fb
- size 103293
+ oid sha256:ca018eab3fc37b41ee902b0ebd05f088f102a32b1c6029d0fdb9e4ef1dd2a95c
+ size 108927
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 3.0,
+     "train_loss": 0.1103198329137612,
+     "train_runtime": 9245.0119,
+     "train_samples": 5678,
+     "train_samples_per_second": 1.843,
+     "train_steps_per_second": 0.115
+ }
trainer_state.json ADDED
@@ -0,0 +1,2146 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 3.0,
5
+ "eval_steps": 100,
6
+ "global_step": 1065,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "grad_norm": 2.1205010754043525,
14
+ "learning_rate": 4.672897196261682e-08,
15
+ "logits/chosen": -2.8477635383605957,
16
+ "logits/rejected": -2.8469698429107666,
17
+ "logps/chosen": -522.6112670898438,
18
+ "logps/rejected": -359.48583984375,
19
+ "loss": 0.6931,
20
+ "rewards/accuracies": 0.0,
21
+ "rewards/chosen": 0.0,
22
+ "rewards/margins": 0.0,
23
+ "rewards/margins_max": 0.0,
24
+ "rewards/margins_min": 0.0,
25
+ "rewards/margins_std": 0.0,
26
+ "rewards/rejected": 0.0,
27
+ "step": 1
28
+ },
29
+ {
30
+ "epoch": 0.03,
31
+ "grad_norm": 10.218544680897951,
32
+ "learning_rate": 4.6728971962616824e-07,
33
+ "logits/chosen": -2.9212379455566406,
34
+ "logits/rejected": -2.7965469360351562,
35
+ "logps/chosen": -313.4451904296875,
36
+ "logps/rejected": -170.3771209716797,
37
+ "loss": 0.6932,
38
+ "rewards/accuracies": 0.5,
39
+ "rewards/chosen": 0.0002524534647818655,
40
+ "rewards/margins": 0.0003799269034061581,
41
+ "rewards/margins_max": 0.0016077507752925158,
42
+ "rewards/margins_min": -0.0008478969684801996,
43
+ "rewards/margins_std": 0.0017364051891490817,
44
+ "rewards/rejected": -0.0001274734386242926,
45
+ "step": 10
46
+ },
47
+ {
48
+ "epoch": 0.06,
49
+ "grad_norm": 2.0408708876984667,
50
+ "learning_rate": 9.345794392523365e-07,
51
+ "logits/chosen": -2.7633142471313477,
52
+ "logits/rejected": -2.7104804515838623,
53
+ "logps/chosen": -380.93878173828125,
54
+ "logps/rejected": -244.42214965820312,
55
+ "loss": 0.6916,
56
+ "rewards/accuracies": 0.699999988079071,
57
+ "rewards/chosen": 0.0017110242042690516,
58
+ "rewards/margins": 0.002610816154628992,
59
+ "rewards/margins_max": 0.004759171046316624,
60
+ "rewards/margins_min": 0.0004624614375643432,
61
+ "rewards/margins_std": 0.0030382319819182158,
62
+ "rewards/rejected": -0.0008997917175292969,
63
+ "step": 20
64
+ },
65
+ {
66
+ "epoch": 0.08,
67
+ "grad_norm": 2.293731718484229,
68
+ "learning_rate": 1.4018691588785047e-06,
69
+ "logits/chosen": -2.8749966621398926,
70
+ "logits/rejected": -2.8233141899108887,
71
+ "logps/chosen": -375.4239196777344,
72
+ "logps/rejected": -252.9129638671875,
73
+ "loss": 0.687,
74
+ "rewards/accuracies": 0.8500000238418579,
75
+ "rewards/chosen": 0.0067976354621350765,
76
+ "rewards/margins": 0.009298587217926979,
77
+ "rewards/margins_max": 0.015676181763410568,
78
+ "rewards/margins_min": 0.0029209901113063097,
79
+ "rewards/margins_std": 0.009019283577799797,
80
+ "rewards/rejected": -0.0025009517557919025,
81
+ "step": 30
82
+ },
83
+ {
84
+ "epoch": 0.11,
85
+ "grad_norm": 1.9265009094442067,
86
+ "learning_rate": 1.869158878504673e-06,
87
+ "logits/chosen": -2.7316184043884277,
88
+ "logits/rejected": -2.7654078006744385,
89
+ "logps/chosen": -305.0208740234375,
90
+ "logps/rejected": -318.15576171875,
91
+ "loss": 0.6783,
92
+ "rewards/accuracies": 0.8999999761581421,
93
+ "rewards/chosen": 0.01904786378145218,
94
+ "rewards/margins": 0.02529343031346798,
95
+ "rewards/margins_max": 0.03756815567612648,
96
+ "rewards/margins_min": 0.013018706813454628,
97
+ "rewards/margins_std": 0.017359081655740738,
98
+ "rewards/rejected": -0.006245566997677088,
99
+ "step": 40
100
+ },
101
+ {
102
+ "epoch": 0.14,
103
+ "grad_norm": 2.2762718753507225,
104
+ "learning_rate": 2.3364485981308413e-06,
105
+ "logits/chosen": -2.7840142250061035,
106
+ "logits/rejected": -2.695960521697998,
107
+ "logps/chosen": -241.2890167236328,
108
+ "logps/rejected": -175.4230194091797,
109
+ "loss": 0.6612,
110
+ "rewards/accuracies": 0.8999999761581421,
111
+ "rewards/chosen": 0.039340294897556305,
112
+ "rewards/margins": 0.05124547332525253,
113
+ "rewards/margins_max": 0.07519420981407166,
114
+ "rewards/margins_min": 0.027296727523207664,
115
+ "rewards/margins_std": 0.03386863321065903,
116
+ "rewards/rejected": -0.011905180290341377,
117
+ "step": 50
118
+ },
119
+ {
120
+ "epoch": 0.17,
121
+ "grad_norm": 2.278929693070735,
122
+ "learning_rate": 2.8037383177570094e-06,
123
+ "logits/chosen": -2.7337279319763184,
124
+ "logits/rejected": -2.6699888706207275,
125
+ "logps/chosen": -257.01812744140625,
126
+ "logps/rejected": -237.2047119140625,
127
+ "loss": 0.636,
128
+ "rewards/accuracies": 0.8500000238418579,
129
+ "rewards/chosen": 0.10417316108942032,
130
+ "rewards/margins": 0.12125153839588165,
131
+ "rewards/margins_max": 0.19414573907852173,
132
+ "rewards/margins_min": 0.04835732653737068,
133
+ "rewards/margins_std": 0.10308797657489777,
134
+ "rewards/rejected": -0.01707836613059044,
135
+ "step": 60
136
+ },
137
+ {
138
+ "epoch": 0.2,
139
+ "grad_norm": 1.9261684067245632,
140
+ "learning_rate": 3.2710280373831774e-06,
141
+ "logits/chosen": -2.6452136039733887,
142
+ "logits/rejected": -2.649742364883423,
143
+ "logps/chosen": -320.9119567871094,
144
+ "logps/rejected": -220.4650421142578,
145
+ "loss": 0.6066,
146
+ "rewards/accuracies": 1.0,
147
+ "rewards/chosen": 0.1302875578403473,
148
+ "rewards/margins": 0.186918243765831,
149
+ "rewards/margins_max": 0.2680404782295227,
150
+ "rewards/margins_min": 0.10579605400562286,
151
+ "rewards/margins_std": 0.11472412198781967,
152
+ "rewards/rejected": -0.0566307008266449,
153
+ "step": 70
154
+ },
155
+ {
156
+ "epoch": 0.23,
157
+ "grad_norm": 1.899604093562728,
158
+ "learning_rate": 3.738317757009346e-06,
159
+ "logits/chosen": -2.856180191040039,
160
+ "logits/rejected": -2.781043291091919,
161
+ "logps/chosen": -324.0494079589844,
162
+ "logps/rejected": -299.65643310546875,
163
+ "loss": 0.5744,
164
+ "rewards/accuracies": 0.949999988079071,
165
+ "rewards/chosen": 0.12999968230724335,
166
+ "rewards/margins": 0.25530779361724854,
167
+ "rewards/margins_max": 0.37520045042037964,
168
+ "rewards/margins_min": 0.13541515171527863,
169
+ "rewards/margins_std": 0.16955383121967316,
170
+ "rewards/rejected": -0.12530812621116638,
171
+ "step": 80
172
+ },
173
+ {
174
+ "epoch": 0.25,
175
+ "grad_norm": 2.438635537156189,
176
+ "learning_rate": 4.205607476635514e-06,
177
+ "logits/chosen": -2.6444644927978516,
178
+ "logits/rejected": -2.6486284732818604,
179
+ "logps/chosen": -272.92718505859375,
180
+ "logps/rejected": -228.8600616455078,
181
+ "loss": 0.523,
182
+ "rewards/accuracies": 0.8500000238418579,
183
+ "rewards/chosen": 0.04048062115907669,
184
+ "rewards/margins": 0.29693564772605896,
185
+ "rewards/margins_max": 0.4845455288887024,
186
+ "rewards/margins_min": 0.1093258485198021,
187
+ "rewards/margins_std": 0.265320360660553,
188
+ "rewards/rejected": -0.25645506381988525,
189
+ "step": 90
190
+ },
191
+ {
192
+ "epoch": 0.28,
193
+ "grad_norm": 2.676590355830037,
194
+ "learning_rate": 4.6728971962616825e-06,
195
+ "logits/chosen": -2.7964138984680176,
196
+ "logits/rejected": -2.735548973083496,
197
+ "logps/chosen": -437.5833435058594,
198
+ "logps/rejected": -379.58123779296875,
199
+ "loss": 0.4777,
200
+ "rewards/accuracies": 0.8999999761581421,
201
+ "rewards/chosen": 0.20675165951251984,
202
+ "rewards/margins": 0.581081748008728,
203
+ "rewards/margins_max": 0.8298590779304504,
204
+ "rewards/margins_min": 0.3323042690753937,
205
+ "rewards/margins_std": 0.3518243730068207,
206
+ "rewards/rejected": -0.3743300139904022,
207
+ "step": 100
208
+ },
209
+ {
210
+ "epoch": 0.28,
211
+ "eval_logits/chosen": -2.670954704284668,
212
+ "eval_logits/rejected": -2.6312379837036133,
213
+ "eval_logps/chosen": -321.22222900390625,
214
+ "eval_logps/rejected": -301.6253967285156,
215
+ "eval_loss": 0.6754581928253174,
216
+ "eval_rewards/accuracies": 0.60317462682724,
217
+ "eval_rewards/chosen": -0.3600099980831146,
218
+ "eval_rewards/margins": 0.06441720575094223,
219
+ "eval_rewards/margins_max": 0.35590171813964844,
220
+ "eval_rewards/margins_min": -0.22098243236541748,
221
+ "eval_rewards/margins_std": 0.25287726521492004,
222
+ "eval_rewards/rejected": -0.42442721128463745,
223
+ "eval_runtime": 283.3412,
224
+ "eval_samples_per_second": 7.059,
225
+ "eval_steps_per_second": 0.222,
226
+ "step": 100
227
+ },
228
+ {
229
+ "epoch": 0.31,
230
+ "grad_norm": 2.5201742608505686,
231
+ "learning_rate": 4.999879018839288e-06,
232
+ "logits/chosen": -2.637324810028076,
233
+ "logits/rejected": -2.529784679412842,
234
+ "logps/chosen": -315.1212158203125,
235
+ "logps/rejected": -298.06903076171875,
236
+ "loss": 0.4234,
237
+ "rewards/accuracies": 1.0,
238
+ "rewards/chosen": 0.12577927112579346,
239
+ "rewards/margins": 0.6422899961471558,
240
+ "rewards/margins_max": 0.9393427968025208,
241
+ "rewards/margins_min": 0.3452370762825012,
242
+ "rewards/margins_std": 0.42009615898132324,
243
+ "rewards/rejected": -0.5165106058120728,
244
+ "step": 110
245
+ },
246
+ {
247
+ "epoch": 0.34,
248
+ "grad_norm": 6.261552433653697,
249
+ "learning_rate": 4.99772856836941e-06,
250
+ "logits/chosen": -2.7266364097595215,
251
+ "logits/rejected": -2.7145590782165527,
252
+ "logps/chosen": -347.3783264160156,
253
+ "logps/rejected": -389.63299560546875,
254
+ "loss": 0.3956,
255
+ "rewards/accuracies": 1.0,
256
+ "rewards/chosen": 0.24562442302703857,
257
+ "rewards/margins": 0.8258479237556458,
258
+ "rewards/margins_max": 1.141953468322754,
259
+ "rewards/margins_min": 0.5097422003746033,
260
+ "rewards/margins_std": 0.44704094529151917,
261
+ "rewards/rejected": -0.5802234411239624,
262
+ "step": 120
263
+ },
264
+ {
265
+ "epoch": 0.37,
266
+ "grad_norm": 2.5117234961196413,
267
+ "learning_rate": 4.992892309373227e-06,
268
+ "logits/chosen": -2.5119540691375732,
269
+ "logits/rejected": -2.4644391536712646,
270
+ "logps/chosen": -370.6039733886719,
271
+ "logps/rejected": -361.2594909667969,
272
+ "loss": 0.3218,
273
+ "rewards/accuracies": 1.0,
274
+ "rewards/chosen": 0.20368309319019318,
275
+ "rewards/margins": 1.2330464124679565,
276
+ "rewards/margins_max": 1.4150781631469727,
277
+ "rewards/margins_min": 1.0510146617889404,
278
+ "rewards/margins_std": 0.25743168592453003,
279
+ "rewards/rejected": -1.0293633937835693,
280
+ "step": 130
281
+ },
282
+ {
283
+ "epoch": 0.39,
284
+ "grad_norm": 5.066809244826759,
285
+ "learning_rate": 4.985375442281969e-06,
286
+ "logits/chosen": -2.325155019760132,
287
+ "logits/rejected": -2.2663826942443848,
288
+ "logps/chosen": -366.98211669921875,
289
+ "logps/rejected": -403.01495361328125,
290
+ "loss": 0.2761,
291
+ "rewards/accuracies": 0.8999999761581421,
292
+ "rewards/chosen": -0.1577085703611374,
293
+ "rewards/margins": 1.5553103685379028,
294
+ "rewards/margins_max": 2.037226676940918,
295
+ "rewards/margins_min": 1.0733940601348877,
296
+ "rewards/margins_std": 0.681532621383667,
297
+ "rewards/rejected": -1.7130190134048462,
298
+ "step": 140
299
+ },
300
+ {
301
+ "epoch": 0.42,
302
+ "grad_norm": 7.190427764349362,
303
+ "learning_rate": 4.9751860499858175e-06,
304
+ "logits/chosen": -2.1403324604034424,
305
+ "logits/rejected": -2.041670560836792,
306
+ "logps/chosen": -324.15667724609375,
307
+ "logps/rejected": -441.0560607910156,
308
+ "loss": 0.2399,
309
+ "rewards/accuracies": 1.0,
310
+ "rewards/chosen": -0.27334439754486084,
311
+ "rewards/margins": 1.659519910812378,
312
+ "rewards/margins_max": 2.2249293327331543,
313
+ "rewards/margins_min": 1.0941104888916016,
314
+ "rewards/margins_std": 0.7996099591255188,
315
+ "rewards/rejected": -1.9328645467758179,
316
+ "step": 150
317
+ },
318
+ {
319
+ "epoch": 0.45,
320
+ "grad_norm": 7.116224539942571,
321
+ "learning_rate": 4.962335089142376e-06,
322
+ "logits/chosen": -1.9535696506500244,
323
+ "logits/rejected": -1.7718425989151,
324
+ "logps/chosen": -358.6165466308594,
325
+ "logps/rejected": -501.46856689453125,
326
+ "loss": 0.1556,
327
+ "rewards/accuracies": 0.949999988079071,
328
+ "rewards/chosen": -0.26896899938583374,
329
+ "rewards/margins": 2.3143906593322754,
330
+ "rewards/margins_max": 2.8530867099761963,
331
+ "rewards/margins_min": 1.7756941318511963,
332
+ "rewards/margins_std": 0.7618317008018494,
333
+ "rewards/rejected": -2.5833592414855957,
334
+ "step": 160
335
+ },
336
+ {
337
+ "epoch": 0.48,
338
+ "grad_norm": 12.210481387434758,
339
+ "learning_rate": 4.946836378394967e-06,
340
+ "logits/chosen": -1.838096022605896,
341
+ "logits/rejected": -1.5799922943115234,
342
+ "logps/chosen": -445.1002502441406,
343
+ "logps/rejected": -597.6307373046875,
344
+ "loss": 0.1406,
345
+ "rewards/accuracies": 1.0,
346
+ "rewards/chosen": -0.4461892545223236,
347
+ "rewards/margins": 3.19466233253479,
348
+ "rewards/margins_max": 4.110939979553223,
349
+ "rewards/margins_min": 2.2783844470977783,
350
+ "rewards/margins_std": 1.2958126068115234,
351
+ "rewards/rejected": -3.6408514976501465,
352
+ "step": 170
353
+ },
354
+ {
355
+ "epoch": 0.51,
356
+ "grad_norm": 27.562973883397905,
357
+ "learning_rate": 4.928706583513441e-06,
358
+ "logits/chosen": -1.3463890552520752,
359
+ "logits/rejected": -1.2715332508087158,
360
+ "logps/chosen": -605.5383911132812,
361
+ "logps/rejected": -967.7098388671875,
362
+ "loss": 0.1672,
363
+ "rewards/accuracies": 0.949999988079071,
364
+ "rewards/chosen": -2.81402325630188,
365
+ "rewards/margins": 3.0660033226013184,
366
+ "rewards/margins_max": 3.8246688842773438,
367
+ "rewards/margins_min": 2.307338237762451,
368
+ "rewards/margins_std": 1.072914719581604,
369
+ "rewards/rejected": -5.880026817321777,
370
+ "step": 180
371
+ },
372
+ {
373
+ "epoch": 0.54,
374
+ "grad_norm": 3.9080684244028343,
375
+ "learning_rate": 4.907965199473471e-06,
376
+ "logits/chosen": -1.3362934589385986,
377
+ "logits/rejected": -1.0377042293548584,
378
+ "logps/chosen": -732.0992431640625,
379
+ "logps/rejected": -907.0653076171875,
380
+ "loss": 0.131,
381
+ "rewards/accuracies": 1.0,
382
+ "rewards/chosen": -2.7598698139190674,
383
+ "rewards/margins": 4.08551549911499,
384
+ "rewards/margins_max": 4.806515693664551,
385
+ "rewards/margins_min": 3.3645145893096924,
386
+ "rewards/margins_std": 1.019648551940918,
387
+ "rewards/rejected": -6.8453850746154785,
388
+ "step": 190
389
+ },
390
+ {
391
+ "epoch": 0.56,
392
+ "grad_norm": 42.83035382744783,
393
+ "learning_rate": 4.884634529493591e-06,
394
+ "logits/chosen": -1.4783378839492798,
395
+ "logits/rejected": -1.2933928966522217,
396
+ "logps/chosen": -735.5909423828125,
397
+ "logps/rejected": -1023.0391845703125,
398
+ "loss": 0.1416,
399
+ "rewards/accuracies": 1.0,
400
+ "rewards/chosen": -4.047953128814697,
401
+ "rewards/margins": 4.137004375457764,
402
+ "rewards/margins_max": 5.257144927978516,
403
+ "rewards/margins_min": 3.0168652534484863,
404
+ "rewards/margins_std": 1.5841166973114014,
405
+ "rewards/rejected": -8.184958457946777,
406
+ "step": 200
407
+ },
408
+ {
409
+ "epoch": 0.56,
410
+ "eval_logits/chosen": -1.4607926607131958,
411
+ "eval_logits/rejected": -1.4055131673812866,
412
+ "eval_logps/chosen": -955.6170043945312,
413
+ "eval_logps/rejected": -980.7882080078125,
414
+ "eval_loss": 0.9053447246551514,
415
+ "eval_rewards/accuracies": 0.6269841194152832,
416
+ "eval_rewards/chosen": -6.703957557678223,
417
+ "eval_rewards/margins": 0.5120973587036133,
418
+ "eval_rewards/margins_max": 2.7698452472686768,
419
+ "eval_rewards/margins_min": -1.7983918190002441,
420
+ "eval_rewards/margins_std": 2.0239174365997314,
421
+ "eval_rewards/rejected": -7.216055393218994,
422
+ "eval_runtime": 281.707,
423
+ "eval_samples_per_second": 7.1,
424
+ "eval_steps_per_second": 0.224,
425
+ "step": 200
426
+ },
427
+ {
428
+ "epoch": 0.59,
429
+ "grad_norm": 11.323675041923366,
430
+ "learning_rate": 4.858739661052539e-06,
431
+ "logits/chosen": -1.350990891456604,
432
+ "logits/rejected": -1.2011955976486206,
433
+ "logps/chosen": -738.5956420898438,
434
+ "logps/rejected": -1072.1134033203125,
435
+ "loss": 0.1359,
436
+ "rewards/accuracies": 0.8999999761581421,
437
+ "rewards/chosen": -3.817591905593872,
438
+ "rewards/margins": 4.215450286865234,
439
+ "rewards/margins_max": 6.099488735198975,
440
+ "rewards/margins_min": 2.3314108848571777,
441
+ "rewards/margins_std": 2.664433240890503,
442
+ "rewards/rejected": -8.033041000366211,
443
+ "step": 210
444
+ },
445
+ {
446
+ "epoch": 0.62,
447
+ "grad_norm": 2.145861603880887,
448
+ "learning_rate": 4.830308438912687e-06,
449
+ "logits/chosen": -1.5942816734313965,
450
+ "logits/rejected": -1.3603050708770752,
451
+ "logps/chosen": -854.7412109375,
452
+ "logps/rejected": -1243.659423828125,
453
+ "loss": 0.0774,
454
+ "rewards/accuracies": 1.0,
455
+ "rewards/chosen": -4.454717636108398,
456
+ "rewards/margins": 5.1989240646362305,
457
+ "rewards/margins_max": 6.37256383895874,
458
+ "rewards/margins_min": 4.025284290313721,
459
+ "rewards/margins_std": 1.6597778797149658,
460
+ "rewards/rejected": -9.653641700744629,
461
+ "step": 220
462
+ },
463
+ {
464
+ "epoch": 0.65,
465
+ "grad_norm": 4.962012371252307,
466
+ "learning_rate": 4.799371435178544e-06,
467
+ "logits/chosen": -1.7452170848846436,
468
+ "logits/rejected": -1.609167456626892,
469
+ "logps/chosen": -769.598876953125,
470
+ "logps/rejected": -1189.131103515625,
471
+ "loss": 0.104,
472
+ "rewards/accuracies": 1.0,
473
+ "rewards/chosen": -3.888404130935669,
474
+ "rewards/margins": 4.6370439529418945,
475
+ "rewards/margins_max": 5.980400085449219,
476
+ "rewards/margins_min": 3.293687343597412,
477
+ "rewards/margins_std": 1.8997926712036133,
478
+ "rewards/rejected": -8.5254487991333,
479
+ "step": 230
480
+ },
481
+ {
482
+ "epoch": 0.68,
483
+ "grad_norm": 2.001005873458455,
484
+ "learning_rate": 4.765961916422575e-06,
485
+ "logits/chosen": -1.6597576141357422,
486
+ "logits/rejected": -1.444551944732666,
487
+ "logps/chosen": -838.1024169921875,
488
+ "logps/rejected": -1238.279052734375,
489
+ "loss": 0.0955,
490
+ "rewards/accuracies": 0.949999988079071,
491
+ "rewards/chosen": -4.969546318054199,
492
+ "rewards/margins": 4.475127696990967,
493
+ "rewards/margins_max": 5.603785514831543,
494
+ "rewards/margins_min": 3.346471071243286,
495
+ "rewards/margins_std": 1.59616219997406,
496
+ "rewards/rejected": -9.444674491882324,
497
+ "step": 240
498
+ },
499
+ {
500
+ "epoch": 0.7,
501
+ "grad_norm": 17.06427775193877,
502
+ "learning_rate": 4.730115807913627e-06,
503
+ "logits/chosen": -1.6722052097320557,
504
+ "logits/rejected": -1.393259882926941,
505
+ "logps/chosen": -916.7503662109375,
506
+ "logps/rejected": -1274.2889404296875,
507
+ "loss": 0.0866,
508
+ "rewards/accuracies": 0.949999988079071,
509
+ "rewards/chosen": -5.015233039855957,
510
+ "rewards/margins": 5.172359943389893,
511
+ "rewards/margins_max": 6.111589431762695,
512
+ "rewards/margins_min": 4.233129501342773,
513
+ "rewards/margins_std": 1.328271508216858,
514
+ "rewards/rejected": -10.187592506408691,
515
+ "step": 250
516
+ },
517
+ {
518
+ "epoch": 0.73,
519
+ "grad_norm": 1.9182916124757974,
520
+ "learning_rate": 4.691871654986485e-06,
521
+ "logits/chosen": -1.7107824087142944,
522
+ "logits/rejected": -1.6128714084625244,
523
+ "logps/chosen": -878.5494384765625,
524
+ "logps/rejected": -1255.8555908203125,
525
+ "loss": 0.079,
526
+ "rewards/accuracies": 0.949999988079071,
527
+ "rewards/chosen": -5.5672712326049805,
528
+ "rewards/margins": 4.748871803283691,
529
+ "rewards/margins_max": 5.786838531494141,
530
+ "rewards/margins_min": 3.7109055519104004,
531
+ "rewards/margins_std": 1.4679062366485596,
532
+ "rewards/rejected": -10.316143035888672,
533
+ "step": 260
534
+ },
535
+ {
536
+ "epoch": 0.76,
537
+ "grad_norm": 14.786553042508123,
538
+ "learning_rate": 4.651270581594054e-06,
539
+ "logits/chosen": -1.8650672435760498,
540
+ "logits/rejected": -1.613443374633789,
541
+ "logps/chosen": -834.0842895507812,
542
+ "logps/rejected": -1138.3665771484375,
543
+ "loss": 0.0875,
544
+ "rewards/accuracies": 1.0,
545
+ "rewards/chosen": -4.050877571105957,
546
+ "rewards/margins": 5.007403373718262,
547
+ "rewards/margins_max": 5.84472131729126,
548
+ "rewards/margins_min": 4.170086860656738,
549
+ "rewards/margins_std": 1.184145212173462,
550
+ "rewards/rejected": -9.058280944824219,
551
+ "step": 270
552
+ },
553
+ {
554
+ "epoch": 0.79,
555
+ "grad_norm": 5.30439894597876,
556
+ "learning_rate": 4.6083562460867545e-06,
557
+ "logits/chosen": -1.6716859340667725,
558
+ "logits/rejected": -1.5429413318634033,
559
+ "logps/chosen": -701.3162841796875,
560
+ "logps/rejected": -1120.8736572265625,
561
+ "loss": 0.0896,
562
+ "rewards/accuracies": 0.8500000238418579,
563
+ "rewards/chosen": -3.7223961353302,
564
+ "rewards/margins": 4.8294267654418945,
565
+ "rewards/margins_max": 6.9812211990356445,
566
+ "rewards/margins_min": 2.6776328086853027,
567
+ "rewards/margins_std": 3.0430965423583984,
568
+ "rewards/rejected": -8.551824569702148,
569
+ "step": 280
570
+ },
571
+ {
572
+ "epoch": 0.82,
573
+ "grad_norm": 12.724182318476426,
574
+ "learning_rate": 4.563174794266684e-06,
575
+ "logits/chosen": -1.8460794687271118,
576
+ "logits/rejected": -1.6377445459365845,
577
+ "logps/chosen": -858.4215698242188,
578
+ "logps/rejected": -1289.198974609375,
579
+ "loss": 0.0576,
580
+ "rewards/accuracies": 1.0,
581
+ "rewards/chosen": -4.965760231018066,
582
+ "rewards/margins": 5.2121992111206055,
583
+ "rewards/margins_max": 6.927371025085449,
584
+ "rewards/margins_min": 3.49702525138855,
585
+ "rewards/margins_std": 2.4256205558776855,
586
+ "rewards/rejected": -10.177958488464355,
587
+ "step": 290
588
+ },
589
+ {
590
+ "epoch": 0.85,
591
+ "grad_norm": 5.778488241840074,
592
+ "learning_rate": 4.5157748097670125e-06,
593
+ "logits/chosen": -1.7077114582061768,
594
+ "logits/rejected": -1.5558173656463623,
595
+ "logps/chosen": -739.67333984375,
596
+ "logps/rejected": -1423.210693359375,
597
+ "loss": 0.0426,
598
+ "rewards/accuracies": 1.0,
599
+ "rewards/chosen": -4.0192999839782715,
600
+ "rewards/margins": 7.085653781890869,
601
+ "rewards/margins_max": 7.969016075134277,
602
+ "rewards/margins_min": 6.202291488647461,
603
+ "rewards/margins_std": 1.2492637634277344,
604
+ "rewards/rejected": -11.104954719543457,
605
+ "step": 300
606
+ },
607
+ {
608
+ "epoch": 0.85,
609
+ "eval_logits/chosen": -1.7101370096206665,
610
+ "eval_logits/rejected": -1.6507517099380493,
611
+ "eval_logps/chosen": -1041.5823974609375,
612
+ "eval_logps/rejected": -1121.1776123046875,
613
+ "eval_loss": 0.9213338494300842,
614
+ "eval_rewards/accuracies": 0.6785714030265808,
615
+ "eval_rewards/chosen": -7.563611030578613,
616
+ "eval_rewards/margins": 1.0563386678695679,
617
+ "eval_rewards/margins_max": 4.265172481536865,
618
+ "eval_rewards/margins_min": -2.1614327430725098,
619
+ "eval_rewards/margins_std": 2.8564813137054443,
620
+ "eval_rewards/rejected": -8.619950294494629,
621
+ "eval_runtime": 281.7456,
622
+ "eval_samples_per_second": 7.099,
623
+ "eval_steps_per_second": 0.224,
624
+ "step": 300
625
+ },
626
+ {
627
+ "epoch": 0.87,
628
+ "grad_norm": 12.853675144552225,
629
+ "learning_rate": 4.466207261809989e-06,
630
+ "logits/chosen": -1.9336496591567993,
631
+ "logits/rejected": -1.6221659183502197,
632
+ "logps/chosen": -901.4439697265625,
633
+ "logps/rejected": -1262.938720703125,
634
+ "loss": 0.0633,
635
+ "rewards/accuracies": 1.0,
636
+ "rewards/chosen": -4.771965980529785,
637
+ "rewards/margins": 5.247581958770752,
638
+ "rewards/margins_max": 6.526535987854004,
639
+ "rewards/margins_min": 3.968628406524658,
640
+ "rewards/margins_std": 1.8087135553359985,
641
+ "rewards/rejected": -10.019546508789062,
642
+ "step": 310
643
+ },
644
+ {
645
+ "epoch": 0.9,
646
+ "grad_norm": 12.332833632235157,
647
+ "learning_rate": 4.414525450399713e-06,
648
+ "logits/chosen": -1.6821091175079346,
649
+ "logits/rejected": -1.511785626411438,
650
+ "logps/chosen": -956.3181762695312,
651
+ "logps/rejected": -1481.1754150390625,
652
+ "loss": 0.0978,
653
+ "rewards/accuracies": 1.0,
654
+ "rewards/chosen": -6.100653171539307,
655
+ "rewards/margins": 6.3301496505737305,
656
+ "rewards/margins_max": 8.061585426330566,
657
+ "rewards/margins_min": 4.598714828491211,
658
+ "rewards/margins_std": 2.4486188888549805,
659
+ "rewards/rejected": -12.430803298950195,
660
+ "step": 320
661
+ },
662
+ {
663
+ "epoch": 0.93,
664
+ "grad_norm": 3.9044155848949162,
665
+ "learning_rate": 4.360784949008615e-06,
666
+ "logits/chosen": -1.768561601638794,
667
+ "logits/rejected": -1.5437813997268677,
668
+ "logps/chosen": -1006.9339599609375,
669
+ "logps/rejected": -1522.902587890625,
670
+ "loss": 0.1091,
671
+ "rewards/accuracies": 0.949999988079071,
672
+ "rewards/chosen": -6.272473335266113,
673
+ "rewards/margins": 6.482227325439453,
674
+ "rewards/margins_max": 8.401371002197266,
675
+ "rewards/margins_min": 4.563082695007324,
676
+ "rewards/margins_std": 2.7140800952911377,
677
+ "rewards/rejected": -12.754700660705566,
678
+ "step": 330
679
+ },
680
+ {
681
+ "epoch": 0.96,
682
+ "grad_norm": 4.01171637277802,
683
+ "learning_rate": 4.30504354481929e-06,
684
+ "logits/chosen": -1.7665777206420898,
685
+ "logits/rejected": -1.5484760999679565,
686
+ "logps/chosen": -942.85888671875,
687
+ "logps/rejected": -1260.244384765625,
688
+ "loss": 0.0741,
689
+ "rewards/accuracies": 1.0,
690
+ "rewards/chosen": -5.743631839752197,
691
+ "rewards/margins": 4.815784931182861,
692
+ "rewards/margins_max": 6.530648708343506,
693
+ "rewards/margins_min": 3.1009204387664795,
694
+ "rewards/margins_std": 2.425184488296509,
695
+ "rewards/rejected": -10.559415817260742,
696
+ "step": 340
697
+ },
698
+ {
699
+ "epoch": 0.99,
700
+ "grad_norm": 12.659683176327913,
701
+ "learning_rate": 4.247361176585904e-06,
702
+ "logits/chosen": -1.831321120262146,
703
+ "logits/rejected": -1.6549314260482788,
704
+ "logps/chosen": -909.5006713867188,
705
+ "logps/rejected": -1532.635986328125,
706
+ "loss": 0.0943,
707
+ "rewards/accuracies": 1.0,
708
+ "rewards/chosen": -4.810971736907959,
709
+ "rewards/margins": 7.531504154205322,
710
+ "rewards/margins_max": 8.548044204711914,
711
+ "rewards/margins_min": 6.514962673187256,
712
+ "rewards/margins_std": 1.4376055002212524,
713
+ "rewards/rejected": -12.342476844787598,
714
+ "step": 350
715
+ },
716
+ {
717
+ "epoch": 1.01,
718
+ "grad_norm": 3.001942641389469,
719
+ "learning_rate": 4.187799870182038e-06,
720
+ "logits/chosen": -1.7835716009140015,
721
+ "logits/rejected": -1.5620241165161133,
722
+ "logps/chosen": -896.9002075195312,
723
+ "logps/rejected": -1392.6307373046875,
724
+ "loss": 0.0555,
725
+ "rewards/accuracies": 1.0,
726
+ "rewards/chosen": -5.5069427490234375,
727
+ "rewards/margins": 6.391612529754639,
728
+ "rewards/margins_max": 7.894322872161865,
729
+ "rewards/margins_min": 4.888903617858887,
730
+ "rewards/margins_std": 2.125152349472046,
731
+ "rewards/rejected": -11.898555755615234,
732
+ "step": 360
733
+ },
734
+ {
735
+ "epoch": 1.04,
736
+ "grad_norm": 34.14422714120664,
737
+ "learning_rate": 4.1264236719042365e-06,
738
+ "logits/chosen": -1.5919651985168457,
739
+ "logits/rejected": -1.5377094745635986,
740
+ "logps/chosen": -915.7950439453125,
741
+ "logps/rejected": -1490.6865234375,
742
+ "loss": 0.0808,
743
+ "rewards/accuracies": 1.0,
744
+ "rewards/chosen": -5.6413750648498535,
745
+ "rewards/margins": 6.627654075622559,
746
+ "rewards/margins_max": 8.43530559539795,
747
+ "rewards/margins_min": 4.820002555847168,
748
+ "rewards/margins_std": 2.5564048290252686,
749
+ "rewards/rejected": -12.26902961730957,
750
+ "step": 370
751
+ },
752
+ {
753
+ "epoch": 1.07,
754
+ "grad_norm": 2.1290534012360847,
755
+ "learning_rate": 4.063298579603001e-06,
756
+ "logits/chosen": -1.8492443561553955,
757
+ "logits/rejected": -1.5422757863998413,
758
+ "logps/chosen": -937.0126953125,
759
+ "logps/rejected": -1458.616455078125,
760
+ "loss": 0.0231,
761
+ "rewards/accuracies": 1.0,
762
+ "rewards/chosen": -5.460320949554443,
763
+ "rewards/margins": 7.281059265136719,
764
+ "rewards/margins_max": 8.49816608428955,
765
+ "rewards/margins_min": 6.0639543533325195,
766
+ "rewards/margins_std": 1.7212467193603516,
767
+ "rewards/rejected": -12.74138069152832,
768
+ "step": 380
769
+ },
770
+ {
771
+ "epoch": 1.1,
772
+ "grad_norm": 5.584775064800199,
773
+ "learning_rate": 3.998492471715272e-06,
774
+ "logits/chosen": -1.8397998809814453,
775
+ "logits/rejected": -1.6857073307037354,
776
+ "logps/chosen": -913.9352416992188,
777
+ "logps/rejected": -1781.8939208984375,
778
+ "loss": 0.0278,
779
+ "rewards/accuracies": 1.0,
780
+ "rewards/chosen": -5.164222717285156,
781
+ "rewards/margins": 9.338297843933105,
782
+ "rewards/margins_max": 11.463502883911133,
783
+ "rewards/margins_min": 7.2130937576293945,
784
+ "rewards/margins_std": 3.005493640899658,
785
+ "rewards/rejected": -14.502520561218262,
786
+ "step": 390
787
+ },
788
+ {
789
+ "epoch": 1.13,
790
+ "grad_norm": 0.9893449328848739,
791
+ "learning_rate": 3.932075034274723e-06,
792
+ "logits/chosen": -1.5922348499298096,
793
+ "logits/rejected": -1.4688727855682373,
794
+ "logps/chosen": -871.9650268554688,
795
+ "logps/rejected": -1526.658935546875,
796
+ "loss": 0.0537,
797
+ "rewards/accuracies": 1.0,
798
+ "rewards/chosen": -5.7322564125061035,
799
+ "rewards/margins": 7.261972904205322,
800
+ "rewards/margins_max": 8.895970344543457,
801
+ "rewards/margins_min": 5.627974510192871,
802
+ "rewards/margins_std": 2.3108224868774414,
803
+ "rewards/rejected": -12.994227409362793,
804
+ "step": 400
805
+ },
806
+ {
807
+ "epoch": 1.13,
808
+ "eval_logits/chosen": -1.6575742959976196,
809
+ "eval_logits/rejected": -1.5926053524017334,
810
+ "eval_logps/chosen": -1505.182861328125,
811
+ "eval_logps/rejected": -1577.3876953125,
812
+ "eval_loss": 1.1419050693511963,
813
+ "eval_rewards/accuracies": 0.64682537317276,
814
+ "eval_rewards/chosen": -12.199617385864258,
815
+ "eval_rewards/margins": 0.9824325442314148,
816
+ "eval_rewards/margins_max": 5.48787260055542,
817
+ "eval_rewards/margins_min": -3.0621237754821777,
818
+ "eval_rewards/margins_std": 3.7889323234558105,
819
+ "eval_rewards/rejected": -13.182049751281738,
820
+ "eval_runtime": 282.4562,
821
+ "eval_samples_per_second": 7.081,
822
+ "eval_steps_per_second": 0.223,
823
+ "step": 400
824
+ },
825
+ {
826
+ "epoch": 1.15,
827
+ "grad_norm": 0.9794540017501292,
828
+ "learning_rate": 3.864117685978339e-06,
829
+ "logits/chosen": -1.6234560012817383,
830
+ "logits/rejected": -1.4928052425384521,
831
+ "logps/chosen": -1131.8265380859375,
832
+ "logps/rejected": -1794.791015625,
833
+ "loss": 0.0776,
834
+ "rewards/accuracies": 0.8999999761581421,
835
+ "rewards/chosen": -8.371360778808594,
836
+ "rewards/margins": 7.494576454162598,
837
+ "rewards/margins_max": 10.048029899597168,
838
+ "rewards/margins_min": 4.941121578216553,
839
+ "rewards/margins_std": 3.61112904548645,
840
+ "rewards/rejected": -15.865939140319824,
841
+ "step": 410
842
+ },
843
+ {
844
+ "epoch": 1.18,
845
+ "grad_norm": 5.020955613205059,
846
+ "learning_rate": 3.794693501389861e-06,
847
+ "logits/chosen": -1.7987747192382812,
848
+ "logits/rejected": -1.6164734363555908,
849
+ "logps/chosen": -1037.0328369140625,
850
+ "logps/rejected": -1667.540283203125,
851
+ "loss": 0.054,
852
+ "rewards/accuracies": 0.949999988079071,
853
+ "rewards/chosen": -6.372786045074463,
854
+ "rewards/margins": 7.646895408630371,
855
+ "rewards/margins_max": 8.891626358032227,
856
+ "rewards/margins_min": 6.402162075042725,
857
+ "rewards/margins_std": 1.7603172063827515,
858
+ "rewards/rejected": -14.019680976867676,
859
+ "step": 420
860
+ },
861
+ {
862
+ "epoch": 1.21,
863
+ "grad_norm": 15.978168852619268,
864
+ "learning_rate": 3.7238771323626822e-06,
865
+ "logits/chosen": -1.6425611972808838,
866
+ "logits/rejected": -1.4570006132125854,
867
+ "logps/chosen": -1138.6572265625,
868
+ "logps/rejected": -1780.6002197265625,
869
+ "loss": 0.044,
870
+ "rewards/accuracies": 1.0,
871
+ "rewards/chosen": -7.515681266784668,
872
+ "rewards/margins": 7.655673027038574,
873
+ "rewards/margins_max": 9.563043594360352,
874
+ "rewards/margins_min": 5.748303413391113,
875
+ "rewards/margins_std": 2.6974284648895264,
876
+ "rewards/rejected": -15.171353340148926,
877
+ "step": 430
878
+ },
879
+ {
880
+ "epoch": 1.24,
881
+ "grad_norm": 1.4394479904186748,
882
+ "learning_rate": 3.651744727766676e-06,
883
+ "logits/chosen": -1.565843939781189,
884
+ "logits/rejected": -1.3031253814697266,
885
+ "logps/chosen": -1135.116943359375,
886
+ "logps/rejected": -1897.188232421875,
887
+ "loss": 0.0356,
888
+ "rewards/accuracies": 1.0,
889
+ "rewards/chosen": -8.198633193969727,
890
+ "rewards/margins": 8.82483196258545,
891
+ "rewards/margins_max": 11.5381441116333,
892
+ "rewards/margins_min": 6.1115217208862305,
893
+ "rewards/margins_std": 3.8372015953063965,
894
+ "rewards/rejected": -17.023466110229492,
895
+ "step": 440
896
+ },
897
+ {
898
+ "epoch": 1.27,
899
+ "grad_norm": 2.5233082457705853,
900
+ "learning_rate": 3.57837385160529e-06,
901
+ "logits/chosen": -1.6333341598510742,
902
+ "logits/rejected": -1.419213056564331,
903
+ "logps/chosen": -991.2794799804688,
904
+ "logps/rejected": -1686.808837890625,
905
+ "loss": 0.0246,
906
+ "rewards/accuracies": 1.0,
907
+ "rewards/chosen": -6.5310492515563965,
908
+ "rewards/margins": 7.6606926918029785,
909
+ "rewards/margins_max": 9.670614242553711,
910
+ "rewards/margins_min": 5.650770664215088,
911
+ "rewards/margins_std": 2.842459201812744,
912
+ "rewards/rejected": -14.191740036010742,
913
+ "step": 450
914
+ },
915
+ {
916
+ "epoch": 1.3,
917
+ "grad_norm": 1.432241857413985,
918
+ "learning_rate": 3.503843399610941e-06,
919
+ "logits/chosen": -1.6662094593048096,
920
+ "logits/rejected": -1.5159740447998047,
921
+ "logps/chosen": -1023.26220703125,
922
+ "logps/rejected": -1997.1787109375,
923
+ "loss": 0.0208,
924
+ "rewards/accuracies": 1.0,
925
+ "rewards/chosen": -6.279843330383301,
926
+ "rewards/margins": 9.666014671325684,
927
+ "rewards/margins_max": 11.908063888549805,
928
+ "rewards/margins_min": 7.423966407775879,
929
+ "rewards/margins_std": 3.1707358360290527,
930
+ "rewards/rejected": -15.945857048034668,
931
+ "step": 460
932
+ },
933
+ {
934
+ "epoch": 1.32,
935
+ "grad_norm": 1.3845844015706055,
936
+ "learning_rate": 3.4282335144083985e-06,
937
+ "logits/chosen": -1.5941836833953857,
938
+ "logits/rejected": -1.34697425365448,
939
+ "logps/chosen": -1180.2171630859375,
940
+ "logps/rejected": -1964.836181640625,
941
+ "loss": 0.0304,
942
+ "rewards/accuracies": 1.0,
943
+ "rewards/chosen": -8.229662895202637,
944
+ "rewards/margins": 9.211896896362305,
945
+ "rewards/margins_max": 11.3733549118042,
946
+ "rewards/margins_min": 7.050437927246094,
947
+ "rewards/margins_std": 3.0567641258239746,
948
+ "rewards/rejected": -17.441558837890625,
949
+ "step": 470
950
+ },
951
+ {
952
+ "epoch": 1.35,
953
+ "grad_norm": 0.25091350074864577,
954
+ "learning_rate": 3.351625499337395e-06,
955
+ "logits/chosen": -1.7405236959457397,
956
+ "logits/rejected": -1.4616386890411377,
957
+ "logps/chosen": -1157.209716796875,
958
+ "logps/rejected": -1899.130126953125,
959
+ "loss": 0.014,
960
+ "rewards/accuracies": 1.0,
961
+ "rewards/chosen": -7.413580417633057,
962
+ "rewards/margins": 8.81358528137207,
963
+ "rewards/margins_max": 10.952999114990234,
964
+ "rewards/margins_min": 6.674172401428223,
965
+ "rewards/margins_std": 3.0255870819091797,
966
+ "rewards/rejected": -16.227169036865234,
967
+ "step": 480
968
+ },
969
+ {
970
+ "epoch": 1.38,
971
+ "grad_norm": 1.9987349085330508,
972
+ "learning_rate": 3.2741017310271056e-06,
973
+ "logits/chosen": -1.3325449228286743,
974
+ "logits/rejected": -1.044908881187439,
975
+ "logps/chosen": -1130.028076171875,
976
+ "logps/rejected": -2392.521728515625,
977
+ "loss": 0.0448,
978
+ "rewards/accuracies": 0.8999999761581421,
979
+ "rewards/chosen": -9.127466201782227,
980
+ "rewards/margins": 12.631993293762207,
981
+ "rewards/margins_max": 19.333314895629883,
982
+ "rewards/margins_min": 5.930669784545898,
983
+ "rewards/margins_std": 9.47710132598877,
984
+ "rewards/rejected": -21.759456634521484,
985
+ "step": 490
986
+ },
987
+ {
988
+ "epoch": 1.41,
989
+ "grad_norm": 1.7094204242814826,
990
+ "learning_rate": 3.195745570816532e-06,
991
+ "logits/chosen": -1.3385294675827026,
992
+ "logits/rejected": -1.144627571105957,
993
+ "logps/chosen": -1425.61474609375,
994
+ "logps/rejected": -2558.358642578125,
995
+ "loss": 0.0197,
996
+ "rewards/accuracies": 1.0,
997
+ "rewards/chosen": -10.41409969329834,
998
+ "rewards/margins": 12.790387153625488,
999
+ "rewards/margins_max": 14.778757095336914,
1000
+ "rewards/margins_min": 10.802019119262695,
1001
+ "rewards/margins_std": 2.811978340148926,
1002
+ "rewards/rejected": -23.204486846923828,
1003
+ "step": 500
1004
+ },
1005
+ {
1006
+ "epoch": 1.41,
1007
+ "eval_logits/chosen": -1.5026105642318726,
1008
+ "eval_logits/rejected": -1.4330366849899292,
1009
+ "eval_logps/chosen": -2000.166259765625,
1010
+ "eval_logps/rejected": -2146.479736328125,
1011
+ "eval_loss": 1.684375524520874,
1012
+ "eval_rewards/accuracies": 0.6666666865348816,
1013
+ "eval_rewards/chosen": -17.149450302124023,
1014
+ "eval_rewards/margins": 1.7235194444656372,
1015
+ "eval_rewards/margins_max": 9.41946029663086,
1016
+ "eval_rewards/margins_min": -5.146158218383789,
1017
+ "eval_rewards/margins_std": 6.577420711517334,
1018
+ "eval_rewards/rejected": -18.872970581054688,
1019
+ "eval_runtime": 282.6761,
1020
+ "eval_samples_per_second": 7.075,
1021
+ "eval_steps_per_second": 0.223,
1022
+ "step": 500
1023
+ },
1024
+ {
1025
+ "epoch": 1.44,
1026
+ "grad_norm": 19.195207569920772,
1027
+ "learning_rate": 3.116641275116018e-06,
1028
+ "logits/chosen": -1.2405312061309814,
1029
+ "logits/rejected": -0.9798258543014526,
1030
+ "logps/chosen": -1318.967041015625,
1031
+ "logps/rejected": -3077.10986328125,
1032
+ "loss": 0.0229,
1033
+ "rewards/accuracies": 1.0,
1034
+ "rewards/chosen": -10.26286792755127,
1035
+ "rewards/margins": 17.355688095092773,
1036
+ "rewards/margins_max": 25.170244216918945,
1037
+ "rewards/margins_min": 9.541135787963867,
1038
+ "rewards/margins_std": 11.051448822021484,
1039
+ "rewards/rejected": -27.618555068969727,
1040
+ "step": 510
1041
+ },
1042
+ {
1043
+ "epoch": 1.46,
1044
+ "grad_norm": 18.23076880980296,
1045
+ "learning_rate": 3.0368739048062956e-06,
1046
+ "logits/chosen": -1.6826045513153076,
1047
+ "logits/rejected": -1.4554195404052734,
1048
+ "logps/chosen": -1159.925048828125,
1049
+ "logps/rejected": -2069.19580078125,
1050
+ "loss": 0.0355,
1051
+ "rewards/accuracies": 1.0,
1052
+ "rewards/chosen": -8.183090209960938,
1053
+ "rewards/margins": 10.176679611206055,
1054
+ "rewards/margins_max": 13.777229309082031,
1055
+ "rewards/margins_min": 6.5761308670043945,
1056
+ "rewards/margins_std": 5.091946125030518,
1057
+ "rewards/rejected": -18.359769821166992,
1058
+ "step": 520
1059
+ },
1060
+ {
1061
+ "epoch": 1.49,
1062
+ "grad_norm": 7.345312333811953,
1063
+ "learning_rate": 2.956529233772492e-06,
1064
+ "logits/chosen": -1.6696984767913818,
1065
+ "logits/rejected": -1.566896915435791,
1066
+ "logps/chosen": -1206.398681640625,
1067
+ "logps/rejected": -2070.3857421875,
1068
+ "loss": 0.0184,
1069
+ "rewards/accuracies": 1.0,
1070
+ "rewards/chosen": -8.273930549621582,
1071
+ "rewards/margins": 9.733041763305664,
1072
+ "rewards/margins_max": 12.174661636352539,
1073
+ "rewards/margins_min": 7.291422367095947,
1074
+ "rewards/margins_std": 3.4529712200164795,
1075
+ "rewards/rejected": -18.006973266601562,
1076
+ "step": 530
1077
+ },
1078
+ {
1079
+ "epoch": 1.52,
1080
+ "grad_norm": 21.78105244485373,
1081
+ "learning_rate": 2.8756936566714317e-06,
1082
+ "logits/chosen": -1.8572250604629517,
1083
+ "logits/rejected": -1.5829768180847168,
1084
+ "logps/chosen": -1132.333740234375,
1085
+ "logps/rejected": -1908.844970703125,
1086
+ "loss": 0.0256,
1087
+ "rewards/accuracies": 1.0,
1088
+ "rewards/chosen": -7.327805519104004,
1089
+ "rewards/margins": 9.385960578918457,
1090
+ "rewards/margins_max": 10.629077911376953,
1091
+ "rewards/margins_min": 8.142843246459961,
1092
+ "rewards/margins_std": 1.7580335140228271,
1093
+ "rewards/rejected": -16.713764190673828,
1094
+ "step": 540
1095
+ },
1096
+ {
1097
+ "epoch": 1.55,
1098
+ "grad_norm": 0.0011589092808777935,
1099
+ "learning_rate": 2.794454096031429e-06,
1100
+ "logits/chosen": -1.7256653308868408,
1101
+ "logits/rejected": -1.5292785167694092,
1102
+ "logps/chosen": -1160.131591796875,
1103
+ "logps/rejected": -2000.1337890625,
1104
+ "loss": 0.0223,
1105
+ "rewards/accuracies": 1.0,
1106
+ "rewards/chosen": -8.491829872131348,
1107
+ "rewards/margins": 8.8389892578125,
1108
+ "rewards/margins_max": 10.393911361694336,
1109
+ "rewards/margins_min": 7.284067630767822,
1110
+ "rewards/margins_std": 2.1989917755126953,
1111
+ "rewards/rejected": -17.33081817626953,
1112
+ "step": 550
1113
+ },
1114
+ {
1115
+ "epoch": 1.58,
1116
+ "grad_norm": 1.1029358007262624,
1117
+ "learning_rate": 2.71289790878446e-06,
1118
+ "logits/chosen": -1.5588399171829224,
1119
+ "logits/rejected": -1.3718044757843018,
1120
+ "logps/chosen": -1313.054443359375,
1121
+ "logps/rejected": -2318.33544921875,
1122
+ "loss": 0.0303,
1123
+ "rewards/accuracies": 1.0,
1124
+ "rewards/chosen": -9.963714599609375,
1125
+ "rewards/margins": 9.831637382507324,
1126
+ "rewards/margins_max": 12.691813468933105,
1127
+ "rewards/margins_min": 6.971460819244385,
1128
+ "rewards/margins_std": 4.044900894165039,
1129
+ "rewards/rejected": -19.795352935791016,
1130
+ "step": 560
1131
+ },
1132
+ {
1133
+ "epoch": 1.61,
1134
+ "grad_norm": 0.032589510422147,
1135
+ "learning_rate": 2.6311127923312156e-06,
1136
+ "logits/chosen": -1.7382599115371704,
1137
+ "logits/rejected": -1.5052683353424072,
1138
+ "logps/chosen": -1249.270263671875,
1139
+ "logps/rejected": -2084.659912109375,
1140
+ "loss": 0.0177,
1141
+ "rewards/accuracies": 1.0,
1142
+ "rewards/chosen": -8.170693397521973,
1143
+ "rewards/margins": 9.51733684539795,
1144
+ "rewards/margins_max": 11.196283340454102,
1145
+ "rewards/margins_min": 7.8383917808532715,
1146
+ "rewards/margins_std": 2.374387741088867,
1147
+ "rewards/rejected": -17.68802833557129,
1148
+ "step": 570
1149
+ },
1150
+ {
1151
+ "epoch": 1.63,
1152
+ "grad_norm": 12.99158263963332,
1153
+ "learning_rate": 2.549186690240057e-06,
1154
+ "logits/chosen": -1.610082983970642,
1155
+ "logits/rejected": -1.3717553615570068,
1156
+ "logps/chosen": -1186.931884765625,
1157
+ "logps/rejected": -2215.44970703125,
1158
+ "loss": 0.0096,
1159
+ "rewards/accuracies": 1.0,
1160
+ "rewards/chosen": -8.884663581848145,
1161
+ "rewards/margins": 11.055347442626953,
1162
+ "rewards/margins_max": 13.794784545898438,
1163
+ "rewards/margins_min": 8.315912246704102,
1164
+ "rewards/margins_std": 3.874147891998291,
1165
+ "rewards/rejected": -19.94001007080078,
1166
+ "step": 580
1167
+ },
1168
+ {
1169
+ "epoch": 1.66,
1170
+ "grad_norm": 0.09893386521593805,
1171
+ "learning_rate": 2.4672076976812548e-06,
1172
+ "logits/chosen": -1.504370927810669,
1173
+ "logits/rejected": -1.24093759059906,
1174
+ "logps/chosen": -1294.6529541015625,
1175
+ "logps/rejected": -2374.53271484375,
1176
+ "loss": 0.0182,
1177
+ "rewards/accuracies": 1.0,
1178
+ "rewards/chosen": -9.253921508789062,
1179
+ "rewards/margins": 11.56922721862793,
1180
+ "rewards/margins_max": 16.103586196899414,
1181
+ "rewards/margins_min": 7.034867763519287,
1182
+ "rewards/margins_std": 6.412552833557129,
1183
+ "rewards/rejected": -20.823148727416992,
1184
+ "step": 590
1185
+ },
1186
+ {
1187
+ "epoch": 1.69,
1188
+ "grad_norm": 1.4677452546622722,
1189
+ "learning_rate": 2.3852639666982218e-06,
1190
+ "logits/chosen": -1.5387322902679443,
1191
+ "logits/rejected": -1.3424365520477295,
1192
+ "logps/chosen": -1172.688232421875,
1193
+ "logps/rejected": -2390.56689453125,
1194
+ "loss": 0.0029,
1195
+ "rewards/accuracies": 1.0,
1196
+ "rewards/chosen": -8.940356254577637,
1197
+ "rewards/margins": 12.360175132751465,
1198
+ "rewards/margins_max": 14.774116516113281,
1199
+ "rewards/margins_min": 9.946235656738281,
1200
+ "rewards/margins_std": 3.4138267040252686,
1201
+ "rewards/rejected": -21.300533294677734,
1202
+ "step": 600
1203
+ },
1204
+ {
1205
+ "epoch": 1.69,
1206
+ "eval_logits/chosen": -1.5330660343170166,
1207
+ "eval_logits/rejected": -1.4547291994094849,
1208
+ "eval_logps/chosen": -1739.8331298828125,
1209
+ "eval_logps/rejected": -2005.7900390625,
1210
+ "eval_loss": 1.9743393659591675,
1211
+ "eval_rewards/accuracies": 0.6865079402923584,
1212
+ "eval_rewards/chosen": -14.546117782592773,
1213
+ "eval_rewards/margins": 2.9199535846710205,
1214
+ "eval_rewards/margins_max": 12.400845527648926,
1215
+ "eval_rewards/margins_min": -5.716708660125732,
1216
+ "eval_rewards/margins_std": 8.164259910583496,
1217
+ "eval_rewards/rejected": -17.46607208251953,
1218
+ "eval_runtime": 281.995,
1219
+ "eval_samples_per_second": 7.092,
1220
+ "eval_steps_per_second": 0.223,
1221
+ "step": 600
1222
+ },
1223
+ {
1224
+ "epoch": 1.72,
1225
+ "grad_norm": 16.662428863900104,
1226
+ "learning_rate": 2.303443611417584e-06,
1227
+ "logits/chosen": -1.2892029285430908,
1228
+ "logits/rejected": -1.0749212503433228,
1229
+ "logps/chosen": -1583.099609375,
1230
+ "logps/rejected": -2742.760498046875,
1231
+ "loss": 0.3581,
1232
+ "rewards/accuracies": 1.0,
1233
+ "rewards/chosen": -12.176101684570312,
1234
+ "rewards/margins": 12.56828498840332,
1235
+ "rewards/margins_max": 17.369625091552734,
1236
+ "rewards/margins_min": 7.766943454742432,
1237
+ "rewards/margins_std": 6.790121555328369,
1238
+ "rewards/rejected": -24.744388580322266,
1239
+ "step": 610
1240
+ },
1241
+ {
1242
+ "epoch": 1.75,
1243
+ "grad_norm": 0.1502185307527533,
1244
+ "learning_rate": 2.2218346133000264e-06,
1245
+ "logits/chosen": -1.1851621866226196,
1246
+ "logits/rejected": -0.8747516870498657,
1247
+ "logps/chosen": -1684.5989990234375,
1248
+ "logps/rejected": -2998.321044921875,
1249
+ "loss": 0.0851,
1250
+ "rewards/accuracies": 0.949999988079071,
1251
+ "rewards/chosen": -13.733156204223633,
1252
+ "rewards/margins": 14.27801513671875,
1253
+ "rewards/margins_max": 20.737751007080078,
1254
+ "rewards/margins_min": 7.818281650543213,
1255
+ "rewards/margins_std": 9.135442733764648,
1256
+ "rewards/rejected": -28.011173248291016,
1257
+ "step": 620
1258
+ },
1259
+ {
1260
+ "epoch": 1.77,
1261
+ "grad_norm": 0.608737783564001,
1262
+ "learning_rate": 2.140524726533792e-06,
1263
+ "logits/chosen": -1.4635207653045654,
1264
+ "logits/rejected": -1.206559658050537,
1265
+ "logps/chosen": -1263.6993408203125,
1266
+ "logps/rejected": -2158.978759765625,
1267
+ "loss": 0.0474,
1268
+ "rewards/accuracies": 1.0,
1269
+ "rewards/chosen": -8.689355850219727,
1270
+ "rewards/margins": 10.659037590026855,
1271
+ "rewards/margins_max": 13.989839553833008,
1272
+ "rewards/margins_min": 7.3282365798950195,
1273
+ "rewards/margins_std": 4.710465431213379,
1274
+ "rewards/rejected": -19.3483943939209,
1275
+ "step": 630
1276
+ },
1277
+ {
1278
+ "epoch": 1.8,
1279
+ "grad_norm": 37.51094566818964,
1280
+ "learning_rate": 2.059601383672566e-06,
1281
+ "logits/chosen": -1.6980371475219727,
1282
+ "logits/rejected": -1.5178521871566772,
1283
+ "logps/chosen": -964.2796630859375,
1284
+ "logps/rejected": -1743.4036865234375,
1285
+ "loss": 0.0669,
1286
+ "rewards/accuracies": 1.0,
1287
+ "rewards/chosen": -6.6180419921875,
1288
+ "rewards/margins": 8.817036628723145,
1289
+ "rewards/margins_max": 10.244000434875488,
1290
+ "rewards/margins_min": 7.390072822570801,
1291
+ "rewards/margins_std": 2.018031597137451,
1292
+ "rewards/rejected": -15.435079574584961,
1293
+ "step": 640
1294
+ },
1295
+ {
1296
+ "epoch": 1.83,
1297
+ "grad_norm": 0.824336798291059,
1298
+ "learning_rate": 1.9791516016192214e-06,
1299
+ "logits/chosen": -1.8461487293243408,
1300
+ "logits/rejected": -1.5655087232589722,
1301
+ "logps/chosen": -941.0548706054688,
1302
+ "logps/rejected": -1621.322265625,
1303
+ "loss": 0.0587,
1304
+ "rewards/accuracies": 0.949999988079071,
1305
+ "rewards/chosen": -6.0665507316589355,
1306
+ "rewards/margins": 7.918545722961426,
1307
+ "rewards/margins_max": 10.15103530883789,
1308
+ "rewards/margins_min": 5.6860551834106445,
1309
+ "rewards/margins_std": 3.157217502593994,
1310
+ "rewards/rejected": -13.985095024108887,
1311
+ "step": 650
1312
+ },
1313
+ {
1314
+ "epoch": 1.86,
1315
+ "grad_norm": 0.2329366656877762,
1316
+ "learning_rate": 1.8992618880565039e-06,
1317
+ "logits/chosen": -1.4127376079559326,
1318
+ "logits/rejected": -1.204310655593872,
1319
+ "logps/chosen": -974.7972412109375,
1320
+ "logps/rejected": -1706.96484375,
1321
+ "loss": 0.0472,
1322
+ "rewards/accuracies": 1.0,
1323
+ "rewards/chosen": -6.5077385902404785,
1324
+ "rewards/margins": 8.661420822143555,
1325
+ "rewards/margins_max": 11.35025691986084,
1326
+ "rewards/margins_min": 5.972585678100586,
1327
+ "rewards/margins_std": 3.8025870323181152,
1328
+ "rewards/rejected": -15.169160842895508,
1329
+ "step": 660
1330
+ },
1331
+ {
1332
+ "epoch": 1.89,
1333
+ "grad_norm": 0.2766932797893532,
1334
+ "learning_rate": 1.8200181484252888e-06,
1335
+ "logits/chosen": -1.6775104999542236,
1336
+ "logits/rejected": -1.5603760480880737,
1337
+ "logps/chosen": -1146.943603515625,
1338
+ "logps/rejected": -2180.825927734375,
1339
+ "loss": 0.0303,
1340
+ "rewards/accuracies": 1.0,
1341
+ "rewards/chosen": -7.641868591308594,
1342
+ "rewards/margins": 10.99293327331543,
1343
+ "rewards/margins_max": 14.466341018676758,
1344
+ "rewards/margins_min": 7.519525051116943,
1345
+ "rewards/margins_std": 4.912140369415283,
1346
+ "rewards/rejected": -18.634801864624023,
1347
+ "step": 670
1348
+ },
1349
+ {
1350
+ "epoch": 1.92,
1351
+ "grad_norm": 1.9894517252535326,
1352
+ "learning_rate": 1.7415055935504234e-06,
1353
+ "logits/chosen": -1.6779143810272217,
1354
+ "logits/rejected": -1.3088996410369873,
1355
+ "logps/chosen": -1250.79345703125,
1356
+ "logps/rejected": -2332.5302734375,
1357
+ "loss": 0.0268,
1358
+ "rewards/accuracies": 1.0,
1359
+ "rewards/chosen": -8.574339866638184,
1360
+ "rewards/margins": 11.780553817749023,
1361
+ "rewards/margins_max": 17.217056274414062,
1362
+ "rewards/margins_min": 6.344052314758301,
1363
+ "rewards/margins_std": 7.688374996185303,
1364
+ "rewards/rejected": -20.35489273071289,
1365
+ "step": 680
1366
+ },
1367
+ {
1368
+ "epoch": 1.94,
1369
+ "grad_norm": 1.2264882447915335,
1370
+ "learning_rate": 1.6638086480134954e-06,
1371
+ "logits/chosen": -1.133843183517456,
1372
+ "logits/rejected": -0.9121431112289429,
1373
+ "logps/chosen": -1320.951171875,
1374
+ "logps/rejected": -2429.5537109375,
1375
+ "loss": 0.014,
1376
+ "rewards/accuracies": 0.949999988079071,
1377
+ "rewards/chosen": -10.83985710144043,
1378
+ "rewards/margins": 12.160634994506836,
1379
+ "rewards/margins_max": 17.855926513671875,
1380
+ "rewards/margins_min": 6.465344429016113,
1381
+ "rewards/margins_std": 8.054357528686523,
1382
+ "rewards/rejected": -23.000492095947266,
1383
+ "step": 690
1384
+ },
1385
+ {
1386
+ "epoch": 1.97,
1387
+ "grad_norm": 4.223913353219136,
1388
+ "learning_rate": 1.5870108593710473e-06,
1389
+ "logits/chosen": -1.4314680099487305,
1390
+ "logits/rejected": -1.1393955945968628,
1391
+ "logps/chosen": -1421.0302734375,
1392
+ "logps/rejected": -2616.06005859375,
1393
+ "loss": 0.018,
1394
+ "rewards/accuracies": 1.0,
1395
+ "rewards/chosen": -9.940652847290039,
1396
+ "rewards/margins": 14.069793701171875,
1397
+ "rewards/margins_max": 18.50979995727539,
1398
+ "rewards/margins_min": 9.62978744506836,
1399
+ "rewards/margins_std": 6.279117584228516,
1400
+ "rewards/rejected": -24.010446548461914,
1401
+ "step": 700
1402
+ },
1403
+ {
1404
+ "epoch": 1.97,
1405
+ "eval_logits/chosen": -1.4977593421936035,
1406
+ "eval_logits/rejected": -1.4133175611495972,
1407
+ "eval_logps/chosen": -1938.2783203125,
1408
+ "eval_logps/rejected": -2177.001708984375,
1409
+ "eval_loss": 1.8029882907867432,
1410
+ "eval_rewards/accuracies": 0.6785714030265808,
1411
+ "eval_rewards/chosen": -16.53057098388672,
1412
+ "eval_rewards/margins": 2.6476187705993652,
1413
+ "eval_rewards/margins_max": 11.230785369873047,
1414
+ "eval_rewards/margins_min": -5.27154541015625,
1415
+ "eval_rewards/margins_std": 7.43382453918457,
1416
+ "eval_rewards/rejected": -19.178190231323242,
1417
+ "eval_runtime": 282.2867,
1418
+ "eval_samples_per_second": 7.085,
1419
+ "eval_steps_per_second": 0.223,
1420
+ "step": 700
1421
+ },
1422
+ {
1423
+ "epoch": 2.0,
1424
+ "grad_norm": 0.027200756028801846,
1425
+ "learning_rate": 1.511194808315853e-06,
1426
+ "logits/chosen": -1.4225877523422241,
1427
+ "logits/rejected": -1.1490380764007568,
1428
+ "logps/chosen": -1361.941162109375,
1429
+ "logps/rejected": -2227.452880859375,
1430
+ "loss": 0.0423,
1431
+ "rewards/accuracies": 0.949999988079071,
1432
+ "rewards/chosen": -10.556672096252441,
1433
+ "rewards/margins": 9.88037109375,
1434
+ "rewards/margins_max": 13.63640022277832,
1435
+ "rewards/margins_min": 6.124342441558838,
1436
+ "rewards/margins_std": 5.311827182769775,
1437
+ "rewards/rejected": -20.437042236328125,
1438
+ "step": 710
1439
+ },
1440
+ {
1441
+ "epoch": 2.03,
1442
+ "grad_norm": 0.318786591879142,
1443
+ "learning_rate": 1.4364420198778662e-06,
1444
+ "logits/chosen": -1.5894582271575928,
1445
+ "logits/rejected": -1.3686472177505493,
1446
+ "logps/chosen": -1422.156005859375,
1447
+ "logps/rejected": -2683.84814453125,
1448
+ "loss": 0.0033,
1449
+ "rewards/accuracies": 1.0,
1450
+ "rewards/chosen": -10.745410919189453,
1451
+ "rewards/margins": 12.789144515991211,
1452
+ "rewards/margins_max": 16.427227020263672,
1453
+ "rewards/margins_min": 9.15106201171875,
1454
+ "rewards/margins_std": 5.14502477645874,
1455
+ "rewards/rejected": -23.53455352783203,
1456
+ "step": 720
1457
+ },
1458
+ {
1459
+ "epoch": 2.06,
1460
+ "grad_norm": 1.5807231251466567,
1461
+ "learning_rate": 1.3628328757603243e-06,
1462
+ "logits/chosen": -1.6512333154678345,
1463
+ "logits/rejected": -1.3885473012924194,
1464
+ "logps/chosen": -1368.7022705078125,
1465
+ "logps/rejected": -2550.4912109375,
1466
+ "loss": 0.0091,
1467
+ "rewards/accuracies": 1.0,
1468
+ "rewards/chosen": -9.459519386291504,
1469
+ "rewards/margins": 13.517751693725586,
1470
+ "rewards/margins_max": 18.180484771728516,
1471
+ "rewards/margins_min": 8.855023384094238,
1472
+ "rewards/margins_std": 6.5940961837768555,
1473
+ "rewards/rejected": -22.97727394104004,
1474
+ "step": 730
1475
+ },
1476
+ {
1477
+ "epoch": 2.08,
1478
+ "grad_norm": 0.1516893711186873,
1479
+ "learning_rate": 1.2904465279052725e-06,
1480
+ "logits/chosen": -1.6209065914154053,
1481
+ "logits/rejected": -1.351872444152832,
1482
+ "logps/chosen": -1231.8480224609375,
1483
+ "logps/rejected": -2237.622802734375,
1484
+ "loss": 0.0085,
1485
+ "rewards/accuracies": 1.0,
1486
+ "rewards/chosen": -8.798944473266602,
1487
+ "rewards/margins": 11.324702262878418,
1488
+ "rewards/margins_max": 13.88591480255127,
1489
+ "rewards/margins_min": 8.763489723205566,
1490
+ "rewards/margins_std": 3.6221022605895996,
1491
+ "rewards/rejected": -20.123645782470703,
1492
+ "step": 740
1493
+ },
1494
+ {
1495
+ "epoch": 2.11,
1496
+ "grad_norm": 0.8035507691467565,
1497
+ "learning_rate": 1.219360813381446e-06,
1498
+ "logits/chosen": -1.247396469116211,
1499
+ "logits/rejected": -1.033151388168335,
1500
+ "logps/chosen": -1316.85546875,
1501
+ "logps/rejected": -2502.35400390625,
1502
+ "loss": 0.0042,
1503
+ "rewards/accuracies": 1.0,
1504
+ "rewards/chosen": -11.282798767089844,
1505
+ "rewards/margins": 12.374329566955566,
1506
+ "rewards/margins_max": 16.396432876586914,
1507
+ "rewards/margins_min": 8.352226257324219,
1508
+ "rewards/margins_std": 5.688112258911133,
1509
+ "rewards/rejected": -23.657127380371094,
1510
+ "step": 750
1511
+ },
1512
+ {
1513
+ "epoch": 2.14,
1514
+ "grad_norm": 0.10201527009610997,
1515
+ "learning_rate": 1.1496521706860392e-06,
1516
+ "logits/chosen": -1.5233542919158936,
1517
+ "logits/rejected": -1.1838680505752563,
1518
+ "logps/chosen": -1417.0087890625,
1519
+ "logps/rejected": -2805.773681640625,
1520
+ "loss": 0.0051,
1521
+ "rewards/accuracies": 1.0,
1522
+ "rewards/chosen": -10.839475631713867,
1523
+ "rewards/margins": 14.590258598327637,
1524
+ "rewards/margins_max": 17.661457061767578,
1525
+ "rewards/margins_min": 11.519063949584961,
1526
+ "rewards/margins_std": 4.343328475952148,
1527
+ "rewards/rejected": -25.429737091064453,
1528
+ "step": 760
1529
+ },
1530
+ {
1531
+ "epoch": 2.17,
1532
+ "grad_norm": 0.0015806759819360625,
1533
+ "learning_rate": 1.0813955575503588e-06,
1534
+ "logits/chosen": -1.355691909790039,
1535
+ "logits/rejected": -1.144424557685852,
1536
+ "logps/chosen": -1348.842041015625,
1537
+ "logps/rejected": -2898.0224609375,
1538
+ "loss": 0.0066,
1539
+ "rewards/accuracies": 1.0,
1540
+ "rewards/chosen": -10.152058601379395,
1541
+ "rewards/margins": 16.251543045043945,
1542
+ "rewards/margins_max": 22.687950134277344,
1543
+ "rewards/margins_min": 9.815134048461914,
1544
+ "rewards/margins_std": 9.102456092834473,
1545
+ "rewards/rejected": -26.40359878540039,
1546
+ "step": 770
1547
+ },
1548
+ {
1549
+ "epoch": 2.2,
1550
+ "grad_norm": 0.408380187113466,
1551
+ "learning_rate": 1.0146643703377488e-06,
1552
+ "logits/chosen": -1.6056991815567017,
1553
+ "logits/rejected": -1.3266913890838623,
1554
+ "logps/chosen": -1298.9927978515625,
1555
+ "logps/rejected": -2409.390869140625,
1556
+ "loss": 0.0075,
1557
+ "rewards/accuracies": 1.0,
1558
+ "rewards/chosen": -9.719507217407227,
1559
+ "rewards/margins": 12.09427261352539,
1560
+ "rewards/margins_max": 15.695422172546387,
1561
+ "rewards/margins_min": 8.493124008178711,
1562
+ "rewards/margins_std": 5.092793941497803,
1563
+ "rewards/rejected": -21.813779830932617,
1564
+ "step": 780
1565
+ },
1566
+ {
1567
+ "epoch": 2.23,
1568
+ "grad_norm": 0.001344347508367163,
1569
+ "learning_rate": 9.495303651204496e-07,
1570
+ "logits/chosen": -1.563906192779541,
1571
+ "logits/rejected": -1.3474560976028442,
1572
+ "logps/chosen": -1254.9219970703125,
1573
+ "logps/rejected": -2623.2822265625,
1574
+ "loss": 0.0009,
1575
+ "rewards/accuracies": 1.0,
1576
+ "rewards/chosen": -8.889430046081543,
1577
+ "rewards/margins": 14.575796127319336,
1578
+ "rewards/margins_max": 18.69800567626953,
1579
+ "rewards/margins_min": 10.453584671020508,
1580
+ "rewards/margins_std": 5.829684734344482,
1581
+ "rewards/rejected": -23.465227127075195,
1582
+ "step": 790
1583
+ },
1584
+ {
1585
+ "epoch": 2.25,
1586
+ "grad_norm": 1.6920469977748351,
1587
+ "learning_rate": 8.860635805202616e-07,
1588
+ "logits/chosen": -1.551922082901001,
1589
+ "logits/rejected": -1.2580442428588867,
1590
+ "logps/chosen": -1456.9490966796875,
1591
+ "logps/rejected": -2604.62744140625,
1592
+ "loss": 0.0014,
1593
+ "rewards/accuracies": 1.0,
1594
+ "rewards/chosen": -10.935505867004395,
1595
+ "rewards/margins": 12.657417297363281,
1596
+ "rewards/margins_max": 15.51282024383545,
1597
+ "rewards/margins_min": 9.802014350891113,
1598
+ "rewards/margins_std": 4.038149833679199,
1599
+ "rewards/rejected": -23.59292221069336,
1600
+ "step": 800
1601
+ },
1602
+ {
1603
+ "epoch": 2.25,
1604
+ "eval_logits/chosen": -1.5266377925872803,
1605
+ "eval_logits/rejected": -1.4433014392852783,
1606
+ "eval_logps/chosen": -1957.578857421875,
1607
+ "eval_logps/rejected": -2208.484375,
1608
+ "eval_loss": 1.8519227504730225,
1609
+ "eval_rewards/accuracies": 0.6746031641960144,
1610
+ "eval_rewards/chosen": -16.72357749938965,
1611
+ "eval_rewards/margins": 2.7694385051727295,
1612
+ "eval_rewards/margins_max": 11.662981033325195,
1613
+ "eval_rewards/margins_min": -5.304656982421875,
1614
+ "eval_rewards/margins_std": 7.62367582321167,
1615
+ "eval_rewards/rejected": -19.493017196655273,
1616
+ "eval_runtime": 282.5434,
1617
+ "eval_samples_per_second": 7.079,
1618
+ "eval_steps_per_second": 0.223,
1619
+ "step": 800
1620
+ },
1621
+ {
1622
+ "epoch": 2.28,
1623
+ "grad_norm": 3.2305387145726234,
1624
+ "learning_rate": 8.24332262395994e-07,
1625
+ "logits/chosen": -1.5742024183273315,
1626
+ "logits/rejected": -1.3343318700790405,
1627
+ "logps/chosen": -1459.0062255859375,
1628
+ "logps/rejected": -2835.21044921875,
1629
+ "loss": 0.0055,
1630
+ "rewards/accuracies": 1.0,
1631
+ "rewards/chosen": -11.644388198852539,
1632
+ "rewards/margins": 14.268835067749023,
1633
+ "rewards/margins_max": 19.221527099609375,
1634
+ "rewards/margins_min": 9.316144943237305,
1635
+ "rewards/margins_std": 7.0041632652282715,
1636
+ "rewards/rejected": -25.913223266601562,
1637
+ "step": 810
1638
+ },
1639
+ {
1640
+ "epoch": 2.31,
1641
+ "grad_norm": 0.26542768442550385,
1642
+ "learning_rate": 7.644027904586587e-07,
1643
+ "logits/chosen": -1.50737726688385,
1644
+ "logits/rejected": -1.2445927858352661,
1645
+ "logps/chosen": -1452.3663330078125,
1646
+ "logps/rejected": -2697.02880859375,
1647
+ "loss": 0.0067,
1648
+ "rewards/accuracies": 1.0,
1649
+ "rewards/chosen": -11.468404769897461,
1650
+ "rewards/margins": 13.425836563110352,
1651
+ "rewards/margins_max": 16.106616973876953,
1652
+ "rewards/margins_min": 10.745055198669434,
1653
+ "rewards/margins_std": 3.791196823120117,
1654
+ "rewards/rejected": -24.894241333007812,
1655
+ "step": 820
1656
+ },
1657
+ {
1658
+ "epoch": 2.34,
1659
+ "grad_norm": 0.8567763833713586,
1660
+ "learning_rate": 7.06339606893347e-07,
1661
+ "logits/chosen": -1.6803547143936157,
1662
+ "logits/rejected": -1.4048993587493896,
1663
+ "logps/chosen": -1588.3795166015625,
1664
+ "logps/rejected": -2856.94873046875,
1665
+ "loss": 0.0218,
1666
+ "rewards/accuracies": 1.0,
1667
+ "rewards/chosen": -11.344830513000488,
1668
+ "rewards/margins": 14.68242073059082,
1669
+ "rewards/margins_max": 20.33969497680664,
1670
+ "rewards/margins_min": 9.025145530700684,
1671
+ "rewards/margins_std": 8.000594139099121,
1672
+ "rewards/rejected": -26.02724838256836,
1673
+ "step": 830
1674
+ },
1675
+ {
1676
+ "epoch": 2.37,
1677
+ "grad_norm": 0.19797390603665133,
1678
+ "learning_rate": 6.502051470645149e-07,
1679
+ "logits/chosen": -1.7654281854629517,
1680
+ "logits/rejected": -1.40230393409729,
1681
+ "logps/chosen": -1327.5189208984375,
1682
+ "logps/rejected": -2276.90771484375,
1683
+ "loss": 0.0218,
1684
+ "rewards/accuracies": 1.0,
1685
+ "rewards/chosen": -9.360559463500977,
1686
+ "rewards/margins": 10.55632495880127,
1687
+ "rewards/margins_max": 12.99437141418457,
1688
+ "rewards/margins_min": 8.118279457092285,
1689
+ "rewards/margins_std": 3.4479167461395264,
1690
+ "rewards/rejected": -19.916885375976562,
1691
+ "step": 840
1692
+ },
1693
+ {
1694
+ "epoch": 2.39,
1695
+ "grad_norm": 0.0023467881665189677,
1696
+ "learning_rate": 5.960597723792194e-07,
1697
+ "logits/chosen": -1.5812981128692627,
1698
+ "logits/rejected": -1.1608024835586548,
1699
+ "logps/chosen": -1374.124267578125,
1700
+ "logps/rejected": -2819.462158203125,
1701
+ "loss": 0.0049,
1702
+ "rewards/accuracies": 1.0,
1703
+ "rewards/chosen": -9.890588760375977,
1704
+ "rewards/margins": 15.723424911499023,
1705
+ "rewards/margins_max": 21.0240421295166,
1706
+ "rewards/margins_min": 10.422807693481445,
1707
+ "rewards/margins_std": 7.4962053298950195,
1708
+ "rewards/rejected": -25.614009857177734,
1709
+ "step": 850
1710
+ },
1711
+ {
1712
+ "epoch": 2.42,
1713
+ "grad_norm": 1.4084849928658003,
1714
+ "learning_rate": 5.43961705380465e-07,
1715
+ "logits/chosen": -1.646162986755371,
1716
+ "logits/rejected": -1.4091808795928955,
1717
+ "logps/chosen": -1218.2606201171875,
1718
+ "logps/rejected": -2409.643798828125,
1719
+ "loss": 0.0078,
1720
+ "rewards/accuracies": 0.949999988079071,
1721
+ "rewards/chosen": -8.628401756286621,
1722
+ "rewards/margins": 12.78498649597168,
1723
+ "rewards/margins_max": 17.431535720825195,
1724
+ "rewards/margins_min": 8.138437271118164,
1725
+ "rewards/margins_std": 6.5712127685546875,
1726
+ "rewards/rejected": -21.413387298583984,
1727
+ "step": 860
1728
+ },
1729
+ {
1730
+ "epoch": 2.45,
1731
+ "grad_norm": 0.13595105985996128,
1732
+ "learning_rate": 4.939669671404871e-07,
1733
+ "logits/chosen": -1.5396533012390137,
1734
+ "logits/rejected": -1.2183513641357422,
1735
+ "logps/chosen": -1237.326904296875,
1736
+ "logps/rejected": -3156.015380859375,
1737
+ "loss": 0.0039,
1738
+ "rewards/accuracies": 1.0,
1739
+ "rewards/chosen": -8.967730522155762,
1740
+ "rewards/margins": 19.433839797973633,
1741
+ "rewards/margins_max": 26.383316040039062,
1742
+ "rewards/margins_min": 12.484365463256836,
1743
+ "rewards/margins_std": 9.828042984008789,
1744
+ "rewards/rejected": -28.40157127380371,
1745
+ "step": 870
1746
+ },
1747
+ {
1748
+ "epoch": 2.48,
1749
+ "grad_norm": 0.012403182973777866,
1750
+ "learning_rate": 4.461293170212644e-07,
1751
+ "logits/chosen": -1.6268768310546875,
1752
+ "logits/rejected": -1.3297674655914307,
1753
+ "logps/chosen": -1231.2391357421875,
1754
+ "logps/rejected": -2482.310546875,
1755
+ "loss": 0.0125,
1756
+ "rewards/accuracies": 1.0,
1757
+ "rewards/chosen": -9.140237808227539,
1758
+ "rewards/margins": 13.229069709777832,
1759
+ "rewards/margins_max": 16.058679580688477,
1760
+ "rewards/margins_min": 10.399457931518555,
1761
+ "rewards/margins_std": 4.001674175262451,
1762
+ "rewards/rejected": -22.369308471679688,
1763
+ "step": 880
1764
+ },
1765
+ {
1766
+ "epoch": 2.51,
1767
+ "grad_norm": 5.925107209728559,
1768
+ "learning_rate": 4.005001948670606e-07,
1769
+ "logits/chosen": -1.7953965663909912,
1770
+ "logits/rejected": -1.5808696746826172,
1771
+ "logps/chosen": -1377.26611328125,
1772
+ "logps/rejected": -2234.20849609375,
1773
+ "loss": 0.0043,
1774
+ "rewards/accuracies": 1.0,
1775
+ "rewards/chosen": -9.003216743469238,
1776
+ "rewards/margins": 10.078218460083008,
1777
+ "rewards/margins_max": 11.774847030639648,
1778
+ "rewards/margins_min": 8.381589889526367,
1779
+ "rewards/margins_std": 2.39939546585083,
1780
+ "rewards/rejected": -19.08143424987793,
1781
+ "step": 890
1782
+ },
1783
+ {
1784
+ "epoch": 2.54,
1785
+ "grad_norm": 0.0018034560654693567,
1786
+ "learning_rate": 3.571286656911377e-07,
1787
+ "logits/chosen": -1.6509956121444702,
1788
+ "logits/rejected": -1.2617855072021484,
1789
+ "logps/chosen": -1374.924072265625,
1790
+ "logps/rejected": -2686.83154296875,
1791
+ "loss": 0.0034,
1792
+ "rewards/accuracies": 1.0,
1793
+ "rewards/chosen": -9.74584674835205,
1794
+ "rewards/margins": 14.469047546386719,
1795
+ "rewards/margins_max": 20.866533279418945,
1796
+ "rewards/margins_min": 8.071561813354492,
1797
+ "rewards/margins_std": 9.04741096496582,
1798
+ "rewards/rejected": -24.214895248413086,
1799
+ "step": 900
1800
+ },
1801
+ {
1802
+ "epoch": 2.54,
1803
+ "eval_logits/chosen": -1.5324345827102661,
1804
+ "eval_logits/rejected": -1.4488511085510254,
1805
+ "eval_logps/chosen": -1899.9781494140625,
1806
+ "eval_logps/rejected": -2137.156982421875,
1807
+ "eval_loss": 1.6798701286315918,
1808
+ "eval_rewards/accuracies": 0.6865079402923584,
1809
+ "eval_rewards/chosen": -16.14756965637207,
1810
+ "eval_rewards/margins": 2.632173776626587,
1811
+ "eval_rewards/margins_max": 10.763092994689941,
1812
+ "eval_rewards/margins_min": -4.875840663909912,
1813
+ "eval_rewards/margins_std": 7.033862590789795,
1814
+ "eval_rewards/rejected": -18.77974510192871,
1815
+ "eval_runtime": 281.9065,
1816
+ "eval_samples_per_second": 7.095,
1817
+ "eval_steps_per_second": 0.223,
1818
+ "step": 900
1819
+ },
1820
+ {
1821
+ "epoch": 2.56,
1822
+ "grad_norm": 0.39851941407344293,
1823
+ "learning_rate": 3.1606136691612555e-07,
1824
+ "logits/chosen": -1.7041774988174438,
1825
+ "logits/rejected": -1.4187756776809692,
1826
+ "logps/chosen": -1301.1878662109375,
1827
+ "logps/rejected": -2172.826904296875,
1828
+ "loss": 0.001,
1829
+ "rewards/accuracies": 1.0,
1830
+ "rewards/chosen": -8.820059776306152,
1831
+ "rewards/margins": 10.524114608764648,
1832
+ "rewards/margins_max": 12.688272476196289,
1833
+ "rewards/margins_min": 8.359955787658691,
1834
+ "rewards/margins_std": 3.060582160949707,
1835
+ "rewards/rejected": -19.344173431396484,
1836
+ "step": 910
1837
+ },
1838
+ {
1839
+ "epoch": 2.59,
1840
+ "grad_norm": 0.0005374838985619683,
1841
+ "learning_rate": 2.773424582247844e-07,
1842
+ "logits/chosen": -1.5690796375274658,
1843
+ "logits/rejected": -1.2215526103973389,
1844
+ "logps/chosen": -1358.075927734375,
1845
+ "logps/rejected": -2381.899169921875,
1846
+ "loss": 0.0024,
1847
+ "rewards/accuracies": 1.0,
1848
+ "rewards/chosen": -9.972057342529297,
1849
+ "rewards/margins": 11.921777725219727,
1850
+ "rewards/margins_max": 14.729642868041992,
1851
+ "rewards/margins_min": 9.113912582397461,
1852
+ "rewards/margins_std": 3.970921754837036,
1853
+ "rewards/rejected": -21.893835067749023,
1854
+ "step": 920
1855
+ },
1856
+ {
1857
+ "epoch": 2.62,
1858
+ "grad_norm": 0.8257494267996711,
1859
+ "learning_rate": 2.410135740750821e-07,
1860
+ "logits/chosen": -1.5338929891586304,
1861
+ "logits/rejected": -1.259865164756775,
1862
+ "logps/chosen": -1410.4990234375,
1863
+ "logps/rejected": -2998.914794921875,
1864
+ "loss": 0.0099,
1865
+ "rewards/accuracies": 1.0,
1866
+ "rewards/chosen": -10.63892936706543,
1867
+ "rewards/margins": 16.653705596923828,
1868
+ "rewards/margins_max": 21.365177154541016,
1869
+ "rewards/margins_min": 11.942238807678223,
1870
+ "rewards/margins_std": 6.663023471832275,
1871
+ "rewards/rejected": -27.29263687133789,
1872
+ "step": 930
1873
+ },
1874
+ {
1875
+ "epoch": 2.65,
1876
+ "grad_norm": 0.06916221157748438,
1877
+ "learning_rate": 2.0711377893064182e-07,
1878
+ "logits/chosen": -1.5516988039016724,
1879
+ "logits/rejected": -1.2729582786560059,
1880
+ "logps/chosen": -1308.211669921875,
1881
+ "logps/rejected": -2490.35693359375,
1882
+ "loss": 0.0053,
1883
+ "rewards/accuracies": 1.0,
1884
+ "rewards/chosen": -9.68997573852539,
1885
+ "rewards/margins": 13.111665725708008,
1886
+ "rewards/margins_max": 18.273632049560547,
1887
+ "rewards/margins_min": 7.9496965408325195,
1888
+ "rewards/margins_std": 7.300126075744629,
1889
+ "rewards/rejected": -22.801639556884766,
1890
+ "step": 940
1891
+ },
1892
+ {
1893
+ "epoch": 2.68,
1894
+ "grad_norm": 2.498417925921994,
1895
+ "learning_rate": 1.756795252547111e-07,
1896
+ "logits/chosen": -1.4785737991333008,
1897
+ "logits/rejected": -1.2068592309951782,
1898
+ "logps/chosen": -1470.0135498046875,
1899
+ "logps/rejected": -2859.243408203125,
1900
+ "loss": 0.0078,
1901
+ "rewards/accuracies": 1.0,
1902
+ "rewards/chosen": -11.678686141967773,
1903
+ "rewards/margins": 14.885587692260742,
1904
+ "rewards/margins_max": 18.92436981201172,
1905
+ "rewards/margins_min": 10.846805572509766,
1906
+ "rewards/margins_std": 5.7117018699646,
1907
+ "rewards/rejected": -26.564273834228516,
1908
+ "step": 950
1909
+ },
1910
+ {
1911
+ "epoch": 2.7,
1912
+ "grad_norm": 0.30835027385045066,
1913
+ "learning_rate": 1.4674461431281013e-07,
1914
+ "logits/chosen": -1.6750847101211548,
1915
+ "logits/rejected": -1.3757655620574951,
1916
+ "logps/chosen": -1276.86669921875,
1917
+ "logps/rejected": -2703.418701171875,
1918
+ "loss": 0.0151,
1919
+ "rewards/accuracies": 0.949999988079071,
1920
+ "rewards/chosen": -9.499726295471191,
1921
+ "rewards/margins": 15.09521198272705,
1922
+ "rewards/margins_max": 21.079849243164062,
1923
+ "rewards/margins_min": 9.11056900024414,
1924
+ "rewards/margins_std": 8.463561058044434,
1925
+ "rewards/rejected": -24.59493637084961,
1926
+ "step": 960
1927
+ },
1928
+ {
1929
+ "epoch": 2.73,
1930
+ "grad_norm": 0.23235990194938522,
1931
+ "learning_rate": 1.2034015982622243e-07,
1932
+ "logits/chosen": -1.5666346549987793,
1933
+ "logits/rejected": -1.2590982913970947,
1934
+ "logps/chosen": -1482.5379638671875,
1935
+ "logps/rejected": -2852.9375,
1936
+ "loss": 0.0003,
1937
+ "rewards/accuracies": 1.0,
1938
+ "rewards/chosen": -11.074012756347656,
1939
+ "rewards/margins": 14.420585632324219,
1940
+ "rewards/margins_max": 18.83799934387207,
1941
+ "rewards/margins_min": 10.003174781799316,
1942
+ "rewards/margins_std": 6.24716329574585,
1943
+ "rewards/rejected": -25.494598388671875,
1944
+ "step": 970
1945
+ },
1946
+ {
1947
+ "epoch": 2.76,
1948
+ "grad_norm": 0.003130078676672441,
1949
+ "learning_rate": 9.649455451539419e-08,
1950
+ "logits/chosen": -1.2376658916473389,
1951
+ "logits/rejected": -0.9727104306221008,
1952
+ "logps/chosen": -1320.026123046875,
1953
+ "logps/rejected": -2890.248291015625,
1954
+ "loss": 0.0043,
1955
+ "rewards/accuracies": 1.0,
1956
+ "rewards/chosen": -10.823871612548828,
1957
+ "rewards/margins": 16.33503532409668,
1958
+ "rewards/margins_max": 22.118406295776367,
1959
+ "rewards/margins_min": 10.551666259765625,
1960
+ "rewards/margins_std": 8.178921699523926,
1961
+ "rewards/rejected": -27.15890884399414,
1962
+ "step": 980
1963
+ },
1964
+ {
1965
+ "epoch": 2.79,
1966
+ "grad_norm": 0.01106748013868886,
1967
+ "learning_rate": 7.523343956923196e-08,
1968
+ "logits/chosen": -1.6014173030853271,
1969
+ "logits/rejected": -1.3725566864013672,
1970
+ "logps/chosen": -1455.7508544921875,
1971
+ "logps/rejected": -2784.856201171875,
1972
+ "loss": 0.0014,
1973
+ "rewards/accuracies": 1.0,
1974
+ "rewards/chosen": -11.036726951599121,
1975
+ "rewards/margins": 13.958398818969727,
1976
+ "rewards/margins_max": 18.721614837646484,
1977
+ "rewards/margins_min": 9.19517993927002,
1978
+ "rewards/margins_std": 6.736205101013184,
1979
+ "rewards/rejected": -24.995126724243164,
1980
+ "step": 990
1981
+ },
1982
+ {
1983
+ "epoch": 2.82,
1984
+ "grad_norm": 0.21777107682252947,
1985
+ "learning_rate": 5.657967707312195e-08,
1986
+ "logits/chosen": -1.4147546291351318,
1987
+ "logits/rejected": -1.2533682584762573,
1988
+ "logps/chosen": -1340.80859375,
1989
+ "logps/rejected": -2710.937255859375,
1990
+ "loss": 0.0118,
1991
+ "rewards/accuracies": 1.0,
1992
+ "rewards/chosen": -10.588825225830078,
1993
+ "rewards/margins": 13.658398628234863,
1994
+ "rewards/margins_max": 17.033788681030273,
1995
+ "rewards/margins_min": 10.28300666809082,
1996
+ "rewards/margins_std": 4.773523807525635,
1997
+ "rewards/rejected": -24.247220993041992,
1998
+ "step": 1000
1999
+ },
2000
+ {
2001
+ "epoch": 2.82,
2002
+ "eval_logits/chosen": -1.51563560962677,
2003
+ "eval_logits/rejected": -1.4296027421951294,
2004
+ "eval_logps/chosen": -1952.324462890625,
2005
+ "eval_logps/rejected": -2219.474609375,
2006
+ "eval_loss": 1.8351484537124634,
2007
+ "eval_rewards/accuracies": 0.682539701461792,
2008
+ "eval_rewards/chosen": -16.671031951904297,
2009
+ "eval_rewards/margins": 2.931889057159424,
2010
+ "eval_rewards/margins_max": 11.962862014770508,
2011
+ "eval_rewards/margins_min": -5.289890766143799,
2012
+ "eval_rewards/margins_std": 7.766205787658691,
2013
+ "eval_rewards/rejected": -19.602922439575195,
2014
+ "eval_runtime": 281.5027,
2015
+ "eval_samples_per_second": 7.105,
2016
+ "eval_steps_per_second": 0.224,
2017
+ "step": 1000
2018
+ },
2019
+ {
2020
+ "epoch": 2.85,
2021
+ "grad_norm": 0.4419550733032763,
2022
+ "learning_rate": 4.055332542531959e-08,
2023
+ "logits/chosen": -1.5433815717697144,
2024
+ "logits/rejected": -1.295972228050232,
2025
+ "logps/chosen": -1293.6630859375,
2026
+ "logps/rejected": -2648.736572265625,
2027
+ "loss": 0.0096,
2028
+ "rewards/accuracies": 0.949999988079071,
2029
+ "rewards/chosen": -9.779963493347168,
2030
+ "rewards/margins": 14.112527847290039,
2031
+ "rewards/margins_max": 18.39639663696289,
2032
+ "rewards/margins_min": 9.828656196594238,
2033
+ "rewards/margins_std": 6.058306694030762,
2034
+ "rewards/rejected": -23.89249038696289,
2035
+ "step": 1010
2036
+ },
2037
+ {
2038
+ "epoch": 2.87,
2039
+ "grad_norm": 0.14005943320430667,
2040
+ "learning_rate": 2.7171617768147472e-08,
2041
+ "logits/chosen": -1.398990273475647,
2042
+ "logits/rejected": -1.063157320022583,
2043
+ "logps/chosen": -1454.0186767578125,
2044
+ "logps/rejected": -2948.3251953125,
2045
+ "loss": 0.0009,
2046
+ "rewards/accuracies": 1.0,
2047
+ "rewards/chosen": -11.425373077392578,
2048
+ "rewards/margins": 15.727473258972168,
2049
+ "rewards/margins_max": 20.60434341430664,
2050
+ "rewards/margins_min": 10.850606918334961,
2051
+ "rewards/margins_std": 6.896933078765869,
2052
+ "rewards/rejected": -27.152847290039062,
2053
+ "step": 1020
2054
+ },
2055
+ {
2056
+ "epoch": 2.9,
2057
+ "grad_norm": 0.2626213621970617,
2058
+ "learning_rate": 1.6448943457189616e-08,
2059
+ "logits/chosen": -1.5582804679870605,
2060
+ "logits/rejected": -1.3218994140625,
2061
+ "logps/chosen": -1478.698974609375,
2062
+ "logps/rejected": -2884.353271484375,
2063
+ "loss": 0.0057,
2064
+ "rewards/accuracies": 1.0,
2065
+ "rewards/chosen": -11.135309219360352,
2066
+ "rewards/margins": 14.943025588989258,
2067
+ "rewards/margins_max": 20.703128814697266,
2068
+ "rewards/margins_min": 9.1829195022583,
2069
+ "rewards/margins_std": 8.146018981933594,
2070
+ "rewards/rejected": -26.07833480834961,
2071
+ "step": 1030
2072
+ },
2073
+ {
2074
+ "epoch": 2.93,
2075
+ "grad_norm": 2.8326701528782565,
2076
+ "learning_rate": 8.39683258841123e-09,
2077
+ "logits/chosen": -1.5044890642166138,
2078
+ "logits/rejected": -1.2109694480895996,
2079
+ "logps/chosen": -1402.8773193359375,
2080
+ "logps/rejected": -2849.219970703125,
2081
+ "loss": 0.0062,
2082
+ "rewards/accuracies": 1.0,
2083
+ "rewards/chosen": -10.566572189331055,
2084
+ "rewards/margins": 15.638870239257812,
2085
+ "rewards/margins_max": 20.092174530029297,
2086
+ "rewards/margins_min": 11.185564041137695,
2087
+ "rewards/margins_std": 6.297926425933838,
2088
+ "rewards/rejected": -26.2054443359375,
2089
+ "step": 1040
2090
+ },
2091
+ {
2092
+ "epoch": 2.96,
2093
+ "grad_norm": 0.3213477153635432,
2094
+ "learning_rate": 3.0239435998430376e-09,
2095
+ "logits/chosen": -1.4634066820144653,
2096
+ "logits/rejected": -1.1483074426651,
2097
+ "logps/chosen": -1369.406494140625,
2098
+ "logps/rejected": -2688.2548828125,
2099
+ "loss": 0.0034,
2100
+ "rewards/accuracies": 1.0,
2101
+ "rewards/chosen": -10.378218650817871,
2102
+ "rewards/margins": 13.92640495300293,
2103
+ "rewards/margins_max": 18.696613311767578,
2104
+ "rewards/margins_min": 9.156195640563965,
2105
+ "rewards/margins_std": 6.746094703674316,
2106
+ "rewards/rejected": -24.304622650146484,
2107
+ "step": 1050
2108
+ },
2109
+ {
2110
+ "epoch": 2.99,
2111
+ "grad_norm": 0.31694097428400714,
2112
+ "learning_rate": 3.3605396115826695e-10,
2113
+ "logits/chosen": -1.4050662517547607,
2114
+ "logits/rejected": -1.1527583599090576,
2115
+ "logps/chosen": -1549.754150390625,
2116
+ "logps/rejected": -2639.6474609375,
2117
+ "loss": 0.0027,
2118
+ "rewards/accuracies": 1.0,
2119
+ "rewards/chosen": -12.238971710205078,
2120
+ "rewards/margins": 12.063154220581055,
2121
+ "rewards/margins_max": 15.284955978393555,
2122
+ "rewards/margins_min": 8.841352462768555,
2123
+ "rewards/margins_std": 4.5563154220581055,
2124
+ "rewards/rejected": -24.302127838134766,
2125
+ "step": 1060
2126
+ },
2127
+ {
2128
+ "epoch": 3.0,
2129
+ "step": 1065,
2130
+ "total_flos": 0.0,
2131
+ "train_loss": 0.1103198329137612,
2132
+ "train_runtime": 9245.0119,
2133
+ "train_samples_per_second": 1.843,
2134
+ "train_steps_per_second": 0.115
2135
+ }
2136
+ ],
2137
+ "logging_steps": 10,
2138
+ "max_steps": 1065,
2139
+ "num_input_tokens_seen": 0,
2140
+ "num_train_epochs": 3,
2141
+ "save_steps": 100,
2142
+ "total_flos": 0.0,
2143
+ "train_batch_size": 2,
2144
+ "trial_name": null,
2145
+ "trial_params": null
2146
+ }