lewtun (HF staff) committed
Commit b4276b8
Parent: e45a60d

Model save

README.md ADDED
@@ -0,0 +1,77 @@
+ ---
+ license: other
+ base_model: lewtun/gemma-7b-sft-full-deita-10k-v0
+ tags:
+ - trl
+ - dpo
+ - generated_from_trainer
+ model-index:
+ - name: gemma-7b-dpo-full-mix2-beta-0.1
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # gemma-7b-dpo-full-mix2-beta-0.1
+
+ This model is a fine-tuned version of [lewtun/gemma-7b-sft-full-deita-10k-v0](https://huggingface.co/lewtun/gemma-7b-sft-full-deita-10k-v0) on the None dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.4134
+ - Rewards/chosen: -0.3763
+ - Rewards/rejected: -3.5060
+ - Rewards/accuracies: 0.8032
+ - Rewards/margins: 3.1296
+ - Logps/rejected: -413.8586
+ - Logps/chosen: -392.1099
+ - Logits/rejected: 83.6363
+ - Logits/chosen: 82.8991
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-07
+ - train_batch_size: 2
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 128
+ - total_eval_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.6146 | 0.18 | 100 | 0.4651 | -2.3366 | -4.2121 | 0.7527 | 1.8755 | -420.9197 | -411.7124 | 84.3991 | 81.8795 |
+ | 0.5464 | 0.35 | 200 | 0.4531 | -0.7850 | -3.1857 | 0.7899 | 2.4007 | -410.6562 | -396.1968 | 84.8764 | 82.9057 |
+ | 0.5841 | 0.53 | 300 | 0.4209 | -1.5926 | -4.2403 | 0.8085 | 2.6477 | -421.2023 | -404.2725 | 83.7612 | 81.9224 |
+ | 0.519 | 0.7 | 400 | 0.4162 | -1.2384 | -4.1774 | 0.7819 | 2.9390 | -420.5732 | -400.7308 | 85.8201 | 84.7816 |
+ | 0.5432 | 0.88 | 500 | 0.4134 | -0.3763 | -3.5060 | 0.8032 | 3.1296 | -413.8586 | -392.1099 | 83.6363 | 82.8991 |
+
+
+ ### Framework versions
+
+ - Transformers 4.39.0.dev0
+ - Pytorch 2.1.2+cu121
+ - Datasets 2.14.6
+ - Tokenizers 0.15.1
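
As an aside (not part of this commit): a minimal sketch of loading this checkpoint for inference with `transformers`. The Hub repo id is assumed from the model-index name above, and the presence of a chat template on the tokenizer is also an assumption; adjust dtype and device placement to your hardware.

```python
# Minimal inference sketch; repo id and chat template are assumptions, not from this commit.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "lewtun/gemma-7b-dpo-full-mix2-beta-0.1"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # the shards total roughly 17 GB, so bf16 on GPU is typical
    device_map="auto",
)

messages = [{"role": "user", "content": "Explain DPO in one sentence."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```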
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 1.0,
+     "train_loss": 0.5623811487565961,
+     "train_runtime": 6612.2068,
+     "train_samples": 72994,
+     "train_samples_per_second": 11.039,
+     "train_steps_per_second": 0.086
+ }
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+     "_from_model_config": true,
+     "bos_token_id": 2,
+     "eos_token_id": 1,
+     "pad_token_id": 0,
+     "transformers_version": "4.39.0.dev0"
+ }
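
For orientation, the same fields can be reconstructed with `transformers.GenerationConfig`; the sketch below is illustrative only, with the token ids copied from the JSON above.

```python
# Illustrative only: the values mirror generation_config.json in this commit.
from transformers import GenerationConfig

gen_config = GenerationConfig(
    bos_token_id=2,
    eos_token_id=1,
    pad_token_id=0,
)
print(gen_config)

# When the model is on the Hub, the same config can be loaded directly:
# gen_config = GenerationConfig.from_pretrained("<repo-id>")
```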
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fcde3b10f654e18d912b862f803cb96364b4bc0edc8e253eed58c65a94f6ddd
+ size 4995496656
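
Each weight shard is stored as a git-LFS pointer like the one above: only the object's sha256 (`oid`) and byte size live in the repo. A downloaded shard can be checked against the pointer with plain Python; the local path below is hypothetical.

```python
# Sketch: verify a downloaded shard against the LFS pointer above.
import hashlib

path = "model-00001-of-00004.safetensors"  # hypothetical local path
expected_oid = "6fcde3b10f654e18d912b862f803cb96364b4bc0edc8e253eed58c65a94f6ddd"
expected_size = 4995496656

h = hashlib.sha256()
size = 0
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        h.update(chunk)
        size += len(chunk)

assert size == expected_size, f"size mismatch: {size}"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("shard matches the LFS pointer")
```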
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a94574d27f26710e1b5fd5f894fa582857b9272a409774cbc4f291678b1a9d2c
+ size 4982953168
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d6a36a7a5cc17090b9dc76326c60324317ae8fba0a22ece6a3899affc1b2545
+ size 4982953200
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:190a885761114abd32232f5cd167eaf77eeeb6a29a1583a6a446b17450ce0be2
+ size 2113988336
model.safetensors.index.json ADDED
@@ -0,0 +1,261 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 17075361792
4
+ },
5
+ "weight_map": {
6
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
7
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
8
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
9
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
10
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
11
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
12
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
13
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
14
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
15
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
16
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
17
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
18
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
19
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
20
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
21
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
22
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
23
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
24
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
25
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
26
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
27
+ "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
28
+ "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
29
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
30
+ "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
31
+ "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
32
+ "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
33
+ "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
34
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
35
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
36
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
37
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
38
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
39
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
40
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
41
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
42
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
43
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
44
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
45
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
46
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
47
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
48
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
49
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
50
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
51
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
52
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
53
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
54
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
55
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
56
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
57
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
58
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
59
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
60
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
61
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
62
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
63
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
64
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
65
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
66
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
67
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
68
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
69
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
70
+ "model.layers.15.input_layernorm.weight": "model-00003-of-00004.safetensors",
71
+ "model.layers.15.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
72
+ "model.layers.15.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
73
+ "model.layers.15.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
74
+ "model.layers.15.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
75
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
76
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
77
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
78
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
79
+ "model.layers.16.input_layernorm.weight": "model-00003-of-00004.safetensors",
80
+ "model.layers.16.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
81
+ "model.layers.16.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
82
+ "model.layers.16.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
83
+ "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
84
+ "model.layers.16.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
85
+ "model.layers.16.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
86
+ "model.layers.16.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
87
+ "model.layers.16.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
88
+ "model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
89
+ "model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
90
+ "model.layers.17.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
91
+ "model.layers.17.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
92
+ "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
93
+ "model.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
94
+ "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
95
+ "model.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
96
+ "model.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
97
+ "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
98
+ "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
99
+ "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
100
+ "model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
101
+ "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
102
+ "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
103
+ "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
104
+ "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
105
+ "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
106
+ "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
107
+ "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
108
+ "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
109
+ "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
110
+ "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
111
+ "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
112
+ "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
113
+ "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
114
+ "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
115
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
116
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
117
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
118
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
119
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
120
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
121
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
122
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
123
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
124
+ "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
125
+ "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
126
+ "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
127
+ "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
128
+ "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
129
+ "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
130
+ "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
131
+ "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
132
+ "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
133
+ "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
134
+ "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
135
+ "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
136
+ "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
137
+ "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
138
+ "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
139
+ "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
140
+ "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
141
+ "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
142
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
143
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
144
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
145
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
146
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
147
+ "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
148
+ "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
149
+ "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
150
+ "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
151
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
152
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
153
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
154
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
155
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
156
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
157
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
158
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
159
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
160
+ "model.layers.24.input_layernorm.weight": "model-00004-of-00004.safetensors",
161
+ "model.layers.24.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
162
+ "model.layers.24.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
163
+ "model.layers.24.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
164
+ "model.layers.24.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
165
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
166
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
167
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
168
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
169
+ "model.layers.25.input_layernorm.weight": "model-00004-of-00004.safetensors",
170
+ "model.layers.25.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
171
+ "model.layers.25.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
172
+ "model.layers.25.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
173
+ "model.layers.25.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
174
+ "model.layers.25.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
175
+ "model.layers.25.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
176
+ "model.layers.25.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
177
+ "model.layers.25.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
178
+ "model.layers.26.input_layernorm.weight": "model-00004-of-00004.safetensors",
179
+ "model.layers.26.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
180
+ "model.layers.26.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
181
+ "model.layers.26.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
182
+ "model.layers.26.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
183
+ "model.layers.26.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
184
+ "model.layers.26.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
185
+ "model.layers.26.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
186
+ "model.layers.26.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
187
+ "model.layers.27.input_layernorm.weight": "model-00004-of-00004.safetensors",
188
+ "model.layers.27.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
189
+ "model.layers.27.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
190
+ "model.layers.27.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
191
+ "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
192
+ "model.layers.27.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
193
+ "model.layers.27.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
194
+ "model.layers.27.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
195
+ "model.layers.27.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
196
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
197
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
198
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
199
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
200
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
201
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
202
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
203
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
204
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
205
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
206
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
207
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
208
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
209
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
210
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
211
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
212
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
213
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
214
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
215
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
216
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
217
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
218
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
219
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
220
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
221
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
222
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
223
+ "model.layers.6.input_layernorm.weight": "model-00002-of-00004.safetensors",
224
+ "model.layers.6.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
225
+ "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
226
+ "model.layers.6.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
227
+ "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
228
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
229
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
230
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
231
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
232
+ "model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
233
+ "model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
234
+ "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
235
+ "model.layers.7.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
236
+ "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
237
+ "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
238
+ "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
239
+ "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
240
+ "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
241
+ "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
242
+ "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
243
+ "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
244
+ "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
245
+ "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
246
+ "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
247
+ "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
248
+ "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
249
+ "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
250
+ "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
251
+ "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
252
+ "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
253
+ "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
254
+ "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
255
+ "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
256
+ "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
257
+ "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
258
+ "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
259
+ "model.norm.weight": "model-00004-of-00004.safetensors"
260
+ }
261
+ }
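
The index above maps every tensor name to the shard that holds it (total_size 17075361792 bytes across four files). A small sketch of how the index can be queried, using plain JSON; the local path is hypothetical.

```python
# Sketch: use model.safetensors.index.json to locate a tensor's shard.
import json

with open("model.safetensors.index.json") as f:  # hypothetical local path
    index = json.load(f)

print(index["metadata"]["total_size"])           # 17075361792 bytes in total
print(index["weight_map"]["model.norm.weight"])  # -> model-00004-of-00004.safetensors
```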
runs/Feb29_21-52-12_ip-26-0-165-38/events.out.tfevents.1709243843.ip-26-0-165-38.2014140.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:36392fc7330d0054871907bdfede4569daf6d71c664bc4130033d80298f3def5
- size 43470
+ oid sha256:146825b2792ce81c69acddd6fdc23ed59a5a18ef309c6426a5573902d03818e8
+ size 48640
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 1.0,
+     "train_loss": 0.5623811487565961,
+     "train_runtime": 6612.2068,
+     "train_samples": 72994,
+     "train_samples_per_second": 11.039,
+     "train_steps_per_second": 0.086
+ }
trainer_state.json ADDED
@@ -0,0 +1,980 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.9993425378040762,
5
+ "eval_steps": 100,
6
+ "global_step": 570,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "grad_norm": 288.1190594115736,
14
+ "learning_rate": 8.771929824561403e-09,
15
+ "logits/chosen": 110.2152099609375,
16
+ "logits/rejected": 102.90592193603516,
17
+ "logps/chosen": -522.5816650390625,
18
+ "logps/rejected": -417.8726501464844,
19
+ "loss": 0.6931,
20
+ "rewards/accuracies": 0.0,
21
+ "rewards/chosen": 0.0,
22
+ "rewards/margins": 0.0,
23
+ "rewards/rejected": 0.0,
24
+ "step": 1
25
+ },
26
+ {
27
+ "epoch": 0.02,
28
+ "grad_norm": 275.85410641713264,
29
+ "learning_rate": 8.771929824561403e-08,
30
+ "logits/chosen": 126.02606964111328,
31
+ "logits/rejected": 117.09617614746094,
32
+ "logps/chosen": -389.89422607421875,
33
+ "logps/rejected": -354.1610412597656,
34
+ "loss": 0.7792,
35
+ "rewards/accuracies": 0.4652777910232544,
36
+ "rewards/chosen": 0.04768397659063339,
37
+ "rewards/margins": 0.08727537095546722,
38
+ "rewards/rejected": -0.039591409265995026,
39
+ "step": 10
40
+ },
41
+ {
42
+ "epoch": 0.04,
43
+ "grad_norm": 217.40628820586957,
44
+ "learning_rate": 1.7543859649122805e-07,
45
+ "logits/chosen": 129.68209838867188,
46
+ "logits/rejected": 113.68559265136719,
47
+ "logps/chosen": -432.5853576660156,
48
+ "logps/rejected": -373.7879943847656,
49
+ "loss": 0.7513,
50
+ "rewards/accuracies": 0.5687500238418579,
51
+ "rewards/chosen": 0.031696923077106476,
52
+ "rewards/margins": 0.08473679423332214,
53
+ "rewards/rejected": -0.05303985998034477,
54
+ "step": 20
55
+ },
56
+ {
57
+ "epoch": 0.05,
58
+ "grad_norm": 191.89370217443937,
59
+ "learning_rate": 2.631578947368421e-07,
60
+ "logits/chosen": 124.4021987915039,
61
+ "logits/rejected": 125.64024353027344,
62
+ "logps/chosen": -399.22802734375,
63
+ "logps/rejected": -390.37579345703125,
64
+ "loss": 0.7037,
65
+ "rewards/accuracies": 0.5687500238418579,
66
+ "rewards/chosen": -0.20258919894695282,
67
+ "rewards/margins": 0.1700224131345749,
68
+ "rewards/rejected": -0.3726116120815277,
69
+ "step": 30
70
+ },
71
+ {
72
+ "epoch": 0.07,
73
+ "grad_norm": 211.6351305832278,
74
+ "learning_rate": 3.508771929824561e-07,
75
+ "logits/chosen": 131.0064697265625,
76
+ "logits/rejected": 123.0418930053711,
77
+ "logps/chosen": -419.60662841796875,
78
+ "logps/rejected": -402.06634521484375,
79
+ "loss": 0.6797,
80
+ "rewards/accuracies": 0.643750011920929,
81
+ "rewards/chosen": -0.7152727842330933,
82
+ "rewards/margins": 0.4604054391384125,
83
+ "rewards/rejected": -1.1756783723831177,
84
+ "step": 40
85
+ },
86
+ {
87
+ "epoch": 0.09,
88
+ "grad_norm": 484.38349273880505,
89
+ "learning_rate": 4.3859649122807013e-07,
90
+ "logits/chosen": 128.71829223632812,
91
+ "logits/rejected": 118.0994873046875,
92
+ "logps/chosen": -448.260498046875,
93
+ "logps/rejected": -412.1876525878906,
94
+ "loss": 0.695,
95
+ "rewards/accuracies": 0.6937500238418579,
96
+ "rewards/chosen": -1.3968048095703125,
97
+ "rewards/margins": 0.8602328300476074,
98
+ "rewards/rejected": -2.25703763961792,
99
+ "step": 50
100
+ },
101
+ {
102
+ "epoch": 0.11,
103
+ "grad_norm": 182.40125166565076,
104
+ "learning_rate": 4.999578104083306e-07,
105
+ "logits/chosen": 127.1559066772461,
106
+ "logits/rejected": 121.3763427734375,
107
+ "logps/chosen": -435.3233337402344,
108
+ "logps/rejected": -415.9839782714844,
109
+ "loss": 0.6133,
110
+ "rewards/accuracies": 0.71875,
111
+ "rewards/chosen": -1.5324429273605347,
112
+ "rewards/margins": 1.0619370937347412,
113
+ "rewards/rejected": -2.5943799018859863,
114
+ "step": 60
115
+ },
116
+ {
117
+ "epoch": 0.12,
118
+ "grad_norm": 199.29717720826676,
119
+ "learning_rate": 4.992081692902698e-07,
120
+ "logits/chosen": 120.18775939941406,
121
+ "logits/rejected": 117.95320129394531,
122
+ "logps/chosen": -406.31915283203125,
123
+ "logps/rejected": -413.50787353515625,
124
+ "loss": 0.5865,
125
+ "rewards/accuracies": 0.75,
126
+ "rewards/chosen": -0.46991199254989624,
127
+ "rewards/margins": 1.2349997758865356,
128
+ "rewards/rejected": -1.7049118280410767,
129
+ "step": 70
130
+ },
131
+ {
132
+ "epoch": 0.14,
133
+ "grad_norm": 207.06449323027374,
134
+ "learning_rate": 4.975242169652915e-07,
135
+ "logits/chosen": 128.04473876953125,
136
+ "logits/rejected": 130.7515869140625,
137
+ "logps/chosen": -447.3434143066406,
138
+ "logps/rejected": -459.81890869140625,
139
+ "loss": 0.6004,
140
+ "rewards/accuracies": 0.71875,
141
+ "rewards/chosen": -2.0434231758117676,
142
+ "rewards/margins": 1.4744064807891846,
143
+ "rewards/rejected": -3.5178298950195312,
144
+ "step": 80
145
+ },
146
+ {
147
+ "epoch": 0.16,
148
+ "grad_norm": 200.92304707212335,
149
+ "learning_rate": 4.949122667718934e-07,
150
+ "logits/chosen": 121.19368743896484,
151
+ "logits/rejected": 121.2099380493164,
152
+ "logps/chosen": -397.20086669921875,
153
+ "logps/rejected": -414.5176696777344,
154
+ "loss": 0.6301,
155
+ "rewards/accuracies": 0.6812499761581421,
156
+ "rewards/chosen": -2.6995744705200195,
157
+ "rewards/margins": 1.4437432289123535,
158
+ "rewards/rejected": -4.143317222595215,
159
+ "step": 90
160
+ },
161
+ {
162
+ "epoch": 0.18,
163
+ "grad_norm": 215.04289608607098,
164
+ "learning_rate": 4.913821112234773e-07,
165
+ "logits/chosen": 116.49479675292969,
166
+ "logits/rejected": 112.4799575805664,
167
+ "logps/chosen": -447.57647705078125,
168
+ "logps/rejected": -429.46575927734375,
169
+ "loss": 0.6146,
170
+ "rewards/accuracies": 0.706250011920929,
171
+ "rewards/chosen": -2.545346260070801,
172
+ "rewards/margins": 1.2528657913208008,
173
+ "rewards/rejected": -3.7982120513916016,
174
+ "step": 100
175
+ },
176
+ {
177
+ "epoch": 0.18,
178
+ "eval_logits/chosen": 81.87950897216797,
179
+ "eval_logits/rejected": 84.39910125732422,
180
+ "eval_logps/chosen": -411.71240234375,
181
+ "eval_logps/rejected": -420.9197082519531,
182
+ "eval_loss": 0.4651451110839844,
183
+ "eval_rewards/accuracies": 0.7526595592498779,
184
+ "eval_rewards/chosen": -2.3365964889526367,
185
+ "eval_rewards/margins": 1.8754801750183105,
186
+ "eval_rewards/rejected": -4.2120771408081055,
187
+ "eval_runtime": 140.5275,
188
+ "eval_samples_per_second": 21.348,
189
+ "eval_steps_per_second": 0.669,
190
+ "step": 100
191
+ },
192
+ {
193
+ "epoch": 0.19,
194
+ "grad_norm": 145.22259820264708,
195
+ "learning_rate": 4.869469852950461e-07,
196
+ "logits/chosen": 122.2143325805664,
197
+ "logits/rejected": 105.96075439453125,
198
+ "logps/chosen": -432.1094665527344,
199
+ "logps/rejected": -416.0596618652344,
200
+ "loss": 0.5204,
201
+ "rewards/accuracies": 0.731249988079071,
202
+ "rewards/chosen": -2.683460235595703,
203
+ "rewards/margins": 1.542055606842041,
204
+ "rewards/rejected": -4.225515842437744,
205
+ "step": 110
206
+ },
207
+ {
208
+ "epoch": 0.21,
209
+ "grad_norm": 156.26979860678045,
210
+ "learning_rate": 4.816235168037004e-07,
211
+ "logits/chosen": 128.47006225585938,
212
+ "logits/rejected": 118.56982421875,
213
+ "logps/chosen": -449.61700439453125,
214
+ "logps/rejected": -434.48028564453125,
215
+ "loss": 0.6423,
216
+ "rewards/accuracies": 0.7562500238418579,
217
+ "rewards/chosen": -3.0041728019714355,
218
+ "rewards/margins": 1.7394739389419556,
219
+ "rewards/rejected": -4.74364709854126,
220
+ "step": 120
221
+ },
222
+ {
223
+ "epoch": 0.23,
224
+ "grad_norm": 177.86144513954326,
225
+ "learning_rate": 4.754316640689664e-07,
226
+ "logits/chosen": 113.59197998046875,
227
+ "logits/rejected": 112.35980224609375,
228
+ "logps/chosen": -457.04144287109375,
229
+ "logps/rejected": -439.47723388671875,
230
+ "loss": 0.5826,
231
+ "rewards/accuracies": 0.731249988079071,
232
+ "rewards/chosen": -2.8894424438476562,
233
+ "rewards/margins": 1.4542138576507568,
234
+ "rewards/rejected": -4.343656539916992,
235
+ "step": 130
236
+ },
237
+ {
238
+ "epoch": 0.25,
239
+ "grad_norm": 189.02971648014739,
240
+ "learning_rate": 4.683946410866696e-07,
241
+ "logits/chosen": 116.56878662109375,
242
+ "logits/rejected": 122.89111328125,
243
+ "logps/chosen": -420.02703857421875,
244
+ "logps/rejected": -411.07720947265625,
245
+ "loss": 0.6118,
246
+ "rewards/accuracies": 0.7437499761581421,
247
+ "rewards/chosen": -3.1426749229431152,
248
+ "rewards/margins": 1.2650305032730103,
249
+ "rewards/rejected": -4.407705307006836,
250
+ "step": 140
251
+ },
252
+ {
253
+ "epoch": 0.26,
254
+ "grad_norm": 211.31450916072063,
255
+ "learning_rate": 4.605388304968914e-07,
256
+ "logits/chosen": 118.91947937011719,
257
+ "logits/rejected": 116.5951156616211,
258
+ "logps/chosen": -431.5098571777344,
259
+ "logps/rejected": -429.95440673828125,
260
+ "loss": 0.5911,
261
+ "rewards/accuracies": 0.643750011920929,
262
+ "rewards/chosen": -2.910405397415161,
263
+ "rewards/margins": 1.1865342855453491,
264
+ "rewards/rejected": -4.096939563751221,
265
+ "step": 150
266
+ },
267
+ {
268
+ "epoch": 0.28,
269
+ "grad_norm": 170.26872451717134,
270
+ "learning_rate": 4.518936846722982e-07,
271
+ "logits/chosen": 117.84376525878906,
272
+ "logits/rejected": 112.31922912597656,
273
+ "logps/chosen": -414.76165771484375,
274
+ "logps/rejected": -437.95452880859375,
275
+ "loss": 0.5456,
276
+ "rewards/accuracies": 0.768750011920929,
277
+ "rewards/chosen": -1.7054227590560913,
278
+ "rewards/margins": 2.110852003097534,
279
+ "rewards/rejected": -3.816274642944336,
280
+ "step": 160
281
+ },
282
+ {
283
+ "epoch": 0.3,
284
+ "grad_norm": 191.46259689342833,
285
+ "learning_rate": 4.424916152976768e-07,
286
+ "logits/chosen": 119.23990631103516,
287
+ "logits/rejected": 121.25250244140625,
288
+ "logps/chosen": -443.57867431640625,
289
+ "logps/rejected": -435.184326171875,
290
+ "loss": 0.5512,
291
+ "rewards/accuracies": 0.793749988079071,
292
+ "rewards/chosen": -1.7071870565414429,
293
+ "rewards/margins": 1.8656721115112305,
294
+ "rewards/rejected": -3.572859287261963,
295
+ "step": 170
296
+ },
297
+ {
298
+ "epoch": 0.32,
299
+ "grad_norm": 184.73035643974228,
300
+ "learning_rate": 4.323678718546552e-07,
301
+ "logits/chosen": 108.43327331542969,
302
+ "logits/rejected": 114.77632141113281,
303
+ "logps/chosen": -438.0908203125,
304
+ "logps/rejected": -462.9315490722656,
305
+ "loss": 0.6132,
306
+ "rewards/accuracies": 0.8125,
307
+ "rewards/chosen": -2.3591582775115967,
308
+ "rewards/margins": 2.4720942974090576,
309
+ "rewards/rejected": -4.8312530517578125,
310
+ "step": 180
311
+ },
312
+ {
313
+ "epoch": 0.33,
314
+ "grad_norm": 158.74860587248762,
315
+ "learning_rate": 4.2156040946718343e-07,
316
+ "logits/chosen": 123.97856140136719,
317
+ "logits/rejected": 116.08268737792969,
318
+ "logps/chosen": -403.24920654296875,
319
+ "logps/rejected": -421.84698486328125,
320
+ "loss": 0.5587,
321
+ "rewards/accuracies": 0.768750011920929,
322
+ "rewards/chosen": -1.5052869319915771,
323
+ "rewards/margins": 1.7390124797821045,
324
+ "rewards/rejected": -3.2442994117736816,
325
+ "step": 190
326
+ },
327
+ {
328
+ "epoch": 0.35,
329
+ "grad_norm": 183.38314527086422,
330
+ "learning_rate": 4.1010974660323827e-07,
331
+ "logits/chosen": 126.38175964355469,
332
+ "logits/rejected": 122.32222747802734,
333
+ "logps/chosen": -436.79522705078125,
334
+ "logps/rejected": -426.5311584472656,
335
+ "loss": 0.5464,
336
+ "rewards/accuracies": 0.7749999761581421,
337
+ "rewards/chosen": -0.6060012578964233,
338
+ "rewards/margins": 1.8500821590423584,
339
+ "rewards/rejected": -2.456083297729492,
340
+ "step": 200
341
+ },
342
+ {
343
+ "epoch": 0.35,
344
+ "eval_logits/chosen": 82.9056625366211,
345
+ "eval_logits/rejected": 84.87641143798828,
346
+ "eval_logps/chosen": -396.1968078613281,
347
+ "eval_logps/rejected": -410.65625,
348
+ "eval_loss": 0.4531392455101013,
349
+ "eval_rewards/accuracies": 0.789893627166748,
350
+ "eval_rewards/chosen": -0.785041868686676,
351
+ "eval_rewards/margins": 2.400688648223877,
352
+ "eval_rewards/rejected": -3.185730218887329,
353
+ "eval_runtime": 137.5235,
354
+ "eval_samples_per_second": 21.814,
355
+ "eval_steps_per_second": 0.684,
356
+ "step": 200
357
+ },
358
+ {
359
+ "epoch": 0.37,
360
+ "grad_norm": 168.64841301750204,
361
+ "learning_rate": 3.9805881316624503e-07,
362
+ "logits/chosen": 130.578857421875,
363
+ "logits/rejected": 128.09909057617188,
364
+ "logps/chosen": -438.5977478027344,
365
+ "logps/rejected": -418.1968688964844,
366
+ "loss": 0.5578,
367
+ "rewards/accuracies": 0.762499988079071,
368
+ "rewards/chosen": -1.4819152355194092,
369
+ "rewards/margins": 1.8128970861434937,
370
+ "rewards/rejected": -3.2948124408721924,
371
+ "step": 210
372
+ },
373
+ {
374
+ "epoch": 0.39,
375
+ "grad_norm": 136.21776962330497,
376
+ "learning_rate": 3.8545278954573936e-07,
377
+ "logits/chosen": 113.0008773803711,
378
+ "logits/rejected": 117.78938293457031,
379
+ "logps/chosen": -455.12457275390625,
380
+ "logps/rejected": -453.49786376953125,
381
+ "loss": 0.5577,
382
+ "rewards/accuracies": 0.737500011920929,
383
+ "rewards/chosen": -2.3700404167175293,
384
+ "rewards/margins": 2.080824375152588,
385
+ "rewards/rejected": -4.450865268707275,
386
+ "step": 220
387
+ },
388
+ {
389
+ "epoch": 0.4,
390
+ "grad_norm": 168.04155893000748,
391
+ "learning_rate": 3.7233893723068785e-07,
392
+ "logits/chosen": 117.96110534667969,
393
+ "logits/rejected": 112.2010726928711,
394
+ "logps/chosen": -417.343994140625,
395
+ "logps/rejected": -411.2234802246094,
396
+ "loss": 0.5906,
397
+ "rewards/accuracies": 0.7250000238418579,
398
+ "rewards/chosen": -2.2008044719696045,
399
+ "rewards/margins": 1.7120252847671509,
400
+ "rewards/rejected": -3.912829875946045,
401
+ "step": 230
402
+ },
403
+ {
404
+ "epoch": 0.42,
405
+ "grad_norm": 233.13263556391968,
406
+ "learning_rate": 3.587664216205183e-07,
407
+ "logits/chosen": 111.51997375488281,
408
+ "logits/rejected": 105.1529312133789,
409
+ "logps/chosen": -435.8033142089844,
410
+ "logps/rejected": -414.79608154296875,
411
+ "loss": 0.5228,
412
+ "rewards/accuracies": 0.699999988079071,
413
+ "rewards/chosen": -1.822892189025879,
414
+ "rewards/margins": 1.6441761255264282,
415
+ "rewards/rejected": -3.4670684337615967,
416
+ "step": 240
417
+ },
418
+ {
419
+ "epoch": 0.44,
420
+ "grad_norm": 145.93578202633324,
421
+ "learning_rate": 3.447861276981619e-07,
422
+ "logits/chosen": 112.54423522949219,
423
+ "logits/rejected": 114.58158874511719,
424
+ "logps/chosen": -433.91717529296875,
425
+ "logps/rejected": -451.19140625,
426
+ "loss": 0.4728,
427
+ "rewards/accuracies": 0.793749988079071,
428
+ "rewards/chosen": -2.1698975563049316,
429
+ "rewards/margins": 2.254122018814087,
430
+ "rewards/rejected": -4.424019813537598,
431
+ "step": 250
432
+ },
433
+ {
434
+ "epoch": 0.46,
435
+ "grad_norm": 265.64870689354916,
436
+ "learning_rate": 3.304504692561714e-07,
437
+ "logits/chosen": 107.4307632446289,
438
+ "logits/rejected": 109.19493103027344,
439
+ "logps/chosen": -384.3638916015625,
440
+ "logps/rejected": -407.864013671875,
441
+ "loss": 0.572,
442
+ "rewards/accuracies": 0.768750011920929,
443
+ "rewards/chosen": -1.458132028579712,
444
+ "rewards/margins": 2.150017499923706,
445
+ "rewards/rejected": -3.608149766921997,
446
+ "step": 260
447
+ },
448
+ {
449
+ "epoch": 0.47,
450
+ "grad_norm": 188.67031809112055,
451
+ "learning_rate": 3.1581319239114976e-07,
452
+ "logits/chosen": 117.85128021240234,
453
+ "logits/rejected": 116.6944580078125,
454
+ "logps/chosen": -415.9629821777344,
455
+ "logps/rejected": -407.85235595703125,
456
+ "loss": 0.5564,
457
+ "rewards/accuracies": 0.731249988079071,
458
+ "rewards/chosen": -0.2570802569389343,
459
+ "rewards/margins": 2.1505465507507324,
460
+ "rewards/rejected": -2.4076266288757324,
461
+ "step": 270
462
+ },
463
+ {
464
+ "epoch": 0.49,
465
+ "grad_norm": 165.06348521901816,
466
+ "learning_rate": 3.0092917400321105e-07,
467
+ "logits/chosen": 128.14698791503906,
468
+ "logits/rejected": 121.8365478515625,
469
+ "logps/chosen": -417.6142578125,
470
+ "logps/rejected": -403.83367919921875,
471
+ "loss": 0.5676,
472
+ "rewards/accuracies": 0.7562500238418579,
473
+ "rewards/chosen": -0.7323893308639526,
474
+ "rewards/margins": 1.7381248474121094,
475
+ "rewards/rejected": -2.4705142974853516,
476
+ "step": 280
477
+ },
478
+ {
479
+ "epoch": 0.51,
480
+ "grad_norm": 157.00916744994808,
481
+ "learning_rate": 2.8585421605592406e-07,
482
+ "logits/chosen": 110.540283203125,
483
+ "logits/rejected": 115.08995056152344,
484
+ "logps/chosen": -396.43426513671875,
485
+ "logps/rejected": -436.2679138183594,
486
+ "loss": 0.5495,
487
+ "rewards/accuracies": 0.731249988079071,
488
+ "rewards/chosen": -0.4560773968696594,
489
+ "rewards/margins": 1.98747980594635,
490
+ "rewards/rejected": -2.4435572624206543,
491
+ "step": 290
492
+ },
493
+ {
494
+ "epoch": 0.53,
495
+ "grad_norm": 205.45827003907002,
496
+ "learning_rate": 2.706448363680831e-07,
497
+ "logits/chosen": 124.5927963256836,
498
+ "logits/rejected": 110.01478576660156,
499
+ "logps/chosen": -446.9676818847656,
500
+ "logps/rejected": -425.59130859375,
501
+ "loss": 0.5841,
502
+ "rewards/accuracies": 0.78125,
503
+ "rewards/chosen": -1.1815617084503174,
504
+ "rewards/margins": 2.0572218894958496,
505
+ "rewards/rejected": -3.238783597946167,
506
+ "step": 300
507
+ },
508
+ {
509
+ "epoch": 0.53,
510
+ "eval_logits/chosen": 81.92237091064453,
511
+ "eval_logits/rejected": 83.76121520996094,
512
+ "eval_logps/chosen": -404.27252197265625,
513
+ "eval_logps/rejected": -421.2023010253906,
514
+ "eval_loss": 0.420937716960907,
515
+ "eval_rewards/accuracies": 0.8085106611251831,
516
+ "eval_rewards/chosen": -1.5926097631454468,
517
+ "eval_rewards/margins": 2.6477227210998535,
518
+ "eval_rewards/rejected": -4.24033260345459,
519
+ "eval_runtime": 140.4063,
520
+ "eval_samples_per_second": 21.367,
521
+ "eval_steps_per_second": 0.669,
522
+ "step": 300
523
+ },
524
+ {
525
+ "epoch": 0.54,
526
+ "grad_norm": 160.60055964808268,
527
+ "learning_rate": 2.5535805672165076e-07,
528
+ "logits/chosen": 137.0350799560547,
529
+ "logits/rejected": 120.5669174194336,
530
+ "logps/chosen": -484.946533203125,
531
+ "logps/rejected": -442.79840087890625,
532
+ "loss": 0.531,
533
+ "rewards/accuracies": 0.78125,
534
+ "rewards/chosen": -1.6490952968597412,
535
+ "rewards/margins": 2.217057943344116,
536
+ "rewards/rejected": -3.8661532402038574,
537
+ "step": 310
538
+ },
539
+ {
540
+ "epoch": 0.56,
541
+ "grad_norm": 185.28720083745498,
542
+ "learning_rate": 2.4005118908028396e-07,
543
+ "logits/chosen": 124.60655212402344,
544
+ "logits/rejected": 121.116455078125,
545
+ "logps/chosen": -404.44512939453125,
546
+ "logps/rejected": -417.16912841796875,
547
+ "loss": 0.5466,
548
+ "rewards/accuracies": 0.7437499761581421,
549
+ "rewards/chosen": -0.48712271451950073,
550
+ "rewards/margins": 2.2081336975097656,
551
+ "rewards/rejected": -2.695256471633911,
552
+ "step": 320
553
+ },
554
+ {
555
+ "epoch": 0.58,
556
+ "grad_norm": 126.54706641028886,
557
+ "learning_rate": 2.2478162071993296e-07,
558
+ "logits/chosen": 120.48885345458984,
559
+ "logits/rejected": 124.61685943603516,
560
+ "logps/chosen": -399.6452941894531,
561
+ "logps/rejected": -414.8997497558594,
562
+ "loss": 0.4872,
563
+ "rewards/accuracies": 0.75,
564
+ "rewards/chosen": -0.09031596034765244,
565
+ "rewards/margins": 2.396162509918213,
566
+ "rewards/rejected": -2.486478567123413,
567
+ "step": 330
568
+ },
569
+ {
570
+ "epoch": 0.6,
571
+ "grad_norm": 178.09627633035737,
572
+ "learning_rate": 2.096065990770863e-07,
573
+ "logits/chosen": 122.05961608886719,
574
+ "logits/rejected": 118.81587982177734,
575
+ "logps/chosen": -394.23077392578125,
576
+ "logps/rejected": -430.9129943847656,
577
+ "loss": 0.5554,
578
+ "rewards/accuracies": 0.706250011920929,
579
+ "rewards/chosen": -0.9867365956306458,
580
+ "rewards/margins": 1.8670978546142578,
581
+ "rewards/rejected": -2.8538341522216797,
582
+ "step": 340
583
+ },
584
+ {
585
+ "epoch": 0.61,
586
+ "grad_norm": 177.15865509490735,
587
+ "learning_rate": 1.9458301712129033e-07,
588
+ "logits/chosen": 119.64925384521484,
589
+ "logits/rejected": 119.57633972167969,
590
+ "logps/chosen": -411.25408935546875,
591
+ "logps/rejected": -429.3443298339844,
592
+ "loss": 0.5122,
593
+ "rewards/accuracies": 0.793749988079071,
594
+ "rewards/chosen": -0.9517320394515991,
595
+ "rewards/margins": 2.0737788677215576,
596
+ "rewards/rejected": -3.0255112648010254,
597
+ "step": 350
598
+ },
599
+ {
600
+ "epoch": 0.63,
601
+ "grad_norm": 139.47726339895294,
602
+ "learning_rate": 1.7976720005660767e-07,
603
+ "logits/chosen": 113.1412582397461,
604
+ "logits/rejected": 101.12696838378906,
605
+ "logps/chosen": -406.71917724609375,
606
+ "logps/rejected": -391.29437255859375,
607
+ "loss": 0.5455,
608
+ "rewards/accuracies": 0.731249988079071,
609
+ "rewards/chosen": -0.5302667617797852,
610
+ "rewards/margins": 2.25763201713562,
611
+ "rewards/rejected": -2.787898540496826,
612
+ "step": 360
613
+ },
614
+ {
615
+ "epoch": 0.65,
616
+ "grad_norm": 133.36840849054562,
617
+ "learning_rate": 1.652146941516963e-07,
618
+ "logits/chosen": 114.43309020996094,
619
+ "logits/rejected": 119.9453353881836,
620
+ "logps/chosen": -399.7528381347656,
621
+ "logps/rejected": -420.86297607421875,
622
+ "loss": 0.4984,
623
+ "rewards/accuracies": 0.75,
624
+ "rewards/chosen": -0.21387052536010742,
625
+ "rewards/margins": 2.155322313308716,
626
+ "rewards/rejected": -2.3691928386688232,
627
+ "step": 370
628
+ },
629
+ {
630
+ "epoch": 0.67,
631
+ "grad_norm": 149.00043137049042,
632
+ "learning_rate": 1.5098005849021078e-07,
633
+ "logits/chosen": 127.96968841552734,
634
+ "logits/rejected": 125.54051208496094,
635
+ "logps/chosen": -389.4139404296875,
636
+ "logps/rejected": -413.8482971191406,
637
+ "loss": 0.5179,
638
+ "rewards/accuracies": 0.7250000238418579,
639
+ "rewards/chosen": -0.6685792803764343,
640
+ "rewards/margins": 2.1137466430664062,
641
+ "rewards/rejected": -2.7823262214660645,
642
+ "step": 380
643
+ },
644
+ {
645
+ "epoch": 0.68,
646
+ "grad_norm": 186.72529615905967,
647
+ "learning_rate": 1.371166604222777e-07,
648
+ "logits/chosen": 119.5155258178711,
649
+ "logits/rejected": 120.4027328491211,
650
+ "logps/chosen": -425.53814697265625,
651
+ "logps/rejected": -438.1182556152344,
652
+ "loss": 0.5434,
653
+ "rewards/accuracies": 0.668749988079071,
654
+ "rewards/chosen": -1.0980108976364136,
655
+ "rewards/margins": 1.9416344165802002,
656
+ "rewards/rejected": -3.039644956588745,
657
+ "step": 390
658
+ },
659
+ {
660
+ "epoch": 0.7,
661
+ "grad_norm": 199.20784502094995,
662
+ "learning_rate": 1.236764754839226e-07,
663
+ "logits/chosen": 123.8387680053711,
664
+ "logits/rejected": 123.20845794677734,
665
+ "logps/chosen": -452.08056640625,
666
+ "logps/rejected": -414.00054931640625,
667
+ "loss": 0.519,
668
+ "rewards/accuracies": 0.768750011920929,
669
+ "rewards/chosen": -1.4723873138427734,
670
+ "rewards/margins": 2.1262927055358887,
671
+ "rewards/rejected": -3.598680019378662,
672
+ "step": 400
673
+ },
674
+ {
675
+ "epoch": 0.7,
676
+ "eval_logits/chosen": 84.7816162109375,
677
+ "eval_logits/rejected": 85.82012176513672,
678
+ "eval_logps/chosen": -400.7308044433594,
679
+ "eval_logps/rejected": -420.57318115234375,
680
+ "eval_loss": 0.41622278094291687,
681
+ "eval_rewards/accuracies": 0.7819148898124695,
682
+ "eval_rewards/chosen": -1.2384368181228638,
683
+ "eval_rewards/margins": 2.938983917236328,
684
+ "eval_rewards/rejected": -4.1774210929870605,
685
+ "eval_runtime": 139.5411,
686
+ "eval_samples_per_second": 21.499,
687
+ "eval_steps_per_second": 0.674,
688
+ "step": 400
689
+ },
690
+ {
691
+ "epoch": 0.72,
692
+ "grad_norm": 134.84528611140695,
693
+ "learning_rate": 1.1070989253457461e-07,
694
+ "logits/chosen": 122.2228012084961,
695
+ "logits/rejected": 110.46368408203125,
696
+ "logps/chosen": -441.6568298339844,
697
+ "logps/rejected": -433.2784118652344,
698
+ "loss": 0.5219,
699
+ "rewards/accuracies": 0.762499988079071,
700
+ "rewards/chosen": -1.0036917924880981,
701
+ "rewards/margins": 2.34743595123291,
702
+ "rewards/rejected": -3.3511276245117188,
703
+ "step": 410
704
+ },
705
+ {
706
+ "epoch": 0.74,
707
+ "grad_norm": 133.1187583799304,
708
+ "learning_rate": 9.826552484321085e-08,
709
+ "logits/chosen": 132.171630859375,
710
+ "logits/rejected": 129.95004272460938,
711
+ "logps/chosen": -417.84796142578125,
712
+ "logps/rejected": -423.4884338378906,
713
+ "loss": 0.5061,
714
+ "rewards/accuracies": 0.768750011920929,
715
+ "rewards/chosen": -0.618304967880249,
716
+ "rewards/margins": 2.553462266921997,
717
+ "rewards/rejected": -3.171767234802246,
718
+ "step": 420
719
+ },
720
+ {
721
+ "epoch": 0.75,
722
+ "grad_norm": 573.6152142738256,
723
+ "learning_rate": 8.639002783140181e-08,
724
+ "logits/chosen": 118.46337890625,
725
+ "logits/rejected": 109.05899810791016,
726
+ "logps/chosen": -426.46795654296875,
727
+ "logps/rejected": -418.7447814941406,
728
+ "loss": 0.5267,
729
+ "rewards/accuracies": 0.731249988079071,
730
+ "rewards/chosen": -0.6656720042228699,
731
+ "rewards/margins": 2.3314335346221924,
732
+ "rewards/rejected": -2.997105598449707,
733
+ "step": 430
734
+ },
735
+ {
736
+ "epoch": 0.77,
737
+ "grad_norm": 148.45628734093913,
738
+ "learning_rate": 7.512792415656055e-08,
739
+ "logits/chosen": 119.17752838134766,
740
+ "logits/rejected": 121.80256652832031,
741
+ "logps/chosen": -419.00714111328125,
742
+ "logps/rejected": -397.78277587890625,
743
+ "loss": 0.5312,
744
+ "rewards/accuracies": 0.731249988079071,
745
+ "rewards/chosen": -0.8773514628410339,
746
+ "rewards/margins": 1.9682013988494873,
747
+ "rewards/rejected": -2.845552921295166,
748
+ "step": 440
749
+ },
750
+ {
751
+ "epoch": 0.79,
752
+ "grad_norm": 162.12056581720563,
753
+ "learning_rate": 6.452143679117964e-08,
754
+ "logits/chosen": 112.10150146484375,
755
+ "logits/rejected": 113.61590576171875,
756
+ "logps/chosen": -399.46868896484375,
757
+ "logps/rejected": -410.13604736328125,
758
+ "loss": 0.4931,
759
+ "rewards/accuracies": 0.7749999761581421,
760
+ "rewards/chosen": -0.21312670409679413,
761
+ "rewards/margins": 2.5830297470092773,
762
+ "rewards/rejected": -2.796156406402588,
763
+ "step": 450
764
+ },
765
+ {
766
+ "epoch": 0.81,
767
+ "grad_norm": 126.77510481617468,
768
+ "learning_rate": 5.46103307238617e-08,
769
+ "logits/chosen": 123.6783447265625,
770
+ "logits/rejected": 120.97247314453125,
771
+ "logps/chosen": -415.7288513183594,
772
+ "logps/rejected": -401.6763610839844,
773
+ "loss": 0.5306,
774
+ "rewards/accuracies": 0.762499988079071,
775
+ "rewards/chosen": 0.10093896090984344,
776
+ "rewards/margins": 1.8561251163482666,
777
+ "rewards/rejected": -1.7551860809326172,
778
+ "step": 460
779
+ },
780
+ {
781
+ "epoch": 0.82,
782
+ "grad_norm": 150.71507301379341,
783
+ "learning_rate": 4.5431763875625226e-08,
784
+ "logits/chosen": 116.371826171875,
785
+ "logits/rejected": 127.0480728149414,
786
+ "logps/chosen": -382.4268798828125,
787
+ "logps/rejected": -425.90899658203125,
788
+ "loss": 0.5385,
789
+ "rewards/accuracies": 0.78125,
790
+ "rewards/chosen": 0.36557143926620483,
791
+ "rewards/margins": 2.3377490043640137,
792
+ "rewards/rejected": -1.972177267074585,
793
+ "step": 470
794
+ },
795
+ {
796
+ "epoch": 0.84,
797
+ "grad_norm": 158.81620899731348,
798
+ "learning_rate": 3.702014779041826e-08,
799
+ "logits/chosen": 123.89225769042969,
800
+ "logits/rejected": 123.4906234741211,
801
+ "logps/chosen": -424.238525390625,
802
+ "logps/rejected": -413.3152770996094,
803
+ "loss": 0.5397,
804
+ "rewards/accuracies": 0.7437499761581421,
805
+ "rewards/chosen": 0.1715961992740631,
806
+ "rewards/margins": 2.171330213546753,
807
+ "rewards/rejected": -1.9997339248657227,
808
+ "step": 480
809
+ },
810
+ {
811
+ "epoch": 0.86,
812
+ "grad_norm": 146.54123732916176,
813
+ "learning_rate": 2.940701862212802e-08,
814
+ "logits/chosen": 126.18601989746094,
815
+ "logits/rejected": 121.62428283691406,
816
+ "logps/chosen": -400.54058837890625,
817
+ "logps/rejected": -408.0806884765625,
818
+ "loss": 0.5078,
819
+ "rewards/accuracies": 0.762499988079071,
820
+ "rewards/chosen": -0.3071042597293854,
821
+ "rewards/margins": 1.9608478546142578,
822
+ "rewards/rejected": -2.2679522037506104,
823
+ "step": 490
824
+ },
825
+ {
826
+ "epoch": 0.88,
827
+ "grad_norm": 179.72110486843388,
828
+ "learning_rate": 2.2620918901771507e-08,
829
+ "logits/chosen": 123.40205383300781,
830
+ "logits/rejected": 128.0767059326172,
831
+ "logps/chosen": -391.4453125,
832
+ "logps/rejected": -411.9076232910156,
833
+ "loss": 0.5432,
834
+ "rewards/accuracies": 0.6937500238418579,
835
+ "rewards/chosen": -0.5281409621238708,
836
+ "rewards/margins": 1.8229057788848877,
837
+ "rewards/rejected": -2.3510470390319824,
838
+ "step": 500
839
+ },
840
+ {
841
+ "epoch": 0.88,
842
+ "eval_logits/chosen": 82.89906311035156,
843
+ "eval_logits/rejected": 83.63628387451172,
844
+ "eval_logps/chosen": -392.10986328125,
845
+ "eval_logps/rejected": -413.8585510253906,
846
+ "eval_loss": 0.4134235084056854,
847
+ "eval_rewards/accuracies": 0.8031914830207825,
848
+ "eval_rewards/chosen": -0.3763478994369507,
849
+ "eval_rewards/margins": 3.1296117305755615,
850
+ "eval_rewards/rejected": -3.5059597492218018,
851
+ "eval_runtime": 139.1588,
852
+ "eval_samples_per_second": 21.558,
853
+ "eval_steps_per_second": 0.675,
854
+ "step": 500
855
+ },
856
+ {
857
+ "epoch": 0.89,
858
+ "grad_norm": 136.393779166859,
859
+ "learning_rate": 1.6687290528135722e-08,
860
+ "logits/chosen": 125.94908142089844,
861
+ "logits/rejected": 125.49617004394531,
862
+ "logps/chosen": -416.5887756347656,
863
+ "logps/rejected": -427.69500732421875,
864
+ "loss": 0.5234,
865
+ "rewards/accuracies": 0.7437499761581421,
866
+ "rewards/chosen": -0.5508764386177063,
867
+ "rewards/margins": 2.112180471420288,
868
+ "rewards/rejected": -2.6630568504333496,
869
+ "step": 510
870
+ },
871
+ {
872
+ "epoch": 0.91,
873
+ "grad_norm": 146.29141931806384,
874
+ "learning_rate": 1.1628379383059022e-08,
875
+ "logits/chosen": 120.80562591552734,
876
+ "logits/rejected": 115.93048095703125,
877
+ "logps/chosen": -371.3426513671875,
878
+ "logps/rejected": -387.1521301269531,
879
+ "loss": 0.4969,
880
+ "rewards/accuracies": 0.824999988079071,
881
+ "rewards/chosen": -0.32862672209739685,
882
+ "rewards/margins": 2.9703025817871094,
883
+ "rewards/rejected": -3.298929214477539,
884
+ "step": 520
885
+ },
886
+ {
887
+ "epoch": 0.93,
888
+ "grad_norm": 147.11717811510456,
889
+ "learning_rate": 7.463151928961548e-09,
890
+ "logits/chosen": 122.363037109375,
891
+ "logits/rejected": 109.66593933105469,
892
+ "logps/chosen": -448.3584899902344,
893
+ "logps/rejected": -436.228515625,
894
+ "loss": 0.5469,
895
+ "rewards/accuracies": 0.7749999761581421,
896
+ "rewards/chosen": -0.46789926290512085,
897
+ "rewards/margins": 2.4167771339416504,
898
+ "rewards/rejected": -2.884676933288574,
899
+ "step": 530
900
+ },
901
+ {
902
+ "epoch": 0.95,
903
+ "grad_norm": 155.0350657623424,
904
+ "learning_rate": 4.207224101311246e-09,
905
+ "logits/chosen": 117.22102355957031,
906
+ "logits/rejected": 120.94425201416016,
907
+ "logps/chosen": -442.1240234375,
908
+ "logps/rejected": -422.91839599609375,
909
+ "loss": 0.5662,
910
+ "rewards/accuracies": 0.75,
911
+ "rewards/chosen": -0.7507426738739014,
912
+ "rewards/margins": 2.2546093463897705,
913
+ "rewards/rejected": -3.005352020263672,
914
+ "step": 540
915
+ },
916
+ {
917
+ "epoch": 0.96,
918
+ "grad_norm": 152.83811169959978,
919
+ "learning_rate": 1.8728027626156997e-09,
920
+ "logits/chosen": 125.8171157836914,
921
+ "logits/rejected": 120.76863861083984,
922
+ "logps/chosen": -421.21453857421875,
923
+ "logps/rejected": -413.3653869628906,
924
+ "loss": 0.5051,
925
+ "rewards/accuracies": 0.7437499761581421,
926
+ "rewards/chosen": -0.43215274810791016,
927
+ "rewards/margins": 2.5282857418060303,
928
+ "rewards/rejected": -2.9604387283325195,
929
+ "step": 550
930
+ },
931
+ {
932
+ "epoch": 0.98,
933
+ "grad_norm": 119.16575693204335,
934
+ "learning_rate": 4.686399374358441e-10,
935
+ "logits/chosen": 125.23756408691406,
936
+ "logits/rejected": 120.61265563964844,
937
+ "logps/chosen": -421.6220703125,
938
+ "logps/rejected": -447.9476013183594,
939
+ "loss": 0.4916,
940
+ "rewards/accuracies": 0.7124999761581421,
941
+ "rewards/chosen": -0.8027129173278809,
942
+ "rewards/margins": 2.2439770698547363,
943
+ "rewards/rejected": -3.046689987182617,
944
+ "step": 560
945
+ },
946
+ {
947
+ "epoch": 1.0,
948
+ "grad_norm": 150.61389898443417,
949
+ "learning_rate": 0.0,
950
+ "logits/chosen": 126.7450942993164,
951
+ "logits/rejected": 124.49223327636719,
952
+ "logps/chosen": -381.63909912109375,
953
+ "logps/rejected": -396.2301940917969,
954
+ "loss": 0.4902,
955
+ "rewards/accuracies": 0.7562500238418579,
956
+ "rewards/chosen": -0.5853160619735718,
957
+ "rewards/margins": 2.132859706878662,
958
+ "rewards/rejected": -2.7181754112243652,
959
+ "step": 570
960
+ },
961
+ {
962
+ "epoch": 1.0,
963
+ "step": 570,
964
+ "total_flos": 0.0,
965
+ "train_loss": 0.5623811487565961,
966
+ "train_runtime": 6612.2068,
967
+ "train_samples_per_second": 11.039,
968
+ "train_steps_per_second": 0.086
969
+ }
970
+ ],
971
+ "logging_steps": 10,
972
+ "max_steps": 570,
973
+ "num_input_tokens_seen": 0,
974
+ "num_train_epochs": 1,
975
+ "save_steps": 100,
976
+ "total_flos": 0.0,
977
+ "train_batch_size": 2,
978
+ "trial_name": null,
979
+ "trial_params": null
980
+ }
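
The `log_history` array in trainer_state.json carries both the per-step training logs and the checkpoint evaluations. A short sketch of pulling the evaluation curve out of it; keys follow the entries above and the file path is hypothetical.

```python
# Sketch: extract the eval curve from trainer_state.json.
import json

with open("trainer_state.json") as f:  # hypothetical local path
    state = json.load(f)

evals = [e for e in state["log_history"] if "eval_loss" in e]
for e in evals:
    print(e["step"], round(e["eval_loss"], 4), round(e["eval_rewards/margins"], 4))
# Expected output: steps 100..500, eval_loss falling from 0.4651 to 0.4134
```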