silviasapora committed (verified)
Commit 05a3dde · Parent: 46e1556

Model save
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,79 @@
+ ---
+ library_name: transformers
+ license: gemma
+ base_model: google/gemma-7b
+ tags:
+ - trl
+ - orpo
+ - generated_from_trainer
+ model-index:
+ - name: gemma-7b-borpo-low-quality
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # gemma-7b-borpo-low-quality
+
+ This model is a fine-tuned version of [google/gemma-7b](https://huggingface.co/google/gemma-7b) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.5380
+ - Rewards/chosen: -0.0547
+ - Rewards/rejected: -0.0625
+ - Rewards/accuracies: 0.5468
+ - Rewards/margins: 0.0079
+ - Logps/rejected: -1.2508
+ - Logps/chosen: -1.0933
+ - Logits/rejected: 267.2346
+ - Logits/chosen: 296.6808
+ - Nll Loss: 1.4703
+ - Log Odds Ratio: -0.7039
+ - Log Odds Chosen: 0.2721
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-06
+ - train_batch_size: 2
+ - eval_batch_size: 1
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 32
+ - total_eval_batch_size: 4
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: inverse_sqrt
+ - lr_scheduler_warmup_steps: 100
+ - num_epochs: 3
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen | Nll Loss | Log Odds Ratio | Log Odds Chosen |
+ |:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|:--------:|:--------------:|:---------------:|
+ | 1.436         | 0.9955 | 167  | 1.4639          | -0.0502        | -0.0571          | 0.5540             | 0.0068          | -1.1413        | -1.0048      | 294.2689        | 322.9157      | 1.4152   | -0.6882        | 0.2192          |
+ | 1.0918        | 1.9970 | 335  | 1.4233          | -0.0501        | -0.0574          | 0.4964             | 0.0073          | -1.1475        | -1.0012      | 284.8744        | 313.3100      | 1.3661   | -0.7028        | 0.2209          |
+ | 0.576         | 2.9866 | 501  | 1.5380          | -0.0547        | -0.0625          | 0.5468             | 0.0079          | -1.2508        | -1.0933      | 267.2346        | 296.6808      | 1.4703   | -0.7039        | 0.2721          |
+
+
+ ### Framework versions
+
+ - Transformers 4.44.2
+ - Pytorch 2.4.0+cu121
+ - Datasets 3.0.0
+ - Tokenizers 0.19.1
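For reference (not part of this commit's diff): a minimal loading sketch for the checkpoint above. The repo id is assumed from the model-index name, and `torch_dtype=torch.bfloat16` mirrors the `torch_dtype` in the config.json added below.

```python
# Illustrative sketch only, not part of this commit.
# Assumes the checkpoint is published under the model-index name above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "silviasapora/gemma-7b-borpo-low-quality"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)

# Greedy generation from a short prompt as a smoke test.
inputs = tokenizer("The ORPO objective combines", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```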
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 2.9865871833084947,
+   "total_flos": 0.0,
+   "train_loss": 1.4583633707431025,
+   "train_runtime": 13840.413,
+   "train_samples": 5364,
+   "train_samples_per_second": 1.163,
+   "train_steps_per_second": 0.036
+ }
config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "_name_or_path": "google/gemma-7b",
+   "architectures": [
+     "GemmaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 106,
+   "eos_token_id": 107,
+   "head_dim": 256,
+   "hidden_act": "gelu",
+   "hidden_activation": "gelu_pytorch_tanh",
+   "hidden_size": 3072,
+   "initializer_range": 0.02,
+   "intermediate_size": 24576,
+   "max_position_embeddings": 8192,
+   "model_type": "gemma",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 28,
+   "num_key_value_heads": 16,
+   "pad_token_id": 107,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.44.2",
+   "use_cache": false,
+   "vocab_size": 256000
+ }
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 106,
+   "eos_token_id": 107,
+   "pad_token_id": 107,
+   "transformers_version": "4.44.2"
+ }
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:939232d17e3967c3caafed0eba01dd5ff3ffc46d415abd2cb256c1118aade1d4
+ size 4995496656
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4abb51cfb7e74d6ab27e2b66b730506921d56b53413ab1dabdb30d4acbdd963
+ size 4982953168
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d2c95e59e11f7826294c7603e84850e1664e4c5074896a0e5dbcdf7e5c55366
+ size 4982953200
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a76fec30f621da3f8585db962d2d84ad4cc84496bd827e491478c191a5af87ea
+ size 2113988336
model.safetensors.index.json ADDED
@@ -0,0 +1,261 @@
+ {
+   "metadata": {
+     "total_size": 17075361792
+   },
+   "weight_map": {
+     "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.15.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.15.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.15.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.16.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.16.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.16.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.17.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.17.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.input_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.24.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.24.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.24.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.24.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+     "model.layers.25.input_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.25.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.25.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.25.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.25.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.25.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.25.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.25.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.25.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.26.input_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.26.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.26.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.26.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.26.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.26.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.26.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.26.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.26.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.input_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.27.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.6.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.6.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.7.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+     "model.norm.weight": "model-00004-of-00004.safetensors"
+   }
+ }
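The index above maps each parameter name to one of the four shards. A short sketch of how that lookup works, assuming the shard files have been downloaded next to the index:

```python
# Sketch: resolve a single tensor through model.safetensors.index.json.
# Assumes the shards listed above sit in the current directory.
import json
from safetensors.torch import load_file

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.embed_tokens.weight"
shard = index["weight_map"][name]   # -> "model-00001-of-00004.safetensors"
tensor = load_file(shard)[name]     # load that shard, pick the tensor
print(shard, tuple(tensor.shape))
```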
runs/Sep20_21-28-51_65ecb96dba42/events.out.tfevents.1726867828.65ecb96dba42.202062.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f8b8961d4d9997efa5b656c0170916f1782815f0c92d2e2b23808ee6d577efe
+ size 5408
runs/Sep20_21-31-58_65ecb96dba42/events.out.tfevents.1726868017.65ecb96dba42.204459.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a175c7523f6f2458ff2dbe1cb0bf1b3398334a8500108018703bb67150a064fc
+ size 5408
runs/Sep20_21-39-49_65ecb96dba42/events.out.tfevents.1726868472.65ecb96dba42.209667.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee308f27eccbb4f164bd1776aae8b50b7548601bb20f46734b749a2920abc97f
+ size 5408
runs/Sep20_21-43-04_65ecb96dba42/events.out.tfevents.1726868682.65ecb96dba42.212134.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df2112b4a40d4e5523ea2183d882680333ee69e26ee27415d7da60d18437e393
+ size 5408
runs/Sep20_22-20-48_65ecb96dba42/events.out.tfevents.1726870945.65ecb96dba42.233626.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e3c7244b71d0d742156b9d76f941d5137d78a67bf61a0dab88448efac5232a22
+ size 7098
runs/Sep20_22-31-39_65ecb96dba42/events.out.tfevents.1726871582.65ecb96dba42.240178.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58559ee73d657b893b9004715e2dfeb78f55f940bd13d025f82563d4c5c97b3b
+ size 94114
special_tokens_map.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "additional_special_tokens": [
+     {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     }
+   ],
+   "bos_token": "<|im_start|>",
+   "eos_token": "<|im_end|>",
+   "pad_token": "<|im_end|>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:322a5f52ab5cab196761ab397a022d6fa3a2e1418585e532bb6efb2fedd2ae94
+ size 17477501
tokenizer_config.json ADDED
@@ -0,0 +1,70 @@
+ {
+   "add_bos_token": false,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<eos>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<bos>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "106": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "107": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": "<|im_start|>",
+   "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "legacy": null,
+   "model_max_length": 2048,
+   "pad_token": "<|im_end|>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "GemmaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
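The `chat_template` above is ChatML-style. A small sketch of how it renders a conversation (repo id assumed as before):

```python
# Sketch of the ChatML-style chat_template defined in tokenizer_config.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("silviasapora/gemma-7b-borpo-low-quality")
messages = [{"role": "user", "content": "Hello!"}]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
```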
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 2.9865871833084947,
+   "total_flos": 0.0,
+   "train_loss": 1.4583633707431025,
+   "train_runtime": 13840.413,
+   "train_samples": 5364,
+   "train_samples_per_second": 1.163,
+   "train_steps_per_second": 0.036
+ }
trainer_state.json ADDED
@@ -0,0 +1,1899 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 2.9865871833084947,
+   "eval_steps": 500,
+   "global_step": 501,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.029806259314456036,
+       "grad_norm": 1766.112548828125,
+       "learning_rate": 2.5000000000000004e-07,
+       "log_odds_chosen": -0.22085151076316833,
+       "log_odds_ratio": -1.0071768760681152,
+       "logits/chosen": 204.30807495117188,
+       "logits/rejected": 202.9915771484375,
+       "logps/chosen": -14.827531814575195,
+       "logps/rejected": -14.60667896270752,
+       "loss": 14.9615,
+       "nll_loss": 14.545796394348145,
+       "rewards/accuracies": 0.3499999940395355,
+       "rewards/chosen": -0.7413766384124756,
+       "rewards/margins": -0.011042552068829536,
+       "rewards/rejected": -0.7303339838981628,
+       "step": 5
+     },
+     {
+       "epoch": 0.05961251862891207,
+       "grad_norm": 1192.7847900390625,
+       "learning_rate": 5.000000000000001e-07,
+       "log_odds_chosen": 0.2549552619457245,
+       "log_odds_ratio": -0.769985020160675,
+       "logits/chosen": 219.4311981201172,
+       "logits/rejected": 223.46994018554688,
+       "logps/chosen": -12.237886428833008,
+       "logps/rejected": -12.492194175720215,
+       "loss": 12.6124,
+       "nll_loss": 12.336746215820312,
+       "rewards/accuracies": 0.5249999761581421,
+       "rewards/chosen": -0.6118943095207214,
+       "rewards/margins": 0.012715431861579418,
+       "rewards/rejected": -0.6246097683906555,
+       "step": 10
+     },
+     {
+       "epoch": 0.08941877794336811,
+       "grad_norm": 725.3335571289062,
+       "learning_rate": 7.5e-07,
+       "log_odds_chosen": 0.0513768307864666,
+       "log_odds_ratio": -0.7725777626037598,
+       "logits/chosen": 282.11224365234375,
+       "logits/rejected": 261.1120910644531,
+       "logps/chosen": -7.968921661376953,
+       "logps/rejected": -8.020242691040039,
+       "loss": 8.2811,
+       "nll_loss": 7.956255912780762,
+       "rewards/accuracies": 0.574999988079071,
+       "rewards/chosen": -0.39844608306884766,
+       "rewards/margins": 0.0025660558603703976,
+       "rewards/rejected": -0.401012122631073,
+       "step": 15
+     },
+     {
+       "epoch": 0.11922503725782414,
+       "grad_norm": 216.32925415039062,
+       "learning_rate": 1.0000000000000002e-06,
+       "log_odds_chosen": -0.1569918394088745,
+       "log_odds_ratio": -0.9539459347724915,
+       "logits/chosen": 280.9331359863281,
+       "logits/rejected": 275.239013671875,
+       "logps/chosen": -5.379445552825928,
+       "logps/rejected": -5.223770618438721,
+       "loss": 5.4483,
+       "nll_loss": 5.457978248596191,
+       "rewards/accuracies": 0.3499999940395355,
+       "rewards/chosen": -0.268972247838974,
+       "rewards/margins": -0.007783788256347179,
+       "rewards/rejected": -0.26118847727775574,
+       "step": 20
+     },
+     {
+       "epoch": 0.14903129657228018,
+       "grad_norm": 152.3154296875,
+       "learning_rate": 1.25e-06,
+       "log_odds_chosen": -0.03973072022199631,
+       "log_odds_ratio": -0.8836725354194641,
+       "logits/chosen": 300.5466003417969,
+       "logits/rejected": 309.90667724609375,
+       "logps/chosen": -3.2718262672424316,
+       "logps/rejected": -3.2141456604003906,
+       "loss": 3.4993,
+       "nll_loss": 3.386289596557617,
+       "rewards/accuracies": 0.5249999761581421,
+       "rewards/chosen": -0.16359131038188934,
+       "rewards/margins": -0.0028840256854891777,
+       "rewards/rejected": -0.1607072949409485,
+       "step": 25
+     },
+     {
+       "epoch": 0.17883755588673622,
+       "grad_norm": 89.40418243408203,
+       "learning_rate": 1.5e-06,
+       "log_odds_chosen": -0.05680186673998833,
+       "log_odds_ratio": -0.8815790414810181,
+       "logits/chosen": 350.63232421875,
+       "logits/rejected": 379.04736328125,
+       "logps/chosen": -2.606602668762207,
+       "logps/rejected": -2.523834466934204,
+       "loss": 2.5544,
+       "nll_loss": 2.63269305229187,
+       "rewards/accuracies": 0.550000011920929,
+       "rewards/chosen": -0.1303301304578781,
+       "rewards/margins": -0.0041384161449968815,
+       "rewards/rejected": -0.12619172036647797,
+       "step": 30
+     },
+     {
+       "epoch": 0.20864381520119224,
+       "grad_norm": 49.261566162109375,
+       "learning_rate": 1.75e-06,
+       "log_odds_chosen": 0.16310584545135498,
+       "log_odds_ratio": -0.7383562922477722,
+       "logits/chosen": 385.07769775390625,
+       "logits/rejected": 373.39593505859375,
+       "logps/chosen": -1.8015142679214478,
+       "logps/rejected": -1.905853033065796,
+       "loss": 2.1302,
+       "nll_loss": 2.0047783851623535,
+       "rewards/accuracies": 0.550000011920929,
+       "rewards/chosen": -0.09007571637630463,
+       "rewards/margins": 0.005216942168772221,
+       "rewards/rejected": -0.09529266506433487,
+       "step": 35
+     },
+     {
+       "epoch": 0.23845007451564829,
+       "grad_norm": 35.88943099975586,
+       "learning_rate": 2.0000000000000003e-06,
+       "log_odds_chosen": 0.03436558321118355,
+       "log_odds_ratio": -0.7623429298400879,
+       "logits/chosen": 378.38153076171875,
+       "logits/rejected": 376.9698791503906,
+       "logps/chosen": -1.6465238332748413,
+       "logps/rejected": -1.670940637588501,
+       "loss": 1.9446,
+       "nll_loss": 2.038804769515991,
+       "rewards/accuracies": 0.5249999761581421,
+       "rewards/chosen": -0.08232619613409042,
+       "rewards/margins": 0.0012208283878862858,
+       "rewards/rejected": -0.08354702591896057,
+       "step": 40
+     },
+     {
+       "epoch": 0.26825633383010433,
+       "grad_norm": 37.345130920410156,
+       "learning_rate": 2.25e-06,
+       "log_odds_chosen": 0.058513958007097244,
+       "log_odds_ratio": -0.7263653874397278,
+       "logits/chosen": 390.2134704589844,
+       "logits/rejected": 399.8643493652344,
+       "logps/chosen": -1.5079405307769775,
+       "logps/rejected": -1.5511177778244019,
+       "loss": 1.8369,
+       "nll_loss": 1.7173048257827759,
+       "rewards/accuracies": 0.550000011920929,
+       "rewards/chosen": -0.07539702951908112,
+       "rewards/margins": 0.002158870454877615,
+       "rewards/rejected": -0.07755588740110397,
+       "step": 45
+     },
+     {
+       "epoch": 0.29806259314456035,
+       "grad_norm": 50.422019958496094,
+       "learning_rate": 2.5e-06,
+       "log_odds_chosen": 0.2006622552871704,
+       "log_odds_ratio": -0.6733421087265015,
+       "logits/chosen": 399.89837646484375,
+       "logits/rejected": 420.90509033203125,
+       "logps/chosen": -1.418306827545166,
+       "logps/rejected": -1.5650049448013306,
+       "loss": 1.8378,
+       "nll_loss": 1.8607208728790283,
+       "rewards/accuracies": 0.550000011920929,
+       "rewards/chosen": -0.0709153264760971,
+       "rewards/margins": 0.007334905676543713,
+       "rewards/rejected": -0.07825024425983429,
+       "step": 50
+     },
+     {
+       "epoch": 0.32786885245901637,
+       "grad_norm": 47.060543060302734,
+       "learning_rate": 2.7500000000000004e-06,
+       "log_odds_chosen": 0.04189341515302658,
+       "log_odds_ratio": -0.7426876425743103,
+       "logits/chosen": 387.83795166015625,
+       "logits/rejected": 379.873291015625,
+       "logps/chosen": -1.4415570497512817,
+       "logps/rejected": -1.463037133216858,
+       "loss": 1.8704,
+       "nll_loss": 1.915564775466919,
+       "rewards/accuracies": 0.550000011920929,
+       "rewards/chosen": -0.07207784801721573,
+       "rewards/margins": 0.0010740077123045921,
+       "rewards/rejected": -0.07315186411142349,
+       "step": 55
+     },
+     {
+       "epoch": 0.35767511177347244,
+       "grad_norm": 34.15119934082031,
+       "learning_rate": 3e-06,
+       "log_odds_chosen": 0.14166930317878723,
+       "log_odds_ratio": -0.6689570546150208,
+       "logits/chosen": 395.5494079589844,
+       "logits/rejected": 387.44561767578125,
+       "logps/chosen": -1.3572882413864136,
+       "logps/rejected": -1.4584226608276367,
+       "loss": 1.705,
+       "nll_loss": 1.769514799118042,
+       "rewards/accuracies": 0.6499999761581421,
+       "rewards/chosen": -0.06786441057920456,
+       "rewards/margins": 0.005056709982454777,
+       "rewards/rejected": -0.07292111963033676,
+       "step": 60
+     },
+     {
+       "epoch": 0.38748137108792846,
+       "grad_norm": 59.435298919677734,
+       "learning_rate": 3.2500000000000002e-06,
+       "log_odds_chosen": 0.09232059866189957,
+       "log_odds_ratio": -0.7047046422958374,
+       "logits/chosen": 390.05853271484375,
+       "logits/rejected": 390.36346435546875,
+       "logps/chosen": -1.2730042934417725,
+       "logps/rejected": -1.3218134641647339,
+       "loss": 1.6272,
+       "nll_loss": 1.5361205339431763,
+       "rewards/accuracies": 0.6000000238418579,
+       "rewards/chosen": -0.0636502131819725,
+       "rewards/margins": 0.0024404595606029034,
+       "rewards/rejected": -0.06609068065881729,
+       "step": 65
+     },
+     {
+       "epoch": 0.4172876304023845,
+       "grad_norm": 32.72905349731445,
+       "learning_rate": 3.5e-06,
+       "log_odds_chosen": 0.03600925952196121,
+       "log_odds_ratio": -0.7415024042129517,
+       "logits/chosen": 381.87835693359375,
+       "logits/rejected": 389.978271484375,
+       "logps/chosen": -1.496351718902588,
+       "logps/rejected": -1.5099536180496216,
+       "loss": 1.7008,
+       "nll_loss": 1.8172271251678467,
+       "rewards/accuracies": 0.574999988079071,
+       "rewards/chosen": -0.07481758296489716,
+       "rewards/margins": 0.0006800902774557471,
+       "rewards/rejected": -0.07549767941236496,
+       "step": 70
+     },
+     {
+       "epoch": 0.44709388971684055,
+       "grad_norm": 31.950422286987305,
+       "learning_rate": 3.7500000000000005e-06,
+       "log_odds_chosen": 0.12743942439556122,
+       "log_odds_ratio": -0.6888688206672668,
+       "logits/chosen": 396.3226318359375,
+       "logits/rejected": 383.38067626953125,
+       "logps/chosen": -1.2637438774108887,
+       "logps/rejected": -1.34519362449646,
+       "loss": 1.6454,
+       "nll_loss": 1.5880029201507568,
+       "rewards/accuracies": 0.6000000238418579,
+       "rewards/chosen": -0.06318718940019608,
+       "rewards/margins": 0.0040724934078752995,
+       "rewards/rejected": -0.06725968420505524,
+       "step": 75
+     },
+     {
+       "epoch": 0.47690014903129657,
+       "grad_norm": 29.037189483642578,
+       "learning_rate": 4.000000000000001e-06,
+       "log_odds_chosen": 0.18037892878055573,
+       "log_odds_ratio": -0.6654509902000427,
+       "logits/chosen": 381.05133056640625,
+       "logits/rejected": 403.25115966796875,
+       "logps/chosen": -1.220245599746704,
+       "logps/rejected": -1.3405919075012207,
+       "loss": 1.6425,
+       "nll_loss": 1.6346063613891602,
+       "rewards/accuracies": 0.6499999761581421,
+       "rewards/chosen": -0.061012279242277145,
+       "rewards/margins": 0.006017312873154879,
+       "rewards/rejected": -0.06702959537506104,
+       "step": 80
+     },
+     {
+       "epoch": 0.5067064083457526,
+       "grad_norm": 48.85629653930664,
+       "learning_rate": 4.25e-06,
+       "log_odds_chosen": 0.13455167412757874,
+       "log_odds_ratio": -0.7552928924560547,
+       "logits/chosen": 406.13067626953125,
+       "logits/rejected": 390.22216796875,
+       "logps/chosen": -1.419901967048645,
+       "logps/rejected": -1.4787737131118774,
+       "loss": 1.6961,
+       "nll_loss": 1.7476110458374023,
+       "rewards/accuracies": 0.6000000238418579,
+       "rewards/chosen": -0.07099510729312897,
+       "rewards/margins": 0.002943576779216528,
+       "rewards/rejected": -0.07393868267536163,
+       "step": 85
+     },
+     {
+       "epoch": 0.5365126676602087,
+       "grad_norm": 383.9244079589844,
+       "learning_rate": 4.5e-06,
+       "log_odds_chosen": 0.579018771648407,
+       "log_odds_ratio": -0.5751021504402161,
+       "logits/chosen": 401.8145446777344,
+       "logits/rejected": 417.1114807128906,
+       "logps/chosen": -1.2194725275039673,
+       "logps/rejected": -1.6738338470458984,
+       "loss": 1.4692,
+       "nll_loss": 1.5031993389129639,
+       "rewards/accuracies": 0.625,
+       "rewards/chosen": -0.0609736330807209,
+       "rewards/margins": 0.02271805889904499,
+       "rewards/rejected": -0.08369167894124985,
+       "step": 90
+     },
+     {
+       "epoch": 0.5663189269746647,
+       "grad_norm": 83.88288116455078,
+       "learning_rate": 4.75e-06,
+       "log_odds_chosen": 0.20128187537193298,
+       "log_odds_ratio": -0.6592803597450256,
+       "logits/chosen": 368.6065368652344,
+       "logits/rejected": 382.3212890625,
+       "logps/chosen": -1.1970250606536865,
+       "logps/rejected": -1.2992660999298096,
+       "loss": 1.6441,
+       "nll_loss": 1.5431461334228516,
+       "rewards/accuracies": 0.6000000238418579,
+       "rewards/chosen": -0.059851258993148804,
+       "rewards/margins": 0.00511204544454813,
+       "rewards/rejected": -0.06496329605579376,
+       "step": 95
+     },
+     {
+       "epoch": 0.5961251862891207,
+       "grad_norm": 54.11213684082031,
+       "learning_rate": 5e-06,
+       "log_odds_chosen": 0.13380606472492218,
+       "log_odds_ratio": -0.6779340505599976,
+       "logits/chosen": 404.63409423828125,
+       "logits/rejected": 441.92938232421875,
+       "logps/chosen": -1.2189857959747314,
+       "logps/rejected": -1.3092880249023438,
+       "loss": 1.596,
+       "nll_loss": 1.5926085710525513,
+       "rewards/accuracies": 0.6000000238418579,
+       "rewards/chosen": -0.06094928830862045,
+       "rewards/margins": 0.004515114240348339,
+       "rewards/rejected": -0.06546439975500107,
+       "step": 100
+     },
+     {
+       "epoch": 0.6259314456035767,
+       "grad_norm": 24.880090713500977,
+       "learning_rate": 4.8795003647426654e-06,
+       "log_odds_chosen": 0.2874816656112671,
+       "log_odds_ratio": -0.6406186819076538,
+       "logits/chosen": 386.64764404296875,
+       "logits/rejected": 400.5997009277344,
+       "logps/chosen": -1.0471909046173096,
+       "logps/rejected": -1.2103850841522217,
+       "loss": 1.5244,
+       "nll_loss": 1.5330688953399658,
+       "rewards/accuracies": 0.574999988079071,
+       "rewards/chosen": -0.05235954001545906,
+       "rewards/margins": 0.008159706369042397,
+       "rewards/rejected": -0.060519248247146606,
+       "step": 105
+     },
+     {
+       "epoch": 0.6557377049180327,
+       "grad_norm": 65.00325775146484,
+       "learning_rate": 4.767312946227961e-06,
+       "log_odds_chosen": 0.17585398256778717,
+       "log_odds_ratio": -0.6596522331237793,
+       "logits/chosen": 370.3504333496094,
+       "logits/rejected": 369.62982177734375,
+       "logps/chosen": -1.0811831951141357,
+       "logps/rejected": -1.2061924934387207,
+       "loss": 1.5148,
+       "nll_loss": 1.530368685722351,
+       "rewards/accuracies": 0.6000000238418579,
+       "rewards/chosen": -0.054059166461229324,
+       "rewards/margins": 0.006250458303838968,
+       "rewards/rejected": -0.06030962988734245,
+       "step": 110
+     },
+     {
+       "epoch": 0.6855439642324889,
+       "grad_norm": 44.05381393432617,
+       "learning_rate": 4.662524041201569e-06,
+       "log_odds_chosen": 0.3691500723361969,
+       "log_odds_ratio": -0.5788921117782593,
+       "logits/chosen": 388.53631591796875,
+       "logits/rejected": 396.33770751953125,
+       "logps/chosen": -0.9749990701675415,
+       "logps/rejected": -1.168736219406128,
+       "loss": 1.5042,
+       "nll_loss": 1.417229413986206,
+       "rewards/accuracies": 0.625,
+       "rewards/chosen": -0.048749953508377075,
+       "rewards/margins": 0.009686857461929321,
+       "rewards/rejected": -0.05843681842088699,
+       "step": 115
+     },
+     {
+       "epoch": 0.7153502235469449,
+       "grad_norm": 38.68400955200195,
+       "learning_rate": 4.564354645876385e-06,
+       "log_odds_chosen": 0.3533535599708557,
+       "log_odds_ratio": -0.6087926626205444,
+       "logits/chosen": 379.69366455078125,
+       "logits/rejected": 379.962158203125,
+       "logps/chosen": -1.0634129047393799,
+       "logps/rejected": -1.2383795976638794,
+       "loss": 1.5529,
+       "nll_loss": 1.5442397594451904,
+       "rewards/accuracies": 0.675000011920929,
+       "rewards/chosen": -0.053170643746852875,
+       "rewards/margins": 0.008748333901166916,
+       "rewards/rejected": -0.06191897392272949,
+       "step": 120
+     },
+     {
+       "epoch": 0.7451564828614009,
+       "grad_norm": 21.89837646484375,
+       "learning_rate": 4.47213595499958e-06,
+       "log_odds_chosen": 0.17026309669017792,
+       "log_odds_ratio": -0.6800710558891296,
+       "logits/chosen": 375.1714782714844,
+       "logits/rejected": 387.94122314453125,
+       "logps/chosen": -1.0947885513305664,
+       "logps/rejected": -1.1978260278701782,
+       "loss": 1.4603,
+       "nll_loss": 1.4590686559677124,
+       "rewards/accuracies": 0.6000000238418579,
+       "rewards/chosen": -0.05473943427205086,
+       "rewards/margins": 0.005151876248419285,
+       "rewards/rejected": -0.059891313314437866,
+       "step": 125
+     },
+     {
+       "epoch": 0.7749627421758569,
+       "grad_norm": 22.374597549438477,
+       "learning_rate": 4.385290096535147e-06,
+       "log_odds_chosen": 0.1504744589328766,
+       "log_odds_ratio": -0.6961590051651001,
+       "logits/chosen": 401.68170166015625,
+       "logits/rejected": 390.8814392089844,
+       "logps/chosen": -1.1183803081512451,
+       "logps/rejected": -1.1522414684295654,
+       "loss": 1.5124,
+       "nll_loss": 1.4771168231964111,
+       "rewards/accuracies": 0.5,
+       "rewards/chosen": -0.055919013917446136,
+       "rewards/margins": 0.0016930631827563047,
+       "rewards/rejected": -0.05761207267642021,
+       "step": 130
+     },
+     {
+       "epoch": 0.8047690014903129,
+       "grad_norm": 54.11912155151367,
+       "learning_rate": 4.303314829119352e-06,
+       "log_odds_chosen": 0.09426576644182205,
+       "log_odds_ratio": -0.7113849520683289,
+       "logits/chosen": 408.5809326171875,
+       "logits/rejected": 412.0887756347656,
+       "logps/chosen": -1.1269690990447998,
+       "logps/rejected": -1.195809245109558,
+       "loss": 1.519,
+       "nll_loss": 1.582168698310852,
+       "rewards/accuracies": 0.574999988079071,
+       "rewards/chosen": -0.056348465383052826,
+       "rewards/margins": 0.0034419982694089413,
+       "rewards/rejected": -0.059790462255477905,
+       "step": 135
+     },
+     {
+       "epoch": 0.834575260804769,
+       "grad_norm": 23.130447387695312,
+       "learning_rate": 4.2257712736425835e-06,
+       "log_odds_chosen": -0.09378698468208313,
+       "log_odds_ratio": -0.8063527345657349,
+       "logits/chosen": 395.3038635253906,
+       "logits/rejected": 402.5538330078125,
+       "logps/chosen": -1.1263329982757568,
+       "logps/rejected": -1.0577051639556885,
+       "loss": 1.5115,
+       "nll_loss": 1.6241823434829712,
+       "rewards/accuracies": 0.44999998807907104,
+       "rewards/chosen": -0.056316643953323364,
+       "rewards/margins": -0.0034313846845179796,
+       "rewards/rejected": -0.052885256707668304,
+       "step": 140
+     },
+     {
+       "epoch": 0.8643815201192251,
+       "grad_norm": 46.85090255737305,
+       "learning_rate": 4.1522739926869985e-06,
+       "log_odds_chosen": -0.024486612528562546,
+       "log_odds_ratio": -0.735202431678772,
+       "logits/chosen": 391.74871826171875,
+       "logits/rejected": 396.13165283203125,
+       "logps/chosen": -1.1859979629516602,
+       "logps/rejected": -1.1640434265136719,
+       "loss": 1.5112,
+       "nll_loss": 1.5025219917297363,
+       "rewards/accuracies": 0.5249999761581421,
+       "rewards/chosen": -0.05929989740252495,
+       "rewards/margins": -0.0010977205820381641,
+       "rewards/rejected": -0.05820217728614807,
+       "step": 145
+     },
+     {
+       "epoch": 0.8941877794336811,
+       "grad_norm": 32.988243103027344,
+       "learning_rate": 4.082482904638631e-06,
+       "log_odds_chosen": 0.3807345926761627,
+       "log_odds_ratio": -0.559662401676178,
+       "logits/chosen": 399.6306457519531,
+       "logits/rejected": 417.28515625,
+       "logps/chosen": -1.0708329677581787,
+       "logps/rejected": -1.319719672203064,
+       "loss": 1.4701,
+       "nll_loss": 1.3841121196746826,
+       "rewards/accuracies": 0.7749999761581421,
+       "rewards/chosen": -0.053541649132966995,
+       "rewards/margins": 0.012444335967302322,
+       "rewards/rejected": -0.06598598510026932,
+       "step": 150
+     },
+     {
+       "epoch": 0.9239940387481371,
+       "grad_norm": 19.93175506591797,
+       "learning_rate": 4.016096644512495e-06,
+       "log_odds_chosen": 0.13164076209068298,
+       "log_odds_ratio": -0.6793235540390015,
+       "logits/chosen": 381.662353515625,
+       "logits/rejected": 396.54168701171875,
+       "logps/chosen": -1.1232904195785522,
+       "logps/rejected": -1.203460931777954,
+       "loss": 1.4306,
+       "nll_loss": 1.322166919708252,
+       "rewards/accuracies": 0.6499999761581421,
+       "rewards/chosen": -0.05616452172398567,
+       "rewards/margins": 0.004008529242128134,
+       "rewards/rejected": -0.06017305329442024,
+       "step": 155
+     },
+     {
+       "epoch": 0.9538002980625931,
+       "grad_norm": 48.30046844482422,
+       "learning_rate": 3.952847075210474e-06,
+       "log_odds_chosen": 0.010590496473014355,
+       "log_odds_ratio": -0.794399619102478,
+       "logits/chosen": 387.1076354980469,
+       "logits/rejected": 432.69384765625,
+       "logps/chosen": -1.0055660009384155,
+       "logps/rejected": -1.0430043935775757,
+       "loss": 1.3903,
+       "nll_loss": 1.3440879583358765,
+       "rewards/accuracies": 0.5,
+       "rewards/chosen": -0.050278306007385254,
+       "rewards/margins": 0.0018719173967838287,
+       "rewards/rejected": -0.052150219678878784,
+       "step": 160
+     },
+     {
+       "epoch": 0.9836065573770492,
+       "grad_norm": 17.599102020263672,
+       "learning_rate": 3.892494720807615e-06,
+       "log_odds_chosen": 0.021682387217879295,
+       "log_odds_ratio": -0.7344300746917725,
+       "logits/chosen": 397.306884765625,
+       "logits/rejected": 409.34222412109375,
+       "logps/chosen": -1.1095333099365234,
+       "logps/rejected": -1.1364104747772217,
+       "loss": 1.436,
+       "nll_loss": 1.4027729034423828,
+       "rewards/accuracies": 0.5249999761581421,
+       "rewards/chosen": -0.05547667294740677,
+       "rewards/margins": 0.0013438586611300707,
+       "rewards/rejected": -0.05682053044438362,
+       "step": 165
+     },
+     {
+       "epoch": 0.9955290611028316,
+       "eval_log_odds_chosen": 0.21922710537910461,
+       "eval_log_odds_ratio": -0.6881988644599915,
+       "eval_logits/chosen": 322.9157409667969,
+       "eval_logits/rejected": 294.26885986328125,
+       "eval_logps/chosen": -1.0047651529312134,
+       "eval_logps/rejected": -1.141276478767395,
+       "eval_loss": 1.4639270305633545,
+       "eval_nll_loss": 1.4151870012283325,
+       "eval_rewards/accuracies": 0.5539568066596985,
+       "eval_rewards/chosen": -0.05023825913667679,
+       "eval_rewards/margins": 0.006825567223131657,
+       "eval_rewards/rejected": -0.05706382542848587,
+       "eval_runtime": 112.2165,
+       "eval_samples_per_second": 4.928,
+       "eval_steps_per_second": 1.239,
+       "step": 167
+     },
+     {
+       "epoch": 1.0134128166915053,
+       "grad_norm": 19.598791122436523,
+       "learning_rate": 3.834824944236852e-06,
+       "log_odds_chosen": 0.45991507172584534,
+       "log_odds_ratio": -0.5599726438522339,
+       "logits/chosen": 381.36883544921875,
+       "logits/rejected": 404.8132629394531,
+       "logps/chosen": -0.8878811001777649,
+       "logps/rejected": -1.1503413915634155,
+       "loss": 1.2939,
+       "nll_loss": 1.1531199216842651,
+       "rewards/accuracies": 0.675000011920929,
+       "rewards/chosen": -0.044394053518772125,
+       "rewards/margins": 0.013123014941811562,
+       "rewards/rejected": -0.057517070323228836,
+       "step": 170
+     },
+     {
+       "epoch": 1.0432190760059612,
+       "grad_norm": 20.02121353149414,
+       "learning_rate": 3.7796447300922724e-06,
+       "log_odds_chosen": 0.8954402804374695,
+       "log_odds_ratio": -0.4191034436225891,
+       "logits/chosen": 360.7755126953125,
+       "logits/rejected": 400.88580322265625,
+       "logps/chosen": -0.6612526774406433,
+       "logps/rejected": -1.1563608646392822,
+       "loss": 1.0774,
+       "nll_loss": 1.1424416303634644,
+       "rewards/accuracies": 0.875,
+       "rewards/chosen": -0.033062636852264404,
+       "rewards/margins": 0.024755412712693214,
+       "rewards/rejected": -0.05781804770231247,
+       "step": 175
+     },
+     {
+       "epoch": 1.0730253353204173
662
+ "grad_norm": 31.31179428100586,
663
+ "learning_rate": 3.72677996249965e-06,
664
+ "log_odds_chosen": 0.9305903315544128,
665
+ "log_odds_ratio": -0.4297749102115631,
666
+ "logits/chosen": 358.54632568359375,
667
+ "logits/rejected": 333.3828125,
668
+ "logps/chosen": -0.7619189023971558,
669
+ "logps/rejected": -1.288059949874878,
670
+ "loss": 1.1343,
671
+ "nll_loss": 1.1943809986114502,
672
+ "rewards/accuracies": 0.800000011920929,
673
+ "rewards/chosen": -0.03809594735503197,
674
+ "rewards/margins": 0.02630705200135708,
675
+ "rewards/rejected": -0.0644029974937439,
676
+ "step": 180
677
+ },
678
+ {
679
+ "epoch": 1.1028315946348732,
680
+ "grad_norm": 18.475496292114258,
681
+ "learning_rate": 3.6760731104690393e-06,
682
+ "log_odds_chosen": 0.9789319038391113,
683
+ "log_odds_ratio": -0.39594078063964844,
684
+ "logits/chosen": 388.92523193359375,
685
+ "logits/rejected": 379.08624267578125,
686
+ "logps/chosen": -0.6655952334403992,
687
+ "logps/rejected": -1.1871401071548462,
688
+ "loss": 1.0198,
689
+ "nll_loss": 0.9870488047599792,
690
+ "rewards/accuracies": 0.8500000238418579,
691
+ "rewards/chosen": -0.03327976167201996,
692
+ "rewards/margins": 0.026077240705490112,
693
+ "rewards/rejected": -0.05935700982809067,
694
+ "step": 185
695
+ },
696
+ {
697
+ "epoch": 1.1326378539493294,
698
+ "grad_norm": 15.556231498718262,
699
+ "learning_rate": 3.6273812505500587e-06,
700
+ "log_odds_chosen": 0.5961565971374512,
701
+ "log_odds_ratio": -0.5247331857681274,
702
+ "logits/chosen": 364.07965087890625,
703
+ "logits/rejected": 406.9730529785156,
704
+ "logps/chosen": -0.7741492986679077,
705
+ "logps/rejected": -1.1193947792053223,
706
+ "loss": 1.0997,
707
+ "nll_loss": 1.0372028350830078,
708
+ "rewards/accuracies": 0.699999988079071,
709
+ "rewards/chosen": -0.03870746120810509,
710
+ "rewards/margins": 0.017262274399399757,
711
+ "rewards/rejected": -0.055969737470149994,
712
+ "step": 190
713
+ },
714
+ {
715
+ "epoch": 1.1624441132637853,
716
+ "grad_norm": 20.581634521484375,
717
+ "learning_rate": 3.5805743701971648e-06,
718
+ "log_odds_chosen": 0.9057777523994446,
719
+ "log_odds_ratio": -0.40288910269737244,
720
+ "logits/chosen": 387.034912109375,
721
+ "logits/rejected": 400.3788146972656,
722
+ "logps/chosen": -0.8112454414367676,
723
+ "logps/rejected": -1.2839386463165283,
724
+ "loss": 1.143,
725
+ "nll_loss": 1.1492406129837036,
726
+ "rewards/accuracies": 0.925000011920929,
727
+ "rewards/chosen": -0.04056227579712868,
728
+ "rewards/margins": 0.023634660989046097,
729
+ "rewards/rejected": -0.06419692933559418,
730
+ "step": 195
731
+ },
732
+ {
733
+ "epoch": 1.1922503725782414,
734
+ "grad_norm": 25.256078720092773,
735
+ "learning_rate": 3.5355339059327378e-06,
736
+ "log_odds_chosen": 0.968602180480957,
737
+ "log_odds_ratio": -0.41492360830307007,
738
+ "logits/chosen": 408.2672424316406,
739
+ "logits/rejected": 392.72406005859375,
740
+ "logps/chosen": -0.7001940011978149,
741
+ "logps/rejected": -1.2566159963607788,
742
+ "loss": 1.1031,
743
+ "nll_loss": 0.9941505193710327,
744
+ "rewards/accuracies": 0.800000011920929,
745
+ "rewards/chosen": -0.03500969707965851,
746
+ "rewards/margins": 0.02782110869884491,
747
+ "rewards/rejected": -0.06283080577850342,
748
+ "step": 200
749
+ },
750
+ {
751
+ "epoch": 1.2220566318926975,
752
+ "grad_norm": 20.430418014526367,
753
+ "learning_rate": 3.4921514788478916e-06,
754
+ "log_odds_chosen": 0.9490750432014465,
755
+ "log_odds_ratio": -0.43051719665527344,
756
+ "logits/chosen": 366.13177490234375,
757
+ "logits/rejected": 359.82196044921875,
758
+ "logps/chosen": -0.7287150025367737,
759
+ "logps/rejected": -1.228252649307251,
760
+ "loss": 1.1093,
761
+ "nll_loss": 1.1782151460647583,
762
+ "rewards/accuracies": 0.875,
763
+ "rewards/chosen": -0.036435749381780624,
764
+ "rewards/margins": 0.024976884946227074,
765
+ "rewards/rejected": -0.061412639915943146,
766
+ "step": 205
767
+ },
768
+ {
769
+ "epoch": 1.2518628912071534,
770
+ "grad_norm": 29.456756591796875,
771
+ "learning_rate": 3.450327796711771e-06,
772
+ "log_odds_chosen": 1.1124473810195923,
773
+ "log_odds_ratio": -0.36473917961120605,
774
+ "logits/chosen": 371.9077453613281,
775
+ "logits/rejected": 400.37908935546875,
776
+ "logps/chosen": -0.6593618392944336,
777
+ "logps/rejected": -1.292751431465149,
778
+ "loss": 1.0725,
779
+ "nll_loss": 1.041669487953186,
780
+ "rewards/accuracies": 0.949999988079071,
781
+ "rewards/chosen": -0.03296809643507004,
782
+ "rewards/margins": 0.03166947886347771,
783
+ "rewards/rejected": -0.06463757157325745,
784
+ "step": 210
785
+ },
786
+ {
787
+ "epoch": 1.2816691505216096,
788
+ "grad_norm": 51.35958480834961,
789
+ "learning_rate": 3.409971697352368e-06,
790
+ "log_odds_chosen": 1.1422733068466187,
791
+ "log_odds_ratio": -0.3576507568359375,
792
+ "logits/chosen": 396.532470703125,
793
+ "logits/rejected": 379.38525390625,
794
+ "logps/chosen": -0.7450262904167175,
795
+ "logps/rejected": -1.4131982326507568,
796
+ "loss": 1.0757,
797
+ "nll_loss": 1.0690205097198486,
798
+ "rewards/accuracies": 0.8999999761581421,
799
+ "rewards/chosen": -0.0372513122856617,
800
+ "rewards/margins": 0.033408597111701965,
801
+ "rewards/rejected": -0.07065991312265396,
802
+ "step": 215
803
+ },
804
+ {
805
+ "epoch": 1.3114754098360657,
806
+ "grad_norm": 16.197195053100586,
807
+ "learning_rate": 3.3709993123162106e-06,
808
+ "log_odds_chosen": 0.7627310752868652,
809
+ "log_odds_ratio": -0.46217769384384155,
810
+ "logits/chosen": 383.55572509765625,
811
+ "logits/rejected": 377.1234436035156,
812
+ "logps/chosen": -0.7894420623779297,
813
+ "logps/rejected": -1.196644902229309,
814
+ "loss": 1.0384,
815
+ "nll_loss": 1.0212104320526123,
816
+ "rewards/accuracies": 0.824999988079071,
817
+ "rewards/chosen": -0.03947211056947708,
818
+ "rewards/margins": 0.02036014385521412,
819
+ "rewards/rejected": -0.05983225256204605,
820
+ "step": 220
821
+ },
822
+ {
823
+ "epoch": 1.3412816691505216,
824
+ "grad_norm": 26.38709831237793,
825
+ "learning_rate": 3.3333333333333333e-06,
826
+ "log_odds_chosen": 0.5317455530166626,
827
+ "log_odds_ratio": -0.5077921152114868,
828
+ "logits/chosen": 383.05657958984375,
829
+ "logits/rejected": 375.5099792480469,
830
+ "logps/chosen": -0.8590075373649597,
831
+ "logps/rejected": -1.1633431911468506,
832
+ "loss": 1.0481,
833
+ "nll_loss": 1.092240333557129,
834
+ "rewards/accuracies": 0.800000011920929,
835
+ "rewards/chosen": -0.04295038431882858,
836
+ "rewards/margins": 0.01521677989512682,
837
+ "rewards/rejected": -0.05816715955734253,
838
+ "step": 225
839
+ },
840
+ {
841
+ "epoch": 1.3710879284649775,
842
+ "grad_norm": 22.215499877929688,
843
+ "learning_rate": 3.296902366978936e-06,
844
+ "log_odds_chosen": 1.152860403060913,
845
+ "log_odds_ratio": -0.33905166387557983,
846
+ "logits/chosen": 360.29229736328125,
847
+ "logits/rejected": 377.34967041015625,
848
+ "logps/chosen": -0.6110543012619019,
849
+ "logps/rejected": -1.235459804534912,
850
+ "loss": 1.0305,
851
+ "nll_loss": 0.8882306814193726,
852
+ "rewards/accuracies": 0.8999999761581421,
853
+ "rewards/chosen": -0.03055271878838539,
854
+ "rewards/margins": 0.031220275908708572,
855
+ "rewards/rejected": -0.061772990971803665,
856
+ "step": 230
857
+ },
858
+ {
859
+ "epoch": 1.4008941877794336,
860
+ "grad_norm": 15.053303718566895,
861
+ "learning_rate": 3.2616403652672114e-06,
862
+ "log_odds_chosen": 1.173457384109497,
863
+ "log_odds_ratio": -0.39741092920303345,
864
+ "logits/chosen": 382.1279602050781,
865
+ "logits/rejected": 394.14453125,
866
+ "logps/chosen": -0.6574608087539673,
867
+ "logps/rejected": -1.3275513648986816,
868
+ "loss": 1.041,
869
+ "nll_loss": 0.9579538106918335,
870
+ "rewards/accuracies": 0.824999988079071,
871
+ "rewards/chosen": -0.032873041927814484,
872
+ "rewards/margins": 0.033504534512758255,
873
+ "rewards/rejected": -0.06637756526470184,
874
+ "step": 235
875
+ },
876
+ {
877
+ "epoch": 1.4307004470938898,
878
+ "grad_norm": 17.994874954223633,
879
+ "learning_rate": 3.2274861218395142e-06,
880
+ "log_odds_chosen": 0.7307401895523071,
881
+ "log_odds_ratio": -0.4488092362880707,
882
+ "logits/chosen": 405.40802001953125,
883
+ "logits/rejected": 409.69647216796875,
884
+ "logps/chosen": -0.7471104264259338,
885
+ "logps/rejected": -1.1478822231292725,
886
+ "loss": 1.062,
887
+ "nll_loss": 1.034090518951416,
888
+ "rewards/accuracies": 0.875,
889
+ "rewards/chosen": -0.03735552355647087,
890
+ "rewards/margins": 0.02003859169781208,
891
+ "rewards/rejected": -0.057394109666347504,
892
+ "step": 240
893
+ },
894
+ {
895
+ "epoch": 1.4605067064083457,
896
+ "grad_norm": 14.603346824645996,
897
+ "learning_rate": 3.1943828249997e-06,
898
+ "log_odds_chosen": 0.9838510751724243,
899
+ "log_odds_ratio": -0.39035505056381226,
900
+ "logits/chosen": 394.9382629394531,
901
+ "logits/rejected": 383.0425109863281,
902
+ "logps/chosen": -0.604079008102417,
903
+ "logps/rejected": -1.1444944143295288,
904
+ "loss": 1.0828,
905
+ "nll_loss": 1.0692133903503418,
906
+ "rewards/accuracies": 0.8999999761581421,
907
+ "rewards/chosen": -0.03020395338535309,
908
+ "rewards/margins": 0.027020767331123352,
909
+ "rewards/rejected": -0.05722472816705704,
910
+ "step": 245
911
+ },
912
+ {
913
+ "epoch": 1.4903129657228018,
914
+ "grad_norm": 24.562320709228516,
915
+ "learning_rate": 3.1622776601683796e-06,
916
+ "log_odds_chosen": 1.0623723268508911,
917
+ "log_odds_ratio": -0.41066059470176697,
918
+ "logits/chosen": 374.6037902832031,
919
+ "logits/rejected": 381.32269287109375,
920
+ "logps/chosen": -0.6770502924919128,
921
+ "logps/rejected": -1.2543606758117676,
922
+ "loss": 1.0094,
923
+ "nll_loss": 0.9133090972900391,
924
+ "rewards/accuracies": 0.8500000238418579,
925
+ "rewards/chosen": -0.03385251387953758,
926
+ "rewards/margins": 0.028865519911050797,
927
+ "rewards/rejected": -0.06271803379058838,
928
+ "step": 250
929
+ },
930
+ {
931
+ "epoch": 1.520119225037258,
932
+ "grad_norm": 16.239404678344727,
933
+ "learning_rate": 3.131121455425748e-06,
934
+ "log_odds_chosen": 1.1072403192520142,
935
+ "log_odds_ratio": -0.34550726413726807,
936
+ "logits/chosen": 394.74688720703125,
937
+ "logits/rejected": 396.5551452636719,
938
+ "logps/chosen": -0.6057699918746948,
939
+ "logps/rejected": -1.1659581661224365,
940
+ "loss": 1.0541,
941
+ "nll_loss": 0.9463102221488953,
942
+ "rewards/accuracies": 0.925000011920929,
943
+ "rewards/chosen": -0.03028850257396698,
944
+ "rewards/margins": 0.028009409084916115,
945
+ "rewards/rejected": -0.058297913521528244,
946
+ "step": 255
947
+ },
948
+ {
949
+ "epoch": 1.5499254843517138,
950
+ "grad_norm": 29.02210235595703,
951
+ "learning_rate": 3.1008683647302113e-06,
952
+ "log_odds_chosen": 0.9046269655227661,
953
+ "log_odds_ratio": -0.42992085218429565,
954
+ "logits/chosen": 375.5458984375,
955
+ "logits/rejected": 415.7400817871094,
956
+ "logps/chosen": -0.7795925140380859,
957
+ "logps/rejected": -1.3336765766143799,
958
+ "loss": 1.021,
959
+ "nll_loss": 1.037724256515503,
960
+ "rewards/accuracies": 0.75,
961
+ "rewards/chosen": -0.038979630917310715,
962
+ "rewards/margins": 0.02770419791340828,
963
+ "rewards/rejected": -0.066683828830719,
964
+ "step": 260
965
+ },
966
+ {
967
+ "epoch": 1.5797317436661698,
968
+ "grad_norm": 16.736574172973633,
969
+ "learning_rate": 3.0714755841697565e-06,
970
+ "log_odds_chosen": 1.028351902961731,
971
+ "log_odds_ratio": -0.4398733079433441,
972
+ "logits/chosen": 384.7108154296875,
973
+ "logits/rejected": 406.8509521484375,
974
+ "logps/chosen": -0.717097282409668,
975
+ "logps/rejected": -1.318499207496643,
976
+ "loss": 1.104,
977
+ "nll_loss": 1.0128785371780396,
978
+ "rewards/accuracies": 0.800000011920929,
979
+ "rewards/chosen": -0.03585486486554146,
980
+ "rewards/margins": 0.030070099979639053,
981
+ "rewards/rejected": -0.06592496484518051,
982
+ "step": 265
983
+ },
984
+ {
985
+ "epoch": 1.6095380029806259,
986
+ "grad_norm": 17.67503547668457,
987
+ "learning_rate": 3.0429030972509227e-06,
988
+ "log_odds_chosen": 0.7785763740539551,
989
+ "log_odds_ratio": -0.4546143412590027,
990
+ "logits/chosen": 369.1441955566406,
991
+ "logits/rejected": 379.18487548828125,
992
+ "logps/chosen": -0.7971286177635193,
993
+ "logps/rejected": -1.235899567604065,
994
+ "loss": 1.0973,
995
+ "nll_loss": 1.1573312282562256,
996
+ "rewards/accuracies": 0.8500000238418579,
997
+ "rewards/chosen": -0.0398564375936985,
998
+ "rewards/margins": 0.021938541904091835,
999
+ "rewards/rejected": -0.06179497763514519,
1000
+ "step": 270
1001
+ },
1002
+ {
1003
+ "epoch": 1.639344262295082,
1004
+ "grad_norm": 15.161911964416504,
1005
+ "learning_rate": 3.0151134457776365e-06,
1006
+ "log_odds_chosen": 0.9045177698135376,
1007
+ "log_odds_ratio": -0.4321528375148773,
1008
+ "logits/chosen": 360.2083435058594,
1009
+ "logits/rejected": 351.1839904785156,
1010
+ "logps/chosen": -0.6309309005737305,
1011
+ "logps/rejected": -1.0601168870925903,
1012
+ "loss": 1.0614,
1013
+ "nll_loss": 0.976634681224823,
1014
+ "rewards/accuracies": 0.800000011920929,
1015
+ "rewards/chosen": -0.031546544283628464,
1016
+ "rewards/margins": 0.021459298208355904,
1017
+ "rewards/rejected": -0.05300584435462952,
1018
+ "step": 275
1019
+ },
1020
+ {
1021
+ "epoch": 1.669150521609538,
1022
+ "grad_norm": 14.985346794128418,
1023
+ "learning_rate": 2.988071523335984e-06,
1024
+ "log_odds_chosen": 0.8889838457107544,
1025
+ "log_odds_ratio": -0.5245683193206787,
1026
+ "logits/chosen": 404.428955078125,
1027
+ "logits/rejected": 391.63189697265625,
1028
+ "logps/chosen": -0.6854910850524902,
1029
+ "logps/rejected": -1.1978435516357422,
1030
+ "loss": 1.0314,
1031
+ "nll_loss": 1.0269334316253662,
1032
+ "rewards/accuracies": 0.7749999761581421,
1033
+ "rewards/chosen": -0.03427455574274063,
1034
+ "rewards/margins": 0.02561761811375618,
1035
+ "rewards/rejected": -0.05989217013120651,
1036
+ "step": 280
1037
+ },
1038
+ {
1039
+ "epoch": 1.698956780923994,
1040
+ "grad_norm": 13.728422164916992,
1041
+ "learning_rate": 2.961744388795462e-06,
1042
+ "log_odds_chosen": 0.9078812599182129,
1043
+ "log_odds_ratio": -0.4240701198577881,
1044
+ "logits/chosen": 369.9886779785156,
1045
+ "logits/rejected": 374.25213623046875,
1046
+ "logps/chosen": -0.640867292881012,
1047
+ "logps/rejected": -1.1350326538085938,
1048
+ "loss": 1.0129,
1049
+ "nll_loss": 0.9724606275558472,
1050
+ "rewards/accuracies": 0.800000011920929,
1051
+ "rewards/chosen": -0.03204336389899254,
1052
+ "rewards/margins": 0.024708271026611328,
1053
+ "rewards/rejected": -0.05675163120031357,
1054
+ "step": 285
1055
+ },
1056
+ {
1057
+ "epoch": 1.7287630402384502,
1058
+ "grad_norm": 13.05510425567627,
1059
+ "learning_rate": 2.9361010975735177e-06,
1060
+ "log_odds_chosen": 0.9024085998535156,
1061
+ "log_odds_ratio": -0.41897106170654297,
1062
+ "logits/chosen": 383.33331298828125,
1063
+ "logits/rejected": 421.84710693359375,
1064
+ "logps/chosen": -0.7976008057594299,
1065
+ "logps/rejected": -1.2910783290863037,
1066
+ "loss": 1.0749,
1067
+ "nll_loss": 1.0102927684783936,
1068
+ "rewards/accuracies": 0.875,
1069
+ "rewards/chosen": -0.039880041033029556,
1070
+ "rewards/margins": 0.024673879146575928,
1071
+ "rewards/rejected": -0.06455391645431519,
1072
+ "step": 290
1073
+ },
1074
+ {
1075
+ "epoch": 1.758569299552906,
1076
+ "grad_norm": 22.535524368286133,
1077
+ "learning_rate": 2.9111125486979104e-06,
1078
+ "log_odds_chosen": 0.82624751329422,
1079
+ "log_odds_ratio": -0.44848528504371643,
1080
+ "logits/chosen": 358.3087463378906,
1081
+ "logits/rejected": 401.21539306640625,
1082
+ "logps/chosen": -0.7310397028923035,
1083
+ "logps/rejected": -1.1755989789962769,
1084
+ "loss": 1.0777,
1085
+ "nll_loss": 1.026176929473877,
1086
+ "rewards/accuracies": 0.800000011920929,
1087
+ "rewards/chosen": -0.03655198961496353,
1088
+ "rewards/margins": 0.022227967157959938,
1089
+ "rewards/rejected": -0.05877995491027832,
1090
+ "step": 295
1091
+ },
1092
+ {
1093
+ "epoch": 1.788375558867362,
1094
+ "grad_norm": 20.802898406982422,
1095
+ "learning_rate": 2.8867513459481293e-06,
1096
+ "log_odds_chosen": 1.3168494701385498,
1097
+ "log_odds_ratio": -0.3224945366382599,
1098
+ "logits/chosen": 398.9639587402344,
1099
+ "logits/rejected": 377.6863098144531,
1100
+ "logps/chosen": -0.6201862096786499,
1101
+ "logps/rejected": -1.2738819122314453,
1102
+ "loss": 0.9939,
1103
+ "nll_loss": 0.9130023121833801,
1104
+ "rewards/accuracies": 0.8999999761581421,
1105
+ "rewards/chosen": -0.031009310856461525,
1106
+ "rewards/margins": 0.03268478438258171,
1107
+ "rewards/rejected": -0.06369409710168839,
1108
+ "step": 300
1109
+ },
1110
+ {
1111
+ "epoch": 1.8181818181818183,
1112
+ "grad_norm": 17.038707733154297,
1113
+ "learning_rate": 2.862991671569341e-06,
1114
+ "log_odds_chosen": 0.5965818762779236,
1115
+ "log_odds_ratio": -0.5011720657348633,
1116
+ "logits/chosen": 391.8568115234375,
1117
+ "logits/rejected": 401.20648193359375,
1118
+ "logps/chosen": -0.8930565714836121,
1119
+ "logps/rejected": -1.2144049406051636,
1120
+ "loss": 1.0267,
1121
+ "nll_loss": 1.134427785873413,
1122
+ "rewards/accuracies": 0.7749999761581421,
1123
+ "rewards/chosen": -0.044652827084064484,
1124
+ "rewards/margins": 0.016067421063780785,
1125
+ "rewards/rejected": -0.060720253735780716,
1126
+ "step": 305
1127
+ },
1128
+ {
1129
+ "epoch": 1.8479880774962743,
1130
+ "grad_norm": 16.012582778930664,
1131
+ "learning_rate": 2.839809171235324e-06,
1132
+ "log_odds_chosen": 0.9494487047195435,
1133
+ "log_odds_ratio": -0.5154255032539368,
1134
+ "logits/chosen": 374.0430908203125,
1135
+ "logits/rejected": 383.86016845703125,
1136
+ "logps/chosen": -0.7265419960021973,
1137
+ "logps/rejected": -1.3359992504119873,
1138
+ "loss": 1.0959,
1139
+ "nll_loss": 1.0941182374954224,
1140
+ "rewards/accuracies": 0.800000011920929,
1141
+ "rewards/chosen": -0.03632710129022598,
1142
+ "rewards/margins": 0.03047286346554756,
1143
+ "rewards/rejected": -0.06679996103048325,
1144
+ "step": 310
1145
+ },
1146
+ {
1147
+ "epoch": 1.8777943368107302,
1148
+ "grad_norm": 13.797101974487305,
1149
+ "learning_rate": 2.817180849095055e-06,
1150
+ "log_odds_chosen": 0.4637278616428375,
1151
+ "log_odds_ratio": -0.5917452573776245,
1152
+ "logits/chosen": 352.93426513671875,
1153
+ "logits/rejected": 371.59765625,
1154
+ "logps/chosen": -0.9915712475776672,
1155
+ "logps/rejected": -1.311402440071106,
1156
+ "loss": 1.0986,
1157
+ "nll_loss": 1.2321771383285522,
1158
+ "rewards/accuracies": 0.675000011920929,
1159
+ "rewards/chosen": -0.04957855865359306,
1160
+ "rewards/margins": 0.015991564840078354,
1161
+ "rewards/rejected": -0.06557012349367142,
1162
+ "step": 315
1163
+ },
1164
+ {
1165
+ "epoch": 1.9076005961251863,
1166
+ "grad_norm": 15.759064674377441,
1167
+ "learning_rate": 2.7950849718747376e-06,
1168
+ "log_odds_chosen": 1.112966537475586,
1169
+ "log_odds_ratio": -0.37106412649154663,
1170
+ "logits/chosen": 374.7521667480469,
1171
+ "logits/rejected": 395.431396484375,
1172
+ "logps/chosen": -0.642137885093689,
1173
+ "logps/rejected": -1.249108910560608,
1174
+ "loss": 0.9927,
1175
+ "nll_loss": 0.9422394037246704,
1176
+ "rewards/accuracies": 0.8999999761581421,
1177
+ "rewards/chosen": -0.03210689499974251,
1178
+ "rewards/margins": 0.03034854494035244,
1179
+ "rewards/rejected": -0.0624554380774498,
1180
+ "step": 320
1181
+ },
1182
+ {
1183
+ "epoch": 1.9374068554396424,
1184
+ "grad_norm": 18.297237396240234,
1185
+ "learning_rate": 2.773500981126146e-06,
1186
+ "log_odds_chosen": 1.2229207754135132,
1187
+ "log_odds_ratio": -0.34499865770339966,
1188
+ "logits/chosen": 373.3489685058594,
1189
+ "logits/rejected": 404.5666809082031,
1190
+ "logps/chosen": -0.6735208630561829,
1191
+ "logps/rejected": -1.3717957735061646,
1192
+ "loss": 0.983,
1193
+ "nll_loss": 0.9137915372848511,
1194
+ "rewards/accuracies": 0.925000011920929,
1195
+ "rewards/chosen": -0.03367604315280914,
1196
+ "rewards/margins": 0.034913744777441025,
1197
+ "rewards/rejected": -0.06858979165554047,
1198
+ "step": 325
1199
+ },
1200
+ {
1201
+ "epoch": 1.9672131147540983,
1202
+ "grad_norm": 21.227933883666992,
1203
+ "learning_rate": 2.752409412815902e-06,
1204
+ "log_odds_chosen": 0.8807786107063293,
1205
+ "log_odds_ratio": -0.40755215287208557,
1206
+ "logits/chosen": 366.36541748046875,
1207
+ "logits/rejected": 375.8720703125,
1208
+ "logps/chosen": -0.6931061148643494,
1209
+ "logps/rejected": -1.1950337886810303,
1210
+ "loss": 1.0248,
1211
+ "nll_loss": 0.8495981097221375,
1212
+ "rewards/accuracies": 0.875,
1213
+ "rewards/chosen": -0.03465530276298523,
1214
+ "rewards/margins": 0.025096386671066284,
1215
+ "rewards/rejected": -0.059751689434051514,
1216
+ "step": 330
1217
+ },
1218
+ {
1219
+ "epoch": 1.9970193740685542,
1220
+ "grad_norm": 15.724347114562988,
1221
+ "learning_rate": 2.7317918235407652e-06,
1222
+ "log_odds_chosen": 0.43006667494773865,
1223
+ "log_odds_ratio": -0.608497142791748,
1224
+ "logits/chosen": 391.9339904785156,
1225
+ "logits/rejected": 385.08636474609375,
1226
+ "logps/chosen": -0.966523289680481,
1227
+ "logps/rejected": -1.2040079832077026,
1228
+ "loss": 1.0918,
1229
+ "nll_loss": 1.2487541437149048,
1230
+ "rewards/accuracies": 0.6499999761581421,
1231
+ "rewards/chosen": -0.04832616448402405,
1232
+ "rewards/margins": 0.011874236166477203,
1233
+ "rewards/rejected": -0.06020040065050125,
1234
+ "step": 335
1235
+ },
1236
+ {
1237
+ "epoch": 1.9970193740685542,
1238
+ "eval_log_odds_chosen": 0.2209286242723465,
1239
+ "eval_log_odds_ratio": -0.7027979493141174,
1240
+ "eval_logits/chosen": 313.30999755859375,
1241
+ "eval_logits/rejected": 284.8743896484375,
1242
+ "eval_logps/chosen": -1.0012338161468506,
1243
+ "eval_logps/rejected": -1.1474785804748535,
1244
+ "eval_loss": 1.423282504081726,
1245
+ "eval_nll_loss": 1.36614990234375,
1246
+ "eval_rewards/accuracies": 0.49640288949012756,
1247
+ "eval_rewards/chosen": -0.05006168410181999,
1248
+ "eval_rewards/margins": 0.007312240544706583,
1249
+ "eval_rewards/rejected": -0.057373929768800735,
1250
+ "eval_runtime": 112.3167,
1251
+ "eval_samples_per_second": 4.924,
1252
+ "eval_steps_per_second": 1.238,
1253
+ "step": 335
1254
+ },
1255
+ {
1256
+ "epoch": 2.0268256333830106,
1257
+ "grad_norm": 22.363670349121094,
1258
+ "learning_rate": 2.711630722733202e-06,
1259
+ "log_odds_chosen": 1.9970035552978516,
1260
+ "log_odds_ratio": -0.19410225749015808,
1261
+ "logits/chosen": 394.3930358886719,
1262
+ "logits/rejected": 370.94024658203125,
1263
+ "logps/chosen": -0.36809104681015015,
1264
+ "logps/rejected": -1.342511534690857,
1265
+ "loss": 0.6352,
1266
+ "nll_loss": 0.6902519464492798,
1267
+ "rewards/accuracies": 0.9750000238418579,
1268
+ "rewards/chosen": -0.018404554575681686,
1269
+ "rewards/margins": 0.04872102662920952,
1270
+ "rewards/rejected": -0.0671255812048912,
1271
+ "step": 340
1272
+ },
1273
+ {
1274
+ "epoch": 2.0566318926974665,
1275
+ "grad_norm": 17.443252563476562,
1276
+ "learning_rate": 2.691909510290828e-06,
1277
+ "log_odds_chosen": 2.4873645305633545,
1278
+ "log_odds_ratio": -0.12294415384531021,
1279
+ "logits/chosen": 353.0312194824219,
1280
+ "logits/rejected": 357.0645751953125,
1281
+ "logps/chosen": -0.3527178168296814,
1282
+ "logps/rejected": -1.623201608657837,
1283
+ "loss": 0.5679,
1284
+ "nll_loss": 0.5656304359436035,
1285
+ "rewards/accuracies": 0.949999988079071,
1286
+ "rewards/chosen": -0.01763588935136795,
1287
+ "rewards/margins": 0.06352418661117554,
1288
+ "rewards/rejected": -0.08116006851196289,
1289
+ "step": 345
1290
+ },
1291
+ {
1292
+ "epoch": 2.0864381520119224,
1293
+ "grad_norm": 13.353521347045898,
1294
+ "learning_rate": 2.6726124191242444e-06,
1295
+ "log_odds_chosen": 2.534590005874634,
1296
+ "log_odds_ratio": -0.11770644038915634,
1297
+ "logits/chosen": 355.3052673339844,
1298
+ "logits/rejected": 389.3919372558594,
1299
+ "logps/chosen": -0.3792329430580139,
1300
+ "logps/rejected": -1.8535314798355103,
1301
+ "loss": 0.5665,
1302
+ "nll_loss": 0.5773240327835083,
1303
+ "rewards/accuracies": 1.0,
1304
+ "rewards/chosen": -0.018961649388074875,
1305
+ "rewards/margins": 0.07371493428945541,
1306
+ "rewards/rejected": -0.09267657995223999,
1307
+ "step": 350
1308
+ },
1309
+ {
1310
+ "epoch": 2.1162444113263787,
1311
+ "grad_norm": 15.57699203491211,
1312
+ "learning_rate": 2.6537244621713765e-06,
1313
+ "log_odds_chosen": 2.286215305328369,
1314
+ "log_odds_ratio": -0.15262895822525024,
1315
+ "logits/chosen": 352.2881164550781,
1316
+ "logits/rejected": 369.99462890625,
1317
+ "logps/chosen": -0.3723372220993042,
1318
+ "logps/rejected": -1.5929332971572876,
1319
+ "loss": 0.5759,
1320
+ "nll_loss": 0.6265019178390503,
1321
+ "rewards/accuracies": 0.9750000238418579,
1322
+ "rewards/chosen": -0.01861685886979103,
1323
+ "rewards/margins": 0.06102980300784111,
1324
+ "rewards/rejected": -0.07964666187763214,
1325
+ "step": 355
1326
+ },
1327
+ {
1328
+ "epoch": 2.1460506706408347,
1329
+ "grad_norm": 11.725736618041992,
1330
+ "learning_rate": 2.6352313834736496e-06,
1331
+ "log_odds_chosen": 2.5719923973083496,
1332
+ "log_odds_ratio": -0.12209601700305939,
1333
+ "logits/chosen": 359.28118896484375,
1334
+ "logits/rejected": 400.27215576171875,
1335
+ "logps/chosen": -0.3560808300971985,
1336
+ "logps/rejected": -1.588376760482788,
1337
+ "loss": 0.5392,
1338
+ "nll_loss": 0.539588451385498,
1339
+ "rewards/accuracies": 1.0,
1340
+ "rewards/chosen": -0.017804041504859924,
1341
+ "rewards/margins": 0.06161479279398918,
1342
+ "rewards/rejected": -0.0794188380241394,
1343
+ "step": 360
1344
+ },
1345
+ {
1346
+ "epoch": 2.1758569299552906,
1347
+ "grad_norm": 11.369336128234863,
1348
+ "learning_rate": 2.6171196129510684e-06,
1349
+ "log_odds_chosen": 2.051750421524048,
1350
+ "log_odds_ratio": -0.1606641709804535,
1351
+ "logits/chosen": 347.43212890625,
1352
+ "logits/rejected": 334.7518310546875,
1353
+ "logps/chosen": -0.3488374650478363,
1354
+ "logps/rejected": -1.3597064018249512,
1355
+ "loss": 0.5618,
1356
+ "nll_loss": 0.5548380613327026,
1357
+ "rewards/accuracies": 0.9750000238418579,
1358
+ "rewards/chosen": -0.017441872507333755,
1359
+ "rewards/margins": 0.05054344981908798,
1360
+ "rewards/rejected": -0.06798531860113144,
1361
+ "step": 365
1362
+ },
1363
+ {
1364
+ "epoch": 2.2056631892697465,
1365
+ "grad_norm": 14.05441665649414,
1366
+ "learning_rate": 2.599376224550182e-06,
1367
+ "log_odds_chosen": 2.0613701343536377,
1368
+ "log_odds_ratio": -0.20328454673290253,
1369
+ "logits/chosen": 318.46661376953125,
1370
+ "logits/rejected": 339.12286376953125,
1371
+ "logps/chosen": -0.36191436648368835,
1372
+ "logps/rejected": -1.3976794481277466,
1373
+ "loss": 0.5944,
1374
+ "nll_loss": 0.6158479452133179,
1375
+ "rewards/accuracies": 0.8999999761581421,
1376
+ "rewards/chosen": -0.018095718696713448,
1377
+ "rewards/margins": 0.051788248121738434,
1378
+ "rewards/rejected": -0.06988397240638733,
1379
+ "step": 370
1380
+ },
1381
+ {
1382
+ "epoch": 2.235469448584203,
1383
+ "grad_norm": 13.069171905517578,
1384
+ "learning_rate": 2.5819888974716113e-06,
1385
+ "log_odds_chosen": 1.9356635808944702,
1386
+ "log_odds_ratio": -0.19234418869018555,
1387
+ "logits/chosen": 366.2290344238281,
1388
+ "logits/rejected": 386.2459411621094,
1389
+ "logps/chosen": -0.4155746400356293,
1390
+ "logps/rejected": -1.442433476448059,
1391
+ "loss": 0.5766,
1392
+ "nll_loss": 0.5814634561538696,
1393
+ "rewards/accuracies": 0.9750000238418579,
1394
+ "rewards/chosen": -0.020778732374310493,
1395
+ "rewards/margins": 0.051342952996492386,
1396
+ "rewards/rejected": -0.07212167233228683,
1397
+ "step": 375
1398
+ },
1399
+ {
1400
+ "epoch": 2.2652757078986587,
1401
+ "grad_norm": 13.310582160949707,
1402
+ "learning_rate": 2.564945880212886e-06,
1403
+ "log_odds_chosen": 2.3414645195007324,
1404
+ "log_odds_ratio": -0.1297520399093628,
1405
+ "logits/chosen": 361.81097412109375,
1406
+ "logits/rejected": 347.11090087890625,
1407
+ "logps/chosen": -0.2917303442955017,
1408
+ "logps/rejected": -1.3597028255462646,
1409
+ "loss": 0.5492,
1410
+ "nll_loss": 0.5354436039924622,
1411
+ "rewards/accuracies": 1.0,
1412
+ "rewards/chosen": -0.014586517587304115,
1413
+ "rewards/margins": 0.05339862033724785,
1414
+ "rewards/rejected": -0.06798513978719711,
1415
+ "step": 380
1416
+ },
1417
+ {
1418
+ "epoch": 2.2950819672131146,
1419
+ "grad_norm": 11.30221939086914,
1420
+ "learning_rate": 2.5482359571881276e-06,
1421
+ "log_odds_chosen": 2.427121162414551,
1422
+ "log_odds_ratio": -0.14391574263572693,
1423
+ "logits/chosen": 351.34454345703125,
1424
+ "logits/rejected": 348.7616271972656,
1425
+ "logps/chosen": -0.30433347821235657,
1426
+ "logps/rejected": -1.4669969081878662,
1427
+ "loss": 0.5393,
1428
+ "nll_loss": 0.5202994346618652,
1429
+ "rewards/accuracies": 0.949999988079071,
1430
+ "rewards/chosen": -0.015216675586998463,
1431
+ "rewards/margins": 0.058133166283369064,
1432
+ "rewards/rejected": -0.07334984838962555,
1433
+ "step": 385
1434
+ },
1435
+ {
1436
+ "epoch": 2.3248882265275705,
1437
+ "grad_norm": 13.66861629486084,
1438
+ "learning_rate": 2.5318484177091667e-06,
1439
+ "log_odds_chosen": 2.2526602745056152,
1440
+ "log_odds_ratio": -0.12812598049640656,
1441
+ "logits/chosen": 367.953857421875,
1442
+ "logits/rejected": 390.6175537109375,
1443
+ "logps/chosen": -0.3860087990760803,
1444
+ "logps/rejected": -1.594990611076355,
1445
+ "loss": 0.5879,
1446
+ "nll_loss": 0.6172261834144592,
1447
+ "rewards/accuracies": 1.0,
1448
+ "rewards/chosen": -0.019300442188978195,
1449
+ "rewards/margins": 0.060449086129665375,
1450
+ "rewards/rejected": -0.07974953204393387,
1451
+ "step": 390
1452
+ },
1453
+ {
1454
+ "epoch": 2.354694485842027,
1455
+ "grad_norm": 9.35374641418457,
1456
+ "learning_rate": 2.515773027133138e-06,
1457
+ "log_odds_chosen": 2.5175037384033203,
1458
+ "log_odds_ratio": -0.13300301134586334,
1459
+ "logits/chosen": 368.17181396484375,
1460
+ "logits/rejected": 361.74591064453125,
1461
+ "logps/chosen": -0.266899436712265,
1462
+ "logps/rejected": -1.3636187314987183,
1463
+ "loss": 0.5233,
1464
+ "nll_loss": 0.47403964400291443,
1465
+ "rewards/accuracies": 0.9750000238418579,
1466
+ "rewards/chosen": -0.01334497332572937,
1467
+ "rewards/margins": 0.0548359639942646,
1468
+ "rewards/rejected": -0.06818093359470367,
1469
+ "step": 395
1470
+ },
1471
+ {
1472
+ "epoch": 2.384500745156483,
1473
+ "grad_norm": 17.207456588745117,
1474
+ "learning_rate": 2.5e-06,
1475
+ "log_odds_chosen": 2.6303577423095703,
1476
+ "log_odds_ratio": -0.12364117056131363,
1477
+ "logits/chosen": 359.56134033203125,
1478
+ "logits/rejected": 384.45733642578125,
1479
+ "logps/chosen": -0.3128165900707245,
1480
+ "logps/rejected": -1.7372194528579712,
1481
+ "loss": 0.5523,
1482
+ "nll_loss": 0.4810587465763092,
1483
+ "rewards/accuracies": 0.949999988079071,
1484
+ "rewards/chosen": -0.015640828758478165,
1485
+ "rewards/margins": 0.07122014462947845,
1486
+ "rewards/rejected": -0.08686096966266632,
1487
+ "step": 400
1488
+ },
1489
+ {
1490
+ "epoch": 2.4143070044709387,
1491
+ "grad_norm": 13.166074752807617,
1492
+ "learning_rate": 2.484519974999767e-06,
1493
+ "log_odds_chosen": 2.396026849746704,
1494
+ "log_odds_ratio": -0.1698196679353714,
1495
+ "logits/chosen": 415.81658935546875,
1496
+ "logits/rejected": 383.0643310546875,
1497
+ "logps/chosen": -0.3519137501716614,
1498
+ "logps/rejected": -1.5836241245269775,
1499
+ "loss": 0.5829,
1500
+ "nll_loss": 0.5322110652923584,
1501
+ "rewards/accuracies": 0.9750000238418579,
1502
+ "rewards/chosen": -0.0175956878811121,
1503
+ "rewards/margins": 0.061585523188114166,
1504
+ "rewards/rejected": -0.07918120920658112,
1505
+ "step": 405
1506
+ },
1507
+ {
1508
+ "epoch": 2.444113263785395,
1509
+ "grad_norm": 12.805488586425781,
1510
+ "learning_rate": 2.4693239916239746e-06,
1511
+ "log_odds_chosen": 2.3219261169433594,
1512
+ "log_odds_ratio": -0.1753077507019043,
1513
+ "logits/chosen": 361.06781005859375,
1514
+ "logits/rejected": 375.9926452636719,
1515
+ "logps/chosen": -0.39748460054397583,
1516
+ "logps/rejected": -1.5706228017807007,
1517
+ "loss": 0.5614,
1518
+ "nll_loss": 0.6001583337783813,
1519
+ "rewards/accuracies": 0.9750000238418579,
1520
+ "rewards/chosen": -0.019874228164553642,
1521
+ "rewards/margins": 0.05865691229701042,
1522
+ "rewards/rejected": -0.07853114604949951,
1523
+ "step": 410
1524
+ },
1525
+ {
1526
+ "epoch": 2.473919523099851,
1527
+ "grad_norm": 10.840022087097168,
1528
+ "learning_rate": 2.4544034683690802e-06,
1529
+ "log_odds_chosen": 2.408691883087158,
1530
+ "log_odds_ratio": -0.15550675988197327,
1531
+ "logits/chosen": 363.71136474609375,
1532
+ "logits/rejected": 392.62689208984375,
1533
+ "logps/chosen": -0.33126166462898254,
1534
+ "logps/rejected": -1.5768396854400635,
1535
+ "loss": 0.565,
1536
+ "nll_loss": 0.5147169828414917,
1537
+ "rewards/accuracies": 0.9750000238418579,
1538
+ "rewards/chosen": -0.016563083976507187,
1539
+ "rewards/margins": 0.062278907746076584,
1540
+ "rewards/rejected": -0.07884199172258377,
1541
+ "step": 415
1542
+ },
1543
+ {
1544
+ "epoch": 2.503725782414307,
1545
+ "grad_norm": 14.33762264251709,
1546
+ "learning_rate": 2.4397501823713327e-06,
1547
+ "log_odds_chosen": 2.0540168285369873,
1548
+ "log_odds_ratio": -0.1838049441576004,
1549
+ "logits/chosen": 360.244873046875,
1550
+ "logits/rejected": 338.4053649902344,
1551
+ "logps/chosen": -0.382302850484848,
1552
+ "logps/rejected": -1.4437496662139893,
1553
+ "loss": 0.5427,
1554
+ "nll_loss": 0.6479641199111938,
1555
+ "rewards/accuracies": 0.9750000238418579,
1556
+ "rewards/chosen": -0.01911514438688755,
1557
+ "rewards/margins": 0.05307234078645706,
1558
+ "rewards/rejected": -0.07218748331069946,
1559
+ "step": 420
1560
+ },
1561
+ {
1562
+ "epoch": 2.533532041728763,
1563
+ "grad_norm": 11.913477897644043,
1564
+ "learning_rate": 2.4253562503633297e-06,
1565
+ "log_odds_chosen": 2.7403793334960938,
1566
+ "log_odds_ratio": -0.09055573493242264,
1567
+ "logits/chosen": 360.6409912109375,
1568
+ "logits/rejected": 357.6117858886719,
1569
+ "logps/chosen": -0.3066481351852417,
1570
+ "logps/rejected": -1.7102562189102173,
1571
+ "loss": 0.533,
1572
+ "nll_loss": 0.5301586389541626,
1573
+ "rewards/accuracies": 1.0,
1574
+ "rewards/chosen": -0.015332406386733055,
1575
+ "rewards/margins": 0.07018040120601654,
1576
+ "rewards/rejected": -0.08551281690597534,
1577
+ "step": 425
1578
+ },
1579
+ {
1580
+ "epoch": 2.563338301043219,
1581
+ "grad_norm": 12.434874534606934,
1582
+ "learning_rate": 2.411214110852061e-06,
1583
+ "log_odds_chosen": 2.650773525238037,
1584
+ "log_odds_ratio": -0.11397852003574371,
1585
+ "logits/chosen": 359.2686767578125,
1586
+ "logits/rejected": 372.0355224609375,
1587
+ "logps/chosen": -0.29058974981307983,
1588
+ "logps/rejected": -1.6085436344146729,
1589
+ "loss": 0.5369,
1590
+ "nll_loss": 0.48550575971603394,
1591
+ "rewards/accuracies": 1.0,
1592
+ "rewards/chosen": -0.014529486186802387,
1593
+ "rewards/margins": 0.06589768826961517,
1594
+ "rewards/rejected": -0.08042718470096588,
1595
+ "step": 430
1596
+ },
1597
+ {
1598
+ "epoch": 2.593144560357675,
1599
+ "grad_norm": 15.130928993225098,
1600
+ "learning_rate": 2.3973165074269213e-06,
1601
+ "log_odds_chosen": 2.4082794189453125,
1602
+ "log_odds_ratio": -0.14481699466705322,
1603
+ "logits/chosen": 368.16680908203125,
1604
+ "logits/rejected": 336.6341247558594,
1605
+ "logps/chosen": -0.37292951345443726,
1606
+ "logps/rejected": -1.5544594526290894,
1607
+ "loss": 0.5655,
1608
+ "nll_loss": 0.5464103817939758,
1609
+ "rewards/accuracies": 0.9750000238418579,
1610
+ "rewards/chosen": -0.018646476790308952,
1611
+ "rewards/margins": 0.059076495468616486,
1612
+ "rewards/rejected": -0.07772296667098999,
1613
+ "step": 435
1614
+ },
1615
+ {
1616
+ "epoch": 2.6229508196721314,
1617
+ "grad_norm": 13.672365188598633,
1618
+ "learning_rate": 2.3836564731139807e-06,
1619
+ "log_odds_chosen": 2.712573289871216,
1620
+ "log_odds_ratio": -0.11169672012329102,
1621
+ "logits/chosen": 354.31842041015625,
1622
+ "logits/rejected": 366.07745361328125,
1623
+ "logps/chosen": -0.2754111886024475,
1624
+ "logps/rejected": -1.5493534803390503,
1625
+ "loss": 0.5762,
1626
+ "nll_loss": 0.5381069779396057,
1627
+ "rewards/accuracies": 1.0,
1628
+ "rewards/chosen": -0.013770559802651405,
1629
+ "rewards/margins": 0.06369712203741074,
1630
+ "rewards/rejected": -0.07746767997741699,
1631
+ "step": 440
1632
+ },
1633
+ {
1634
+ "epoch": 2.6527570789865873,
1635
+ "grad_norm": 12.229787826538086,
1636
+ "learning_rate": 2.3702273156998867e-06,
1637
+ "log_odds_chosen": 2.407763957977295,
1638
+ "log_odds_ratio": -0.13709630072116852,
1639
+ "logits/chosen": 339.0660705566406,
1640
+ "logits/rejected": 373.5878601074219,
1641
+ "logps/chosen": -0.3847058415412903,
1642
+ "logps/rejected": -1.715640664100647,
1643
+ "loss": 0.5811,
1644
+ "nll_loss": 0.6087844967842102,
1645
+ "rewards/accuracies": 1.0,
1646
+ "rewards/chosen": -0.019235290586948395,
1647
+ "rewards/margins": 0.06654674559831619,
1648
+ "rewards/rejected": -0.08578203618526459,
1649
+ "step": 445
1650
+ },
1651
+ {
1652
+ "epoch": 2.682563338301043,
1653
+ "grad_norm": 11.220720291137695,
1654
+ "learning_rate": 2.357022603955159e-06,
1655
+ "log_odds_chosen": 2.3630270957946777,
1656
+ "log_odds_ratio": -0.12073234468698502,
1657
+ "logits/chosen": 364.763427734375,
1658
+ "logits/rejected": 365.3312072753906,
1659
+ "logps/chosen": -0.3996626138687134,
1660
+ "logps/rejected": -1.661350965499878,
1661
+ "loss": 0.589,
1662
+ "nll_loss": 0.5471979379653931,
1663
+ "rewards/accuracies": 1.0,
1664
+ "rewards/chosen": -0.01998312957584858,
1665
+ "rewards/margins": 0.06308441609144211,
1666
+ "rewards/rejected": -0.08306754380464554,
1667
+ "step": 450
1668
+ },
1669
+ {
1670
+ "epoch": 2.712369597615499,
1671
+ "grad_norm": 14.05644702911377,
1672
+ "learning_rate": 2.3440361546924774e-06,
1673
+ "log_odds_chosen": 2.527470588684082,
1674
+ "log_odds_ratio": -0.128330796957016,
1675
+ "logits/chosen": 393.28240966796875,
1676
+ "logits/rejected": 373.51959228515625,
1677
+ "logps/chosen": -0.3907659351825714,
1678
+ "logps/rejected": -1.6387542486190796,
1679
+ "loss": 0.5997,
1680
+ "nll_loss": 0.5538510084152222,
1681
+ "rewards/accuracies": 0.949999988079071,
1682
+ "rewards/chosen": -0.01953829452395439,
1683
+ "rewards/margins": 0.06239941716194153,
1684
+ "rewards/rejected": -0.08193771541118622,
1685
+ "step": 455
1686
+ },
1687
+ {
1688
+ "epoch": 2.742175856929955,
1689
+ "grad_norm": 14.722325325012207,
1690
+ "learning_rate": 2.3312620206007847e-06,
1691
+ "log_odds_chosen": 2.5439341068267822,
1692
+ "log_odds_ratio": -0.1302451193332672,
1693
+ "logits/chosen": 381.04022216796875,
1694
+ "logits/rejected": 400.01812744140625,
1695
+ "logps/chosen": -0.36085230112075806,
1696
+ "logps/rejected": -1.748968482017517,
1697
+ "loss": 0.5786,
1698
+ "nll_loss": 0.6514776945114136,
1699
+ "rewards/accuracies": 1.0,
1700
+ "rewards/chosen": -0.018042614683508873,
1701
+ "rewards/margins": 0.06940580904483795,
1702
+ "rewards/rejected": -0.08744843304157257,
1703
+ "step": 460
1704
+ },
1705
+ {
1706
+ "epoch": 2.7719821162444114,
1707
+ "grad_norm": 12.233352661132812,
1708
+ "learning_rate": 2.3186944788008413e-06,
1709
+ "log_odds_chosen": 2.5529284477233887,
1710
+ "log_odds_ratio": -0.1319517195224762,
1711
+ "logits/chosen": 376.39300537109375,
1712
+ "logits/rejected": 381.1004333496094,
1713
+ "logps/chosen": -0.2920375168323517,
1714
+ "logps/rejected": -1.539701223373413,
1715
+ "loss": 0.581,
1716
+ "nll_loss": 0.5538360476493835,
1717
+ "rewards/accuracies": 1.0,
1718
+ "rewards/chosen": -0.01460187416523695,
1719
+ "rewards/margins": 0.06238318607211113,
1720
+ "rewards/rejected": -0.07698506116867065,
1721
+ "step": 465
1722
+ },
1723
+ {
1724
+ "epoch": 2.8017883755588673,
1725
+ "grad_norm": 12.4392728805542,
1726
+ "learning_rate": 2.3063280200722128e-06,
1727
+ "log_odds_chosen": 2.2491307258605957,
1728
+ "log_odds_ratio": -0.18098264932632446,
1729
+ "logits/chosen": 377.3009948730469,
1730
+ "logits/rejected": 349.0381164550781,
1731
+ "logps/chosen": -0.3590669333934784,
1732
+ "logps/rejected": -1.5034451484680176,
1733
+ "loss": 0.5715,
1734
+ "nll_loss": 0.5520719289779663,
1735
+ "rewards/accuracies": 0.949999988079071,
1736
+ "rewards/chosen": -0.01795334741473198,
1737
+ "rewards/margins": 0.05721891671419144,
1738
+ "rewards/rejected": -0.07517226040363312,
1739
+ "step": 470
1740
+ },
1741
+ {
1742
+ "epoch": 2.8315946348733236,
1743
+ "grad_norm": 13.7266263961792,
1744
+ "learning_rate": 2.2941573387056174e-06,
1745
+ "log_odds_chosen": 2.518183946609497,
1746
+ "log_odds_ratio": -0.1277451366186142,
1747
+ "logits/chosen": 350.8870544433594,
1748
+ "logits/rejected": 374.2940979003906,
1749
+ "logps/chosen": -0.36442482471466064,
1750
+ "logps/rejected": -1.6510089635849,
1751
+ "loss": 0.5518,
1752
+ "nll_loss": 0.5228261947631836,
1753
+ "rewards/accuracies": 0.9750000238418579,
1754
+ "rewards/chosen": -0.018221240490674973,
1755
+ "rewards/margins": 0.06432920694351196,
1756
+ "rewards/rejected": -0.08255044370889664,
1757
+ "step": 475
1758
+ },
1759
+ {
1760
+ "epoch": 2.8614008941877795,
1761
+ "grad_norm": 10.832075119018555,
1762
+ "learning_rate": 2.2821773229381924e-06,
1763
+ "log_odds_chosen": 2.2293262481689453,
1764
+ "log_odds_ratio": -0.12778417766094208,
1765
+ "logits/chosen": 361.9057922363281,
1766
+ "logits/rejected": 402.4067077636719,
1767
+ "logps/chosen": -0.34709399938583374,
1768
+ "logps/rejected": -1.4698781967163086,
1769
+ "loss": 0.5087,
1770
+ "nll_loss": 0.5175037384033203,
1771
+ "rewards/accuracies": 1.0,
1772
+ "rewards/chosen": -0.017354700714349747,
1773
+ "rewards/margins": 0.05613920837640762,
1774
+ "rewards/rejected": -0.07349390536546707,
1775
+ "step": 480
1776
+ },
1777
+ {
1778
+ "epoch": 2.8912071535022354,
1779
+ "grad_norm": 11.188936233520508,
1780
+ "learning_rate": 2.270383045932499e-06,
1781
+ "log_odds_chosen": 2.5824456214904785,
1782
+ "log_odds_ratio": -0.12692959606647491,
1783
+ "logits/chosen": 357.77374267578125,
1784
+ "logits/rejected": 380.9786376953125,
1785
+ "logps/chosen": -0.3558903634548187,
1786
+ "logps/rejected": -1.7740790843963623,
1787
+ "loss": 0.5513,
1788
+ "nll_loss": 0.5267875790596008,
1789
+ "rewards/accuracies": 1.0,
1790
+ "rewards/chosen": -0.017794519662857056,
1791
+ "rewards/margins": 0.07090945541858673,
1792
+ "rewards/rejected": -0.08870397508144379,
1793
+ "step": 485
1794
+ },
1795
+ {
1796
+ "epoch": 2.9210134128166914,
1797
+ "grad_norm": 12.93087100982666,
1798
+ "learning_rate": 2.2587697572631284e-06,
1799
+ "log_odds_chosen": 2.304809331893921,
1800
+ "log_odds_ratio": -0.18847742676734924,
1801
+ "logits/chosen": 370.79241943359375,
1802
+ "logits/rejected": 337.0198059082031,
1803
+ "logps/chosen": -0.46047478914260864,
1804
+ "logps/rejected": -1.6731350421905518,
1805
+ "loss": 0.6251,
1806
+ "nll_loss": 0.5758861303329468,
1807
+ "rewards/accuracies": 0.9750000238418579,
1808
+ "rewards/chosen": -0.023023737594485283,
1809
+ "rewards/margins": 0.060633014887571335,
1810
+ "rewards/rejected": -0.08365674316883087,
1811
+ "step": 490
1812
+ },
1813
+ {
1814
+ "epoch": 2.9508196721311473,
1815
+ "grad_norm": 12.990720748901367,
1816
+ "learning_rate": 2.2473328748774737e-06,
1817
+ "log_odds_chosen": 2.1052613258361816,
1818
+ "log_odds_ratio": -0.17354901134967804,
1819
+ "logits/chosen": 362.78375244140625,
1820
+ "logits/rejected": 391.7388610839844,
1821
+ "logps/chosen": -0.4515438973903656,
1822
+ "logps/rejected": -1.5366592407226562,
1823
+ "loss": 0.5494,
1824
+ "nll_loss": 0.5879518985748291,
1825
+ "rewards/accuracies": 0.949999988079071,
1826
+ "rewards/chosen": -0.0225771926343441,
1827
+ "rewards/margins": 0.05425576493144035,
1828
+ "rewards/rejected": -0.07683296501636505,
1829
+ "step": 495
1830
+ },
1831
+ {
1832
+ "epoch": 2.9806259314456036,
1833
+ "grad_norm": 11.811108589172363,
1834
+ "learning_rate": 2.23606797749979e-06,
1835
+ "log_odds_chosen": 2.437211513519287,
1836
+ "log_odds_ratio": -0.13766226172447205,
1837
+ "logits/chosen": 372.21160888671875,
1838
+ "logits/rejected": 370.1877136230469,
1839
+ "logps/chosen": -0.32970958948135376,
1840
+ "logps/rejected": -1.5284183025360107,
1841
+ "loss": 0.576,
1842
+ "nll_loss": 0.47357138991355896,
1843
+ "rewards/accuracies": 1.0,
1844
+ "rewards/chosen": -0.01648547872900963,
1845
+ "rewards/margins": 0.05993543937802315,
1846
+ "rewards/rejected": -0.07642091810703278,
1847
+ "step": 500
1848
+ },
1849
+ {
1850
+ "epoch": 2.9865871833084947,
1851
+ "eval_log_odds_chosen": 0.27211081981658936,
1852
+ "eval_log_odds_ratio": -0.7039158344268799,
1853
+ "eval_logits/chosen": 296.6807556152344,
1854
+ "eval_logits/rejected": 267.234619140625,
1855
+ "eval_logps/chosen": -1.0933129787445068,
1856
+ "eval_logps/rejected": -1.2508149147033691,
1857
+ "eval_loss": 1.5379581451416016,
1858
+ "eval_nll_loss": 1.4702765941619873,
1859
+ "eval_rewards/accuracies": 0.5467625856399536,
1860
+ "eval_rewards/chosen": -0.054665643721818924,
1861
+ "eval_rewards/margins": 0.007875093258917332,
1862
+ "eval_rewards/rejected": -0.06254073977470398,
1863
+ "eval_runtime": 112.2732,
1864
+ "eval_samples_per_second": 4.925,
1865
+ "eval_steps_per_second": 1.238,
1866
+ "step": 501
1867
+ },
1868
+ {
1869
+ "epoch": 2.9865871833084947,
1870
+ "step": 501,
1871
+ "total_flos": 0.0,
1872
+ "train_loss": 1.4583633707431025,
1873
+ "train_runtime": 13840.413,
1874
+ "train_samples_per_second": 1.163,
1875
+ "train_steps_per_second": 0.036
1876
+ }
1877
+ ],
1878
+ "logging_steps": 5,
1879
+ "max_steps": 501,
1880
+ "num_input_tokens_seen": 0,
1881
+ "num_train_epochs": 3,
1882
+ "save_steps": 500,
1883
+ "stateful_callbacks": {
1884
+ "TrainerControl": {
1885
+ "args": {
1886
+ "should_epoch_stop": false,
1887
+ "should_evaluate": false,
1888
+ "should_log": false,
1889
+ "should_save": false,
1890
+ "should_training_stop": false
1891
+ },
1892
+ "attributes": {}
1893
+ }
1894
+ },
1895
+ "total_flos": 0.0,
1896
+ "train_batch_size": 2,
1897
+ "trial_name": null,
1898
+ "trial_params": null
1899
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef0399d23b46cfbc5e3cf2cd08aa71405ef6d9278762be54afa0ba8d0d2376a9
+ size 6776