hugodk-sch committed
Commit 1154dcc
1 Parent(s): ca18dae

Training in progress, step 600

README.md CHANGED
@@ -20,15 +20,15 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.4886
-- Rewards/chosen: 0.0591
-- Rewards/rejected: 0.0021
-- Rewards/accuracies: 0.6184
-- Rewards/margins: 0.0569
-- Logps/rejected: -37.5059
-- Logps/chosen: -33.7391
-- Logits/rejected: -2.1016
-- Logits/chosen: -2.1063
+- Loss: 6.1942
+- Rewards/chosen: 0.0027
+- Rewards/rejected: -0.0054
+- Rewards/accuracies: 0.5664
+- Rewards/margins: 0.0081
+- Logps/rejected: -37.5438
+- Logps/chosen: -34.0212
+- Logits/rejected: -2.2240
+- Logits/chosen: -2.2288
 
 ## Model description
 
@@ -57,27 +57,15 @@ The following hyperparameters were used during training:
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_ratio: 0.1
-- num_epochs: 4
+- num_epochs: 1
 
 ### Training results
 
-| Training Loss | Epoch | Step | Logits/chosen | Logits/rejected | Logps/chosen | Logps/rejected | Validation Loss | Rewards/accuracies | Rewards/chosen | Rewards/margins | Rewards/rejected |
-|:-------------:|:-----:|:----:|:-------------:|:---------------:|:------------:|:--------------:|:---------------:|:------------------:|:--------------:|:---------------:|:----------------:|
-| 0.4947 | 0.26 | 100 | -2.2348 | -2.2299 | -33.9151 | -37.4126 | 0.4994 | 0.5216 | 0.0239 | 0.0031 | 0.0208 |
-| 0.4825 | 0.52 | 200 | -2.2322 | -2.2274 | -33.7792 | -37.3179 | 0.4974 | 0.5544 | 0.0511 | 0.0113 | 0.0397 |
-| 0.4669 | 0.78 | 300 | -2.2312 | -2.2264 | -33.7681 | -37.2920 | 0.4980 | 0.5158 | 0.0533 | 0.0084 | 0.0449 |
-| 0.4399 | 1.04 | 400 | 0.4965 | 0.0777 | 0.0634 | 0.5860 | 0.0143 | -37.1996 | -33.6461 | -2.2104 | -2.2152 |
-| 0.4135 | 1.3 | 500 | 0.4956 | 0.0841 | 0.0657 | 0.5772 | 0.0184 | -37.1880 | -33.6138 | -2.1804 | -2.1851 |
-| 0.4221 | 1.56 | 600 | 0.4909 | 0.0893 | 0.0507 | 0.5764 | 0.0386 | -37.2632 | -33.5879 | -2.1565 | -2.1612 |
-| 0.3967 | 1.82 | 700 | 0.4921 | 0.0690 | 0.0338 | 0.5793 | 0.0352 | -37.3475 | -33.6896 | -2.1408 | -2.1454 |
-| 0.365 | 2.08 | 800 | 0.4899 | 0.0749 | 0.0300 | 0.5976 | 0.0449 | -37.3664 | -33.6601 | -2.1325 | -2.1372 |
-| 0.3419 | 2.34 | 900 | 0.4895 | 0.0666 | 0.0169 | 0.5681 | 0.0496 | -37.4319 | -33.7016 | -2.1150 | -2.1197 |
-| 0.3845 | 2.6 | 1000 | 0.4891 | 0.0624 | 0.0084 | 0.5980 | 0.0540 | -37.4745 | -33.7224 | -2.1060 | -2.1106 |
-| 0.3914 | 2.86 | 1100 | 0.4901 | 0.0568 | 0.0067 | 0.5918 | 0.0501 | -37.4831 | -33.7504 | -2.1031 | -2.1078 |
-| 0.3451 | 3.12 | 1200 | 0.4890 | 0.0594 | 0.0041 | 0.6005 | 0.0553 | -37.4961 | -33.7375 | -2.1037 | -2.1084 |
-| 0.3358 | 3.38 | 1300 | 0.4890 | 0.0605 | 0.0050 | 0.6213 | 0.0555 | -37.4915 | -33.7320 | -2.1025 | -2.1072 |
-| 0.3651 | 3.64 | 1400 | 0.4891 | 0.0579 | 0.0022 | 0.5947 | 0.0556 | -37.5054 | -33.7452 | -2.1019 | -2.1065 |
-| 0.3472 | 3.9 | 1500 | 0.4892 | 0.0584 | 0.0040 | 0.6092 | 0.0545 | -37.4968 | -33.7423 | -2.1019 | -2.1066 |
+| Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+|:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+| 5.6745 | 0.26 | 100 | 6.2508 | 0.0038 | -0.0003 | 0.5461 | 0.0041 | -37.5181 | -34.0153 | -2.2290 | -2.2338 |
+| 5.2135 | 0.52 | 200 | 6.2881 | 0.0066 | 0.0025 | 0.5403 | 0.0041 | -37.5042 | -34.0014 | -2.2267 | -2.2315 |
+| 4.3883 | 0.78 | 300 | 6.2382 | 0.0031 | -0.0032 | 0.5166 | 0.0062 | -37.5325 | -34.0193 | -2.2243 | -2.2291 |
 
 
 ### Framework versions
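
The Rewards/*, Logps/* and Logits/* columns in this hunk are the metrics that TRL-style preference trainers (e.g. DPOTrainer) report, which fits the `aftonposten_title_prefs` dataset name, though the training script itself is not part of this commit. Below is a minimal, hypothetical sketch of `TrainingArguments` that mirrors only what is visible in the diff (Adam betas/epsilon, cosine schedule, 0.1 warmup, 1 epoch, a 100-step eval cadence); everything else is a placeholder.

```python
from transformers import TrainingArguments

# Hypothetical reconstruction of the arguments shown in the README diff above.
# Only num_train_epochs, the scheduler, warmup ratio, Adam betas/epsilon and the
# 100-step eval cadence come from the diff; output_dir and batch size are placeholders.
training_args = TrainingArguments(
    output_dir="ap-gpt-j-6b-dpo-qlora",  # placeholder, not the repo's real path
    num_train_epochs=1,                  # changed from 4 in this commit
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    per_device_train_batch_size=4,       # not visible in this hunk; placeholder
    evaluation_strategy="steps",
    eval_steps=100,                      # the results table logs every 100 steps
    logging_steps=100,
)
```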
adapter_config.json CHANGED
@@ -20,11 +20,11 @@
 "revision": null,
 "target_modules": [
 "v_proj",
+"down_proj",
 "o_proj",
 "q_proj",
-"gate_proj",
-"down_proj",
 "k_proj",
+"gate_proj",
 "up_proj"
 ],
 "task_type": "CAUSAL_LM",
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1a52c7ecdad5acac487f7daf73072a438f60b86cd489dae1fa076e0343258d78
+oid sha256:e1d8dafc0d36a9df6dd67f9e84da1bb520a8d4b5d220f8d84573e7df2beb7fc0
 size 176183216
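
The .safetensors entry is a Git LFS pointer, so this diff only swaps the SHA-256 `oid` of the adapter weights while the size stays at 176183216 bytes. A quick standard-library check of a downloaded file against the new pointer:

```python
import hashlib

# The LFS oid is the SHA-256 of the actual file content, so a downloaded
# adapter can be verified against the pointer committed here.
EXPECTED = "e1d8dafc0d36a9df6dd67f9e84da1bb520a8d4b5d220f8d84573e7df2beb7fc0"

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

digest = sha256_of("adapter_model.safetensors")
assert digest == EXPECTED, f"hash mismatch: {digest}"
```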
all_results.json CHANGED
@@ -1,21 +1,21 @@
 {
-"epoch": 4.0,
-"eval_logits/chosen": -2.1062839031219482,
-"eval_logits/rejected": -2.101640224456787,
-"eval_logps/chosen": -33.739131927490234,
-"eval_logps/rejected": -37.50592041015625,
-"eval_loss": 0.48858171701431274,
-"eval_rewards/accuracies": 0.6183555126190186,
-"eval_rewards/chosen": 0.05908490717411041,
-"eval_rewards/margins": 0.05694425478577614,
-"eval_rewards/rejected": 0.002140657976269722,
-"eval_runtime": 145.7832,
+"epoch": 1.0,
+"eval_logits/chosen": -2.22882342338562,
+"eval_logits/rejected": -2.223994731903076,
+"eval_logps/chosen": -34.021240234375,
+"eval_logps/rejected": -37.54384994506836,
+"eval_loss": 6.194193363189697,
+"eval_rewards/accuracies": 0.5664451718330383,
+"eval_rewards/chosen": 0.0026620819699019194,
+"eval_rewards/margins": 0.008108925074338913,
+"eval_rewards/rejected": -0.0054468438029289246,
+"eval_runtime": 145.4532,
 "eval_samples": 343,
-"eval_samples_per_second": 2.353,
-"eval_steps_per_second": 0.295,
-"train_loss": 0.30868637050901143,
-"train_runtime": 10803.6,
+"eval_samples_per_second": 2.358,
+"eval_steps_per_second": 0.296,
+"train_loss": 5.437272009911475,
+"train_runtime": 3253.1823,
 "train_samples": 3079,
-"train_samples_per_second": 1.14,
-"train_steps_per_second": 0.143
+"train_samples_per_second": 0.946,
+"train_steps_per_second": 0.118
 }
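
all_results.json, like the eval_results.json and train_results.json hunks below, is plain JSON, so the before/after metrics are easy to diff programmatically. A small sketch, assuming the parent commit ca18dae has been checked out next to this one (the directory names are illustrative):

```python
import json

def load(path: str) -> dict:
    with open(path) as f:
        return json.load(f)

# "parent/" stands in for a checkout of commit ca18dae; adjust paths as needed.
old = load("parent/all_results.json")
new = load("all_results.json")

# Print only the numeric metrics that changed between the two runs.
for key in sorted(set(old) & set(new)):
    if isinstance(old[key], (int, float)) and old[key] != new[key]:
        print(f"{key:35s} {old[key]:>16.4f} -> {new[key]:>16.4f}")
```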
eval_results.json CHANGED
@@ -1,16 +1,16 @@
 {
-"epoch": 4.0,
-"eval_logits/chosen": -2.1062839031219482,
-"eval_logits/rejected": -2.101640224456787,
-"eval_logps/chosen": -33.739131927490234,
-"eval_logps/rejected": -37.50592041015625,
-"eval_loss": 0.48858171701431274,
-"eval_rewards/accuracies": 0.6183555126190186,
-"eval_rewards/chosen": 0.05908490717411041,
-"eval_rewards/margins": 0.05694425478577614,
-"eval_rewards/rejected": 0.002140657976269722,
-"eval_runtime": 145.7832,
+"epoch": 1.0,
+"eval_logits/chosen": -2.22882342338562,
+"eval_logits/rejected": -2.223994731903076,
+"eval_logps/chosen": -34.021240234375,
+"eval_logps/rejected": -37.54384994506836,
+"eval_loss": 6.194193363189697,
+"eval_rewards/accuracies": 0.5664451718330383,
+"eval_rewards/chosen": 0.0026620819699019194,
+"eval_rewards/margins": 0.008108925074338913,
+"eval_rewards/rejected": -0.0054468438029289246,
+"eval_runtime": 145.4532,
 "eval_samples": 343,
-"eval_samples_per_second": 2.353,
-"eval_steps_per_second": 0.295
+"eval_samples_per_second": 2.358,
+"eval_steps_per_second": 0.296
 }
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-"epoch": 4.0,
-"train_loss": 0.30868637050901143,
-"train_runtime": 10803.6,
+"epoch": 1.0,
+"train_loss": 5.437272009911475,
+"train_runtime": 3253.1823,
 "train_samples": 3079,
-"train_samples_per_second": 1.14,
-"train_steps_per_second": 0.143
+"train_samples_per_second": 0.946,
+"train_steps_per_second": 0.118
 }
trainer_state.json CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c326ac89e4c50e7bc12176a6c925130ebcfcfb2364bc0120a8ef9c18dc894821
+oid sha256:7a62740126001c4e37753bce979b61832119e1036635abceee907dccff0b6baa
 size 4984
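
training_args.bin is the pickled `TrainingArguments` object saved by the `Trainer`, so this diff again only changes an LFS pointer. To see which arguments actually differ between the two runs, the file can be loaded with torch; this is a sketch, and recent PyTorch releases need `weights_only=False` for pickled Python objects.

```python
import torch

# Load the pickled TrainingArguments behind the LFS pointer above and
# print a few fields that the README diff says changed or stayed fixed.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.lr_scheduler_type, args.warmup_ratio)
```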