hugodk-sch committed
Commit c1346c1
Parent: 415536c

Training in progress, step 500
README.md CHANGED
```diff
@@ -20,15 +20,15 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.6510
-- Rewards/chosen: 0.0280
-- Rewards/rejected: 0.0141
-- Rewards/accuracies: 0.5129
-- Rewards/margins: 0.0139
-- Logps/rejected: -37.4814
-- Logps/chosen: -33.9646
-- Logits/rejected: -2.2329
-- Logits/chosen: -2.2378
+- Loss: 0.9658
+- Rewards/chosen: -0.0301
+- Rewards/rejected: -0.0652
+- Rewards/accuracies: 0.5278
+- Rewards/margins: 0.0352
+- Logps/rejected: -37.6098
+- Logps/chosen: -34.0775
+- Logits/rejected: -2.2182
+- Logits/chosen: -2.2231
 
 ## Model description
 
@@ -57,33 +57,21 @@ The following hyperparameters were used during training:
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_ratio: 0.1
-- num_epochs: 4
+- num_epochs: 1
 
 ### Training results
 
-| Training Loss | Epoch | Step | Logits/chosen | Logits/rejected | Logps/chosen | Logps/rejected | Validation Loss | Rewards/accuracies | Rewards/chosen | Rewards/margins | Rewards/rejected |
-|:-------------:|:-----:|:----:|:-------------:|:---------------:|:------------:|:--------------:|:---------------:|:------------------:|:--------------:|:---------------:|:----------------:|
-| 1.4583 | 0.26 | 100 | -2.2357 | -2.2308 | -34.0303 | -37.5236 | 1.6162 | 0.5245 | 0.0017 | 0.0045 | -0.0028 |
-| 1.279 | 0.52 | 200 | -2.2359 | -2.2311 | -34.0825 | -37.5923 | 1.6100 | 0.5257 | -0.0192 | 0.0111 | -0.0303 |
-| 1.0043 | 0.78 | 300 | -2.2312 | -2.2263 | -34.0845 | -37.6004 | 1.5962 | 0.5166 | -0.0200 | 0.0135 | -0.0335 |
-| 0.7239 | 1.04 | 400 | 1.6461 | -0.0219 | -0.0311 | 0.5341 | 0.0092 | -37.5945 | -34.0893 | -2.2276 | -2.2324 |
-| 0.6061 | 1.3 | 500 | 1.6487 | -0.0274 | -0.0429 | 0.5395 | 0.0155 | -37.6239 | -34.1030 | -2.2282 | -2.2330 |
-| 0.9255 | 1.56 | 600 | 1.5912 | 0.0108 | -0.0119 | 0.5544 | 0.0228 | -37.5464 | -34.0074 | -2.2273 | -2.2321 |
-| 0.8252 | 1.82 | 700 | 1.6334 | 0.0226 | 0.0045 | 0.5216 | 0.0180 | -37.5053 | -33.9781 | -2.2298 | -2.2346 |
-| 0.2848 | 2.08 | 800 | 1.6033 | 0.0153 | -0.0031 | 0.5249 | 0.0184 | -37.5244 | -33.9964 | -2.2313 | -2.2361 |
-| 0.3671 | 2.34 | 900 | 1.6569 | 0.0283 | 0.0177 | 0.5162 | 0.0106 | -37.4723 | -33.9637 | -2.2309 | -2.2358 |
-| 0.3936 | 2.6 | 1000 | 1.6203 | 0.0348 | 0.0187 | 0.5428 | 0.0161 | -37.4698 | -33.9475 | -2.2325 | -2.2374 |
-| 0.3156 | 2.86 | 1100 | 1.6012 | 0.0302 | 0.0108 | 0.5606 | 0.0194 | -37.4896 | -33.9592 | -2.2326 | -2.2375 |
-| 0.2893 | 3.12 | 1200 | 1.5705 | 0.0346 | 0.0103 | 0.5365 | 0.0243 | -37.4909 | -33.9480 | -2.2335 | -2.2383 |
-| 0.277 | 3.38 | 1300 | 1.6102 | 0.0314 | 0.0121 | 0.5403 | 0.0194 | -37.4865 | -33.9559 | -2.2333 | -2.2382 |
-| 0.139 | 3.64 | 1400 | 1.6181 | 0.0273 | 0.0092 | 0.5307 | 0.0181 | -37.4937 | -33.9663 | -2.2333 | -2.2381 |
-| 0.24 | 3.9 | 1500 | 1.6201 | 0.0327 | 0.0149 | 0.5249 | 0.0178 | -37.4793 | -33.9527 | -2.2332 | -2.2381 |
+| Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+|:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+| 0.8961 | 0.26 | 100 | 0.9767 | 0.0065 | -0.0170 | 0.5365 | 0.0235 | -37.5409 | -34.0252 | -2.2266 | -2.2315 |
+| 0.7699 | 0.52 | 200 | 0.9742 | -0.0141 | -0.0400 | 0.5303 | 0.0259 | -37.5737 | -34.0547 | -2.2234 | -2.2282 |
+| 0.6723 | 0.78 | 300 | 0.9761 | -0.0366 | -0.0616 | 0.5299 | 0.0250 | -37.6047 | -34.0868 | -2.2186 | -2.2234 |
 
 
 ### Framework versions
 
-- PEFT 0.10.0
-- Transformers 4.39.0.dev0
+- PEFT 0.8.2
+- Transformers 4.37.2
 - Pytorch 2.1.2+cu121
-- Datasets 2.14.6
+- Datasets 2.17.0
 - Tokenizers 0.15.1
```
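The Rewards/* and Logps/* metrics above are the ones TRL's `DPOTrainer` logs for pairwise preference training, which fits the `aftonposten_title_prefs` dataset, although the card does not name the trainer. As a rough sketch of how such numbers relate to policy and reference log-probabilities (the `beta` value and the function name are assumptions, not taken from this repo):

```python
import torch

def dpo_reward_metrics(policy_chosen_logps, policy_rejected_logps,
                       ref_chosen_logps, ref_rejected_logps, beta=0.1):
    """Recompute DPO-style Rewards/* metrics from per-sequence log-probs.

    Each reward is beta times the log-ratio between the fine-tuned policy
    and the frozen reference model on the same completion.
    """
    chosen_rewards = beta * (policy_chosen_logps - ref_chosen_logps)
    rejected_rewards = beta * (policy_rejected_logps - ref_rejected_logps)
    return {
        "rewards/chosen": chosen_rewards.mean().item(),
        "rewards/rejected": rejected_rewards.mean().item(),
        "rewards/margins": (chosen_rewards - rejected_rewards).mean().item(),
        # Fraction of pairs where the chosen title out-scores the rejected one;
        # the 0.5278 reported above is only slightly better than chance.
        "rewards/accuracies": (chosen_rewards > rejected_rewards).float().mean().item(),
    }
```

Read this way, the new eval numbers say the policy drifted slightly below the reference on both completions (both rewards negative) while keeping a small positive margin in favour of the chosen titles.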
adapter_config.json CHANGED
```diff
@@ -21,12 +21,12 @@
   "revision": null,
   "target_modules": [
     "down_proj",
+    "up_proj",
     "o_proj",
-    "v_proj",
+    "q_proj",
     "k_proj",
-    "up_proj",
-    "gate_proj",
-    "q_proj"
+    "v_proj",
+    "gate_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
```
adapter_model.safetensors CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2c47bb1a324b5553592841a0c57e589c161272bd4e084e17244ed50fd058977a
+oid sha256:42067a4afd5eea30b7af1f79d96b539218ed034c29e4342836634cae3abd5bf4
 size 176183216
```
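Only the Git LFS pointer changes here: `oid` is the SHA-256 of the full weight file and `size` its byte count, which stays at 176183216 because the adapter shape is unchanged. A small sketch for checking a downloaded file against such a pointer (the helper name is ours):

```python
import hashlib

def matches_lfs_pointer(path: str, oid_hex: str, expected_size: int) -> bool:
    """Return True if the file's SHA-256 and size match the LFS pointer."""
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == oid_hex and size == expected_size

# For the new adapter weights:
# matches_lfs_pointer("adapter_model.safetensors",
#     "42067a4afd5eea30b7af1f79d96b539218ed034c29e4342836634cae3abd5bf4",
#     176183216)
```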
all_results.json CHANGED
```diff
@@ -1,21 +1,21 @@
 {
-    "epoch": 4.0,
-    "eval_logits/chosen": -2.237769603729248,
-    "eval_logits/rejected": -2.232879638671875,
-    "eval_logps/chosen": -33.96460723876953,
-    "eval_logps/rejected": -37.48141860961914,
-    "eval_loss": 1.6509573459625244,
-    "eval_rewards/accuracies": 0.5128737688064575,
-    "eval_rewards/chosen": 0.02797684073448181,
-    "eval_rewards/margins": 0.013894001953303814,
-    "eval_rewards/rejected": 0.014082840643823147,
-    "eval_runtime": 145.2372,
+    "epoch": 1.0,
+    "eval_logits/chosen": -2.2230520248413086,
+    "eval_logits/rejected": -2.2182328701019287,
+    "eval_logps/chosen": -34.07753372192383,
+    "eval_logps/rejected": -37.60981750488281,
+    "eval_loss": 0.9658033847808838,
+    "eval_rewards/accuracies": 0.5278239250183105,
+    "eval_rewards/chosen": -0.03008819743990898,
+    "eval_rewards/margins": 0.03515118733048439,
+    "eval_rewards/rejected": -0.06523937731981277,
+    "eval_runtime": 145.6671,
     "eval_samples": 343,
-    "eval_samples_per_second": 2.362,
-    "eval_steps_per_second": 0.296,
-    "train_loss": 0.36088944433571457,
-    "train_runtime": 10769.3493,
+    "eval_samples_per_second": 2.355,
+    "eval_steps_per_second": 0.295,
+    "train_loss": 0.8438688600218142,
+    "train_runtime": 3250.9917,
     "train_samples": 3079,
-    "train_samples_per_second": 1.144,
-    "train_steps_per_second": 0.143
+    "train_samples_per_second": 0.947,
+    "train_steps_per_second": 0.118
 }
```
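The throughput fields are consistent with the sample counts and runtimes in the same file, and the roughly 3.3x drop in `train_runtime` lines up with `num_epochs` going from 4 to 1. A quick check:

```python
# Consistency check on the new all_results.json values.
eval_samples, eval_runtime = 343, 145.6671
train_samples, train_runtime = 3079, 3250.9917

assert round(eval_samples / eval_runtime, 3) == 2.355    # eval_samples_per_second
assert round(train_samples / train_runtime, 3) == 0.947  # train_samples_per_second
```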
config.json CHANGED
```diff
@@ -18,8 +18,6 @@
   "n_layer": 28,
   "n_positions": 2048,
   "quantization_config": {
-    "_load_in_4bit": true,
-    "_load_in_8bit": false,
     "bnb_4bit_compute_dtype": "float16",
     "bnb_4bit_quant_type": "nf4",
     "bnb_4bit_use_double_quant": false,
@@ -47,7 +45,7 @@
   },
   "tie_word_embeddings": false,
   "tokenizer_class": "GPT2Tokenizer",
-  "transformers_version": "4.39.0.dev0",
+  "transformers_version": "4.37.2",
   "use_cache": true,
   "vocab_size": 50400
 }
```
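The dropped `_load_in_4bit`/`_load_in_8bit` keys appear to be internal fields that newer transformers releases serialize into `quantization_config`, so their removal matches the version moving back from 4.39.0.dev0 to 4.37.2. The visible settings describe a 4-bit NF4 setup; a sketch of the corresponding `BitsAndBytesConfig` (only keys present in the diff are set, everything else keeps library defaults):

```python
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,  # implied by the bnb_4bit_* keys in the config
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=False,
)
```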
eval_results.json CHANGED
```diff
@@ -1,16 +1,16 @@
 {
-    "epoch": 4.0,
-    "eval_logits/chosen": -2.237769603729248,
-    "eval_logits/rejected": -2.232879638671875,
-    "eval_logps/chosen": -33.96460723876953,
-    "eval_logps/rejected": -37.48141860961914,
-    "eval_loss": 1.6509573459625244,
-    "eval_rewards/accuracies": 0.5128737688064575,
-    "eval_rewards/chosen": 0.02797684073448181,
-    "eval_rewards/margins": 0.013894001953303814,
-    "eval_rewards/rejected": 0.014082840643823147,
-    "eval_runtime": 145.2372,
+    "epoch": 1.0,
+    "eval_logits/chosen": -2.2230520248413086,
+    "eval_logits/rejected": -2.2182328701019287,
+    "eval_logps/chosen": -34.07753372192383,
+    "eval_logps/rejected": -37.60981750488281,
+    "eval_loss": 0.9658033847808838,
+    "eval_rewards/accuracies": 0.5278239250183105,
+    "eval_rewards/chosen": -0.03008819743990898,
+    "eval_rewards/margins": 0.03515118733048439,
+    "eval_rewards/rejected": -0.06523937731981277,
+    "eval_runtime": 145.6671,
     "eval_samples": 343,
-    "eval_samples_per_second": 2.362,
-    "eval_steps_per_second": 0.296
+    "eval_samples_per_second": 2.355,
+    "eval_steps_per_second": 0.295
 }
```
train_results.json CHANGED
```diff
@@ -1,8 +1,8 @@
 {
-    "epoch": 4.0,
-    "train_loss": 0.36088944433571457,
-    "train_runtime": 10769.3493,
+    "epoch": 1.0,
+    "train_loss": 0.8438688600218142,
+    "train_runtime": 3250.9917,
     "train_samples": 3079,
-    "train_samples_per_second": 1.144,
-    "train_steps_per_second": 0.143
+    "train_samples_per_second": 0.947,
+    "train_steps_per_second": 0.118
 }
```
trainer_state.json CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:07e1cf58470e526e0cd9888eb2e74b50cb325be26bc3b03661761b2566ce04ee
+oid sha256:b715c426f0a7cb99b86fb1b4c90c55353be43921743323373dda6cb1c70085d8
 size 5176
```
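Both versions of `training_args.bin` are 5176 bytes; only the pickled contents differ. To see exactly which arguments changed between the runs, the file is the `TrainingArguments` object the Trainer saves alongside checkpoints and can be loaded directly (a sketch; only unpickle files you trust):

```python
import torch

# training_args.bin is a pickled TrainingArguments object; weights_only=False
# is required on newer PyTorch releases because it is not a plain tensor file.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.learning_rate, args.lr_scheduler_type)
```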