hugodk-sch committed
Commit 8bc3984
1 Parent(s): 9ed5470

Training in progress, step 1400

README.md CHANGED
@@ -20,15 +20,15 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.4770
-- Rewards/chosen: 0.1934
-- Rewards/rejected: 0.0834
-- Rewards/accuracies: 0.5893
-- Rewards/margins: 0.1100
-- Logps/rejected: -37.3498
-- Logps/chosen: -33.6478
-- Logits/rejected: -2.1388
-- Logits/chosen: -2.1435
+- Loss: 1.0817
+- Rewards/chosen: 0.0048
+- Rewards/rejected: -0.0009
+- Rewards/accuracies: 0.5104
+- Rewards/margins: 0.0057
+- Logps/rejected: -37.5184
+- Logps/chosen: -34.0250
+- Logits/rejected: -2.2281
+- Logits/chosen: -2.2329
 
 ## Model description
 
@@ -57,33 +57,21 @@ The following hyperparameters were used during training:
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_ratio: 0.1
-- num_epochs: 4
+- num_epochs: 1
 
 ### Training results
 
-| Training Loss | Epoch | Step | Logits/chosen | Logits/rejected | Logps/chosen | Logps/rejected | Validation Loss | Rewards/accuracies | Rewards/chosen | Rewards/margins | Rewards/rejected |
-|:-------------:|:-----:|:----:|:-------------:|:---------------:|:------------:|:--------------:|:---------------:|:------------------:|:--------------:|:---------------:|:----------------:|
-| 0.4794 | 0.26 | 100 | -2.2375 | -2.2327 | -33.8699 | -37.3767 | 0.4971 | 0.5573 | 0.0824 | 0.0124 | 0.0699 |
-| 0.4561 | 0.52 | 200 | -2.2342 | -2.2294 | -33.7761 | -37.2930 | 0.4959 | 0.5424 | 0.1292 | 0.0174 | 0.1118 |
-| 0.4202 | 0.78 | 300 | -2.2333 | -2.2285 | -33.7508 | -37.2789 | 0.4944 | 0.5307 | 0.1419 | 0.0230 | 0.1189 |
-| 0.3592 | 1.04 | 400 | 0.4901 | 0.1964 | 0.1560 | 0.5768 | 0.0404 | -37.2046 | -33.6418 | -2.2025 | -2.2073 |
-| 0.3252 | 1.3 | 500 | 0.4906 | 0.2174 | 0.1800 | 0.5162 | 0.0374 | -37.1566 | -33.5997 | -2.2010 | -2.2057 |
-| 0.3006 | 1.56 | 600 | 0.4827 | 0.2385 | 0.1637 | 0.5710 | 0.0747 | -37.1891 | -33.5576 | -2.1813 | -2.1861 |
-| 0.3054 | 1.82 | 700 | 0.4810 | 0.2268 | 0.1440 | 0.5976 | 0.0828 | -37.2286 | -33.5809 | -2.1687 | -2.1734 |
-| 0.2314 | 2.08 | 800 | 0.4799 | 0.1958 | 0.1042 | 0.5743 | 0.0916 | -37.3083 | -33.6429 | -2.1556 | -2.1603 |
-| 0.2187 | 2.34 | 900 | 0.4799 | 0.1986 | 0.1061 | 0.5860 | 0.0925 | -37.3044 | -33.6374 | -2.1490 | -2.1537 |
-| 0.2382 | 2.6 | 1000 | 0.4784 | 0.1879 | 0.0856 | 0.6121 | 0.1023 | -37.3454 | -33.6588 | -2.1419 | -2.1466 |
-| 0.2041 | 2.86 | 1100 | 0.4782 | 0.1909 | 0.0885 | 0.5889 | 0.1024 | -37.3396 | -33.6528 | -2.1392 | -2.1438 |
-| 0.2467 | 3.12 | 1200 | 0.4766 | 0.1941 | 0.0835 | 0.5893 | 0.1106 | -37.3496 | -33.6463 | -2.1390 | -2.1437 |
-| 0.1951 | 3.38 | 1300 | 0.4767 | 0.1928 | 0.0830 | 0.5889 | 0.1098 | -37.3506 | -33.6490 | -2.1394 | -2.1441 |
-| 0.1994 | 3.64 | 1400 | 0.4777 | 0.1881 | 0.0826 | 0.5743 | 0.1055 | -37.3515 | -33.6584 | -2.1386 | -2.1433 |
-| 0.2272 | 3.9 | 1500 | 0.4770 | 0.1914 | 0.0827 | 0.5922 | 0.1087 | -37.3512 | -33.6518 | -2.1389 | -2.1436 |
+| Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+|:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+| 0.8581 | 0.26 | 100 | 1.0730 | 0.0088 | 0.0069 | 0.5199 | 0.0019 | -37.5028 | -34.0170 | -2.2326 | -2.2374 |
+| 0.899 | 0.52 | 200 | 1.0716 | -0.0015 | -0.0088 | 0.5071 | 0.0072 | -37.5342 | -34.0376 | -2.2306 | -2.2355 |
+| 0.6105 | 0.78 | 300 | 1.0315 | 0.0102 | -0.0099 | 0.5602 | 0.0201 | -37.5365 | -34.0141 | -2.2289 | -2.2338 |
 
 
 ### Framework versions
 
-- PEFT 0.10.0
-- Transformers 4.39.0.dev0
+- PEFT 0.8.2
+- Transformers 4.37.2
 - Pytorch 2.1.2+cu121
-- Datasets 2.14.6
+- Datasets 2.17.0
 - Tokenizers 0.15.1
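Note: the reward/log-probability metrics in the card are the ones TRL's `DPOTrainer` logs, so this run appears to be preference tuning on top of the QLoRA SFT base. A minimal sketch of how such a run could be wired up with the hyperparameters listed above (Adam betas, cosine schedule, 10% warmup, 1 epoch); the learning rate, batch size, `beta`, dataset split names and column layout are assumptions, not taken from this commit.

```python
# Sketch only: mirrors the hyperparameters from the README above.
# Learning rate, batch size, beta and split names are placeholders.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

base = "data/ap-gpt-j-6b-sft-qlora-04-08"   # SFT base named in the README
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base)

# Assumes the dataset exposes prompt/chosen/rejected columns.
dataset = load_dataset("hugodk-sch/aftonposten_title_prefs")

args = TrainingArguments(
    output_dir="ap-gpt-j-6b-dpo-qlora",   # hypothetical output name
    num_train_epochs=1,                   # matches num_epochs in the card
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    learning_rate=5e-6,                   # placeholder, not in this diff
    per_device_train_batch_size=4,        # placeholder
    evaluation_strategy="steps",
    eval_steps=100,                       # eval rows in the table land every 100 steps
    remove_unused_columns=False,          # required by the DPO data collator
)

trainer = DPOTrainer(
    model,
    ref_model=None,        # TRL builds a frozen reference copy when ref_model is None
    args=args,
    beta=0.1,              # placeholder
    train_dataset=dataset["train"],
    eval_dataset=dataset["test"],
    tokenizer=tokenizer,
)
trainer.train()
```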
adapter_config.json CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "down_proj",
+    "up_proj",
+    "gate_proj",
     "o_proj",
+    "v_proj",
     "q_proj",
     "k_proj",
-    "v_proj",
-    "up_proj",
-    "gate_proj"
+    "down_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6f46c85f0fcd659e30195c2a03dc670cbb5bf2695d2760e07499028db39b52e8
+oid sha256:12f6d09aa3ac048cb53037f14ce14baac1d201ebceec3988e0749fb178db23ef
 size 176183216
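Only the Git LFS pointer changes in the diff; the ~168 MB adapter weights themselves live in LFS storage. A rough sketch of attaching the adapter to the SFT base with PEFT; the adapter repo id and prompt are placeholders for wherever this checkpoint is published.

```python
# Sketch: load the adapter from this commit on top of the SFT base model.
# "your-namespace/your-adapter-repo" is a placeholder, not the real repo id.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "data/ap-gpt-j-6b-sft-qlora-04-08"
adapter_id = "your-namespace/your-adapter-repo"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16)
model = PeftModel.from_pretrained(base, adapter_id)  # reads adapter_model.safetensors

prompt = "Write a headline:"  # placeholder prompt
inputs = tokenizer(prompt, return_tensors="pt")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=32)[0]))
```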
all_results.json CHANGED
@@ -1,21 +1,21 @@
 {
-    "epoch": 4.0,
-    "eval_logits/chosen": -2.1435129642486572,
-    "eval_logits/rejected": -2.1388447284698486,
-    "eval_logps/chosen": -33.64778518676758,
-    "eval_logps/rejected": -37.34977340698242,
-    "eval_loss": 0.47702479362487793,
-    "eval_rewards/accuracies": 0.5892857313156128,
-    "eval_rewards/chosen": 0.19338257610797882,
-    "eval_rewards/margins": 0.109960176050663,
-    "eval_rewards/rejected": 0.08342239260673523,
-    "eval_runtime": 145.8636,
+    "epoch": 1.0,
+    "eval_logits/chosen": -2.232933521270752,
+    "eval_logits/rejected": -2.2280824184417725,
+    "eval_logps/chosen": -34.02503967285156,
+    "eval_logps/rejected": -37.51841735839844,
+    "eval_loss": 1.0816813707351685,
+    "eval_rewards/accuracies": 0.5103820562362671,
+    "eval_rewards/chosen": 0.004756772890686989,
+    "eval_rewards/margins": 0.005652438849210739,
+    "eval_rewards/rejected": -0.0008956658421084285,
+    "eval_runtime": 145.5936,
     "eval_samples": 343,
-    "eval_samples_per_second": 2.352,
+    "eval_samples_per_second": 2.356,
     "eval_steps_per_second": 0.295,
-    "train_loss": 0.2131701453939661,
-    "train_runtime": 10811.0627,
+    "train_loss": 0.8446982934877469,
+    "train_runtime": 3252.3399,
     "train_samples": 3079,
-    "train_samples_per_second": 1.139,
-    "train_steps_per_second": 0.142
+    "train_samples_per_second": 0.947,
+    "train_steps_per_second": 0.118
 }
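As a sanity check, the logged reward margin is simply the chosen reward minus the rejected reward, and the new values above are consistent with that up to float32 rounding:

```python
# rewards/margins should equal rewards/chosen - rewards/rejected
chosen = 0.004756772890686989
rejected = -0.0008956658421084285
margins = 0.005652438849210739

print(chosen - rejected)                   # 0.0056524387327954175
print(abs((chosen - rejected) - margins))  # ~1e-10, float32 rounding noise
```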
config.json CHANGED
@@ -18,8 +18,6 @@
   "n_layer": 28,
   "n_positions": 2048,
   "quantization_config": {
-    "_load_in_4bit": true,
-    "_load_in_8bit": false,
     "bnb_4bit_compute_dtype": "float16",
     "bnb_4bit_quant_type": "nf4",
     "bnb_4bit_use_double_quant": false,
@@ -47,7 +45,7 @@
   },
   "tie_word_embeddings": false,
   "tokenizer_class": "GPT2Tokenizer",
-  "transformers_version": "4.39.0.dev0",
+  "transformers_version": "4.37.2",
   "use_cache": true,
   "vocab_size": 50400
 }
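The dropped `_load_in_4bit` / `_load_in_8bit` keys appear to be internal fields serialized by newer Transformers versions; the quantization setup itself is unchanged. Recreating it at load time looks roughly like this, mirroring the `quantization_config` block in config.json:

```python
# Sketch: 4-bit NF4 quantization matching the quantization_config in config.json.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=False,
)

model = AutoModelForCausalLM.from_pretrained(
    "data/ap-gpt-j-6b-sft-qlora-04-08",  # base model named in the README
    quantization_config=bnb_config,
    device_map="auto",
)
```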
eval_results.json CHANGED
@@ -1,16 +1,16 @@
 {
-    "epoch": 4.0,
-    "eval_logits/chosen": -2.1435129642486572,
-    "eval_logits/rejected": -2.1388447284698486,
-    "eval_logps/chosen": -33.64778518676758,
-    "eval_logps/rejected": -37.34977340698242,
-    "eval_loss": 0.47702479362487793,
-    "eval_rewards/accuracies": 0.5892857313156128,
-    "eval_rewards/chosen": 0.19338257610797882,
-    "eval_rewards/margins": 0.109960176050663,
-    "eval_rewards/rejected": 0.08342239260673523,
-    "eval_runtime": 145.8636,
+    "epoch": 1.0,
+    "eval_logits/chosen": -2.232933521270752,
+    "eval_logits/rejected": -2.2280824184417725,
+    "eval_logps/chosen": -34.02503967285156,
+    "eval_logps/rejected": -37.51841735839844,
+    "eval_loss": 1.0816813707351685,
+    "eval_rewards/accuracies": 0.5103820562362671,
+    "eval_rewards/chosen": 0.004756772890686989,
+    "eval_rewards/margins": 0.005652438849210739,
+    "eval_rewards/rejected": -0.0008956658421084285,
+    "eval_runtime": 145.5936,
     "eval_samples": 343,
-    "eval_samples_per_second": 2.352,
+    "eval_samples_per_second": 2.356,
     "eval_steps_per_second": 0.295
 }
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "epoch": 4.0,
-    "train_loss": 0.2131701453939661,
-    "train_runtime": 10811.0627,
+    "epoch": 1.0,
+    "train_loss": 0.8446982934877469,
+    "train_runtime": 3252.3399,
     "train_samples": 3079,
-    "train_samples_per_second": 1.139,
-    "train_steps_per_second": 0.142
+    "train_samples_per_second": 0.947,
+    "train_steps_per_second": 0.118
 }
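The throughput figure follows directly from the sample count and runtime: 3079 samples over one epoch in 3252.34 s is about 0.947 samples/s, matching the logged value.

```python
# train_samples_per_second = train_samples * epochs / train_runtime
print(3079 * 1.0 / 3252.3399)  # ~0.9467, rounds to the logged 0.947
```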
trainer_state.json CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:02ca49827b893bc712e0a301674e01ebffe16d4c7500943127387ef65d9c0346
+oid sha256:cba7cea87426fb79c9f89c8c302642cfb8f90ae77279e90f5d889e1a7e26dc53
 size 5176
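Like adapter_model.safetensors, training_args.bin is stored as a Git LFS pointer: the three lines give the pointer spec version, the SHA-256 of the actual blob, and its size in bytes. A quick way to confirm a downloaded copy matches the oid recorded here:

```python
# Verify a locally downloaded file against the sha256 oid in its LFS pointer.
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "cba7cea87426fb79c9f89c8c302642cfb8f90ae77279e90f5d889e1a7e26dc53"
print(sha256_of("training_args.bin") == expected)
```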