hugodk-sch committed
Commit d3bc83b
Parent(s): 19c4592

Training in progress, step 700
README.md CHANGED
@@ -20,15 +20,15 @@ should probably proofread and complete it, then remove this comment. -->

  This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.9194
- - Rewards/chosen: -0.1677
- - Rewards/rejected: -0.2502
- - Rewards/accuracies: 0.5714
- - Rewards/margins: 0.0825
- - Logps/rejected: -38.3506
- - Logps/chosen: -34.5937
- - Logits/rejected: -2.0928
- - Logits/chosen: -2.0975
+ - Loss: 0.9479
+ - Rewards/chosen: -0.0348
+ - Rewards/rejected: -0.0895
+ - Rewards/accuracies: 0.5419
+ - Rewards/margins: 0.0548
+ - Logps/rejected: -37.6161
+ - Logps/chosen: -34.0732
+ - Logits/rejected: -2.2190
+ - Logits/chosen: -2.2239

  ## Model description

@@ -57,33 +57,21 @@ The following hyperparameters were used during training:
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: cosine
  - lr_scheduler_warmup_ratio: 0.1
- - num_epochs: 4
+ - num_epochs: 1

  ### Training results

- | Training Loss | Epoch | Step | Logits/chosen | Logits/rejected | Logps/chosen | Logps/rejected | Validation Loss | Rewards/accuracies | Rewards/chosen | Rewards/margins | Rewards/rejected |
- |:-------------:|:-----:|:----:|:-------------:|:---------------:|:------------:|:--------------:|:---------------:|:------------------:|:--------------:|:---------------:|:----------------:|
- | 0.956 | 0.26 | 100 | -2.2325 | -2.2276 | -34.0495 | -37.5492 | 0.9948 | 0.5631 | -0.0045 | 0.0053 | -0.0098 |
- | 0.8922 | 0.52 | 200 | -2.2279 | -2.2231 | -34.0659 | -37.5894 | 0.9876 | 0.5303 | -0.0094 | 0.0124 | -0.0218 |
- | 0.8033 | 0.78 | 300 | -2.2250 | -2.2202 | -34.1029 | -37.6211 | 0.9891 | 0.5183 | -0.0205 | 0.0108 | -0.0313 |
- | 0.6314 | 1.04 | 400 | -2.1854 | -2.1806 | -34.2547 | -37.8504 | 0.9659 | 0.5332 | -0.0660 | 0.0341 | -0.1001 |
- | 0.5138 | 1.3 | 500 | -2.1645 | -2.1597 | -34.5325 | -38.0921 | 0.9765 | 0.5361 | -0.1494 | 0.0232 | -0.1726 |
- | 0.5152 | 1.56 | 600 | -2.1366 | -2.1318 | -34.4478 | -38.0989 | 0.9496 | 0.5885 | -0.1240 | 0.0507 | -0.1747 |
- | 0.4499 | 1.82 | 700 | -2.1182 | -2.1135 | -34.4983 | -38.1884 | 0.9377 | 0.5772 | -0.1391 | 0.0624 | -0.2015 |
- | 0.2662 | 2.08 | 800 | -2.1052 | -2.1005 | -34.6008 | -38.3115 | 0.9329 | 0.5743 | -0.1699 | 0.0686 | -0.2385 |
- | 0.2981 | 2.34 | 900 | -2.0983 | -2.0936 | -34.5936 | -38.3186 | 0.9285 | 0.5540 | -0.1677 | 0.0729 | -0.2406 |
- | 0.3625 | 2.6 | 1000 | -2.0980 | -2.0933 | -34.6484 | -38.3607 | 0.9330 | 0.5976 | -0.1841 | 0.0691 | -0.2532 |
- | 0.2579 | 2.86 | 1100 | -2.0982 | -2.0935 | -34.6283 | -38.3564 | 0.9269 | 0.5831 | -0.1781 | 0.0738 | -0.2519 |
- | 0.343 | 3.12 | 1200 | -2.0970 | -2.0923 | -34.6093 | -38.3445 | 0.9258 | 0.5772 | -0.1724 | 0.0760 | -0.2484 |
- | 0.23 | 3.38 | 1300 | -2.0967 | -2.0920 | -34.6061 | -38.3449 | 0.9245 | 0.5831 | -0.1715 | 0.0770 | -0.2485 |
- | 0.1873 | 3.64 | 1400 | -2.0967 | -2.0920 | -34.6131 | -38.3547 | 0.9230 | 0.5835 | -0.1736 | 0.0779 | -0.2514 |
- | 0.3099 | 3.9 | 1500 | -2.0972 | -2.0925 | -34.6056 | -38.3433 | 0.9251 | 0.5772 | -0.1713 | 0.0767 | -0.2480 |
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.8913 | 0.26 | 100 | 0.9845 | -0.0055 | -0.0215 | 0.5195 | 0.0159 | -37.5405 | -34.0407 | -2.2273 | -2.2322 |
+ | 0.7293 | 0.52 | 200 | 0.9602 | -0.0172 | -0.0580 | 0.5714 | 0.0408 | -37.5811 | -34.0537 | -2.2238 | -2.2286 |
+ | 0.6144 | 0.78 | 300 | 0.9713 | -0.0468 | -0.0779 | 0.5282 | 0.0310 | -37.6032 | -34.0866 | -2.2201 | -2.2249 |


  ### Framework versions

- - PEFT 0.10.0
- - Transformers 4.39.0.dev0
+ - PEFT 0.8.2
+ - Transformers 4.37.2
  - Pytorch 2.1.2+cu121
- - Datasets 2.14.6
+ - Datasets 2.17.0
  - Tokenizers 0.15.1
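The reward columns in the card above are related quantities rather than independent metrics: Rewards/margins is the mean gap between the chosen and rejected rewards, and Rewards/accuracies is the fraction of preference pairs the model orders correctly. A minimal sketch of those relationships, assuming the TRL/DPO-style convention for per-pair rewards (the commit itself only records the aggregated numbers):

```python
# Hypothetical per-pair rewards; the README reports only their means.
def preference_metrics(rewards_chosen, rewards_rejected):
    margins = [c - r for c, r in zip(rewards_chosen, rewards_rejected)]
    n = len(margins)
    return {
        "rewards/margins": sum(margins) / n,                    # mean chosen-minus-rejected gap
        "rewards/accuracies": sum(m > 0 for m in margins) / n,  # fraction of correctly ordered pairs
    }

# Sanity check against the eval means reported above:
# -0.0348 - (-0.0895) = 0.0547, consistent with the reported Rewards/margins of 0.0548.
```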
adapter_config.json CHANGED
@@ -20,13 +20,13 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "up_proj",
  "q_proj",
+ "up_proj",
  "k_proj",
  "o_proj",
+ "v_proj",
  "down_proj",
- "gate_proj",
- "v_proj"
+ "gate_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:abba74746ef138932bd922612c1131e655c728a3c5e8573c3d30d9470d7f2b52
+ oid sha256:e9421b87f0502e1959890f4f41885afb6dd73ab1f0ff285e1756787151e264d2
  size 176183216
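Only the Git LFS pointer (spec version, SHA-256 oid, byte size) is stored in git; the 176 MB of adapter weights live in LFS storage. An unchanged size with a changed oid means the tensors kept their shapes and only the values differ. A short sketch for verifying a downloaded file against the pointer (the local filename is an assumption):

```python
import hashlib

# SHA-256 oid taken from the new LFS pointer above.
EXPECTED_OID = "e9421b87f0502e1959890f4f41885afb6dd73ab1f0ff285e1756787151e264d2"

def sha256_of(path, chunk_size=1 << 20):
    """Hash a file in 1 MiB chunks to avoid loading it all into memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Assumes the weight file was fetched into the working directory.
assert sha256_of("adapter_model.safetensors") == EXPECTED_OID
```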
all_results.json CHANGED
@@ -1,21 +1,21 @@
  {
- "epoch": 4.0,
- "eval_logits/chosen": -2.097468614578247,
- "eval_logits/rejected": -2.0927796363830566,
- "eval_logps/chosen": -34.59366989135742,
- "eval_logps/rejected": -38.35063171386719,
- "eval_loss": 0.9193615317344666,
- "eval_rewards/accuracies": 0.5714285373687744,
- "eval_rewards/chosen": -0.16773515939712524,
- "eval_rewards/margins": 0.08246820420026779,
- "eval_rewards/rejected": -0.25020334124565125,
- "eval_runtime": 145.7009,
+ "epoch": 1.0,
+ "eval_logits/chosen": -2.223867654800415,
+ "eval_logits/rejected": -2.219041109085083,
+ "eval_logps/chosen": -34.07316970825195,
+ "eval_logps/rejected": -37.6160888671875,
+ "eval_loss": 0.947879433631897,
+ "eval_rewards/accuracies": 0.5419435501098633,
+ "eval_rewards/chosen": -0.0347554087638855,
+ "eval_rewards/margins": 0.05476636067032814,
+ "eval_rewards/rejected": -0.08952176570892334,
+ "eval_runtime": 145.6786,
  "eval_samples": 343,
  "eval_samples_per_second": 2.354,
  "eval_steps_per_second": 0.295,
- "train_loss": 0.32053508061867253,
- "train_runtime": 10790.1902,
+ "train_loss": 0.8125879040012112,
+ "train_runtime": 3249.3137,
  "train_samples": 3079,
- "train_samples_per_second": 1.141,
- "train_steps_per_second": 0.143
+ "train_samples_per_second": 0.948,
+ "train_steps_per_second": 0.118
  }
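The throughput fields here are derived from the run rather than measured independently: train_samples_per_second appears to be train_samples × epochs / train_runtime (an assumption about the trainer's bookkeeping, but one the recorded numbers bear out):

```python
# Old run: 4 epochs over 3079 samples in 10790.19 s.
print(round(3079 * 4.0 / 10790.1902, 3))  # 1.141, matching the removed value
# New run: 1 epoch over the same 3079 samples in 3249.31 s.
print(round(3079 * 1.0 / 3249.3137, 3))   # 0.948, matching the added value
```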
config.json CHANGED
@@ -18,8 +18,6 @@
  "n_layer": 28,
  "n_positions": 2048,
  "quantization_config": {
- "_load_in_4bit": true,
- "_load_in_8bit": false,
  "bnb_4bit_compute_dtype": "float16",
  "bnb_4bit_quant_type": "nf4",
  "bnb_4bit_use_double_quant": false,
@@ -47,7 +45,7 @@
  },
  "tie_word_embeddings": false,
  "tokenizer_class": "GPT2Tokenizer",
- "transformers_version": "4.39.0.dev0",
+ "transformers_version": "4.37.2",
  "use_cache": true,
  "vocab_size": 50400
  }
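The two removed keys, `_load_in_4bit` and `_load_in_8bit`, appear to be private attributes that the 4.39 development build serializes and 4.37.2 does not; the effective NF4 settings are unchanged. A minimal sketch of the quantization setup this block describes, assuming the standard transformers/bitsandbytes API:

```python
import torch
from transformers import BitsAndBytesConfig

# 4-bit NF4 quantization with fp16 compute, matching the config block above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=False,
)
```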
eval_results.json CHANGED
@@ -1,15 +1,15 @@
  {
- "epoch": 4.0,
- "eval_logits/chosen": -2.097468614578247,
- "eval_logits/rejected": -2.0927796363830566,
- "eval_logps/chosen": -34.59366989135742,
- "eval_logps/rejected": -38.35063171386719,
- "eval_loss": 0.9193615317344666,
- "eval_rewards/accuracies": 0.5714285373687744,
- "eval_rewards/chosen": -0.16773515939712524,
- "eval_rewards/margins": 0.08246820420026779,
- "eval_rewards/rejected": -0.25020334124565125,
- "eval_runtime": 145.7009,
+ "epoch": 1.0,
+ "eval_logits/chosen": -2.223867654800415,
+ "eval_logits/rejected": -2.219041109085083,
+ "eval_logps/chosen": -34.07316970825195,
+ "eval_logps/rejected": -37.6160888671875,
+ "eval_loss": 0.947879433631897,
+ "eval_rewards/accuracies": 0.5419435501098633,
+ "eval_rewards/chosen": -0.0347554087638855,
+ "eval_rewards/margins": 0.05476636067032814,
+ "eval_rewards/rejected": -0.08952176570892334,
+ "eval_runtime": 145.6786,
  "eval_samples": 343,
  "eval_samples_per_second": 2.354,
  "eval_steps_per_second": 0.295
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
- "epoch": 4.0,
- "train_loss": 0.32053508061867253,
- "train_runtime": 10790.1902,
+ "epoch": 1.0,
+ "train_loss": 0.8125879040012112,
+ "train_runtime": 3249.3137,
  "train_samples": 3079,
- "train_samples_per_second": 1.141,
- "train_steps_per_second": 0.143
+ "train_samples_per_second": 0.948,
+ "train_steps_per_second": 0.118
  }
trainer_state.json CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:03f767c74f345697032eed65875a78939878c73ae7049f800d864f7bd6bbcfac
+ oid sha256:2b693805000bf6ee72fa3619058beec5d399922cc80612ea5e9a7a7feb2374ed
  size 5176