hugodk-sch committed
Commit b4f6f08
1 Parent(s): 3829c7f

End of training

Files changed (4)
  1. README.md +9 -6
  2. all_results.json +13 -0
  3. config.json +51 -0
  4. eval_results.json +16 -0
README.md CHANGED
@@ -1,10 +1,13 @@
 ---
 library_name: peft
 tags:
+- alignment-handbook
 - trl
 - dpo
 - generated_from_trainer
 base_model: NbAiLab/nb-gpt-j-6B-v2
+datasets:
+- hugodk-sch/aftonposten_title_prefs
 model-index:
 - name: aftonposten-6b-align-scan
   results: []
@@ -15,17 +18,17 @@ should probably proofread and complete it, then remove this comment. -->
 
 # aftonposten-6b-align-scan
 
-This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
+This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
 It achieves the following results on the evaluation set:
 - Loss: 0.6931
 - Rewards/chosen: 0.0001
 - Rewards/rejected: 0.0001
-- Rewards/accuracies: 0.4958
+- Rewards/accuracies: 0.5328
 - Rewards/margins: 0.0000
-- Logps/rejected: -37.5088
-- Logps/chosen: -34.0255
-- Logits/rejected: -2.2394
-- Logits/chosen: -2.2443
+- Logps/rejected: -37.5101
+- Logps/chosen: -34.0230
+- Logits/rejected: -2.2387
+- Logits/chosen: -2.2436
 
 ## Model description
 
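The card metadata marks this as a PEFT adapter trained with TRL's DPO on top of NbAiLab/nb-gpt-j-6B-v2. A minimal loading sketch for inference; the adapter repo id (taken from the model name) and the dtype/device choices are assumptions, not recorded in this commit:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Load the base model named in the card metadata.
base = AutoModelForCausalLM.from_pretrained(
    "NbAiLab/nb-gpt-j-6B-v2",
    torch_dtype=torch.float16,  # assumption; dtype is not recorded here
    device_map="auto",
)
# Attach the DPO-tuned PEFT adapter (repo id assumed from the model name).
model = PeftModel.from_pretrained(base, "hugodk-sch/aftonposten-6b-align-scan")
tokenizer = AutoTokenizer.from_pretrained("NbAiLab/nb-gpt-j-6B-v2")
```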
all_results.json CHANGED
@@ -1,5 +1,18 @@
 {
     "epoch": 1.0,
+    "eval_logits/chosen": -2.2436320781707764,
+    "eval_logits/rejected": -2.2387471199035645,
+    "eval_logps/chosen": -34.02302932739258,
+    "eval_logps/rejected": -37.51007080078125,
+    "eval_loss": 0.6931225061416626,
+    "eval_rewards/accuracies": 0.5328072905540466,
+    "eval_rewards/chosen": 0.00011527122114785016,
+    "eval_rewards/margins": 4.97526052640751e-05,
+    "eval_rewards/rejected": 6.551862315973267e-05,
+    "eval_runtime": 145.6938,
+    "eval_samples": 343,
+    "eval_samples_per_second": 2.354,
+    "eval_steps_per_second": 0.295,
     "train_loss": 0.6930816006350827,
     "train_runtime": 3252.6347,
     "train_samples": 3079,
config.json ADDED
@@ -0,0 +1,51 @@
+{
+  "_name_or_path": "NbAiLab/nb-gpt-j-6B-v2",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPTJForCausalLM"
+  ],
+  "attn_pdrop": 0.0,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.0,
+  "eos_token_id": 50256,
+  "gradient_checkpointing": false,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gptj",
+  "n_embd": 4096,
+  "n_head": 16,
+  "n_inner": null,
+  "n_layer": 28,
+  "n_positions": 2048,
+  "quantization_config": {
+    "bnb_4bit_compute_dtype": "float16",
+    "bnb_4bit_quant_type": "nf4",
+    "bnb_4bit_use_double_quant": false,
+    "llm_int8_enable_fp32_cpu_offload": false,
+    "llm_int8_has_fp16_weight": false,
+    "llm_int8_skip_modules": null,
+    "llm_int8_threshold": 6.0,
+    "load_in_4bit": true,
+    "load_in_8bit": false,
+    "quant_method": "bitsandbytes"
+  },
+  "resid_pdrop": 0.0,
+  "rotary_dim": 64,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50,
+      "temperature": 1.0
+    }
+  },
+  "tie_word_embeddings": false,
+  "tokenizer_class": "GPT2Tokenizer",
+  "transformers_version": "4.37.2",
+  "use_cache": true,
+  "vocab_size": 50400
+}
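The `quantization_config` block above maps directly onto transformers' `BitsAndBytesConfig`. A sketch of reproducing the same 4-bit NF4 setup when loading the base model; only the `device_map` choice is an assumption:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the quantization_config in config.json: 4-bit NF4 weights,
# float16 compute, no double quantization.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=False,
)

model = AutoModelForCausalLM.from_pretrained(
    "NbAiLab/nb-gpt-j-6B-v2",
    quantization_config=bnb_config,
    device_map="auto",  # assumption; not recorded in config.json
)
```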
eval_results.json ADDED
@@ -0,0 +1,16 @@
+{
+    "epoch": 1.0,
+    "eval_logits/chosen": -2.2436320781707764,
+    "eval_logits/rejected": -2.2387471199035645,
+    "eval_logps/chosen": -34.02302932739258,
+    "eval_logps/rejected": -37.51007080078125,
+    "eval_loss": 0.6931225061416626,
+    "eval_rewards/accuracies": 0.5328072905540466,
+    "eval_rewards/chosen": 0.00011527122114785016,
+    "eval_rewards/margins": 4.97526052640751e-05,
+    "eval_rewards/rejected": 6.551862315973267e-05,
+    "eval_runtime": 145.6938,
+    "eval_samples": 343,
+    "eval_samples_per_second": 2.354,
+    "eval_steps_per_second": 0.295
+}
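One sanity check these numbers permit: the DPO loss is -log(sigmoid(beta * margin)), which tends to ln 2 ≈ 0.6931 as the margin goes to 0, and the eval_loss here sits almost exactly there, consistent with the near-zero `rewards/margins`. A small verification sketch, assuming the file is read from the current directory:

```python
import json
import math

with open("eval_results.json") as f:
    results = json.load(f)

# DPO loss at zero margin is -log(sigmoid(0)) = ln 2 ~= 0.69315.
print(math.log(2))                      # 0.6931471805599453
print(results["eval_loss"])             # 0.6931225061416626
print(results["eval_rewards/margins"])  # ~4.98e-05 (effectively zero)
```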