jjovalle99 committed on
Commit c505945 · verified · 1 Parent(s): 4823dea

llama7b-ft-lora-sql-v2adapters

Files changed (3)
  1. README.md +25 -15
  2. adapter_config.json +6 -6
  3. training_args.bin +1 -1
README.md CHANGED
@@ -19,7 +19,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf) on the generator dataset.
 It achieves the following results on the evaluation set:
- - Loss: 0.7207
+ - Loss: 0.3700
 
 ## Model description
 
@@ -38,29 +38,39 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
- - learning_rate: 0.0002
- - train_batch_size: 4
+ - learning_rate: 0.0003
+ - train_batch_size: 8
 - eval_batch_size: 8
 - seed: 1399
- - gradient_accumulation_steps: 8
+ - gradient_accumulation_steps: 4
 - total_train_batch_size: 32
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- - lr_scheduler_type: constant
- - lr_scheduler_warmup_steps: 10
- - training_steps: 100
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_steps: 100
+ - training_steps: 500
 
 ### Training results
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:-----:|:----:|:---------------:|
- | 1.362 | 0.89 | 5 | 1.1124 |
- | 1.005 | 1.78 | 10 | 0.8794 |
- | 0.8444 | 2.67 | 15 | 0.8025 |
- | 0.7662 | 3.56 | 20 | 0.7610 |
- | 0.7049 | 4.44 | 25 | 0.7361 |
- | 0.6586 | 5.33 | 30 | 0.7199 |
- | 0.599 | 6.22 | 35 | 0.7221 |
- | 0.5506 | 7.11 | 40 | 0.7207 |
+ | 1.2068 | 0.06 | 20 | 0.8181 |
+ | 0.6757 | 0.12 | 40 | 0.5148 |
+ | 0.5104 | 0.17 | 60 | 0.4552 |
+ | 0.4633 | 0.23 | 80 | 0.4269 |
+ | 0.442 | 0.29 | 100 | 0.4110 |
+ | 0.428 | 0.35 | 120 | 0.3993 |
+ | 0.4209 | 0.41 | 140 | 0.3983 |
+ | 0.4142 | 0.47 | 160 | 0.3932 |
+ | 0.4032 | 0.52 | 180 | 0.3888 |
+ | 0.3999 | 0.58 | 200 | 0.3841 |
+ | 0.3977 | 0.64 | 220 | 0.3827 |
+ | 0.397 | 0.7 | 240 | 0.3811 |
+ | 0.3927 | 0.76 | 260 | 0.3781 |
+ | 0.3873 | 0.82 | 280 | 0.3762 |
+ | 0.3871 | 0.87 | 300 | 0.3728 |
+ | 0.3861 | 0.93 | 320 | 0.3715 |
+ | 0.3809 | 0.99 | 340 | 0.3695 |
+ | 0.3664 | 1.05 | 360 | 0.3700 |
 
 
 ### Framework versions
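
Taken together, the README changes describe a shorter, higher-throughput run: the per-device batch doubles to 8 while gradient accumulation halves to 4, so the effective batch size stays at 8 × 4 = 32, and the constant schedule gives way to a cosine decay with a longer warmup. A minimal sketch of the updated setup, assuming the standard transformers `TrainingArguments` API (the `output_dir` and everything around it is illustrative, not taken from the commit):

```python
from transformers import TrainingArguments

# Hyperparameters as listed in the updated README.
# Effective batch size: 8 per device x 4 accumulation steps = 32.
args = TrainingArguments(
    output_dir="llama7b-ft-lora-sql-v2adapters",  # hypothetical name
    learning_rate=3e-4,                # was 2e-4
    per_device_train_batch_size=8,     # was 4
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=4,     # was 8
    lr_scheduler_type="cosine",        # was "constant"
    warmup_steps=100,                  # was 10
    max_steps=500,                     # was 100
    seed=1399,
)
```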
adapter_config.json CHANGED
@@ -10,22 +10,22 @@
   "layers_to_transform": null,
   "loftq_config": {},
   "lora_alpha": 32,
-  "lora_dropout": 0.1,
+  "lora_dropout": 0.05,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 8,
+  "r": 16,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "o_proj",
+    "q_proj",
     "gate_proj",
-    "v_proj",
     "k_proj",
+    "v_proj",
-    "down_proj",
     "up_proj",
+    "down_proj",
-    "q_proj"
+    "o_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5370046d575f40c699c1733a264a23118b545f3776e6e9fc192d66d38f2daa1f
+oid sha256:7b265d2440bdc7920eda82925c8b22f165e59e302b2125d6ccacac9ccb3a3924
 size 4920