asm3515 committed
Commit fe50eb0
1 Parent(s): 0140b2f

Training in progress, epoch 1

README.md ADDED
@@ -0,0 +1,71 @@
+ ---
+ base_model: gpt2
+ library_name: peft
+ license: mit
+ metrics:
+ - accuracy
+ - f1
+ - precision
+ - recall
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: gpt2-sst2-sentiment-classifier-lora
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # gpt2-sst2-sentiment-classifier-lora
+
+ This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on the GLUE SST-2 (Stanford Sentiment Treebank) dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.2636
+ - Accuracy: 0.9083
+ - F1: 0.9111
+ - Precision: 0.8991
+ - Recall: 0.9234
+
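+ ## How to use
+
+ A minimal inference sketch (untested). The repo id and the label mapping (1 = positive, the SST-2 convention) are assumptions, not confirmed by this card:
+
+ ```python
+ # Sketch: load base GPT-2 for sequence classification, then attach this LoRA adapter.
+ import torch
+ from peft import PeftModel
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+ adapter_id = "asm3515/gpt2-sst2-sentiment-classifier-lora"  # assumed repo id
+ tokenizer = AutoTokenizer.from_pretrained("gpt2")
+ tokenizer.pad_token = tokenizer.eos_token  # GPT-2 ships without a pad token
+
+ base = AutoModelForSequenceClassification.from_pretrained("gpt2", num_labels=2)
+ base.config.pad_token_id = tokenizer.pad_token_id
+ model = PeftModel.from_pretrained(base, adapter_id)
+ model.eval()
+
+ inputs = tokenizer("a gripping, well-acted thriller", return_tensors="pt")
+ with torch.no_grad():
+     logits = model(**inputs).logits
+ print("positive" if logits.argmax(-1).item() == 1 else "negative")  # assumed label mapping
+ ```
+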
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 16
+ - eval_batch_size: 64
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 500
+ - num_epochs: 3
+
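+ These map onto a standard `Trainer` setup roughly as follows (a sketch, not the actual training script; model and dataset wiring is elided):
+
+ ```python
+ # Sketch: TrainingArguments mirroring the hyperparameters listed above.
+ # Adam betas/epsilon are the Transformers defaults, so they are not set explicitly.
+ from transformers import Trainer, TrainingArguments
+
+ args = TrainingArguments(
+     output_dir="gpt2-sst2-sentiment-classifier-lora",
+     learning_rate=5e-5,
+     per_device_train_batch_size=16,
+     per_device_eval_batch_size=64,
+     seed=42,
+     lr_scheduler_type="linear",
+     warmup_steps=500,
+     num_train_epochs=3,
+     eval_strategy="epoch",  # evaluation ran once per epoch, per the results table
+ )
+ # trainer = Trainer(model=model, args=args, train_dataset=..., eval_dataset=..., compute_metrics=...)
+ ```
+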
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall |
+ |:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|:---------:|:------:|
+ | 0.3138 | 1.0 | 4210 | 0.2550 | 0.9014 | 0.9034 | 0.9013 | 0.9054 |
+ | 0.2597 | 2.0 | 8420 | 0.2666 | 0.9014 | 0.9061 | 0.8792 | 0.9347 |
+ | 0.2436 | 3.0 | 12630 | 0.2636 | 0.9083 | 0.9111 | 0.8991 | 0.9234 |
+
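+ At batch size 16, the 4,210 optimizer steps per epoch correspond to roughly 67,360 training examples, which matches the size of the SST-2 train split (67,349 sentences).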
+
+ ### Framework versions
+
+ - PEFT 0.12.0
+ - Transformers 4.44.2
+ - Pytorch 2.4.1+cu121
+ - Datasets 3.0.0
+ - Tokenizers 0.19.1
adapter_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "google-bert/bert-base-uncased",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": [
+     "classifier",
+     "score"
+   ],
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "query",
+     "value",
+     "key"
+   ],
+   "task_type": "SEQ_CLS",
+   "use_dora": false,
+   "use_rslora": false
+ }
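An equivalent `peft.LoraConfig` for this file might look like the sketch below (unlisted fields are left at their PEFT 0.12.0 defaults; this is illustrative, not the original training code):

```python
# Sketch: a LoraConfig mirroring the committed adapter_config.json.
from peft import LoraConfig

lora_config = LoraConfig(
    task_type="SEQ_CLS",  # sequence classification
    r=8,                  # LoRA rank
    lora_alpha=32,        # scaling factor
    lora_dropout=0.1,
    target_modules=["query", "value", "key"],  # attention projections, as named in the file
    modules_to_save=["classifier", "score"],   # keep the classification head trainable
)
```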
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3469cac36a9a2586c40aac9daab57adc619ce419aa096b8d6b0a8e359198a993
+ size 1785816
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:191737278e01bb01bd153dc3e353c1363918f8554d2bb93e2f0f828c0f46b6c6
+ size 5176
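
Both binary files are committed as Git LFS pointers: the three-line text above is what lives in the repository, and the sha256 oid plus size identify the actual blob that `git lfs` fetches on checkout.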