apwic committed on
Commit
7e1799a
1 Parent(s): 40cb9de

Model save

README.md ADDED
@@ -0,0 +1,85 @@
+ ---
+ license: mit
+ base_model: indolem/indobert-base-uncased
+ tags:
+ - generated_from_trainer
+ metrics:
+ - precision
+ - recall
+ - f1
+ - accuracy
+ model-index:
+ - name: nerugm-lora-r8-2
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # nerugm-lora-r8-2
+
+ This model is a fine-tuned version of [indolem/indobert-base-uncased](https://huggingface.co/indolem/indobert-base-uncased) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.1743
+ - Precision: 0.6820
+ - Recall: 0.8289
+ - F1: 0.7483
+ - Accuracy: 0.9445
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 16
+ - eval_batch_size: 64
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 20.0
+
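A minimal sketch of how the hyperparameters listed above map onto the standard `transformers` `TrainingArguments`; the `output_dir` and the per-epoch evaluation strategy are assumptions added for illustration, not values recorded in this card.

```python
from transformers import TrainingArguments

# Sketch of the listed hyperparameters; output_dir and evaluation_strategy
# are assumptions, not read from this repository.
training_args = TrainingArguments(
    output_dir="nerugm-lora-r8-2",    # hypothetical
    learning_rate=5e-05,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=64,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-08,
    lr_scheduler_type="linear",
    num_train_epochs=20.0,
    evaluation_strategy="epoch",      # assumed: the results table reports one evaluation per epoch
)
```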
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
+ |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
+ | 1.2665 | 1.0 | 106 | 0.7137 | 0.0 | 0.0 | 0.0 | 0.8449 |
+ | 0.713 | 2.0 | 212 | 0.6075 | 0.0 | 0.0 | 0.0 | 0.8451 |
+ | 0.6346 | 3.0 | 318 | 0.5231 | 0.1905 | 0.0118 | 0.0222 | 0.8494 |
+ | 0.5555 | 4.0 | 424 | 0.4458 | 0.275 | 0.0649 | 0.1050 | 0.8656 |
+ | 0.4696 | 5.0 | 530 | 0.3715 | 0.4802 | 0.2861 | 0.3586 | 0.8949 |
+ | 0.3932 | 6.0 | 636 | 0.3134 | 0.5563 | 0.5251 | 0.5402 | 0.9194 |
+ | 0.3299 | 7.0 | 742 | 0.2706 | 0.5968 | 0.6637 | 0.6285 | 0.9277 |
+ | 0.2896 | 8.0 | 848 | 0.2433 | 0.62 | 0.7316 | 0.6712 | 0.9340 |
+ | 0.2656 | 9.0 | 954 | 0.2277 | 0.6289 | 0.7699 | 0.6923 | 0.9355 |
+ | 0.2442 | 10.0 | 1060 | 0.2082 | 0.6526 | 0.7758 | 0.7089 | 0.9387 |
+ | 0.23 | 11.0 | 1166 | 0.2020 | 0.6390 | 0.7935 | 0.7079 | 0.9382 |
+ | 0.2229 | 12.0 | 1272 | 0.1977 | 0.6524 | 0.8083 | 0.7220 | 0.9385 |
+ | 0.2132 | 13.0 | 1378 | 0.1886 | 0.6602 | 0.8083 | 0.7268 | 0.9402 |
+ | 0.2055 | 14.0 | 1484 | 0.1810 | 0.6708 | 0.7994 | 0.7295 | 0.9415 |
+ | 0.2038 | 15.0 | 1590 | 0.1822 | 0.6595 | 0.8112 | 0.7275 | 0.9405 |
+ | 0.2004 | 16.0 | 1696 | 0.1788 | 0.6731 | 0.8201 | 0.7394 | 0.9430 |
+ | 0.1966 | 17.0 | 1802 | 0.1775 | 0.6731 | 0.8260 | 0.7417 | 0.9432 |
+ | 0.1931 | 18.0 | 1908 | 0.1765 | 0.6683 | 0.8260 | 0.7388 | 0.9435 |
+ | 0.1937 | 19.0 | 2014 | 0.1749 | 0.6747 | 0.8260 | 0.7427 | 0.9437 |
+ | 0.1888 | 20.0 | 2120 | 0.1743 | 0.6820 | 0.8289 | 0.7483 | 0.9445 |
+
+
+ ### Framework versions
+
+ - Transformers 4.39.3
+ - Pytorch 2.3.0+cu121
+ - Datasets 2.19.1
+ - Tokenizers 0.15.2
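Besides the model card, this commit adds the trained LoRA adapter and its token-classification head under `nerugm-lora/` (see the files below). A minimal loading sketch, assuming the `adapters` library (v0.2.0, per `adapter_config.json`) and a local clone of this repository; the local path is an assumption:

```python
import adapters
from transformers import AutoModelForTokenClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("indolem/indobert-base-uncased")
model = AutoModelForTokenClassification.from_pretrained(
    "indolem/indobert-base-uncased", num_labels=11
)

adapters.init(model)                              # add adapter support to the plain HF model
adapter_name = model.load_adapter("nerugm-lora")  # local adapter directory from this repo (assumption)
model.set_active_adapters(adapter_name)
model.eval()
```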
nerugm-lora/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "config": {
+     "alpha": 8,
+     "architecture": "lora",
+     "attn_matrices": [
+       "q",
+       "v"
+     ],
+     "composition_mode": "add",
+     "dropout": 0.0,
+     "init_weights": "lora",
+     "intermediate_lora": false,
+     "leave_out": [],
+     "output_lora": false,
+     "r": 8,
+     "selfattn_lora": true,
+     "use_gating": false
+   },
+   "config_id": "625403edad0bf919",
+   "hidden_size": 768,
+   "model_class": "BertForTokenClassification",
+   "model_name": "indolem/indobert-base-uncased",
+   "model_type": "bert",
+   "name": "nerugm-lora",
+   "version": "0.2.0"
+ }
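For reference, a sketch of how the `"config"` block above could be reconstructed with the `adapters` library, whose `LoRAConfig` exposes the same fields as keyword arguments; the commented-out usage lines assume a model prepared as in the loading sketch earlier:

```python
from adapters import LoRAConfig

# Mirrors the "config" block of adapter_config.json above.
lora_config = LoRAConfig(
    r=8,                        # rank of the low-rank update matrices
    alpha=8,                    # scaling factor
    dropout=0.0,
    attn_matrices=["q", "v"],   # LoRA on the query and value projections only
    selfattn_lora=True,
    intermediate_lora=False,
    output_lora=False,
    composition_mode="add",
    init_weights="lora",
    use_gating=False,
)

# Hypothetical usage when training a fresh adapter:
# model.add_adapter("nerugm-lora", config=lora_config)
# model.train_adapter("nerugm-lora")
```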
nerugm-lora/head_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "config": null,
+   "hidden_size": 768,
+   "label2id": {
+     "B-LOCATION": 0,
+     "B-ORGANIZATION": 1,
+     "B-PERSON": 2,
+     "B-QUANTITY": 3,
+     "B-TIME": 4,
+     "I-LOCATION": 5,
+     "I-ORGANIZATION": 6,
+     "I-PERSON": 7,
+     "I-QUANTITY": 8,
+     "I-TIME": 9,
+     "O": 10
+   },
+   "model_class": "BertForTokenClassification",
+   "model_name": "indolem/indobert-base-uncased",
+   "model_type": "bert",
+   "name": null,
+   "num_labels": 11,
+   "version": "0.2.0"
+ }
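The `label2id` map above defines the tag set: BIO tags over LOCATION, ORGANIZATION, PERSON, QUANTITY and TIME, plus `O`. A sketch of decoding per-token predictions with it, assuming `model` and `tokenizer` were set up as in the loading sketch earlier; the input sentence is a placeholder:

```python
import torch

label2id = {
    "B-LOCATION": 0, "B-ORGANIZATION": 1, "B-PERSON": 2, "B-QUANTITY": 3,
    "B-TIME": 4, "I-LOCATION": 5, "I-ORGANIZATION": 6, "I-PERSON": 7,
    "I-QUANTITY": 8, "I-TIME": 9, "O": 10,
}
id2label = {i: label for label, i in label2id.items()}

text = "Budi berkunjung ke Yogyakarta"  # placeholder example sentence
inputs = tokenizer(text, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits     # shape: (1, seq_len, 11)

pred_ids = logits.argmax(dim=-1)[0].tolist()
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
for token, pred in zip(tokens, pred_ids):
    print(f"{token}\t{id2label[pred]}")
```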
nerugm-lora/pytorch_adapter.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ace107e8b0b993f3247a38ae8097068ebce770e9a05f10eaf60a092802067167
+ size 1197350
nerugm-lora/pytorch_model_head.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73d2f8f3708d828f1536e8babd36e27912938d926c9cb5e01b6755dd0e8d934f
+ size 35354
runs/May27_05-16-45_indolem-petl-vm/events.out.tfevents.1716787012.indolem-petl-vm.3331769.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fd97e9b8d5428ddbb17497751bccb33fa7cda32894f07ff711f3a94e9f7f8136
- size 18174
+ oid sha256:e78c34fe7915684dec56c167e9d28eead40c0334de6d41d4ed837045b6d560ab
+ size 19211