Meli101 committed (verified)
Commit d44a591 · 1 Parent(s): aac9bd7

End of training

README.md CHANGED
@@ -1,5 +1,6 @@
 ---
-base_model: dmis-lab/biobert-v1.1
+license: mit
+base_model: microsoft/BiomedNLP-KRISSBERT-PubMed-UMLS-EL
 tags:
 - generated_from_trainer
 metrics:
@@ -17,13 +18,13 @@ should probably proofread and complete it, then remove this comment. -->
 
 # results
 
-This model is a fine-tuned version of [dmis-lab/biobert-v1.1](https://huggingface.co/dmis-lab/biobert-v1.1) on the None dataset.
+This model is a fine-tuned version of [microsoft/BiomedNLP-KRISSBERT-PubMed-UMLS-EL](https://huggingface.co/microsoft/BiomedNLP-KRISSBERT-PubMed-UMLS-EL) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.4062
-- Precision: 0.9169
-- Recall: 0.9170
-- Accuracy: 0.9170
-- F1: 0.9170
+- Loss: 0.4228
+- Precision: 0.9215
+- Recall: 0.9209
+- Accuracy: 0.9211
+- F1: 0.9210
 
 ## Model description
 
@@ -54,11 +55,11 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | Accuracy | F1 |
 |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:--------:|:------:|
-| No log | 1.0 | 308 | 0.3815 | 0.8655 | 0.8595 | 0.8584 | 0.8587 |
-| 0.4485 | 2.0 | 616 | 0.3222 | 0.9076 | 0.9073 | 0.9072 | 0.9072 |
-| 0.4485 | 3.0 | 924 | 0.3684 | 0.9101 | 0.9098 | 0.9097 | 0.9099 |
-| 0.1574 | 4.0 | 1232 | 0.3878 | 0.9163 | 0.9162 | 0.9162 | 0.9162 |
-| 0.0613 | 5.0 | 1540 | 0.4062 | 0.9169 | 0.9170 | 0.9170 | 0.9170 |
+| No log | 1.0 | 308 | 0.3266 | 0.8847 | 0.8822 | 0.8820 | 0.8824 |
+| 0.4217 | 2.0 | 616 | 0.3034 | 0.9072 | 0.9066 | 0.9064 | 0.9065 |
+| 0.4217 | 3.0 | 924 | 0.3483 | 0.9171 | 0.9170 | 0.9170 | 0.9171 |
+| 0.163 | 4.0 | 1232 | 0.3952 | 0.9227 | 0.9227 | 0.9227 | 0.9226 |
+| 0.0722 | 5.0 | 1540 | 0.4228 | 0.9215 | 0.9209 | 0.9211 | 0.9210 |
 
 
 ### Framework versions
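The README diff above records the swap of the base checkpoint from dmis-lab/biobert-v1.1 to microsoft/BiomedNLP-KRISSBERT-PubMed-UMLS-EL together with the new evaluation numbers. For reference, here is a minimal inference sketch for a checkpoint of this kind; the repo id `Meli101/results` is only an assumption pieced together from the committer name and the card title, while the architecture (BertForSequenceClassification) and transformers 4.35.2 come from the config.json diff further down.

```python
# Minimal sketch, not part of the original card. "Meli101/results" is an
# assumed repo id; replace it with the actual model id before running.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

repo_id = "Meli101/results"  # assumption, see lead-in
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)
model.eval()

text = "Patient was started on metformin for type 2 diabetes."
inputs = tokenizer(text, truncation=True, max_length=512, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(dim=-1).item())  # predicted label index
```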
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+{
+  "</ENT>": 30523,
+  "<ENT>": 30522,
+  "[DUMMY]": 30524
+}
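The three entries above are the entity markers that ship with the KRISSBERT tokenizer; their ids (30522-30524) sit directly after the 30,522-entry base vocabulary, which is consistent with the vocab_size of 30525 recorded in the config.json diff below. A hedged sketch of the usual pattern for registering extra tokens and keeping the embedding matrix in sync (illustrative only, not the training script used for this commit):

```python
# Illustrative sketch: how tokens such as <ENT>, </ENT> and [DUMMY] are
# typically registered, and why the config ends up with vocab_size 30525
# (30522 base entries + 3 added tokens). Not the author's actual code.
from transformers import AutoTokenizer, AutoModelForSequenceClassification

base = "microsoft/BiomedNLP-KRISSBERT-PubMed-UMLS-EL"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForSequenceClassification.from_pretrained(base)

# add_tokens() is a no-op for tokens the tokenizer already knows about.
tokenizer.add_tokens(["<ENT>", "</ENT>", "[DUMMY]"])
# Grow (or confirm) the embedding matrix so every token id has a row.
model.resize_token_embeddings(len(tokenizer))
print(len(tokenizer))  # expected to match config.json's vocab_size (30525)
```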
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "dmis-lab/biobert-v1.1",
+  "_name_or_path": "microsoft/BiomedNLP-KRISSBERT-PubMed-UMLS-EL",
   "architectures": [
     "BertForSequenceClassification"
   ],
@@ -33,5 +33,5 @@
   "transformers_version": "4.35.2",
   "type_vocab_size": 2,
   "use_cache": true,
-  "vocab_size": 28996
+  "vocab_size": 30525
 }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a56f4cc388f1ece1288cbc2db626d27d57752ea108972addbf13f281591253f8
-size 433273844
+oid sha256:8904822494add51cb1353ddd2899772b314be500c192bb3e0083cf425c872aae
+size 437970940
runs/Feb20_12-18-47_b7bcaa3b0fd6/events.out.tfevents.1708431528.b7bcaa3b0fd6.252.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:693da275179a41f34286763d234070ecd1daf856933ef88ec5ed9ec7e34123aa
+size 7623
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -8,7 +8,7 @@
       "single_word": false,
       "special": true
     },
-    "100": {
+    "1": {
       "content": "[UNK]",
       "lstrip": false,
       "normalized": false,
@@ -16,7 +16,7 @@
       "single_word": false,
       "special": true
     },
-    "101": {
+    "2": {
       "content": "[CLS]",
       "lstrip": false,
       "normalized": false,
@@ -24,7 +24,7 @@
       "single_word": false,
       "special": true
     },
-    "102": {
+    "3": {
       "content": "[SEP]",
       "lstrip": false,
       "normalized": false,
@@ -32,21 +32,46 @@
       "single_word": false,
       "special": true
     },
-    "103": {
+    "4": {
       "content": "[MASK]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
+    },
+    "30522": {
+      "content": "<ENT>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "30523": {
+      "content": "</ENT>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "30524": {
+      "content": "[DUMMY]",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
     }
   },
   "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
   "do_basic_tokenize": true,
-  "do_lower_case": false,
+  "do_lower_case": true,
+  "full_tokenizer_file": null,
   "mask_token": "[MASK]",
-  "model_max_length": 512,
+  "model_max_length": 1000000000000000019884624838656,
   "never_split": null,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:852a9c7ef42acb0f8f5560f49764d9c66c773586f7e3c5753cd44f76dbaba9a9
+oid sha256:322719c22edf3ee22cc1580c3b3f71214c4ea60d87a90b16be8e69d373a40d58
 size 4536
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff