UchihaMadara committed
Commit 5706157 · 1 Parent(s): f660bb9

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,20 @@
+
+ # Pretrained checkpoint: roberta-large-mnli
+ # Training hyperparameters
+ The following hyperparameters were used during training (a runnable sketch follows this diff):
+ - learning_rate: 2e-05
+ - train_batch_size: 24
+ - eval_batch_size: 24
+ - optimizer: Adam with betas=(0.9, 0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 3
+
+ # Training results
+
+ | Epoch | Train loss | Test loss | Subtask 3 F1 | Subtask 3 precision | Subtask 3 recall | Subtask 4 accuracy |
+ |:-----:|:----------:|:---------:|:------------:|:-------------------:|:----------------:|:------------------:|
+ | 1 | 340.1608857823303 | 68.94318291614763 | 0.8756704046806436 | 0.8752436647173489 | 0.8760975609756098 | 0.8458536585365853 |
+ | 2 | 148.33983786634053 | 36.02450433204649 | 0.9217221135029354 | 0.9244357212953876 | 0.9190243902439025 | 0.8741463414634146 |
+ | 3 | 60.1067302722804 | 29.687325364822755 | 0.9230769230769231 | 0.9393939393939394 | 0.9073170731707317 | 0.8848780487804878 |
+
+
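The hyperparameters listed in the README map directly onto `TrainingArguments` in the `transformers` Trainer API. The sketch below reproduces them; note the README says "Adam" while the Trainer's default optimizer is AdamW with exactly these betas and epsilon. `output_dir` and the `train_ds`/`eval_ds` datasets are placeholders, not part of this upload.

```python
# Sketch of a fine-tuning run matching the hyperparameters in README.md.
# output_dir and train_ds/eval_ds are illustrative placeholders.
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

checkpoint = "roberta-large-mnli"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)

args = TrainingArguments(
    output_dir="out",                # placeholder
    learning_rate=2e-5,              # learning_rate: 2e-05
    per_device_train_batch_size=24,  # train_batch_size: 24
    per_device_eval_batch_size=24,   # eval_batch_size: 24
    num_train_epochs=3,              # num_epochs: 3
    lr_scheduler_type="linear",      # lr_scheduler_type: linear
    adam_beta1=0.9,                  # Adam betas=(0.9, 0.999)
    adam_beta2=0.999,
    adam_epsilon=1e-8,               # epsilon=1e-08
)

# trainer = Trainer(model=model, args=args,
#                   train_dataset=train_ds, eval_dataset=eval_ds)
# trainer.train()
```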
config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "roberta-large-mnli",
+   "_num_labels": 3,
+   "architectures": [
+     "RobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "CONTRADICTION",
+     "1": "NEUTRAL",
+     "2": "ENTAILMENT"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "CONTRADICTION": 0,
+     "ENTAILMENT": 2,
+     "NEUTRAL": 1
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.28.1",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 50265
+ }
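For inference, the `id2label` map in this config converts the argmax logit index into an NLI label. A minimal sketch; the premise/hypothesis pair is illustrative:

```python
# NLI inference sketch using the id2label mapping from config.json above.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

checkpoint = "roberta-large-mnli"  # or the path to the uploaded folder
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
model.eval()

inputs = tokenizer("A man is playing a guitar.", "Someone is making music.",
                   return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 3)
label = model.config.id2label[logits.argmax(-1).item()]
print(label)  # CONTRADICTION, NEUTRAL, or ENTAILMENT
```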
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:317cdbad9cb0f7a5b8bced916d858323890f707fe19a25438ae03108fa6b1290
+ size 1421591285
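These three lines are a Git LFS pointer, not the weights themselves: the real file is stored content-addressed by its SHA-256. A sketch for verifying a downloaded `pytorch_model.bin` against the pointer's `oid` and `size`:

```python
# Verify a downloaded pytorch_model.bin against the LFS pointer above.
import hashlib
import os

path = "pytorch_model.bin"
expected_oid = "317cdbad9cb0f7a5b8bced916d858323890f707fe19a25438ae03108fa6b1290"
expected_size = 1421591285

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
        digest.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert digest.hexdigest() == expected_oid, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")
```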
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "errors": "replace",
+   "mask_token": "<mask>",
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "RobertaTokenizer",
+   "trim_offsets": true,
+   "unk_token": "<unk>"
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff
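`tokenizer_config.json`, `special_tokens_map.json`, `merges.txt`, and `vocab.json` together define the byte-level BPE tokenizer, and `AutoTokenizer` reads them all automatically. A quick sanity check; loading from the hub checkpoint here, though a local path to the uploaded folder works the same way:

```python
# Load the tokenizer defined by the files above and inspect its settings.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("roberta-large-mnli")
print(tok.model_max_length)                          # 512 (tokenizer_config.json)
print(tok.cls_token, tok.sep_token)                  # <s> </s>
print(tok.mask_token, tok.pad_token, tok.unk_token)  # <mask> <pad> <unk>
```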