hcene committed · Commit 6f2175b · verified · 1 Parent(s): c196260

hcene/Camembert-xnli
README.md ADDED
@@ -0,0 +1,116 @@
+ ---
+ license: mit
+ library_name: peft
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ - precision
+ - recall
+ - f1
+ base_model: MoritzLaurer/mDeBERTa-v3-base-mnli-xnli
+ model-index:
+ - name: legal-data-mDeBERTa_V3
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # legal-data-mDeBERTa_V3
+
+ This model is a fine-tuned version of [MoritzLaurer/mDeBERTa-v3-base-mnli-xnli](https://huggingface.co/MoritzLaurer/mDeBERTa-v3-base-mnli-xnli) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.6731
+ - Accuracy: 0.7634
+ - Precision: 0.7683
+ - Recall: 0.7644
+ - F1: 0.7623
+ - Ratio: 0.3297
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (see the hedged `TrainingArguments` sketch after this list):
+ - learning_rate: 0.005
+ - train_batch_size: 20
+ - eval_batch_size: 16
+ - seed: 42
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 40
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.06
+ - lr_scheduler_warmup_steps: 4
+ - num_epochs: 15
+ - label_smoothing_factor: 0.1
+
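The card lists only the raw hyperparameter values. As a hedged sketch that is not part of this commit, they might map onto `transformers.TrainingArguments` roughly as follows; `output_dir` and anything not listed above is an assumption.

```python
# Hedged sketch only: one plausible TrainingArguments mapping for the values
# listed above. output_dir is an assumption; it is not recorded in this card.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="legal-data-mDeBERTa_V3",  # assumption, reusing the card title
    learning_rate=5e-3,                   # 0.005
    per_device_train_batch_size=20,
    per_device_eval_batch_size=16,
    seed=42,
    gradient_accumulation_steps=2,        # 20 x 2 = total_train_batch_size 40
    lr_scheduler_type="linear",
    warmup_ratio=0.06,
    warmup_steps=4,                       # when > 0, this takes precedence over warmup_ratio
    num_train_epochs=15,
    label_smoothing_factor=0.1,
    # The Adam betas (0.9, 0.999) and epsilon 1e-08 listed above match the
    # Trainer's default AdamW settings, so no explicit optimizer args are needed.
)
```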
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | Ratio |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:|:------:|
+ | 1.4203 | 0.34 | 10 | 1.5822 | 0.6022 | 0.6054 | 0.6046 | 0.5997 | 0.3226 |
+ | 1.1177 | 0.69 | 20 | 0.8339 | 0.7240 | 0.7270 | 0.7253 | 0.7258 | 0.3262 |
+ | 0.9484 | 1.03 | 30 | 0.7998 | 0.7168 | 0.7610 | 0.7192 | 0.6951 | 0.3190 |
+ | 0.9257 | 1.38 | 40 | 0.7183 | 0.7204 | 0.7221 | 0.7220 | 0.7219 | 0.3297 |
+ | 0.9529 | 1.72 | 50 | 0.7397 | 0.6989 | 0.7022 | 0.7001 | 0.6959 | 0.3297 |
+ | 0.9111 | 2.07 | 60 | 0.6820 | 0.7204 | 0.7215 | 0.7216 | 0.7188 | 0.3333 |
+ | 0.9021 | 2.41 | 70 | 0.6832 | 0.7563 | 0.7644 | 0.7570 | 0.7509 | 0.3333 |
+ | 0.8849 | 2.76 | 80 | 0.7858 | 0.7204 | 0.7365 | 0.7227 | 0.7079 | 0.3297 |
+ | 0.8767 | 3.1 | 90 | 0.8523 | 0.5520 | 0.6258 | 0.5527 | 0.5677 | 0.1935 |
+ | 0.9186 | 3.45 | 100 | 0.6877 | 0.7276 | 0.7430 | 0.7283 | 0.7183 | 0.3262 |
+ | 0.9127 | 3.79 | 110 | 0.6426 | 0.7348 | 0.7398 | 0.7357 | 0.7298 | 0.3333 |
+ | 0.9126 | 4.14 | 120 | 0.7509 | 0.7348 | 0.7564 | 0.7370 | 0.7215 | 0.3297 |
+ | 0.8477 | 4.48 | 130 | 0.6818 | 0.7491 | 0.7684 | 0.7497 | 0.7406 | 0.3262 |
+ | 0.8747 | 4.83 | 140 | 0.7813 | 0.6810 | 0.7704 | 0.6842 | 0.6067 | 0.3262 |
+ | 0.9112 | 5.17 | 150 | 0.7799 | 0.7204 | 0.8141 | 0.7205 | 0.6686 | 0.3297 |
+ | 0.8767 | 5.52 | 160 | 0.7959 | 0.6989 | 0.8418 | 0.7021 | 0.6271 | 0.3297 |
+ | 0.863 | 5.86 | 170 | 0.7007 | 0.7240 | 0.7395 | 0.7247 | 0.7139 | 0.3262 |
+ | 0.9029 | 6.21 | 180 | 0.6524 | 0.7634 | 0.7717 | 0.7642 | 0.7621 | 0.3262 |
+ | 0.8427 | 6.55 | 190 | 0.7417 | 0.7133 | 0.7374 | 0.7157 | 0.6957 | 0.3262 |
+ | 0.8945 | 6.9 | 200 | 0.7312 | 0.7527 | 0.7738 | 0.7532 | 0.7437 | 0.3262 |
+ | 0.8913 | 7.24 | 210 | 0.6410 | 0.7455 | 0.7523 | 0.7473 | 0.7433 | 0.3297 |
+ | 0.8848 | 7.59 | 220 | 0.7137 | 0.7563 | 0.7585 | 0.7574 | 0.7567 | 0.3297 |
+ | 0.8553 | 7.93 | 230 | 0.6940 | 0.7599 | 0.7743 | 0.7605 | 0.7530 | 0.3297 |
+ | 0.8154 | 8.28 | 240 | 0.6460 | 0.7276 | 0.7453 | 0.7298 | 0.7154 | 0.3297 |
+ | 0.8842 | 8.62 | 250 | 0.7455 | 0.7563 | 0.7694 | 0.7570 | 0.7498 | 0.3297 |
+ | 0.8773 | 8.97 | 260 | 0.7369 | 0.7348 | 0.7490 | 0.7367 | 0.7291 | 0.3262 |
+ | 0.8615 | 9.31 | 270 | 0.6577 | 0.7455 | 0.7539 | 0.7464 | 0.7411 | 0.3297 |
+ | 0.8664 | 9.66 | 280 | 0.6970 | 0.7563 | 0.7631 | 0.7580 | 0.7545 | 0.3297 |
+ | 0.8855 | 10.0 | 290 | 0.7167 | 0.7204 | 0.7269 | 0.7224 | 0.7169 | 0.3297 |
+ | 0.8564 | 10.34 | 300 | 0.6808 | 0.7670 | 0.7846 | 0.7676 | 0.7594 | 0.3297 |
+ | 0.841 | 10.69 | 310 | 0.6604 | 0.7455 | 0.7491 | 0.7472 | 0.7455 | 0.3297 |
+ | 0.8415 | 11.03 | 320 | 0.7150 | 0.7563 | 0.7694 | 0.7570 | 0.7498 | 0.3297 |
+ | 0.848 | 11.38 | 330 | 0.6495 | 0.7670 | 0.7685 | 0.7682 | 0.7680 | 0.3297 |
+ | 0.8648 | 11.72 | 340 | 0.7094 | 0.7348 | 0.7562 | 0.7369 | 0.7245 | 0.3262 |
+ | 0.8465 | 12.07 | 350 | 0.7125 | 0.7384 | 0.7758 | 0.7387 | 0.7181 | 0.3262 |
+ | 0.8875 | 12.41 | 360 | 0.6962 | 0.7563 | 0.7590 | 0.7573 | 0.7564 | 0.3297 |
+ | 0.8192 | 12.76 | 370 | 0.6496 | 0.7455 | 0.7539 | 0.7464 | 0.7411 | 0.3297 |
+ | 0.8089 | 13.1 | 380 | 0.6569 | 0.7599 | 0.7621 | 0.7613 | 0.7607 | 0.3297 |
+ | 0.8191 | 13.45 | 390 | 0.6808 | 0.7348 | 0.7679 | 0.7372 | 0.7150 | 0.3297 |
+ | 0.8468 | 13.79 | 400 | 0.6843 | 0.7670 | 0.7789 | 0.7677 | 0.7621 | 0.3297 |
+ | 0.8277 | 14.14 | 410 | 0.6630 | 0.7599 | 0.7660 | 0.7607 | 0.7578 | 0.3297 |
+ | 0.8159 | 14.48 | 420 | 0.6621 | 0.7599 | 0.7650 | 0.7608 | 0.7584 | 0.3297 |
+ | 0.8803 | 14.83 | 430 | 0.6731 | 0.7634 | 0.7683 | 0.7644 | 0.7623 | 0.3297 |
+
+
+ ### Framework versions
+
+ - PEFT 0.9.0
+ - Transformers 4.39.0.dev0
+ - Pytorch 2.1.0+cu121
+ - Datasets 2.18.0
+ - Tokenizers 0.15.2
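For readers who want to try the adapter described in the card above, here is a minimal inference sketch, not taken from this commit: it loads the base mDeBERTa NLI checkpoint, attaches the LoRA adapter from this repository (`hcene/Camembert-xnli`, as shown in the commit header), and scores a made-up French premise/hypothesis pair.

```python
# Minimal inference sketch (an assumption, not part of this commit): base NLI
# model plus the LoRA adapter from this repository, used for NLI-style scoring.
import torch
from peft import PeftModel
from transformers import AutoModelForSequenceClassification, AutoTokenizer

base_id = "MoritzLaurer/mDeBERTa-v3-base-mnli-xnli"
adapter_id = "hcene/Camembert-xnli"  # repository shown in the commit header

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForSequenceClassification.from_pretrained(base_id)
model = PeftModel.from_pretrained(base_model, adapter_id)
model.eval()

# Made-up example pair; any premise/hypothesis strings work the same way.
premise = "Le contrat prend fin au 31 décembre 2024."
hypothesis = "Le contrat est à durée indéterminée."

inputs = tokenizer(premise, hypothesis, truncation=True, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
probs = torch.softmax(logits, dim=-1)[0]

# Label names come from the base model's config rather than being hard-coded,
# so the printout follows whatever id2label mapping the checkpoint defines.
for label_id, label in base_model.config.id2label.items():
    print(label, round(probs[label_id].item(), 3))
```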
adapter_config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "MoritzLaurer/mDeBERTa-v3-base-mnli-xnli",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 8,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "classifier"
+   ],
+   "task_type": "SEQ_CLS",
+   "use_dora": false,
+   "use_rslora": false
+ }
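As a hedged reconstruction that is not part of this commit, the JSON above corresponds roughly to the following PEFT `LoraConfig`; the remaining keys in the file are defaults that PEFT 0.9.0 fills in when the config is saved.

```python
# Hedged reconstruction of the adapter_config.json above using the PEFT API.
# base_model_name_or_path is normally recorded automatically when the adapter
# is created and saved, so it is not set explicitly here.
from peft import LoraConfig, TaskType

lora_config = LoraConfig(
    task_type=TaskType.SEQ_CLS,     # sequence classification head
    r=16,                           # LoRA rank
    lora_alpha=8,
    lora_dropout=0.05,
    bias="none",
    target_modules=["classifier"],  # LoRA applied only to the classifier module
    use_rslora=False,
    use_dora=False,
)
```

Applying this with `get_peft_model(base_model, lora_config)` and saving the result would regenerate a config of this shape; note that only the `classifier` module is targeted, so the LoRA update touches the classification head rather than the attention layers.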
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b6c50c1968ff3c3d22f37ce79312791f3c9a4c0859697c43b63cc8f63bb08c5
+ size 768987780
logs/events.out.tfevents.1709401842.1035aef27825.205.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:84841c24b54d3ce8665dc4d88a940b79256f1bc7d0bd5407b9ea2f905e261f6b
+ size 8228
logs/events.out.tfevents.1709402144.1035aef27825.205.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa29ddd673e3264eadd09fb56c961caefc1cae9dafb75c8d2fc12bab2e59c748
+ size 7065
logs/events.out.tfevents.1709402291.1035aef27825.205.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9869e9ef81d0350f71da8777fb8070ca923f1e51bcdca1b15e1100061060f627
+ size 37291
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dec11c4ec285a5f0e3fd69baf6b48238716fee4ad2d475c4dda410e982322bc8
+ size 4856