Asier Gutiérrez Fandiño
committed
Commit: 38cc31e
1 Parent(s): 92b420c
Initial commit
Browse files
- .gitattributes +1 -0
- README.md +30 -0
- config.json +25 -0
- dict.txt +0 -0
- merges.txt +0 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +0 -0
.gitattributes
CHANGED
@@ -25,3 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,30 @@
---
language:
- es
license: apache-2.0
tags:
- legal
- spanish
datasets:
- legal_ES
- temu_legal
metrics:
- ppl
widget:
- text: "La ley fue <mask> finalmente."
- text: "El Tribunal <mask> desestimó el recurso de amparo."
- text: "Hay base legal dentro del marco <mask> actual."

---
# Spanish Legal-domain RoBERTa

There are two main models built specifically for Spanish: BETO and a Spanish GPT-2. Multilingual BERT (mBERT) is also widely used, since it can sometimes perform better.

Both BETO and the Spanish GPT-2 were trained on fairly small corpora, 4GB and 3GB of data respectively. Although their training data is varied, it is not large enough to cover every domain. Moreover, a domain-specific BERT-like model covers the relevant vocabulary more effectively and better captures legal jargon. We present a model trained on 9GB of text drawn specifically from the legal domain.

## Citing
```
TBA
```

For more information, visit our [GitHub repository](https://github.com/PlanTL-SANIDAD/lm-legal-es).
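The card above does not include a usage snippet, so here is a minimal sketch of querying the checkpoint with the `transformers` fill-mask pipeline, reusing the first widget prompt. The local path `./legal-es-roberta` is a placeholder for a directory holding the files in this commit; the actual Hub model id is not stated in the README.

```python
# Minimal sketch: run the masked LM on one of the widget prompts.
# "./legal-es-roberta" is a placeholder path, not the published model id.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="./legal-es-roberta")

for prediction in fill_mask("La ley fue <mask> finalmente."):
    print(f"{prediction['token_str']!r}: {prediction['score']:.3f}")
```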
config.json
ADDED
@@ -0,0 +1,25 @@
{
  "architectures": [
    "RobertaForMaskedLM"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "eos_token_id": 2,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "roberta",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "transformers_version": "4.4.2",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 52000
}
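For reference, the config above describes a standard RoBERTa-base masked LM with a 52,000-token vocabulary. Below is a small sketch of instantiating that architecture directly from these values; the resulting model is randomly initialised, since the trained weights live in `pytorch_model.bin`.

```python
# Sketch: rebuild the architecture described by config.json and count its parameters.
from transformers import RobertaConfig, RobertaForMaskedLM

config = RobertaConfig(
    vocab_size=52000,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
    max_position_embeddings=514,
    type_vocab_size=1,
    pad_token_id=1,
    bos_token_id=0,
    eos_token_id=2,
)
model = RobertaForMaskedLM(config)  # untrained skeleton of the checkpoint in this commit

print(f"{sum(p.numel() for p in model.parameters()) / 1e6:.0f}M parameters")
```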
dict.txt
ADDED
The diff for this file is too large to render.
merges.txt
ADDED
The diff for this file is too large to render.
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7fd3da1f60a3d1212338f5b79c020681ac303495ba1b9aebf035b70db623fa81
size 504422671
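This is a Git LFS pointer rather than the weights themselves; after `git lfs pull`, the roughly 504 MB binary replaces it. A small sketch, assuming the file has been pulled into the repository root, that checks it against the size and SHA-256 recorded in the pointer:

```python
# Sketch: verify a pulled pytorch_model.bin against the LFS pointer above.
import hashlib
from pathlib import Path

path = Path("pytorch_model.bin")
assert path.stat().st_size == 504422671, "size mismatch"

digest = hashlib.sha256(path.read_bytes()).hexdigest()
assert digest == "7fd3da1f60a3d1212338f5b79c020681ac303495ba1b9aebf035b70db623fa81", "hash mismatch"
print("weights match the LFS pointer")
```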
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": true, "errors": "replace", "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "max_len": 512, "special_tokens_map_file": null, "name_or_path": "D:\\BSC\\corpus-utils-lm\\output\\model-ready_output\\test-2021-06-09-0211-139b-008b\\train_tokenizer_output\\train-tokenizer-2021-06-09-0231-139b-4086"}
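The `name_or_path` above still points to a local Windows directory used during tokenizer training; in practice the tokenizer is loaded from the repository files themselves (`vocab.json`, `merges.txt`, plus the two JSON files above). A short sketch, again using the placeholder local path:

```python
# Sketch: load the byte-level BPE tokenizer shipped in this commit and
# tokenize one of the widget prompts. "./legal-es-roberta" is a placeholder path.
from transformers import RobertaTokenizerFast

tokenizer = RobertaTokenizerFast.from_pretrained("./legal-es-roberta")

encoded = tokenizer("El Tribunal <mask> desestimó el recurso de amparo.")
print(encoded["input_ids"])
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))
```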
vocab.json
ADDED
The diff for this file is too large to render.