Commit 663bfea
Parent(s): de4bbb1
Training complete

Files changed:
- README.md +4 -3
- config.json +1 -1
- pytorch_model.bin +2 -2
- tokenizer_config.json +1 -1
- training_args.bin +2 -2
README.md CHANGED

```diff
@@ -1,5 +1,6 @@
 ---
 license: mit
+base_model: papluca/xlm-roberta-base-language-detection
 tags:
 - classification
 - generated_from_trainer
@@ -42,7 +43,7 @@ The following hyperparameters were used during training:
 
 ### Framework versions
 
-- Transformers 4.
-- Pytorch 2.0.
-- Datasets 2.
+- Transformers 4.33.2
+- Pytorch 2.0.1+cu118
+- Datasets 2.14.5
 - Tokenizers 0.13.3
```
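The new `base_model` field records that the classifier was fine-tuned from papluca/xlm-roberta-base-language-detection, and the Framework versions section now pins Transformers 4.33.2 and PyTorch 2.0.1+cu118. A minimal usage sketch for the resulting single-label classifier; the repository id below is a placeholder, since this commit does not name the repo:

```python
# Minimal usage sketch. "your-namespace/your-finetuned-model" is a placeholder;
# the actual repository id is not part of this commit.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "your-namespace/your-finetuned-model"  # hypothetical
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)

inputs = tokenizer("Bonjour tout le monde", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(dim=-1).item()])
```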
config.json CHANGED

```diff
@@ -66,7 +66,7 @@
   "position_embedding_type": "absolute",
   "problem_type": "single_label_classification",
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.33.2",
   "type_vocab_size": 1,
   "use_cache": true,
   "vocab_size": 250002
```
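The only change to config.json is the `transformers_version` stamp, updated to match the library used for this run. A small sketch, assuming a local copy of the file, that checks the stamp and the fields visible in the diff:

```python
# Sketch: read the exported config.json and confirm the fields shown in the diff.
import json

with open("config.json") as f:
    config = json.load(f)

print(config["transformers_version"])  # "4.33.2" after this commit
print(config["problem_type"])          # "single_label_classification"
print(config["vocab_size"])            # 250002 (XLM-RoBERTa vocabulary)
```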
pytorch_model.bin CHANGED

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d09339f6b60dfb9440ef35f2718619ed24b43db83ac73e224aaef5e28afc98fe
+size 1112305073
```
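pytorch_model.bin is tracked with Git LFS, so the committed file is only a pointer: the spec version, the SHA-256 of the real payload (`oid`), and its size in bytes. A sketch, assuming the weights have been downloaded next to the script, that verifies a local copy against this pointer:

```python
# Sketch: verify a downloaded pytorch_model.bin against the Git LFS pointer
# from this commit (oid = SHA-256 of the payload, size in bytes).
import hashlib
import os

EXPECTED_OID = "d09339f6b60dfb9440ef35f2718619ed24b43db83ac73e224aaef5e28afc98fe"
EXPECTED_SIZE = 1112305073
path = "pytorch_model.bin"  # assumed local path

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
assert digest.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")
```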
tokenizer_config.json CHANGED

```diff
@@ -1,5 +1,6 @@
 {
   "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
   "cls_token": "<s>",
   "eos_token": "</s>",
   "mask_token": {
@@ -13,7 +14,6 @@
   "model_max_length": 512,
   "pad_token": "<pad>",
   "sep_token": "</s>",
-  "special_tokens_map_file": null,
   "tokenizer_class": "XLMRobertaTokenizer",
   "unk_token": "<unk>"
 }
```
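The tokenizer config gains an explicit `clean_up_tokenization_spaces: true` and drops the legacy `special_tokens_map_file` entry. The flag controls whether `decode()` removes detokenization artifacts such as spaces before punctuation; a sketch (placeholder repository id again):

```python
# Sketch: effect of clean_up_tokenization_spaces on decoding.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("your-namespace/your-finetuned-model")  # hypothetical

ids = tokenizer("Hello, world!")["input_ids"]
# Default (True after this commit): spaces before punctuation are cleaned up.
print(tokenizer.decode(ids, skip_special_tokens=True))
# Override per call to keep the raw detokenized spacing.
print(tokenizer.decode(ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))
```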
training_args.bin CHANGED

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6ffb3fc798153faedd56b3a5529c3132a3e034c0f47b9b378c1580de971aae7c
+size 4091
```
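training_args.bin is the pickled `TrainingArguments` object that `Trainer` saves next to the checkpoint, which is why it is only about 4 KB. A sketch for inspecting it; since it is a pickle, only load files you trust:

```python
# Sketch: inspect the saved TrainingArguments. The file is a pickle produced by
# torch.save, so only load it from a trusted source. On recent PyTorch releases
# you may need weights_only=False to allow unpickling arbitrary objects.
import torch

training_args = torch.load("training_args.bin")
print(training_args.learning_rate)
print(training_args.num_train_epochs)
print(training_args.per_device_train_batch_size)
```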