Fill-Mask · Transformers · PyTorch · xlm-roberta · Inference Endpoints
fenchri committed
Commit 299a80c
1 parent: 41f8bc6
added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"</nl>": 250053, "<en>": 250015, "<ml>": 250010, "</tr>": 250076, "</it>": 250063, "</th>": 250071, "</pt>": 250064, "</he>": 250042, "<fa>": 250022, "<ka>": 250030, "<zh>": 250035, "</ja>": 250068, "</de>": 250055, "</sw>": 250072, "</ka>": 250069, "</vi>": 250043, "</ms>": 250046, "</eu>": 250048, "</hi>": 250058, "</kk>": 250075, "</mr>": 250059, "<sw>": 250033, "</ko>": 250070, "<jv>": 250006, "<tr>": 250037, "</bn>": 250057, "<ja>": 250029, "<eu>": 250009, "<bn>": 250018, "</ar>": 250041, "</af>": 250052, "<ur>": 250021, "<pt>": 250025, "</my>": 250073, "<ru>": 250028, "<es>": 250026, "</ru>": 250067, "</ml>": 250049, "<kk>": 250036, "<ta>": 250011, "<te>": 250012, "<th>": 250032, "</ur>": 250060, "</et>": 250077, "<fi>": 250039, "<hi>": 250019, "<fr>": 250023, "<ms>": 250007, "<af>": 250013, "</hu>": 250079, "<tl>": 250008, "</es>": 250065, "<nl>": 250014, "<bg>": 250027, "</tl>": 250047, "<it>": 250024, "<my>": 250034, "</ta>": 250050, "<hu>": 250040, "</fa>": 250061, "</bg>": 250066, "</jv>": 250045, "<et>": 250038, "<el>": 250017, "</te>": 250051, "</en>": 250054, "<ko>": 250031, "<he>": 250003, "<id>": 250005, "</zh>": 250074, "<ar>": 250002, "</id>": 250044, "</el>": 250056, "</fr>": 250062, "<vi>": 250004, "</fi>": 250078, "<mr>": 250020, "<de>": 250016}
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_name_or_path": "xlm-roberta-base-nodp",
+   "architectures": [
+     "XLMRobertaForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.15.0",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 250080
+ }
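The config describes a base-size XLM-R encoder (12 layers, 768 hidden units, 12 attention heads) with both dropout probabilities zeroed (matching the "-nodp" suffix, presumably "no dropout") and the vocabulary grown from XLM-R's 250 002 tokens to 250 080 to cover the 78 language tags. A sketch of rebuilding this config programmatically with transformers:

from transformers import XLMRobertaConfig, XLMRobertaForMaskedLM

# Values copied from config.json above.
config = XLMRobertaConfig(
    vocab_size=250080,            # 250 002 base tokens + 78 language tags
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
    hidden_act="gelu",
    hidden_dropout_prob=0.0,      # dropout disabled
    attention_probs_dropout_prob=0.0,
    max_position_embeddings=514,
    type_vocab_size=1,
    layer_norm_eps=1e-05,
    pad_token_id=1,
    bos_token_id=0,
    eos_token_id=2,
)

# Builds a randomly initialised model of this shape; to load the committed
# checkpoint, use XLMRobertaForMaskedLM.from_pretrained(<repo id>) instead.
model = XLMRobertaForMaskedLM(config)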
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+ size 5069051
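sentencepiece.bpe.model is stored through Git LFS, so what is committed is only a pointer recording the blob's SHA-256 and byte size. A minimal sketch of checking a downloaded copy against that pointer (the local filename is an assumption):

import hashlib
import os

PATH = "sentencepiece.bpe.model"  # assumed local download path
EXPECTED_OID = "cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865"
EXPECTED_SIZE = 5069051

# Cheap size check first, then the full content hash from the pointer.
assert os.path.getsize(PATH) == EXPECTED_SIZE, "size mismatch"
with open(PATH, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == EXPECTED_OID, "sha256 mismatch"
print("pointer matches blob")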
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}, "additional_special_tokens": ["<ar>", "<he>", "<vi>", "<id>", "<jv>", "<ms>", "<tl>", "<eu>", "<ml>", "<ta>", "<te>", "<af>", "<nl>", "<en>", "<de>", "<el>", "<bn>", "<hi>", "<mr>", "<ur>", "<fa>", "<fr>", "<it>", "<pt>", "<es>", "<bg>", "<ru>", "<ja>", "<ka>", "<ko>", "<th>", "<sw>", "<my>", "<zh>", "<kk>", "<tr>", "<et>", "<fi>", "<hu>", "</ar>", "</he>", "</vi>", "</id>", "</jv>", "</ms>", "</tl>", "</eu>", "</ml>", "</ta>", "</te>", "</af>", "</nl>", "</en>", "</de>", "</el>", "</bn>", "</hi>", "</mr>", "</ur>", "</fa>", "</fr>", "</it>", "</pt>", "</es>", "</bg>", "</ru>", "</ja>", "</ka>", "</ko>", "</th>", "</sw>", "</my>", "</zh>", "</kk>", "</tr>", "</et>", "</fi>", "</hu>"]}
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "special_tokens_map_file": null, "name_or_path": "xlm-roberta-base-nodp", "tokenizer_class": "XLMRobertaTokenizer"}