KoichiYasuoka committed
Commit 1be983c · 1 Parent(s): 5d1d2cf

model improved

Files changed (4)
  1. config.json +0 -0
  2. pytorch_model.bin +2 -2
  3. supar.model +2 -2
  4. tokenizer_config.json +0 -4
config.json CHANGED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d9202a28d96ff1adb3071bfc6b3b95edbec0e8545a81db9acb10c6bd266935cf
- size 499571746
+ oid sha256:c0242cb69bc8ce9a0ffaa95932e4bb6b0dce1dcc42df5b9f2b2a6dbd176c262a
+ size 499606165
supar.model CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8c87a3eb9f5815493ae63d0b81b854e2e69fb0defa87a8b4dc4434d30f52c220
- size 547476098
+ oid sha256:ac68c178bbac6a8a7582923971bdb10d0e1ecdd1f7fa1335a16569cc12ad0160
+ size 547493197
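
Both pytorch_model.bin and supar.model are stored through Git LFS, so the two diffs above only swap the pointer metadata: the sha256 oid and byte size of the new weights. A minimal sketch of how such a pointer can be checked against a downloaded blob; the file paths are placeholders for illustration, not part of this commit:

import hashlib

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Check a downloaded blob against the oid/size recorded in its Git LFS pointer."""
    fields = {}
    with open(pointer_path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    algo, _, expected_oid = fields["oid"].partition(":")  # e.g. "sha256:c0242cb6..."
    if algo != "sha256":
        return False

    digest = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == int(fields["size"])

# Hypothetical local paths; the pointer text is what this commit actually stores.
print(verify_lfs_pointer("pytorch_model.bin.pointer", "pytorch_model.bin"))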
tokenizer_config.json CHANGED
@@ -46,7 +46,6 @@
  "do_basic_tokenize": true,
  "do_lower_case": true,
  "mask_token": "[MASK]",
- "max_length": 510,
  "model_max_length": 512,
  "never_split": [
  "[CLS]",
@@ -57,11 +56,8 @@
  ],
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
- "stride": 0,
  "strip_accents": false,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
- "truncation_side": "right",
- "truncation_strategy": "longest_first",
  "unk_token": "[UNK]"
  }
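
The tokenizer_config.json change drops the stored call-time overrides (max_length, stride, truncation_side, truncation_strategy) and leaves model_max_length: 512 as the only length limit, so truncation is now opt-in per call. A minimal sketch with transformers, using a placeholder model id in place of this repository's actual one:

from transformers import AutoTokenizer

# "KoichiYasuoka/this-model" is a placeholder for the repository id (assumption).
tokenizer = AutoTokenizer.from_pretrained("KoichiYasuoka/this-model")

# With max_length/stride/truncation_strategy gone from tokenizer_config.json,
# only model_max_length (512) remains, and truncation must be requested per call.
enc = tokenizer("some long text " * 500, truncation=True)
print(len(enc["input_ids"]))  # capped at tokenizer.model_max_length == 512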