omar0scarf committed
Commit f058772 · 1 Parent(s): 59293ff

Training in progress, epoch 1

logs/events.out.tfevents.1737219212.DESKTOP-DS0E98J.11784.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7e7fc9359591cad9ad9d207ca45d5f190cf85e0950b498a99769445dab757eb
+ size 4827
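
The new file under logs/ is a TensorBoard event log written during this epoch. A minimal sketch (not part of this commit) of how its logged scalars could be inspected once the file is pulled locally, assuming the standard tensorboard package is available:

```python
# Read back any scalar metrics stored in the events.out.tfevents.* file added above.
# "logs" is the directory from this commit; EventAccumulator ships with tensorboard.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("logs")
acc.Reload()                          # parse event files found under logs/
for tag in acc.Tags()["scalars"]:     # e.g. loss or learning-rate tags, if any were logged
    for event in acc.Scalars(tag):
        print(tag, event.step, event.value)
```
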
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:be94aaced95831dff8adeab8fc75190515fc442a789fb733d72fcdddb7b02a6c
+ oid sha256:cbf5bd50317d64694030be723840bdcd3b81b2c8ac399a596c97ee411fb32943
  size 436453813
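
pytorch_model.bin itself is stored through Git LFS, so the diff above only swaps the pointer's oid, which is the SHA-256 of the file contents. A minimal sketch (not part of this commit) for verifying that a locally downloaded copy matches the updated pointer:

```python
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    """Stream the file and return its hex SHA-256 digest."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid taken from the new LFS pointer above.
expected = "cbf5bd50317d64694030be723840bdcd3b81b2c8ac399a596c97ee411fb32943"
assert sha256_of("pytorch_model.bin") == expected
```
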
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
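
These entries become the tokenizer's special-token attributes when the repository is loaded. A minimal sketch (not part of this commit), assuming the tokenizer files are loaded from a local checkout of the repo:

```python
from transformers import AutoTokenizer

# "." stands for a local checkout containing the tokenizer files added in this commit.
tokenizer = AutoTokenizer.from_pretrained(".")
assert tokenizer.cls_token == "[CLS]"
assert tokenizer.sep_token == "[SEP]"
assert tokenizer.pad_token == "[PAD]"
assert tokenizer.mask_token == "[MASK]"
assert tokenizer.unk_token == "[UNK]"
```
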
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": false,
+   "full_tokenizer_file": null,
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
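
Here "do_lower_case": false marks a cased BERT tokenizer, and the very large model_max_length is the library's sentinel value used when no maximum input length is registered. The four tokenizer files in this commit (tokenizer_config.json, special_tokens_map.json, tokenizer.json, vocab.txt) are the usual output of transformers' save_pretrained for a BertTokenizer; a minimal round-trip sketch (not part of this commit), assuming a local checkout:

```python
from transformers import BertTokenizerFast

# Load the cased WordPiece tokenizer from the files added in this commit ...
tokenizer = BertTokenizerFast.from_pretrained(".")

# ... and writing it back out should recreate the same set of files.
tokenizer.save_pretrained("./roundtrip")
```
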
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:808a8e9eb31fb39eee45896284bb72d436c5b64a410edc5df75b1ec0b11f38d3
+ oid sha256:cd51c00d1a4ad8c6134a948570552248f8da53501a4d157d71c47cc9eb417b85
  size 3963
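
training_args.bin is the TrainingArguments object that transformers' Trainer serializes with torch.save alongside each checkpoint; as with the model weights, the diff above only swaps the LFS pointer. A minimal sketch (not part of this commit) for inspecting the file locally:

```python
import torch

# training_args.bin is a pickled transformers.TrainingArguments object, so
# newer torch versions need weights_only=False to unpickle it.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.per_device_train_batch_size, args.learning_rate)
```
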
vocab.txt ADDED
The diff for this file is too large to render. See raw diff
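
vocab.txt is the WordPiece vocabulary used by the BertTokenizer above: one token per line, with the line index as the token id. A minimal sketch (not part of this commit) for loading it directly:

```python
# Build a token -> id mapping from the plain-text vocabulary file.
with open("vocab.txt", encoding="utf-8") as f:
    vocab = {token.rstrip("\n"): idx for idx, token in enumerate(f)}

print(len(vocab))          # vocabulary size
print(vocab["[CLS]"])      # id of the [CLS] special token
```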