add files
- README.md +9 -0
- added_tokens.json +1 -0
- config.json +95 -0
- generation_config.json +8 -0
- merges.txt +0 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +0 -0
README.md
CHANGED
@@ -1,3 +1,12 @@
 ---
+language: code
 license: apache-2.0
+
 ---
+
+# CodeReviewer
+
+## Fine-tuning Process
+
+We fine-tuned the [original CodeReviewer model](https://huggingface.co/microsoft/codereviewer) on the [Comment Generation dataset](https://zenodo.org/record/6900648) using a single A100 GPU, 8 CPU cores, and 20 GB of RAM. The fine-tuning process ran for 12 hours.
+
+For further information, please consider taking a look at the original [CodeReviewer paper](https://arxiv.org/abs/2203.09095).
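The README stops short of a usage snippet. As a minimal sketch, loading this checkpoint with `transformers` might look like the following, assuming the files from this commit sit in the current directory; the class names match `architectures` in config.json and `tokenizer_class` in tokenizer_config.json, and the input string is a placeholder:

```python
# Minimal usage sketch (untested); assumes this repo's files are in "./".
from transformers import RobertaTokenizer, T5ForConditionalGeneration

tokenizer = RobertaTokenizer.from_pretrained("./")
model = T5ForConditionalGeneration.from_pretrained("./")

# Encode a diff hunk (placeholder text) and generate a review comment.
inputs = tokenizer("...diff hunk...", return_tensors="pt",
                   truncation=True, max_length=512)
outputs = model.generate(**inputs, max_length=128, num_beams=4)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```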
added_tokens.json
ADDED
@@ -0,0 +1 @@
+{"<keep>": 32100, "<add>": 32101, "<del>": 32102, "<start>": 32103, "<end>": 32104, "<e99>": 32105, "<e98>": 32106, "<e97>": 32107, "<e96>": 32108, "<e95>": 32109, "<e94>": 32110, "<e93>": 32111, "<e92>": 32112, "<e91>": 32113, "<e90>": 32114, "<e89>": 32115, "<e88>": 32116, "<e87>": 32117, "<e86>": 32118, "<e85>": 32119, "<e84>": 32120, "<e83>": 32121, "<e82>": 32122, "<e81>": 32123, "<e80>": 32124, "<e79>": 32125, "<e78>": 32126, "<e77>": 32127, "<e76>": 32128, "<e75>": 32129, "<e74>": 32130, "<e73>": 32131, "<e72>": 32132, "<e71>": 32133, "<e70>": 32134, "<e69>": 32135, "<e68>": 32136, "<e67>": 32137, "<e66>": 32138, "<e65>": 32139, "<e64>": 32140, "<e63>": 32141, "<e62>": 32142, "<e61>": 32143, "<e60>": 32144, "<e59>": 32145, "<e58>": 32146, "<e57>": 32147, "<e56>": 32148, "<e55>": 32149, "<e54>": 32150, "<e53>": 32151, "<e52>": 32152, "<e51>": 32153, "<e50>": 32154, "<e49>": 32155, "<e48>": 32156, "<e47>": 32157, "<e46>": 32158, "<e45>": 32159, "<e44>": 32160, "<e43>": 32161, "<e42>": 32162, "<e41>": 32163, "<e40>": 32164, "<e39>": 32165, "<e38>": 32166, "<e37>": 32167, "<e36>": 32168, "<e35>": 32169, "<e34>": 32170, "<e33>": 32171, "<e32>": 32172, "<e31>": 32173, "<e30>": 32174, "<e29>": 32175, "<e28>": 32176, "<e27>": 32177, "<e26>": 32178, "<e25>": 32179, "<e24>": 32180, "<e23>": 32181, "<e22>": 32182, "<e21>": 32183, "<e20>": 32184, "<e19>": 32185, "<e18>": 32186, "<e17>": 32187, "<e16>": 32188, "<e15>": 32189, "<e14>": 32190, "<e13>": 32191, "<e12>": 32192, "<e11>": 32193, "<e10>": 32194, "<e9>": 32195, "<e8>": 32196, "<e7>": 32197, "<e6>": 32198, "<e5>": 32199, "<e4>": 32200, "<e3>": 32201, "<e2>": 32202, "<e1>": 32203, "<e0>": 32204, "<msg>": 32205, "<en>": 32206, "<python>": 32207, "<java>": 32208, "<javascript>": 32209, "<ruby>": 32210, "<php>": 32211, "<go>": 32212, "<c>": 32213, "<c_sharp>": 32214, "<c_plus_plus>": 32215}
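These 116 entries (ids 32100-32215) extend the 32100-token CodeT5 base vocabulary to the `vocab_size` of 32216 declared in config.json: the diff-edit markers `<keep>`/`<add>`/`<del>`, span markers `<e0>` through `<e99>` plus `<start>`/`<end>`, the `<msg>` tag, and ten language tags. A quick sanity check of the table above, as a sketch:

```python
# Sketch: sanity-check the added-token table shown above.
import json

with open("added_tokens.json") as f:
    added = json.load(f)

assert len(added) == 116                        # 32216 - 32100 base entries
assert added["<keep>"] == 32100
assert added["<c_plus_plus>"] == 32215
assert all(32100 <= i <= 32215 for i in added.values())
```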
config.json
ADDED
@@ -0,0 +1,95 @@
+{
+  "_name_or_path": "/content/drive/MyDrive/CodeT5/pretrained_models/codet5_base",
+  "add_token_id": 32101,
+  "architectures": [
+    "T5ForConditionalGeneration"
+  ],
+  "bos_token_id": 1,
+  "d_ff": 3072,
+  "d_kv": 64,
+  "d_model": 768,
+  "decoder_start_token_id": 0,
+  "del_token_id": 32102,
+  "dropout_rate": 0.1,
+  "end_token_id": 32104,
+  "eos_token_id": 2,
+  "feed_forward_proj": "relu",
+  "gradient_checkpointing": false,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "initializer_factor": 1.0,
+  "is_encoder_decoder": true,
+  "keep_token_id": 32100,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "lang_id": {
+    "<c>": 32213,
+    "<c_plus_plus>": 32215,
+    "<c_sharp>": 32214,
+    "<en>": 32206,
+    "<go>": 32212,
+    "<java>": 32208,
+    "<javascript>": 32209,
+    "<php>": 32211,
+    "<python>": 32207,
+    "<ruby>": 32210
+  },
+  "lang_tokens": [
+    "<en>",
+    "<python>",
+    "<java>",
+    "<javascript>",
+    "<ruby>",
+    "<php>",
+    "<go>",
+    "<c>",
+    "<c_sharp>",
+    "<c_plus_plus>"
+  ],
+  "layer_norm_epsilon": 1e-06,
+  "mask_token_id": 4,
+  "model_type": "t5",
+  "n_positions": 512,
+  "num_decoder_layers": 12,
+  "num_heads": 12,
+  "num_layers": 12,
+  "output_past": true,
+  "pad_token_id": 0,
+  "relative_attention_num_buckets": 32,
+  "start_token_id": 32103,
+  "task_specific_params": {
+    "summarization": {
+      "early_stopping": true,
+      "length_penalty": 2.0,
+      "max_length": 200,
+      "min_length": 30,
+      "no_repeat_ngram_size": 3,
+      "num_beams": 4,
+      "prefix": "summarize: "
+    },
+    "translation_en_to_de": {
+      "early_stopping": true,
+      "max_length": 300,
+      "num_beams": 4,
+      "prefix": "translate English to German: "
+    },
+    "translation_en_to_fr": {
+      "early_stopping": true,
+      "max_length": 300,
+      "num_beams": 4,
+      "prefix": "translate English to French: "
+    },
+    "translation_en_to_ro": {
+      "early_stopping": true,
+      "max_length": 300,
+      "num_beams": 4,
+      "prefix": "translate English to Romanian: "
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.15.0",
+  "use_cache": true,
+  "vocab_size": 32216
+}
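Several keys here (`keep_token_id`, `del_token_id`, `lang_id`, `lang_tokens`, and so on) are not standard T5 fields; `transformers` stores unrecognized config keys as plain attributes on the loaded config object, so they remain readable at runtime. A sketch, again assuming the files are in the current directory:

```python
# Sketch: unknown config keys survive as attributes on the loaded config.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("./")
print(config.vocab_size)                          # 32216
print(config.keep_token_id, config.del_token_id)  # 32100 32102
print(config.lang_id["<python>"])                 # 32207
```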
generation_config.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "decoder_start_token_id": 0,
+  "eos_token_id": 2,
+  "pad_token_id": 0,
+  "transformers_version": "4.27.0.dev0"
+}
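Recent `transformers` versions (this file was written by 4.27.0.dev0) read generation defaults from generation_config.json rather than config.json; the file can also be inspected on its own, as in this sketch:

```python
# Sketch: loading the generation defaults separately from the model.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("./")
print(gen_config.decoder_start_token_id)  # 0, consistent with config.json
```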
merges.txt
ADDED
The diff for this file is too large to render.
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:edccaec26eb22f8e0c708d245f8ad528102aa17ca02b546a530a4775adf3fd74
+size 892009595
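What the diff shows is a Git LFS pointer, not the weights themselves: the actual pytorch_model.bin (about 892 MB) is content-addressed by the sha256 oid. A sketch for verifying a downloaded copy against the pointer above:

```python
# Sketch: check a downloaded pytorch_model.bin against the LFS pointer above.
import hashlib
import os

EXPECTED = "edccaec26eb22f8e0c708d245f8ad528102aa17ca02b546a530a4775adf3fd74"

assert os.path.getsize("pytorch_model.bin") == 892009595
h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == EXPECTED
```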
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}, "additional_special_tokens": ["<en>", "<python>", "<java>", "<javascript>", "<ruby>", "<php>", "<go>", "<c>", "<c_sharp>", "<c_plus_plus>"]}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"errors": "replace", "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "model_max_length": 512, "special_tokens_map_file": "/home/shuailu/lushuai/code_review/PreViewer/pretrained_models/codet5/special_tokens_map.json", "tokenizer_file": null, "name_or_path": "/home/shuailu/lushuai/code_review/PreViewer/pretrained_models/codet5", "tokenizer_class": "RobertaTokenizer"}
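Note the `RobertaTokenizer` class and `model_max_length` of 512; the hard-coded `special_tokens_map_file` and `name_or_path` are leftover local paths from the original training machine and are ignored when loading from this repo. Because the language tags are registered as additional special tokens, they should tokenize as single units, as this sketch assumes:

```python
# Sketch: added special tokens are matched before BPE, so a tag stays whole.
from transformers import RobertaTokenizer

tok = RobertaTokenizer.from_pretrained("./")
print(tok.model_max_length)                       # 512
print(tok.tokenize("<python> def f(): pass")[0])  # '<python>' (assumption)
```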
vocab.json
ADDED
The diff for this file is too large to render.