Add Plant DNABERT model
- README.md +76 -3
- added_tokens.json +7 -0
- config.json +24 -0
- generation_config.json +5 -0
- model.safetensors +3 -0
- special_tokens_map.json +51 -0
- tokenizer.json +0 -0
- tokenizer_config.json +64 -0
README.md
CHANGED
@@ -1,3 +1,76 @@
----
-license: cc-by-nc-sa-4.0
----
+---
+license: cc-by-nc-sa-4.0
+widget:
+- text: ACCTGA<mask>TTCTGAGTC
+tags:
+- DNA
+- biology
+- genomics
+datasets:
+- zhangtaolab/plant_reference_genomes
+---
+# Plant foundation DNA large language models
+
+The plant DNA large language models (LLMs) are a series of foundation models built on different model architectures and pre-trained on various plant reference genomes.
+All the models have a comparable size between 90 MB and 150 MB; a BPE tokenizer with a vocabulary of 8,000 tokens is used for tokenization.
+
+
+Part of this collection is the **nucleotide-transformer-v2-100m-multi-species**, a 100M-parameter transformer pre-trained on a collection of 850 genomes from a wide range of species, including model and non-model organisms.
+
+**Developed by:** zhangtaolab
+
+### Model Sources
+
+- **Repository:** [Plant DNA LLMs](https://github.com/zhangtaolab/plant_DNA_LLMs)
+- **Manuscript:** [Versatile applications of foundation DNA language models in plant genomes]()
+
+### Architecture
+
+The model is based on the Google BERT base architecture, with a tokenizer modified specifically for DNA sequences.
+
+### How to use
+
+Install the runtime library first:
+```bash
+pip install transformers
+```
+
+Here is a simple inference example:
+```python
+from transformers import AutoModelForMaskedLM, AutoTokenizer
+import torch
+
+model_name = 'plant-dnabert'
+# load model and tokenizer
+model = AutoModelForMaskedLM.from_pretrained(f'zhangtaolab/{model_name}', trust_remote_code=True)
+tokenizer = AutoTokenizer.from_pretrained(f'zhangtaolab/{model_name}', trust_remote_code=True)
+
+# example sequences and tokenization
+sequences = ['ATATACGGCCGNC', 'GGGTATCGCTTCCGAC']
+tokens = tokenizer(sequences, padding="longest")['input_ids']
+print(f"Tokenized sequences: {tokenizer.batch_decode(tokens)}")
+
+# inference
+device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
+model.to(device)
+inputs = tokenizer(sequences, truncation=True, padding='max_length', max_length=512,
+                   return_tensors="pt")
+inputs = {k: v.to(device) for k, v in inputs.items()}
+outs = model(
+    **inputs,
+    output_hidden_states=True
+)
+
+# get the final-layer embeddings and prediction logits
+embeddings = outs['hidden_states'][-1].detach().cpu().numpy()
+logits = outs['logits'].detach().cpu().numpy()
+```
+
+
+### Training data
+We pre-train the model with the masked language modeling (MaskedLM) objective; tokenized sequences have a maximum length of 512.
+The detailed training procedure can be found in our manuscript.
+
+
+#### Hardware
+The model was pre-trained on an NVIDIA RTX 4090 GPU (24 GB).
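The `widget` text in the front matter (`ACCTGA<mask>TTCTGAGTC`) corresponds to fill-mask inference. Below is a minimal sketch of the same call through the standard `transformers` pipeline, assuming the hub name `zhangtaolab/plant-dnabert` resolves as in the README; the tokenizer's own mask token (`[MASK]`) is substituted for the widget's `<mask>` placeholder.

```python
from transformers import pipeline

# load the fill-mask pipeline; trust_remote_code mirrors the README usage
unmasker = pipeline('fill-mask', model='zhangtaolab/plant-dnabert',
                    trust_remote_code=True)

# build the widget sequence with the model's actual mask token
seq = f"ACCTGA{unmasker.tokenizer.mask_token}TTCTGAGTC"
for pred in unmasker(seq, top_k=5):
    print(pred['token_str'], round(pred['score'], 4))
```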
added_tokens.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "[CLS]": 8000,
+  "[MASK]": 8004,
+  "[PAD]": 8003,
+  "[SEP]": 8001,
+  "[UNK]": 8002
+}
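These five entries extend the 8,000-token BPE vocabulary to the 8,005 entries declared in `config.json`. A quick sketch to confirm the mapping, reusing the tokenizer loaded as in the README:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('zhangtaolab/plant-dnabert', trust_remote_code=True)

# the special tokens occupy IDs 8000-8004, directly after the 8000 BPE entries
for token in ['[CLS]', '[SEP]', '[UNK]', '[PAD]', '[MASK]']:
    print(token, tokenizer.convert_tokens_to_ids(token))
```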
config.json
ADDED
@@ -0,0 +1,24 @@
+{
+  "architectures": [
+    "BertForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.35.2",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 8005
+}
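This is a standard BERT-base layout (12 layers, 12 attention heads, hidden size 768, 512 position embeddings) with the vocabulary reduced to 8,005 entries. A short sketch for inspecting the configuration without downloading the weights:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained('zhangtaolab/plant-dnabert', trust_remote_code=True)
print(config.num_hidden_layers, config.num_attention_heads, config.hidden_size)  # 12 12 768
print(config.max_position_embeddings)  # 512, matching the README's max_length
print(config.vocab_size)               # 8005 = 8000 BPE tokens + 5 special tokens
```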
generation_config.json
ADDED
@@ -0,0 +1,5 @@
+{
+  "_from_model_config": true,
+  "pad_token_id": 0,
+  "transformers_version": "4.35.2"
+}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff4c3d0c1421a9f2234c5593cb8c4c40c60009edd853a4fff1acae4c90c67af9
+size 368818524
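The three lines above are a Git LFS pointer rather than the weights themselves; the actual ~369 MB safetensors file is fetched from LFS storage on download. A sketch for checking a downloaded copy against the pointer's `oid` (the local path is hypothetical):

```python
import hashlib

EXPECTED = 'ff4c3d0c1421a9f2234c5593cb8c4c40c60009edd853a4fff1acae4c90c67af9'

sha256 = hashlib.sha256()
with open('model.safetensors', 'rb') as f:  # hypothetical local path to the weights
    for chunk in iter(lambda: f.read(1 << 20), b''):  # hash in 1 MiB chunks
        sha256.update(chunk)
print(sha256.hexdigest() == EXPECTED)  # True for an intact download (368,818,524 bytes)
```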
special_tokens_map.json
ADDED
@@ -0,0 +1,51 @@
+{
+  "bos_token": {
+    "content": "[CLS]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "cls_token": {
+    "content": "[CLS]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "[SEP]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "[MASK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "[SEP]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "[UNK]",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
tokenizer_config.json
ADDED
@@ -0,0 +1,64 @@
+{
+  "added_tokens_decoder": {
+    "8000": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "8001": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "8002": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "8003": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "8004": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "[CLS]",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": false,
+  "eos_token": "[SEP]",
+  "mask_token": "[MASK]",
+  "max_length": 512,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_to_multiple_of": null,
+  "pad_token": "[PAD]",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
+  "sep_token": "[SEP]",
+  "sp_model_kwargs": {},
+  "split_by_punct": false,
+  "stride": 0,
+  "tokenizer_class": "DebertaV2Tokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
+  "unk_token": "[UNK]"
+}
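Note that `tokenizer_class` is `DebertaV2Tokenizer`, a SentencePiece-backed tokenizer reused here for the DNA vocabulary, and that `model_max_length` holds the library's "no limit set" sentinel, which is why the README passes `max_length=512` explicitly. A small sketch illustrating both points, assuming the tokenizer loads as in the README:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('zhangtaolab/plant-dnabert', trust_remote_code=True)
print(type(tokenizer).__name__)    # resolved from tokenizer_class (possibly the fast variant)
print(tokenizer.model_max_length)  # sentinel "no limit", so cap length at call time

# mirror the README: enforce the 512-token limit from max_position_embeddings
enc = tokenizer('ACCTGATTCTGAGTC', truncation=True, max_length=512)
print(enc['input_ids'])
```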