import Bio.SeqIO as sio
import tensorflow as tf
import numpy as np
from sklearn.preprocessing import LabelBinarizer
from tensorflow.keras.utils import to_categorical
import random
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import tqdm
# load the pre-trained autoencoder filter and the classifier models
filterm = tf.keras.models.load_model(os.path.join(os.path.dirname(__file__), '../model/AELS_tall.h5'))
classifier = tf.keras.models.load_model(os.path.join(os.path.dirname(__file__), '../model/classifier-ls_tall.h5'))
# encoding: every sequence is padded/truncated to 1600 aa over 22 channels
char_dict = {}
chars = 'ACDEFGHIKLMNPQRSTVWXYBJZ'
new_chars = "ACDEFGHIKLMNPQRSTVWXY"
# 22-dim encoding per residue: indices 0-20 cover the 21-letter alphabet,
# index 21 is reserved for padding; ambiguity codes split their mass in half
for char in chars:
    temp = np.zeros(22)
    if char == 'B':    # B = Asp (D) or Asn (N)
        for ch in 'DN':
            temp[new_chars.index(ch)] = 0.5
    elif char == 'J':  # J = Ile (I) or Leu (L)
        for ch in 'IL':
            temp[new_chars.index(ch)] = 0.5
    elif char == 'Z':  # Z = Glu (E) or Gln (Q)
        for ch in 'EQ':
            temp[new_chars.index(ch)] = 0.5
    else:
        temp[new_chars.index(char)] = 1
    char_dict[char] = temp
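# Sanity check (illustrative): every encoding distributes exactly one unit of
# mass, whether one-hot or split 0.5/0.5 across an ambiguity pair.
assert all(abs(vec.sum() - 1.0) < 1e-9 for vec in char_dict.values())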
def encode(seq):
    """Encode a sequence into a fixed (1600, 22) array; positions past the
    end of the sequence are flagged as padding in channel 21."""
    train_array = np.zeros((1600, 22))
    for i in range(1600):
        if i < len(seq):
            train_array[i] = char_dict[seq[i]]
        else:
            train_array[i][21] = 1
    return train_array
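# For example, encode('ACD') yields a (1600, 22) array: rows 0-2 hold the
# one-hot vectors for A, C and D, every later row sets the padding flag
# (column 21) to 1, and sequences longer than 1600 aa are truncated.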
def test_encode(tests):
    tests_seq = []
    for test in tests:
        tests_seq.append(encode(test))
    tests_seq = np.array(tests_seq)
    return tests_seq
def newEncodeVaryLength(seq):
    """Encode a sequence, padding its length up to the next multiple of 16."""
    mol = len(seq) % 16
    dimension1 = len(seq) - mol + 16
    train_array = np.zeros((dimension1, 22))
    for i in range(dimension1):
        if i < len(seq):
            train_array[i] = char_dict[seq[i]]
        else:
            train_array[i][21] = 1
    return train_array
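# The padded length is the next multiple of 16 above len(seq), presumably so
# the autoencoder's down/up-sampling stack divides evenly: a 30-aa sequence
# becomes 32 rows, while a 32-aa sequence becomes 48 (mol == 0 still adds 16).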
def test_newEncodeVaryLength(tests):
    tests_seq = []
    for test in tests:
        tests_seq.append(newEncodeVaryLength(test))
    tests_seq = np.array(tests_seq)
    return tests_seq
def filter_prediction_batch(seqs):
    """Run the autoencoder filter over a batch of encoded sequences."""
    predictions = []
    temp = filterm.predict(seqs, batch_size=512)
    predictions.append(temp)
    return predictions
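# Assuming the autoencoder reconstructs its input shape, predicting on an
# (n, 1600, 22) batch returns an (n, 1600, 22) array, so the single-element
# lists returned here stack cleanly with np.vstack further down.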
def prediction(seqs):
    """Per-sequence variant of filter_prediction_batch (not used below)."""
    predictions = []
    for seq in seqs:
        temp = filterm.predict(np.array([seq]))
        predictions.append(temp)
    return predictions
def reconstruction_simi(pres, ori):
    """Compare each reconstruction against its original sequence, returning
    the decoded reconstructions and per-sequence identity fractions."""
    simis = []
    reconstructs = []
    for index, ele in enumerate(pres):
        # compare at most the first 1600 positions (the fixed encoding length)
        length = min(len(ori[index]), 1600)
        count_simi = 0
        reconstruct = ''
        for pos in range(length):
            # decode the argmax channel back to a residue letter
            if chars[np.argmax(ele[pos])] == ori[index][pos]:
                count_simi += 1
            reconstruct += chars[np.argmax(ele[pos])]
        simis.append(count_simi / length)
        reconstructs.append(reconstruct)
    return reconstructs, simis
def chunks(lst, n):
    """Yield successive n-sized chunks from lst."""
    for i in range(0, len(lst), n):
        yield lst[i:i + n]
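# e.g. list(chunks(list(range(5)), 2)) -> [[0, 1], [2, 3], [4]]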
def argnet_lsaa(input_file, outfile):
    cut = 0.25868536454055224  # reconstruction-similarity cutoff for the filter
    print('read in file...')
    test = [i for i in sio.parse(input_file, 'fasta')]
    test_ids = [ele.id for ele in test]
    print('encoding test file...')
    testencode = test_encode(test)
    print('batch predict...')
    testencode_pre1 = []
    for ele in list(chunks(testencode, 10000)):
        # for huge volumes of sequences (~ millions) this could be changed
        # to create the batches in advance
        temp = filter_prediction_batch(ele)
        testencode_pre1.append(temp)
    # flatten the per-chunk prediction lists into one (n, 1600, 22) array
    testencode_pre = np.vstack([item for sublist in testencode_pre1 for item in sublist])
    print('reconstruct, simi...')
    reconstructs, simis = reconstruction_simi(testencode_pre, test)
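    # Sequences whose reconstruction identity clears the cutoff are treated
    # as putative ARGs and handed to the classifier; the rest are non-ARGs.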
    passed_encode = []  # kept as a list here, stacked into an np.array later
    passed_idx = []
    notpass_idx = []
    for index, ele in enumerate(simis):
        if ele >= cut:
            passed_encode.append(testencode[index])
            passed_idx.append(index)
        else:
            notpass_idx.append(index)
    ### classification
    print('classifying...')
    train_data = [i for i in sio.parse(os.path.join(os.path.dirname(__file__), "../data/train.fasta"), 'fasta')]
    train_labels = [ele.id.split('|')[3].strip() for ele in train_data]
    encoder = LabelBinarizer()
    encoded_train_labels = encoder.fit_transform(train_labels)
    # LabelBinarizer sorts its classes, so the sorted unique labels map
    # one-to-one onto the classifier's output indices
    prepare = sorted(list(set(train_labels)))
    label_dic = {}
    for index, ele in enumerate(prepare):
        label_dic[index] = ele
    classifications = classifier.predict(np.stack(passed_encode, axis=0), batch_size=512)
    out = {}
    for i, ele in enumerate(passed_idx):
        # store [probability, predicted resistance category] per passed sequence
        out[ele] = [np.max(classifications[i]), label_dic[np.argmax(classifications[i])]]
    ### output
    print('writing output...')
    passed = set(passed_idx)    # set membership keeps the output loop O(n)
    notpass = set(notpass_idx)
    with open(os.path.join(os.path.dirname(__file__), "../results/" + outfile), 'w') as f:
        f.write('test_id' + '\t' + 'ARG_prediction' + '\t' + 'resistance_category' + '\t' + 'probability' + '\n')
        for idx, ele in enumerate(test):
            if idx in passed:
                f.write(test[idx].id + '\t')
                f.write('ARG' + '\t')
                f.write(out[idx][-1] + '\t')
                f.write(str(out[idx][0]) + '\n')
            if idx in notpass:
                f.write(test[idx].id + '\t')
                f.write('non-ARG' + '\t' + '' + '\t' + '' + '\n')
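# Hypothetical entry point, for illustration only: the real pipeline likely
# calls argnet_lsaa from a wrapper script, and these file names are placeholders.
if __name__ == '__main__':
    argnet_lsaa('input.fasta', 'output.tsv')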