sagawa committed
Commit 20946b6
1 Parent(s): e2146c2

Update app.py

Files changed (1)
  1. app.py +142 -114
app.py CHANGED
@@ -1,24 +1,22 @@
 import os
-import gc
 import random
-import warnings
-warnings.filterwarnings('ignore')
 import numpy as np
+import warnings
 import pandas as pd
 import torch
-import tokenizers
-import transformers
-from transformers import AutoTokenizer, EncoderDecoderModel, AutoModelForSeq2SeqLM
-import sentencepiece
-from rdkit import Chem
-import rdkit
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+from torch.utils.data import Dataset, DataLoader
+import gc
 import streamlit as st
+
+warnings.filterwarnings("ignore")
+

-st.title('predictproduct-t5')
+st.title('ReactionT5_task_forward')
 st.markdown('##### At this space, you can predict the products of reactions from their inputs.')
 st.markdown('##### The code expects input_data as a string or CSV file that contains an "input" column. The format of the string or contents of the column are like "REACTANT:{reactants of the reaction}REAGENT:{reagents, catalysts, or solvents of the reaction}".')
 st.markdown('##### If there is no reagent, fill the blank with a space. And if there are multiple compounds, concatenate them with "."')
-st.markdown('##### The output contains smiles of predicted products and sum of log-likelihood for each prediction. Predictions are ordered by their log-likelihood.(0th is the most probable product.) "valid compound" is the most probable and valid(can be recognized by RDKit) prediction.')
+st.markdown('##### The output contains smiles of predicted products and sum of log-likelihood for each prediction. Predictions are ordered by their log-likelihood.(0th is the most probable product.)')


 display_text = 'input the reaction smiles (e.g. REACTANT:COC(=O)C1=CCCN(C)C1.O.[Al+3].[H-].[Li+].[Na+].[OH-]REAGENT:C1CCOC1)'
@@ -35,120 +33,150 @@ class CFG():
     num_return_sequences = num_beams
     uploaded_file = st.file_uploader("Choose a CSV file")
     input_data = st.text_area(display_text)
-    model_name_or_path = 'sagawa/ZINC-t5-productpredicition'
+    model_name_or_path = 'sagawa/ReactionT5-forward-v2'
+    input_column = 'input'
+    input_max_length = 400
     model = 't5'
     seed = 42

+def seed_everything(seed=42):
+    random.seed(seed)
+    os.environ['PYTHONHASHSEED'] = str(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    torch.cuda.manual_seed(seed)
+    torch.backends.cudnn.deterministic = True
+
+
+
+def prepare_input(cfg, text):
+    inputs = tokenizer(
+        text,
+        return_tensors="pt",
+        max_length=cfg.input_max_length,
+        padding="max_length",
+        truncation=True,
+    )
+    dic = {"input_ids": [], "attention_mask": []}
+    for k, v in inputs.items():
+        dic[k].append(torch.tensor(v[0], dtype=torch.long))
+    return dic
+
+
+class ProductDataset(Dataset):
+    def __init__(self, cfg, df):
+        self.cfg = cfg
+        self.inputs = df[cfg.input_column].values
+
+    def __len__(self):
+        return len(self.inputs)
+
+    def __getitem__(self, idx):
+        return prepare_input(self.cfg, self.inputs[idx])
+
+
+def predict_single_input(input_compound):
+    inp = tokenizer(input_compound, return_tensors="pt").to(device)
+    with torch.no_grad():
+        output = model.generate(
+            **inp,
+            num_beams=CFG.num_beams,
+            num_return_sequences=CFG.num_return_sequences,
+            return_dict_in_generate=True,
+            output_scores=True,
+        )
+    return output
+
+
+def decode_output(output):
+    sequences = [
+        tokenizer.decode(seq, skip_special_tokens=True).replace(" ", "").rstrip(".")
+        for seq in output["sequences"]
+    ]
+    if CFG.num_beams > 1:
+        scores = output["sequences_scores"].tolist()
+        return sequences, scores
+    return sequences, None
+
+
+def save_single_prediction(input_compound, output, scores):
+    output_data = [input_compound] + output + (scores if scores else [])
+    columns = (
+        ["input"]
+        + [f"{i}th" for i in range(CFG.num_beams)]
+        + ([f"{i}th score" for i in range(CFG.num_beams)] if scores else [])
+    )
+    output_df = pd.DataFrame([output_data], columns=columns)
+    return output_df
+
+
+def save_multiple_predictions(input_data, sequences, scores):
+    output_list = [
+        [input_data.loc[i // CFG.num_return_sequences, CFG.input_column]]
+        + sequences[i : i + CFG.num_return_sequences]
+        + scores[i : i + CFG.num_return_sequences]
+        for i in range(0, len(sequences), CFG.num_return_sequences)
+    ]
+    columns = (
+        ["input"]
+        + [f"{i}th" for i in range(CFG.num_return_sequences)]
+        + ([f"{i}th score" for i in range(CFG.num_return_sequences)] if scores else [])
+    )
+    output_df = pd.DataFrame(output_list, columns=columns)
+    return output_df
+
+
 if st.button('predict'):
     with st.spinner('Now processing. If num beams=5, this process takes about 15 seconds per reaction.'):
+
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

-        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
-        def seed_everything(seed=42):
-            random.seed(seed)
-            os.environ['PYTHONHASHSEED'] = str(seed)
-            np.random.seed(seed)
-            torch.manual_seed(seed)
-            torch.cuda.manual_seed(seed)
-            torch.backends.cudnn.deterministic = True
-        seed_everything(seed=CFG.seed)
-
-
-        tokenizer = AutoTokenizer.from_pretrained(CFG.model_name_or_path, return_tensors='pt')
-
-        if CFG.model == 't5':
-            model = AutoModelForSeq2SeqLM.from_pretrained(CFG.model_name_or_path).to(device)
-        elif CFG.model == 'deberta':
-            model = EncoderDecoderModel.from_pretrained(CFG.model_name_or_path).to(device)
+        seed_everything(seed=CFG.seed)

+        tokenizer = AutoTokenizer.from_pretrained(CFG.model_name_or_path, return_tensors="pt")
+        model = AutoModelForSeq2SeqLM.from_pretrained(CFG.model_name_or_path).to(device)
+        model.eval()

-        if CFG.uploaded_file is not None:
-            input_data = pd.read_csv(CFG.uploaded_file)
-            outputs = []
-            for idx, row in input_data.iterrows():
-                input_compound = row['input']
-                # min_length = min(input_compound.find('CATALYST') - input_compound.find(':') - 10, 0)
-                inp = tokenizer(input_compound, return_tensors='pt').to(device)
-                output = model.generate(**inp, min_length=2, max_length=181, num_beams=CFG.num_beams, num_return_sequences=CFG.num_return_sequences, return_dict_in_generate=True, output_scores=True)
-                if CFG.num_beams > 1:
-                    scores = output['sequences_scores'].tolist()
-                    output = [tokenizer.decode(i, skip_special_tokens=True).replace(' ', '').rstrip('.') for i in output['sequences']]
-                    for ith, out in enumerate(output):
-                        mol = Chem.MolFromSmiles(out.rstrip('.'))
-                        if type(mol) == rdkit.Chem.rdchem.Mol:
-                            output.append(out.rstrip('.'))
-                            scores.append(scores[ith])
-                            break
-                    if type(mol) == None:
-                        output.append(None)
-                        scores.append(None)
-                    output += scores
-                    output = [input_compound] + output
-                    outputs.append(output)
-
-                else:
-                    output = [tokenizer.decode(output['sequences'][0], skip_special_tokens=True).replace('. ', '.').rstrip('.')]
-                    mol = Chem.MolFromSmiles(output[0])
-                    if type(mol) == rdkit.Chem.rdchem.Mol:
-                        output.append(output[0])
-                    else:
-                        output.append(None)
-                    output = [input_compound] + output
-                    outputs.append(output)
-
-            if CFG.num_beams > 1:
-                output_df = pd.DataFrame(outputs, columns=['input'] + [f'{i}th' for i in range(CFG.num_beams)] + ['valid compound'] + [f'{i}th score' for i in range(CFG.num_beams)] + ['valid compound score'])
-            else:
-                output_df = pd.DataFrame(outputs, columns=['input', '0th', 'valid compound'])
-
-
-            @st.cache
-            def convert_df(df):
-                # IMPORTANT: Cache the conversion to prevent computation on every rerun
-                return df.to_csv(index=False)
-
-            csv = convert_df(output_df)
-
-            st.download_button(
-                label="Download data as CSV",
-                data=csv,
-                file_name='output.csv',
-                mime='text/csv',
+        if CFG.uploaded_file is None:
+            input_compound = CFG.input_data
+            output = predict_single_input(input_compound)
+            sequences, scores = decode_output(output)
+            output_df = save_single_prediction(input_compound, sequences, scores)
+        else:
+            input_data = pd.read_csv(CFG.input_data)
+            dataset = ProductDataset(CFG, input_data)
+            dataloader = DataLoader(
+                dataset,
+                batch_size=CFG.batch_size,
+                shuffle=False,
+                num_workers=4,
+                pin_memory=True,
+                drop_last=False,
             )

-        else:
-            input_compound = CFG.input_data
-            # min_length = min(input_compound.find('CATALYST') - input_compound.find(':') - 10, 0)
-            inp = tokenizer(input_compound, return_tensors='pt').to(device)
-            output = model.generate(**inp, min_length=2, max_length=181, num_beams=CFG.num_beams, num_return_sequences=CFG.num_return_sequences, return_dict_in_generate=True, output_scores=True)
-            if CFG.num_beams > 1:
-                scores = output['sequences_scores'].tolist()
-                output = [tokenizer.decode(i, skip_special_tokens=True).replace(' ', '').rstrip('.') for i in output['sequences']]
-                for ith, out in enumerate(output):
-                    mol = Chem.MolFromSmiles(out.rstrip('.'))
-                    if type(mol) == rdkit.Chem.rdchem.Mol:
-                        output.append(out.rstrip('.'))
-                        scores.append(scores[ith])
-                        break
-                if type(mol) == None:
-                    output.append(None)
-                    scores.append(None)
-                output += scores
-                output = [input_compound] + output
-
-            else:
-                output = [tokenizer.decode(output['sequences'][0], skip_special_tokens=True).replace('. ', '.').rstrip('.')]
-                mol = Chem.MolFromSmiles(output[0])
-                if type(mol) == rdkit.Chem.rdchem.Mol:
-                    output.append(output[0])
-                else:
-                    output.append(None)
-
+            all_sequences, all_scores = [], []
+            for inputs in dataloader:
+                inputs = {k: v[0].to(device) for k, v in inputs.items()}
+                with torch.no_grad():
+                    output = model.generate(
+                        **inputs,
+                        min_length=CFG.output_min_length,
+                        max_length=CFG.output_max_length,
+                        num_beams=CFG.num_beams,
+                        num_return_sequences=CFG.num_return_sequences,
+                        return_dict_in_generate=True,
+                        output_scores=True,
+                    )
+                sequences, scores = decode_output(output)
+                all_sequences.extend(sequences)
+                if scores:
+                    all_scores.extend(scores)
+                del output
+                torch.cuda.empty_cache()
+                gc.collect()
+
+            output_df = save_multiple_predictions(input_data, all_sequences, all_scores)

-        if CFG.num_beams > 1:
-            output_df = pd.DataFrame(np.array(output).reshape(1, -1), columns=['input'] + [f'{i}th' for i in range(CFG.num_beams)] + ['valid compound'] + [f'{i}th score' for i in range(CFG.num_beams)] + ['valid compound score'])
-        else:
-            output_df = pd.DataFrame(np.array([input_compound]+output).reshape(1, -1), columns=['input', '0th', 'valid compound'])
         st.table(output_df)

         @st.cache
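
For reference, the prediction flow that this commit wires into Streamlit can be exercised on its own roughly as follows. This is a minimal sketch, not part of the commit; it assumes the `sagawa/ReactionT5-forward-v2` checkpoint referenced in the diff above and the `num_beams=5` setting mentioned in the app's spinner message.

```python
# Minimal standalone sketch (assumptions: sagawa/ReactionT5-forward-v2 is available
# on the Hugging Face Hub, and num_beams=5 as hinted by the app's spinner text).
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = AutoTokenizer.from_pretrained("sagawa/ReactionT5-forward-v2")
model = AutoModelForSeq2SeqLM.from_pretrained("sagawa/ReactionT5-forward-v2").to(device)
model.eval()

# The input string follows the format described above: REACTANT:{...}REAGENT:{...}
reaction = "REACTANT:COC(=O)C1=CCCN(C)C1.O.[Al+3].[H-].[Li+].[Na+].[OH-]REAGENT:C1CCOC1"
inputs = tokenizer(reaction, return_tensors="pt").to(device)

with torch.no_grad():
    output = model.generate(
        **inputs,
        num_beams=5,
        num_return_sequences=5,
        return_dict_in_generate=True,
        output_scores=True,
    )

# Decode each beam and pair it with its summed log-likelihood,
# mirroring what the app's decode_output() does.
products = [
    tokenizer.decode(seq, skip_special_tokens=True).replace(" ", "").rstrip(".")
    for seq in output.sequences
]
scores = output.sequences_scores.tolist()
for smiles, score in zip(products, scores):
    print(smiles, score)
```

Beam-search outputs are already ordered by score, so the first printed row corresponds to the "0th" (most probable) product column in the app's output table.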