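"""Gradio demo for CO2RRChat.

A BERT router first classifies each incoming CO2RR question; the query is then
dispatched to one of three specialised models: product prediction (a RoBERTa
classifier), computational planning, or code generation (two Llama-style
causal LMs prompted with the Llama-2 chat template).
"""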
import os
import sys
import math
import torch
import argparse
import textwrap
import transformers
from peft import PeftModel
from transformers import GenerationConfig, TextStreamer
#from llama_attn_replace import replace_llama_attn
from transformers import RobertaTokenizerFast, RobertaForSequenceClassification
import re
import gradio as gr

PROMPT_DICT = {
    "prompt_no_input": (
        "Below is an instruction that describes a task. "
        "Write a response that appropriately completes the request.\n\n"
        "### Instruction:\n{instruction}\n\n### Response:"
    ),
    "prompt_no_input_llama2": (
        "<s>[INST] <<SYS>>\n"
        "You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe.  Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\n"
        "If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\n"
        "<</SYS>> \n\n {instruction} [/INST]"
    ),
    "prompt_input_llama2": (
        "<s>[INST] <<SYS>>\n"
        "You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, #while being safe.  Your answers should not include any harmful, unethical, racist, sexist, #toxic, dangerous, or illegal content. Please ensure that your responses are socially #unbiased and positive in nature.\n\n"
        "If a question does not make any sense, or is not factually coherent, explain why instead of #answering something not correct. If you don't know the answer to a question, please don't #share false information.\n"
        "<</SYS>> \n\n {instruction} [/INST]"
     )
}
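
# The prompts above follow the Llama-2 chat template (a <<SYS>> system block and
# [INST]/[/INST] turn markers); build_generator() strips everything up to "[/INST]".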

class Args:
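    """Default configuration: Hugging Face Hub model paths and generation settings."""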
    def __init__(self):
        self.bert_router = "waleyWang/CO2RRChat/bert_router_model"
        self.bert_model = "waleyWang/CO2RRChat/product_predict"
        self.com_plan_model = "waleyWang/CO2RRChat/computational_plan"
        self.code_model = "waleyWang/CO2RRChat/code_generate"
        self.context_size = 32768
        self.max_gen_len = 30000
        self.cache_dir = "./cache"
        self.temperature = 0.6
        self.top_p = 0.9
        self.question = None  # set per request by process_question()

def get_label_map():
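    """Map product-classifier label ids to human-readable CO2RR products."""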
    return {
        0: "The main product is CH3CH2OH",
        1: "The main product is C2H4",
        2: "The main product is HCOOH/HCOO-",
        3: "The main product is C2+",
        4: "The main product is CH3OH",
        5: "The main product is CH4",
        6: "The main product is CO"}
    
def load_bert_router(model_path, query):
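    """Classify a query with the BERT router and return an intent label.

    Label_0 routes to product prediction, Label_1 to computational planning,
    and anything else to code generation (see main()).
    """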
    tokenizer = transformers.BertTokenizer.from_pretrained(model_path)
    model = transformers.BertForSequenceClassification.from_pretrained(model_path, num_labels=3)
    model.eval()
    label_list = [f'Label_{i}' for i in range(model.config.num_labels)]
    inputs = tokenizer(query, return_tensors="pt", 
                       padding=True, 
                       truncation=True, 
                       max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        
        probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
        
        predicted_label_id = torch.argmax(probabilities, dim=-1).item()
        predicted_label = label_list[predicted_label_id]
        confidence = probabilities[0][predicted_label_id].item()  # computed for inspection; not returned
    return predicted_label

def load_bert(model_name):
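    """Load the RoBERTa product-prediction classifier and its tokenizer."""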
    tokenizer = RobertaTokenizerFast.from_pretrained(model_name)
    model = RobertaForSequenceClassification.from_pretrained(model_name)
    return model, tokenizer

def run_bert(text, model, tokenizer):
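    """Predict the main CO2RR product for `text` and return its description."""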
    label_map = get_label_map()
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
    outputs = model(**inputs)
    logits = outputs.logits
    predictions = torch.argmax(logits, dim=-1)
    product_label = label_map[predictions.item()]
    return product_label

def build_generator(model, tokenizer, temperature=0.6, top_p=0.9, max_gen_len=4096, use_cache=True):
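    """Return a closure that generates a completion for a Llama-2 chat prompt."""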
    def response(prompt):
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        output = model.generate(
            **inputs,
            max_new_tokens=max_gen_len,
            do_sample=True,  # sampling must be enabled for temperature/top_p to apply
            temperature=temperature,
            top_p=top_p,
            use_cache=use_cache
        )
        out = tokenizer.decode(output[0], skip_special_tokens=True)
        
        if "[/INST]" in out:
            out = out.split("[/INST]")[1].strip()
        return out
    return response

def load_llama_model(model_path, device, context_size, cache_dir):  
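    """Load a causal LM in fp16, applying linear RoPE position scaling when
    context_size exceeds the model's native context window."""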
    config = transformers.AutoConfig.from_pretrained(
        model_path,
        cache_dir=cache_dir,  
    )

    orig_ctx_len = getattr(config, "max_position_embeddings", None)
    if orig_ctx_len and context_size > orig_ctx_len:
        scaling_factor = float(math.ceil(context_size / orig_ctx_len))
        config.rope_scaling = {"type": "linear", "factor": scaling_factor}

    model = transformers.AutoModelForCausalLM.from_pretrained(
        model_path,
        config=config,
        cache_dir=cache_dir,  
        torch_dtype=torch.float16,
        device_map="auto",
    )
    # device_map="auto" already places the model, so no explicit model.to(device).
    model.resize_token_embeddings(32001)  # base Llama vocab (32000) plus an added pad token

    tokenizer = transformers.AutoTokenizer.from_pretrained(
        model_path,
        cache_dir=cache_dir,
        model_max_length=max(context_size, orig_ctx_len or 0),
        padding_side="right",
        use_fast=False,
    )

    model.eval()
    if torch.__version__ >= "2" and sys.platform != "win32":
        model = torch.compile(model)

    return model, tokenizer

def generate_llama_output(model, tokenizer, temperature=0.6, top_p=0.9, max_gen_len=4096, use_cache=True):
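    """Streaming, verbose variant of build_generator (not used by main())."""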
    def response(prompt):
        print("Original prompt:", prompt)
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        print("Tokenized prompt:", inputs)
        streamer = TextStreamer(tokenizer)
        print("Generation parameters:")
        print("Max new tokens:", max_gen_len)
        print("Temperature:", temperature)
        print("Top p:", top_p)

        output = model.generate(
            **inputs,
            max_new_tokens=max_gen_len,
            do_sample=True,  # sampling must be enabled for temperature/top_p to apply
            temperature=temperature,
            top_p=top_p,
            use_cache=use_cache,
            streamer=streamer,
        )
        print("Raw model output:", output)
        out = tokenizer.decode(output[0], skip_special_tokens=False)
        print("Decoded output:", out)
        #out = out.split(prompt.lstrip("<s>"))[1].strip()
        return out

    return response

def main(args):
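    """Route the question through the BERT router and run the matching branch."""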
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    predicted_label = load_bert_router(args.bert_router, args.question)

    try:
        if predicted_label == 'Label_0':
            bert_model, bert_tokenizer = load_bert(args.bert_model)
            product_label = run_bert(args.question, bert_model, bert_tokenizer)
            response = f"Product Prediction Result:\n{product_label}"
            print("Generating response:", response)  
            return response

        elif predicted_label == 'Label_1':
            plan_model, plan_tokenizer = load_llama_model(args.com_plan_model, device, args.context_size, args.cache_dir)
            plan_response = build_generator(plan_model, plan_tokenizer, temperature=args.temperature, top_p=args.top_p, max_gen_len=args.max_gen_len, use_cache=True)
            prompt_no_input = PROMPT_DICT["prompt_no_input_llama2"]
            prompt = prompt_no_input.format_map({"instruction": args.question})
            output = plan_response(prompt=prompt)
            response = f"Computational Planning Result:\n{output}"
            print("Generating response:", response) 
            return response

        else:
            code_model, code_tokenizer = load_llama_model(args.code_model, device, args.context_size, args.cache_dir)
            code_response = build_generator(code_model, code_tokenizer, temperature=args.temperature, top_p=args.top_p, max_gen_len=args.max_gen_len, use_cache=True)
            prompt_no_input = PROMPT_DICT["prompt_no_input_llama2"]
            prompt = prompt_no_input.format_map({"instruction": args.question})
            output = code_response(prompt=prompt)
            response = f"Code Generation Result:\n{output}"
            print("Generating response:", response) 
            return response

    except Exception as e:
        error_msg = f"Error occurred: {str(e)}"
        print("Error:", error_msg) 
        return error_msg

def process_question(question):
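    """Gradio callback: attach the question to the default Args and run main()."""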
    args = Args()
    args.question = question
    try:
        result = main(args)
        return result
    except Exception as e:
        return f"Error occurred: {str(e)}"

def create_demo():
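    """Build the Gradio interface for the CO2RR assistant."""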
    iface = gr.Interface(
        fn=process_question,
        inputs=gr.Textbox(
            lines=3, 
            placeholder="Enter your CO2RR related question here...",
            label="Question"
        ),
        outputs=gr.Textbox(
            lines=10,
            label="Response"
        ),
        title="CO2RR Assistant",
        description="Ask questions about CO2 reduction reaction (CO2RR), including product prediction, computational planning, and code generation.",
        examples=[
            ["What is the main product of CO2RR on Cu(100) surface at -0.9V vs. RHE?"],
            ["Simulate the CO2 reduction reaction (CO2RR) to produce CH4 (methane) on a Cs-Lu alloy (111) surface."],
            ["Generate VASP input files for CO2RR simulation on Cu(111) surface"]
        ]
    )
    return iface

if __name__ == "__main__":
    demo = create_demo()
    demo.launch(
        share=True, 
        server_name="0.0.0.0"
    )