|
import streamlit as st |
|
from transformers import AutoModelForCausalLM, AutoTokenizer |
|
import torch |
|
|
|
@st.cache_resource  # st.cache(allow_output_mutation=True) is deprecated; cache_resource is the documented replacement for models
def load_model():
    """Load and cache the pentest_ai model and tokenizer.

    Cached as a Streamlit resource so the (expensive) model load happens
    once per server process instead of on every script rerun.

    Returns:
        tuple: (model, tokenizer) — the 8-bit-quantized causal LM and its
        matching tokenizer.
    """
    model_path = "Canstralian/pentest_ai"
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype=torch.float16,
        device_map="auto",        # place layers on available GPU(s), CPU fallback
        load_in_8bit=True,        # NOTE(review): requires bitsandbytes at runtime — confirm it is installed
        trust_remote_code=True,   # model repo ships custom code; only safe for trusted repos
    )
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    return model, tokenizer
|
|
|
def generate_text(model, tokenizer, instruction):
    """Generate a text completion for *instruction*.

    Args:
        model: A causal LM exposing ``.device`` and ``.generate(...)``.
        tokenizer: Tokenizer exposing ``.encode(...)`` and ``.decode(...)``.
        instruction: The user prompt to complete.

    Returns:
        str: The decoded generation (special tokens stripped).
    """
    # Move inputs to wherever the model actually lives instead of assuming
    # CUDA — the model is loaded with device_map="auto" and may be on CPU.
    tokens = tokenizer.encode(instruction, return_tensors='pt').to(model.device)
    generated_tokens = model.generate(
        tokens,
        max_length=1024,
        do_sample=True,   # without this, generate() is greedy and silently ignores the sampling params below
        top_p=1.0,
        temperature=0.5,
        top_k=50,
    )
    return tokenizer.decode(generated_tokens[0], skip_special_tokens=True)
|
|
|
# --- Streamlit UI ------------------------------------------------------------
model, tokenizer = load_model()

st.title("Penetration Testing AI Assistant")

instruction = st.text_area("Enter your question:")

if st.button("Generate"):
    # Guard against empty/whitespace-only submissions so we do not burn a
    # generation call (and its latency) on a blank prompt.
    if not instruction.strip():
        st.warning("Please enter a question first.")
    else:
        with st.spinner("Generating..."):
            response = generate_text(model, tokenizer, instruction)
        st.write(response)