File size: 1,367 Bytes
68ca8cd
06bddba
 
68ca8cd
06bddba
 
 
 
68ca8cd
9804479
06bddba
 
 
 
 
 
316fef8
06bddba
 
316fef8
68ca8cd
 
 
9804479
 
 
 
68ca8cd
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer once at import time, before the UI starts.
# NOTE(review): this is a gated Hugging Face repo — first run requires an
# authenticated HF token and a large weight download; confirm deploy env.
model_name = "meta-llama/Llama-3.1-8B-Instruct"
# NOTE(review): no device_map/torch_dtype given, so transformers' defaults
# apply (typically CPU, full precision) — confirm this is intended for an
# 8B-parameter model.
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

def evaluate_code_security(code):
    """Ask the LLM to judge whether *code* is safe to upload.

    Parameters
    ----------
    code : str
        User-submitted source code to review.

    Returns
    -------
    str
        The model's generated reply. The prompt requests JSON with keys
        'result' ('Safe'/'Unsafe') and 'reason', but the output is not
        parsed or validated here.
    """
    # NOTE(review): user code is interpolated verbatim into the prompt, so a
    # submission can attempt prompt injection ("ignore previous
    # instructions ..."). Treat the verdict as advisory, never authoritative.
    input_text = f"You are BoxyReviewer. You work for OmniBlocks, a Python IDE. Your job is to review code before it is uploaded to the cloud. Not for bugs, that's the compiler's thing. Your job is to look at the code and make sure none of it is malicious before it gets to OmniBlocks' server. Your response must be formatted in a JSON format. The key 'result' should be either 'Safe' or 'Unsafe'. The key 'reason' should explain your reasoning. Here's the code {code}"

    # Llama-3.1-*-Instruct is trained on its chat template; feeding raw text
    # degrades instruction following, so wrap the prompt as a user turn.
    inputs = tokenizer.apply_chat_template(
        [{"role": "user", "content": input_text}],
        add_generation_prompt=True,
        return_tensors="pt",
    )

    # max_new_tokens bounds only the continuation. The original
    # max_length=500 counted the prompt too, so a long code submission could
    # leave little or no room for the answer.
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        outputs = model.generate(inputs, max_new_tokens=500)

    # Decode only the newly generated tokens; decoding outputs[0] in full
    # would echo the entire prompt back to the user.
    response = tokenizer.decode(
        outputs[0][inputs.shape[-1]:], skip_special_tokens=True
    )

    return response

# Gradio UI: one code textbox in, the model's verdict text out.
demo = gr.Interface(
    evaluate_code_security,
    gr.Textbox(label="Enter your code"),
    gr.Textbox(label="Code Security Evaluation"),
    title="Code Security Evaluator",
    # Fixed: the description previously said "password strength" — copy left
    # over from a different demo; this app reviews code security.
    description="Get the AI's evaluation of your code's security.",
)

if __name__ == "__main__":
    # Start the Gradio server only when run as a script, not on import.
    demo.launch()