import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer, placing the model on GPU when one is available
model_name = "meta-llama/Llama-3.1-8B-Instruct"
device = "cuda" if torch.cuda.is_available() else "cpu"
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
tokenizer = AutoTokenizer.from_pretrained(model_name)

SYSTEM_PROMPT = (
    "You are BoxyReviewer. You work for OmniBlocks, a Python IDE. Your job is "
    "to review code before it is uploaded to the cloud. Not for bugs; that's "
    "the compiler's job. Your job is to look at the code and make sure none of "
    "it is malicious before it gets to OmniBlocks' server. Your response must "
    "be formatted as JSON. The key 'result' should be either 'Safe' or "
    "'Unsafe'. The key 'reason' should explain your reasoning."
)


def evaluate_code_security(code):
    # Format the prompt with the model's chat template, since this is an
    # instruction-tuned model
    messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": f"Here's the code:\n{code}"},
    ]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(device)
    # Run the model; max_new_tokens bounds the reply without counting the prompt
    outputs = model.generate(input_ids, max_new_tokens=500)
    # Decode only the newly generated tokens, skipping the echoed prompt
    response = tokenizer.decode(
        outputs[0][input_ids.shape[-1]:], skip_special_tokens=True
    )
    return response


demo = gr.Interface(
    evaluate_code_security,
    gr.Textbox(label="Enter your code"),
    gr.Textbox(label="Code Security Evaluation"),
    title="Code Security Evaluator",
    description="Get the AI's evaluation of your code's security.",
)

if __name__ == "__main__":
    demo.launch()
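
# --- Hypothetical parsing sketch (not part of the original app) ---
# The system prompt asks the model for JSON, but instruction-tuned models often
# wrap the JSON in extra prose. A caller that needs the structured verdict
# could extract it as sketched below; the helper name parse_review and its
# fallback behavior are assumptions, not part of the app above.
import json
import re


def parse_review(response: str) -> dict:
    """Pull the first JSON object out of the model's free-form reply."""
    match = re.search(r"\{.*\}", response, re.DOTALL)
    if match:
        try:
            return json.loads(match.group(0))
        except json.JSONDecodeError:
            pass
    # If no valid JSON is found, surface the raw text as the reason so the
    # failure is visible rather than silently treated as a clean verdict.
    return {"result": "Unsafe", "reason": f"Unparseable model output: {response}"}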