import os
import torch
import gradio as gr
from transformers import pipeline
# Load the models
Abuse_detector = pipeline("text-classification", model="Hate-speech-CNERG/english-abusive-MuRIL")
Hate_Speech_detector = pipeline("text-classification", model="cardiffnlp/twitter-roberta-base-hate-latest")
Profanity_detector = pipeline("text-classification", model="tarekziade/pardonmyai")
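# Optional note (not in the original app): transformers pipelines run on CPU by
# default; passing device=0 to pipeline(...) would place a model on the first GPU
# if one is available.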
# Function to classify based on the user's choice
def classify_text(text, detection_type):
    if detection_type == "Abuse":
        result = Abuse_detector(text)
        # Map the raw model labels to readable names
        if result[0]['label'] == 'LABEL_0':
            result[0]['label'] = "Not-abusive"
        elif result[0]['label'] == 'LABEL_1':
            result[0]['label'] = "Abusive"
    elif detection_type == "Hate Speech":
        result = Hate_Speech_detector(text)
    elif detection_type == "Profanity":
        result = Profanity_detector(text)
    else:
        # No detection type selected yet
        return {"error": "Please select a detection type."}
    return result[0]
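# Illustrative example (output shape only; the actual label and score depend on
# the loaded model):
#   classify_text("some offensive text", "Abuse")
#   -> {'label': 'Abusive', 'score': <confidence between 0 and 1>}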
# Gradio interface
interface = gr.Interface(
    fn=classify_text,
    inputs=[
        gr.Textbox(label="Enter text"),
        gr.Radio(["Abuse", "Hate Speech", "Profanity"], label="Detection Type"),
    ],
    outputs="json",
    title="Abuse, Hate Speech, and Profanity Detection",
    description="Enter text and select the type of detection you want.",
)
# Launch the Gradio app
if __name__ == "__main__":
    interface.launch(share=True)