import os
import torch
import gradio as gr
from transformers import pipeline
# Load the models
Abuse_detector = pipeline("text-classification", model="Hate-speech-CNERG/english-abusive-MuRIL")
Hate_Speech_detector = pipeline("text-classification", model="cardiffnlp/twitter-roberta-base-hate-latest")
Profanity_detector = pipeline("text-classification", model="tarekziade/pardonmyai")
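# Note: each pipeline() call above downloads and caches its model from the Hugging Face Hub
# on first run; optionally pass device=0 to run inference on a GPU.
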
# Function to classify based on the user's choice
def classify_text(text, detection_type):
    if detection_type == "Abuse":
        result = Abuse_detector(text)
        # The abuse model reports generic ids (LABEL_0 / LABEL_1); rename them for readability
        if result[0]['label'] == 'LABEL_0':
            result[0]['label'] = "Not-abusive"
        elif result[0]['label'] == 'LABEL_1':
            result[0]['label'] = "Abusive"
    elif detection_type == "Hate Speech":
        result = Hate_Speech_detector(text)
    elif detection_type == "Profanity":
        result = Profanity_detector(text)
    else:
        # Guard against submitting before a detection type is selected (detection_type is None)
        return {"error": "Please select a detection type."}
    return result[0]
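
# A transformers text-classification pipeline returns a list of dicts shaped like
# [{'label': <str>, 'score': <float>}]; classify_text returns the first element, e.g.
# (the score below is illustrative, not actual model output):
#   classify_text("you did a great job", "Profanity") -> {'label': ..., 'score': 0.99}
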
# Gradio interface
interface = gr.Interface(
    fn=classify_text,
    inputs=[
        gr.Textbox(label="Enter text"),
        gr.Radio(["Abuse", "Hate Speech", "Profanity"], label="Detection Type"),
    ],
    outputs="json",
    title="Abuse, Hate Speech, and Profanity Detection",
    description="Enter text and select the type of detection you want.",
)

# Launch the Gradio app
if __name__ == "__main__":
    interface.launch(share=True)
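
# Quick sanity check without the UI (a sketch, assuming this file is saved as app.py;
# the printed label/score depend on the actual model weights):
#
#   >>> from app import classify_text
#   >>> classify_text("have a lovely day", "Hate Speech")
#   {'label': ..., 'score': ...}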