NLP_app / app_pages /page2_rubert_toxicity.py
import streamlit as st

from app_models.toxicity_MODEL import text2toxicity


def run():
    st.title('Toxicity Detection')
    st.write('This tool classifies text as toxic or non-toxic using RuBERT.')

    user_input = st.text_area("Enter text to classify", "Type your text here...")

    if st.button('Classify'):
        toxicity_score = text2toxicity(user_input)
        st.write('Toxicity score:', toxicity_score)

        # Optional: interpret the score for the user
        if toxicity_score > 0.5:
            st.write("This text is likely to be considered toxic.")
        else:
            st.write("This text is likely to be considered non-toxic.")