# Hugging Face Space: sentiment-analysis comparison demo.
# (The original "Spaces" / "Sleeping" lines were web-page status residue
# from a copy-paste, not part of the program.)
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Model 1: binary sentiment classifier fine-tuned on IMDB movie reviews
# (labels: 0 = negative, 1 = positive).
tokenizer1 = AutoTokenizer.from_pretrained("textattack/bert-base-uncased-imdb")
model1 = AutoModelForSequenceClassification.from_pretrained("textattack/bert-base-uncased-imdb")

# Model 2: multilingual star-rating classifier — labels 0-4 correspond to
# 1-5 stars, NOT a binary positive/negative scheme.
tokenizer2 = AutoTokenizer.from_pretrained("nlptown/bert-base-multilingual-uncased-sentiment")
model2 = AutoModelForSequenceClassification.from_pretrained("nlptown/bert-base-multilingual-uncased-sentiment")
def predict_sentiment(text):
    """Classify *text* with both sentiment models and return the two labels.

    Args:
        text: Input string to classify.

    Returns:
        tuple[str, str]: ``(sentiment1, sentiment2)`` — the verdicts from
        the IMDB binary model and the multilingual star-rating model, each
        one of "Negative", "Neutral", or "Positive".
    """
    # Model 1 (textattack/bert-base-uncased-imdb) is binary: label 0 is
    # negative, label 1 is positive — a "Neutral" branch is unreachable.
    inputs1 = tokenizer1(text, padding="longest", truncation=True, return_tensors="pt")
    outputs1 = model1(**inputs1)
    predicted_label1 = outputs1.logits.argmax().item()
    sentiment1 = "Positive" if predicted_label1 == 1 else "Negative"

    # Model 2 (nlptown/bert-base-multilingual-uncased-sentiment) predicts
    # 1-5 stars as labels 0-4. The original code treated it as binary, so
    # every 3/4/5-star prediction (labels 2-4) fell through to "Neutral".
    # Collapse the star scale to a three-way sentiment instead.
    inputs2 = tokenizer2(text, padding="longest", truncation=True, return_tensors="pt")
    outputs2 = model2(**inputs2)
    predicted_label2 = outputs2.logits.argmax().item()
    if predicted_label2 <= 1:    # 1-2 stars
        sentiment2 = "Negative"
    elif predicted_label2 == 2:  # 3 stars
        sentiment2 = "Neutral"
    else:                        # 4-5 stars
        sentiment2 = "Positive"

    return sentiment1, sentiment2
# Wire the prediction function into a side-by-side Gradio demo: one text
# input, two text outputs (one verdict per model).
demo = gr.Interface(
    fn=predict_sentiment,
    inputs="text",
    outputs=["text", "text"],
    title="Sentiment Analysis (Model 1 vs Model 2)",
    description="Compare sentiment predictions from two models.",
)

# Start the local Gradio server and serve the UI.
demo.launch()