import gradio as gr
import torch
from peft import PeftModel
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the base BERT model, attach the fine-tuned PEFT adapter for Twitter
# sentiment analysis, and load the matching tokenizer.
base_model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model = PeftModel.from_pretrained(base_model, "katsuchi/bert-base-uncased-twitter-sentiment-analysis")
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
def get_sentiment(input_sentence):
    """Return Negative/Positive probabilities for a single input sentence."""
    # Tokenize the sentence and move the tensors to the model's device.
    inputs = tokenizer(input_sentence, return_tensors="pt", padding=True, truncation=True, max_length=512)
    inputs = {k: v.to(model.device) for k, v in inputs.items()}

    # Run inference without tracking gradients.
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits

    # Convert logits to probabilities and map them to human-readable labels.
    probabilities = torch.nn.functional.softmax(logits, dim=-1).squeeze().cpu().numpy()
    labels = ["Negative", "Positive"]
    result = {labels[i]: round(float(prob), 3) for i, prob in enumerate(probabilities)}
    return result
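
# Illustrative call, for reference only: the returned dict maps each label to a
# probability rounded to three decimals. The numbers below are made up, not
# actual model output.
# get_sentiment("I love this product!")
# -> {"Negative": 0.02, "Positive": 0.98}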
# Example sentences shown in the interface
examples = [
    ["I love this product!"],
    ["This is the worst experience ever."],
    ["The movie was okay, not great but not bad."],
    ["Absolutely terrible, do not buy!"],
    ["I feel amazing today!"],
]
# Build the Gradio interface around the prediction function.
iface = gr.Interface(
    fn=get_sentiment,
    inputs=gr.Textbox(label="Enter a sentence for sentiment analysis"),
    outputs=gr.JSON(label="Sentiment Probabilities"),
    title="Sentiment Analysis with BERT",
    description="Enter a sentence, and this model will predict the sentiment (positive/negative) along with the probabilities.<br><br>Check out the source code on <a href='https://github.com/katsuchi23/Twitter-Sentiment-Analysis' target='_blank'>GitHub</a>!<br><br>Here are some example sentences to test:",
    examples=examples,
)
iface.launch()