# NOTE: "Spaces: Sleeping / Sleeping" below is residue from the Hugging Face
# Spaces page capture, not part of the application code:
# Spaces: Sleeping, Sleeping
# Imports
import os
from typing import Union
from src.utils import preprocess
from fastapi import FastAPI
from fastapi.responses import RedirectResponse
from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoConfig
import numpy as np
# softmax converts raw model logits into a probability distribution
from scipy.special import softmax

# Config
app = FastAPI()  # interactive API documentation is auto-generated at /docs

# Loading ML/DL components.
# NOTE(review): SENTENCE_TRANSFORMERS_HOME configures the sentence-transformers
# cache specifically; presumably intended to keep downloads inside the project
# folder — confirm it covers the transformers downloads below as well.
os.environ['SENTENCE_TRANSFORMERS_HOME'] = './.cache'
tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
model_path = "Junr-syl/tweet_sentiments_analysis"  # plain string: was a needless f-string
config = AutoConfig.from_pretrained(model_path)
# Map class indices to human-readable sentiment labels.
config.id2label = {0: 'NEGATIVE', 1: 'NEUTRAL', 2: 'POSITIVE'}
model = AutoModelForSequenceClassification.from_pretrained(model_path)
# Endpoints
@app.get("/")
def read_root():
    """Home endpoint: redirect visitors to the auto-generated API docs at /docs."""
    # Fix: the @app.get("/") decorator had been commented out along with the old
    # implementation, leaving this function unregistered — the root path 404'd.
    return RedirectResponse(url="/docs")
# NOTE(review): restored route registration — the function was defined but never
# exposed; route path inferred from the function's purpose, confirm against callers.
@app.post("/predict")
def predict(text: str):
    """Prediction endpoint: classify the sentiment of a tweet.

    Args:
        text: raw tweet text to classify.

    Returns:
        dict with the preprocessed text, the predicted sentiment label and its
        confidence score, or ``{"error": ...}`` if any step failed.
    """
    print("\n[Info] Starting prediction")
    try:
        text = preprocess(text)

        # Tokenize and run the PyTorch model.
        encoded_input = tokenizer(text, return_tensors='pt')
        output = model(**encoded_input)
        scores = output[0][0].detach().numpy()
        scores = softmax(scores)  # logits -> probabilities

        # Pick the highest-probability class directly; the original full
        # argsort + reverse was only ever used to read the top-1 entry.
        best = int(np.argmax(scores))
        predicted_label = config.id2label[best]
        predicted_score = float(scores[best])

        response = {"text": text,
                    "predicted_label": predicted_label,
                    "confidence_score": predicted_score
                    }

        print("\n[Info] Prediction done.")
        print("\n[Info] Have a look at the API response")
        print(response)
        return response

    except Exception as e:
        # Best-effort error reporting: surface the failure in the response
        # body instead of letting the request crash with a 500.
        return {
            "error": str(e)
        }