File size: 1,697 Bytes
d812ab5
b01c113
 
d812ab5
 
 
b01c113
 
 
 
 
 
 
d812ab5
 
 
b01c113
 
79fd2ad
b01c113
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cc6b847
b01c113
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
from fastapi import FastAPI
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
import torch

# FastAPI application instance; endpoints are registered on it below.
app = FastAPI()



# Multilingual 3-class sentiment model (labels 0..2: negative/neutral/positive).
# NOTE: loading happens at import time and may download weights from the Hub.
model_name = "cardiffnlp/twitter-xlm-roberta-base-sentiment"
sentiment_model = AutoModelForSequenceClassification.from_pretrained(model_name)
sentiment_tokenizer = AutoTokenizer.from_pretrained(model_name)
# Register a fourth, synthetic label; /sentiment_score appends a heuristic
# "mixed" logit to the model's three outputs and looks this id up.
sentiment_model.config.id2label[3] = "mixed"

@app.get("/")
def greet_json():
    """Health-check root endpoint returning a static JSON greeting."""
    greeting = {"Hello": "World!"}
    return greeting


@app.post("/sentiment_score")
async def sentiment_score(text: str):
    """Score *text* on four sentiment classes and return them ranked.

    The model emits three logits (negative / neutral / positive). A fourth,
    synthetic "mixed" logit is appended before the softmax; its label id (3)
    is registered as "mixed" on the model config at startup.

    Args:
        text: arbitrary-length input; only the first 2500 characters are used.

    Returns:
        List of {"label": str, "score": float} dicts, highest score first.
    """
    # truncation=True caps the sequence at the model's max length — the raw
    # 2500-char slice alone can still tokenize past it and crash inference.
    inputs = sentiment_tokenizer(text[:2500], truncation=True, return_tensors='pt')

    with torch.no_grad():
        logits = sentiment_model(**inputs).logits  # shape (1, 3)

    # Shift every logit by |neutral| so the heuristic below operates on a
    # consistent scale regardless of the neutral logit's sign.
    logits = logits + logits[0, 1].abs()

    # Heuristic "mixed" logit: high when the negative and positive logits are
    # close (text carries both polarities), shrinking as they diverge. The
    # floor-division term damps it further when the neutral logit dominates
    # the polar ones (logits[0, ::2] selects negative and positive).
    mixed = (
        1
        - torch.abs(logits[0, 0] - logits[0, -1])
        * (2 + (logits[0, 1] // torch.max(torch.abs(logits[0, ::2]))))
    )
    logits = torch.cat((logits, mixed.unsqueeze(0).unsqueeze(0)), dim=-1)

    softmax = torch.nn.functional.softmax(logits, dim=-1)

    # All four labels, ordered by descending probability.
    return [
        {
            "label": sentiment_model.config.id2label[predicted_class_id.tolist()],
            "score": softmax[0, predicted_class_id].tolist(),
        }
        for predicted_class_id in softmax.argsort(dim=-1, descending=True)[0]
    ]