from transformers import BertForSequenceClassification, BertTokenizer
import torch
from typing import Dict
import gradio as gr

# Load the fine-tuned BERT checkpoint and its tokenizer from the local save directory.
model = BertForSequenceClassification.from_pretrained("./Personality_detection_Classification_Save/", num_labels=5)
tokenizer = BertTokenizer.from_pretrained("./Personality_detection_Classification_Save/", do_lower_case=True)
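
# Optional sketch (not part of the original script): run inference on a GPU when
# one is available. If enabled, the tensors built inside the prediction function
# below must be moved to the same device with .to(device) before the forward pass.
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# model = model.to(device)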

# Map trait names to logit indices (and back) so the checkpoint is self-describing.
model.config.label2id = {
    "Extroversion": 0,
    "Neuroticism": 1,
    "Agreeableness": 2,
    "Conscientiousness": 3,
    "Openness": 4,
}
model.config.id2label = {
    0: "Extroversion",
    1: "Neuroticism",
    2: "Agreeableness",
    3: "Conscientiousness",
    4: "Openness",
}
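
# For illustration (not in the original script): with id2label populated,
# model.config.id2label[3] yields "Conscientiousness", and calling
# model.save_pretrained("./Personality_detection_Classification_Save/") would
# persist both mappings into config.json.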

def Personality_Detection_from_reviews_submitted(model_input: str) -> Dict[str, float]:
    # Tokenize the whole essay once; BERT BASE supports at most 512 positions,
    # so truncate there.
    encoded = tokenizer.encode_plus(
        model_input,
        max_length=512,
        truncation=True,
        return_tensors="pt",
    )
    with torch.no_grad():  # inference only; no gradients needed
        outs = model(encoded["input_ids"], attention_mask=encoded["attention_mask"])
    # Multi-label head: an independent sigmoid score per Big-Five trait.
    pred_label = torch.sigmoid(outs.logits)
    return {
        "Extroversion": float(pred_label[0][0]),
        "Neuroticism": float(pred_label[0][1]),
        "Agreeableness": float(pred_label[0][2]),
        "Conscientiousness": float(pred_label[0][3]),
        "Openness": float(pred_label[0][4]),
    }
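
# A minimal sanity check outside Gradio (illustrative input; the exact scores
# depend on the fine-tuned checkpoint):
#
#   scores = Personality_Detection_from_reviews_submitted(
#       "I love meeting new people and trying new things."
#   )
#   print(scores)  # e.g. {"Extroversion": 0.71, "Neuroticism": 0.32, ...}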

model_input = gr.Textbox(
    placeholder="Input text here (note: this model is trained to classify essays and is still in progress)",
    show_label=False,
)
model_output = gr.Label(
    num_top_classes=5,  # one score per Big-Five trait
    show_label=True,
    label="Big-Five personality trait scores assigned to this text",
)

examples = [
    # Each example is one essay; adjacent string literals are concatenated.
    "Well, here we go with the stream of consciousness essay. I used to do things like this in high school sometimes. "
    "They were pretty interesting, but I often find myself with a lack of things to say. "
    "I normally consider myself someone who gets straight to the point. I wonder if I should hit enter any time to send this back to the front. "
    "Maybe I'll fix it later. My friend is playing guitar in my room now. Sort of playing anyway. "
    "More like messing with it. He's still learning. There's a drawing on the wall next to me.",
    "An open keyboard and buttons to push. The thing finally worked and I need not use periods, commas, and all those things. "
    "Double space after a period. We can't help it. I put spaces between my words and I do my happy little assignment of jibber-jabber. "
    "Babble babble babble for 20 relaxing minutes and I feel silly and grammatically incorrect. I am linked to an unknown reader. "
    "A graduate student with an absurd job. I type. I jabber and I think about dinoflagellates. About sunflower crosses and about "
    "the fiberglass that has to be added to my lips via clove cigarettes and I think about things that I shouldn't be thinking. "
    "I know I shouldn't be thinking, or writing let's say. So I don't. Thoughts don't solidify. They lodge in the back, behind my tongue maybe.",
]

title = "Big Five Personality Traits Detection from Expository Text Features"
description = (
    "In traditional machine learning, it can be challenging to train an accurate model when there is little labeled "
    "data for the task or domain of interest. Transfer learning addresses this by reusing labeled data from a similar "
    "task or domain: the knowledge gained while solving the source task in the source domain is stored and applied to "
    "the problem of interest, which makes it possible to train effective models even in data-scarce scenarios. In this "
    "work, I used transfer learning to fine-tune a BERT BASE UNCASED model on a Big-Five personality traits dataset."
)

footer = "<center>© 2023 Thoucentric</center>"

app = gr.Interface(
    fn=Personality_Detection_from_reviews_submitted,
    inputs=model_input,
    outputs=model_output,
    examples=examples,
    title=title,
    description=description,
    article=footer,
    allow_flagging="never",
    analytics_enabled=False,
)
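
# share=True asks Gradio for a temporary public *.gradio.live URL in addition to
# the local server, and inline=True embeds the app when running in a notebook.
# Setting show_error=True instead would surface Python tracebacks in the UI,
# which is handy while debugging.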
app.launch(inline=True, share=True, show_error=False)