# Chitti_ver1 / app.py
# HuggingFace Space file (commit 5af0347, "Update app.py", 1.47 kB, by Pavankalyan).
# The lines above were web-page chrome ("raw / history blame") captured when this
# file was scraped; preserved here as a comment so the module parses cleanly.
import gradio as gr
import pandas as pd
from retrieval import *
import os
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
# Extractive QA model used to pull answer spans out of retrieved passages.
model_name = "deepset/deberta-v3-large-squad2"
nlp = pipeline('question-answering', model=model_name, tokenizer=model_name)

# SECURITY(review): a Hugging Face *write* token was hardcoded here. Prefer the
# HF_TOKEN environment variable; the literal is kept only as a backward-compatible
# fallback and should be revoked/rotated, since it is exposed in source control.
hf_token = os.environ.get("HF_TOKEN", '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')
# Flagged examples from the UI are appended to this HF dataset.
hf_writer = gr.HuggingFaceDatasetSaver(hf_token, "Pavankalyan/chitti_data")

# Retrieval corpus: one candidate passage per row of the "text" column.
df = pd.read_csv("Responses.csv")
text = list(df["text"].values)
def chitti(query):
    """Answer *query* with two extracted answers and their source passages.

    Retrieves the top-5 passages for the query, drops shorter passages that
    are substrings of longer ones, then runs the extractive QA pipeline on
    the two best remaining passages.

    Returns a 4-element list: [answer1, passage1, answer2, passage2],
    padded with empty strings if fewer than two passages survive.
    """
    re_table = search(query, text)
    candidates = [re_table[i][0] for i in range(5)]

    # Indices of candidates ordered shortest-to-longest, so each candidate is
    # only checked for containment in the next-longer one.
    order = sorted(range(len(candidates)), key=lambda k: len(candidates[k]))
    duplicate_idxs = {
        shorter
        for shorter, longer in zip(order, order[1:])
        if candidates[shorter] in candidates[longer]
    }
    # BUGFIX: pop in *descending* index order. The original popped in the
    # order the duplicates were found, so each pop shifted the positions of
    # later indices and removed the wrong elements.
    for idx in sorted(duplicate_idxs, reverse=True):
        candidates.pop(idx)

    # BUGFIX: the original indexed candidates[0] and candidates[1]
    # unconditionally, raising IndexError when dedup left < 2 passages.
    results = []
    for context in candidates[:2]:
        answer = nlp({'question': query, 'context': context})['answer']
        results.extend([answer, context])
    # The Gradio interface expects exactly 4 text outputs.
    while len(results) < 4:
        results.append("")
    return results
# Gradio UI: one text input (the user's question) and four text outputs
# (answer 1, its source passage, answer 2, its source passage) — matching
# the 4-element list returned by chitti().
demo = gr.Interface(
fn=chitti,
inputs=["text"],
outputs=["text","text","text","text"],
# Manual flagging with labels "0"/"1"/"None"; flagged rows are written to
# the Pavankalyan/chitti_data dataset through hf_writer.
allow_flagging = "manual",
flagging_options = ["0","1","None"],
flagging_callback=hf_writer
)
demo.launch()