import gradio as gr
from transformers import pipeline, AutoModelForQuestionAnswering, AutoTokenizer
import json

# Load the dataset
with open('faq_dataset.json') as f:
    faq_data = json.load(f)

# Initialize the model and tokenizer
model_name = "distilbert-base-uncased-distilled-squad"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)
nlp = pipeline("question-answering", model=model, tokenizer=tokenizer)

# Create a function to get answers
def get_answer(question):
    for item in faq_data:
        context = item['answer']
        result = nlp(question=question, context=context)
        if result['score'] > 0.5:
            return result['answer']
    return "Sorry, I don't know the answer to that question."

# Create the Gradio interface with updated components
iface = gr.Interface(
    fn=get_answer,
    inputs=gr.Textbox(label="Ask a Question"),
    outputs=gr.Textbox(label="Answer"),
    title="FAQ Chatbot",
    description="Ask a question and get an answer from the FAQ dataset."
)

# Launch the interface
iface.launch()
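
The app reads faq_dataset.json from the Space, but that file is not shown here. A minimal sketch of a compatible dataset, assuming it is a list of objects whose 'answer' field is used as the QA context (the 'question' field and the sample entries below are illustrative, not from the Space):

import json

# Hypothetical sample entries; get_answer only reads the 'answer' field
sample_faq = [
    {"question": "What are your opening hours?",
     "answer": "We are open Monday to Friday, 9am to 5pm."},
    {"question": "How do I reset my password?",
     "answer": "Use the 'Forgot password' link on the login page and follow the emailed instructions."},
]

with open('faq_dataset.json', 'w') as f:
    json.dump(sample_faq, f, indent=2)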