import gradio as gr
from gradio.components import Textbox
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, T5ForConditionalGeneration
from peft import PeftModel, PeftConfig
import torch
import datasets
# Load your fine-tuned model and tokenizer
model_name = "google/flan-t5-large"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)
model.load_adapter("legacy107/adapter-flan-t5-large-bottleneck-adapter-cpgQA", source="hf")
model.set_active_adapters("question_answering")
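# NOTE (assumption about the environment): load_adapter(..., source="hf") and
# set_active_adapters come from the adapter-transformers package, not vanilla
# transformers; this assumes that package is installed in the Space.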
peft_name = "legacy107/flan-t5-large-ia3-bioasq-paraphrase"
peft_config = PeftConfig.from_pretrained(peft_name)
paraphrase_model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
paraphrase_model = PeftModel.from_pretrained(paraphrase_model, peft_name)
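
# A minimal, optional addition: switch both models to inference mode so dropout
# and other training-time layers are disabled during generation.
model.eval()
paraphrase_model.eval()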
max_length = 512
max_target_length = 128
# Load your dataset
dataset = datasets.load_dataset("minh21/cpgQA-v1.0-unique-context-test-10-percent-validation-10-percent", split="test")
dataset = dataset.shuffle()
dataset = dataset.select(range(5))
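# The examples shown in the UI vary between launches; passing a fixed seed,
# e.g. dataset.shuffle(seed=42), would make the sample reproducible.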
def paraphrase_answer(question, answer):
    # Combine the question and the generated answer into a single prompt
    input_text = f"question: {question}. Paraphrase the answer to make it more natural answer: {answer}"

    # Tokenize the input text
    input_ids = tokenizer(
        input_text,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=max_length,
    ).input_ids

    # Generate the paraphrased answer
    with torch.no_grad():
        generated_ids = paraphrase_model.generate(input_ids=input_ids, max_new_tokens=max_target_length)

    # Decode and return the paraphrased answer
    paraphrased_answer = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
    return paraphrased_answer
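# Hypothetical usage (illustrative only; real outputs depend on the adapters):
#   paraphrase_answer("What does CPG stand for?", "clinical practice guideline")
#   might yield a full sentence like "CPG stands for clinical practice guideline."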
# Define your function to generate answers
def generate_answer(question, context):
    # Combine question and context
    input_text = f"question: {question} context: {context}"

    # Tokenize the input text
    input_ids = tokenizer(
        input_text,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=max_length,
    ).input_ids

    # Generate the answer
    with torch.no_grad():
        generated_ids = model.generate(input_ids=input_ids, max_new_tokens=max_target_length)

    # Decode the generated answer
    generated_answer = tokenizer.decode(generated_ids[0], skip_special_tokens=True)

    # Paraphrase the answer into a more natural phrasing
    paraphrased_answer = paraphrase_answer(question, generated_answer)

    return generated_answer, paraphrased_answer
# Define a function to list examples from the dataset
def list_examples():
    examples = []
    for example in dataset:
        context = example["context"]
        question = example["question"]
        examples.append([question, context])
    return examples
# Create a Gradio interface
iface = gr.Interface(
    fn=generate_answer,
    inputs=[
        Textbox(label="Question"),
        Textbox(label="Context"),
    ],
    outputs=[
        Textbox(label="Generated Answer"),
        Textbox(label="Natural Answer"),
    ],
    examples=list_examples(),
)
# Launch the Gradio interface
iface.launch()
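
# If running outside Hugging Face Spaces (e.g. locally or in a notebook), a public
# link can be requested with iface.launch(share=True); on Spaces the default is fine.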