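# Streamlit demo by Georgios Ioannou that chains Hugging Face models:
# visual question answering -> fill-mask -> BlenderBot -> GPT-2 -> BERTopic.
# Launch with the Streamlit CLI, e.g. `streamlit run app.py` (the file name
# "app.py" is assumed here).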
import streamlit as st

from bertopic import BERTopic
from PIL import Image
from transformers import (
    pipeline,
    BlenderbotTokenizer,
    BlenderbotForConditionalGeneration,
)
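
# Each model loader below is cached as a shared resource so the heavyweight
# models are created once per process instead of on every Streamlit rerun.
# st.cache_resource replaces the deprecated st.cache(allow_output_mutation=True)
# from older Streamlit versions.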

@st.cache_resource
def load_vqa_pipeline():
    # ViLT model fine-tuned for visual question answering.
    return pipeline(task="visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")


@st.cache_resource
def load_bbu_pipeline():
    # BERT base (uncased) used as a fill-mask pipeline.
    return pipeline(task="fill-mask", model="bert-base-uncased")


@st.cache_resource
def load_blenderbot_model():
    # Return the tokenizer together with the model; both are needed later to
    # encode the prompt and decode the generated reply.
    model_name = "facebook/blenderbot-400M-distill"
    tokenizer = BlenderbotTokenizer.from_pretrained(pretrained_model_name_or_path=model_name)
    model = BlenderbotForConditionalGeneration.from_pretrained(pretrained_model_name_or_path=model_name)
    return tokenizer, model


@st.cache_resource
def load_gpt2_pipeline():
    # GPT-2 used for open-ended text generation.
    return pipeline(task="text-generation", model="gpt2")


@st.cache_resource
def load_topic_models():
    # Two pretrained BERTopic models loaded from the Hugging Face Hub.
    topic_model_1 = BERTopic.load(path="davanstrien/chat_topics")
    topic_model_2 = BERTopic.load(path="MaartenGr/BERTopic_ArXiv")
    return topic_model_1, topic_model_2


st.title("Georgios Ioannou's Visual Question Answering With Hugging Face")
st.write("Drag and drop an image file here.")

image = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])

if image is not None:
    image = Image.open(image)
    st.image(image, caption="Uploaded Image", use_column_width=True)

    question = st.text_input("What's your question?")

    vqa_pipeline = load_vqa_pipeline()
    bbu_pipeline = load_bbu_pipeline()
    tokenizer, facebook_model = load_blenderbot_model()
    gpt2_pipeline = load_gpt2_pipeline()
    topic_model_1, topic_model_2 = load_topic_models()
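
    # 1. Answer the user's question about the image; keep only the top prediction.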
    vqa_pipeline_output = vqa_pipeline(image, question, top_k=5)[0]
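
    # 2. Embed the VQA answer in a masked sentence and let BERT fill in [MASK].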
    text = (
        "I love "
        + str(vqa_pipeline_output["answer"])
        + " and I would like to know how to [MASK]."
    )
    bbu_pipeline_output = bbu_pipeline(text)
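
    # 3. Feed the highest-scoring filled-in sentence to BlenderBot for a reply.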
    utterance = bbu_pipeline_output[0]["sequence"]
    inputs = tokenizer(utterance, return_tensors="pt")
    result = facebook_model.generate(**inputs)
    facebook_model_output = tokenizer.decode(result[0])
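
    # Strip BlenderBot's special start/end tokens from the decoded reply.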
    facebook_model_output = facebook_model_output.replace("<s> ", "")
    facebook_model_output = facebook_model_output.replace("<s>", "")
    facebook_model_output = facebook_model_output.replace("</s>", "")
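
    # 4. Continue the chatbot's reply with GPT-2 text generation.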
    gpt2_pipeline_output = gpt2_pipeline(facebook_model_output)[0]["generated_text"]
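
    # 5./6. Assign the generated text to a topic with each BERTopic model and
    # keep the predicted topic's keyword representation.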
    topic, prob = topic_model_1.transform(gpt2_pipeline_output)
    topic_model_1_output = topic_model_1.get_topic_info(topic[0])["Representation"][0]

    topic, prob = topic_model_2.transform(gpt2_pipeline_output)
    topic_model_2_output = topic_model_2.get_topic_info(topic[0])["Representation"][0]
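
    # Raw outputs from every stage, printed for inspection.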
    st.write("-" * 150)
    st.write("vqa_pipeline_output =", vqa_pipeline_output)
    st.write("bbu_pipeline_output =", bbu_pipeline_output)
    st.write("facebook_model_output =", facebook_model_output)
    st.write("gpt2_pipeline_output =", gpt2_pipeline_output)
    st.write("topic_model_1_output =", topic_model_1_output)
    st.write("topic_model_2_output =", topic_model_2_output)
    st.write("-" * 150)
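
    # Human-readable summary of the full chain.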
    st.write("SUMMARY")
    st.subheader("Your Image:")
    st.image(image, caption="Your Image", use_column_width=True)
    st.subheader("Your Question:")
    st.write(question)
    st.write("-" * 100)

    st.subheader("1. Highest Predicted Answer For Your Question:")
    st.write(vqa_pipeline_output["answer"])
    st.write(text)

    st.subheader("2. Highest Predicted Sequence For [MASK] Based On 1.:")
    st.write(bbu_pipeline_output[0]["sequence"])

    st.subheader("3. Conversation Based On The Sequence From 2.:")
    st.write(facebook_model_output)

    st.subheader("4. Text Generated Based On The Conversation From 3.:")
    st.write(gpt2_pipeline_output)

    st.subheader("5. Highest Predicted Topic (Model_1) For The Text From 4.:")
    st.write(topic_model_1_output)

    st.subheader("6. Highest Predicted Topic (Model_2) For The Text From 4.:")
    st.write(topic_model_2_output)
    st.write("-" * 150)