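"""Streamlit demo: Visual Question Answering chained through five Hugging Face models.

Flow: an uploaded image and a user question go through VQA (ViLT); the answer is
slotted into a fill-mask template (BERT); the unmasked sentence is sent to
Blenderbot for a conversational reply; GPT-2 extends that reply; and two
pretrained BERTopic models each assign a topic to the generated text.
"""
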
import streamlit as st
from bertopic import BERTopic
from PIL import Image
from transformers import (
    pipeline,
    BlenderbotTokenizer,
    BlenderbotForConditionalGeneration,
)


# Function to load VQA pipeline
@st.cache(allow_output_mutation=True)
def load_vqa_pipeline():
    return pipeline(task="visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")


# Function to load BERT-based pipeline
@st.cache(allow_output_mutation=True)
def load_bbu_pipeline():
    return pipeline(task="fill-mask", model="bert-base-uncased")


# Function to load Blenderbot tokenizer and model (both are returned so the
# tokenizer is available at the call site)
@st.cache(allow_output_mutation=True)
def load_blenderbot_model():
    model_name = "facebook/blenderbot-400M-distill"
    tokenizer = BlenderbotTokenizer.from_pretrained(pretrained_model_name_or_path=model_name)
    model = BlenderbotForConditionalGeneration.from_pretrained(pretrained_model_name_or_path=model_name)
    return tokenizer, model


# Function to load GPT-2 pipeline
@st.cache(allow_output_mutation=True)
def load_gpt2_pipeline():
    return pipeline(task="text-generation", model="gpt2")


# Function to load BERTopic models
@st.cache(allow_output_mutation=True)
def load_topic_models():
    topic_model_1 = BERTopic.load(path="davanstrien/chat_topics")
    topic_model_2 = BERTopic.load(path="MaartenGr/BERTopic_ArXiv")
    return topic_model_1, topic_model_2
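

# Note: @st.cache(allow_output_mutation=True) keeps these heavy models in
# memory across Streamlit reruns so each is loaded only once per session; on
# newer Streamlit releases st.cache is deprecated and st.cache_resource is the
# drop-in replacement for caching models like these.
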
st.title("Georgios Ioannou's Visual Question Answering With Hugging Face")
st.write("Drag and drop an image file here.")
# Allow the user to upload an image file
image = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
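# st.file_uploader returns None until a file is uploaded, and an UploadedFile
# (a file-like object) afterwards, which PIL.Image.open accepts directly.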
if image is not None:
    # Display the uploaded image
    image = Image.open(image)
    st.image(image, caption="Uploaded Image", use_column_width=True)

    question = st.text_input("What's your question?")

    # Load models using the cache
    vqa_pipeline = load_vqa_pipeline()
    bbu_pipeline = load_bbu_pipeline()
    tokenizer, facebook_model = load_blenderbot_model()
    gpt2_pipeline = load_gpt2_pipeline()
    topic_model_1, topic_model_2 = load_topic_models()
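
    # Note: st.text_input returns "" until the user types, so the chain below
    # runs once with an empty question as soon as an image is uploaded; a
    # hardened version might guard the calls with `if question:` first.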
    # Model 1.
    vqa_pipeline_output = vqa_pipeline(image, question, top_k=5)[0]
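    # The VQA pipeline returns a list of {"score", "answer"} dicts ranked by
    # score (top_k=5 here); [0] above keeps only the highest-scoring answer.
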
    # Model 2.
    text = (
        "I love "
        + str(vqa_pipeline_output["answer"])
        + " and I would like to know how to [MASK]."
    )
    bbu_pipeline_output = bbu_pipeline(text)
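    # The fill-mask pipeline returns a list of candidate dicts with keys such
    # as "sequence" (the text with [MASK] filled in), "score", "token", and
    # "token_str"; only the top-ranked "sequence" is used below.
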
    # Model 3.
    utterance = bbu_pipeline_output[0]["sequence"]
    inputs = tokenizer(utterance, return_tensors="pt")
    result = facebook_model.generate(**inputs)
    facebook_model_output = tokenizer.decode(result[0])
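    # tokenizer.decode keeps Blenderbot's special tokens ("<s>", "</s>"), so
    # the next step strips them by hand; decoding with skip_special_tokens=True
    # would achieve the same in one call.
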
    # Model 4.
    facebook_model_output = facebook_model_output.replace("<s> ", "")
    facebook_model_output = facebook_model_output.replace("<s>", "")
    facebook_model_output = facebook_model_output.replace("</s>", "")
    gpt2_pipeline_output = gpt2_pipeline(facebook_model_output)[0]["generated_text"]

    # Model 5.
    topic, prob = topic_model_1.transform(gpt2_pipeline_output)
    topic_model_1_output = topic_model_1.get_topic_info(topic[0])["Representation"][0]

    topic, prob = topic_model_2.transform(gpt2_pipeline_output)
    topic_model_2_output = topic_model_2.get_topic_info(topic[0])["Representation"][0]
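    # BERTopic.transform returns (topics, probabilities); get_topic_info(topic)
    # yields a one-row DataFrame whose "Representation" column (available in
    # recent BERTopic releases) holds the topic's representative keywords.
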
    st.write("-" * 150)
    st.write("vqa_pipeline_output =", vqa_pipeline_output)
    st.write("bbu_pipeline_output =", bbu_pipeline_output)
    st.write("facebook_model_output =", facebook_model_output)
    st.write("gpt2_pipeline_output =", gpt2_pipeline_output)
    st.write("topic_model_1_output =", topic_model_1_output)
    st.write("topic_model_2_output =", topic_model_2_output)
    st.write("-" * 150)

    st.write("SUMMARY")
    st.subheader("Your Image:")
    st.image(image, caption="Your Image", use_column_width=True)
    st.subheader("Your Question:")
    st.write(question)
    st.write("-" * 100)

    st.subheader("1. Highest Predicted Answer For Your Question:")
    st.write(vqa_pipeline_output["answer"])
    st.write(text)
    st.subheader("2. Highest Predicted Sequence For [MASK] Based On 1.:")
    st.write(bbu_pipeline_output[0]["sequence"])
    st.subheader("3. Conversational Reply Based On 2.:")
    st.write(facebook_model_output)
    st.subheader("4. Text Generated Based On 3.:")
    st.write(gpt2_pipeline_output)
    st.subheader("5. Highest Predicted Topic From Model_1 For The Answer In 4.:")
    st.write(topic_model_1_output)
    st.subheader("6. Highest Predicted Topic From Model_2 For The Answer In 4.:")
    st.write(topic_model_2_output)
    st.write("-" * 150)
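
# To run this app locally (assuming the script is saved as app.py):
#   streamlit run app.py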