SummerizeIt / app.py
Grey01's picture
Update app.py
fe48816 verified
raw
history blame
1.87 kB
# Keras reads KERAS_BACKEND at import time, so the backend must be chosen
# *before* keras / keras_nlp are imported — setting it afterwards is ignored.
# Available backend options are: "jax", "tensorflow", "torch".
import os

os.environ["KERAS_BACKEND"] = "tensorflow"

import streamlit as st
import tensorflow as tf
from tensorflow import keras
import keras_nlp
import PyPDF2
import docx2txt
import huggingface_hub

# Fine-tuned BART summarization model, pulled from the Hugging Face Hub.
bart_billsum = keras_nlp.models.BartSeq2SeqLM.from_preset("hf://Grey01/bart_billsum")
st.title("SummarizeIt")

# File uploader
uploaded_file = st.file_uploader("Choose a file", type=["pdf", "txt", "docx"])

# Extract the document contents as a single string. (The original code mixed
# a list and strings here, and appended PDF text character-by-character.)
text = ""
if uploaded_file is not None:
    if uploaded_file.type == "application/pdf":
        pdf_reader = PyPDF2.PdfReader(uploaded_file)
        # extract_text() may return None for pages with no text layer,
        # hence the `or ""` fallback.
        text = "".join(page.extract_text() or "" for page in pdf_reader.pages)
    elif uploaded_file.type == "text/plain":
        text = uploaded_file.read().decode("utf-8")
    elif uploaded_file.type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
        text = docx2txt.process(uploaded_file)

# Text input for direct text entry; pasted text takes priority over the file
# (the original else-branch did `text.append(text)`, a self-append bug).
user_input = st.text_area("Or paste your text here:")
if user_input:
    text = user_input
def generate_text(model, input_texts, max_length=500, print_time_taken=False):
    """Summarize `input_texts` with `model`, chunking long inputs.

    The input is split into 512-character chunks so each piece stays within
    the model's context window; each chunk is summarized independently and
    the chunk summaries are joined into one string.

    Args:
        model: a seq2seq model exposing ``generate(text, max_length=...)``.
        input_texts: the text to summarize (a string, or any sliceable
            sequence supporting ``len``).
        max_length: maximum length of each generated chunk summary.
        print_time_taken: unused; kept for backward compatibility.

    Returns:
        A single string — the per-chunk summaries joined by blank lines
        (empty string when the input is empty).
    """
    # Split into fixed-size 512-character chunks.
    chunks = [input_texts[i:i + 512] for i in range(0, len(input_texts), 512)]

    summaries = []
    for chunk in chunks:
        # Bug fix: summarize the current chunk — the original passed the
        # whole `input_texts` every iteration, making chunking dead code.
        summary = model.generate(chunk, max_length=max_length)
        summaries.append(summary)

    # Bug fix: return all chunk summaries, not just the last one.
    return "\n\n".join(summaries)
# Only invoke the model when there is actually something to summarize —
# the original ran generate on every page load, even before any input.
if text:
    generated_summaries = generate_text(
        bart_billsum,
        text,  # the collected document / pasted text
    )
    st.subheader("Generated Summary:")
    st.write(generated_summaries)
else:
    st.info("Upload a file or paste some text to get a summary.")