import streamlit as st
from transformers import BartForConditionalGeneration, BartTokenizer

# Load the model and tokenizer from the local directory
model_path = "disilbart-med-summary"  # Replace with the actual path
tokenizer = BartTokenizer.from_pretrained(model_path)
model = BartForConditionalGeneration.from_pretrained(model_path)
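# Note: Streamlit re-runs this script on every user interaction, which reloads
# the model each time. A minimal sketch of cached loading (an assumption,
# relying on st.cache_resource available in Streamlit >= 1.18) could look like:
#
#     @st.cache_resource
#     def load_model(path):
#         return BartTokenizer.from_pretrained(path), BartForConditionalGeneration.from_pretrained(path)
#
#     tokenizer, model = load_model(model_path)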

# Function to generate summary based on input
def generate_summary(input_text):
    # Tokenize the input text, truncating to the model's maximum input length
    input_ids = tokenizer.encode(input_text, return_tensors="pt", truncation=True, max_length=1024)

    # Generate the summary (length capped at BART's 1024-token positional limit)
    summary_ids = model.generate(input_ids, max_length=1024, num_beams=4, no_repeat_ngram_size=2)

    # Decode the summary
    summary_text = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
    return summary_text

# Streamlit app
def main():
    # Apply custom styling for the title
    st.markdown("<h3 style='text-align: center; color: #333;'>Medical Summary - Text Generation</h3>", unsafe_allow_html=True)

    # Textbox for user input
    user_input = st.text_area("Enter Text:", "")

    # Button to trigger text generation
    if st.button("Generate Summary"):
        if user_input:
            # Call the generate_summary function with user input
            result = generate_summary(user_input)

            # Display the generated summary in a text area with word wrap
            st.text_area("Generated Summary:", result, key="generated_summary")
        else:
            st.warning("Please enter some text to summarize.")

# Run the Streamlit app
if __name__ == "__main__":
    main()
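
# Usage note (the filename is an assumption): save this script as app.py and
# launch it with `streamlit run app.py`.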