import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
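# Streamlit app: typically launched with `streamlit run <script_name>.py`
# (<script_name> stands in for whatever this file is saved as)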
  
# Download the summarization tokenizer and model from the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained("tuner007/pegasus_summarizer")
model = AutoModelForSeq2SeqLM.from_pretrained("tuner007/pegasus_summarizer")
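# Assumption / sketch: on newer Streamlit releases the slow model download above
# could be wrapped in a cached loader so it only runs once per session, e.g.:
#
#     @st.cache_resource
#     def load_summarizer():
#         tok = AutoTokenizer.from_pretrained("tuner007/pegasus_summarizer")
#         mdl = AutoModelForSeq2SeqLM.from_pretrained("tuner007/pegasus_summarizer")
#         return tok, mdl
#
#     tokenizer, model = load_summarizer()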



st.markdown(""" <style> .font {
font-size:50px ; font-family: "Helvetica"; color: #FF9633;} 
</style> """, unsafe_allow_html=True)

st.markdown('<p class="font">Now anyone can be a content marketer!</p>', unsafe_allow_html=True)
st.markdown('#')

st.subheader("Don't you wish there was a faster way to summarise your news articles and share it up onto your favourite social media platforms.")
st.markdown('##')

st.markdown(""" #### LorSor helps you through a simple 3 stage process. 

    Step 1: Copy and paste the complete article text in here 
    (*Coming soon* - Just paste the article URL)

    Step 2: Evaluate the generated summary and make minor edits as required

    Step 3: Copy and paste the summary when posting the article link to your social media 
    (*Coming soon* - Login to social media and schedule your post and we'll automate the process)

    Kick back and think about what you're going to do with all the time that you've saved!

    Send any feedback to [us](mailto:[email protected]) """)

st.markdown('#')
col1, col2 = st.columns(2)

# @st.cache
def get_response(input_text):
    # Tokenize the article, truncating to the model's 1024-token input limit
    batch = tokenizer([input_text], truncation=True, padding='longest', max_length=1024, return_tensors="pt").to('cpu')
    # Generate a single beam-searched summary of at most 128 tokens
    gen_out = model.generate(**batch, max_length=128, num_beams=5, num_return_sequences=1, temperature=1.5)
    # batch_decode returns a list of strings; return the single summary as a string
    output_text = tokenizer.batch_decode(gen_out, skip_special_tokens=True)
    return output_text[0]
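
# Note: transformers' generate() applies `temperature` only when sampling
# (do_sample=True); with the beam-search call in get_response above it has
# no effect on the output.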

# Default to an empty summary so col2 does not raise a NameError
# before the "Summarize this" button has been pressed
summary = ""

with col1:
    col1.header("Step 1:")
    raw_text = st.text_area('Paste the full article text to summarize here...')
    summary_button = st.button("Summarize this")

    if summary_button:
        if len(raw_text) < 10:
            summary = "<< Add some text in ( Step 1 ) for me to summarize >>"
        else:
            summary = get_response(raw_text)

with col2:
    col2.header("Step 2:")
    dummy_text = "<< Add some text in ( Step 1 ) for me to summarize >>"
    # Show the placeholder until a real summary has been generated
    if len(summary) < 10:
        initial_output = dummy_text
    else:
        initial_output = summary
    y = st.text_area("Here is the completed summary for you to edit", initial_output)
    st.button("Submit edits")
    # st.balloons()