import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
  
# Download the tokenizer and model from the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained("tuner007/pegasus_summarizer")
model = AutoModelForSeq2SeqLM.from_pretrained("tuner007/pegasus_summarizer")



st.markdown(""" <style> .font {
font-size:50px ; font-family: "Helvetica"; color: #FF9633;} 
</style> """, unsafe_allow_html=True)

st.markdown('<p class="font">Now anyone can be a content marketer!</p>', unsafe_allow_html=True)
st.markdown('#')

st.subheader("Don't you wish there was a faster way to summarise your news articles and share it up onto your favourite social media platforms.")
st.markdown('##')

st.markdown(""" #### LorSor helps you through a simple 3 stage process. 

    Step 1: Copy and paste the complete article text in here 
    (*Coming soon* - Just paste the article URL)

    Step 2: Evaluate the generated summary and make minor edits as required

    Step 3: Copy and paste the summary when posting the article link to your social media 
    (*Coming soon* - Login to social media and schedule your post and we'll automate the process)

    Kick back and think about what you're going to do with all the time that you've saved!

    Send any feedback to [us](mailto:[email protected]) """)

st.markdown('#')
col1, col2 = st.columns(2)

with col1:
    st.header("Step 1:")
    raw_text = st.text_area('Paste the full article text to summarize here...')
    # The button simply triggers a Streamlit rerun; the summary below is recomputed on each rerun
    st.button("Summarize this")

def get_response(input_text):
    # Tokenize the article, truncating to the model's 1024-token input limit
    batch = tokenizer([input_text], truncation=True, padding='longest', max_length=1024, return_tensors="pt").to('cpu')
    # Generate a summary of up to 128 tokens using beam search
    gen_out = model.generate(**batch, max_length=128, num_beams=5, num_return_sequences=1)
    # batch_decode returns a list; return the single summary string
    output_text = tokenizer.batch_decode(gen_out, skip_special_tokens=True)
    return output_text[0]

# Show placeholder text until the user has pasted enough text to summarize
if len(raw_text) < 10:
    summary = "Lorem Ipsum is a long and boring piece of old Latin text. What it means, I have no idea."
else:
    summary = get_response(raw_text)


with col2:
    st.header("Step 2:")
    edited_summary = st.text_area("Here is the completed summary for you to edit", summary)
    st.button("Submit edits")