LeeJang committed on
Commit
2f7dce2
1 Parent(s): 067918f

Delete app.py

Files changed (1)
  1. app.py +0 -42
app.py DELETED
@@ -1,42 +0,0 @@
- import torch
- import streamlit as st
- from transformers import BartForConditionalGeneration, PreTrainedTokenizerFast
-
- # Cache the loaded model across Streamlit reruns. st.cache_resource is the
- # decorator intended for unserializable objects such as models;
- # allow_output_mutation belongs to the deprecated st.cache, not st.cache_data.
- @st.cache_resource
- def load_model():
-     # model = BartForConditionalGeneration.from_pretrained('logs/model_chp/epoch-6')
-     model = BartForConditionalGeneration.from_pretrained('LeeJang/news-summarization-v2')
-     return model
-
- model = load_model()
- tokenizer = PreTrainedTokenizerFast.from_pretrained('gogamza/kobart-base-v1')
- st.title("2문장 뉴스 요약기")  # "Two-sentence news summarizer"
- text = st.text_area("뉴스 입력:")  # "Enter news article:"
-
- st.markdown("## 뉴스 원문")  # "Original article"
- st.write(text)
-
- if text:
-     # Collapse newlines and truncate the input to 501 whitespace-separated
-     # tokens so it stays within the model's input limit.
-     text = text.replace('\n', ' ').strip()
-     arr = text.split(' ')
-     if len(arr) > 501:
-         arr = arr[:501]
-         text = ' '.join(arr)
-
-     st.markdown("## 요약 결과")  # "Summary"
-     with st.spinner('processing..'):
-         input_ids = tokenizer.encode(text)
-         input_ids = torch.tensor(input_ids).unsqueeze(0)  # add a batch dimension
-         # KoBART uses token id 1 as the end-of-sequence token.
-         output = model.generate(input_ids, eos_token_id=1, max_length=512, num_beams=5)
-         output = tokenizer.decode(output[0], skip_special_tokens=True)
-         st.write(output)
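
For reference, the summarization that the deleted app.py performed can be reproduced outside Streamlit. The sketch below is a minimal, non-authoritative example assuming the 'LeeJang/news-summarization-v2' and 'gogamza/kobart-base-v1' checkpoints remain available on the Hub; the generation settings mirror the deleted script.

import torch
from transformers import BartForConditionalGeneration, PreTrainedTokenizerFast

# Load the summarization model and the KoBART tokenizer used by the deleted app.
model = BartForConditionalGeneration.from_pretrained('LeeJang/news-summarization-v2')
tokenizer = PreTrainedTokenizerFast.from_pretrained('gogamza/kobart-base-v1')

article = "..."  # placeholder: news text to summarize
input_ids = torch.tensor(tokenizer.encode(article)).unsqueeze(0)  # add a batch dimension
summary_ids = model.generate(input_ids, eos_token_id=1, max_length=512, num_beams=5)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))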