import gradio as gr

from transformers import PreTrainedTokenizerFast, BartForConditionalGeneration
# Import statements of the form "from transformers import ..." often use
# AutoTokenizer / AutoModel instead, e.g.
# tokenizer = AutoTokenizer.from_pretrained("<some model name>")
# PreTrainedTokenizerFast: https://huggingface.co./docs/transformers/main_classes/tokenizer
# BART is an example of an encoder-decoder model

model_name = "ainize/kobart-news"
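# from_pretrained downloads the tokenizer/model weights from the Hugging Face Hub
# on first use and caches them locally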
tokenizer = PreTrainedTokenizerFast.from_pretrained(model_name)
model = BartForConditionalGeneration.from_pretrained(model_name)

# ์›๋ฌธ์„ ๋ฐ›์•„์„œ ์š”์•ฝ๋ฌธ์„ ๋ฐ˜ํ™˜
def summ(txt):
  input_ids = tokenizer.encode(txt, return_tensors="pt")
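  # Beam search with 4 beams; length_penalty=2.0 favors longer outputs,
  # and the summary length is constrained to 56-142 tokens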
  summary_text_ids = model.generate(
    input_ids=input_ids,
    bos_token_id=model.config.bos_token_id,
    eos_token_id=model.config.eos_token_id,
    length_penalty=2.0,
    max_length=142,
    min_length=56,
    num_beams=4)
  return tokenizer.decode(summary_text_ids[0], skip_special_tokens=True)

interface = gr.Interface(summ,
                        [gr.Textbox(label="original text")],
                        [gr.Textbox(label="summary")])

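# share=True serves the app locally and also creates a temporary public gradio.live link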
interface.launch(share=True)