import gradio as gr
from transformers import PreTrainedTokenizerFast, BartForConditionalGeneration

# KoBART fine-tuned for Korean news summarization. BART is an
# encoder-decoder model; PreTrainedTokenizerFast docs:
# https://huggingface.co./docs/transformers/main_classes/tokenizer
model_name = "ainize/kobart-news"
tokenizer = PreTrainedTokenizerFast.from_pretrained(model_name)
model = BartForConditionalGeneration.from_pretrained(model_name)


def summ(txt):
    """Return a beam-search summary of the input article text *txt*.

    Args:
        txt: Source document (Korean news article) as a plain string.

    Returns:
        The decoded summary string with special tokens removed.
    """
    # BUG FIX: the original encoded the undefined name `input_text`,
    # which raised NameError on every call — encode the parameter `txt`.
    input_ids = tokenizer.encode(txt, return_tensors="pt")
    summary_text_ids = model.generate(
        input_ids=input_ids,
        bos_token_id=model.config.bos_token_id,
        eos_token_id=model.config.eos_token_id,
        length_penalty=2.0,  # >1.0 biases beam search toward longer outputs
        max_length=142,
        min_length=56,
        num_beams=4,
    )
    # generate() returns a batch; take the single (first) sequence.
    return tokenizer.decode(summary_text_ids[0], skip_special_tokens=True)


interface = gr.Interface(
    summ,
    [gr.Textbox(label="original text")],
    [gr.Textbox(label="summary")],
)
interface.launch(share=True)