ElPlaguister committed
Commit dae67e9 • 1 Parent(s): 0533c1e

Feat KoAlpaca Multimodel Gradio

Files changed (31)
  1. .gitignore +3 -0
  2. __pycache__/t5.cpython-39.pyc +0 -0
  3. app.py +18 -4
  4. koalpaca.py +33 -0
  5. model/pko_t5_COMU_patience10/.DS_Store +0 -0
  6. model/pko_t5_COMU_patience10/runs/.DS_Store +0 -0
  7. model/pko_t5_COMU_patience10/runs/Apr08_05-49-18_main1/.DS_Store +0 -0
  8. model/pko_t5_fm_patience10/.DS_Store +0 -0
  9. model/pko_t5_fm_patience10/config.json +0 -29
  10. model/pko_t5_fm_patience10/pytorch_model.bin +0 -3
  11. model/pko_t5_fm_patience10/runs/Apr08_08-56-15_main1/1680944176.4623845/events.out.tfevents.1680944176.main1.37029.1 +0 -3
  12. model/pko_t5_fm_patience10/runs/Apr08_08-56-15_main1/events.out.tfevents.1680944176.main1.37029.0 +0 -3
  13. model/pko_t5_fm_patience10/special_tokens_map.json +0 -1
  14. model/pko_t5_fm_patience10/tokenizer.json +0 -0
  15. model/pko_t5_fm_patience10/tokenizer_config.json +0 -1
  16. model/pko_t5_fm_patience10/training_args.bin +0 -3
  17. model/pko_t5_instiz_patience10/.DS_Store +0 -0
  18. model/pko_t5_instiz_patience10/config.json +0 -29
  19. model/pko_t5_instiz_patience10/pytorch_model.bin +0 -0
  20. models/koalpaca/gen_config.json +11 -0
  21. {model β†’ models}/pko_t5_COMU_patience10/config.json +0 -0
  22. models/pko_t5_COMU_patience10/gen_config.json +12 -0
  23. {model β†’ models}/pko_t5_COMU_patience10/pytorch_model.bin +0 -0
  24. {model β†’ models}/pko_t5_COMU_patience10/runs/Apr08_05-49-18_main1/1680932959.9242024/events.out.tfevents.1680932959.main1.34981.1 +0 -0
  25. {model β†’ models}/pko_t5_COMU_patience10/runs/Apr08_05-49-18_main1/events.out.tfevents.1680932959.main1.34981.0 +0 -0
  26. {model β†’ models}/pko_t5_COMU_patience10/special_tokens_map.json +0 -0
  27. {model β†’ models}/pko_t5_COMU_patience10/tokenizer.json +0 -0
  28. {model β†’ models}/pko_t5_COMU_patience10/tokenizer_config.json +0 -0
  29. {model β†’ models}/pko_t5_COMU_patience10/training_args.bin +0 -0
  30. requirements.txt +3 -2
  31. t5.py +11 -13
.gitignore ADDED
@@ -0,0 +1,3 @@
+ __pycache__
+ .DS_Store
+ *.ipynb
__pycache__/t5.cpython-39.pyc CHANGED
Binary files a/__pycache__/t5.cpython-39.pyc and b/__pycache__/t5.cpython-39.pyc differ
 
app.py CHANGED
@@ -1,6 +1,11 @@
  import gradio as gr
  from gradio.themes.utils import colors
  from t5 import T5
+ from koalpaca import KoAlpaca
+
+ LOCAL_TEST = True
+ MODELS = []
+ cur_index = 0
 
  def prepare_theme():
      theme = gr.themes.Default(primary_hue=colors.gray,
@@ -37,22 +42,31 @@ def prepare_theme():
      return theme
 
  def chat(message, chat_history):
-     global model
-     response = model.chat(message)
+     response = MODELS[cur_index].generate(message)
      chat_history.append((message, response))
      return "", chat_history
 
+ def change_model_index(idx):
+     global cur_index
+     cur_index = idx
+     print(cur_index)
+     return
+
  if __name__=='__main__':
      theme = prepare_theme()
-     model = T5()
+
+     MODELS.append(T5())
+     if not LOCAL_TEST:
+         MODELS.append(KoAlpaca())
 
      with gr.Blocks(theme=theme) as demo:
          with gr.Row():
+             rd = gr.Radio(['T5','KoAlpaca'], value='T5', type='index', label='Model Selection', show_label=True, interactive=True)
              with gr.Column(scale=5): # chatbot section
                  chatbot = gr.Chatbot(label="T5", bubble_full_width=False)
              with gr.Row():
                  txt = gr.Textbox(show_label=False, placeholder='Send a message...', container=False)
 
          txt.submit(chat, [txt, chatbot], [txt, chatbot])
-
+         rd.select(change_model_index, [rd])
      demo.launch(debug=True, share=True)
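
Note: the model switch above works because gr.Radio is created with type='index', so the callback wired through rd.select receives the selected option's position rather than its label, and chat() then dispatches through MODELS[cur_index]. A minimal self-contained sketch of the same pattern, with an illustrative EchoModel standing in for T5/KoAlpaca (EchoModel is not part of this commit):

import gradio as gr

class EchoModel:
    """Stand-in exposing the same generate() interface as T5/KoAlpaca."""
    def __init__(self, tag):
        self.tag = tag
    def generate(self, text):
        return f"[{self.tag}] {text}"

MODELS = [EchoModel("T5"), EchoModel("KoAlpaca")]
cur_index = 0

def chat(message, chat_history):
    response = MODELS[cur_index].generate(message)
    chat_history.append((message, response))
    return "", chat_history

def change_model_index(idx):
    global cur_index
    cur_index = idx  # an int, because the Radio was built with type='index'

with gr.Blocks() as demo:
    rd = gr.Radio(['T5', 'KoAlpaca'], value='T5', type='index')
    chatbot = gr.Chatbot()
    txt = gr.Textbox()
    txt.submit(chat, [txt, chatbot], [txt, chatbot])
    rd.select(change_model_index, [rd])

demo.launch()
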
koalpaca.py ADDED
@@ -0,0 +1,33 @@
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, GenerationConfig
+ from peft import PeftModel, PeftConfig
+
+ class KoAlpaca:
+     def __init__(self):
+         peft_model_id = "4n3mone/Komuchat-koalpaca-polyglot-12.8B"
+         config = PeftConfig.from_pretrained(peft_model_id)
+         self.bnb_config = BitsAndBytesConfig(
+             load_in_4bit=True,
+             bnb_4bit_use_double_quant=True,
+             bnb_4bit_quant_type="nf4",
+             bnb_4bit_compute_dtype=torch.bfloat16
+         )
+         self.model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, quantization_config=self.bnb_config, device_map={"":0})
+         self.model = PeftModel.from_pretrained(self.model, peft_model_id)
+         self.tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
+         self.gen_config = GenerationConfig.from_pretrained('./models/koalpaca', 'gen_config.json')
+         self.INPUT_FORMAT = "### 질문: <INPUT>\n\n### 답변:"
+         self.model.eval()
+
+     def generate(self, inputs):
+         inputs = self.INPUT_FORMAT.replace('<INPUT>', inputs)
+         output_ids = self.model.generate(
+             **self.tokenizer(
+                 inputs,
+                 return_tensors='pt',
+                 return_token_type_ids=False
+             ).to('cuda'),
+             generation_config=self.gen_config
+         )
+         outputs = self.tokenizer.decode(output_ids[0]).split("### 답변: ")[-1]
+         return outputs
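
Loading path: PeftConfig.from_pretrained reads the adapter repo to find the base model, the base is loaded in 4-bit NF4 via bitsandbytes, and the LoRA weights are layered on top with PeftModel.from_pretrained. Usage is then a single call; a sketch, assuming a CUDA GPU with enough memory and access to the Hub:

from koalpaca import KoAlpaca

model = KoAlpaca()                  # 12.8B base in 4-bit plus LoRA adapter; needs a GPU
print(model.generate("안녕하세요"))  # wrapped as "### 질문: ...\n\n### 답변:" before generation
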
model/pko_t5_COMU_patience10/.DS_Store DELETED
Binary file (6.15 kB)
 
model/pko_t5_COMU_patience10/runs/.DS_Store DELETED
Binary file (6.15 kB)
 
model/pko_t5_COMU_patience10/runs/Apr08_05-49-18_main1/.DS_Store DELETED
Binary file (6.15 kB)
 
model/pko_t5_fm_patience10/.DS_Store DELETED
Binary file (6.15 kB)
 
model/pko_t5_fm_patience10/config.json DELETED
@@ -1,29 +0,0 @@
- {
-   "_name_or_path": "paust/pko-t5-large",
-   "architectures": [
-     "T5ForConditionalGeneration"
-   ],
-   "d_ff": 2816,
-   "d_kv": 64,
-   "d_model": 1024,
-   "decoder_start_token_id": 0,
-   "dropout_rate": 0.1,
-   "eos_token_id": 1,
-   "feed_forward_proj": "gated-gelu",
-   "initializer_factor": 1.0,
-   "is_encoder_decoder": true,
-   "layer_norm_epsilon": 1e-06,
-   "model_type": "t5",
-   "num_decoder_layers": 24,
-   "num_heads": 16,
-   "num_layers": 24,
-   "output_past": true,
-   "pad_token_id": 0,
-   "relative_attention_max_distance": 128,
-   "relative_attention_num_buckets": 32,
-   "tie_word_embeddings": false,
-   "torch_dtype": "float32",
-   "transformers_version": "4.16.2",
-   "use_cache": true,
-   "vocab_size": 50364
- }
model/pko_t5_fm_patience10/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ff427d0fac31330c4f73047080eee6fb4280c44e64ec586d8c897c45dd88997b
- size 3282255749
model/pko_t5_fm_patience10/runs/Apr08_08-56-15_main1/1680944176.4623845/events.out.tfevents.1680944176.main1.37029.1 DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:5d711899223dcba6a3f996fd53d46a45c177f4deea6f9a41c7d3a8e587ed4c29
- size 5024
model/pko_t5_fm_patience10/runs/Apr08_08-56-15_main1/events.out.tfevents.1680944176.main1.37029.0 DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:673b3ea74f0648feca11e100846b45ff35c80ff61a724dbc65ef5637d41599a7
- size 9060
model/pko_t5_fm_patience10/special_tokens_map.json DELETED
@@ -1 +0,0 @@
- {"eos_token": "</s>", "unk_token": "<pad>", "pad_token": "<pad>", "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"]}
 
 
model/pko_t5_fm_patience10/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff
 
model/pko_t5_fm_patience10/tokenizer_config.json DELETED
@@ -1 +0,0 @@
- {"eos_token": "</s>", "unk_token": "<pad>", "pad_token": "<pad>", "extra_ids": 100, "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"], "special_tokens_map_file": "./models/pko-t5/special_tokens_map.json", "name_or_path": "paust/pko-t5-large", "tokenizer_class": "T5Tokenizer"}
 
 
model/pko_t5_fm_patience10/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:b1c51a0cfd21bf9256ed7d47851a2a4cba51264bf102db415ebea5927746bba4
- size 3195
model/pko_t5_instiz_patience10/.DS_Store DELETED
Binary file (6.15 kB)
 
model/pko_t5_instiz_patience10/config.json DELETED
@@ -1,29 +0,0 @@
- {
-   "_name_or_path": "paust/pko-t5-large",
-   "architectures": [
-     "T5ForConditionalGeneration"
-   ],
-   "d_ff": 2816,
-   "d_kv": 64,
-   "d_model": 1024,
-   "decoder_start_token_id": 0,
-   "dropout_rate": 0.1,
-   "eos_token_id": 1,
-   "feed_forward_proj": "gated-gelu",
-   "initializer_factor": 1.0,
-   "is_encoder_decoder": true,
-   "layer_norm_epsilon": 1e-06,
-   "model_type": "t5",
-   "num_decoder_layers": 24,
-   "num_heads": 16,
-   "num_layers": 24,
-   "output_past": true,
-   "pad_token_id": 0,
-   "relative_attention_max_distance": 128,
-   "relative_attention_num_buckets": 32,
-   "tie_word_embeddings": false,
-   "torch_dtype": "float32",
-   "transformers_version": "4.16.2",
-   "use_cache": true,
-   "vocab_size": 50364
- }
model/pko_t5_instiz_patience10/pytorch_model.bin DELETED
File without changes
models/koalpaca/gen_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "do_sample": true,
+   "early_stopping": true,
+   "eos_token_id": 2,
+   "max_new_tokens": 50,
+   "no_repeat_ngram_size": 3,
+   "num_beams": 2,
+   "transformers_version": "4.28.0"
+ }
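
This is the file that GenerationConfig.from_pretrained('./models/koalpaca', 'gen_config.json') in koalpaca.py resolves; the second argument is the config file name. Loading it standalone looks like this (a sketch, assuming the repo layout above):

from transformers import GenerationConfig

cfg = GenerationConfig.from_pretrained("./models/koalpaca", "gen_config.json")
print(cfg.max_new_tokens, cfg.num_beams)  # 50 2: sampled beam search, replies capped at 50 new tokens
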
{model β†’ models}/pko_t5_COMU_patience10/config.json RENAMED
File without changes
models/pko_t5_COMU_patience10/gen_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "_from_model_config": true,
+   "decoder_start_token_id": 0,
+   "do_sample": true,
+   "eos_token_id": 1,
+   "max_new_tokens": 64,
+   "min_length": 10,
+   "no_repeat_ngram_size": 2,
+   "num_beams": 2,
+   "pad_token_id": 0,
+   "transformers_version": "4.28.0"
+ }
{model β†’ models}/pko_t5_COMU_patience10/pytorch_model.bin RENAMED
File without changes
{model β†’ models}/pko_t5_COMU_patience10/runs/Apr08_05-49-18_main1/1680932959.9242024/events.out.tfevents.1680932959.main1.34981.1 RENAMED
File without changes
{model β†’ models}/pko_t5_COMU_patience10/runs/Apr08_05-49-18_main1/events.out.tfevents.1680932959.main1.34981.0 RENAMED
File without changes
{model β†’ models}/pko_t5_COMU_patience10/special_tokens_map.json RENAMED
File without changes
{model β†’ models}/pko_t5_COMU_patience10/tokenizer.json RENAMED
File without changes
{model β†’ models}/pko_t5_COMU_patience10/tokenizer_config.json RENAMED
File without changes
{model β†’ models}/pko_t5_COMU_patience10/training_args.bin RENAMED
File without changes
requirements.txt CHANGED
@@ -1,5 +1,6 @@
- transformers==4.16.2
+ transformers==4.28.0
  gradio==4.1.1
  numpy==1.26.1
  pandas==2.1.2
- torch==2.0.1
+ torch==2.0.1
+ git+https://github.com/huggingface/peft.git
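
peft is pulled straight from the GitHub main branch rather than a tagged release, presumably because the 4-bit loading path used in koalpaca.py was not yet in a PyPI version at the time. Note that bitsandbytes and accelerate, which load_in_4bit and device_map rely on, are not pinned here and must already be present in the runtime. A quick environment check (a sketch):

import peft
import torch
import transformers

# The gen_config.json files are stamped with transformers 4.28.0; a mismatch
# usually still loads but can change generation defaults.
print(transformers.__version__, torch.__version__, peft.__version__)
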
t5.py CHANGED
@@ -1,32 +1,30 @@
- from transformers import T5TokenizerFast, T5ForConditionalGeneration
+ from transformers import T5TokenizerFast, T5ForConditionalGeneration, GenerationConfig
 
  class T5:
      def __init__(self,
-                  model_dir:str='./model/pko_t5_COMU_patience10',
+                  model_dir:str='./models/pko_t5_COMU_patience10',
                   max_input_length:int=64,
-                  max_target_length:int=64,
-                  prefix:str='qa question: '
+                  max_target_length:int=64
                   ):
          self.model = T5ForConditionalGeneration.from_pretrained(model_dir)
          self.tokenizer = T5TokenizerFast.from_pretrained(model_dir)
+         self.gen_config = GenerationConfig.from_pretrained(model_dir, 'gen_config.json')
+
          self.max_input_length = max_input_length
          self.max_target_length = max_target_length
-         self.prefix = prefix
+         self.INPUT_FORMAT = 'qa question: <INPUT>'
 
          # add tokens
          self.tokenizer.add_tokens(["#화자#", "#청자#", "#(남자)청자#", "#(남자)화자#", "#(여자)청자#", "(여자)화자"])
          self.model.resize_token_embeddings(len(self.tokenizer))
          self.model.config.max_length = max_target_length
          self.tokenizer.model_max_length = max_target_length
-
-     def chat(self, inputs):
-         inputs = [self.prefix + inputs]
+
+     def generate(self, inputs):
+         inputs = self.INPUT_FORMAT.replace("<INPUT>", inputs)
          input_ids = self.tokenizer(inputs, max_length=self.max_input_length, truncation=True, return_tensors="pt")
-         output_tensor = self.model.generate(**input_ids, num_beams=2, do_sample=True, min_length=10, max_length=self.max_target_length, no_repeat_ngram_size=2) #repetition_penalty=2.5
+         output_tensor = self.model.generate(**input_ids, generation_config=self.gen_config)
          output_ids = self.tokenizer.batch_decode(output_tensor, skip_special_tokens=True, clean_up_tokenization_spaces=True)
          outputs = str(output_ids)
          outputs = outputs.replace('[', '').replace(']', '').replace("'", '').replace("'", '')
-         return outputs
-
-
-
+         return outputs
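
With chat renamed to generate and the hard-coded sampling arguments moved into gen_config.json, both wrappers now expose the same generate(str) -> str interface, which is what lets app.py treat MODELS as interchangeable backends. A quick check against the bundled checkpoint (a sketch, assuming ./models/pko_t5_COMU_patience10 exists locally):

from t5 import T5

model = T5()                   # loads the checkpoint plus its gen_config.json
print(model.generate("안녕?"))  # input is wrapped as 'qa question: <INPUT>' before tokenization
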