Kritias committed
Commit 6ccd417 • 1 parent: 9cfe1da

Add application file

Files changed (25)
  1. app.py +57 -0
  2. model/pko_t5_COMU_patience10/.DS_Store +0 -0
  3. model/pko_t5_COMU_patience10/config.json +29 -0
  4. model/pko_t5_COMU_patience10/pytorch_model.bin +3 -0
  5. model/pko_t5_COMU_patience10/runs/.DS_Store +0 -0
  6. model/pko_t5_COMU_patience10/runs/Apr08_05-49-18_main1/.DS_Store +0 -0
  7. model/pko_t5_COMU_patience10/runs/Apr08_05-49-18_main1/1680932959.9242024/events.out.tfevents.1680932959.main1.34981.1 +3 -0
  8. model/pko_t5_COMU_patience10/runs/Apr08_05-49-18_main1/events.out.tfevents.1680932959.main1.34981.0 +3 -0
  9. model/pko_t5_COMU_patience10/special_tokens_map.json +1 -0
  10. model/pko_t5_COMU_patience10/tokenizer.json +0 -0
  11. model/pko_t5_COMU_patience10/tokenizer_config.json +1 -0
  12. model/pko_t5_COMU_patience10/training_args.bin +3 -0
  13. model/pko_t5_fm_patience10/.DS_Store +0 -0
  14. model/pko_t5_fm_patience10/config.json +29 -0
  15. model/pko_t5_fm_patience10/pytorch_model.bin +3 -0
  16. model/pko_t5_fm_patience10/runs/Apr08_08-56-15_main1/1680944176.4623845/events.out.tfevents.1680944176.main1.37029.1 +3 -0
  17. model/pko_t5_fm_patience10/runs/Apr08_08-56-15_main1/events.out.tfevents.1680944176.main1.37029.0 +3 -0
  18. model/pko_t5_fm_patience10/special_tokens_map.json +1 -0
  19. model/pko_t5_fm_patience10/tokenizer.json +0 -0
  20. model/pko_t5_fm_patience10/tokenizer_config.json +1 -0
  21. model/pko_t5_fm_patience10/training_args.bin +3 -0
  22. model/pko_t5_instiz_patience10/.DS_Store +0 -0
  23. model/pko_t5_instiz_patience10/config.json +29 -0
  24. model/pko_t5_instiz_patience10/pytorch_model.bin +0 -0
  25. t5.py +32 -0
app.py ADDED
@@ -0,0 +1,57 @@
+ import gradio as gr
+ from gradio.themes.utils import colors
+ from t5 import T5
+
+ def prepare_theme():
+     theme = gr.themes.Default(primary_hue=colors.gray,
+                               secondary_hue=colors.emerald,
+                               neutral_hue=colors.emerald).set(
+         body_background_fill="*primary_800",
+         body_background_fill_dark="*primary_800",
+
+         block_background_fill="*primary_700",
+         block_background_fill_dark="*primary_700",
+
+         border_color_primary="*secondary_300",
+         border_color_primary_dark="*secondary_300",
+         block_border_width="3px",
+         input_border_width="2px",
+
+         input_background_fill="*primary_700",
+         input_background_fill_dark="*primary_700",
+
+         background_fill_secondary="*primary_700",
+         background_fill_secondary_dark="*primary_700",
+
+         body_text_color="white",
+         body_text_color_dark="white",
+
+         block_label_text_color="*secondary_300",
+         block_label_text_color_dark="*secondary_300",
+         block_label_background_fill="*primary_800",
+         block_label_background_fill_dark="*primary_800",
+
+         color_accent_soft="*primary_600",
+         color_accent_soft_dark="*primary_600",
+     )
+     return theme
+
+ def chat(message, chat_history, model):
+     response = model.chat(message)
+     chat_history.append((message, response))
+     return "", chat_history  # clear the textbox, refresh the chat log
+
+ if __name__ == '__main__':
+     theme = prepare_theme()
+     model = T5()
+
+     with gr.Blocks(theme=theme) as demo:
+         with gr.Row():
+             with gr.Column(scale=5):  # chatbot panel
+                 chatbot = gr.Chatbot(label="T5", bubble_full_width=False)
+                 with gr.Row():
+                     txt = gr.Textbox(show_label=False, placeholder='Send a message...', container=False)
+
+         txt.submit(lambda m, h: chat(m, h, model), [txt, chatbot], [txt, chatbot])  # Gradio passes only [txt, chatbot]; bind model via closure
+
+     demo.launch(debug=True, share=True)
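
For a quick sanity check without launching the UI, the `chat` callback above can be driven directly. A minimal sketch, assuming the default COMU checkpoint below has been pulled via Git LFS; the sample prompt is hypothetical:

    from app import chat   # safe to import: app.py only launches the UI under __main__
    from t5 import T5

    model = T5()                                 # defaults to ./model/pko_t5_COMU_patience10
    cleared, history = chat("안녕!", [], model)  # hypothetical prompt
    print(history[-1])                           # (user message, model response)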
model/pko_t5_COMU_patience10/.DS_Store ADDED
Binary file (6.15 kB).
 
model/pko_t5_COMU_patience10/config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "_name_or_path": "paust/pko-t5-large",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 2816,
+   "d_kv": 64,
+   "d_model": 1024,
+   "decoder_start_token_id": 0,
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "num_decoder_layers": 24,
+   "num_heads": 16,
+   "num_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.16.2",
+   "use_cache": true,
+   "vocab_size": 50364
+ }
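
This config matches the paust/pko-t5-large geometry, and the same file is committed verbatim for the fm and instiz variants below. A quick sanity check with transformers, assuming the repo root as working directory:

    from transformers import T5Config

    cfg = T5Config.from_pretrained("./model/pko_t5_COMU_patience10")
    # attention width: num_heads * d_kv == d_model (16 * 64 == 1024)
    assert cfg.num_heads * cfg.d_kv == cfg.d_model
    print(cfg.num_layers, cfg.num_decoder_layers, cfg.vocab_size)  # 24 24 50364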
model/pko_t5_COMU_patience10/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:809006db20f8f44d4328368cd076b4d175f5c24dd368a187180ad8d102faaec5
+ size 3282255749
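
The weights are committed as a Git LFS pointer: the three lines above record only the spec version, the sha256 oid, and the size (about 3.28 GB). After `git lfs pull`, the download can be checked against the pointer, as in this sketch:

    import hashlib

    # oid copied from the pointer above
    EXPECTED = "809006db20f8f44d4328368cd076b4d175f5c24dd368a187180ad8d102faaec5"

    h = hashlib.sha256()
    with open("model/pko_t5_COMU_patience10/pytorch_model.bin", "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            h.update(chunk)
    print(h.hexdigest() == EXPECTED)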
model/pko_t5_COMU_patience10/runs/.DS_Store ADDED
Binary file (6.15 kB).
 
model/pko_t5_COMU_patience10/runs/Apr08_05-49-18_main1/.DS_Store ADDED
Binary file (6.15 kB).
 
model/pko_t5_COMU_patience10/runs/Apr08_05-49-18_main1/1680932959.9242024/events.out.tfevents.1680932959.main1.34981.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:944e8a7c48ddd5ccafeaeb6e11dc975899de396ef7e4c709ecec4f81975b9225
+ size 5030
model/pko_t5_COMU_patience10/runs/Apr08_05-49-18_main1/events.out.tfevents.1680932959.main1.34981.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:11426f8d24ac9aaf4c445e4ba96e7619058f356f315a8c78a68d55f16eedd014
+ size 12941
model/pko_t5_COMU_patience10/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"eos_token": "</s>", "unk_token": "<pad>", "pad_token": "<pad>", "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"]}
model/pko_t5_COMU_patience10/tokenizer.json ADDED
The diff for this file is too large to render.
 
model/pko_t5_COMU_patience10/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"eos_token": "</s>", "unk_token": "<pad>", "pad_token": "<pad>", "extra_ids": 100, "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"], "special_tokens_map_file": "./models/pko-t5/special_tokens_map.json", "name_or_path": "paust/pko-t5-large", "tokenizer_class": "T5Tokenizer"}
model/pko_t5_COMU_patience10/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52194673556fa1ebb92cfacedfed9f89df760c93bb2f1d9dc186470fe268c03a
+ size 3195
model/pko_t5_fm_patience10/.DS_Store ADDED
Binary file (6.15 kB).
 
model/pko_t5_fm_patience10/config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "_name_or_path": "paust/pko-t5-large",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 2816,
+   "d_kv": 64,
+   "d_model": 1024,
+   "decoder_start_token_id": 0,
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "num_decoder_layers": 24,
+   "num_heads": 16,
+   "num_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.16.2",
+   "use_cache": true,
+   "vocab_size": 50364
+ }
model/pko_t5_fm_patience10/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff427d0fac31330c4f73047080eee6fb4280c44e64ec586d8c897c45dd88997b
+ size 3282255749
model/pko_t5_fm_patience10/runs/Apr08_08-56-15_main1/1680944176.4623845/events.out.tfevents.1680944176.main1.37029.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d711899223dcba6a3f996fd53d46a45c177f4deea6f9a41c7d3a8e587ed4c29
+ size 5024
model/pko_t5_fm_patience10/runs/Apr08_08-56-15_main1/events.out.tfevents.1680944176.main1.37029.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:673b3ea74f0648feca11e100846b45ff35c80ff61a724dbc65ef5637d41599a7
+ size 9060
model/pko_t5_fm_patience10/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"eos_token": "</s>", "unk_token": "<pad>", "pad_token": "<pad>", "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"]}
model/pko_t5_fm_patience10/tokenizer.json ADDED
The diff for this file is too large to render.
 
model/pko_t5_fm_patience10/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"eos_token": "</s>", "unk_token": "<pad>", "pad_token": "<pad>", "extra_ids": 100, "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"], "special_tokens_map_file": "./models/pko-t5/special_tokens_map.json", "name_or_path": "paust/pko-t5-large", "tokenizer_class": "T5Tokenizer"}
model/pko_t5_fm_patience10/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b1c51a0cfd21bf9256ed7d47851a2a4cba51264bf102db415ebea5927746bba4
+ size 3195
model/pko_t5_instiz_patience10/.DS_Store ADDED
Binary file (6.15 kB).
 
model/pko_t5_instiz_patience10/config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "_name_or_path": "paust/pko-t5-large",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 2816,
+   "d_kv": 64,
+   "d_model": 1024,
+   "decoder_start_token_id": 0,
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "num_decoder_layers": 24,
+   "num_heads": 16,
+   "num_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.16.2",
+   "use_cache": true,
+   "vocab_size": 50364
+ }
model/pko_t5_instiz_patience10/pytorch_model.bin ADDED
File without changes
t5.py ADDED
@@ -0,0 +1,32 @@
+ from transformers import T5TokenizerFast, T5ForConditionalGeneration
+
+ class T5:
+     def __init__(self,
+                  model_dir: str = './model/pko_t5_COMU_patience10',
+                  max_input_length: int = 64,
+                  max_target_length: int = 64,
+                  prefix: str = 'qa question: '
+                  ):
+         self.model = T5ForConditionalGeneration.from_pretrained(model_dir)
+         self.tokenizer = T5TokenizerFast.from_pretrained(model_dir)
+         self.max_input_length = max_input_length
+         self.max_target_length = max_target_length
+         self.prefix = prefix
+
+         # add speaker/listener role tokens; the last one lacks '#' delimiters, kept as in the trained vocabulary
+         self.tokenizer.add_tokens(["#화자#", "#청자#", "#(남자)청자#", "#(남자)화자#", "#(여자)청자#", "(여자)화자"])
+         self.model.resize_token_embeddings(len(self.tokenizer))
+         self.model.config.max_length = max_target_length
+         self.tokenizer.model_max_length = max_target_length
+
+     def chat(self, inputs):
+         inputs = [self.prefix + inputs]
+         input_ids = self.tokenizer(inputs, max_length=self.max_input_length, truncation=True, return_tensors="pt")
+         output_tensor = self.model.generate(**input_ids, num_beams=2, do_sample=True, min_length=10, max_length=self.max_target_length, no_repeat_ngram_size=2)  # repetition_penalty=2.5
+         output_ids = self.tokenizer.batch_decode(output_tensor, skip_special_tokens=True, clean_up_tokenization_spaces=True)
+         # batch_decode returns one string per sequence; return the first
+         # directly instead of string-mangling the list repr
+         return output_ids[0]
+
+
+
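
All three fine-tuned variants in this commit share the same architecture, so the wrapper above can switch between them via `model_dir` alone. A minimal sketch; the prompt is hypothetical, and the instiz variant is omitted because its weights file shows no content in this commit:

    from t5 import T5

    for name in ("pko_t5_COMU_patience10", "pko_t5_fm_patience10"):
        bot = T5(model_dir=f"./model/{name}")
        print(name, "->", bot.chat("안녕하세요"))  # hypothetical prompt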