Alfasign and mikeee committed
Commit df00340 (initial commit, 0 parents)

Duplicate from mikeee/chatglm2-6b-4bit


Co-authored-by: mikeee <[email protected]>

Files changed (4)
  1. .gitattributes +35 -0
  2. README.md +14 -0
  3. app.py +368 -0
  4. requirements.txt +9 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: chatglm2 6b int4
+ emoji: 🌖
+ colorFrom: purple
+ colorTo: blue
+ sdk: gradio
+ sdk_version: 3.35.2
+ app_file: app.py
+ pinned: false
+ license: mit
+ duplicated_from: mikeee/chatglm2-6b-4bit
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,368 @@
+ # pylint: disable=broad-exception-caught, redefined-outer-name, missing-function-docstring, missing-module-docstring, too-many-arguments, line-too-long, invalid-name, redefined-builtin, redefined-argument-from-local
+ # import gradio as gr
+
+ # model_name = "models/THUDM/chatglm2-6b-int4"
+ # gr.load(model_name).launch()
+
+ # %%writefile demo-4bit.py
+
+ from textwrap import dedent
+
+ import gradio as gr
+ import mdtex2html
+ import torch
+ from loguru import logger
+
+ # credit to https://github.com/THUDM/ChatGLM2-6B/blob/main/web_demo.py
+ # while mistakes are mine
+ from transformers import AutoModel, AutoTokenizer
+
+ model_name = "THUDM/chatglm2-6b"
+ # model_name = "THUDM/chatglm2-6b-int4"
+
+ RETRY_FLAG = False
+
+ tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+
+ # model = AutoModel.from_pretrained(model_name, trust_remote_code=True).cuda()
+
+ # 4/8 bit
+ # model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).quantize(4).cuda()
+
+ has_cuda = torch.cuda.is_available()
+ # has_cuda = False  # force cpu
+
+ if has_cuda:
+     model = AutoModel.from_pretrained(
+         model_name, trust_remote_code=True
+     ).cuda()  # 3.92G
+ else:
+     model = AutoModel.from_pretrained(
+         model_name, trust_remote_code=True
+     ).half()  # .float() .half().float()
+
+ model = model.eval()
+
+ _ = """Override Chatbot.postprocess"""
+
+
+ def postprocess(self, y):
+     if y is None:
+         return []
+     for i, (message, response) in enumerate(y):
+         y[i] = (
+             None if message is None else mdtex2html.convert((message)),
+             None if response is None else mdtex2html.convert(response),
+         )
+     return y
+
+
+ gr.Chatbot.postprocess = postprocess
+
+
+ def parse_text(text):
+     """copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/"""
+     lines = text.split("\n")
+     lines = [line for line in lines if line != ""]
+     count = 0
+     for i, line in enumerate(lines):
+         if "```" in line:
+             count += 1
+             items = line.split("`")
+             if count % 2 == 1:
+                 lines[i] = f'<pre><code class="language-{items[-1]}">'
+             else:
+                 lines[i] = "<br></code></pre>"
+         else:
+             if i > 0:
+                 if count % 2 == 1:
+                     line = line.replace("`", r"\`")
+                     line = line.replace("<", "&lt;")
+                     line = line.replace(">", "&gt;")
+                     line = line.replace(" ", "&nbsp;")
+                     line = line.replace("*", "&ast;")
+                     line = line.replace("_", "&lowbar;")
+                     line = line.replace("-", "&#45;")
+                     line = line.replace(".", "&#46;")
+                     line = line.replace("!", "&#33;")
+                     line = line.replace("(", "&#40;")
+                     line = line.replace(")", "&#41;")
+                     line = line.replace("$", "&#36;")
+                 lines[i] = "<br>" + line
+     text = "".join(lines)
+     return text
+
+
+ def predict(
+     RETRY_FLAG, input, chatbot, max_length, top_p, temperature, history, past_key_values
+ ):
+     try:
+         chatbot.append((parse_text(input), ""))
+     except Exception as exc:
+         logger.error(exc)
+         logger.debug(f"{chatbot=}")
+         _ = """
+         if chatbot:
+             chatbot[-1] = (parse_text(input), str(exc))
+             yield chatbot, history, past_key_values
+         # """
+         yield chatbot, history, past_key_values
+
+     for response, history, past_key_values in model.stream_chat(
+         tokenizer,
+         input,
+         history,
+         past_key_values=past_key_values,
+         return_past_key_values=True,
+         max_length=max_length,
+         top_p=top_p,
+         temperature=temperature,
+     ):
+         chatbot[-1] = (parse_text(input), parse_text(response))
+
+         yield chatbot, history, past_key_values
+
+
+ def trans_api(input, max_length=4096, top_p=0.8, temperature=0.2):
+     if max_length < 10:
+         max_length = 4096
+     if top_p < 0.1 or top_p > 1:
+         top_p = 0.85
+     if temperature <= 0 or temperature > 1:
+         temperature = 0.01
+     try:
+         res, _ = model.chat(
+             tokenizer,
+             input,
+             history=[],
+             past_key_values=None,
+             max_length=max_length,
+             top_p=top_p,
+             temperature=temperature,
+         )
+         # logger.debug(f"{res=} \n{_=}")
+     except Exception as exc:
+         logger.error(f"{exc=}")
+         res = str(exc)
+
+     return res
+
+
+ def reset_user_input():
+     return gr.update(value="")
+
+
+ def reset_state():
+     return [], [], None
+
+
+ # Delete last turn
+ def delete_last_turn(chat, history):
+     if chat and history:
+         chat.pop(-1)
+         history.pop(-1)
+     return chat, history
+
+
+ # Regenerate response
+ def retry_last_answer(
+     user_input, chatbot, max_length, top_p, temperature, history, past_key_values
+ ):
+     if chatbot and history:
+         # Removing the previous conversation from chat
+         chatbot.pop(-1)
+         # Setting up a flag to capture a retry
+         RETRY_FLAG = True
+         # Getting last message from user
+         user_input = history[-1][0]
+         # Removing bot response from the history
+         history.pop(-1)
+
+     yield from predict(
+         RETRY_FLAG,
+         user_input,
+         chatbot,
+         max_length,
+         top_p,
+         temperature,
+         history,
+         past_key_values,
+     )
+
+
+ with gr.Blocks(title="ChatGLM2-6B-int4", theme=gr.themes.Soft(text_size="sm")) as demo:
+     # gr.HTML("""<h1 align="center">ChatGLM2-6B-int4</h1>""")
+     gr.HTML(
+         """<center><a href="https://huggingface.co/spaces/mikeee/chatglm2-6b-4bit?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>To avoid the queue and for faster inference, duplicate this Space and upgrade to a GPU</center>"""
+     )
+
+     with gr.Accordion("Info", open=False):
+         _ = """
+         ## ChatGLM2-6B-int4
+
+         If an error occurs, refresh the browser and try again.
+
+         With a GPU, a query takes from a few seconds to a few tens of seconds, depending on the number of words/characters
+         the question and responses contain. The quality of the responses varies quite a bit, it seems. Even the same
+         question with the same parameters, asked at different times, can result in quite different responses.
+
+         * Low temperature: responses will be more deterministic and focused; high temperature: responses more creative.
+
+         * Suggested temperatures -- translation: up to 0.3; chatting: > 0.4
+
+         * Top P controls dynamic vocabulary selection based on context.
+
+         For a table of example values for different scenarios, refer to [this](https://community.openai.com/t/cheat-sheet-mastering-temperature-and-top-p-in-chatgpt-api-a-few-tips-and-tricks-on-controlling-the-creativity-deterministic-output-of-prompt-responses/172683)
+
+         If the instance is not on a GPU (T4), it will be very slow. You can try the [chatglm2-6b-4bit colab notebook](https://colab.research.google.com/drive/1WkF7kOjVCcBBatDHjaGkuJHnPdMWNtbW?usp=sharing) for a spin.
+
+         The T4 GPU is sponsored by a community GPU grant from Huggingface. Thanks a lot!
+         """
+         gr.Markdown(dedent(_))
+     chatbot = gr.Chatbot()
+     with gr.Row():
+         with gr.Column(scale=4):
+             with gr.Column(scale=12):
+                 user_input = gr.Textbox(
+                     show_label=False,
+                     placeholder="Input...",
+                 ).style(container=False)
+                 RETRY_FLAG = gr.Checkbox(value=False, visible=False)
+             with gr.Column(min_width=32, scale=1):
+                 with gr.Row():
+                     submitBtn = gr.Button("Submit", variant="primary")
+                     deleteBtn = gr.Button("Delete last turn", variant="secondary")
+                     retryBtn = gr.Button("Regenerate", variant="secondary")
+         with gr.Column(scale=1):
+             emptyBtn = gr.Button("Clear History")
+             max_length = gr.Slider(
+                 0,
+                 32768,
+                 value=8192,
+                 step=1.0,
+                 label="Maximum length",
+                 interactive=True,
+             )
+             top_p = gr.Slider(
+                 0, 1, value=0.85, step=0.01, label="Top P", interactive=True
+             )
+             temperature = gr.Slider(
+                 0.01, 1, value=0.95, step=0.01, label="Temperature", interactive=True
+             )
+
+     history = gr.State([])
+     past_key_values = gr.State(None)
+
+     user_input.submit(
+         predict,
+         [
+             RETRY_FLAG,
+             user_input,
+             chatbot,
+             max_length,
+             top_p,
+             temperature,
+             history,
+             past_key_values,
+         ],
+         [chatbot, history, past_key_values],
+         show_progress="full",
+     )
+     submitBtn.click(
+         predict,
+         [
+             RETRY_FLAG,
+             user_input,
+             chatbot,
+             max_length,
+             top_p,
+             temperature,
+             history,
+             past_key_values,
+         ],
+         [chatbot, history, past_key_values],
+         show_progress="full",
+         api_name="predict",
+     )
+     submitBtn.click(reset_user_input, [], [user_input])
+
+     emptyBtn.click(
+         reset_state, outputs=[chatbot, history, past_key_values], show_progress="full"
+     )
+
+     retryBtn.click(
+         retry_last_answer,
+         inputs=[
+             user_input,
+             chatbot,
+             max_length,
+             top_p,
+             temperature,
+             history,
+             past_key_values,
+         ],
+         # outputs = [chatbot, history, last_user_message, user_message]
+         outputs=[chatbot, history, past_key_values],
+     )
+     deleteBtn.click(delete_last_turn, [chatbot, history], [chatbot, history])
+
+     with gr.Accordion("Example inputs", open=True):
+         etext = """In America, where cars are an important part of the national psyche, a decade ago people had suddenly started to drive less, which had not happened since the oil shocks of the 1970s. """
+         examples = gr.Examples(
+             examples=[
+                 ["Explain the plot of Cinderella in a sentence."],
+                 [
+                     "How long does it take to become proficient in French, and what are the best methods for retaining information?"
+                 ],
+                 ["What are some common mistakes to avoid when writing code?"],
+                 ["Build a prompt to generate a beautiful portrait of a horse"],
+                 ["Suggest four metaphors to describe the benefits of AI"],
+                 ["Write a pop song about leaving home for the sandy beaches."],
+                 ["Write a summary demonstrating my ability to tame lions"],
+                 ["鲁迅和周树人什么关系"],
+                 ["从前有一头牛,这头牛后面有什么?"],
+                 ["正无穷大加一大于正无穷大吗?"],
+                 ["正无穷大加正无穷大大于正无穷大吗?"],
+                 ["-2的平方根等于什么"],
+                 ["树上有5只鸟,猎人开枪打死了一只。树上还有几只鸟?"],
+                 ["树上有11只鸟,猎人开枪打死了一只。树上还有几只鸟?提示:需考虑鸟可能受惊吓飞走。"],
+                 ["鲁迅和周树人什么关系 用英文回答"],
+                 ["以红楼梦的行文风格写一张委婉的请假条。不少于320字。"],
+                 [f"{etext} 翻成中文,列出3个版本"],
+                 [f"{etext} \n 翻成中文,保留原意,但使用文学性的语言。不要写解释。列出3个版本"],
+                 ["js 判断一个数是不是质数"],
+                 ["js 实现python 的 range(10)"],
+                 ["js 实现python 的 [*(range(10)]"],
+                 ["假定 1 + 2 = 4, 试求 7 + 8"],
+                 ["Erkläre die Handlung von Cinderella in einem Satz."],
+                 ["Erkläre die Handlung von Cinderella in einem Satz. Auf Deutsch"],
+             ],
+             inputs=[user_input],
+             examples_per_page=30,
+         )
+
+     with gr.Accordion("For Chat/Translation API", open=False, visible=False):
+         input_text = gr.Text()
+         tr_btn = gr.Button("Go", variant="primary")
+         out_text = gr.Text()
+         tr_btn.click(
+             trans_api,
+             [input_text, max_length, top_p, temperature],
+             out_text,
+             # show_progress="full",
+             api_name="tr",
+         )
+         _ = """
+         input_text.submit(
+             trans_api,
+             [input_text, max_length, top_p, temperature],
+             out_text,
+             show_progress="full",
+             api_name="tr1",
+         )
+         # """
+
+ # demo.queue().launch(share=False, inbrowser=True)
+ # demo.queue().launch(share=True, inbrowser=True, debug=True)
+
+ demo.queue().launch(debug=True)
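Since `tr_btn.click(...)` above registers `api_name="tr"`, the hidden Chat/Translation accordion also exposes `trans_api` as a named API endpoint of the running Space. Below is a minimal sketch of calling that endpoint remotely with `gradio_client`; the Space id (the `mikeee/chatglm2-6b-4bit` source this commit duplicates), the sample prompt, and the parameter values are illustrative assumptions, not part of this commit.

```python
# Minimal sketch: call the Space's "tr" endpoint with gradio_client.
# Assumptions: the Space is running and reachable under the id below,
# and gradio_client is installed (pip install gradio_client).
from gradio_client import Client

client = Client("mikeee/chatglm2-6b-4bit")  # or the id of your duplicated Space

# Positional arguments mirror trans_api's inputs: text, max_length, top_p, temperature.
result = client.predict(
    "Translate to English: 学而时习之,不亦说乎",  # hypothetical sample prompt
    4096,  # max_length
    0.8,   # top_p
    0.2,   # temperature
    api_name="/tr",
)
print(result)
```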
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ protobuf
+ transformers==4.30.2
+ cpm_kernels
+ torch>=2.0
+ # gradio
+ mdtex2html
+ sentencepiece
+ accelerate
+ loguru
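`gradio` is commented out here, presumably because the Space runtime already provides the version pinned in README.md (`sdk_version: 3.35.2`); for a local run it has to be installed separately. As a quick check that these pinned dependencies are enough to load the model outside the Gradio UI, here is a hedged sketch: the int4 checkpoint name and the CPU path come from the commented alternatives in app.py, and the prompt is an arbitrary example.

```python
# Minimal local smoke test, assuming `pip install -r requirements.txt`
# (plus gradio==3.35.2 if the UI is needed). Loads the 4-bit checkpoint
# referenced in app.py's comments and runs a single chat turn on CPU.
from transformers import AutoModel, AutoTokenizer

model_name = "THUDM/chatglm2-6b-int4"  # cpm_kernels provides the quantized kernels
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModel.from_pretrained(model_name, trust_remote_code=True).float().eval()

response, _history = model.chat(tokenizer, "你好", history=[])
print(response)
```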