mikeee committed
Commit 64dc528 (0 parents)

Duplicate from mikeee/llama-2-70b-guanaco-qlora-ggml

Files changed (13)
  1. .gitattributes +35 -0
  2. .gitignore +13 -0
  3. .husky/pre-commit +6 -0
  4. .ruff.toml +18 -0
  5. .stignore +103 -0
  6. README.md +13 -0
  7. app.py +389 -0
  8. dl_model.py +73 -0
  9. examples_list.py +43 -0
  10. package.json +14 -0
  11. requirements.txt +8 -0
  12. run-app.sh +2 -0
  13. yarn.lock +8 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,13 @@
+ call-activate.bat
+ okteto.yml
+ okteto-up.bat
+ install-sw.sh
+ install-sw1.sh
+ start-sshd.sh
+ pyproject.toml
+ models
+ .ruff_cache
+ run-nodemon.sh
+ app-.py
+ nodemon.json
+ node_modules
.husky/pre-commit ADDED
@@ -0,0 +1,6 @@
+ #!/usr/bin/env sh
+ . "$(dirname -- "$0")/_/husky.sh"
+
+ # npm test
+ ruff . --fix
+ black .
.ruff.toml ADDED
@@ -0,0 +1,18 @@
+ # Assume Python 3.10.
+ target-version = "py310"
+ # Set the maximum line length to 300 characters.
+ line-length = 300
+
+ # pyflakes, pycodestyle, isort
+ # flake8 YTT, pydocstyle D, pylint PLC
+ select = ["F", "E", "W", "I001", "YTT", "D", "PLC"]
+ # select = ["ALL"]
+
+ # D100 Missing docstring in public module
+ # D103 Missing docstring in public function
+ # D101 Missing docstring in public class
+ # `multi-line-summary-first-line` (D212)
+ # `one-blank-line-before-class` (D203)
+ extend-ignore = ["D100", "D103", "D101", "D212", "D203"]
+
+ exclude = [".venv"]
.stignore ADDED
@@ -0,0 +1,103 @@
+ models
+ *.bin
+ .git
+ # Byte-compiled / optimized / DLL files
+ __pycache__
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build
+ develop-eggs
+ dist
+ downloads
+ eggs
+ .eggs
+ lib
+ lib64
+ parts
+ sdist
+ var
+ wheels
+ pip-wheel-metadata
+ share/python-wheels
+ *.egg-info
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+
+ # Flask stuff:
+ instance
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build
+
+ # PyBuilder
+ target
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default
+ ipython_config.py
+
+ # pyenv
+ .python-version
+
+ # celery beat schedule file
+ celerybeat-schedule
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env
+ venv
+ ENV
+ env.bak
+ venv.bak
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mypy
+ .mypy_cache
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: llama-2-7b-or-13b-ggml
+ emoji: 🚀
+ colorFrom: green
+ colorTo: green
+ sdk: gradio
+ sdk_version: 3.39.0
+ app_file: app.py
+ pinned: true
+ duplicated_from: mikeee/llama-2-70b-guanaco-qlora-ggml
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,389 @@
+ """Run the Gradio chat app."""
+ # pylint: disable=line-too-long, broad-exception-caught, invalid-name, missing-function-docstring, too-many-instance-attributes, missing-class-docstring
+ # ruff: noqa: E501
+ import os
+ import platform
+ import random
+ import time
+ from dataclasses import asdict, dataclass, field
+ from pathlib import Path
+ from textwrap import dedent
+
+ # from types import SimpleNamespace
+ import gradio as gr
+ import psutil
+ from about_time import about_time
+ from ctransformers import AutoModelForCausalLM
+ from dl_hf_model import dl_hf_model
+ from loguru import logger
+
+ from examples_list import examples_list
+
+ url = "https://huggingface.co/TheBloke/llama-2-13B-Guanaco-QLoRA-GGML/blob/main/llama-2-13b-guanaco-qlora.ggmlv3.q4_K_S.bin"  # 8.14G
+
+ LLM = None
+
+ if "forindo" in platform.node():  # deploy 70b model locally
+     # url = "https://huggingface.co/TheBloke/llama-2-70b-Guanaco-QLoRA-GGML/blob/main/llama-2-70b-guanaco-qlora.ggmlv3.q3_K_S.bin"  # 29.7G
+     # model_loc = "/home/mu2018/github/langchain-llama-2-70b-guanaco-qlora-ggml/models/llama-2-70b-guanaco-qlora.ggmlv3.q3_K_S.bin"
+     _ = """
+     url = "https://huggingface.co/TheBloke/StableBeluga2-70B-GGML/blob/main/stablebeluga2-70b.ggmlv3.q3_K_S.bin"
+     try:
+         model_loc, file_size = dl_hf_model(url)
+         logger.info(f"done load llm {model_loc=} {file_size=}G")
+     except Exception as exc_:
+         logger.error(exc_)
+         raise SystemExit(1) from exc_
+     # """
+     model_loc = "models/stablebeluga2-70b.ggmlv3.q3_K_S.bin"
+     assert Path(model_loc).exists(), f"Make sure {model_loc=} exists."
+ else:
+     try:
+         logger.debug(f" dl {url}")
+         model_loc, file_size = dl_hf_model(url)
+         logger.info(f"done load llm {model_loc=} {file_size=}G")
+     except Exception as exc_:
+         logger.error(exc_)
+         raise SystemExit(1) from exc_
+
+ # raise SystemExit(0)
+
+ # Prompt template: Guanaco
+ # {past_history}
+ prompt_template = """You are a helpful assistant. Let's think step by step.
+ ### Human:
+ {question}
+ ### Assistant:"""
+ human_prefix = "### Human"
+ ai_prefix = "### Assistant"
+ stop_list = [f"{human_prefix}:"]
+
+ if "beluga" in model_loc.lower():
+     prompt_template = dedent(
+         """
+         ### System:
+         You are Stable Beluga, an AI that follows instructions extremely well. Help as much as you can.
+         Let's think step by step.
+
+         ### User: {question}
+
+         ### Assistant:
+         """
+     ).lstrip()
+     human_prefix = "### User"
+     ai_prefix = "### Assistant"
+     stop_list = [f"{human_prefix}:"]
+
+ _ = (psutil.cpu_count(logical=False) or 2) - 1  # cpu_count(logical=False) may return None
+ cpu_count: int = int(_) if _ else 1
+ logger.debug(f"{cpu_count=}")
+
+
+ logger.debug(f"{model_loc=}")
+ LLM = AutoModelForCausalLM.from_pretrained(
+     model_loc,
+     model_type="llama",
+     threads=cpu_count,
+ )
+
+ os.environ["TZ"] = "Asia/Shanghai"
+ try:
+     time.tzset()  # type: ignore # pylint: disable=no-member
+ except Exception:
+     # Windows
+     logger.warning("Windows, can't run time.tzset()")
+
+
+ @dataclass
+ class GenerationConfig:
+     temperature: float = 0.7
+     top_k: int = 50
+     top_p: float = 0.9
+     repetition_penalty: float = 1.0
+     max_new_tokens: int = 512
+     seed: int = 42
+     reset: bool = False
+     stream: bool = True
+     threads: int = cpu_count
+     stop: list[str] = field(default_factory=lambda: stop_list)
+
+
+ def generate(
+     question: str,
+     llm=LLM,
+     config: GenerationConfig = GenerationConfig(),
+ ):
+     """Run model inference; return a generator if streaming is on."""
+     # _ = prompt_template.format(question=question)
+     # print(_)
+
+     prompt = prompt_template.format(question=question)
+
+     return llm(
+         prompt,
+         **asdict(config),
+     )
+
+
+ logger.debug(f"{asdict(GenerationConfig())=}")
+
+
+ def user(user_message, history):
+     # return user_message, history + [[user_message, None]]
+     if history is None:
+         history = []
+     history.append([user_message, None])
+     return user_message, history  # keep user_message
+
+
+ def user1(user_message, history):
+     # return user_message, history + [[user_message, None]]
+     if history is None:
+         history = []
+     history.append([user_message, None])
+     return "", history  # clear user_message
+
+
+ def bot_(history):
+     user_message = history[-1][0]
+     resp = random.choice(["How are you?", "I love you", "I'm very hungry"])
+     bot_message = user_message + ": " + resp
+     history[-1][1] = ""
+     for character in bot_message:
+         history[-1][1] += character
+         time.sleep(0.02)
+         yield history
+
+     history[-1][1] = resp
+     yield history
+
+
+ def bot(history):
+     user_message = ""
+     try:
+         user_message = history[-1][0]
+     except Exception as exc:
+         logger.error(exc)
+     response = []
+
+     logger.debug(f"{user_message=}")
+
+     with about_time() as atime:  # type: ignore
+         flag = 1
+         prefix = ""
+         then = time.time()
+
+         logger.debug("about to generate")
+
+         config = GenerationConfig(reset=True)
+         for elm in generate(user_message, config=config):
+             if flag == 1:
+                 logger.debug("in the loop")
+                 prefix = f"({time.time() - then:.2f}s) "
+                 flag = 0
+                 print(prefix, end="", flush=True)
+                 logger.debug(f"{prefix=}")
+             print(elm, end="", flush=True)
+             # logger.debug(f"{elm}")
+
+             response.append(elm)
+             history[-1][1] = prefix + "".join(response)
+             yield history
+
+     _ = (
+         f"(time elapsed: {atime.duration_human}, "  # type: ignore
+         f"{atime.duration/len(''.join(response)):.2f}s/char)"  # type: ignore
+     )
+
+     history[-1][1] = "".join(response) + f"\n{_}"
+     yield history
+
+
+ def predict_api(prompt):
+     logger.debug(f"{prompt=}")
+     try:
+         # user_prompt = prompt
+         config = GenerationConfig(
+             temperature=0.2,
+             top_k=10,
+             top_p=0.9,
+             repetition_penalty=1.0,
+             max_new_tokens=512,  # adjust as needed
+             seed=42,
+             reset=True,  # reset history (cache)
+             stream=False,
+             # threads=cpu_count,
+             # stop=prompt_prefix[1:2],
+         )
+
+         response = generate(
+             prompt,
+             config=config,
+         )
+
+         logger.debug(f"api: {response=}")
+     except Exception as exc:
+         logger.error(exc)
+         response = f"{exc=}"
+     # bot = {"inputs": [response]}
+     # bot = [(prompt, response)]
+
+     return response
+
+
+ css = """
+     .importantButton {
+         background: linear-gradient(45deg, #7e0570,#5d1c99, #6e00ff) !important;
+         border: none !important;
+     }
+     .importantButton:hover {
+         background: linear-gradient(45deg, #ff00e0,#8500ff, #6e00ff) !important;
+         border: none !important;
+     }
+     .disclaimer {font-variant-caps: all-small-caps; font-size: xx-small;}
+     .xsmall {font-size: x-small;}
+ """
+
+ logger.info("start block")
+
+ with gr.Blocks(
+     title=f"{Path(model_loc).name}",
+     # theme=gr.themes.Soft(text_size="sm", spacing_size="sm"),
+     theme=gr.themes.Glass(text_size="sm", spacing_size="sm"),
+     css=css,
+ ) as block:
+     # buff_var = gr.State("")
+     with gr.Accordion("🎈 Info", open=False):
+         gr.Markdown(
+             f"""<h5><center>{Path(model_loc).name}</center></h5>
+             Most examples are meant for another model.
+             You probably should try to test
+             some related prompts.""",
+             elem_classes="xsmall",
+         )
+
+     # chatbot = gr.Chatbot().style(height=700)  # 500
+     chatbot = gr.Chatbot(height=500)
+
+     # buff = gr.Textbox(show_label=False, visible=True)
+
+     with gr.Row():
+         with gr.Column(scale=5):
+             msg = gr.Textbox(
+                 label="Chat Message Box",
+                 placeholder="Ask me anything (press Shift+Enter or click Submit to send)",
+                 show_label=False,
+                 # container=False,
+                 lines=6,
+                 max_lines=30,
+                 show_copy_button=True,
+                 # ).style(container=False)
+             )
+         with gr.Column(scale=1, min_width=50):
+             with gr.Row():
+                 submit = gr.Button("Submit", elem_classes="xsmall")
+                 stop = gr.Button("Stop", visible=True)
+                 clear = gr.Button("Clear History", visible=True)
+     with gr.Row(visible=False):
+         with gr.Accordion("Advanced Options:", open=False):
+             with gr.Row():
+                 with gr.Column(scale=2):
+                     system = gr.Textbox(
+                         label="System Prompt",
+                         value=prompt_template,
+                         show_label=False,
+                         container=False,
+                         # ).style(container=False)
+                     )
+                 with gr.Column():
+                     with gr.Row():
+                         change = gr.Button("Change System Prompt")
+                         reset = gr.Button("Reset System Prompt")
+
+     with gr.Accordion("Example Inputs", open=True):
+         examples = gr.Examples(
+             examples=examples_list,
+             inputs=[msg],
+             examples_per_page=40,
+         )
+
+     # with gr.Row():
+     with gr.Accordion("Disclaimer", open=False):
+         _ = Path(model_loc).name
+         gr.Markdown(
+             f"Disclaimer: {_} can produce factually incorrect output, and should not be relied on to produce "
+             f"factually accurate information. {_} was trained on various public datasets; while great efforts "
+             "have been taken to clean the pretraining data, it is possible that this model could generate lewd, "
+             "biased, or otherwise offensive outputs.",
+             elem_classes=["disclaimer"],
+         )
+
+     msg_submit_event = msg.submit(
+         # fn=conversation.user_turn,
+         fn=user,
+         inputs=[msg, chatbot],
+         outputs=[msg, chatbot],
+         queue=True,
+         show_progress="full",
+         # api_name=None,
+     ).then(bot, chatbot, chatbot, queue=True)
+     submit_click_event = submit.click(
+         # fn=lambda x, y: ("",) + user(x, y)[1:],  # clear msg
+         fn=user1,  # clear msg
+         inputs=[msg, chatbot],
+         outputs=[msg, chatbot],
+         queue=True,
+         # queue=False,
+         show_progress="full",
+         # api_name=None,
+     ).then(bot, chatbot, chatbot, queue=True)
+     stop.click(
+         fn=None,
+         inputs=None,
+         outputs=None,
+         cancels=[msg_submit_event, submit_click_event],
+         queue=False,
+     )
+     clear.click(lambda: None, None, chatbot, queue=False)
+
+     with gr.Accordion("For Chat/Translation API", open=False, visible=False):
+         input_text = gr.Text()
+         api_btn = gr.Button("Go", variant="primary")
+         out_text = gr.Text()
+
+     api_btn.click(
+         predict_api,
+         input_text,
+         out_text,
+         api_name="api",
+     )
+
+ # block.load(update_buff, [], buff, every=1)
+ # block.load(update_buff, [buff_var], [buff_var, buff], every=1)
+
+ # concurrency_count=5, max_size=20
+ # max_size=36, concurrency_count=14
+ # CPU cpu_count=2 16G, model 7G
+ # CPU UPGRADE cpu_count=8 32G, model 7G
+
+ # does not work
+ _ = """
+ # _ = int(psutil.virtual_memory().total / 10**9 // file_size - 1)
+ # concurrency_count = max(_, 1)
+ if psutil.cpu_count(logical=False) >= 8:
+     # concurrency_count = max(int(32 / file_size) - 1, 1)
+ else:
+     # concurrency_count = max(int(16 / file_size) - 1, 1)
+ # """
+
+ # default concurrency_count = 1
+ # block.queue(concurrency_count=concurrency_count, max_size=5).launch(debug=True)
+
+ server_port = 7860
+ if "forindo" in platform.node():
+     server_port = 7861
+ block.queue(max_size=5).launch(
+     debug=True, server_name="0.0.0.0", server_port=server_port
+ )
+
+ # block.queue(max_size=5).launch(debug=True, server_name="0.0.0.0")
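Note: the hidden "For Chat/Translation API" accordion above wires predict_api to api_name="api", so the endpoint can also be called programmatically. Below is a minimal client sketch, not part of this commit; it assumes the Space is running locally on the default port 7860 and that the gradio_client package is installed.

    # hypothetical client; point the URL at wherever app.py is serving
    from gradio_client import Client

    client = Client("http://127.0.0.1:7860/")
    # "/api" matches api_name="api" in app.py; the single input is the raw prompt
    result = client.predict("What is 1 + 1?", api_name="/api")
    print(result)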
dl_model.py ADDED
@@ -0,0 +1,73 @@
+ """Download models."""
+ # pylint: disable=invalid-name, broad-exception-caught, line-too-long
+ from typing import Optional
+
+ import typer
+ from dl_hf_model import dl_hf_model
+ from loguru import logger
+
+ url = "https://huggingface.co/TheBloke/Upstage-Llama-2-70B-instruct-v2-GGML/blob/main/upstage-llama-2-70b-instruct-v2.ggmlv3.q3_K_S.bin"
+
+
+ __version__ = "0.0.1"
+
+ app = typer.Typer(
+     name="dl-model",
+     add_completion=False,
+     help="download models from hf and save to a dir (default models)",
+ )
+
+
+ def _version_callback(value: bool) -> None:
+     if value:
+         typer.echo(
+             f"{app.info.name} v.{__version__} -- download models for given url(s)"
+         )
+         raise typer.Exit()
+
+
+ @app.command()
+ def main(
+     urls: str = typer.Argument(  # pylint: disable=unused-argument
+         "",
+         help=f"one or more urls (default {url})",
+         show_default=False,
+     ),
+     version: Optional[bool] = typer.Option(  # pylint: disable=unused-argument
+         None,
+         "--version",
+         "-v",
+         "-V",
+         help="Show version info and exit.",
+         callback=_version_callback,
+         is_eager=True,
+     ),
+     model_dir: Optional[str] = typer.Option(
+         None,
+         "--model-dir",
+         help="dir to save downloaded models (default models)",
+     ),
+ ):
+     """Download a model or models given url(s)."""
+     logger.trace(f"{urls}")
+     if model_dir is None:
+         model_dir = "models"  # NOTE: model_dir is not forwarded to dl_hf_model below
+     if isinstance(urls, str):
+         urls = urls.split()  # keep the split result (was discarded)
+
+     url_list = urls[:]
+     if not urls:
+         url_list = [url]
+     try:
+         for elm in url_list:
+             dl_hf_model(elm)
+     except Exception as exc:
+         logger.error(exc)
+         raise typer.Exit()
+
+
+ if __name__ == "__main__":
+     try:
+         app()
+     except Exception as exc_:
+         logger.error(exc_)
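The script is a thin typer wrapper around dl_hf_model. A minimal sketch of calling the helper directly, assuming only what this repo shows: the one-argument call and the (path, size-in-GB) return shape are inferred from app.py, and the URL is this module's own default. Note that model_dir is currently not forwarded to dl_hf_model.

    # sketch only; call signature inferred from dl_hf_model's usage in app.py
    from dl_hf_model import dl_hf_model

    url = "https://huggingface.co/TheBloke/Upstage-Llama-2-70B-instruct-v2-GGML/blob/main/upstage-llama-2-70b-instruct-v2.ggmlv3.q3_K_S.bin"
    model_loc, file_size = dl_hf_model(url)  # downloads (or reuses a cached copy) and returns the local path
    print(f"saved to {model_loc} ({file_size} GB)")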
examples_list.py ADDED
@@ -0,0 +1,43 @@
+ etext = """In America, where cars are an important part of the national psyche, a decade ago people had suddenly started to drive less, which had not happened since the oil shocks of the 1970s. """
+ examples_list = [
+     ["What NFL team won the Super Bowl in the year Justin Bieber was born?"],
+     [
+         "What NFL team won the Super Bowl in the year Justin Bieber was born? Think step by step."
+     ],
+     ["How to pick a lock? Provide detailed steps."],
+     [
+         "If it takes 10 hours to dry 10 clothes, assuming all the clothes are hung together at the same time for drying, then how long will it take to dry a cloth?"
+     ],
+     [
+         "If it takes 10 hours to dry 10 clothes, assuming all the clothes are hung together at the same time for drying, then how long will it take to dry a cloth? Think step by step."
+     ],
+     ["is infinity + 1 bigger than infinity?"],
+     ["Explain the plot of Cinderella in a sentence."],
+     [
+         "How long does it take to become proficient in French, and what are the best methods for retaining information?"
+     ],
+     ["What are some common mistakes to avoid when writing code?"],
+     ["Build a prompt to generate a beautiful portrait of a horse"],
+     ["Suggest four metaphors to describe the benefits of AI"],
+     ["Write a pop song about leaving home for the sandy beaches."],
+     ["Write a summary demonstrating my ability to tame lions"],
+     ["鲁迅和周树人什么关系? 说中文。"],
+     ["鲁迅和周树人什么关系?"],
+     ["鲁迅和周树人什么关系? 用英文回答。"],
+     ["从前有一头牛,这头牛后面有什么?"],
+     ["正无穷大加一大于正无穷大吗?"],
+     ["正无穷大加正无穷大大于正无穷大吗?"],
+     ["-2的平方根等于什么?"],
+     ["树上有5只鸟,猎人开枪打死了一只。树上还有几只鸟?"],
+     ["树上有11只鸟,猎人开枪打死了一只。树上还有几只鸟?提示:需考虑鸟可能受惊吓飞走。"],
+     ["以红楼梦的行文风格写一张委婉的请假条。不少于320字。"],
+     [f"Translate the following to Chinese. List 2 variants: \n{etext}"],
+     [f"{etext} 翻成中文,列出3个版本。"],
+     [f"{etext} \n 翻成中文,保留原意,但使用文学性的语言。不要写解释。列出3个版本。"],
+     ["假定 1 + 2 = 4, 试求 7 + 8。"],
+     ["给出判断一个数是不是质数的 javascript 码。"],
+     ["给出实现python 里 range(10)的 javascript 码。"],
+     ["给出实现python 里 [*range(10)]的 javascript 码。"],
+     ["Erkläre die Handlung von Cinderella in einem Satz."],
+     ["Erkläre die Handlung von Cinderella in einem Satz. Auf Deutsch."],
+ ]
package.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "name": "llama-2-70b-guanaco-qlora-ggml",
+   "version": "1.0.0",
+   "main": "index.js",
+   "repository": "https://huggingface.co/spaces/mikeee/llama-2-70b-guanaco-qlora-ggml",
+   "author": "ffreemt <[email protected]>",
+   "license": "MIT",
+   "devDependencies": {
+     "husky": "^8.0.0"
+   },
+   "scripts": {
+     "prepare": "husky install"
+   }
+ }
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ ctransformers  # ==0.2.10 0.2.13
+ transformers  # ==4.30.2
+ # huggingface_hub
+ gradio
+ loguru
+ about-time
+ psutil
+ dl-hf-model
run-app.sh ADDED
@@ -0,0 +1,2 @@
+ export GRADIO_SERVER_NAME=0.0.0.0
+ nodemon -w app.py -x python app.py
yarn.lock ADDED
@@ -0,0 +1,8 @@
+ # THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+ # yarn lockfile v1
+
+
+ husky@^8.0.0:
+   version "8.0.3"
+   resolved "https://registry.npmmirror.com/husky/-/husky-8.0.3.tgz#4936d7212e46d1dea28fef29bb3a108872cd9184"
+   integrity sha512-+dQSyqPh4x1hlO1swXBiNb2HzTDN1I2IGLQx1GrBuiqFJfoMrnZWwVmatvSiO+Iz8fBUnf+lekwNo4c2LlXItg==