johnsmith253325 committed · commit 7d0f396
1 Parent(s): 52cd289

feat: add LLaMA-7B-Chat
Browse files:
- modules/models/LLaMA.py (+15 -11)
- modules/presets.py (+13 -9)
- requirements_advanced.txt (+1 -4)
- run_Linux.sh (+0 -0)
- run_macOS.command (+0 -0)
modules/models/LLaMA.py
CHANGED

@@ -15,6 +15,13 @@ import json
 from llama_cpp import Llama
 from huggingface_hub import hf_hub_download
 
+SYS_PREFIX = "<<SYS>>\n"
+SYS_POSTFIX = "\n<</SYS>>\n\n"
+INST_PREFIX = "<s>[INST] "
+INST_POSTFIX = " "
+OUTPUT_PREFIX = "[/INST] "
+OUTPUT_POSTFIX = "</s>"
+
 def download(repo_id, filename, retry=10):
     if os.path.exists("./models/downloaded_models.json"):
         with open("./models/downloaded_models.json", "r") as f:
@@ -70,18 +77,15 @@ class LLaMA_Client(BaseLLMModel):
         # lora_path = f"lora/{lora_path}"
 
     def _get_llama_style_input(self):
-        history = []
-        instruction = ""
-        if self.system_prompt:
-            instruction = f"Instruction: {self.system_prompt}\n"
-        for x in self.history:
-            if x["role"] == "user":
-                history.append(f"{instruction}Input: {x['content']}")
+        context = []
+        for conv in self.history:
+            if conv["role"] == "system":
+                context.append(SYS_PREFIX+conv["content"]+SYS_POSTFIX)
+            elif conv["role"] == "user":
+                context.append(INST_PREFIX+conv["content"]+INST_POSTFIX+OUTPUT_PREFIX)
             else:
-                history.append(f"Output: {x['content']}")
-        context = "\n\n".join(history)
-        context += "\n\nOutput: "
-        return context
+                context.append(conv["content"]+OUTPUT_POSTFIX)
+        return "".join(context)
 
     def get_answer_at_once(self):
         context = self._get_llama_style_input()
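The new `_get_llama_style_input` assembles a Llama-2 chat prompt from the six template constants added at the top of the file. The standalone sketch below (the free function and the sample history are illustrative, not part of the commit) shows the string the method would produce:

```python
# Standalone sketch of the new prompt builder; mirrors
# LLaMA_Client._get_llama_style_input from the diff above.
SYS_PREFIX = "<<SYS>>\n"
SYS_POSTFIX = "\n<</SYS>>\n\n"
INST_PREFIX = "<s>[INST] "
INST_POSTFIX = " "
OUTPUT_PREFIX = "[/INST] "
OUTPUT_POSTFIX = "</s>"

def build_llama_style_input(history):
    # System turns are wrapped in <<SYS>> tags, user turns in [INST] tags,
    # and completed assistant turns are closed with </s>.
    context = []
    for conv in history:
        if conv["role"] == "system":
            context.append(SYS_PREFIX + conv["content"] + SYS_POSTFIX)
        elif conv["role"] == "user":
            context.append(INST_PREFIX + conv["content"] + INST_POSTFIX + OUTPUT_PREFIX)
        else:
            context.append(conv["content"] + OUTPUT_POSTFIX)
    return "".join(context)

# Invented sample history, for illustration only.
history = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hi!"},
    {"role": "assistant", "content": "Hello!"},
    {"role": "user", "content": "Tell me a joke."},
]
print(build_llama_style_input(history))
# <<SYS>>
# You are a helpful assistant.
# <</SYS>>
#
# <s>[INST] Hi! [/INST] Hello!</s><s>[INST] Tell me a joke. [/INST]
```

One caveat worth flagging: this builder emits the `<<SYS>>` block before the first `<s>[INST]`, whereas Meta's reference Llama-2 chat template nests the system block inside the first `[INST]` segment.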
modules/presets.py
CHANGED

@@ -83,9 +83,21 @@ LOCAL_MODELS = [
     "chatglm2-6b-int4",
     "StableLM",
     "MOSS",
-    "Llama-2-7B",
+    "Llama-2-7B-Chat",
 ]
 
+# Additional metadata for local models
+MODEL_METADATA = {
+    "Llama-2-7B":{
+        "repo_id": "TheBloke/Llama-2-7B-GGUF",
+        "filelist": ["llama-2-7b.Q6_K.gguf"],
+    },
+    "Llama-2-7B-Chat":{
+        "repo_id": "TheBloke/Llama-2-7b-Chat-GGUF",
+        "filelist": ["llama-2-7b-chat.Q6_K.gguf"],
+    }
+}
+
 if os.environ.get('HIDE_LOCAL_MODELS', 'false') == 'true':
     MODELS = ONLINE_MODELS
 else:
@@ -262,11 +274,3 @@ small_and_beautiful_theme = gr.themes.Soft(
     # gradio applies these chatbot-prefixed variables to other markdown-rendered places; who knows what they were thinking...
     chatbot_code_background_color_dark="*neutral_950",
 )
-
-# Additional metadata for local models
-MODEL_METADATA = {
-    "Llama-2-7B":{
-        "repo_id": "TheBloke/Llama-2-7B-GGUF",
-        "filelist": ["llama-2-7b.Q6_K.gguf"],
-    }
-}
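Each `MODEL_METADATA` entry pairs a model name with the Hugging Face repo and the GGUF file(s) to fetch. Below is a minimal sketch of how such an entry maps onto `hf_hub_download`; the helper is illustrative only, since the commit's actual download path (with retries and the `./models/downloaded_models.json` cache) is the `download` function in `modules/models/LLaMA.py`:

```python
# Illustrative helper, not code from the commit: resolve a MODEL_METADATA
# entry to local file paths via huggingface_hub.
from huggingface_hub import hf_hub_download

MODEL_METADATA = {
    "Llama-2-7B-Chat": {
        "repo_id": "TheBloke/Llama-2-7b-Chat-GGUF",
        "filelist": ["llama-2-7b-chat.Q6_K.gguf"],
    },
}

def fetch_model_files(model_name, local_dir="./models"):
    meta = MODEL_METADATA[model_name]
    # Download every file listed for the model; hf_hub_download returns
    # the local path of each fetched file.
    return [
        hf_hub_download(repo_id=meta["repo_id"], filename=fname, local_dir=local_dir)
        for fname in meta["filelist"]
    ]

paths = fetch_model_files("Llama-2-7B-Chat")
```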
requirements_advanced.txt
CHANGED

@@ -1,11 +1,8 @@
 transformers
 huggingface_hub
 torch
-icetk
-protobuf==3.19.0
-git+https://github.com/OptimalScale/LMFlow.git
 cpm-kernels
 sentence_transformers
 accelerate
 sentencepiece
-
+llama-cpp-python
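With icetk, the pinned protobuf, and the LMFlow Git dependency dropped, local inference now goes through `llama-cpp-python`, which loads the GGUF files listed in `MODEL_METADATA`. A minimal usage sketch, assuming the model file has already been downloaded (the model path and generation parameters are assumptions, not values from this commit):

```python
# Minimal llama-cpp-python sketch; path and parameters are examples.
from llama_cpp import Llama

llm = Llama(model_path="./models/llama-2-7b-chat.Q6_K.gguf", n_ctx=2048)
out = llm(
    "<s>[INST] What is a GGUF file? [/INST] ",  # Llama-2 chat-style prompt
    max_tokens=128,
    stop=["</s>"],  # stop at the end-of-turn marker the prompt builder uses
)
print(out["choices"][0]["text"])
```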
run_Linux.sh
CHANGED
File without changes

run_macOS.command
CHANGED
File without changes