Update scale_n_consult.py
scale_n_consult.py  +20 -2  CHANGED
@@ -6,17 +6,35 @@ OpenAI.api_key = os.getenv("OPENAI_API_KEY")
 api_key = os.getenv("OPENAI_API_KEY")
 client = OpenAI(api_key=api_key)
 
+initial_prompt = """
+You are a [professional psychological counselor]. Below is your user's [basic profile], given in JSON format:
+{
+    "Depression": "mild depression",
+    "Sleep": "a little difficulty falling asleep",
+    "Weight": "normal weight",
+    "Thinking": "mind sometimes goes blank",
+    "Mood": "somewhat low and gloomy, prone to crying, heartbeat sometimes faster than usual",
+    "Sense of worth": "sometimes feels a bit lost, not knowing what to do",
+    "Constipation": "occasional",
+    "Sex life": "normal",
+    "Suicidal tendency": "no suicidal tendency",
+}
+When talking with the user, you must [proactively] ask questions and keep them [centered on] the user's [basic profile]; the [negative] items in the profile need to be explored [with particular emphasis].
+First, conduct a [detailed consultation] based on the user's profile, but [do not raise every question at once; ask them one by one], proactively [asking] the user the relevant questions and giving the corresponding [advice].
+Once the questioning is finished, generate a [consultation report] containing [the user's situation], [corresponding advice], and [a final summary], output first in Markdown format and then in JSON format.
+Finally, tell the user that the assessment has ended and that you will no longer ask questions proactively, but the client may continue to ask you about anything they are interested in and you will answer.
+"""
 
 
 def predict(message, history):
     history_openai_format = []
-    history_openai_format.append({"role": "assistant", "content": …
+    history_openai_format.append({"role": "assistant", "content":initial_prompt})
     for human, assistant in history:
         history_openai_format.append({"role": "user", "content": human })
         history_openai_format.append({"role": "assistant", "content":assistant})
     history_openai_format.append({"role": "user", "content": message})
 
-    response = client.chat.completions.create(model='gpt-…
+    response = client.chat.completions.create(model='gpt-4-turbo-preview',
                                                messages= history_openai_format,
                                                temperature=1.0,
                                                stream=True)