Dagfinn1962 committed on
Commit
ff6a62e
1 Parent(s): f345297

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +221 -21
app.py CHANGED
@@ -9,19 +9,151 @@ from src.llm_boilers import llm_boiler
9
  # Read the API key from the .env file
10
  openai.api_key = os.getenv("API_KEY")
11
 
12
- # Define the chatbot function
13
- def chatbot(message):
14
- response = openai.Completion.create(
15
- model="gpt-3.5-turbo",
16
- prompt=message,
17
- max_tokens=50,
18
- temperature=0.8,
19
- )
20
- return response.choices[0].text.strip()
21
 
22
- # Create the interface
23
- chatbot = gr.Chatbot().style(height=400)
24
- with gr.Row():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  with gr.Column():
26
  msg = gr.Textbox(
27
  label="Chat Message Box",
@@ -33,12 +165,80 @@ chatbot = gr.Chatbot().style(height=400)
33
  submit = gr.Button("Submit")
34
  stop = gr.Button("Stop")
35
  clear = gr.Button("Clear")
36
- gr.Interface(
37
- fn=chat_interface,
38
- inputs=textbox,
39
- outputs=output_text,
40
- title="Chat with GPT-3.5 Turbo",
41
- description="This is a chatbot powered by GPT-3.5 Turbo.",
42
- theme="huggingface",
43
- server_port=8080 # Change this to the desired port number
44
- ).launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
  # Read the API key from the .env file
10
  openai.api_key = os.getenv("API_KEY")
11
 
 
 
 
 
 
 
 
 
 
12
 
13
+
14
+ # Get the OpenAI key from the configuration file
15
+ openai_key = config.get('Credentials', 'openai_key')
16
+
17
+ # Use openai_key in your code
18
+
19
+
20
+
21
+ logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
22
+ logging.warning("READY. App started...")
23
+
24
+
25
+ class Chat:
26
+ default_system_prompt = "A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers."
27
+ system_format = "<|im_start|>system\n{}<|im_end|>\n"
28
+
29
+ def __init__(
30
+ self, system: str = None, user: str = None, assistant: str = None
31
+ ) -> None:
32
+ if system is not None:
33
+ self.set_system_prompt(system)
34
+ else:
35
+ self.reset_system_prompt()
36
+ self.user = user if user else "<|im_start|>user\n{}<|im_end|>\n"
37
+ self.assistant = (
38
+ assistant if assistant else "<|im_start|>assistant\n{}<|im_end|>\n"
39
+ )
40
+ self.response_prefix = self.assistant.split("{}")[0]
41
+
42
+ def set_system_prompt(self, system_prompt):
43
+ # self.system = self.system_format.format(system_prompt)
44
+ return system_prompt
45
+
46
+ def reset_system_prompt(self):
47
+ return self.set_system_prompt(self.default_system_prompt)
48
+
49
+ def history_as_formatted_str(self, system, history) -> str:
50
+ system = self.system_format.format(system)
51
+ text = system + "".join(
52
+ [
53
+ "\n".join(
54
+ [
55
+ self.user.format(item[0]),
56
+ self.assistant.format(item[1]),
57
+ ]
58
+ )
59
+ for item in history[:-1]
60
+ ]
61
+ )
62
+ text += self.user.format(history[-1][0])
63
+ text += self.response_prefix
64
+ # stopgap solution to too long sequences
65
+ if len(text) > 4500:
66
+ # delete from the middle between <|im_start|> and <|im_end|>
67
+ # find the middle ones, then expand out
68
+ start = text.find("<|im_start|>", 139)
69
+ end = text.find("<|im_end|>", 139)
70
+ while end < len(text) and len(text) > 4500:
71
+ end = text.find("<|im_end|>", end + 1)
72
+ text = text[:start] + text[end + 1 :]
73
+ if len(text) > 4500:
74
+ # the nice way didn't work, just truncate
75
+ # deleting the beginning
76
+ text = text[-4500:]
77
+
78
+ return text
79
+
80
+ def clear_history(self, history):
81
+ return []
82
+
83
+ def turn(self, user_input: str):
84
+ self.user_turn(user_input)
85
+ return self.bot_turn()
86
+
87
+ def user_turn(self, user_input: str, history):
88
+ history.append([user_input, ""])
89
+ return user_input, history
90
+
91
+ def bot_turn(self, system, history, openai_key):
92
+ conversation = self.history_as_formatted_str(system, history)
93
+ assistant_response = call_inf_server(conversation, openai_key)
94
+ # history[-1][-1] = assistant_response
95
+ # return history
96
+ history[-1][1] = ""
97
+ for chunk in assistant_response:
98
+ try:
99
+ decoded_output = chunk["choices"][0]["delta"]["content"]
100
+ history[-1][1] += decoded_output
101
+ yield history
102
+ except KeyError:
103
+ pass
104
+
105
+
106
+ def call_inf_server(prompt, openai_key):
107
+ model_id = "gpt-3.5-turbo" # "gpt-3.5-turbo-16k",
108
+ model = llm_boiler(model_id, openai_key)
109
+ logging.warning(f'Inf via "{model_id}"" for prompt "{prompt}"')
110
+
111
+ try:
112
+ # run text generation
113
+ response = model.run(prompt, temperature=1.0)
114
+ logging.warning(f"Result of text generation: {response}")
115
+ return response
116
+
117
+ except Exception as e:
118
+ # assume it is our error
119
+ # just wait and try one more time
120
+ print(e)
121
+ time.sleep(2)
122
+ response = model.run(prompt, temperature=1.0)
123
+ logging.warning(f"Result of text generation: {response}")
124
+ return response
125
+
126
+
127
+ with gr.Blocks(theme='HaleyCH/HaleyCH_Theme') as demo:
128
+ # org :
129
+ #theme=gr.themes.Glass(
130
+ #primary_hue="lime",
131
+ #secondary_hue="emerald",
132
+ #neutral_hue="zinc",
133
+
134
+
135
+
136
+ gr.Markdown(
137
+ """
138
+ <br><h1><center>Chat with gpt-3.5-turbo</center></h1>
139
+ This is a lightweight gpt-3.5-turbo conversation
140
+ completion.
141
+ """
142
+ )
143
+ conversation = Chat()
144
+ with gr.Row():
145
+ with gr.Column():
146
+ # to do: change to openaikey input for public release
147
+ openai_key = gr.Textbox(
148
+ label="OpenAI Key",
149
+ value="",
150
+ type="password",
151
+ placeholder="os.environ.get('openai_key')",
152
+ info="You have to provide your own OpenAI API key.",
153
+ )
154
+
155
+ chatbot = gr.Chatbot().style(height=400)
156
+ with gr.Row():
157
  with gr.Column():
158
  msg = gr.Textbox(
159
  label="Chat Message Box",
 
165
  submit = gr.Button("Submit")
166
  stop = gr.Button("Stop")
167
  clear = gr.Button("Clear")
168
+ with gr.Row():
169
+ with gr.Accordion("Advanced Options:", open=False):
170
+ with gr.Row():
171
+ with gr.Column(scale=2):
172
+ system = gr.Textbox(
173
+ label="System Prompt",
174
+ value=Chat.default_system_prompt,
175
+ show_label=False,
176
+ ).style(container=False)
177
+ with gr.Column():
178
+ with gr.Row():
179
+ change = gr.Button("Change System Prompt")
180
+ reset = gr.Button("Reset System Prompt")
181
+ with gr.Row():
182
+ gr.Markdown(
183
+ "Disclaimer: The gpt-3.5-turbo model can produce factually incorrect output, and should not be solely relied on to produce "
184
+ "factually accurate information. The gpt-3.5-turbo model was trained on various public datasets; while great efforts "
185
+ "have been taken to clean the pretraining data, it is possible that this model could generate lewd, "
186
+ "biased, or otherwise offensive outputs.",
187
+ elem_classes=["disclaimer"],
188
+ )
189
+ with gr.Row():
190
+ gr.Markdown(
191
+ "[Privacy policy](https://gist.github.com/samhavens/c29c68cdcd420a9aa0202d0839876dac)",
192
+ elem_classes=["disclaimer"],
193
+ )
194
+
195
+ submit_event = msg.submit(
196
+ fn=conversation.user_turn,
197
+ inputs=[msg, chatbot],
198
+ outputs=[msg, chatbot],
199
+ queue=False,
200
+ ).then(
201
+ fn=conversation.bot_turn,
202
+ inputs=[system, chatbot, openai_key],
203
+ outputs=[chatbot],
204
+ queue=True,
205
+ )
206
+ submit_click_event = submit.click(
207
+ fn=conversation.user_turn,
208
+ inputs=[msg, chatbot],
209
+ outputs=[msg, chatbot],
210
+ queue=False,
211
+ ).then(
212
+ fn=conversation.bot_turn,
213
+ inputs=[system, chatbot, openai_key],
214
+ outputs=[chatbot],
215
+ queue=True,
216
+ )
217
+ stop.click(
218
+ fn=None,
219
+ inputs=None,
220
+ outputs=None,
221
+ cancels=[submit_event, submit_click_event],
222
+ queue=False,
223
+ )
224
+ clear.click(lambda: None, None, chatbot, queue=False).then(
225
+ fn=conversation.clear_history,
226
+ inputs=[chatbot],
227
+ outputs=[chatbot],
228
+ queue=False,
229
+ )
230
+ change.click(
231
+ fn=conversation.set_system_prompt,
232
+ inputs=[system],
233
+ outputs=[system],
234
+ queue=False,
235
+ )
236
+ reset.click(
237
+ fn=conversation.reset_system_prompt,
238
+ inputs=[],
239
+ outputs=[system],
240
+ queue=False,
241
+ )
242
+
243
+
244
+ demo.queue(max_size=36, concurrency_count=14).launch(debug=True)