Dagfinn1962 committed on
Commit
13f006f
1 Parent(s): c6cfabd

Update app1.py

Browse files
Files changed (1) hide show
  1. app1.py +60 -95
app1.py CHANGED
@@ -1,16 +1,10 @@
1
- import gradio as gr
2
  import openai
3
- import os
4
  import time
5
  import logging
6
-
7
-
8
-
9
-
10
-
11
-
12
-
13
-
14
 
15
  logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
16
  logging.warning("READY. App started...")
@@ -18,29 +12,24 @@ logging.warning("READY. App started...")
18
 
19
  class Chat:
20
  default_system_prompt = "A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers."
21
- system_format = "<|im_start|>system\n{}<|im_end|>\n"
22
 
23
- def __init__(
24
- self, system: str = None, user: str = None, assistant: str = None
25
- ) -> None:
26
  if system is not None:
27
  self.set_system_prompt(system)
28
  else:
29
  self.reset_system_prompt()
30
- self.user = user if user else "<|im_start|>user\n{}<|im_end|>\n"
31
- self.assistant = (
32
- assistant if assistant else "<|im_start|>assistant\n{}<|im_end|>\n"
33
- )
34
  self.response_prefix = self.assistant.split("{}")[0]
35
 
36
  def set_system_prompt(self, system_prompt):
37
- # self.system = self.system_format.format(system_prompt)
38
  return system_prompt
39
 
40
  def reset_system_prompt(self):
41
  return self.set_system_prompt(self.default_system_prompt)
42
 
43
- def history_as_formatted_str(self, system, history) -> str:
44
  system = self.system_format.format(system)
45
  text = system + "".join(
46
  [
@@ -55,27 +44,18 @@ class Chat:
55
  )
56
  text += self.user.format(history[-1][0])
57
  text += self.response_prefix
58
- # stopgap solution to too long sequences
59
- if len(text) > 4500:
60
- # delete from the middle between <|im_start|> and <|im_end|>
61
- # find the middle ones, then expand out
62
- start = text.find("<|im_start|>", 139)
63
- end = text.find("<|im_end|>", 139)
64
- while end < len(text) and len(text) > 4500:
65
- end = text.find("<|im_end|>", end + 1)
66
- text = text[:start] + text[end + 1 :]
67
- if len(text) > 4500:
68
- # the nice way didn't work, just truncate
69
- # deleting the beginning
70
- text = text[-4500:]
71
 
72
  return text
73
 
74
  def clear_history(self, history):
75
  return []
76
 
77
- def turn(self, user_input: str):
78
- self.user_turn(user_input)
79
  return self.bot_turn()
80
 
81
  def user_turn(self, user_input: str, history):
@@ -85,8 +65,6 @@ class Chat:
85
  def bot_turn(self, system, history, openai_key):
86
  conversation = self.history_as_formatted_str(system, history)
87
  assistant_response = call_inf_server(conversation, openai_key)
88
- # history[-1][-1] = assistant_response
89
- # return history
90
  history[-1][1] = ""
91
  for chunk in assistant_response:
92
  try:
@@ -98,19 +76,18 @@ class Chat:
98
 
99
 
100
  def call_inf_server(prompt, openai_key):
101
- model_id = "gpt-3.5-turbo" # "gpt-3.5-turbo-16k",
102
  model = llm_boiler(model_id, openai_key)
103
  logging.warning(f'Inf via "{model_id}"" for prompt "{prompt}"')
104
 
105
  try:
106
- # run text generation
107
  response = model.run(prompt, temperature=1.0)
108
  logging.warning(f"Result of text generation: {response}")
109
  return response
110
 
111
  except Exception as e:
112
- # assume it is our error
113
- # just wait and try one more time
114
  print(e)
115
  time.sleep(2)
116
  response = model.run(prompt, temperature=1.0)
@@ -118,34 +95,21 @@ def call_inf_server(prompt, openai_key):
118
  return response
119
 
120
 
121
- with gr.Blocks(theme='HaleyCH/HaleyCH_Theme') as demo:
122
- # org :
123
- #theme=gr.themes.Glass(
124
- #primary_hue="lime",
125
- #secondary_hue="emerald",
126
- #neutral_hue="zinc",
127
-
128
 
129
 
 
 
 
 
 
 
130
  gr.Markdown(
131
  """
132
  <br><h1><center>Chat with gpt-3.5-turbo</center></h1>
133
- This is a lightweight gpt-3.5-turbo conversation
134
- completion.
135
- """
136
- )
137
  conversation = Chat()
138
- with gr.Row():
139
- with gr.Column():
140
- # to do: change to openaikey input for public release
141
- openai_key = gr.Textbox(
142
- label="OpenAI Key",
143
- value="",
144
- type="password",
145
- placeholder="os.environ.get('openai_key')",
146
- info="You have to provide your own OpenAI API key.",
147
- )
148
-
149
  chatbot = gr.Chatbot().style(height=400)
150
  with gr.Row():
151
  with gr.Column():
@@ -174,7 +138,7 @@ with gr.Blocks(theme='HaleyCH/HaleyCH_Theme') as demo:
174
  reset = gr.Button("Reset System Prompt")
175
  with gr.Row():
176
  gr.Markdown(
177
- "Disclaimer: The gpt-3.5-turbo model can produce factually incorrect output, and should not be solely relied on to produce "
178
  "factually accurate information. The gpt-3.5-turbo model was trained on various public datasets; while great efforts "
179
  "have been taken to clean the pretraining data, it is possible that this model could generate lewd, "
180
  "biased, or otherwise offensive outputs.",
@@ -182,39 +146,40 @@ with gr.Blocks(theme='HaleyCH/HaleyCH_Theme') as demo:
182
  )
183
  with gr.Row():
184
  gr.Markdown(
185
- "[Privacy policy](https://gist.github.com/samhavens/c29c68cdcd420a9aa0202d0839876dac)",
186
  elem_classes=["disclaimer"],
187
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
188
 
189
- submit_event = msg.submit(
190
- fn=conversation.user_turn,
191
- inputs=[msg, chatbot],
192
- outputs=[msg, chatbot],
193
- queue=False,
194
- ).then(
195
- fn=conversation.bot_turn,
196
- inputs=[system, chatbot, openai_key],
197
- outputs=[chatbot],
198
- queue=True,
199
- )
200
- submit_click_event = submit.click(
201
- fn=conversation.user_turn,
202
- inputs=[msg, chatbot],
203
- outputs=[msg, chatbot],
204
- queue=False,
205
- ).then(
206
- fn=conversation.bot_turn,
207
- inputs=[system, chatbot, openai_key],
208
- outputs=[chatbot],
209
- queue=True,
210
- )
211
- stop.click(
212
- fn=None,
213
- inputs=None,
214
- outputs=None,
215
- cancels=[submit_event, submit_click_event],
216
- queue=False,
217
- )
218
  clear.click(lambda: None, None, chatbot, queue=False).then(
219
  fn=conversation.clear_history,
220
  inputs=[chatbot],
 
 
1
  import openai
 
2
  import time
3
  import logging
4
+ import gradio as gr
5
+ import os
6
+ from src.llm_boilers import llm_boiler
7
+ import configparser
 
 
 
 
8
 
9
  logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
10
  logging.warning("READY. App started...")
 
12
 
13
  class Chat:
14
  default_system_prompt = "A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers."
15
+ system_format = "system\n{}\n"
16
 
17
+ def __init__(self, system: str = None, user: str = None, assistant: str = None):
 
 
18
  if system is not None:
19
  self.set_system_prompt(system)
20
  else:
21
  self.reset_system_prompt()
22
+ self.user = user if user else "user\n{}\n"
23
+ self.assistant = assistant if assistant else "assistant\n{}\n"
 
 
24
  self.response_prefix = self.assistant.split("{}")[0]
25
 
26
  def set_system_prompt(self, system_prompt):
 
27
  return system_prompt
28
 
29
  def reset_system_prompt(self):
30
  return self.set_system_prompt(self.default_system_prompt)
31
 
32
+ def history_as_formatted_str(self, system, history):
33
  system = self.system_format.format(system)
34
  text = system + "".join(
35
  [
 
44
  )
45
  text += self.user.format(history[-1][0])
46
  text += self.response_prefix
47
+
48
+ # Truncate text if it exceeds the limit
49
+ if len(text) > 4096:
50
+ text = text[-4096:]
 
 
 
 
 
 
 
 
 
51
 
52
  return text
53
 
54
  def clear_history(self, history):
55
  return []
56
 
57
+ def turn(self, user_input: str, history):
58
+ self.user_turn(user_input, history)
59
  return self.bot_turn()
60
 
61
  def user_turn(self, user_input: str, history):
 
65
  def bot_turn(self, system, history, openai_key):
66
  conversation = self.history_as_formatted_str(system, history)
67
  assistant_response = call_inf_server(conversation, openai_key)
 
 
68
  history[-1][1] = ""
69
  for chunk in assistant_response:
70
  try:
 
76
 
77
 
78
  def call_inf_server(prompt, openai_key):
79
+ model_id = "gpt-3.5-turbo"
80
  model = llm_boiler(model_id, openai_key)
81
  logging.warning(f'Inf via "{model_id}"" for prompt "{prompt}"')
82
 
83
  try:
84
+ # Run text generation
85
  response = model.run(prompt, temperature=1.0)
86
  logging.warning(f"Result of text generation: {response}")
87
  return response
88
 
89
  except Exception as e:
90
+ # Wait and try one more time
 
91
  print(e)
92
  time.sleep(2)
93
  response = model.run(prompt, temperature=1.0)
 
95
  return response
96
 
97
 
 
 
 
 
 
 
 
98
 
99
 
100
+ # Get the OpenAI key from the environment variable
101
+ openai_key = os.getenv("OPENAI_API_KEY")
102
+
103
+
104
+
105
+ with gr.Blocks(theme='HaleyCH/HaleyCH_Theme') as demo:
106
  gr.Markdown(
107
  """
108
  <br><h1><center>Chat with gpt-3.5-turbo</center></h1>
109
+ This is a lightweight gpt-3.5-turbo conversation completion.
110
+ """
111
+ )
 
112
  conversation = Chat()
 
 
 
 
 
 
 
 
 
 
 
113
  chatbot = gr.Chatbot().style(height=400)
114
  with gr.Row():
115
  with gr.Column():
 
138
  reset = gr.Button("Reset System Prompt")
139
  with gr.Row():
140
  gr.Markdown(
141
+ "Disclaimer: The gpt-3.5-turbo model can produce factually incorrect output and should not be solely relied on to produce "
142
  "factually accurate information. The gpt-3.5-turbo model was trained on various public datasets; while great efforts "
143
  "have been taken to clean the pretraining data, it is possible that this model could generate lewd, "
144
  "biased, or otherwise offensive outputs.",
 
146
  )
147
  with gr.Row():
148
  gr.Markdown(
149
+ "[Privacy policy](https://gist.github.com/samhavens/c29c68cdcd420a9aa0202d0839876dac)",
150
  elem_classes=["disclaimer"],
151
 + )
 + submit_event = msg.submit(
152
+ fn=conversation.user_turn,
153
+ inputs=[msg],
154
+ outputs=[msg, chatbot],
155
+ queue=False,
156
+ ).then(
157
+ fn=conversation.bot_turn,
158
+ inputs=[system, chatbot, openai_key],
159
+ outputs=[chatbot],
160
+ queue=True, # Change `queue=True` to `keep_in_queue=True`
161
+ )
162
+
163
+ submit_click_event = submit.click(
164
+ fn=conversation.user_turn,
165
+ inputs=[msg],
166
+ outputs=[msg, chatbot],
167
+ queue=False,
168
+ ).then(
169
+ fn=conversation.bot_turn,
170
+ inputs=[system, chatbot, openai_key],
171
+ outputs=[chatbot],
172
+ queue=True, # Change `queue=True` to `keep_in_queue=True`
173
+ )
174
+
175
+ stop.click(
176
+ fn=None,
177
+ inputs=None,
178
+ outputs=None,
179
+ cancels=[submit_event, submit_click_event],
180
+ queue=False,
181
+ )
182
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
183
  clear.click(lambda: None, None, chatbot, queue=False).then(
184
  fn=conversation.clear_history,
185
  inputs=[chatbot],