bleysg committed on
Commit
ccca515
β€’
1 Parent(s): 8525f61

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -22
app.py CHANGED
@@ -10,14 +10,8 @@ openai.api_key = os.environ.get("OPENAI_API_KEY")
10
 
11
  BASE_SYSTEM_MESSAGE = """"""
12
 
13
- def make_prediction(chat, prompt, max_tokens=None, temperature=None, top_p=None, top_k=None, repetition_penalty=None):
14
- if chat == "Chatbot1":
15
- openai.api_base = os.environ.get("OPENAI_API_BASE")
16
- completion = openai.Completion.create(model="wizardcoder-python-34b-v1.0.Q5_K_M.gguf", prompt=prompt, max_tokens=max_tokens, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty, stream=True, stop=["</s>", "<|im_end|>"])
17
- elif chat == "Chatbot2":
18
- openai.api_base = os.environ.get("OPENAI_API_BASE2")
19
- completion = openai.Completion.create(model="wizardcoder-python-34b-v1.0.Q5_K_M.gguf", prompt=prompt, max_tokens=max_tokens, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty, stream=True, stop=["</s>", "<|im_end|>"])
20
-
21
  for chunk in completion:
22
  yield chunk["choices"][0]["text"]
23
 
@@ -35,7 +29,7 @@ def user(message, history):
35
  return "", history
36
 
37
 
38
- def chat(chatbot, history, system_message, max_tokens, temperature, top_p, top_k, repetition_penalty):
39
  history = history or []
40
 
41
  messages = BASE_SYSTEM_MESSAGE + system_message.strip() + "\n" + \
@@ -47,7 +41,6 @@ def chat(chatbot, history, system_message, max_tokens, temperature, top_p, top_k
47
  messages = messages.rstrip()
48
 
49
  prediction = make_prediction(
50
- chatbot,
51
  messages,
52
  max_tokens=max_tokens,
53
  temperature=temperature,
@@ -74,8 +67,7 @@ CSS ="""
74
  .contain { display: flex; flex-direction: column; }
75
  .gradio-container { height: 100vh !important; }
76
  #component-0 { height: 100%; }
77
- #chatbot1 { flex-grow: 1; overflow: auto; resize: vertical; }
78
- #chatbot2 { flex-grow: 1; overflow: auto; resize: vertical; }
79
  """
80
 
81
  #with gr.Blocks() as demo:
@@ -84,15 +76,13 @@ with gr.Blocks(css=CSS) as demo:
84
  with gr.Column():
85
  gr.Markdown(f"""
86
  ## This demo is an unquantized GPU chatbot of [WizardCoder-Python-34B-V1.0-GGUF](https://huggingface.co/TheBloke/WizardCoder-Python-34B-V1.0-GGUF)
 
87
  """)
88
  with gr.Row():
89
  gr.Markdown("# πŸ” WizardCoder-Python-34B-V1.0-GGUF Playground Space! πŸ”Ž")
90
  with gr.Row():
91
- with gr.Column():
92
- #chatbot = gr.Chatbot().style(height=500)
93
- chatbot1 = gr.Chatbot(label="Chatbot1", elem_id="chatbot1")
94
- with gr.Column():
95
- chatbot2 = gr.Chatbot(label="Chatbot2", elem_id="chatbot2")
96
  with gr.Row():
97
  message = gr.Textbox(
98
  label="What do you want to chat about?",
@@ -117,15 +107,12 @@ with gr.Blocks(css=CSS) as demo:
117
 
118
  chat_history_state = gr.State()
119
  clear.click(clear_chat, inputs=[chat_history_state, message], outputs=[chat_history_state, message], queue=False)
120
- clear.click(lambda: None, None, chatbot1, queue=False)
121
- clear.click(lambda: None, None, chatbot2, queue=False)
122
 
123
  submit_click_event = submit.click(
124
  fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True
125
  ).then(
126
- fn=chat, inputs=["Chatbot1", chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repetition_penalty], outputs=['chatbot1', chat_history_state, message], queue=True
127
- ).then(
128
- fn=chat, inputs=["Chatbot2", chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repetition_penalty], outputs=['chatbot2', chat_history_state, message], queue=True
129
  )
130
  stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_click_event], queue=False)
131
 
 
10
 
11
  BASE_SYSTEM_MESSAGE = """"""
12
 
13
+ def make_prediction(prompt, max_tokens=None, temperature=None, top_p=None, top_k=None, repetition_penalty=None):
14
+ completion = openai.Completion.create(model="/workspace/text-generation-webui/models/wizardcoder-python-34b-v1.0.Q5_K_M.gguf", prompt=prompt, max_tokens=max_tokens, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty, stream=True, stop=["</s>", "<|im_end|>"])
 
 
 
 
 
 
15
  for chunk in completion:
16
  yield chunk["choices"][0]["text"]
17
 
 
29
  return "", history
30
 
31
 
32
+ def chat(history, system_message, max_tokens, temperature, top_p, top_k, repetition_penalty):
33
  history = history or []
34
 
35
  messages = BASE_SYSTEM_MESSAGE + system_message.strip() + "\n" + \
 
41
  messages = messages.rstrip()
42
 
43
  prediction = make_prediction(
 
44
  messages,
45
  max_tokens=max_tokens,
46
  temperature=temperature,
 
67
  .contain { display: flex; flex-direction: column; }
68
  .gradio-container { height: 100vh !important; }
69
  #component-0 { height: 100%; }
70
+ #chatbot { flex-grow: 1; overflow: auto; resize: vertical; }
 
71
  """
72
 
73
  #with gr.Blocks() as demo:
 
76
  with gr.Column():
77
  gr.Markdown(f"""
78
  ## This demo is an unquantized GPU chatbot of [WizardCoder-Python-34B-V1.0-GGUF](https://huggingface.co/TheBloke/WizardCoder-Python-34B-V1.0-GGUF)
79
+ Brought to you by your friends at Alignment Lab AI, garage-bAInd, Open Access AI Collective, and OpenChat!
80
  """)
81
  with gr.Row():
82
  gr.Markdown("# πŸ” WizardCoder-Python-34B-V1.0-GGUF Playground Space! πŸ”Ž")
83
  with gr.Row():
84
+ #chatbot = gr.Chatbot().style(height=500)
85
+ chatbot = gr.Chatbot(elem_id="chatbot")
 
 
 
86
  with gr.Row():
87
  message = gr.Textbox(
88
  label="What do you want to chat about?",
 
107
 
108
  chat_history_state = gr.State()
109
  clear.click(clear_chat, inputs=[chat_history_state, message], outputs=[chat_history_state, message], queue=False)
110
+ clear.click(lambda: None, None, chatbot, queue=False)
 
111
 
112
  submit_click_event = submit.click(
113
  fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True
114
  ).then(
115
+ fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repetition_penalty], outputs=[chatbot, chat_history_state, message], queue=True
 
 
116
  )
117
  stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_click_event], queue=False)
118