Dagfinn1962 committed
Commit bd0fb97
1 Parent(s): 028b828

Update app.py

Files changed (1)
  1. app.py +9 -13
app.py CHANGED
@@ -1,7 +1,7 @@
 import time
 import logging
 import gradio as gr
-
+from src.llm_boilers import llm_boiler
 
 
 logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
@@ -110,18 +110,11 @@ def call_inf_server(prompt, openai_key):
     return response
 
 
-
-
-theme = gr.themes.Glass(
-    primary_hue="amber",
-    secondary_hue="neutral",
-    neutral_hue="rose",
-    text_size="text_lg",
-
-)
-with gr.Blocks(theme=theme) as demo:
-
-    gr.Markdown(
+with gr.Blocks(
+    theme=gr.themes.Soft(),
+    css=".disclaimer {font-variant-caps: all-small-caps;}",
+) as demo:
+    gr.Markdown(
         """<h1><center>Chat with gpt-3.5-turbo</center></h1>
         This is a lightweight demo of gpt-3.5-turbo conversation completion. It was designed as a template for in-context learning applications to be built on top of.
         """
@@ -224,3 +217,6 @@ with gr.Blocks(theme=theme) as demo:
         outputs=[system],
         queue=False,
     )
+
+
+demo.queue(max_size=36, concurrency_count=14).launch(debug=True)
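
For orientation, here is a minimal, self-contained sketch of the structure this commit moves app.py toward: a `gr.Blocks` demo built with the `Soft` theme plus a small CSS override, then served through `demo.queue(...).launch(...)`. It assumes Gradio 3.x, where `concurrency_count` is still a valid `queue()` argument; the `respond` stub and the simple Textbox/Button wiring are illustrative placeholders, since the real chat components and the `call_inf_server` implementation are outside this diff.

```python
# Minimal sketch (assumes Gradio 3.x, where queue(concurrency_count=...) is valid).
# The real app.py wires a full chat UI around call_inf_server(); the respond()
# stub and the single Textbox/Button pair here are placeholders for illustration.
import logging

import gradio as gr

logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)


def respond(prompt: str) -> str:
    # Placeholder for the real inference call (call_inf_server in app.py).
    logging.info("prompt received: %s", prompt)
    return f"echo: {prompt}"


with gr.Blocks(
    theme=gr.themes.Soft(),
    css=".disclaimer {font-variant-caps: all-small-caps;}",
) as demo:
    gr.Markdown(
        """<h1><center>Chat with gpt-3.5-turbo</center></h1>
        This is a lightweight demo of gpt-3.5-turbo conversation completion.
        """
    )
    prompt = gr.Textbox(label="Prompt")
    output = gr.Textbox(label="Response")
    submit = gr.Button("Submit")
    submit.click(respond, inputs=[prompt], outputs=[output])

# Queue requests and launch: max_size bounds the waiting queue, and
# concurrency_count sets how many events are processed in parallel.
demo.queue(max_size=36, concurrency_count=14).launch(debug=True)
```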