ryanrwatkins committed on
Commit
390942c
·
1 Parent(s): 8c7cd91

Update app.py

Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -56,9 +56,9 @@ def download_prompt_templates():
             if len(row) >= 2:
                 act = row[0].strip('"')
                 prompt = row[1].strip('"')
-                des = row[2].strip('"')
+                description = row[2].strip('"')
                 prompt_templates[act] = prompt
-                prompt_templates[des] = description
+
 
 
     except requests.exceptions.RequestException as e:
@@ -67,7 +67,7 @@ def download_prompt_templates():
 
     choices = list(prompt_templates.keys())
     choices = choices[:1] + sorted(choices[1:])
-    return gr.update(value=choices[0], choices=choices)
+    return gr.update(value=choices[0], choices=choices, description=description)
 
 def on_prompt_template_change(prompt_template):
     if not isinstance(prompt_template, str): return
@@ -240,7 +240,7 @@ with gr.Blocks(css=css) as demo:
             btn_clear_conversation = gr.Button("Start New Conversation")
         with gr.Column():
             prompt_template = gr.Dropdown(label="Choose a expert:", choices=list(prompt_templates.keys()))
-            prompt_template_preview = gr.Markdown(elem_id="prompt_template_preview")
+            prompt_template_preview = gr.Markdown(elem_id="prompt_template_preview", label=description)
             with gr.Accordion("Advanced parameters", open=False):
                 temperature = gr.Slider(minimum=0, maximum=2.0, value=0.7, step=0.1, label="Flexibility", info="Higher = more creative/chaotic, Lower = just the guru")
                 max_tokens = gr.Slider(minimum=100, maximum=400, value=200, step=1, label="Max tokens per response")
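For context, below is a minimal, self-contained sketch of the data flow this change is aiming at: reading the optional third CSV column as a description and showing it next to the chosen expert. The placeholder URL, the reader and error-handling code around the hunks, the separate prompt_descriptions dict, and the .change()/demo.load() wiring are all assumptions, not code from this repository; the commit itself routes the description through gr.update() and the gr.Markdown label (Gradio 3.x-style gr.update is assumed, as in the diff).

# Sketch only: PROMPTS_CSV_URL and prompt_descriptions are hypothetical names,
# and this stores descriptions in a separate dict instead of passing them
# through gr.update() as the commit does.
import csv
import requests
import gradio as gr

PROMPTS_CSV_URL = "https://example.com/prompts.csv"  # placeholder; the real URL lives elsewhere in app.py

prompt_templates = {}     # expert name -> prompt text
prompt_descriptions = {}  # expert name -> description (third CSV column), hypothetical helper dict

def download_prompt_templates():
    try:
        response = requests.get(PROMPTS_CSV_URL, timeout=10)
        reader = csv.reader(response.text.splitlines())
        next(reader)  # skip the CSV header row
        for row in reader:
            if len(row) >= 2:
                act = row[0].strip('"')
                prompt = row[1].strip('"')
                prompt_templates[act] = prompt
                if len(row) >= 3:
                    prompt_descriptions[act] = row[2].strip('"')
    except requests.exceptions.RequestException as e:
        print(f"An error occurred while downloading prompt templates: {e}")

    choices = list(prompt_templates.keys())
    choices = choices[:1] + sorted(choices[1:])
    return gr.update(value=choices[0] if choices else None, choices=choices)

def on_prompt_template_change(prompt_template):
    if not isinstance(prompt_template, str):
        return ""
    # Prefer the stored description; fall back to the prompt text itself.
    return prompt_descriptions.get(prompt_template, prompt_templates.get(prompt_template, ""))

with gr.Blocks() as demo:
    prompt_template = gr.Dropdown(label="Choose an expert:", choices=[])
    prompt_template_preview = gr.Markdown(elem_id="prompt_template_preview")
    # Refresh the preview whenever a different expert is selected.
    prompt_template.change(on_prompt_template_change,
                           inputs=[prompt_template],
                           outputs=[prompt_template_preview])
    # Populate the dropdown once the app loads.
    demo.load(download_prompt_templates, inputs=None, outputs=[prompt_template])

if __name__ == "__main__":
    demo.launch()

Keeping the descriptions in their own dict keyed by expert name lets on_prompt_template_change look them up whenever the dropdown value changes, without threading a description keyword through the dropdown update.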