andrewgleave committed on
Commit
59b8bcd
·
1 Parent(s): b8f5c23
Files changed (2) hide show
  1. app.py +2 -2
  2. chain.py +1 -6
app.py CHANGED
@@ -84,7 +84,7 @@ def chat(inp, history, agent):
84
  return history, history, source_html
85
 
86
 
87
- with gr.Blocks(css=".gradio-container {background-color: lightgray}") as demo:
88
 
89
  state = gr.State()
90
  agent_state = gr.State()
@@ -98,7 +98,7 @@ with gr.Blocks(css=".gradio-container {background-color: lightgray}") as demo:
98
  )
99
 
100
  sources = gr.HTML()
101
- chatbot = gr.Chatbot()
102
  with gr.Row():
103
  message = gr.Textbox(
104
  label="What's your question?",
 
84
  return history, history, source_html
85
 
86
 
87
+ with gr.Blocks() as demo:
88
 
89
  state = gr.State()
90
  agent_state = gr.State()
 
98
  )
99
 
100
  sources = gr.HTML()
101
+ chatbot = gr.Chatbot().style(color_map=("blue", "gray"))
102
  with gr.Row():
103
  message = gr.Textbox(
104
  label="What's your question?",
chain.py CHANGED
@@ -67,11 +67,6 @@ def get_chain(vectorstore: FAISS) -> Chain:
67
  Standalone question:"""
68
 
69
  examples = [
70
- {
71
- "question": "What is the TokCast podcast?",
72
- "chat_history": [],
73
- "answer": "TokCast is a podcast about the philosophy of David Deutsch.",
74
- },
75
  {
76
  "question": "Who is that?",
77
  "chat_history": "Human: What is the TokCast podcast?\nAssistant: TokCast is a podcast about the philosophy of David Deutsch.",
@@ -92,7 +87,7 @@ def get_chain(vectorstore: FAISS) -> Chain:
92
  input_variables=["question", "chat_history"],
93
  )
94
  llm = OpenAI(temperature=0, model_name="text-davinci-003")
95
- key_word_extractor = LLMChain(llm=llm, prompt=prompt, verbose=True)
96
 
97
  EXAMPLE_PROMPT = PromptTemplate(
98
  template="CONTENT:\n{page_content}\n----------\nSOURCE:\n{source}\n",
 
67
  Standalone question:"""
68
 
69
  examples = [
 
 
 
 
 
70
  {
71
  "question": "Who is that?",
72
  "chat_history": "Human: What is the TokCast podcast?\nAssistant: TokCast is a podcast about the philosophy of David Deutsch.",
 
87
  input_variables=["question", "chat_history"],
88
  )
89
  llm = OpenAI(temperature=0, model_name="text-davinci-003")
90
+ key_word_extractor = LLMChain(llm=llm, prompt=prompt)
91
 
92
  EXAMPLE_PROMPT = PromptTemplate(
93
  template="CONTENT:\n{page_content}\n----------\nSOURCE:\n{source}\n",