KvrParaskevi committed
Commit fa240df · verified · 1 Parent(s): 68f9fec

Update app.py

Files changed (1): app.py (+6, -9)
app.py CHANGED

@@ -33,8 +33,12 @@ def load_pipeline():
 def chat_interface(inputs):
     question = inputs["input"]
     chat_history = inputs["history"]
-    # Assuming `chain` is your instance of ConversationalRetrievalChain
-    result = chain.run({"input": question, "history": chat_history})
+    chat_history_tuples = []
+    for message in chat_history:
+        chat_history_tuples.append((message[0], message[1]))
+
+    #result = llm_chain({"input": query, "history": chat_history_tuples})
+    result = llm_chain.invoke({"input": question, "history": chat_history})
     return result["response"]
 
 llm = load_pipeline()
@@ -67,13 +71,6 @@ with gr.Blocks() as demo:
     clear = gr.Button("Clear")
     #llm_chain, llm = init_chain(model, tokenizer)
 
-    # Convert chat history to list of tuples
-    chat_history_tuples = []
-    for message in chat_history:
-        chat_history_tuples.append((message[0], message[1]))
-
-    result = llm_chain({"input": query, "history": chat_history_tuples})
-
     iface = gr.Interface(
         fn=chat_interface,
         inputs=[
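For reference, a minimal self-contained sketch of the pattern the new `chat_interface` relies on. Everything outside the function body is an assumption for illustration: FakeListLLM stands in for whatever load_pipeline() returns, and the prompt text is invented, since neither appears in this diff. The key detail is that setting output_key="response" on a legacy LangChain LLMChain is what makes result["response"] a valid lookup after .invoke(). Note that the committed code still passes the raw chat_history to invoke; the sketch passes the converted tuples, which appears to be the intent of the conversion loop.

from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import FakeListLLM

# Stand-in for load_pipeline(); any LangChain-compatible LLM works here.
llm = FakeListLLM(responses=["Hi there!"])

# Hypothetical prompt: the real app's template is not shown in this diff.
prompt = PromptTemplate(
    input_variables=["history", "input"],
    template="Conversation so far:\n{history}\n\nUser: {input}\nAssistant:",
)

# output_key="response" is what makes result["response"] a valid lookup.
llm_chain = LLMChain(llm=llm, prompt=prompt, output_key="response")

def chat_interface(inputs):
    question = inputs["input"]
    chat_history = inputs["history"]
    # Gradio chatbot history arrives as [user_message, bot_message] pairs.
    chat_history_tuples = [(m[0], m[1]) for m in chat_history]
    # .invoke() returns a dict (the inputs merged with the outputs), unlike
    # the old chain.run(...), which returned a bare string and made the
    # pre-commit result["response"] indexing fail.
    result = llm_chain.invoke({"input": question, "history": chat_history_tuples})
    return result["response"]

print(chat_interface({"input": "Hello", "history": []}))

Because legacy chains merge inputs and outputs, result here contains "input", "history", and "response"; that is why the diff's return result["response"] works with .invoke() but could not have worked with the string returned by chain.run(...).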