Rohan Kataria committed · Commit 5b0aecb
1 Parent(s): dc9eb63
changes

src/main.py CHANGED (+8 -5)
@@ -16,7 +16,7 @@ from langchain.llms import OpenAI
 from langchain.memory import ConversationBufferMemory
 from langchain.vectorstores import Chroma
 from langchain.embeddings.openai import OpenAIEmbeddings
-from langchain.prompts import PromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate, AIMessagePromptTemplate, ChatPromptTemplate
+from langchain.prompts import PromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate, AIMessagePromptTemplate, ChatPromptTemplate
 import datetime
 import shutil
 
@@ -97,8 +97,7 @@ def retreival(vector_store, k):
     # Create the chat prompt templates
     messages = [
         SystemMessagePromptTemplate.from_template(system_template),
-
-        HumanMessagePromptTemplate.from_template(human_template),
+        HumanMessagePromptTemplate.from_template(human_template)
         # AIMessagePromptTemplate.from_template(ai_template)
     ]
 
@@ -121,7 +120,7 @@ def retreival(vector_store, k):
         memory=memory,
         return_source_documents=True, # When these two properties are used, the output gets three keys (answer, source_document, source_document_score), so the memory has to specify input and output keys for it to work
         combine_docs_chain_kwargs=dict({"prompt": PROMPT})
-
+    )
 
     return chain
 
@@ -139,5 +138,9 @@ class ConversationalResponse:
         self.chain = retreival(self.vector_store, self.k)
 
     def __call__(self, question):
-
+        chat_history = []
+        agent = self.chain({"question": question
+                            , "chat_history": chat_history
+                            })
+        chat_history.append((question, agent['answer']))
         return agent['answer']
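
A note on the prompt-template hunks: the messages list presumably feeds the chat prompt used as PROMPT further down in retreival(). A minimal sketch of how these imports are typically wired together, assuming placeholder system_template and human_template strings (the real templates are defined elsewhere in src/main.py and are not part of this diff):

    # Sketch only: the template strings are illustrative placeholders, not the
    # ones defined in src/main.py.
    from langchain.prompts import (
        SystemMessagePromptTemplate,
        HumanMessagePromptTemplate,
        ChatPromptTemplate,
    )

    system_template = "Use the following context to answer the question.\n{context}"
    human_template = "{question}"

    messages = [
        SystemMessagePromptTemplate.from_template(system_template),
        HumanMessagePromptTemplate.from_template(human_template)
    ]
    PROMPT = ChatPromptTemplate.from_messages(messages)

Dropping the trailing comma after the HumanMessagePromptTemplate entry is cosmetic; the commented-out AIMessagePromptTemplate line stays out of the list either way.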
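
The comment on return_source_documents refers to the memory setup: once the chain returns several keys, ConversationBufferMemory has to be told which input and which output to record. A sketch of how the call completed by the new closing parenthesis could look, under the assumption that llm, vector_store, k and PROMPT are built earlier in retreival() (they are not shown in this diff):

    # Sketch under assumptions: llm, vector_store, k and PROMPT are created by
    # the surrounding retreival() function, which this diff does not show.
    from langchain.chains import ConversationalRetrievalChain
    from langchain.memory import ConversationBufferMemory

    def build_chain(llm, vector_store, k, PROMPT):
        memory = ConversationBufferMemory(
            memory_key="chat_history",
            input_key="question",   # which chain input gets stored
            output_key="answer",    # which of the multiple outputs gets stored
            return_messages=True,
        )
        return ConversationalRetrievalChain.from_llm(
            llm,
            retriever=vector_store.as_retriever(search_kwargs={"k": k}),
            memory=memory,
            return_source_documents=True,
            combine_docs_chain_kwargs=dict({"prompt": PROMPT})
        )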
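
The new __call__ body creates a fresh chat_history list on every call, invokes the chain with both keys, records the turn, and returns the answer. The same flow as a standalone sketch (chain stands in for self.chain); since the list is rebuilt on each call, follow-up context comes from the memory attached to the chain rather than from this local list:

    def answer_question(chain, question):
        # Fresh history each call; persistent conversational context is carried
        # by the memory attached to the chain, not by this local list.
        chat_history = []
        agent = chain({"question": question, "chat_history": chat_history})
        chat_history.append((question, agent["answer"]))
        return agent["answer"]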