Rohan Kataria committed · Commit 72af804
Parent(s): 5b0aecb

changes

- app.py +1 -1
- src/main.py +17 -9
app.py
CHANGED
@@ -5,7 +5,7 @@ import os
 # Constants
 ROLE_USER = "user"
 ROLE_ASSISTANT = "assistant"
-MAX_MESSAGES =
+MAX_MESSAGES = 5
 
 st.set_page_config(page_title="Chat with Git", page_icon="🦜")
 st.title("Chat with Git 🤖📄")
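This hunk only sets the constant; the code that enforces the cap lives outside the diff. A minimal sketch of how such a cap is typically applied in Streamlit, assuming the transcript sits in st.session_state["messages"] (a hypothetical key, not taken from this commit):

import streamlit as st

MAX_MESSAGES = 5  # cap introduced by this commit

def trim_history() -> None:
    # Hypothetical helper: keep only the most recent MAX_MESSAGES entries
    # so the transcript sent back to the model stays bounded.
    messages = st.session_state.get("messages", [])
    if len(messages) > MAX_MESSAGES:
        st.session_state["messages"] = messages[-MAX_MESSAGES:]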
src/main.py
CHANGED
@@ -13,7 +13,7 @@ from langchain.chat_models import ChatOpenAI
 from langchain.document_loaders import TextLoader
 from langchain.document_loaders import GitLoader
 from langchain.llms import OpenAI
-from langchain.memory import ConversationBufferMemory
+from langchain.memory import ConversationBufferMemory, ConversationBufferWindowMemory
 from langchain.vectorstores import Chroma
 from langchain.embeddings.openai import OpenAIEmbeddings
 from langchain.prompts import PromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate, AIMessagePromptTemplate, ChatPromptTemplate
@@ -81,12 +81,17 @@ def retreival(vector_store, k):
     llm = ChatOpenAI(model=llm_name, temperature=0)
 
     # Define the system message template
+    # Adding CHAT HISTORY to the system template explicitly: by default the chat history only condenses the human question in the background, while the system template goes straight to the LLM chain
+    # Explicitly adding chat history gives access to previous turns, e.g. to answer "what is my previous question?"
+    # This also sends the chat history to the LLM along with the context and question
     system_template = """You're a code summarisation assistant. Given the following extracted parts of a long document as "CONTEXT", create a final answer.
 If you don't know the answer, just say that you don't know. Don't try to make up an answer.
 Only if asked to create a "DIAGRAM" for code, use "MERMAID SYNTAX LANGUAGE" in your answer from "CONTEXT" and "CHAT HISTORY", with a short explanation of the diagram.
 
 CONTEXT: {context}
 =======
+CHAT HISTORY: {chat_history}
+=======
 FINAL ANSWER:"""
 
     human_template = """{question}"""
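The three new comments explain the motivation: in a conversational retrieval chain, history otherwise feeds only the question-condensing step, so the answering LLM never sees it directly. A minimal sketch of the prompt assembly this template flows into, using the prompt classes already imported at the top of the file (the abridged template and variable names mirror the diff; everything else is assumed):

from langchain.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)

# Abridged version of the system template from the diff above.
system_template = """CONTEXT: {context}
=======
CHAT HISTORY: {chat_history}
=======
FINAL ANSWER:"""

messages = [
    SystemMessagePromptTemplate.from_template(system_template),
    HumanMessagePromptTemplate.from_template("{question}"),
]
PROMPT = ChatPromptTemplate.from_messages(messages)

print(sorted(PROMPT.input_variables))  # ['chat_history', 'context', 'question']

All three placeholders must be filled whenever the prompt is formatted, which is why the memory below has to expose its buffer under the key "chat_history".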
@@ -104,11 +109,18 @@ def retreival(vector_store, k):
     PROMPT = ChatPromptTemplate.from_messages(messages)
 
     # Creating memory
-    memory = ConversationBufferMemory(
+    # memory = ConversationBufferMemory(
+    #     memory_key="chat_history",
+    #     input_key="question",
+    #     output_key="answer",
+    #     return_messages=True)
+
+    memory = ConversationBufferWindowMemory(
         memory_key="chat_history",
         input_key="question",
         output_key="answer",
-        return_messages=True)
+        return_messages=True,
+        k=5)
 
     # Creating the retriever; this can also be a contextual compressed retriever
     retriever = vector_store.as_retriever(search_type="similarity", search_kwargs={"k": k})  # search_type can be "similarity" or "mmr"
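Swapping ConversationBufferMemory for ConversationBufferWindowMemory bounds the prompt: instead of replaying the whole conversation, only the last k question/answer exchanges are kept, so long sessions can no longer overflow the model's context window. A minimal standalone sketch of the windowing behaviour (k=2 here for brevity; the commit uses k=5):

from langchain.memory import ConversationBufferWindowMemory

memory = ConversationBufferWindowMemory(
    memory_key="chat_history",
    input_key="question",
    output_key="answer",
    return_messages=True,
    k=2,  # keep only the last 2 exchanges
)

for i in range(4):
    memory.save_context({"question": f"q{i}"}, {"answer": f"a{i}"})

history = memory.load_memory_variables({})["chat_history"]
print(len(history))  # 4 -> only q2/a2 and q3/a3 survive

The retriever line is untouched; as its comment notes, search_type="mmr" is the drop-in alternative when more diverse chunks are wanted.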
@@ -134,13 +146,9 @@ class ConversationalResponse:
         self.chunks = split_data(self.data)
         self.vector_store = ingest_chunks(self.chunks)
         self.chain_type = "stuff"
-        self.k =
+        self.k = 10
         self.chain = retreival(self.vector_store, self.k)
 
     def __call__(self, question):
-
-        agent = self.chain({"question": question
-                            , "chat_history": chat_history
-                            })
-        chat_history.append((question, agent['answer']))
+        agent = self.chain(question)
         return agent['answer']
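With memory now attached inside retreival(), the chain tracks history itself, so __call__ shrinks to a single invocation and the old module-level chat_history list disappears. A hedged sketch of the resulting calling convention; it assumes retreival() returns a legacy LangChain chain whose only non-memory input key is "question" (the chain constructor is outside this diff):

class ConversationalResponse:
    def __init__(self, chain):
        # `chain` stands in for the object built by retreival(vector_store, k).
        self.chain = chain

    def __call__(self, question):
        # Legacy Chain.__call__ accepts a bare value when exactly one input
        # key remains after memory variables are filled in, so the question
        # alone suffices; chat history is read from and written back to the
        # chain's memory automatically.
        agent = self.chain(question)
        return agent['answer']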