robertselvam committed
Commit • 54f3b3b
1 Parent(s): e76b8cd
Update app.py
app.py CHANGED

@@ -80,7 +80,7 @@ class ChatDocumentQA:
         Returns:
             List[str]: List of smaller text chunks.
         """
-        text_splitter = CharacterTextSplitter(separator="\n", chunk_size=
+        text_splitter = CharacterTextSplitter(separator="\n", chunk_size=500, chunk_overlap=100, length_function=len)

         chunks = text_splitter.split_documents(text)

@@ -110,7 +110,8 @@ class ChatDocumentQA:

         memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

-        llm = ChatOpenAI(temperature=0)
+        # llm = ChatOpenAI(temperature=0)
+        llm=OpenAI(temperature=0)

         return ConversationalRetrievalChain.from_llm(llm=llm, retriever=vectorstore.as_retriever(),
                                                      condense_question_prompt=CONDENSE_QUESTION_PROMPT,
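
For context, here is a minimal standalone sketch of the two pieces this commit touches: splitting documents with the new CharacterTextSplitter parameters (chunk_size=500, chunk_overlap=100, length_function=len) and building the ConversationalRetrievalChain from a completion-style OpenAI LLM instead of ChatOpenAI. This is a sketch under assumptions, not the Space's actual code: the sample Document, the FAISS + OpenAIEmbeddings vectorstore, and passing memory into from_llm are illustrative guesses that are not visible in the diff, and running it needs an OPENAI_API_KEY plus faiss installed. It assumes the classic (pre-0.1) LangChain import paths used elsewhere in app.py.

# Sketch only: sample input, FAISS/OpenAIEmbeddings, and memory= wiring are assumptions;
# the splitter parameters and the OpenAI(temperature=0) swap come from this commit.
from langchain.chains import ConversationalRetrievalChain
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
from langchain.schema import Document
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS

# First hunk: split loaded documents into ~500-character chunks with 100-character overlap,
# measuring length in raw characters (length_function=len).
docs = [Document(page_content="example line of extracted text\n" * 100)]  # placeholder input
text_splitter = CharacterTextSplitter(separator="\n", chunk_size=500,
                                      chunk_overlap=100, length_function=len)
chunks = text_splitter.split_documents(docs)

# Second hunk: build the conversational retrieval chain around a completion-model wrapper
# (OpenAI) rather than the chat-model wrapper (ChatOpenAI) used before.
vectorstore = FAISS.from_documents(chunks, OpenAIEmbeddings())
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
llm = OpenAI(temperature=0)
chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=vectorstore.as_retriever(),
    condense_question_prompt=CONDENSE_QUESTION_PROMPT,
    memory=memory,
)
print(chain({"question": "What is this document about?"})["answer"])

Note that OpenAI wraps the completions endpoint while ChatOpenAI wraps the chat completions endpoint; ConversationalRetrievalChain.from_llm accepts either, since both implement LangChain's language-model interface, so the rest of the chain setup is unchanged by the swap.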