Update app.py
app.py (CHANGED)
@@ -53,14 +53,14 @@ def get_vectorstore(text_chunks : list) -> FAISS:
 
 def get_conversation_chain(vectorstore:FAISS) -> ConversationalRetrievalChain:
     # llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
-
+    llm = HuggingFaceHub(
         #repo_id="mistralai/Mistral-7B-Instruct-v0.2",
         #repo_id="cognitivecomputations/Llama-3-70B-Gradient-1048k-adapter",
         #repo_id="TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF"
-
-
-
-
+        repo_id="lmstudio-community/Meta-Llama-3-120B-Instruct-GGUF",
+        model_kwargs={"temperature": 0.1, "max_length": 2048},
+    )
+
     memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
     conversation_chain = ConversationalRetrievalChain.from_llm(
         llm=llm, retriever=vectorstore.as_retriever(),memory=memory
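For readers who want to see the change in context, here is a minimal, self-contained sketch of what get_conversation_chain plausibly looks like after this commit. The imports, the closing parenthesis of the from_llm call, and the return statement are assumptions, since they fall outside the hunk shown above; everything else mirrors the diff.

from langchain.llms import HuggingFaceHub
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from langchain.vectorstores import FAISS


def get_conversation_chain(vectorstore: FAISS) -> ConversationalRetrievalChain:
    # Remote LLM served through the hosted Hugging Face Inference API;
    # requires the HUGGINGFACEHUB_API_TOKEN environment variable to be set.
    llm = HuggingFaceHub(
        repo_id="lmstudio-community/Meta-Llama-3-120B-Instruct-GGUF",
        model_kwargs={"temperature": 0.1, "max_length": 2048},
    )
    # Keep the running chat history so follow-up questions retain context.
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    conversation_chain = ConversationalRetrievalChain.from_llm(
        llm=llm, retriever=vectorstore.as_retriever(), memory=memory
    )  # closing paren and return are assumed; the hunk ends mid-call
    return conversation_chain

One caveat worth flagging: HuggingFaceHub sends requests to the hosted Inference API, while GGUF repositories such as the one selected here contain quantized weights intended for local runtimes (llama.cpp, LM Studio), so the hosted endpoint may reject this repo_id; a regular instruct model repository is the safer choice for this code path.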