saswatdas123
committed on
Update pages/ChatPDF_Reader.py
pages/ChatPDF_Reader.py  +4 -2
pages/ChatPDF_Reader.py
CHANGED
@@ -30,8 +30,10 @@ def get_data(query):
     db = Chroma(persist_directory=chromadbpath, embedding_function=embeddings)
     retriever = db.as_retriever(search_type="mmr", search_kwargs={'k': 2})
 
-    llm = HuggingFaceHub(huggingfacehub_api_token=hf_token,
-                         repo_id=llm_repo_id, model_kwargs={"temperature":0.2, "max_new_tokens":50})
+    """llm = HuggingFaceHub(huggingfacehub_api_token=hf_token,
+                         repo_id=llm_repo_id, model_kwargs={"temperature":0.2, "max_new_tokens":50})"""
+
+    llm = HuggingFaceHub(repo_id=llm_repo_id, model_kwargs={"temperature":0.2, "max_new_tokens":50})
 
     # Create the Conversational Retrieval Chain
     qa_chain = ConversationalRetrievalChain.from_llm(llm, retriever,return_source_documents=True)
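
The commit comments out the call that passed huggingfacehub_api_token explicitly and rebuilds the LLM without it, so the token now has to reach HuggingFaceHub implicitly. A minimal sketch of that setup, assuming the token is supplied through the HUGGINGFACEHUB_API_TOKEN environment variable (for example a Space secret), which LangChain's HuggingFaceHub falls back to when no token argument is given; the repo id value below is illustrative, not taken from this commit:

    import os
    from langchain.llms import HuggingFaceHub

    # Assumption: the token arrives via a Space secret / environment variable;
    # HuggingFaceHub reads HUGGINGFACEHUB_API_TOKEN when no explicit
    # huggingfacehub_api_token argument is passed.
    os.environ.setdefault("HUGGINGFACEHUB_API_TOKEN", "hf_xxx")  # placeholder

    llm_repo_id = "google/flan-t5-large"  # illustrative repo id only

    llm = HuggingFaceHub(
        repo_id=llm_repo_id,
        model_kwargs={"temperature": 0.2, "max_new_tokens": 50},
    )

With the secret configured on the Space, the constructed llm behaves the same as before the change, and the hard-coded hf_token variable is no longer needed in the page code.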