ali121300 committed on
Commit
fb21a93
1 Parent(s): 26a5f45

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -53,14 +53,14 @@ def get_vectorstore(text_chunks : list) -> FAISS:
53
 
54
  def get_conversation_chain(vectorstore:FAISS) -> ConversationalRetrievalChain:
55
  # llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
56
- #llm = HuggingFaceHub(
57
  #repo_id="mistralai/Mistral-7B-Instruct-v0.2",
58
  #repo_id="cognitivecomputations/Llama-3-70B-Gradient-1048k-adapter",
59
  #repo_id="TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF"
60
- # repo_id="mostafaamiri/persian-llama-7b-GGUF-Q4",
61
- # model_kwargs={"temperature": 0.1, "max_length": 2048},
62
- #)
63
- llm = ChatGroq(temperature=0.5, model_name="llama3-8b-8192", groq_api_key='<REDACTED-GROQ-API-KEY>')  # NOTE(review): a live API key was hardcoded here and is leaked in public commit history — rotate it immediately and load it from the GROQ_API_KEY environment variable instead
64
  memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
65
  conversation_chain = ConversationalRetrievalChain.from_llm(
66
  llm=llm, retriever=vectorstore.as_retriever(),memory=memory
 
53
 
54
  def get_conversation_chain(vectorstore:FAISS) -> ConversationalRetrievalChain:
55
  # llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
56
+ llm = HuggingFaceHub(
57
  #repo_id="mistralai/Mistral-7B-Instruct-v0.2",
58
  #repo_id="cognitivecomputations/Llama-3-70B-Gradient-1048k-adapter",
59
  #repo_id="TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF"
60
+ repo_id="lmstudio-community/Meta-Llama-3-120B-Instruct-GGUF",
61
+ model_kwargs={"temperature": 0.1, "max_length": 2048},
62
+ )
63
+
64
  memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
65
  conversation_chain = ConversationalRetrievalChain.from_llm(
66
  llm=llm, retriever=vectorstore.as_retriever(),memory=memory