Spaces:
Runtime error
Runtime error
ryanrwatkins
committed on
Commit
·
7298dcb
1
Parent(s):
c7890fd
Update app.py
Browse files
app.py
CHANGED
@@ -10,7 +10,7 @@ import glob
|
|
10 |
from langchain.embeddings.openai import OpenAIEmbeddings
|
11 |
from langchain.vectorstores import Chroma
|
12 |
from langchain.text_splitter import TokenTextSplitter
|
13 |
-
from langchain.llms import OpenAI
|
14 |
from langchain.chat_models import ChatOpenAI
|
15 |
#from langchain.chains import ChatVectorDBChain
|
16 |
from langchain.chains import RetrievalQA
|
@@ -103,7 +103,7 @@ def submit_message(prompt, prompt_template, temperature, max_tokens, context_len
|
|
103 |
|
104 |
# completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
|
105 |
|
106 |
-
completion_chain = load_qa_chain(
|
107 |
completion = RetrievalQA(combine_documents_chain=completion_chain, retriever=vectordb.as_retriever())
|
108 |
query = str(system_prompt + history[-context_length*2:] + [prompt_msg])
|
109 |
completion = completion.run(query)
|
|
|
10 |
from langchain.embeddings.openai import OpenAIEmbeddings
|
11 |
from langchain.vectorstores import Chroma
|
12 |
from langchain.text_splitter import TokenTextSplitter
|
13 |
+
#from langchain.llms import OpenAI
|
14 |
from langchain.chat_models import ChatOpenAI
|
15 |
#from langchain.chains import ChatVectorDBChain
|
16 |
from langchain.chains import RetrievalQA
|
|
|
103 |
|
104 |
# completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
|
105 |
|
106 |
+
completion_chain = load_qa_chain(ChatOpenAI(temperature=temperature, max_tokens=max_tokens, model_name="gpt-3.5-turbo"), chain_type="stuff" )
|
107 |
completion = RetrievalQA(combine_documents_chain=completion_chain, retriever=vectordb.as_retriever())
|
108 |
query = str(system_prompt + history[-context_length*2:] + [prompt_msg])
|
109 |
completion = completion.run(query)
|