MikeCraBash committed on
Commit
cf2de7b
1 Parent(s): e75eeac

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -4
app.py CHANGED
@@ -29,7 +29,7 @@ docs = PyMuPDFLoader(direct_url).load()
29
 
30
  import tiktoken
31
  def tiktoken_len(text):
32
- tokens = tiktoken.encoding_for_model("gpt-4").encode(
33
  text,
34
  )
35
  return len(tokens)
@@ -63,8 +63,7 @@ qdrant_vectorstore = Qdrant.from_documents(
63
  qdrant_retriever = qdrant_vectorstore.as_retriever()
64
 
65
  from langchain_openai import ChatOpenAI
66
- openai_chat_model = ChatOpenAI(model="gpt-4-turbo")
67
- #openai_chat_model = ChatOpenAI(model="gpt-3.5-turbo")
68
 
69
  from langchain_core.prompts import ChatPromptTemplate
70
 
@@ -124,7 +123,7 @@ retrieval_augmented_qa_chain = (
124
  @cl.on_chat_start
125
  async def start_chat():
126
  settings = {
127
- "model": "gpt-4-turbo",
128
  "temperature": 0,
129
  "max_tokens": 500,
130
  "top_p": 1,
 
29
 
30
  import tiktoken
31
  def tiktoken_len(text):
32
+ tokens = tiktoken.encoding_for_model("gpt-3.5-turbo").encode(
33
  text,
34
  )
35
  return len(tokens)
 
63
  qdrant_retriever = qdrant_vectorstore.as_retriever()
64
 
65
  from langchain_openai import ChatOpenAI
66
+ openai_chat_model = ChatOpenAI(model="gpt-3.5-turbo")
 
67
 
68
  from langchain_core.prompts import ChatPromptTemplate
69
 
 
123
  @cl.on_chat_start
124
  async def start_chat():
125
  settings = {
126
+ "model": "gpt-3.5-turbo",
127
  "temperature": 0,
128
  "max_tokens": 500,
129
  "top_p": 1,