MikeCraBash committed on
Commit
3c2db16
1 Parent(s): b8ba0df

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -29,7 +29,7 @@ docs = PyMuPDFLoader(direct_url).load()
29
 
30
  import tiktoken
31
  def tiktoken_len(text):
32
- tokens = tiktoken.encoding_for_model("gpt-3.5-turbo").encode(
33
  text,
34
  )
35
  return len(tokens)
@@ -48,7 +48,7 @@ split_chunks = text_splitter.split_documents(docs)
48
  # Load the embeddings model
49
  from langchain_openai.embeddings import OpenAIEmbeddings
50
 
51
- embedding_model = OpenAIEmbeddings(model="text-embedding-3-small")
52
 
53
  # Load the vector store and retriever from Qdrant
54
  from langchain_community.vectorstores import Qdrant
@@ -124,7 +124,7 @@ retrieval_augmented_qa_chain = (
124
  @cl.on_chat_start
125
  async def start_chat():
126
  settings = {
127
- "model": "gpt-3.5-turbo",
128
  "temperature": 0,
129
  "max_tokens": 500,
130
  "top_p": 1,
 
29
 
30
  import tiktoken
31
  def tiktoken_len(text):
32
+ tokens = tiktoken.encoding_for_model("gpt-4").encode(
33
  text,
34
  )
35
  return len(tokens)
 
48
  # Load the embeddings model
49
  from langchain_openai.embeddings import OpenAIEmbeddings
50
 
51
+ embedding_model = OpenAIEmbeddings(model="text-embedding-3-large")
52
 
53
  # Load the vector store and retriever from Qdrant
54
  from langchain_community.vectorstores import Qdrant
 
124
  @cl.on_chat_start
125
  async def start_chat():
126
  settings = {
127
+ "model": "gpt-4",
128
  "temperature": 0,
129
  "max_tokens": 500,
130
  "top_p": 1,