Vamsikrishna Chemudupati committed on
Commit
5fd9ae6
·
1 Parent(s): f755dcf

Added comments to notebook

Browse files
notebooks/04-RAG_with_VectorStore.ipynb CHANGED
@@ -333,7 +333,7 @@
333
  "outputs": [],
334
  "source": [
335
  "from langchain.schema.document import Document\n",
336
- "# Convert the chunks to Document objects so the LlamaIndex framework can process them.\n",
337
  "documents = [Document(page_content=t) for t in chunks]"
338
  ]
339
  },
@@ -356,9 +356,7 @@
356
  "source": [
357
  "from langchain_chroma import Chroma\n",
358
  "from langchain_openai import OpenAIEmbeddings\n",
359
- "# create client and a new collection\n",
360
- "# chromadb.EphemeralClient saves data in-memory.\n",
361
- "# Add the documents to the database and create Index / embeddings\n",
362
  "\n",
363
  "embeddings = OpenAIEmbeddings(model=\"text-embedding-ada-002\")\n",
364
  "chroma_db = Chroma.from_documents(\n",
@@ -387,9 +385,7 @@
387
  "outputs": [],
388
  "source": [
389
  "from langchain_openai import ChatOpenAI\n",
390
- "# Define a query engine that is responsible for retrieving related pieces of text,\n",
391
- "# and using a LLM to formulate the final answer.\n",
392
- "\n",
393
  "llm = ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
394
  ]
395
  },
@@ -416,6 +412,8 @@
416
  "from langchain.chains import RetrievalQA\n",
417
  "query = \"How many parameters LLaMA2 model has?\"\n",
418
  "retriever = chroma_db.as_retriever(search_kwargs={\"k\": 2})\n",
 
 
419
  "chain = RetrievalQA.from_chain_type(llm=llm,\n",
420
  " chain_type=\"stuff\",\n",
421
  " retriever=retriever)\n",
 
333
  "outputs": [],
334
  "source": [
335
  "from langchain.schema.document import Document\n",
336
+ "# Convert the chunks to Document objects so the LangChain framework can process them.\n",
337
  "documents = [Document(page_content=t) for t in chunks]"
338
  ]
339
  },
 
356
  "source": [
357
  "from langchain_chroma import Chroma\n",
358
  "from langchain_openai import OpenAIEmbeddings\n",
359
+ "# Add the documents to chroma DB and create Index / embeddings\n",
 
 
360
  "\n",
361
  "embeddings = OpenAIEmbeddings(model=\"text-embedding-ada-002\")\n",
362
  "chroma_db = Chroma.from_documents(\n",
 
385
  "outputs": [],
386
  "source": [
387
  "from langchain_openai import ChatOpenAI\n",
388
+ "# Initializing the LLM model\n",
 
 
389
  "llm = ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
390
  ]
391
  },
 
412
  "from langchain.chains import RetrievalQA\n",
413
  "query = \"How many parameters LLaMA2 model has?\"\n",
414
  "retriever = chroma_db.as_retriever(search_kwargs={\"k\": 2})\n",
415
+ "# Define a RetrievalQA chain that is responsible for retrieving related pieces of text,\n",
416
+ "# and using a LLM to formulate the final answer.\n",
417
  "chain = RetrievalQA.from_chain_type(llm=llm,\n",
418
  " chain_type=\"stuff\",\n",
419
  " retriever=retriever)\n",