fabiochiu committed on
Commit a66a959 · 1 Parent(s): 4f87621

fix notebook

notebooks/15-Use_OpenSource_Models.ipynb CHANGED
@@ -7,7 +7,7 @@
7
  "id": "view-in-github"
8
  },
9
  "source": [
10
- "<a href=\"https://colab.research.google.com/github/towardsai/ai-tutor-rag-system/blob/main/notebooks/15-Use_OpenSource_Models.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\n"
11
  ]
12
  },
13
  {
@@ -16,7 +16,7 @@
16
  "id": "-zE1h0uQV7uT"
17
  },
18
  "source": [
19
- "# Install Packages and Setup Variables\n"
20
  ]
21
  },
22
  {
@@ -27,11 +27,7 @@
27
  },
28
  "outputs": [],
29
  "source": [
30
- <<<<<<< HEAD
31
  "!pip install -q llama-index==0.10.11 openai==1.12.0 llama-index-finetuning llama-index-llms-together llama-index-llms-gemini llama-index-embeddings-huggingface llama-index-readers-web llama-index-vector-stores-chroma tiktoken==0.6.0 chromadb==0.4.22 pandas==2.2.0 html2text sentence_transformers pydantic kaleido==0.2.1"
32
- =======
33
- "!pip install -q llama-index==0.10.57 openai==1.37.0 llama-index-finetuning llama-index-llms-replicate llama-index-embeddings-huggingface llama-index-embeddings-cohere llama-index-readers-web cohere==5.6.2 tiktoken==0.7.0 chromadb==0.5.5 html2text sentence_transformers pydantic llama-index-vector-stores-chroma==0.1.10 kaleido==0.2.1 replicate==0.23.1"
34
- >>>>>>> b8b57398f877722d8b854eaf1fb3901fa0618894
35
  ]
36
  },
37
  {
@@ -44,29 +40,10 @@
44
  "source": [
45
  "import os\n",
46
  "\n",
47
- <<<<<<< HEAD
48
  "# Set environment variables for the API keys\n",
49
  "os.environ[\"OPENAI_API_KEY\"] = \"<YOUR_API_KEY>\"\n",
50
  "os.environ[\"TOGETHER_AI_API_TOKEN\"] = \"<YOUR_API_KEY>\"\n",
51
  "os.environ[\"GOOGLE_API_KEY\"] = \"<YOUR_API_KEY>\"\n",
52
- =======
53
- "# Set the \"OPENAI_API_KEY\" and \"REPLICATE_API_TOKEN\" in the Python environment. Will be used by OpenAI client later.\n",
54
- "# You can sign up on https://replicate.com/docs/get-started/python and get a token to use for free for this notebook.\n",
55
- "os.environ[\"OPENAI_API_KEY\"] = \"<YOUR_OPENAI_KEY>\"\n",
56
- "os.environ[\"REPLICATE_API_TOKEN\"] = \"<YOUR_REPLICATE_KEY>\"\n",
57
- "os.environ[\"GOOGLE_API_KEY\"] = \"<YOUR_API_KEY>\""
58
- ]
59
- },
60
- {
61
- "cell_type": "code",
62
- "execution_count": 2,
63
- "metadata": {
64
- "id": "jIEeZzqLbz0J"
65
- },
66
- "outputs": [],
67
- "source": [
68
- "# Allows running asyncio in environments with an existing event loop, like Jupyter notebooks.\n",
69
- >>>>>>> b8b57398f877722d8b854eaf1fb3901fa0618894
70
  "\n",
71
  "# Allows running asyncio in environments with an existing event loop, like Jupyter notebooks.\n",
72
  "import nest_asyncio\n",
@@ -77,54 +54,10 @@
77
  {
78
  "cell_type": "markdown",
79
  "metadata": {
80
- <<<<<<< HEAD
81
  "id": "0BwVuJXlzHVL"
82
  },
83
  "source": [
84
  "# Create a vector store and ingest articles"
85
- =======
86
- "id": "Bkgi2OrYzF7q"
87
- },
88
- "source": [
89
- "# Load a Model\n"
90
- ]
91
- },
92
- {
93
- "cell_type": "code",
94
- "execution_count": 3,
95
- "metadata": {
96
- "id": "A1yVgic9DeJ6"
97
- },
98
- "outputs": [
99
- {
100
- "name": "stderr",
101
- "output_type": "stream",
102
- "text": [
103
- "/Users/louis/Documents/GitHub/ai-tutor-rag-system/.conda/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
104
- " from .autonotebook import tqdm as notebook_tqdm\n"
105
- ]
106
- }
107
- ],
108
- "source": [
109
- "from llama_index.core.prompts import PromptTemplate\n",
110
- "from llama_index.llms.replicate import Replicate\n",
111
- "\n",
112
- "# Use the repicate service to access the LLaMA2-70B chat model\n",
113
- "llm = Replicate(\n",
114
- " model=\"meta/llama-2-70b-chat:2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf\",\n",
115
- " is_chat_model=True,\n",
116
- " additional_kwargs={\"max_new_tokens\": 512},\n",
117
- ")"
118
- ]
119
- },
120
- {
121
- "cell_type": "markdown",
122
- "metadata": {
123
- "id": "0BwVuJXlzHVL"
124
- },
125
- "source": [
126
- "# Create a VectoreStore\n"
127
- >>>>>>> b8b57398f877722d8b854eaf1fb3901fa0618894
128
  ]
129
  },
130
  {
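Reviewer note: the `vector_store` that this ingestion section writes into is a Chroma collection, created in a cell outside the visible hunks. A minimal sketch of how that setup typically looks, assuming the `mini-llama-articles` naming used by the zip commands elsewhere in this diff:

```python
import chromadb
from llama_index.vector_stores.chroma import ChromaVectorStore

# Persistent local ChromaDB client; the "mini-llama-articles" path and
# collection name follow the directory zipped in the deleted branch, but
# treat the exact names as assumptions.
db = chromadb.PersistentClient(path="./mini-llama-articles")
chroma_collection = db.get_or_create_collection("mini-llama-articles")

# Wrap the collection so LlamaIndex can write embeddings into it.
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
```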
@@ -148,31 +81,10 @@
148
  {
149
  "cell_type": "markdown",
150
  "metadata": {
151
- <<<<<<< HEAD
152
- =======
153
- "id": "I9JbAzFcjkpn"
154
- },
155
- "source": [
156
- "# Load the Dataset (CSV)\n"
157
- ]
158
- },
159
- {
160
- "cell_type": "markdown",
161
- "metadata": {
162
- "id": "ceveDuYdWCYk"
163
- },
164
- "source": [
165
- "## Download\n"
166
- ]
167
- },
168
- {
169
- "cell_type": "markdown",
170
- "metadata": {
171
- >>>>>>> b8b57398f877722d8b854eaf1fb3901fa0618894
172
  "id": "eZwf6pv7WFmD"
173
  },
174
  "source": [
175
- "The dataset includes several articles from the TowardsAI blog, which provide an in-depth explanation of the LLaMA2 model. Read the dataset as a long string.\n"
176
  ]
177
  },
178
  {
@@ -196,11 +108,7 @@
196
  "id": "VWBLtDbUWJfA"
197
  },
198
  "source": [
199
- <<<<<<< HEAD
200
  "## Read articles from file"
201
- =======
202
- "## Read File\n"
203
- >>>>>>> b8b57398f877722d8b854eaf1fb3901fa0618894
204
  ]
205
  },
206
  {
@@ -221,22 +129,11 @@
221
  "\n",
222
  "# Load the file as a JSON\n",
223
  "with open(\"./mini-llama-articles.csv\", mode=\"r\", encoding=\"utf-8\") as file:\n",
224
- " csv_reader = csv.reader(file)\n",
225
  "\n",
226
- <<<<<<< HEAD
227
  " for idx, row in enumerate( csv_reader ):\n",
228
  " if idx == 0: continue; # Skip header row\n",
229
  " rows.append(row)"
230
- =======
231
- " for idx, row in enumerate(csv_reader):\n",
232
- " if idx == 0:\n",
233
- " continue\n",
234
- " # Skip header row\n",
235
- " rows.append(row)\n",
236
- "\n",
237
- "# The number of characters in the dataset.\n",
238
- "len(rows)"
239
- >>>>>>> b8b57398f877722d8b854eaf1fb3901fa0618894
240
  ]
241
  },
242
  {
@@ -245,11 +142,7 @@
245
  "id": "S17g2RYOjmf2"
246
  },
247
  "source": [
248
- <<<<<<< HEAD
249
  "## Ingest documents into vector store"
250
- =======
251
- "# Convert to Document obj\n"
252
- >>>>>>> b8b57398f877722d8b854eaf1fb3901fa0618894
253
  ]
254
  },
255
  {
@@ -266,163 +159,12 @@
266
  "from llama_index.core.ingestion import IngestionPipeline\n",
267
  "\n",
268
  "# Convert the chunks to Document objects so the LlamaIndex framework can process them.\n",
269
- <<<<<<< HEAD
270
  "documents = [Document(text=row[1], metadata={\"title\": row[0], \"url\": row[2], \"source_name\": row[3]}) for row in rows]\n",
271
  "\n",
272
  "# Define the splitter object that split the text into segments with 512 tokens,\n",
273
  "# with a 128 overlap between the segments.\n",
274
  "text_splitter = TokenTextSplitter(\n",
275
  " separator=\" \", chunk_size=512, chunk_overlap=128\n",
276
- =======
277
- "documents = [\n",
278
- " Document(\n",
279
- " text=row[1], metadata={\"title\": row[0], \"url\": row[2], \"source_name\": row[3]}\n",
280
- " )\n",
281
- " for row in rows\n",
282
- "]"
283
- ]
284
- },
285
- {
286
- "cell_type": "markdown",
287
- "metadata": {
288
- "id": "qjuLbmFuWsyl"
289
- },
290
- "source": [
291
- "# Transforming\n"
292
- ]
293
- },
294
- {
295
- "cell_type": "code",
296
- "execution_count": 8,
297
- "metadata": {
298
- "id": "9z3t70DGWsjO"
299
- },
300
- "outputs": [],
301
- "source": [
302
- "from llama_index.core.text_splitter import TokenTextSplitter\n",
303
- "\n",
304
- "# Define the splitter object that split the text into segments with 512 tokens,\n",
305
- "# with a 128 overlap between the segments.\n",
306
- "text_splitter = TokenTextSplitter(separator=\" \", chunk_size=512, chunk_overlap=128)"
307
- ]
308
- },
309
- {
310
- "cell_type": "code",
311
- "execution_count": 12,
312
- "metadata": {
313
- "colab": {
314
- "base_uri": "https://localhost:8080/",
315
- "height": 650,
316
- "referenced_widgets": [
317
- "2711e1220eac4e4e8ff6e5fae93c5a1a",
318
- "073ca56c113f4dad93b73d9fcd350a66",
319
- "9bc95d130d7347548e112c59a113e169",
320
- "9fdbd1ce1076410d8265699ff13df861",
321
- "c1e0d83bdbb0484983901d6db36dc112",
322
- "d237f91523d242f6a11ac294e0832fa2",
323
- "c53e80972993487e94fd56cf34302f0b",
324
- "2c47558aaa6c44adb2afdb5ec766d8dd",
325
- "38efcc43969b40429cf99b03c9f7ccbc",
326
- "e973a85fd8ce42c9949074c7747cf467",
327
- "b68e7dbe575e4934bbbc67461d2ee167",
328
- "a6d1480cebd044ee8ec996c26498a07d",
329
- "23ac188881d1484aaf630309809bbc2b",
330
- "f75e22947e4b4efcb10f7d157c9fe5d2",
331
- "938bdfdc914e44fcb9ae942bb6b74496",
332
- "fdc1f2cb889f43a88e0301b29b726657",
333
- "d1271b18bd5f4f84aa450f8d58b17774",
334
- "34e5dab56e354682adb687ffb19c695d",
335
- "a2e452e75f964f96b84f22521f7533a5",
336
- "644aeca8e37f4df294cec4a0425587f7",
337
- "fe571f5475834c01a22255e68dc782e3",
338
- "987018d8d0e34a58a993a836cb3300d4",
339
- "9e79a1468ef6452899596ed496801394",
340
- "387baf8595754e8e930f36426e9f6758",
341
- "55650dcd92f14d5f8e05eda8295e4834",
342
- "d4901be0f61c4cfb93fa8f05c0f5bd2f",
343
- "3f77b9fcc78c41969ea0f7cffbab2ca9",
344
- "2807299dd4f7402d897aae5bc7adefb6",
345
- "a882c7b38aed4592944458efb288f025",
346
- "846b99723d934015ade4d75987e92340",
347
- "4111e05472284375bf6e591b83cdaab9",
348
- "06de7570f46644ce89ebac09915d1df5",
349
- "8688a4936b9f47c6a86288f6c56fe08c",
350
- "ffd5823568564c05b5cd89b04132020a",
351
- "5afa1845fd734f0d81c1833615ebcef2",
352
- "f3f65dbeccec455cb169fb7b3b2f3748",
353
- "99933154a95f4547811b56004ba96c99",
354
- "c0a5b64331af4b5e89acc24905fecfc8",
355
- "a1c85b0d1291481d984a7cc6009294d1",
356
- "cc5115d155534a8b9187efeb3f18b917",
357
- "ad4354f9a8134e7ca58571bf10bd0668",
358
- "b5325917ecce4469be5f64936d88a9b9",
359
- "3f9c588d74ee46ae96689c7112c43291",
360
- "59052cd74dfc4da8aeb461ba43d6c1ce",
361
- "36a5fc86bdee4f5a98922ae9abd687fd",
362
- "830c2d0a44c245b9987ca5b9b3688300",
363
- "f919db0110564db9a3f286dc622c48a6",
364
- "2edec7086ca64edab9599fb64a73384a",
365
- "b68df68efe8b43389d0880af68ebb6ef",
366
- "9b7a963b1af749dc945d528ceff0487a",
367
- "774b8be0d1cc4f1c9c3dcacdf0724fb7",
368
- "b1855af26dcd4d1c8d84ec39144a10ea",
369
- "65eb95ad32fd4e52a4367d23aebc7a9f",
370
- "96f87f9f50374fb49fa2321f74c40522",
371
- "edd7d9ce8a1942628d08ae84bd424ea2",
372
- "777a1a034fe54c40a80176aa32de5bef",
373
- "bc67387104744a3c8885b5d0a681977d",
374
- "6f1fc70b6cf54f5fbe6dcb94828d412e",
375
- "6b4ae5c8971f47f0b02a5417cc84e548",
376
- "35523981fb7545b5bc70e32eaca6df61",
377
- "5562e178fcbb4c3690c4555ca1a25649",
378
- "77adc5c66da6481eba92996d3130e7e2",
379
- "26bb673b58cd43a09004c15cd2ae0cc7",
380
- "7656e8f0910244808ba32c9feb0d1cce",
381
- "1c170007cc714f179e6761a26f648928",
382
- "3abd1d02caf5489ba66907425f447651",
383
- "6bd2c40c93e14c0bb93447af64799be9",
384
- "b8111557128040b5a517ba77ebb0a244",
385
- "e9ce05dd20304114b148076a5489fdc3",
386
- "f35c6cab11eb4eebba14d2650e314daf",
387
- "9f1c5dc44df3452d8637639861e5c978",
388
- "3962d5f5f940453387e4e38c56c43555",
389
- "6c227510710d4364b9983808b5b101f1",
390
- "10043e32478b4400abb336d7d0fb6f18",
391
- "19264bfe1e6048449dd4467afe582218",
392
- "2086d0451a8b4fa2a395ff21e5d51d39",
393
- "e53a03ea9bbb46928937065deec1ac08",
394
- "73f6fa5f742d4341862f53da4263e5c5",
395
- "6bc1d0ab84234e37b2fd34bd520215a9",
396
- "679a9cc52b124d1b807d194dfc779e3c",
397
- "98f05b81a1ba49baa3f7694f8d26a3ed",
398
- "183d8bf73ede4872bab715efa0d011b4",
399
- "80818af9902541bc80c1fa55436b6b91",
400
- "8a1618e5df38497d92a2be9e3e982688",
401
- "c067f30d6a14445dabd68393f736853b",
402
- "e395cd15151743889282fff05ef628d4",
403
- "0ae8682d08f74b8cbd9091c7d60096f8",
404
- "a1f056b411d64699b93fd12da6e10162"
405
- ]
406
- },
407
- "id": "P9LDJ7o-Wsc-",
408
- "outputId": "08a795a9-53e3-4a2b-89d2-c0a8912d66b9"
409
- },
410
- "outputs": [
411
- {
412
- "name": "stderr",
413
- "output_type": "stream",
414
- "text": [
415
- "Parsing nodes: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 14/14 [00:00<00:00, 27.58it/s]\n",
416
- "Generating embeddings: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 108/108 [00:05<00:00, 21.15it/s]\n"
417
- ]
418
- }
419
- ],
420
- "source": [
421
- "from llama_index.core.extractors import (\n",
422
- " SummaryExtractor,\n",
423
- " QuestionsAnsweredExtractor,\n",
424
- " KeywordExtractor,\n",
425
- >>>>>>> b8b57398f877722d8b854eaf1fb3901fa0618894
426
  ")\n",
427
  "\n",
428
  "# Create the pipeline to apply the transformation on each chunk,\n",
@@ -430,90 +172,12 @@
430
  "pipeline = IngestionPipeline(\n",
431
  " transformations=[\n",
432
  " text_splitter,\n",
433
- " HuggingFaceEmbedding(\n",
434
- " model_name=\"BAAI/bge-small-en-v1.5\"\n",
435
- " ), # Or, OpenAIEmbedding()\n",
436
  " ],\n",
437
- " vector_store=vector_store,\n",
438
  ")\n",
439
  "\n",
440
  "nodes = pipeline.run(documents=documents, show_progress=True)"
441
- <<<<<<< HEAD
442
- =======
443
- ]
444
- },
445
- {
446
- "cell_type": "code",
447
- "execution_count": 13,
448
- "metadata": {
449
- "colab": {
450
- "base_uri": "https://localhost:8080/"
451
- },
452
- "id": "mPGa85hM2P3P",
453
- "outputId": "56c3980a-38a4-40e7-abdd-84ec1f26cb95"
454
- },
455
- "outputs": [
456
- {
457
- "data": {
458
- "text/plain": [
459
- "108"
460
- ]
461
- },
462
- "execution_count": 13,
463
- "metadata": {},
464
- "output_type": "execute_result"
465
- }
466
- ],
467
- "source": [
468
- "len(nodes)"
469
- ]
470
- },
471
- {
472
- "cell_type": "code",
473
- "execution_count": 14,
474
- "metadata": {
475
- "colab": {
476
- "base_uri": "https://localhost:8080/"
477
- },
478
- "id": "OeeG3jxT0taW",
479
- "outputId": "d1938534-9a12-4f5e-b7e1-5fd58d687d60"
480
- },
481
- "outputs": [
482
- {
483
- "name": "stdout",
484
- "output_type": "stream",
485
- "text": [
486
- " adding: mini-llama-articles/ (stored 0%)\n",
487
- " adding: mini-llama-articles/01877efc-b4a2-4da3-80b3-93cc40b27067/ (stored 0%)\n",
488
- " adding: mini-llama-articles/01877efc-b4a2-4da3-80b3-93cc40b27067/data_level0.bin"
489
- ]
490
- },
491
- {
492
- "name": "stderr",
493
- "output_type": "stream",
494
- "text": [
495
- "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
496
- "To disable this warning, you can either:\n",
497
- "\t- Avoid using `tokenizers` before the fork if possible\n",
498
- "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
499
- ]
500
- },
501
- {
502
- "name": "stdout",
503
- "output_type": "stream",
504
- "text": [
505
- " (deflated 57%)\n",
506
- " adding: mini-llama-articles/01877efc-b4a2-4da3-80b3-93cc40b27067/length.bin (deflated 48%)\n",
507
- " adding: mini-llama-articles/01877efc-b4a2-4da3-80b3-93cc40b27067/link_lists.bin (stored 0%)\n",
508
- " adding: mini-llama-articles/01877efc-b4a2-4da3-80b3-93cc40b27067/header.bin (deflated 61%)\n",
509
- " adding: mini-llama-articles/chroma.sqlite3 (deflated 66%)\n"
510
- ]
511
- }
512
- ],
513
- "source": [
514
- "# Compress the vector store directory to a zip file to be able to download and use later.\n",
515
- "!zip -r vectorstore-bge-embedding.zip mini-llama-articles"
516
- >>>>>>> b8b57398f877722d8b854eaf1fb3901fa0618894
517
  ]
518
  },
519
  {
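Reviewer note: the deleted branch verified the ingestion with `len(nodes)` (108 nodes) before zipping the store. On the resolved branch, a few equivalent sanity checks, assuming the `nodes` returned by `pipeline.run(...)` above and the `chroma_collection` from the setup sketched earlier:

```python
# Quick sanity checks after pipeline.run(...).
print(len(nodes))                  # number of ~512-token chunks produced
print(nodes[0].metadata["title"])  # CSV metadata carried onto each node
print(chroma_collection.count())   # embeddings persisted in ChromaDB
```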
@@ -522,20 +186,7 @@
522
  "id": "OWaT6rL7ksp8"
523
  },
524
  "source": [
525
- <<<<<<< HEAD
526
  "# Load vector store and create query engine"
527
- =======
528
- "# Load Indexes\n"
529
- ]
530
- },
531
- {
532
- "cell_type": "markdown",
533
- "metadata": {
534
- "id": "RF4U62oMr-iW"
535
- },
536
- "source": [
537
- "If you have already uploaded the zip file for the vector store checkpoint, please uncomment the code in the following cell block to extract its contents. After doing so, you will be able to load the dataset from local storage.\n"
538
- >>>>>>> b8b57398f877722d8b854eaf1fb3901fa0618894
539
  ]
540
  },
541
  {
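Reviewer note: the loading code itself sits outside the visible hunks. A sketch of what reloading the persisted store and rebuilding the index usually looks like, assuming the same BGE embedding model used at ingestion time:

```python
import chromadb
from llama_index.core import Settings, VectorStoreIndex
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.vector_stores.chroma import ChromaVectorStore

# Re-open the persisted collection; the vectors are already stored, so no
# documents need to be re-embedded.
db = chromadb.PersistentClient(path="./mini-llama-articles")
chroma_collection = db.get_or_create_collection("mini-llama-articles")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)

# Queries must be embedded with the same model used at ingestion time.
Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")

index = VectorStoreIndex.from_vector_store(vector_store)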
@@ -639,41 +290,11 @@
639
  "id": "RZ5iQ_KkJufJ",
640
  "outputId": "dd6029ee-10ed-4bf8-95d1-88ac5c636c47"
641
  },
642
- <<<<<<< HEAD
643
- =======
644
- "outputs": [
645
- {
646
- "name": "stderr",
647
- "output_type": "stream",
648
- "text": [
649
- "/var/folders/l7/9qcp7g5x5rl9x8ltw0t85qym0000gn/T/ipykernel_8187/3245113941.py:5: DeprecationWarning: Call to deprecated class method from_defaults. (ServiceContext is deprecated, please use `llama_index.settings.Settings` instead.) -- Deprecated since version 0.10.0.\n",
650
- " service_context = ServiceContext.from_defaults(llm=llm, embed_model=\"local:BAAI/bge-small-en-v1.5\")\n"
651
- ]
652
- }
653
- ],
654
- "source": [
655
- "from llama_index.core import ServiceContext\n",
656
- "\n",
657
- "# Define a ServiceContext that uses the BGE model for embedding which will be loads from Huggingface.\n",
658
- "# The model will be downloaded in your local machine.\n",
659
- "service_context = ServiceContext.from_defaults(\n",
660
- " llm=llm, embed_model=\"local:BAAI/bge-small-en-v1.5\"\n",
661
- ")"
662
- ]
663
- },
664
- {
665
- "cell_type": "code",
666
- "execution_count": 27,
667
- "metadata": {
668
- "id": "jKXURvLtkuTS"
669
- },
670
- >>>>>>> b8b57398f877722d8b854eaf1fb3901fa0618894
671
  "outputs": [],
672
  "source": [
673
  "from llama_index.core.prompts import PromptTemplate\n",
674
  "from llama_index.llms.together import TogetherLLM\n",
675
  "\n",
676
- <<<<<<< HEAD
677
  "# Use the Together AI service to access the LLaMA2-70B chat model\n",
678
  "llm = TogetherLLM(\n",
679
  " model=\"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo\",\n",
@@ -686,12 +307,6 @@
686
  "# Define a query engine that is responsible for retrieving related pieces of text,\n",
687
  "# and using a LLM to formulate the final answer.\n",
688
  "query_engine = index.as_query_engine()"
689
- =======
690
- "# Create the index based on the vector store.\n",
691
- "index = VectorStoreIndex.from_vector_store(\n",
692
- " vector_store, service_context=service_context\n",
693
- ")"
694
- >>>>>>> b8b57398f877722d8b854eaf1fb3901fa0618894
695
  ]
696
  },
697
  {
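Reviewer note: the hunk below shows `llm = TogetherLLM(` with its arguments truncated by the diff context. A hedged completion, where `api_key` and `max_tokens` are assumptions rather than values taken from the diff, followed by a sample query against the index:

```python
import os
from llama_index.llms.together import TogetherLLM

# Plausible completion of the truncated call; the env var is set in the
# first cell of the notebook.
llm = TogetherLLM(
    model="meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
    api_key=os.environ["TOGETHER_AI_API_TOKEN"],  # assumed
    max_tokens=512,                               # assumed
)

# One way to wire the LLM into the query engine explicitly.
query_engine = index.as_query_engine(llm=llm)
res = query_engine.query("How many parameters does the LLaMA 2 model have?")
print(res.response)
```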
@@ -700,33 +315,12 @@
700
  "id": "8JPD8yAinVSq"
701
  },
702
  "source": [
703
- <<<<<<< HEAD
704
  "# Test query engine"
705
- =======
706
- "# Query Dataset\n"
707
- >>>>>>> b8b57398f877722d8b854eaf1fb3901fa0618894
708
  ]
709
  },
710
  {
711
  "cell_type": "code",
712
- <<<<<<< HEAD
713
  "execution_count": null,
714
- =======
715
- "execution_count": 28,
716
- "metadata": {
717
- "id": "8lBu8V7tJ2_8"
718
- },
719
- "outputs": [],
720
- "source": [
721
- "# Define a query engine that is responsible for retrieving related pieces of text,\n",
722
- "# and using a LLM to formulate the final answer.\n",
723
- "query_engine = index.as_query_engine(llm=llm)"
724
- ]
725
- },
726
- {
727
- "cell_type": "code",
728
- "execution_count": 29,
729
- >>>>>>> b8b57398f877722d8b854eaf1fb3901fa0618894
730
  "metadata": {
731
  "id": "rWAI0jUhJ7qH"
732
  },
@@ -751,11 +345,11 @@
751
  "source": [
752
  "# print the source nodes used to write the answer\n",
753
  "for src in res.source_nodes:\n",
754
- " print(\"Node ID\\t\", src.node_id)\n",
755
- " print(\"Title\\t\", src.metadata[\"title\"])\n",
756
- " print(\"Text\\t\", src.text)\n",
757
- " print(\"Score\\t\", src.score)\n",
758
- " print(\"-_\" * 20)"
759
  ]
760
  },
761
  {
@@ -764,11 +358,7 @@
764
  "id": "iMkpzH7vvb09"
765
  },
766
  "source": [
767
- <<<<<<< HEAD
768
  "# Evaluate the retriever"
769
- =======
770
- "# Evaluate\n"
771
- >>>>>>> b8b57398f877722d8b854eaf1fb3901fa0618894
772
  ]
773
  },
774
  {
@@ -784,14 +374,16 @@
784
  "outputs": [],
785
  "source": [
786
  "from llama_index.core.evaluation import generate_question_context_pairs\n",
787
- "from llama_index.llms.gemini import Gemini\n",
788
  "\n",
789
  "# Create questions for each segment. These questions will be used to\n",
790
  "# assess whether the retriever can accurately identify and return the\n",
791
  "# corresponding segment when queried.\n",
792
- "llm = Gemini(model=\"models/gemini-1.5-flash\", temperature=1, max_tokens=512)\n",
793
  "rag_eval_dataset = generate_question_context_pairs(\n",
794
- " nodes, llm=llm, num_questions_per_chunk=1\n",
 
 
795
  ")\n",
796
  "\n",
797
  "# We can save the evaluation dataset as a json file for later use.\n",
@@ -804,7 +396,7 @@
804
  "id": "JjM95B9Zs29W"
805
  },
806
  "source": [
807
- "If you have uploaded the generated question JSON file, please uncomment the code in the next cell block. This will avoid the need to generate the questions manually, saving you time and effort.\n"
808
  ]
809
  },
810
  {
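Reviewer note: the evaluation dataset generated above feeds a retriever evaluation whose code is outside the visible hunks; the `display_results_retriever` helper that consumes its results appears in the next hunk. A sketch of that step, with `similarity_top_k=2` as an illustrative choice:

```python
from llama_index.core.evaluation import RetrieverEvaluator

# Measure hit rate and MRR of the retriever over the generated
# question/context pairs. The top-level await works in Jupyter thanks to
# the nest_asyncio.apply() call at the top of the notebook.
retriever = index.as_retriever(similarity_top_k=2)
retriever_evaluator = RetrieverEvaluator.from_metric_names(
    ["mrr", "hit_rate"], retriever=retriever
)
eval_results = await retriever_evaluator.aevaluate_dataset(rag_eval_dataset)

# Summarize with the display_results_retriever helper shown in the diff.
print(display_results_retriever("Retriever top_2", eval_results))
```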
@@ -825,43 +417,7 @@
825
  },
826
  {
827
  "cell_type": "code",
828
- <<<<<<< HEAD
829
  "execution_count": null,
830
- =======
831
- "execution_count": 33,
832
- "metadata": {
833
- "id": "H7ubvcbk27vr"
834
- },
835
- "outputs": [],
836
- "source": [
837
- "import pandas as pd\n",
838
- "\n",
839
- "\n",
840
- "# A simple function to show the evaluation result.\n",
841
- "def display_results_retriever(name, eval_results):\n",
842
- " \"\"\"Display results from evaluate.\"\"\"\n",
843
- "\n",
844
- " metric_dicts = []\n",
845
- " for eval_result in eval_results:\n",
846
- " metric_dict = eval_result.metric_vals_dict\n",
847
- " metric_dicts.append(metric_dict)\n",
848
- "\n",
849
- " full_df = pd.DataFrame(metric_dicts)\n",
850
- "\n",
851
- " hit_rate = full_df[\"hit_rate\"].mean()\n",
852
- " mrr = full_df[\"mrr\"].mean()\n",
853
- "\n",
854
- " metric_df = pd.DataFrame(\n",
855
- " {\"Retriever Name\": [name], \"Hit Rate\": [hit_rate], \"MRR\": [mrr]}\n",
856
- " )\n",
857
- "\n",
858
- " return metric_df"
859
- ]
860
- },
861
- {
862
- "cell_type": "code",
863
- "execution_count": 34,
864
- >>>>>>> b8b57398f877722d8b854eaf1fb3901fa0618894
865
  "metadata": {
866
  "colab": {
867
  "base_uri": "https://localhost:8080/"
@@ -1085,11 +641,7 @@
1085
  "name": "python",
1086
  "nbconvert_exporter": "python",
1087
  "pygments_lexer": "ipython3",
1088
- <<<<<<< HEAD
1089
  "version": "3.11.4"
1090
- =======
1091
- "version": "3.12.4"
1092
- >>>>>>> b8b57398f877722d8b854eaf1fb3901fa0618894
1093
  },
1094
  "widgets": {
1095
  "application/vnd.jupyter.widget-state+json": {