import os
import re
import lz4
import json
import time
import uuid
import torch
import base64
import asyncio
import msgpack
import validators
import numpy as np
import pandas as pd
import streamlit as st
from vllm import LLM
from numpy import ndarray
import outlines
from outlines import models
from datetime import datetime
from typing import List, Dict
from pydantic import BaseModel, Field
from dense_embed import embed_text
from ppt_chunker import ppt_chunker
from qdrant_client import QdrantClient
from streamlit_navigation_bar import st_navbar
from vllm.sampling_params import SamplingParams
from fastembed import SparseTextEmbedding, SparseEmbedding
from outlines.fsm.json_schema import build_regex_from_schema
from unstructured.nlp.tokenize import download_nltk_packages
from scipy.sparse import csr_matrix, save_npz, load_npz, vstack
from infinity_emb import AsyncEngineArray, EngineArgs, AsyncEmbeddingEngine
from prompts import (
transform_query,
build_prompt_conv,
route_llm,
open_query_prompt,
question_type_prompt,
idk,
self_knowledge,
answer_with_context
)
from qdrant_client.models import (
NamedSparseVector,
NamedVector,
SparseVector,
PointStruct,
ScoredPoint,
Prefetch,
FusionQuery,
Fusion,
SearchRequest,
Modifier,
OptimizersConfigDiff,
HnswConfigDiff,
Distance,
VectorParams,
SparseVectorParams,
SparseIndexParams,
Batch,
Filter,
FieldCondition,
MatchValue,
HasIdCondition,
Datatype,
BinaryQuantization,
BinaryQuantizationConfig
)
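# Pydantic schema for the LLM's constrained output: Outlines turns this JSON schema
# into a regex so every generation is a valid {"answer": "..."} object.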
class Question(BaseModel):
answer: str
schema = json.dumps(Question.model_json_schema())
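# Maps a file extension to (base64-encoded icon, human-readable type) for the documents table.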
icon_to_types = {
'ppt':('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAACXBIWXMAAAsTAAALEwEAmpwYAAAC4ElEQVR4nO2YS2gTQRzGF/VgPVuPpsXSQ3eshdbXqVDMEtlN1UN8HqTZYouCVJTsiIfgTcWbD0o0mp3GKq1oC55E2AbbKn3dakISCVjwUNtLlZ1a24xs0oqG2uxmJ2si+8F32NnL95v/fx67DGPLli1b/5UUkauJtDsvDHm5XkXkphSvc17xcksqgksYSXMYwSksS71Yhudx6MouphSk+Ju3RLzcGUV0jg6JHFnPGMH1LcMRVZZOkz7P5n8TXuRcisjF/xY8LwDKWpVhDIcgZ1nwiXPCNkXkHuYLrhcAr4EgKUD6LlUUNfxIp3OHIjon9YY3AoCzHl8IXq0sWvgh0RkzEr4AAKK1FHWIbNsYm/lCAfBqJchj/1ZqAEZ6nhIAURHsNhQy0OToCjQ5vj1ochAzDu6vJgOte00DYATJYsh32AiA6fC/Q9AAUJEU1X1O0Aq/ZhoAOAPhO1nWABhJw2UNoMowjdG16oIBcrWy/IPMx6Pk9eV2iyoACQ75OkwDxF4+JdEXT8jMaCTznF5ZJoNtR60BkKWwaYDgwZpfY/FXzzNj0/3IGgAEJ6gCjN29ma3KwDOLAKRZ0wBhrpGglnoy2HaMLHyeyYy98XVaAqAiuGgaIFcf+ns2XMRJAdD0ommA8Xu3yNidG+Td7etk4Gxr3m2ULgA7S3UN6DFlgIlyB+gpcwBQ+EFWqGmFTwggnTqyp6p8AXjwNm/4kgZwg+N6Ab7SCv9oXxUdAB5ME4/eD5rGnRdpQGjhIy21dADcdS0MDSUFEKC8qxAdi/c+Q0ufPAcqEjw7biHA+1Szg95vFU1xV0NlgmdjRZ95no3GhNrtVMP/CQHGijnzcVdDcX4t5rRTdzF6PkW7bTbSR373Ia3cpsPzYJrabmNU2h6ddNedSvBgWDvyDcx2WjthEwJ7gviZTUwpKOaur9YuXQmBDSd5djLJg7kkD76v+ot2Jc68E0CHrruNLVu2bDGlrJ8c/urSuEn7XgAAAABJRU5ErkJggg==',
'Powerpoint'),
'pptx':('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAACXBIWXMAAAsTAAALEwEAmpwYAAAC4ElEQVR4nO2YS2gTQRzGF/VgPVuPpsXSQ3eshdbXqVDMEtlN1UN8HqTZYouCVJTsiIfgTcWbD0o0mp3GKq1oC55E2AbbKn3dakISCVjwUNtLlZ1a24xs0oqG2uxmJ2si+8F32NnL95v/fx67DGPLli1b/5UUkauJtDsvDHm5XkXkphSvc17xcksqgksYSXMYwSksS71Yhudx6MouphSk+Ju3RLzcGUV0jg6JHFnPGMH1LcMRVZZOkz7P5n8TXuRcisjF/xY8LwDKWpVhDIcgZ1nwiXPCNkXkHuYLrhcAr4EgKUD6LlUUNfxIp3OHIjon9YY3AoCzHl8IXq0sWvgh0RkzEr4AAKK1FHWIbNsYm/lCAfBqJchj/1ZqAEZ6nhIAURHsNhQy0OToCjQ5vj1ochAzDu6vJgOte00DYATJYsh32AiA6fC/Q9AAUJEU1X1O0Aq/ZhoAOAPhO1nWABhJw2UNoMowjdG16oIBcrWy/IPMx6Pk9eV2iyoACQ75OkwDxF4+JdEXT8jMaCTznF5ZJoNtR60BkKWwaYDgwZpfY/FXzzNj0/3IGgAEJ6gCjN29ma3KwDOLAKRZ0wBhrpGglnoy2HaMLHyeyYy98XVaAqAiuGgaIFcf+ns2XMRJAdD0ommA8Xu3yNidG+Td7etk4Gxr3m2ULgA7S3UN6DFlgIlyB+gpcwBQ+EFWqGmFTwggnTqyp6p8AXjwNm/4kgZwg+N6Ab7SCv9oXxUdAB5ME4/eD5rGnRdpQGjhIy21dADcdS0MDSUFEKC8qxAdi/c+Q0ufPAcqEjw7biHA+1Szg95vFU1xV0NlgmdjRZ95no3GhNrtVMP/CQHGijnzcVdDcX4t5rRTdzF6PkW7bTbSR373Ia3cpsPzYJrabmNU2h6ddNedSvBgWDvyDcx2WjthEwJ7gviZTUwpKOaur9YuXQmBDSd5djLJg7kkD76v+ot2Jc68E0CHrruNLVu2bDGlrJ8c/urSuEn7XgAAAABJRU5ErkJggg==',
'Powerpoint'),
'txt':('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAAACXBIWXMAAAsTAAALEwEAmpwYAAAEA0lEQVR4nO2bW08TQRTH+8QDH8DPpF+EGLt+AENI44OC7u6L+mAxYQ0xLdZoDE8kCpr4oEYUGu4kEOgFRWm5FLbd3Zk9Ztpus132WmZ3WtqTnGQzLbP7/82ZM2dmaSw2sIENbGAdWHIkOSxw0pgQlzYFTlJFTgIGXhQ4aSKRyAzFojT+9vQNMT71i5HoS04gRCpe4KZWyY2TiUzrIdTj3cCOzooA1ZO64/ND23azm79DfD+73IqEyMSLcWmF3HQykYG5ldOOAVxVvHayB7pcbt2fifgPu0pHAGiJJ59FAoB3EN8JAJriIwHAu4gPCoC2+NAB8B7i/QDA1ePQxIcKgPch3gsAkv8BMQIhDPGhAeB9incDgOQStJlWDSyegHMTHwqA5Ehy2ChyXtx/A3OrZ47ivSIAGxCUi8DitcpB/U915dxRfCgABE4aM4ocL/Hvv+TrN39+L+Wc8FS5I/E6VhvwdB1AvYgQQFzaDFqKLryep5btUbUMtoaUyCJA9SucjPznzDxUj3boLnVKBUDHpgiQo4sAkWFt35bwyNwn5iK+qwCgMNZ5reYqvmsAoJCKHD/OHABiKJ4pgK3vi5AcTTE/DPHtcSnPcy9vUQMwOZpmLyqgC3elAhUA6KzY6vTBj1pPuO+pInoAMOZ2XwJApsTWdwCQJdvTBPD4Zw0mFrsYALJZ6vwAyFea5ayN5Sq4/p2HizWoqDocynobBHJ9cK7X+wjSH3UAyGGd9wMg5/OBiXhiC3mt1Uauif2V9cD9UQOAXIqcoFPAMLvPpjcaW18VAzzLKvAkq4DS1JreVgP3RwUA8qjwaAIgvlZqKF4vYVgvN6/LzqMaOgDwKG9pAzCPuhENT7MKWwDYpbanDcA87635gAkA7LGxCQPAp0IXAVA9dnW0AZBwJ2FvGJkOZFowBYBctrS0AWw0E9+aKQmSa2YAkMd+niaA1FZjGVRQY9TNCfHVJqNlEDwOM2gCIBUfsY+5y4VQwaHAiRwAtpzkBAVAKjWnau3rb1QP+3FLKbx9jOHbHxS4P+oAsM0x1rXfDYLHGV5fAMDmWkAugXaau/SfGdcWAG4Tf0Re0QBgDaB22h8A1Law3wcgLyvJK6vm1JgcnYn0QJPZoajWOrdvjLzhu0tbPQVB4KS8eEe6GQiAdsWXFl5uTbCduO/Q9mtis8NeEB8qAL0HxIcKAHpAfM8BwJTF9xQAHIL4SACk+VmYEWfbxARtM4tPPXoLaf5dm4irtIUOQLSJiKBtdg9Mu20AgJaJlpEkIWwNbT9tJOxJuFpDlnZbVyZBHFLCiyoJFkmHhfWdrhe/t7xE/yczAidNsN64BPb41Dg1AIlEZqgJoR4JXe5sfjY3sIENLHZd7D/x1k4dCUv1GwAAAABJRU5ErkJggg==',
'Txt'),
'doc':('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAACXBIWXMAAAsTAAALEwEAmpwYAAACOElEQVR4nO2ZQWgTQRSGh+DFq70VD2lvQuKpV714cGtLu3ioiiJeWr0oaSnSm0KhKnabJiKYYhHRQxORBmLqoYVKqaGGWmizAYsaCBhDpdCkqatpap68l3QO4kHJsBnN/PDDzts34f+YN3sJY0pKSkr/pFz31q64/Ym822+CPU7kXX7zsjAAe8ObZJfPzAkEsDe8u2oFsC91An41Qv/rCGmGh2nGDtMMqMWOznFoHpipC8BOreE5RMc4uIdNoWZ/AAAirQCGG/kEjg8GARVZ+shrR3ofwb5ODD3j9Wg8RTXcIw3AwS4ffN/dgy85i9f6fLMc4MaTGK9vbH2lXtwjDQDTDHidzFDYlksPaf14LgnFUgVqdiVNNXyHwl6pRohpBtwJxSncmZEXtE5lc7D07jNMx95DwdqFAx1eOHc7Sj3YKx1A180whTOeL0Pz+QA9e6ffwvXJBXpuu/qU1ijslQ6gqec+lMsAi8kMnQKqZyQCxwan6NnzYJ5GB3uw99f9Rz1vhJr9LQA6md6Eb8USBGZWKfThCxN0WfEuhGMfwCqWqOd3e6UACFSDb1tF+LRZ4PX4ehZKez/oHfZIC3Dx7kv+6QwtrPO6L7zC69gjLUBr9TOJ6g+84vWztyp3AoU90gKwGqwAPI13AqMFUeEdp7x1ADhpXBMB4WgfKx/qnLRaT4e2RJqJUosehHqYKYCq1AnoaoQafIScejBvd3hn95S4PziceqgPf9DO8E492CsMQElJSYmJ0E+635eFCoKREwAAAABJRU5ErkJggg==',
'Microsoft Word'),
'docx':('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAACXBIWXMAAAsTAAALEwEAmpwYAAACOElEQVR4nO2ZQWgTQRSGh+DFq70VD2lvQuKpV714cGtLu3ioiiJeWr0oaSnSm0KhKnabJiKYYhHRQxORBmLqoYVKqaGGWmizAYsaCBhDpdCkqatpap68l3QO4kHJsBnN/PDDzts34f+YN3sJY0pKSkr/pFz31q64/Ym822+CPU7kXX7zsjAAe8ObZJfPzAkEsDe8u2oFsC91An41Qv/rCGmGh2nGDtMMqMWOznFoHpipC8BOreE5RMc4uIdNoWZ/AAAirQCGG/kEjg8GARVZ+shrR3ofwb5ODD3j9Wg8RTXcIw3AwS4ffN/dgy85i9f6fLMc4MaTGK9vbH2lXtwjDQDTDHidzFDYlksPaf14LgnFUgVqdiVNNXyHwl6pRohpBtwJxSncmZEXtE5lc7D07jNMx95DwdqFAx1eOHc7Sj3YKx1A180whTOeL0Pz+QA9e6ffwvXJBXpuu/qU1ijslQ6gqec+lMsAi8kMnQKqZyQCxwan6NnzYJ5GB3uw99f9Rz1vhJr9LQA6md6Eb8USBGZWKfThCxN0WfEuhGMfwCqWqOd3e6UACFSDb1tF+LRZ4PX4ehZKez/oHfZIC3Dx7kv+6QwtrPO6L7zC69gjLUBr9TOJ6g+84vWztyp3AoU90gKwGqwAPI13AqMFUeEdp7x1ADhpXBMB4WgfKx/qnLRaT4e2RJqJUosehHqYKYCq1AnoaoQafIScejBvd3hn95S4PziceqgPf9DO8E492CsMQElJSYmJ0E+635eFCoKREwAAAABJRU5ErkJggg==',
'Microsoft Word'),
'xslx':('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAACXBIWXMAAAsTAAALEwEAmpwYAAADGUlEQVR4nO2Za0hTYRzG9z0vu59JpK3vedvmTHOmM1OnE0wqbZWJjjTNS2nz7jZvFVmJUTOCchF0+RaBiAThQkgQKSKKbjohIqMIKlG3J3aiVXicw3PaMToPPF/eT7/fe/7vew4cHo8LFy5c/qlssht1cnvpjNxuxGqqcJhX1dhRs1M5as6kLbBx0OhcLTwdAYVHwtE+TVuADjxdAYXDDE5Azj0BMzdCtLLhTCH8aXifAREXiyC/WhrYQ0z06HVET+4M0a0H0ZUDojMb0g4dpNYsSC1ZkJgzIWnPgKRtB8Rt6RC3pkPcsh2i5jSImrQQNqZC2JACUYMWMose4f2GPyT+vkC33kkXXmhKgeD4NkhMqVB1FyHJVoHkKzVk6V4CPD8EGIEX1CdDUK+BqusAEs+XB1Cgizl4fl3SD4H+sgAKdDIHzz/GgoCUAn69NRsvZ2fgSeuQzQsffaoQcwvz+DY/h5gTBUvg+Ue3siBgpd75/dfbSYHZL58QYckhd/7W5Ai5Zh26RAkfWpvIgoBl+bEZef6QBO4YvgxNXwlcbjdevHdC1phGCR9akxB4AYmPmU84V4z5xQV8/PoZY68fwe12Q2+rXhY+tHoLCwLtvg/sBcdt/MyNiWGf8CFV8SwItPm+bezjd70CNyeGfcKHHImHOCcS0l0KyAxqsvzmVFrlrRRf8DkDNeTYjE89wYNXk6RE3kDtsvAhlWoWBFqp4cOa0/Hs3RsSWm+rgrbPSMpMfXiLMJOWEj64Mo4FgRbql9Tpe9dI+OGnY96xufP4PrnWOzJICR9coQq8gIgCPqH3IHn7uNwuaM4We2de2VOABdci2cST+5bABx1WsiDQtPLnga+Z/x0+qJwFASGD8EFlChYEGpiDX3coNvACAlOKkyn4oJJoFgTqNZn8Oo2TCXjBzkiIc6NA7P4lQLc8uhHpNsPfivVRkObHgChQrR0BSV4M/KnU0/xYELuVkO2NWzsCxB4l/K5n5xmElzEhwCSMjBP4H58AYVA72YInDGr6PzhkhepMNiQIg3paZojPoC3AhQsXLjwm8x3YSSmFlSW/AQAAAABJRU5ErkJggg==',
'Excel')
}
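# Exact/keyword search: a single BM42 sparse prefetch over the 'title-sparse' vectors,
# fused with Reciprocal Rank Fusion, returning only the best-matching point.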
def query_keywords_search(query: str, client: QdrantClient, collection_name: str, sparse_embeddings):
return client.query_points(
collection_name=collection_name,
prefetch=Prefetch(query=sparse_embeddings, using='title-sparse', limit=25),
query=FusionQuery(fusion=Fusion.RRF),
with_vectors=False,
with_payload=True,
limit=1
)
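# Hybrid search: sparse (BM42) and dense (mxbai) prefetches over the text vectors,
# fused with Reciprocal Rank Fusion, returning up to 10 points above the score threshold.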
def query_hybrid_search(query: str, client: QdrantClient, collection_name: str, dense_embeddings, sparse_embeddings):
return client.query_points(
collection_name=collection_name,
prefetch=[
Prefetch(query=sparse_embeddings, using="text-sparse", limit=25),
Prefetch(query=dense_embeddings[0], using="text-dense", limit=25)
],
query=FusionQuery(fusion=Fusion.RRF),
with_vectors=False,
with_payload=True,
limit=10,
score_threshold=0.95
)
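# Core RAG routine. 'Exact Search' returns the best title match verbatim; otherwise an
# Outlines choice generator routes the query (document search vs. general chat vs.
# internal knowledge) and the answer is generated as JSON constrained by the Question schema.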
def generate_answer(query: str,
client: QdrantClient,
collection_name: str,
llm,
dense_model: AsyncEmbeddingEngine,
sparse_model: SparseTextEmbedding,
past_messages: str,
search_strategy: str,
):
sparse_embeddings = list(sparse_model.query_embed(query))[0].as_object()
s = time.time()
if search_strategy == 'Exact Search':
scored_point = query_keywords_search(query, client, collection_name, sparse_embeddings).points[0]
text = scored_point.payload['text']
metadata = scored_point.payload['metadata']
answer = f"{text}\n\n\nSource :\n\n{metadata}"
else:
regex = build_regex_from_schema(schema, r"[\n ]?")
gen_text = outlines.generate.regex(llm, regex)
gen_choice = outlines.generate.choice(llm, choices=['Yes', 'No'])
# Route on the conversation so far: decide whether the query calls for a document search
prompt = route_llm(past_messages, query)
action = gen_choice(prompt, max_tokens=2, sampling_params=SamplingParams(temperature=0))
print(f'Choice: {action}')
if action == 'Yes':
dense_embeddings, tokens_count = asyncio.run(embed_text(dense_model[0], transform_query(query)))
scored_points = query_hybrid_search(query, client, collection_name, dense_embeddings, sparse_embeddings).points
print(f'Score : {scored_points[0]}')
docs = [(scored_point.payload['text'], scored_point.payload['metadata']) for scored_point in scored_points]
contents, metadatas = [list(t) for t in zip(*docs)]
context = "\n".join(contents)
print(f'Context : \n + {context}')
filtered_metadatas = {metadata['url'] for metadata in metadatas if 'url' in metadata}
result_metadatas = "\n\n".join(f'{value}' for value in filtered_metadatas)
prompt = answer_with_context(context, query)
answer = json.loads(gen_text(prompt, max_tokens=300, sampling_params=SamplingParams(temperature=0)))['answer']
answer = f"{answer}\n\n\nSource(s) :\n\n{result_metadatas}"
if search_strategy == 'Documents + LLM Search':
answer = f'Documents Based :\n\n{answer}'
else:
gen_choice = outlines.generate.choice(llm, choices=['Domain-Specific Question', 'General Question'])
prompt = question_type_prompt(query)
action = gen_choice(prompt, max_tokens=3, sampling_params=SamplingParams(temperature=0))
print(f'Choice 2: {action}')
if action == 'General Question':
prompt = open_query_prompt(past_messages, query)
answer = json.loads(gen_text(prompt, max_tokens=300, sampling_params=SamplingParams(temperature=0.6, top_p=0.9, top_k=10)))['answer']
else:
if search_strategy == 'Documents Only Search':
prompt = idk(query)
answer = json.loads(gen_text(prompt, max_tokens=128, sampling_params=SamplingParams(temperature=0.6, top_p=0.9, top_k=10)))['answer']
elif search_strategy == 'Documents + LLM Search':
prompt = self_knowledge(query)
answer = json.loads(gen_text(prompt, max_tokens=300, sampling_params=SamplingParams(temperature=0.6, top_p=0.9, top_k=10)))['answer']
answer = f'Internal Knowledge :\n\n{answer}'
torch.cuda.empty_cache()
e = time.time()
f = e - s
print(f'SEARCH TIME : {f}')
return answer
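# Rebuilds the in-memory ingestion arrays from the files saved under the embeddings
# directory: msgpack payloads, dense .npz archives, sparse CSR matrices and .npy id lists.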
def collect_files(directory, pattern):
array = []
for filename in os.listdir(directory):
# Match the suffix exactly so '_ids' does not also collect the '_ids_titles' files
if os.path.splitext(filename)[0].endswith(pattern):
if filename.endswith('.msgpack'):
with open(os.path.join(directory, filename), "rb") as data_file_payload:
decompressed_payload = data_file_payload.read()
array.extend(msgpack.unpackb(decompressed_payload, raw=False))
elif filename.endswith('.npz') and (pattern == '_dense'):
array.extend(list(np.load(os.path.join(directory, filename)).values()))
elif filename.endswith('.npz') and ((pattern == '_sparse') or (pattern == '_sparse_titles')):
sparse_embeddings = []
loaded_sparse_matrix = load_npz(os.path.join(directory, filename))
for i in range(loaded_sparse_matrix.shape[0]):
row = loaded_sparse_matrix.getrow(i)
values = row.data.tolist()
indices = row.indices.tolist()
embedding = SparseVector(indices=indices, values=values)
sparse_embeddings.append(embedding)
array.extend(sparse_embeddings)
elif (filename.endswith('.npy')):
ids_list = np.load(os.path.join(directory, filename), allow_pickle=True).tolist()
array.extend(ids_list)
return array
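# Loads the dense (infinity-emb), sparse (BM42) and LLM (Mistral Nemo GPTQ via vLLM + Outlines)
# models, creates the in-memory Qdrant collection and ingests the documents; cached once per
# Streamlit session via st.cache_resource.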
@st.cache_resource(show_spinner=False)
def load_models_and_documents():
container = st.empty()
with container.status("Loading AI Models and Preparing Documents...", expanded=True) as status:
st.write('Downloading and Loading the MixedBread Mxbai Dense Embedding Model with the Infinity embedding engine (torch backend)...')
dense_model = AsyncEngineArray.from_args(
[
EngineArgs(
model_name_or_path='mixedbread-ai/mxbai-embed-large-v1',
engine='torch',
device='cuda',
embedding_dtype='float32',
dtype='float16',
pooling_method='cls',
lengths_via_tokenize=True
)
]
)
st.write('Downloading and Loading Qdrant BM42 Sparse Embedding Model under ONNX using the CPU...')
sparse_model = SparseTextEmbedding(
'Qdrant/bm42-all-minilm-l6-v2-attentions',
cache_dir=os.getenv('HF_HOME'),
providers=['CPUExecutionProvider']
)
st.write('Downloading and Loading Mistral Nemo quantized with GPTQ and using Outlines + vLLM Engine as backend...')
llm = LLM(
model="shuyuej/Mistral-Nemo-Instruct-2407-GPTQ",
tensor_parallel_size=1,
enforce_eager=True,
gpu_memory_utilization=1,
max_model_len=10240,
dtype=torch.float16,
max_num_seqs=64,
quantization="gptq"
)
model = models.VLLM(llm)
st.write('Downloading NLTK Packages...')
download_nltk_packages()
st.write('Creating the Collection for our in-memory Qdrant Vector Database...')
client = QdrantClient(':memory:')
collection_name = 'collection_demo'
client.create_collection(
collection_name,
{
'text-dense': VectorParams(
size=1024,
distance=Distance.COSINE,
datatype=Datatype.FLOAT16,
on_disk=False
)
},
{
'text-sparse': SparseVectorParams(
index=SparseIndexParams(
on_disk=False
),
modifier=Modifier.IDF
),
'title-sparse': SparseVectorParams(
index=SparseIndexParams(
on_disk=False
),
modifier=Modifier.IDF
)
},
2,
optimizers_config=OptimizersConfigDiff(
indexing_threshold=0,
default_segment_number=4
),
hnsw_config=HnswConfigDiff(
on_disk=False,
m=32,
ef_construct=200
)
)
name = 'action_rpg'
embeddings_path = os.path.join(os.getenv('HF_HOME'), 'embeddings')
payload_path = os.path.join(embeddings_path, name + '_payload.msgpack')
payload_titles_path = os.path.join(embeddings_path, name + '_payload_titles.msgpack')
dense_path = os.path.join(embeddings_path, name + '_dense.npz')
sparse_path = os.path.join(embeddings_path, name + '_sparse.npz')
sparse_titles_path = os.path.join(embeddings_path, name + '_sparse_titles.npz')
ids_path = os.path.join(embeddings_path, name + '_ids.npy')
ids_titles_path = os.path.join(embeddings_path, name + '_ids_titles.npy')
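# First run: build and persist the title embeddings for the Wikipedia video-games dataset.
# Subsequent runs: reload everything already saved under the embeddings directory.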
if not os.path.exists(embeddings_path):
os.mkdir(embeddings_path)
st.write('Downloading and Loading Video Games Dataset coming from Wikipedia...')
dataset = pd.read_parquet(os.path.join(os.getenv('HOME'),'data', 'train_pages.parquet.zst'), engine='pyarrow')
for columnName, columnData in dataset.items():
if columnName == 'text':
documents = columnData.values.tolist()
elif columnName == 'section_title':
metadatas_titles = columnData.values.tolist()
elif columnName == 'url':
metadatas_url = columnData.values.tolist()
st.write('Transforming the Wikipedia Video Games Dataset into ingestable format for our Qdrant Vector Database...')
payload_docs = [{ 'text': text, 'metadata': { 'url': url } } for text, url in zip(documents, metadatas_url)]
start_sparse = time.time()
sparse_embeddings = [SparseVector(indices=s.indices.tolist(), values=s.values.tolist()) for s in sparse_model.embed(metadatas_titles, 32)]
end_sparse = time.time()
final_sparse = end_sparse - start_sparse
print(f'SPARSE TIME: {final_sparse}')
st.write('Saving the Wikipedia Video Games Dataset to disk in a quickly ingestable format...')
with open(payload_titles_path, "wb") as outfile_texts:
packed_payload = msgpack.packb(payload_docs, use_bin_type=True)
outfile_texts.write(packed_payload)
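# Pack the variable-length sparse title embeddings into one CSR matrix
# (one row per document, width = largest seen index + 1) so they can be saved with save_npz.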
max_index = 0
for embedding in sparse_embeddings:
if len(embedding.indices) > 0:
max_index = max(max_index, max(embedding.indices))
sparse_matrices = []
for embedding in sparse_embeddings:
data = np.array(embedding.values)
indices = np.array(embedding.indices)
indptr = np.array([0, len(data)])
matrix = csr_matrix((data, indices, indptr), shape=(1, max_index + 1))
sparse_matrices.append(matrix)
combined_sparse_matrix = vstack(sparse_matrices)
save_npz(sparse_titles_path, combined_sparse_matrix)
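# Generate one 128-bit integer ID per payload; UUIDs whose hex starts with '0'
# are re-rolled so every ID keeps its full 32-hex-digit width.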
unique_ids = []
while len(unique_ids) < len(payload_docs):
new_id = uuid.uuid4()
while new_id.hex[0] == '0':
new_id = uuid.uuid4()
unique_ids.append(new_id.int)
np.save(ids_titles_path, np.array(unique_ids), allow_pickle=True)
st.write('Ingesting saved documents on disk into our Qdrant Vector Database...')
client.upsert(
collection_name,
points=Batch(
ids=unique_ids,
payloads=payload_docs,
vectors={
'title-sparse': sparse_embeddings,
}
)
)
client.update_collection(
collection_name=collection_name,
optimizer_config=OptimizersConfigDiff(indexing_threshold=20000)
)
else:
st.write('Loading the saved documents from disk...')
patterns = ['_ids', '_ids_titles', '_payload', '_payload_titles', '_dense', '_sparse', '_sparse_titles']
unique_ids, unique_ids_titles, payload_docs, payload_docs_titles, dense_embeddings, sparse_embeddings, sparse_embeddings_titles = [
collect_files(embeddings_path, pattern) for pattern in patterns
]
st.write('Ingesting saved documents on disk into our Qdrant Vector Database...')
client.upsert(
collection_name,
points=Batch(
ids=unique_ids,
payloads=payload_docs,
vectors={
'text-dense': dense_embeddings,
'text-sparse': sparse_embeddings
}
)
)
client.upsert(
collection_name,
points=Batch(
ids=unique_ids_titles,
payloads=payload_docs_titles,
vectors={
'title-sparse': sparse_embeddings_titles
}
)
)
client.update_collection(
collection_name=collection_name,
optimizer_config=OptimizersConfigDiff(indexing_threshold=20000)
)
st.write('Building FSM Index for Agentic Behaviour of our AI...')
answer = generate_answer('aggro', client, collection_name, model, dense_model, sparse_model, '', 'Exact Search')
status.update(
label="Processing Complete!", state="complete", expanded=False
)
time.sleep(5)
container.empty()
return client, collection_name, llm, model, dense_model, sparse_model
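# Streamlit entry point: page configuration, navigation bar, documents table ('Documents' tab)
# and chat interface ('ChatBot' tab).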
if __name__ == '__main__':
st.set_page_config(page_title="Multipurpose AI Agent",layout="wide", initial_sidebar_state='auto')
client, collection_name, llm, model, dense_model, sparse_model = load_models_and_documents()
styles = {
"nav": {
"background-color": "rgb(204, 200, 194)",
},
"div": {
"max-width": "32rem",
},
"span": {
"border-radius": "0.5rem",
"color": "rgb(125, 102, 84)",
"margin": "0 0.125rem",
"padding": "0.4375rem 0.625rem",
},
"active": {
"background-color": "rgba(255, 255, 255, 0.25)",
},
"hover": {
"background-color": "rgba(255, 255, 255, 0.35)",
},
}
if 'menu_id' not in st.session_state:
st.session_state.menu_id = 'ChatBot'
st.session_state.menu_id = st_navbar(
['ChatBot', 'Documents'],
st.session_state.menu_id,
options={
'hide_nav': False,
'fix_shadow': False,
'use_padding': False
},
styles=styles
)
st.title('Multipurpose AI Agent')
#st.markdown("<h1 style='position: fixed; top: 0; left: 0; width: 100%; padding: 10px; text-align: left; color: black;'>Multipurpose AI Agent</h1>", unsafe_allow_html=True)
data_editor_path = os.path.join(os.getenv('HF_HOME'), 'documents')
if 'df' not in st.session_state:
if os.path.exists(data_editor_path):
st.session_state.df = pd.read_parquet(os.path.join(data_editor_path, 'data_editor.parquet.lz4'), engine='pyarrow')
else:
st.session_state.df = pd.DataFrame()
os.mkdir(data_editor_path)
st.session_state.df.to_parquet(
os.path.join(
data_editor_path,
'data_editor.parquet.lz4'
),
compression='lz4',
engine='pyarrow'
)
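# Callback for the documents data editor: when rows are deleted, remove the matching
# embedding files from disk and the corresponding points from the Qdrant collection.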
def on_change_data_editor(client, collection_name):
print(f'Check : {st.session_state.key_data_editor}')
if st.session_state.key_data_editor['deleted_rows']:
st.toast('Wait for deletion to complete...')
embeddings_path = os.path.join(os.getenv('HF_HOME'), 'embeddings')
for deleted_idx in st.session_state.key_data_editor['deleted_rows']:
name = st.session_state.df.loc[deleted_idx, 'document']
print(f'WHAT IS THAT : {name}')
os.remove(os.path.join(embeddings_path, name + '_ids.npy'))
os.remove(os.path.join(embeddings_path, name + '_payload.msgpack'))
os.remove(os.path.join(embeddings_path, name + '_dense.npz'))
os.remove(os.path.join(embeddings_path, name + '_sparse.npz'))
client.delete(
collection_name=collection_name,
points_selector=Filter(
must=[
FieldCondition(
key='url',
match=MatchValue(value=st.session_state.df.loc[deleted_idx, 'path'])
)
]
)
)
st.session_state.df = st.session_state.df.drop(deleted_idx)
st.toast('Deletion Completed !', icon='✅')
if st.session_state.menu_id == 'Documents':
st.session_state.df = st.data_editor(
st.session_state.df,
num_rows="dynamic",
use_container_width=True,
hide_index=True,
on_change=on_change_data_editor,
args=(client, collection_name),
key='key_data_editor',
column_config={
'icon': st.column_config.ImageColumn(
'Document'
),
"document": st.column_config.TextColumn(
"Name",
help="Name of the document",
required=True,
disabled=True
),
"type": st.column_config.SelectboxColumn(
'File type',
help='The file format extension of this document',
required=True,
options=[
'Powerpoint',
'Microsoft Word',
'Excel'
],
disabled=True
),
"path": st.column_config.TextColumn(
'Path',
help='Path to the document',
required=False,
disabled=True
),
"time": st.column_config.DatetimeColumn(
'Date and hour',
help='When this document was last ingested',
format="D MMM YYYY, h:mm a",
required=True,
disabled=True
),
"exact_search": st.column_config.CheckboxColumn(
'Exact Search',
help='Whether Exact Search is available for this document or not',
required=True,
default=False,
disabled=True
)
}
)
conversations_path = os.path.join(os.getenv('HF_HOME'), 'conversations')
try:
with open(conversations_path, 'rb') as fp:
packed_bytes = fp.read()
conversations: Dict[str, list] = msgpack.unpackb(packed_bytes, raw=False)
except Exception:
conversations = {}
if st.session_state.menu_id == 'ChatBot':
if 'id_chat' not in st.session_state:
st.session_state.id_chat = 'New Conversation'
if 'search_strategy' not in st.session_state:
st.session_state.search_strategy = 'Documents Only Search'
st.session_state.tooltip = 'The AI answers your questions using only the documents provided'
st.session_state.search_idx = 1
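# Always list 'New Conversation' first, followed by the saved conversation titles.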
def options_list(conversations: Dict[str, list]):
if st.session_state.id_chat == 'New Conversation':
return [st.session_state.id_chat] + list(conversations.keys())
else:
return ['New Conversation'] + list(conversations.keys())
with st.sidebar:
st.session_state.id_chat = st.selectbox(
label='Choose a Conversation',
options=options_list(conversations),
index=0,
key='chat_id'
)
st.session_state.messages = conversations[st.session_state.id_chat] if st.session_state.id_chat != 'New Conversation' else []
def update_selectbox_remove(conversations_path, conversations):
conversations.pop(st.session_state.chat_id)
with open(conversations_path, 'wb') as fp:
packed_bytes = msgpack.packb(conversations, use_bin_type=True)
fp.write(packed_bytes)
st.session_state.chat_id = 'New Conversation'
st.button(
'Delete Conversation',
use_container_width=True,
disabled=st.session_state.id_chat == 'New Conversation',
on_click=update_selectbox_remove,
args=(conversations_path, conversations)
)
st.divider()
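# Keep the search-strategy radio's tooltip and remembered index in sync with the selected option.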
def tooltip_change():
if st.session_state.search_id == 'Exact Search':
st.session_state.tooltip = 'Search the exact definition'
st.session_state.search_idx = 0
elif st.session_state.search_id == 'Documents Only Search':
st.session_state.tooltip = 'The AI answers your questions using only the documents provided'
st.session_state.search_idx = 1
elif st.session_state.search_id == 'Documents + LLM Search':
st.session_state.tooltip = """The AI answers your questions using the documents provided, and if it doesn't find the answer there, it falls back to its own internal knowledge"""
st.session_state.search_idx = 2
st.session_state.search_strategy = st.radio(
label='Choose a Search Strategy',
options=['Exact Search', 'Documents Only Search', 'Documents + LLM Search'],
index=st.session_state.search_idx,
on_change=tooltip_change,
key='search_id',
help=st.session_state.tooltip
)
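# When a new conversation receives its first message, ask the LLM for a short title,
# use it as the conversation key, and persist the conversation map with msgpack.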
def generate_conv_title(llm):
if st.session_state.chat_id == 'New Conversation':
output = llm.chat(
build_prompt_conv(),
SamplingParams(temperature=0.6,top_p=0.9, max_tokens=10, top_k=10)
)
print(f'OUTPUT : {output[0].outputs[0].text}')
st.session_state.chat_id = output[0].outputs[0].text.replace('"', '')
st.session_state.messages = []
torch.cuda.empty_cache()
conversations.update({st.session_state.chat_id: st.session_state.messages})
with open(conversations_path, 'wb') as fp:
packed_bytes = msgpack.packb(conversations, use_bin_type=True)
fp.write(packed_bytes)
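# Replay the saved conversation history, then handle the next user prompt through the RAG pipeline.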
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if prompt := st.chat_input(
"Message Video Game Assistant",
on_submit=generate_conv_title,
key='user_input',
args=(llm,)
):
st.chat_message("user").markdown(prompt)
st.session_state.messages.append({"role": "user", "content": prompt})
ai_response = generate_answer(prompt, client, collection_name, model, dense_model, sparse_model, "\n".join([f'{msg["role"]}: {msg["content"]}' for msg in st.session_state.messages]), st.session_state.search_strategy)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
# Simulated streaming: reveal the answer chunk by chunk with a trailing cursor glyph
for chunk in re.split(r'(\s+)', ai_response):
full_response += chunk
time.sleep(0.05)
message_placeholder.write(full_response + '▌')
message_placeholder.write(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response})
conversations.update({st.session_state.id_chat: st.session_state.messages})
with open(conversations_path, 'wb') as fp:
packed_bytes = msgpack.packb(conversations, use_bin_type=True)
fp.write(packed_bytes)
if "cached_files" not in st.session_state:
st.session_state.cached_files = []
with st.sidebar:
st.divider()
uploaded_files = st.file_uploader("Upload a file :", accept_multiple_files=True, type=['pptx', 'ppt'])
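# Ingestion pipeline for uploaded PowerPoint files: chunk the slides, embed them
# (dense + sparse), upsert into Qdrant and persist payloads, embeddings and ids to disk.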
for uploaded_file in uploaded_files:
if uploaded_file not in st.session_state.cached_files:
st.session_state.cached_files.append(uploaded_file)
file_name = os.path.basename(uploaded_file.name)
base_name, ext = os.path.splitext(file_name)
processing_time = datetime.now().strftime('%d %b %Y, %I:%M %p')
full_path = os.path.realpath(uploaded_file.name)
file_type = ext.lstrip('.')
d = {
'icon': icon_to_types[file_type][0],
'document': base_name,
'type': icon_to_types[file_type][1],
'path': full_path,
'time': [datetime.strptime(processing_time, '%d %b %Y, %I:%M %p')],
'exact_search': False
}
if (st.session_state.df.empty) or (base_name not in st.session_state.df['document'].tolist()):
st.session_state.df = pd.concat(
[st.session_state.df, pd.DataFrame(data=d)],
ignore_index=True
)
else:
idx = st.session_state.df.index[st.session_state.df['document']==base_name].tolist()[0]
st.session_state.df.loc[idx] = {
'icon': icon_to_types[file_type][0],
'document': base_name,
'type': icon_to_types[file_type][1],
'path': full_path,
'time': datetime.strptime(processing_time, '%d %b %Y, %I:%M %p'),
'exact_search': False
}
st.session_state.df.to_parquet(
os.path.join(
data_editor_path,
'data_editor.parquet.lz4'
),
compression='lz4',
engine='pyarrow'
)
documents, ids = ppt_chunker(uploaded_file, llm)
dense, tokens_count = asyncio.run(embed_text(dense_model[0], documents))
sparse = [s for s in sparse_model.embed(documents, 32)]
embeddings_path = os.path.join(os.getenv('HF_HOME'), 'embeddings')
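# Draw fresh UUID-based integer IDs (re-rolling any whose hex starts with '0')
# until one does not collide with the IDs already saved on disk.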
def generate_unique_id(existing_ids):
while True:
new_id = uuid.uuid4()
while new_id.hex[0] == '0':
new_id = uuid.uuid4()
new_id = new_id.int
if new_id not in existing_ids:
return new_id
for filename in os.listdir(embeddings_path):
if '_ids' in filename:
list_ids = np.load(os.path.join(embeddings_path, filename), allow_pickle=True).tolist()
for i, ids_ in enumerate(ids):
if ids_ in list_ids:
ids[i] = generate_unique_id(list_ids)
metadatas_list = [{'url': full_path}] * len(documents)
payload_docs = [{ 'text': documents[i], 'metadata': metadata } for i, metadata in enumerate(metadatas_list)]
print(f'LEN : {len(ids)}, {len(payload_docs)}, {len(dense)}, {len([SparseVector(indices=s.indices.tolist(), values=s.values.tolist()) for s in sparse])}')
client.upsert(
collection_name=collection_name,
points=Batch(
ids=ids,
payloads=payload_docs,
vectors={
'text-dense': dense,
'text-sparse': [SparseVector(indices=s.indices.tolist(), values=s.values.tolist()) for s in sparse]
}
)
)
payload_path = os.path.join(embeddings_path, base_name + '_payload.msgpack')
dense_path = os.path.join(embeddings_path, base_name + '_dense.npz')
sparse_path = os.path.join(embeddings_path, base_name + '_sparse.npz')
ids_path = os.path.join(embeddings_path, base_name + '_ids.npy')
with open(payload_path, "wb") as outfile_texts:
packed_payload = msgpack.packb(payload_docs, use_bin_type=True)
outfile_texts.write(packed_payload)
np.savez_compressed(dense_path, *dense)
max_index = 0
for embedding in sparse:
if len(embedding.indices) > 0:
max_index = max(max_index, max(embedding.indices))
sparse_matrices = []
for embedding in sparse:
data = np.array(embedding.values)
indices = np.array(embedding.indices)
indptr = np.array([0, len(data)])
matrix = csr_matrix((data, indices, indptr), shape=(1, max_index + 1))
sparse_matrices.append(matrix)
combined_sparse_matrix = vstack(sparse_matrices)
save_npz(sparse_path, combined_sparse_matrix)
np.save(ids_path, np.array(ids), allow_pickle=True)
st.toast('Document(s) Ingested !', icon='✅')