import os
from operator import itemgetter

from langchain_community.document_loaders import PyMuPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter
from langchain_qdrant import QdrantVectorStore
from langchain.prompts import ChatPromptTemplate
from langchain_openai.chat_models import ChatOpenAI
from langchain_openai.embeddings import OpenAIEmbeddings
from langchain_core.output_parsers import StrOutputParser
import chainlit as cl



# Load all the documents in the directory
documents = []
directory = "data/"

for filename in os.listdir(directory):
    if filename.endswith(".pdf"):  # Check if the file is a PDF
        file_path = os.path.join(directory, filename)
        loader = PyMuPDFLoader(file_path)
        docs = loader.load()
        documents.extend(docs)
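# PyMuPDFLoader returns one Document per PDF page, with the source path and
# page number kept in each Document's metadata.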

# Split the documents
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=500,
    chunk_overlap=40,
    length_function=len,
    is_separator_regex=False
)
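# chunk_size/chunk_overlap above are counted in characters (length_function=len),
# not tokens; for English text ~500 characters is very roughly 125 tokens.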
rag_documents = text_splitter.split_documents(documents)

# # Alternative chunking: tokens (more accurate for OpenAI models)
# token_text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
#     encoding_name="cl100k_base", chunk_size=100, chunk_overlap=0
# )
# token_rag_documents = token_text_splitter.split_documents(documents)
# # TODO ^^ test

# Alternative chunking: split on blank lines with CharacterTextSplitter
# (not used by the chain below; kept for comparison)
character_text_splitter = CharacterTextSplitter(
    separator="\n\n",
    chunk_size=1000,
    chunk_overlap=200,
    length_function=len,
    is_separator_regex=False,
)
character_rag_documents = character_text_splitter.split_documents(documents)

embedding = OpenAIEmbeddings(model="text-embedding-3-small")
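# OpenAIEmbeddings and ChatOpenAI both pick up credentials from the
# OPENAI_API_KEY environment variable (or an explicit api_key argument).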

# Build the vector store from the recursively split chunks
vectorstore = QdrantVectorStore.from_documents(
    rag_documents,
    embedding,
    location=":memory:",
    collection_name="Implications of AI",
)
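# location=":memory:" runs Qdrant in-process, so the index is rebuilt from the
# PDFs on every startup; point it at a running Qdrant server to persist the collection.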

retriever = vectorstore.as_retriever()
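# as_retriever() defaults to similarity search returning the top 4 chunks per query;
# pass search_kwargs={"k": ...} to control how much context reaches the prompt.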
llm = ChatOpenAI(model="gpt-4")


# @cl.cache_resource
@cl.on_chat_start
async def start_chat():

    template = """
    Use the provided context to answer the user's query.
    You may not answer the user's query unless there is specific context in the following text.
    If you do not know the answer, or cannot answer, please respond with "I don't know".
    Question:
    {question}
    Context:
    {context}
    Answer:
    """

    prompt = ChatPromptTemplate.from_template(template)

    # The leading dict is coerced into a RunnableParallel: the user's question is
    # routed through the retriever to fill {context}, and also passed along as {question}.
    base_chain = (
        {"context": itemgetter("question") | retriever, "question": itemgetter("question")}
        | prompt | llm | StrOutputParser()
    )

    cl.user_session.set("chain", base_chain)


@cl.on_message
async def main(message: cl.Message):
    chain = cl.user_session.get("chain")

    msg = cl.Message(content="")

    # Stream the chain's output token by token instead of waiting for the full answer
    async for chunk in chain.astream({"question": message.content}):
        await msg.stream_token(chunk)

    await msg.send()
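# Launch with the Chainlit CLI, e.g.:
#   chainlit run <this_file>.py -w
# The -w flag reloads the app when the file changes; the UI is served on
# http://localhost:8000 by default.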