from dotenv import load_dotenv
import streamlit as st
import os
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
from streamlit_chat import message
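# NOTE: these import paths match older LangChain releases (pre-0.1); newer
# versions move these classes into the langchain_community / langchain_openai
# packages, so pin LangChain accordingly or update the imports.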

os.environ["OPENAI_API_KEY"] = "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"


def main():
    load_dotenv()
    st.header(" LLM CHATBOT ON PFD FILES")
    st.sidebar.header("Instructions")
    st.sidebar.info(
        '''This is a web application that allows you to interact with
        your PDF files
        '''
    )
    st.sidebar.info('''Enter a query in the text box and press Enter
        to receive a response''')

    st.sidebar.info('''
    You can also use this app with your own PDF data
    ''')
    # st.set_page_config(page_title="Ask your PDF")  # must be the first Streamlit call if enabled
    st.header("Ask your PDF files some questions 💬")

    # upload file
    pdf = st.file_uploader("Upload your PDF File Below", type="pdf")

    # extract the text
    if pdf is not None:
        pdf_reader = PdfReader(pdf)
        text = ""
        for page in pdf_reader.pages:
            # extract_text() can return None for pages with no extractable text
            text += page.extract_text() or ""

        # split into chunks
        text_splitter = CharacterTextSplitter(
            separator="\n",
            chunk_size=1000,
            chunk_overlap=200,
            length_function=len
        )
        chunks = text_splitter.split_text(text)

        # create embeddings
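        # each chunk is embedded with OpenAI's embedding model and stored in an
        # in-memory FAISS index so relevant chunks can be retrieved by similarity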
        embeddings = OpenAIEmbeddings()
        knowledge_base = FAISS.from_texts(chunks, embeddings)

        # show user input
        user_question = st.text_input("Ask a question about your PDF:")
        if user_question:
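            # retrieve the chunks most similar to the question (top 4 by default)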
            docs = knowledge_base.similarity_search(user_question)

            llm = OpenAI()
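            # the "stuff" chain concatenates the retrieved chunks into a single prompt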
            chain = load_qa_chain(llm, chain_type="stuff")
            with get_openai_callback() as cb:
                response = chain.run(input_documents=docs, question=user_question)
                print(cb)  # token usage and estimated cost, logged to the server console

            # st.write(response)
            message(response)


if __name__ == '__main__':
    main()
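
# To run locally (assuming this file is saved as app.py and the dependencies
# above are installed): streamlit run app.py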