Mr-TD committed on
Commit
1bdec92
1 Parent(s): ea541a2

Upload 7 files

Browse files
Files changed (8) hide show
  1. .gitattributes +1 -0
  2. Kia_EV6.pdf +3 -0
  3. PDF_Reader.py +31 -0
  4. QA_Bot.py +37 -0
  5. QnA.py +10 -0
  6. app.py +35 -0
  7. requirements.txt +9 -0
  8. td-logo.png +0 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Kia_EV6.pdf filter=lfs diff=lfs merge=lfs -text
Kia_EV6.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:363f7482ab9c73eae7a92ba6a45162e787ab9a57679656f150d3293108f20ffb
3
+ size 8591101
PDF_Reader.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import PyPDF2
2
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
3
+ from langchain.embeddings import HuggingFaceBgeEmbeddings
4
+ from langchain.vectorstores import FAISS
5
+
6
def read_pdf(uploaded_file):
    """Extract the full text of a PDF.

    Args:
        uploaded_file: A file-like object (e.g. a Streamlit UploadedFile)
            containing the PDF bytes.

    Returns:
        str: The concatenated text of every page. Pages with no
        extractable text contribute nothing.
    """
    pdf_reader = PyPDF2.PdfReader(uploaded_file)
    text = ""
    for page in pdf_reader.pages:
        # extract_text() can return None/"" for image-only or empty pages;
        # guard so the concatenation never raises TypeError.
        text += page.extract_text() or ""
    return text
12
+
13
def Chunks(docs):
    """Split raw document text into overlapping chunks for embedding.

    Args:
        docs (str): The full document text.

    Returns:
        list[str]: Chunks of at most 1000 characters, each overlapping
        its neighbor by 100 characters.
    """
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=100,
    )
    return splitter.split_text(docs)
21
+
22
+
23
def PDF_4_QA(file):
    """Build a FAISS vector store from a PDF for question answering.

    Args:
        file: File-like object with the PDF content.

    Returns:
        FAISS: A vector store over the embedded text chunks.
    """
    # Read the PDF and cut it into overlapping chunks.
    pdf_chunks = Chunks(docs=read_pdf(file))

    # CPU-only MiniLM sentence embeddings.
    embeddings = HuggingFaceBgeEmbeddings(
        model_name='sentence-transformers/all-MiniLM-L6-v2',
        model_kwargs={'device': 'cpu'},
    )
    return FAISS.from_texts(pdf_chunks, embeddings)
QA_Bot.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from QnA import Q_A
3
+ import re,time
4
+
5
+
6
def QA_Bot(vectorstore):
    """Render the Streamlit chat UI and answer questions over *vectorstore*.

    Keeps the conversation in ``st.session_state.messages`` so it survives
    Streamlit reruns, and streams the assistant reply chunk-by-chunk with a
    typing-cursor effect.

    Args:
        vectorstore: Retriever-capable vector store forwarded to ``Q_A``.
    """
    st.title("Q&A Bot")
    # Initialize chat history on the first run of this session.
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Replay prior turns on every app rerun.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # React to user input.
    if prompt := st.chat_input("What is up?"):
        # Display and record the user's message.
        st.chat_message("user").markdown(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})

        # Was: response = f"Echo: {ai_response}" — the "Echo: " prefix was
        # leftover demo scaffolding that polluted every real answer.
        response = Q_A(vectorstore, prompt)

        # Stream the assistant response in the chat message container.
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            full_response = ""
            # re.split with a capturing group keeps the whitespace tokens,
            # so appending chunks verbatim reconstructs the exact response
            # (the old `chunk + " "` doubled every space).
            for chunk in re.split(r'(\s+)', response):
                full_response += chunk
                time.sleep(0.01)
                # Blinking cursor simulates typing while streaming.
                message_placeholder.markdown(full_response + "▌")
            # Final render without the cursor artifact.
            message_placeholder.markdown(full_response)
        # Record the assistant response in chat history.
        st.session_state.messages.append({"role": "assistant", "content": full_response})
QnA.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.chains import RetrievalQA
2
+ from Api_Key import google_plam
3
+ from langchain.llms import GooglePalm
4
+
5
def Q_A(vectorstore, question):
    """Answer *question* with retrieval-augmented QA over *vectorstore*.

    Args:
        vectorstore: Vector store exposing ``as_retriever()``.
        question (str): Natural-language question.

    Returns:
        str: The LLM's answer.
    """
    # NOTE(review): `google_plam` is the (misspelled) name exported by the
    # project's Api_Key module; it must be referenced as-is.
    llm = GooglePalm(google_api_key=google_plam, temperature=0.5)
    chain = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=vectorstore.as_retriever(),
    )
    return chain.run(question)
app.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from QA_Bot import QA_Bot
3
+ from PDF_Reader import PDF_4_QA
4
+ from PIL import Image
5
+
6
+ # Streamlit app
7
def main():
    """Entry point: page setup, sidebar upload controls, and the chat UI."""
    # Page icon
    icon = Image.open('td-logo.png')

    # Page config
    st.set_page_config(
        page_title="Q&A ChatBot",
        page_icon=icon,
        layout="wide",
    )

    # Sidebar: branding, a demo PDF to download, and the upload widget.
    company_logo_path = 'td-logo.png'
    st.sidebar.image(company_logo_path, width=50)
    st.sidebar.title("Upload PDF")
    st.sidebar.write("Download Demo PDF file from Below....")
    with open("Kia_EV6.pdf", "rb") as file:
        st.sidebar.download_button(
            label="Download PDF",
            data=file,
            file_name="Kia_EV6.pdf",
        )

    uploaded_file = st.sidebar.file_uploader("Choose a PDF file", type="pdf")
    if uploaded_file is not None:
        st.sidebar.success("File uploaded successfully.")
        # Index the uploaded PDF, then hand the store to the chat bot.
        QA_Bot(PDF_4_QA(uploaded_file))

if __name__ == '__main__':
    main()
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ langchain==0.0.339
2
+ streamlit==1.29.0
3
+ google-ai-generativelanguage==0.4.0
4
+ google-generativeai==0.3.2
5
+ google-cloud-aiplatform==1.38.1
6
+ faiss-cpu==1.7.4
7
+ tiktoken==0.5.2
8
+ PyPDF2==3.0.1
9
+ sentence-transformers==2.2.2
td-logo.png ADDED