ThisIs-Developer committed
Commit 0dcdc5b
1 Parent(s): bd85d38

Update app.py

Files changed (1)
  1. app.py +85 -54
app.py CHANGED
@@ -1,45 +1,36 @@
-import asyncio
+import streamlit as st
 from langchain.document_loaders import PyPDFLoader, DirectoryLoader
 from langchain import PromptTemplate
 from langchain.embeddings import HuggingFaceEmbeddings
 from langchain.vectorstores import FAISS
 from langchain.llms import CTransformers
 from langchain.chains import RetrievalQA
-import chainlit as cl
 
 DB_FAISS_PATH = 'vectorstores/db_faiss'
 
 custom_prompt_template = """Use the following pieces of information to answer the user's question.
 If you don't know the answer, just say that you don't know, don't try to make up an answer.
-
 Context: {context}
 Question: {question}
-
 Only return the helpful answer below and nothing else.
 Helpful answer:
 """
 
 def set_custom_prompt():
-    """
-    Prompt template for QA retrieval for each vectorstore
-    """
     prompt = PromptTemplate(template=custom_prompt_template,
                             input_variables=['context', 'question'])
     return prompt
 
-# Retrieval QA Chain
 def retrieval_qa_chain(llm, prompt, db):
     qa_chain = RetrievalQA.from_chain_type(llm=llm,
-                                           chain_type='stuff',
-                                           retriever=db.as_retriever(search_kwargs={'k': 2}),
-                                           return_source_documents=True,
-                                           chain_type_kwargs={'prompt': prompt}
-                                           )
+                                           chain_type='stuff',
+                                           retriever=db.as_retriever(search_kwargs={'k': 2}),
+                                           return_source_documents=True,
+                                           chain_type_kwargs={'prompt': prompt}
+                                           )
     return qa_chain
 
-# Loading the model
 def load_llm():
-    # Load the locally downloaded model here
     llm = CTransformers(
         model="TheBloke/Llama-2-7B-Chat-GGML",
         model_type="llama",
@@ -48,8 +39,7 @@ def load_llm():
     )
     return llm
 
-# QA Model Function
-async def qa_bot():
+def qa_bot(query):
     embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2",
                                        model_kwargs={'device': 'cpu'})
     db = FAISS.load_local(DB_FAISS_PATH, embeddings)
@@ -57,42 +47,83 @@ async def qa_bot():
     qa_prompt = set_custom_prompt()
     qa = retrieval_qa_chain(llm, qa_prompt, db)
 
-    return qa
-
-# Output function
-async def final_result(query):
-    qa_result = await qa_bot()
-    response = await qa_result({'query': query})
-    return response
-
-# chainlit code
-@cl.on_chat_start
-async def start():
-    chain = await qa_bot()
-    # msg = cl.Message(content="Starting the bot...")
-    # await msg.send()
-    # msg.content = "Hi, Welcome to Medical Bot. What is your query?"
-    # await msg.update()
-
-    cl.user_session.set("chain", chain)
-
-@cl.on_message
-async def main(message):
-    chain = cl.user_session.get("chain")
-    cb = cl.AsyncLangchainCallbackHandler(
-        stream_final_answer=True, answer_prefix_tokens=["FINAL", "ANSWER"]
-    )
-    cb.answer_reached = True
-    res = await chain.acall(message.content, callbacks=[cb])
-    answer = res["result"]
-    sources = res["source_documents"]
-
-    if sources:
-        answer += f"\nSources:" + str(sources)
-    else:
-        answer += "\nNo sources found"
-
-    await cl.Message(content=answer).send()
+    # Implement the question-answering logic here
+    response = qa({'query': query})
+    return response['result']
+
+def add_vertical_space(spaces=1):
+    for _ in range(spaces):
+        st.markdown("---")
+
+def main():
+    st.set_page_config(page_title="Llama-2-GGML Medical Chatbot")
+
+    with st.sidebar:
+        st.title('Llama-2-GGML Medical Chatbot! 🚀🤖')
+        st.markdown('''
+        ## About
+
+        The Llama-2-GGML Medical Chatbot uses the **Llama-2-7B-Chat-GGML** model and was trained on medical data from **"The GALE ENCYCLOPEDIA of MEDICINE"**.
+
+        ### 🔄Bot evolving, stay tuned!
+        ## Useful Links 🔗
+        - **Model:** [Llama-2-7B-Chat-GGML](https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML) 📚
+        - **GitHub:** [ThisIs-Developer/Llama-2-GGML-Medical-Chatbot](https://github.com/ThisIs-Developer/Llama-2-GGML-Medical-Chatbot) 💬
+        ''')
+        add_vertical_space(1)  # Adjust the number of spaces as needed
+        st.write('Made by [@ThisIs-Developer](https://huggingface.co/ThisIs-Developer)')
+
+    st.title("Llama-2-GGML Medical Chatbot")
+    st.markdown(
+        """
+        <style>
+        .chat-container {
+            display: flex;
+            flex-direction: column;
+            height: 400px;
+            overflow-y: auto;
+            padding: 10px;
+            color: white; /* Font color */
+        }
+        .user-bubble {
+            background-color: #007bff; /* Blue color for user */
+            align-self: flex-end;
+            border-radius: 10px;
+            padding: 8px;
+            margin: 5px;
+            max-width: 70%;
+            word-wrap: break-word;
+        }
+        .bot-bubble {
+            background-color: #363636; /* Slightly lighter background color */
+            align-self: flex-start;
+            border-radius: 10px;
+            padding: 8px;
+            margin: 5px;
+            max-width: 70%;
+            word-wrap: break-word;
+        }
+        </style>
+        """,
+        unsafe_allow_html=True)
+
+    conversation = st.session_state.get("conversation", [])
+
+    query = st.text_input("Ask your question here:", key="user_input")
+    if st.button("Get Answer"):
+        if query:
+            with st.spinner("Processing your question..."):  # Display the processing message
+                conversation.append({"role": "user", "message": query})
+                # Call your QA function
+                answer = qa_bot(query)
+                conversation.append({"role": "bot", "message": answer})
+                st.session_state.conversation = conversation
+        else:
+            st.warning("Please input a question.")
+
+    chat_container = st.empty()
+    chat_bubbles = ''.join([f'<div class="{c["role"]}-bubble">{c["message"]}</div>' for c in conversation])
+    chat_container.markdown(f'<div class="chat-container">{chat_bubbles}</div>', unsafe_allow_html=True)
 
 if __name__ == "__main__":
-    asyncio.run(cl.main())
+    main()
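
Note that both the old and the new app.py only ever read the index: FAISS.load_local(DB_FAISS_PATH, embeddings) assumes vectorstores/db_faiss already exists, and the PyPDFLoader/DirectoryLoader imports are never used in this file. The index is presumably built by a separate ingest step elsewhere in the repository. A minimal sketch of such a step, assuming a hypothetical data/ directory of source PDFs and made-up chunking parameters:

# ingest.py - hypothetical one-off indexing script; DATA_PATH, the glob, and
# the chunk sizes are assumptions, not part of this commit
from langchain.document_loaders import PyPDFLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS

DATA_PATH = 'data/'                      # assumed location of the source PDFs
DB_FAISS_PATH = 'vectorstores/db_faiss'  # must match the path app.py loads

def create_vector_db():
    # Load every PDF under DATA_PATH with the loaders app.py already imports
    loader = DirectoryLoader(DATA_PATH, glob='*.pdf', loader_cls=PyPDFLoader)
    documents = loader.load()

    # Split pages into overlapping chunks so each embedding covers one passage
    splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    texts = splitter.split_documents(documents)

    # Embed with the same model app.py queries with, so the vectors are comparable
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2",
                                       model_kwargs={'device': 'cpu'})
    db = FAISS.from_documents(texts, embeddings)
    db.save_local(DB_FAISS_PATH)

if __name__ == "__main__":
    create_vector_db()

Running a step like this once before starting the app would create the files that FAISS.load_local expects.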
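A launch and performance note on the new version: the script is now started with streamlit run app.py (the removed asyncio.run(cl.main()) was not how the old app started either; Chainlit apps are normally launched with chainlit run app.py). Streamlit re-executes the whole script on every interaction, and because qa_bot(query) now builds the embeddings, the FAISS index, and the LLM inside the call, every press of "Get Answer" pays the full model-loading cost, where the Chainlit version built the chain once per session in on_chat_start. A sketch of one way to get that behaviour back, using st.cache_resource (Streamlit 1.18+); a suggestion assuming it lives in app.py next to the committed helpers, not part of this commit:

import streamlit as st

@st.cache_resource  # build the chain once per process and reuse it across reruns
def get_chain():
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2",
                                       model_kwargs={'device': 'cpu'})
    db = FAISS.load_local(DB_FAISS_PATH, embeddings)
    return retrieval_qa_chain(load_llm(), set_custom_prompt(), db)

def qa_bot(query):
    # Same interface as the committed version, without the per-call setup cost
    return get_chain()({'query': query})['result']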