Khaledmd12 committed on
Commit 5586d2d
1 Parent(s): 78827ae

Delete app.py

Files changed (1)
  1. app.py +0 -347
app.py DELETED
@@ -1,347 +0,0 @@
# -*- coding: utf-8 -*-
"""Mech_bot_monitor_eval.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1_Y_wngb4w6axUfB6jOwRv_DR_DPzx_1M
"""

# Commented out IPython magic to ensure Python compatibility.
# %pip install --upgrade langchain-together

"""# 🔧🚗**Mech-Bot 🤖🔧: Automotive Mechanic Assistant**

Welcome to Mech-Bot, your personal automotive mechanic assistant. Mech-Bot combines RAG (Retrieval-Augmented Generation) with LangChain to provide comprehensive support for diagnosing and repairing vehicle issues. The project aims to make mechanical expertise accessible, reliable, and efficient, addressing common challenges faced by car owners and mechanics.

😉**Key features include:**

- AI diagnostic support
- Easy handling of repair manuals
- Real-time assistance

📚🔧**Data to Use:**
- Repair manuals: detailed, step-by-step instructions on car maintenance and repair, available in digital formats such as PDF and loaded with PyMuPDF.

**🦾Technology Stack for Mech-Bot:**

### **Part 1: Tools and Dependencies**
"""

# pip install -U bitsandbytes

import os
os.environ["WEAVE_TRACE_LANGCHAIN"] = "true"
import weave
import wandb

# NOTE: prefer loading this key from an environment variable (e.g. WANDB_API_KEY)
# instead of hardcoding it.
wandb.login(key='e27ce4c7835957d03d50e03f0bd8182ace127cbd')
weave.init('Mech-Bot-Evaluation')

"""### **Part 2: Data Collection and Preprocessing**"""

from langchain.document_loaders import PyMuPDFLoader

all_documents = ""

# Load PDF data
pdf_loader = PyMuPDFLoader("Mech-bot_DATA(text).pdf")
pdf_documents = pdf_loader.load()

# Extract text from the PDF documents
for pdf_doc in pdf_documents:
    all_documents += pdf_doc.page_content

"""### **Part 3: Split the Text**"""

from langchain.text_splitter import RecursiveCharacterTextSplitter

# Split documents
text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=31)
splits = text_splitter.split_text(all_documents)

# Output the split content (for demonstration purposes)
for i, split in enumerate(splits):
    print(f"Split {i + 1}: {split[:100]}...")  # Print the first 100 characters of each split

"""### **Part 4: Save Embeddings in a Vector Store**"""

from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.schema import Document

# Convert split strings to Document objects
documents = [Document(page_content=split) for split in splits]

# Add source metadata to each document
for doc in documents:
    doc.metadata["source"] = "https://club.autodoc.co.uk/manuals"

# Create the FAISS vector store
db = FAISS.from_documents(documents,
                          HuggingFaceEmbeddings(model_name='BAAI/bge-base-en-v1.5'))

"""### **Part 5: Create the Retriever**"""

retriever = db.as_retriever(
    search_type="similarity",
    search_kwargs={'k': 4}
)

# Inspect the prepared documents (verbose; for debugging only)
print(documents)

# Manually test retrieval to verify correct chunk fetching
query = "What are the tools needed to change brake discs on a car?"
retrieved_docs = db.similarity_search(query, k=5)

# Print out the content of retrieved documents to verify
for idx, doc in enumerate(retrieved_docs):
    print(f"Document {idx + 1}:", doc.page_content)

"""### **Part 6: Load the Llama Model + Quantization**"""

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

model_name = "meta-llama/Llama-2-13b-chat-hf"

# 4-bit NF4 quantization with double quantization to reduce the 13B model's memory footprint
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16
)

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    torch_dtype=torch.bfloat16,
    # device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

"""### **Part 7: Load the Llama Guard Model**"""

model_id = "meta-llama/LlamaGuard-7b"
guard_tokenizer = AutoTokenizer.from_pretrained(model_id)

bnb_config_guard = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    # Allow offloading parts of the model to CPU in fp32
    llm_int8_enable_fp32_cpu_offload=True
)
guard_model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config_guard,
    torch_dtype=torch.bfloat16,
    # device_map="auto",
)

"""### **Part 8: Setting Up a Text Generation Pipeline with HuggingFace and LangChain**"""

from langchain.llms import HuggingFacePipeline
from transformers import pipeline

text_generation_pipeline = pipeline(
    model=model,
    tokenizer=tokenizer,
    task="text-generation",
    temperature=0.2,
    do_sample=True,
    repetition_penalty=1.1,
    return_full_text=False,
    max_new_tokens=512,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
)

llm = HuggingFacePipeline(pipeline=text_generation_pipeline)
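
# A quick smoke test (a minimal sketch, not part of the original script): confirm
# the wrapped pipeline generates text before wiring it into the chains below.
print(llm.invoke("List three common causes of brake squeal:")[:200])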

"""### **Part 9: Creating a History-Aware Question Retriever with LangChain**"""

from langchain.chains import create_history_aware_retriever
from langchain.prompts.chat import MessagesPlaceholder
from langchain.prompts import ChatPromptTemplate

contextualize_q_system_prompt = (
    "Given a chat history and the latest user question "
    "which might reference context in the chat history, "
    "formulate a standalone question which can be understood "
    "without the chat history. Do NOT answer the question, "
    "just reformulate it if needed and otherwise return it as is."
)
contextualize_q_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", contextualize_q_system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ]
)
history_aware_retriever = create_history_aware_retriever(
    llm, retriever, contextualize_q_prompt
)
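
# Illustration (a minimal sketch, not part of the original script): with prior turns
# in the history, the chain first rewrites a follow-up into a standalone question and
# then retrieves against it, so "that" below resolves to changing brake discs.
from langchain_core.messages import AIMessage, HumanMessage

example_history = [
    HumanMessage(content="How do I change the brake discs on my car?"),
    AIMessage(content="You will need a jack, a lug wrench, and new brake discs."),
]
example_docs = history_aware_retriever.invoke(
    {"input": "How long does that usually take?", "chat_history": example_history}
)
print(f"Retrieved {len(example_docs)} chunks for the contextualized question")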

"""### **Part 10: Creating the Mech-Bot QA Chain with LangChain**"""

from langchain.chains.combine_documents import create_stuff_documents_chain

system_prompt = """
You are Mech-bot, a highly knowledgeable car mechanic assistant. Your expertise covers a wide range of automotive issues, including diagnostics, repairs, maintenance, and technical advice. Please answer the question using only the information provided in the context.

If the answer is not available in the context, respond with 'I'm sorry, I don't have enough information to answer that question.'

If the question is 'Who are you?' or similar, respond with 'I am Mech-bot, a specialized assistant in car mechanics. Please ask questions related to car mechanics, and I'll do my best to assist you.'

If the question is unrelated to car mechanics, respond with 'I'm sorry, I specialize in car mechanics. Please ask questions related to car mechanics, and I'll do my best to assist you.'

{context}
"""
qa_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ]
)
question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)

from langchain.chains import create_retrieval_chain
rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
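
# A minimal sketch (not part of the original script) of what the chain returns: a dict
# whose 'answer' key holds the generated text and whose 'context' key holds the
# retrieved Documents, the same keys answer_question() reads below.
example_out = rag_chain.invoke(
    {"input": "What tools do I need to change brake discs?", "chat_history": []}
)
print(example_out["answer"])
print(len(example_out["context"]), "supporting chunks")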

import warnings

# Ignore all warnings
warnings.filterwarnings('ignore')

"""### **Part 11: Moderating Chat Messages with a Template and Guard Model**"""

@weave.op
def moderate_with_template(chat):
    input_ids = guard_tokenizer.apply_chat_template(chat, return_tensors="pt")
    output = guard_model.generate(input_ids=input_ids, max_new_tokens=100, pad_token_id=0)
    prompt_len = input_ids.shape[-1]
    # Strip surrounding whitespace so the verdict compares cleanly against 'safe'
    return guard_tokenizer.decode(output[0][prompt_len:], skip_special_tokens=True).strip()
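
# Hedged reference (from the Llama Guard model card, not this script): the decoded
# verdict is the string 'safe', or 'unsafe' followed by a newline and the violated
# category code (e.g. 'unsafe\nO3'), which is why callers compare against 'safe'.
verdict = moderate_with_template([{"role": "user", "content": "How do I bleed brakes?"}])
print("safe" if verdict == "safe" else f"flagged: {verdict}")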

"""# Defining the Evaluation and Monitoring Functions"""

from ragas import evaluate
from datasets import Dataset
from ragas.metrics import (
    answer_relevancy
)

emb = HuggingFaceEmbeddings(model_name='BAAI/bge-base-en-v1.5')

from langchain_together import Together
from getpass import getpass

# api_key = getpass("Enter your Together AI API key: ")

# NOTE: prefer loading this key from an environment variable instead of hardcoding it.
eval_llm = Together(  # using Together AI for evaluation
    model="meta-llama/Meta-Llama-3-8B-Instruct-Turbo",
    together_api_key='b8c80a28c275fc6e52e89964a81e68e82e5d1ce6471836c9233a7502d9c1343d'
)

def start_monitor():
    run = wandb.init(
        project="Mech-Bot-Evaluation",
        # name=f"run_{}",
    )
    return run

@weave.op
def evaluate_response(question, answer, context):
    context_contents = [item.page_content for item in context]
    output = Dataset.from_dict({"question": [question], "answer": [answer], "contexts": [context_contents]})
    score = evaluate(output, llm=eval_llm, embeddings=emb, metrics=[answer_relevancy])
    return score
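
# Usage sketch (not part of the original script): ragas returns a result object that
# can be indexed by metric name or converted to a DataFrame, as answer_question() does below.
demo_score = evaluate_response(
    question="What tools do I need to change brake discs?",
    answer="You typically need a jack, a lug wrench, a socket set, and new discs.",
    context=retrieved_docs,  # Documents from the part-5 retrieval test
)
print(demo_score["answer_relevancy"])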

"""### **Part 12: Question Answering Function with Content Moderation**

The **answer_question** function answers user questions while enforcing content appropriateness through moderation checks.

**Moderate the user question:** checks whether the question is safe. If not, it returns an apologetic message.

**Generate the AI response:** uses the RAG chain to generate an answer when the question is safe.

**Update and moderate the chat history:** appends the question and answer to the chat history, then checks whether the AI response is appropriate. If not, it apologizes and indicates it will attempt a better answer.
"""

@weave.op
def invoking(question, chat_history):
    return rag_chain.invoke({"input": question, "chat_history": chat_history})

from langchain_core.messages import AIMessage, HumanMessage

chat_history = []

@weave.op
def answer_question(question, history):
    # `history` is supplied by Gradio; the chain uses the module-level chat_history
    run = start_monitor()  # Start monitoring
    # Check if the question is safe using Llama Guard
    chat = [{"role": "user", "content": question}]

    if moderate_with_template(chat) != 'safe':
        run.finish()  # Close the run before the early return
        return "I'm sorry, but I can't respond to that question as it may contain inappropriate content."

    ai_msg = invoking(question, chat_history)  # Generate AI response
    chat_history.extend(
        [
            HumanMessage(content=question),
            AIMessage(content=ai_msg["answer"]),
        ]
    )
    system_response = [
        {"role": "user", "content": question},
        {"role": "assistant", "content": ai_msg["answer"]},
    ]
    if moderate_with_template(system_response) != 'safe':
        run.finish()  # Close the run before the early return
        return "I generated a response, but it may contain inappropriate content. Let me try again with a more appropriate answer."
    else:
        # Evaluate the response
        score = evaluate_response(question, ai_msg["answer"], ai_msg['context'])
        if score['answer_relevancy'] < 0.7:
            a = score['answer_relevancy']
            run.alert(title='Low Answer Relevancy',
                      text=f'Answer relevancy {a} is below the acceptable threshold')
        # Log the run
        wandb.log(score.to_pandas().to_dict())
        run.finish()
        return ai_msg["answer"]

"""### **Part 13: Interface**"""

import gradio as gr

# Create the chat interface
iface = gr.ChatInterface(
    answer_question,  # Use the improved answer_question function
    title="Mech-bot: Your Car Mechanic Assistant",
    description="Ask any car mechanic-related questions, and Mech-bot will try its best to assist you.",
    submit_btn="Ask",
    clear_btn="Clear Chat"
)

# Launch the Gradio interface
iface.launch(debug=True)