# -*- coding: utf-8 -*-
"""Mech_bot_monitor_eval.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1_Y_wngb4w6axUfB6jOwRv_DR_DPzx_1M
"""
# Commented out IPython magic to ensure Python compatibility.
# %pip install --upgrade langchain-together
"""# 🔧🚗**Mech-Bot 🤖🔧: Automotive Mechanic Assistant**
Welcome to Mech-Bot, your personal automotive mechanic assistant. Mech-Bot leverages RAG (Retrieval-Augmented Generation) and the capabilities of LangChain to provide comprehensive support for diagnosing and repairing vehicle issues. This project aims to make mechanical expertise accessible, reliable, and efficient, addressing common challenges faced by car owners and mechanics.
😉**Key features include:**
- AI Diagnostic Support.
- Ease of dealing with Repair Manuals.
- Real-Time Assistance
📚🔧**Data to Use:**
- Repair Manuals: Detailed, step-by-step instructions on car maintenance and repair, available in digital formats like PDFs, and loaded using PyMuPDF.
**🦾Technology Stack for Mech-Bot:**
### **part 1: Tools and Dependencies**
"""
# pip install -U bitsandbytes
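# The rest of the notebook also assumes these packages are available (an
# illustrative, unpinned list inferred from the imports used below):
# %pip install -U transformers accelerate langchain langchain-community faiss-cpu sentence-transformers ragas datasets gradio weave wandb pymupdf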
import os
import wandb
import weave

os.environ["WEAVE_TRACE_LANGCHAIN"] = "true"

# Authenticate with W&B; read the key from the environment rather than hard-coding it
wandb.login(key=os.environ["WANDB_API_KEY"])
weave.init('Mech-Bot-Evaluation')
"""### **part 2: Data Collection and Preprocessing**"""
from langchain.document_loaders import PyMuPDFLoader

# Load PDF data
pdf_loader = PyMuPDFLoader("Mech-bot_DATA(text).pdf")
pdf_documents = pdf_loader.load()

# Concatenate the text of every page into a single string
all_documents = ""
for pdf_doc in pdf_documents:
    all_documents += pdf_doc.page_content
""" ### **Part 3: split the text**"""
from langchain.text_splitter import RecursiveCharacterTextSplitter
# Split documents
text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=31)
splits = text_splitter.split_text(all_documents)
# Output the split content (for demonstration purposes)
for i, split in enumerate(splits):
    print(f"Split {i + 1}: {split[:100]}...")  # Print the first 100 characters of each split
"""### **part 4: save embeddings in a vector store**"""
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.schema import Document
# Convert split strings to Document objects
documents = [Document(page_content=split) for split in splits]
# Add source metadata to each document
for doc in documents:
    doc.metadata["source"] = "https://club.autodoc.co.uk/manuals"

# Create the FAISS vector store
db = FAISS.from_documents(
    documents,
    HuggingFaceEmbeddings(model_name='BAAI/bge-base-en-v1.5'),
)
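# Optional: persist the index so embeddings aren't recomputed on every run. A minimal
# sketch using FAISS's built-in (de)serialization; "mech_bot_faiss_index" is an
# arbitrary directory name chosen here for illustration.
# db.save_local("mech_bot_faiss_index")
# db = FAISS.load_local(
#     "mech_bot_faiss_index",
#     HuggingFaceEmbeddings(model_name='BAAI/bge-base-en-v1.5'),
#     allow_dangerous_deserialization=True,  # required by newer langchain versions
# )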
"""### **part 5: create the retriever**"""
retriever = db.as_retriever(
    search_type="similarity",
    search_kwargs={'k': 4},
)
# Spot-check a few of the Document objects rather than printing the whole list
print(documents[:3])
# Manually test retrieval to verify correct chunk fetching
query = "what are the tools needed to change Brake Discs on a Car?"
retrieved_docs = db.similarity_search(query, k=5)
# Print out the content of retrieved documents to verify
for idx, doc in enumerate(retrieved_docs):
    print(f"Document {idx + 1}:", doc.page_content)
"""### **part 6: load the llama model + Quantization**"""
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

model_name = "meta-llama/Llama-2-13b-chat-hf"
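# Note: Llama-2 and Llama Guard are gated models on the Hugging Face Hub. You must
# accept Meta's license and authenticate before `from_pretrained` will succeed.
# A minimal sketch, assuming your token is stored in the HF_TOKEN environment variable:
# from huggingface_hub import login
# login(token=os.environ["HF_TOKEN"])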
# 4-bit NF4 quantization so the 13B model fits in a single GPU's memory
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(model_name)
"""### **part 7:Load the Llama Guard model**"""
model_id = "meta-llama/LlamaGuard-7b"
guard_tokenizer = AutoTokenizer.from_pretrained(model_id)

bnb_config_guard = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    # Allow offloading parts of the model to CPU in fp32 if GPU memory runs out
    llm_int8_enable_fp32_cpu_offload=True,
)

guard_model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config_guard,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
"""### **part 8: Setting Up a Text Generation Pipeline with HuggingFace and LangChain**"""
from langchain.llms import HuggingFacePipeline
from transformers import pipeline

text_generation_pipeline = pipeline(
    model=model,
    tokenizer=tokenizer,
    task="text-generation",
    temperature=0.2,
    do_sample=True,
    repetition_penalty=1.1,
    return_full_text=False,
    max_new_tokens=512,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
)
llm = HuggingFacePipeline(pipeline=text_generation_pipeline)
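# Optional smoke test (illustrative) to confirm the pipeline is wired up before
# building the chains on top of it:
# print(llm.invoke("List three common causes of brake squeal."))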
"""### **part 9: Creating a History-Aware Question Retriever with LangChain**"""
from langchain.chains import create_history_aware_retriever
from langchain.prompts.chat import MessagesPlaceholder
from langchain.prompts import ChatPromptTemplate
contextualize_q_system_prompt = (
    "Given a chat history and the latest user question "
    "which might reference context in the chat history, "
    "formulate a standalone question which can be understood "
    "without the chat history. Do NOT answer the question, "
    "just reformulate it if needed and otherwise return it as is."
)

contextualize_q_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", contextualize_q_system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ]
)

history_aware_retriever = create_history_aware_retriever(
    llm, retriever, contextualize_q_prompt
)
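# Illustrative check of the rewriting behavior (hypothetical follow-up question):
# given earlier turns about brake discs in a populated `chat_history`, a vague
# follow-up should be reformulated into a standalone query before retrieval.
# docs = history_aware_retriever.invoke(
#     {"input": "What tools do I need for that?", "chat_history": chat_history}
# )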
"""### **part 10 :Creating Mech-bot QA Chain with LangChain**"""
from langchain.chains.combine_documents import create_stuff_documents_chain
system_prompt = """
You are Mech-bot, a highly knowledgeable car mechanic assistant. Your expertise covers a wide range of automotive issues, including diagnostics, repairs, maintenance, and technical advice. Please answer the question using only the information provided in the context:
If the answer is not available in the context, respond with 'I'm sorry, I don't have enough information to answer that question.'
If the question is 'Who are you?' or similar, respond with 'I am Mech-bot, a specialized assistant in car mechanics. Please ask questions related to car mechanics, and I'll do my best to assist you.'
If the question is unrelated to car mechanics, respond with 'I'm sorry, I specialize in car mechanics. Please ask questions related to car mechanics, and I'll do my best to assist you.'
{context}
</s>
"""
qa_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ]
)
question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
from langchain.chains import create_retrieval_chain
rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
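# Single-turn sanity check (illustrative): `create_retrieval_chain` returns a dict
# containing the generated "answer" and the retrieved "context" documents.
# result = rag_chain.invoke({
#     "input": "What are the tools needed to change brake discs on a car?",
#     "chat_history": [],
# })
# print(result["answer"])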
import warnings
# Ignore all warnings
warnings.filterwarnings('ignore')
"""### **part 11: Moderating Chat Messages with a Template and Guard Model**"""
@weave.op
def moderate_with_template(chat):
    # Format the conversation with Llama Guard's chat template and classify it
    input_ids = guard_tokenizer.apply_chat_template(chat, return_tensors="pt").to(guard_model.device)
    output = guard_model.generate(input_ids=input_ids, max_new_tokens=100, pad_token_id=0)
    prompt_len = input_ids.shape[-1]
    # Decode only the newly generated tokens (the verdict), not the prompt
    return guard_tokenizer.decode(output[0][prompt_len:], skip_special_tokens=True)
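# Example usage (illustrative). Llama Guard returns "safe", or "unsafe" followed by
# the violated category code(s); the downstream code compares against the string 'safe'.
# sample_chat = [{"role": "user", "content": "How do I bleed the brakes on a sedan?"}]
# print(moderate_with_template(sample_chat))  # expected: "safe"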
"""# Difining the evaluation and the monitoring functions"""
from ragas import evaluate
from datasets import Dataset
from ragas.metrics import answer_relevancy
from langchain_together import Together
from getpass import getpass

emb = HuggingFaceEmbeddings(model_name='BAAI/bge-base-en-v1.5')

# Use Together AI as the judge LLM for evaluation; prompt for the key
# rather than hard-coding it.
together_api_key = getpass("Enter your Together AI API key: ")
eval_llm = Together(
    model="meta-llama/Meta-Llama-3-8B-Instruct-Turbo",
    together_api_key=together_api_key,
)
def start_monitor():
    # Start a fresh W&B run for this chat turn
    run = wandb.init(
        project="Mech-Bot-Evaluation",
        # name=f"run_{}",
    )
    return run
@weave.op
def evaluate_response(question, answer, context):
    # Pull the raw text out of the retrieved Documents
    context_contents = [item.page_content for item in context]
    output = Dataset.from_dict({"question": [question], "answer": [answer], "contexts": [context_contents]})
    score = evaluate(output, llm=eval_llm, embeddings=emb, metrics=[answer_relevancy])
    return score
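# Illustrative usage, assuming `ai_msg` came from the RAG chain defined above:
# score = evaluate_response(question, ai_msg["answer"], ai_msg["context"])
# `score` behaves like a mapping, e.g. score['answer_relevancy'] -> 0.87 (hypothetical value)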
"""### **part 12: Question Answering Function with Content Moderation**
This function, **answer_question**, answers user questions while ensuring content appropriateness through moderation checks.
**Moderate User Question:** Checks if the question is safe. If not, it returns an apologetic message.
**Generate AI Response:** Uses the RAG chain to generate an answer if the question is safe.
**Update and Moderate Chat History:** Adds the question and answer to the chat history and checks if the AI response is appropriate. If not, it apologizes and indicates an attempt to generate a better answer.
"""
@weave.op
def invoking(question, chat_history):
    return rag_chain.invoke({"input": question, "chat_history": chat_history})
from langchain_core.messages import AIMessage, HumanMessage
chat_history = []
@weave.op
def answer_question(question, history):
    # `history` is supplied by Gradio; the LangChain-format history is kept in `chat_history`
    run = start_monitor()  # Start monitoring

    # Check whether the question is safe using Llama Guard
    chat = [{"role": "user", "content": question}]
    if moderate_with_template(chat).strip() != 'safe':
        run.finish()
        return "I'm sorry, but I can't respond to that question as it may contain inappropriate content."

    ai_msg = invoking(question, chat_history)  # Generate AI response
    chat_history.extend(
        [
            HumanMessage(content=question),
            AIMessage(content=ai_msg["answer"]),
        ]
    )

    # Moderate the generated answer as well
    system_response = [
        {"role": "user", "content": question},
        {"role": "assistant", "content": ai_msg["answer"]},
    ]
    if moderate_with_template(system_response).strip() != 'safe':
        run.finish()
        return "I generated a response, but it may contain inappropriate content. Let me try again with a more appropriate answer."

    # Evaluate the response
    score = evaluate_response(question, ai_msg["answer"], ai_msg["context"])
    if score['answer_relevancy'] < 0.7:
        relevancy = score['answer_relevancy']
        run.alert(
            title='Low Answer Relevancy',
            text=f'Answer relevancy {relevancy} is below the acceptable threshold',
        )

    # Log the run
    wandb.log(score.to_pandas().to_dict())
    run.finish()
    return ai_msg["answer"]
"""### **part 13: interface**
"""
import gradio as gr

# Create the chat interface
iface = gr.ChatInterface(
    answer_question,  # Use the moderated answer_question function
    title="Mech-bot: Your Car Mechanic Assistant",
    description="Ask any car mechanic-related questions, and Mech-bot will try its best to assist you.",
    submit_btn="Ask",
    clear_btn="Clear Chat",
)

# Launch the Gradio interface
iface.launch(debug=True)