import os
import re
import nltk
import copy
import time
import joblib
import msgpack
import validators
import numpy as np
import streamlit as st
import uuid
from numpy import ndarray
from llama_cpp import Llama
from scipy.sparse import csr_matrix, save_npz, load_npz, vstack
from qdrant_client import QdrantClient, models
from langchain_community.document_loaders import WikipediaLoader, WebBaseLoader
from langchain_experimental.text_splitter import SemanticChunker
from fastembed.sparse.splade_pp import supported_splade_models
from fastembed import SparseTextEmbedding, SparseEmbedding
from fastembed_ext import FastEmbedEmbeddingsLc
from unstructured.partition.auto import partition
from qdrant_client.models import (
NamedSparseVector,
NamedVector,
SparseVector,
PointStruct,
SearchRequest,
ScoredPoint,
)
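# Build one Qdrant point per chunk, carrying both named vectors of the hybrid
# collection ("text-dense" and "text-sparse") plus text and metadata as payload.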
def make_points(texts: list, metadatas: list, dense: list[ndarray], sparse: list[SparseEmbedding]) -> list[PointStruct]:
    points = []
    for text, metadata, sparse_vector, dense_vector in zip(texts, metadatas, sparse, dense):
        sparse_vec = SparseVector(indices=sparse_vector.indices.tolist(), values=sparse_vector.values.tolist())
        point = PointStruct(
            # Random UUIDs keep later upserts (scraped URLs, uploaded files)
            # from overwriting earlier points, which sequential ids would do.
            id=str(uuid.uuid4()),
            vector={
                "text-sparse": sparse_vec,
                "text-dense": dense_vector.tolist(),
            },
            payload={
                "text": text,
                "metadata": metadata
            }
        )
        points.append(point)
    return points
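# Query both named vectors in a single round trip; search_batch returns one
# result list per SearchRequest, in request order (dense first, then sparse).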
def search(client: QdrantClient, collection_name: str, dense, sparse):
search_results = client.search_batch(
collection_name,
[
SearchRequest(
vector=NamedVector(
name="text-dense",
vector=dense,
),
limit=10
),
SearchRequest(
vector=NamedSparseVector(
name="text-sparse",
vector=SparseVector(
indices=sparse[0].indices.tolist(),
values=sparse[0].values.tolist(),
),
),
limit=10
),
],
)
return search_results
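# Turn an ordered result list into (point_id, rank) pairs, ranks starting at 1,
# as expected by the RRF fusion below.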
def rank_list(search_result: list[ScoredPoint]):
return [(point.id, rank + 1) for rank, point in enumerate(search_result)]
def rrf(rank_lists, alpha=60, default_rank=1000):
"""
Optimized Reciprocal Rank Fusion (RRF) using NumPy for large rank lists.
:param rank_lists: A list of rank lists. Each rank list should be a list of (item, rank) tuples.
:param alpha: The parameter alpha used in the RRF formula. Default is 60.
:param default_rank: The default rank assigned to items not present in a rank list. Default is 1000.
:return: Sorted list of items based on their RRF scores.
"""
    all_items = list({item for rank_list in rank_lists for item, _ in rank_list})
    item_to_index = {item: idx for idx, item in enumerate(all_items)}
    rank_matrix = np.full((len(all_items), len(rank_lists)), default_rank)
    for list_idx, rank_list in enumerate(rank_lists):
        for item, rank in rank_list:
            rank_matrix[item_to_index[item], list_idx] = rank
    rrf_scores = np.sum(1.0 / (alpha + rank_matrix), axis=1)
    sorted_indices = np.argsort(-rrf_scores)
    return [(all_items[idx], rrf_scores[idx]) for idx in sorted_indices]
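# One full RAG turn: hybrid retrieval, RRF fusion, a grounded answer from the
# LLM, then a second pass that rewrites that answer as a clean summary.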
def main(query: str, client: QdrantClient, collection_name: str, llm, dense_model, sparse_model):
dense_query = list(dense_model.embed_query(query, 32))
sparse_query = list(sparse_model.embed(query, 32))
search_results = search(
client,
collection_name,
dense_query,
sparse_query
)
dense_rank_list, sparse_rank_list = rank_list(search_results[0]), rank_list(search_results[1])
rrf_rank_list = rrf([dense_rank_list, sparse_rank_list])
    # Retrieve only the three best-fused points; retrieve() does not guarantee
    # it preserves request order, so slicing its result could lose the top hits.
    records_list = client.retrieve(
        collection_name,
        [item[0] for item in rrf_rank_list[:3]]
    )
    docs = [record.payload for record in records_list]
    contents = [doc['text'] for doc in docs]
    metadatas = [doc['metadata'] for doc in docs]
context = "\n".join(contents)
seen_values = set()
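    # set.add() returns None, so the second clause records each value as a side
    # effect while keeping it in the output exactly once.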
result_metadatas = "\n".join(
f'{value}'
for metadata in metadatas
for key, value in metadata.items()
if (value not in seen_values and not seen_values.add(value))
)
    prompt_text = qa_prompt(query, context)
    print(f'QA_PROMPT : {prompt_text}')
    response = llm.create_chat_completion(
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {
                "role": "user",
                "content": prompt_text
            }
        ], stop=["</s>"], temperature=0, frequency_penalty=0.2, presence_penalty=0.4, top_p=0.2)
text = response["choices"][0]["message"]['content']
print(f'TEXT: {text}')
output = llm.create_chat_completion(
messages = [
{"role": "system", "content": """Act like a professional summary writer. You have been providing summarization services for various types of documents, including academic papers, legal texts, and business reports, for over 20 years.
Your expertise includes extracting key points and important details concisely without adding unnecessary introductory phrases."""
},
{
"role": "user",
"content": f"""Write a summary of the following text delimited by triple backquotes. Ensure the summary covers the key points of the text. Do not introduce the summary with sentences like "Here is the summary:" or similar. The summary should be detailed, precise, and directly convey the essential information from the text.
```{text}```
Take a deep breath and work on this problem step-by-step."""
}
], stop=["</s>"], temperature=0.7, max_tokens=3000)
answer = output['choices'][0]['message']['content']
    answer_with_metadatas = f"{answer}\n\nSource(s):\n{result_metadatas}"
print(f'OUTPUT: {output}')
return answer, answer_with_metadatas
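# st.cache_resource makes Streamlit build the models, the in-memory Qdrant
# instance and the initial index once, then reuse them across reruns.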
@st.cache_resource
def load_models_and_documents():
print('load')
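    # Patch fastembed's SPLADE registry so entry 0 resolves to an ONNX export
    # of Splade_PP_en_v2 hosted at devve1/Splade_PP_en_v2_onnx.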
supported_splade_models[0] = {
"model": "prithivida/Splade_PP_en_v2",
"vocab_size": 30522,
"description": "Implementation of SPLADE++ Model for English v2",
"size_in_GB": 0.532,
"sources": {
"hf": "devve1/Splade_PP_en_v2_onnx"
},
"model_file": "model.onnx"
}
    with st.spinner('Loading models...'):
llm = Llama.from_pretrained(
repo_id="MaziyarPanahi/Llama-3-8B-Instruct-32k-v0.1-GGUF",
filename="*Q8_0.gguf",
verbose=False,
chat_format="chatml",
n_ctx=16000,
n_gpu_layers=32
)
provider = ['CPUExecutionProvider']
dense_model = FastEmbedEmbeddingsLc(
model_name='mixedbread-ai/mxbai-embed-large-v1',
providers=provider,
cache_dir=os.getenv('HF_HOME'),
batch_size=32
)
sparse_model = SparseTextEmbedding(
'prithivida/Splade_PP_en_v2',
cache_dir=os.getenv('HF_HOME'),
providers=provider
)
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
client = QdrantClient(':memory:')
collection_name = 'collection_demo'
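    # Hybrid collection: a 1024-dim cosine dense vector kept in RAM with binary
    # quantization, plus an in-memory SPLADE sparse index. indexing_threshold=0
    # disables HNSW indexing during the initial bulk upload.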
    client.create_collection(
        collection_name,
        vectors_config={
            "text-dense": models.VectorParams(
                size=1024,
                distance=models.Distance.COSINE,
                on_disk=False,
                quantization_config=models.BinaryQuantization(
                    binary=models.BinaryQuantizationConfig(
                        always_ram=True
                    )
                )
            )
        },
        sparse_vectors_config={
            "text-sparse": models.SparseVectorParams(
                index=models.SparseIndexParams(
                    on_disk=False
                )
            )
        },
        shard_number=2,
        optimizers_config=models.OptimizersConfigDiff(
            indexing_threshold=0
        ),
        hnsw_config=models.HnswConfigDiff(
            on_disk=False,
            m=16,
            ef_construct=100
        )
    )
    with st.spinner('Parsing and chunking documents...'):
name = 'action_rpg'
embeddings_path = os.path.join(os.getenv('HF_HOME'), 'embeddings')
texts_path = os.path.join(embeddings_path, name + '_texts.msgpack')
metadatas_path = os.path.join(embeddings_path, name + '_metadatas.msgpack')
dense_path = os.path.join(embeddings_path, name + '_dense.npz')
sparse_path = os.path.join(embeddings_path, name + '_sparse.npz')
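        # First run: fetch, chunk and embed the seed Wikipedia pages, then
        # persist everything so later runs can simply reload from disk.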
if not os.path.exists(embeddings_path):
os.mkdir(embeddings_path)
docs_1 = WikipediaLoader(query='Action-RPG').load()
docs_2 = WikipediaLoader(query='Real-time strategy').load()
docs_3 = WikipediaLoader(query='First-person shooter').load()
docs_4 = WikipediaLoader(query='Multiplayer online battle arena').load()
docs_5 = WikipediaLoader(query='List of video game genres').load()
docs = docs_1 + docs_2 + docs_3 + docs_4 + docs_5
texts, metadatas = [], []
for doc in docs:
texts.append(doc.page_content)
del doc.metadata['title']
del doc.metadata['summary']
metadatas.append(doc.metadata)
docs_texts, docs_metadatas, dense_embeddings, sparse_embeddings = chunk_documents(texts, metadatas, dense_model, sparse_model)
with open(texts_path, "wb") as outfile_texts:
packed_texts = msgpack.packb(docs_texts, use_bin_type=True)
outfile_texts.write(packed_texts)
with open(metadatas_path, "wb") as outfile_metadatas:
packed_metadatas = msgpack.packb(docs_metadatas, use_bin_type=True)
outfile_metadatas.write(packed_metadatas)
np.savez_compressed(dense_path, *dense_embeddings)
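            # Store every SPLADE vector as one row of a single CSR matrix;
            # (data, indices, indptr) gives a compact on-disk format for save_npz.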
max_index = max(np.max(embedding.indices) for embedding in sparse_embeddings)
sparse_matrices = []
for embedding in sparse_embeddings:
data = embedding.values
indices = embedding.indices
indptr = np.array([0, len(data)])
matrix = csr_matrix((data, indices, indptr), shape=(1, max_index + 1))
sparse_matrices.append(matrix)
combined_sparse_matrix = vstack(sparse_matrices)
save_npz(sparse_path, combined_sparse_matrix)
else:
with open(texts_path, "rb") as data_file_texts:
byte_data_texts = data_file_texts.read()
with open(metadatas_path, "rb") as data_file_metadatas:
byte_data_metadatas = data_file_metadatas.read()
docs_texts = msgpack.unpackb(byte_data_texts, raw=False)
docs_metadatas = msgpack.unpackb(byte_data_metadatas, raw=False)
dense_embeddings = list(np.load(dense_path).values())
sparse_embeddings = []
loaded_sparse_matrix = load_npz(sparse_path)
for i in range(loaded_sparse_matrix.shape[0]):
row = loaded_sparse_matrix.getrow(i)
values = row.data
indices = row.indices
embedding = SparseEmbedding(values, indices)
sparse_embeddings.append(embedding)
    with st.spinner('Saving documents...'):
client.upsert(
collection_name,
make_points(
docs_texts,
docs_metadatas,
dense_embeddings,
sparse_embeddings
)
)
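        # Re-enable indexing now that the bulk upload is done (it was disabled
        # above with indexing_threshold=0 to speed up the initial upsert).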
client.update_collection(
collection_name=collection_name,
optimizer_config=models.OptimizersConfigDiff(indexing_threshold=20000)
)
return client, collection_name, llm, dense_model, sparse_model
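# Split raw documents into semantically coherent chunks in parallel threads,
# then embed every chunk with both the dense and the sparse model.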
def chunk_documents(texts, metadatas, dense_model, sparse_model):
text_splitter = SemanticChunker(
dense_model,
breakpoint_threshold_type='standard_deviation'
)
_metadatas = metadatas or [{}] * len(texts)
    results = []
    def create_document(text: str, i: int, _metadatas: list):
        index = -1
        for chunk in text_splitter.split_text(text):
            metadata = copy.deepcopy(_metadatas[i])
            if text_splitter._add_start_index:
                index = text.find(chunk, index + 1)
                metadata['start_index'] = index
            # Append chunk and metadata as one tuple: a single atomic append
            # keeps the pair aligned even when worker threads interleave.
            results.append((chunk, metadata))
    joblib.Parallel(n_jobs=joblib.cpu_count(), verbose=1, require='sharedmem')(
        joblib.delayed(create_document)(text, i, _metadatas) for i, text in enumerate(texts))
    documents = [chunk for chunk, _ in results]
    metadatas_docs = [metadata for _, metadata in results]
    dense_embeddings = dense_model.embed_documents(documents, 32)
    sparse_embeddings = list(sparse_model.embed(documents, 32))
    return documents, metadatas_docs, dense_embeddings, sparse_embeddings
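# Prompt builder for the answering step; it reads the sidebar toggle straight
# from st.session_state, so no widget callback plumbing is needed.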
def qa_prompt(query: str, context: str) -> str:
    # Documents-only mode restricts the model to the retrieved context;
    # otherwise it may fall back on its own knowledge when the context is irrelevant.
    if st.session_state.get('documents_only', True):
        return f"""You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.
Question: {query}
Context: {context}
Answer:"""
    return f"""If the context is not relevant, please answer the question by using your own knowledge about the topic.
{context}
Question: {query}"""
if __name__ == '__main__':
st.set_page_config(page_title="Multipurpose AI Agent",
layout="wide"
)
st.title("Multipurpose AI Agent")
client, collection_name, llm, dense_model, sparse_model = load_models_and_documents()
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if prompt := st.chat_input("Message Video Game Assistant"):
st.chat_message("user").markdown(prompt)
st.session_state.messages.append({"role": "user", "content": prompt})
        ai_response, ai_response_with_metadatas = main(prompt, client, collection_name, llm, dense_model, sparse_model)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
            for chunk in re.split(r'(\s+)', ai_response_with_metadatas):
                full_response += chunk
                time.sleep(0.01)
                message_placeholder.markdown(full_response + "▌")
            message_placeholder.markdown(full_response)
            st.session_state.messages.append({"role": "assistant", "content": full_response})
    url = st.sidebar.text_input("Scrape a URL:")
print(type(url))
print(url)
if validators.url(url):
docs = WebBaseLoader(url).load()
print(f'WebBaseLoader: {docs[0].metadata}')
texts, metadatas = [], []
        for doc in docs:
            texts.append(doc.page_content)
            # These keys are not always present in WebBaseLoader metadata,
            # so pop defensively instead of del.
            for key in ('title', 'description', 'language'):
                doc.metadata.pop(key, None)
            metadatas.append(doc.metadata)
texts, metadatas, dense_embeddings, sparse_embeddings = chunk_documents(texts, metadatas, dense_model, sparse_model)
client.upsert(
collection_name,
make_points(
texts,
metadatas,
dense_embeddings,
sparse_embeddings
)
)
st.sidebar.success("URL content uploaded and ready!")
    uploaded_files = st.sidebar.file_uploader("Upload a file:", accept_multiple_files=True, type=['docx', 'doc', 'odt', 'pptx', 'ppt', 'xlsx', 'csv', 'tsv', 'eml', 'msg', 'rtf', 'epub', 'html', 'xml', 'pdf', 'png', 'jpg', 'heic', 'txt'])
print(f'uploaded-files : {uploaded_files}')
for uploaded_file in uploaded_files:
print('count')
elements = partition(file=uploaded_file,
strategy='hi_res',
skip_infer_table_types=['png', 'pdf', 'jpg', 'xls', 'xlsx', 'heic'],
hi_res_model_name='yolox',
include_page_breaks=True
)
metadata_dict = {"source": uploaded_file.name}
texts, metadatas = [], []
for elem in elements:
texts.append(elem.text)
metadatas.append(metadata_dict)
texts, metadatas, dense_embeddings, sparse_embeddings = chunk_documents(texts, metadatas, dense_model, sparse_model)
client.upsert(
collection_name,
make_points(
texts,
metadatas,
dense_embeddings,
sparse_embeddings
)
)
st.sidebar.success("Document content uploaded and ready!")
    IsDocumentsOnly = st.sidebar.toggle(
        label="""Enable 'Documents-Only' Mode""",
        value=True,
        key="documents_only"
    )
    if IsDocumentsOnly:
        st.write('The AI answers your questions using only the documents provided.')
    else:
        st.write("The AI answers your questions using the documents provided, and falls back on its own internal knowledge when the answer is not found in them.")