danicafisher committed on
Commit 9b3f2e9
1 Parent(s): 439d7fc
aimakerspace/__init__.py ADDED
File without changes
aimakerspace/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (185 Bytes)

aimakerspace/__pycache__/text_utils.cpython-311.pyc ADDED
Binary file (8.18 kB)

aimakerspace/__pycache__/vectordatabase.cpython-311.pyc ADDED
Binary file (6.65 kB)

aimakerspace/openai_utils/__init__.py ADDED
File without changes
aimakerspace/openai_utils/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (198 Bytes)

aimakerspace/openai_utils/__pycache__/chatmodel.cpython-311.pyc ADDED
Binary file (1.73 kB)

aimakerspace/openai_utils/__pycache__/embedding.cpython-311.pyc ADDED
Binary file (5.43 kB)

aimakerspace/openai_utils/__pycache__/prompts.cpython-311.pyc ADDED
Binary file (5.52 kB)
aimakerspace/openai_utils/chatmodel.py ADDED
@@ -0,0 +1,27 @@
+ from openai import OpenAI
+ from dotenv import load_dotenv
+ import os
+
+ load_dotenv()
+
+
+ class ChatOpenAI:
+     def __init__(self, model_name: str = "gpt-4o-mini"):
+         self.model_name = model_name
+         self.openai_api_key = os.getenv("OPENAI_API_KEY")
+         if self.openai_api_key is None:
+             raise ValueError("OPENAI_API_KEY is not set")
+
+     def run(self, messages, text_only: bool = True, **kwargs):
+         """Sends a list of chat messages to the model; extra kwargs are forwarded to the API call."""
+         if not isinstance(messages, list):
+             raise ValueError("messages must be a list")
+
+         client = OpenAI(api_key=self.openai_api_key)
+         response = client.chat.completions.create(
+             model=self.model_name, messages=messages, **kwargs
+         )
+
+         # Return just the reply text by default; text_only=False yields the full response object.
+         if text_only:
+             return response.choices[0].message.content
+
+         return response
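A minimal usage sketch for ChatOpenAI (illustrative, not part of the commit; it assumes OPENAI_API_KEY is available in the environment or a .env file):

import os
from aimakerspace.openai_utils.chatmodel import ChatOpenAI

chat = ChatOpenAI(model_name="gpt-4o-mini")
reply = chat.run(
    [
        {"role": "system", "content": "You are a concise assistant."},
        {"role": "user", "content": "What is retrieval-augmented generation in one sentence?"},
    ],
    temperature=0,  # forwarded to chat.completions.create via **kwargs
)
print(reply)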
aimakerspace/openai_utils/embedding.py ADDED
@@ -0,0 +1,68 @@
+ from dotenv import load_dotenv
+ from openai import AsyncOpenAI, OpenAI
+ import openai
+ from typing import List
+ import os
+ import asyncio
+
+
+ class EmbeddingModel:
+     def __init__(self, embeddings_model_name: str = "text-embedding-3-small"):
+         load_dotenv()
+         self.openai_api_key = os.getenv("OPENAI_API_KEY")
+         # Validate the key before constructing the clients, which read it from the environment.
+         if self.openai_api_key is None:
+             raise ValueError(
+                 "OPENAI_API_KEY environment variable is not set. Please set it to your OpenAI API key."
+             )
+         openai.api_key = self.openai_api_key
+         self.async_client = AsyncOpenAI()
+         self.client = OpenAI()
+         self.embeddings_model_name = embeddings_model_name
+
+     async def async_get_embeddings(self, list_of_text: List[str]) -> List[List[float]]:
+         # Split the input into batches so each request stays under the API's per-request input limit
+         batch_size = 1024
+         batches = [list_of_text[i:i + batch_size] for i in range(0, len(list_of_text), batch_size)]
+
+         async def process_batch(batch):
+             embedding_response = await self.async_client.embeddings.create(
+                 input=batch, model=self.embeddings_model_name
+             )
+             return [item.embedding for item in embedding_response.data]
+
+         # Use asyncio.gather to process all batches concurrently
+         results = await asyncio.gather(*[process_batch(batch) for batch in batches])
+
+         # Flatten the per-batch results into a single list of embeddings
+         return [embedding for batch_result in results for embedding in batch_result]
+
+     async def async_get_embedding(self, text: str) -> List[float]:
+         embedding = await self.async_client.embeddings.create(
+             input=text, model=self.embeddings_model_name
+         )
+         return embedding.data[0].embedding
+
+     def get_embeddings(self, list_of_text: List[str]) -> List[List[float]]:
+         embedding_response = self.client.embeddings.create(
+             input=list_of_text, model=self.embeddings_model_name
+         )
+         return [item.embedding for item in embedding_response.data]
+
+     def get_embedding(self, text: str) -> List[float]:
+         embedding = self.client.embeddings.create(
+             input=text, model=self.embeddings_model_name
+         )
+         return embedding.data[0].embedding
+
+
+ if __name__ == "__main__":
+     embedding_model = EmbeddingModel()
+     print(asyncio.run(embedding_model.async_get_embedding("Hello, world!")))
+     print(
+         asyncio.run(
+             embedding_model.async_get_embeddings(["Hello, world!", "Goodbye, world!"])
+         )
+     )
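Bulk embedding jobs can hit transient rate limits. A hedged sketch of wrapping the async call with retries, assuming the third-party tenacity package (not a dependency of this commit) is installed:

import asyncio
from tenacity import retry, stop_after_attempt, wait_exponential
from aimakerspace.openai_utils.embedding import EmbeddingModel

model = EmbeddingModel()  # requires OPENAI_API_KEY, as above

@retry(stop=stop_after_attempt(3), wait=wait_exponential(min=1, max=10))
async def embed_with_retry(text: str):
    # Retries up to 3 times with exponential backoff on any exception,
    # e.g. a rate-limit error during a burst of traffic.
    return await model.async_get_embedding(text)

print(len(asyncio.run(embed_with_retry("Hello, world!"))))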
aimakerspace/openai_utils/prompts.py ADDED
@@ -0,0 +1,78 @@
+ import re
+
+
+ class BasePrompt:
+     def __init__(self, prompt):
+         """
+         Initializes the BasePrompt object with a prompt template.
+
+         :param prompt: A string that can contain placeholders within curly braces
+         """
+         self.prompt = prompt
+         self._pattern = re.compile(r"\{([^}]+)\}")
+
+     def format_prompt(self, **kwargs):
+         """
+         Formats the prompt string using the keyword arguments provided.
+         Placeholders with no matching keyword argument are replaced with an empty string.
+
+         :param kwargs: The values to substitute into the prompt string
+         :return: The formatted prompt string
+         """
+         matches = self._pattern.findall(self.prompt)
+         return self.prompt.format(**{match: kwargs.get(match, "") for match in matches})
+
+     def get_input_variables(self):
+         """
+         Gets the list of input variable names from the prompt string.
+
+         :return: List of input variable names
+         """
+         return self._pattern.findall(self.prompt)
+
+
+ class RolePrompt(BasePrompt):
+     def __init__(self, prompt, role: str):
+         """
+         Initializes the RolePrompt object with a prompt template and a role.
+
+         :param prompt: A string that can contain placeholders within curly braces
+         :param role: The role for the message ('system', 'user', or 'assistant')
+         """
+         super().__init__(prompt)
+         self.role = role
+
+     def create_message(self, format=True, **kwargs):
+         """
+         Creates a message dictionary with a role and a formatted message.
+
+         :param format: If True, substitute kwargs into the template; otherwise use the raw prompt
+         :param kwargs: The values to substitute into the prompt string
+         :return: Dictionary containing the role and the formatted message
+         """
+         if format:
+             return {"role": self.role, "content": self.format_prompt(**kwargs)}
+
+         return {"role": self.role, "content": self.prompt}
+
+
+ class SystemRolePrompt(RolePrompt):
+     def __init__(self, prompt: str):
+         super().__init__(prompt, "system")
+
+
+ class UserRolePrompt(RolePrompt):
+     def __init__(self, prompt: str):
+         super().__init__(prompt, "user")
+
+
+ class AssistantRolePrompt(RolePrompt):
+     def __init__(self, prompt: str):
+         super().__init__(prompt, "assistant")
+
+
+ if __name__ == "__main__":
+     prompt = BasePrompt("Hello {name}, you are {age} years old")
+     print(prompt.format_prompt(name="John", age=30))
+
+     prompt = SystemRolePrompt("Hello {name}, you are {age} years old")
+     print(prompt.create_message(name="John", age=30))
+     print(prompt.get_input_variables())
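One design choice worth noting: format_prompt substitutes an empty string for any placeholder that is not supplied, rather than raising KeyError the way bare str.format would. A small illustrative sketch (the template text is hypothetical):

from aimakerspace.openai_utils.prompts import BasePrompt

prompt = BasePrompt("Summarize {document} in {style} style")
print(prompt.get_input_variables())  # -> ['document', 'style']

# Only one of the two variables is supplied; {style} silently becomes "".
print(prompt.format_prompt(document="the NIST AI RMF"))
# -> "Summarize the NIST AI RMF in  style"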
aimakerspace/text_utils.py ADDED
@@ -0,0 +1,116 @@
+ import os
+ from typing import List
+ from PyPDF2 import PdfReader
+
+
+ class TextFileLoader:
+     def __init__(self, path: str, encoding: str = "utf-8"):
+         self.documents = []
+         self.path = path
+         self.encoding = encoding
+
+     def load(self):
+         if os.path.isdir(self.path):
+             self.load_directory()
+         elif os.path.isfile(self.path) and self.path.endswith(".txt"):
+             self.load_file()
+         else:
+             raise ValueError(
+                 "Provided path is neither a valid directory nor a .txt file."
+             )
+
+     def load_file(self):
+         with open(self.path, "r", encoding=self.encoding) as f:
+             self.documents.append(f.read())
+
+     def load_directory(self):
+         for root, _, files in os.walk(self.path):
+             for file in files:
+                 if file.endswith(".txt"):
+                     with open(
+                         os.path.join(root, file), "r", encoding=self.encoding
+                     ) as f:
+                         self.documents.append(f.read())
+
+     def load_documents(self):
+         self.load()
+         return self.documents
+
+
+ class PDFFileLoader:
+     def __init__(self, path: str):
+         self.documents = []
+         self.path = path
+
+     def load(self):
+         if os.path.isdir(self.path):
+             self.load_directory()
+         elif os.path.isfile(self.path) and self.path.endswith(".pdf"):
+             self.load_file()
+         else:
+             raise ValueError(
+                 "Provided path is neither a valid directory nor a .pdf file."
+             )
+
+     def load_file(self):
+         with open(self.path, "rb") as file:
+             pdf_reader = PdfReader(file)
+             text = ""
+             for page in pdf_reader.pages:
+                 # extract_text() can return None for pages with no extractable text
+                 text += page.extract_text() or ""
+             self.documents.append(text)
+
+     def load_directory(self):
+         for root, _, files in os.walk(self.path):
+             for file in files:
+                 if file.endswith(".pdf"):
+                     file_path = os.path.join(root, file)
+                     with open(file_path, "rb") as f:
+                         pdf_reader = PdfReader(f)
+                         text = ""
+                         for page in pdf_reader.pages:
+                             text += page.extract_text() or ""
+                         self.documents.append(text)
+
+     def load_documents(self):
+         self.load()
+         return self.documents
+
+
+ class CharacterTextSplitter:
+     def __init__(
+         self,
+         chunk_size: int = 1000,
+         chunk_overlap: int = 200,
+     ):
+         assert (
+             chunk_size > chunk_overlap
+         ), "Chunk size must be greater than chunk overlap"
+
+         self.chunk_size = chunk_size
+         self.chunk_overlap = chunk_overlap
+
+     def split(self, text: str) -> List[str]:
+         # Step through the text with a stride of (chunk_size - chunk_overlap)
+         # so consecutive chunks share chunk_overlap characters.
+         chunks = []
+         for i in range(0, len(text), self.chunk_size - self.chunk_overlap):
+             chunks.append(text[i : i + self.chunk_size])
+         return chunks
+
+     def split_texts(self, texts: List[str]) -> List[str]:
+         chunks = []
+         for text in texts:
+             chunks.extend(self.split(text))
+         return chunks
+
+
+ if __name__ == "__main__":
+     loader = TextFileLoader("data/KingLear.txt")
+     loader.load()
+     splitter = CharacterTextSplitter()
+     chunks = splitter.split_texts(loader.documents)
+     print(len(chunks))
+     print(chunks[0])
+     print("--------")
+     print(chunks[1])
+     print("--------")
+     print(chunks[-2])
+     print("--------")
+     print(chunks[-1])
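The stride arithmetic in CharacterTextSplitter.split is easier to see with small numbers; an illustrative sketch with hypothetical sizes:

from aimakerspace.text_utils import CharacterTextSplitter

splitter = CharacterTextSplitter(chunk_size=10, chunk_overlap=4)
text = "abcdefghijklmnopqrst"  # 20 characters

# The stride is 10 - 4 = 6, so chunks start at offsets 0, 6, 12, 18
# and each consecutive pair shares 4 characters.
print(splitter.split(text))
# -> ['abcdefghij', 'ghijklmnop', 'mnopqrst', 'st']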
aimakerspace/vectordatabase.py ADDED
@@ -0,0 +1,91 @@
+ import numpy as np
+ from collections import defaultdict
+ from typing import List, Tuple, Callable
+ from aimakerspace.openai_utils.embedding import EmbeddingModel
+ import asyncio
+
+
+ def cosine_similarity(vector_a: np.ndarray, vector_b: np.ndarray) -> float:
+     """Computes the cosine similarity between two vectors."""
+     dot_product = np.dot(vector_a, vector_b)
+     norm_a = np.linalg.norm(vector_a)
+     norm_b = np.linalg.norm(vector_b)
+     return dot_product / (norm_a * norm_b)
+
+
+ def jaccard_binary(vector_a: np.ndarray, vector_b: np.ndarray) -> float:
+     """Computes the Jaccard similarity between two binary vectors."""
+     intersection = np.sum(np.logical_and(vector_a, vector_b))
+     union = np.sum(np.logical_or(vector_a, vector_b))
+     return float(intersection) / float(union)
+
+
+ def euclidean_distance(vector_a: np.ndarray, vector_b: np.ndarray) -> float:
+     """Computes the euclidean distance between two vectors."""
+     return np.linalg.norm(vector_a - vector_b)
+
+
+ class VectorDatabase:
+     def __init__(self, embedding_model: EmbeddingModel = None):
+         self.vectors = defaultdict(np.array)
+         self.embedding_model = embedding_model or EmbeddingModel()
+
+     def insert(self, key: str, vector: np.ndarray) -> None:
+         self.vectors[key] = vector
+
+     def search(
+         self,
+         query_vector: np.ndarray,
+         k: int,
+         distance_measure: Callable = cosine_similarity,
+     ) -> List[Tuple[str, float]]:
+         # Scores are sorted in descending order, so distance_measure must return
+         # higher values for more similar vectors (true of cosine similarity;
+         # negate a distance such as euclidean_distance before passing it in).
+         scores = [
+             (key, distance_measure(query_vector, vector))
+             for key, vector in self.vectors.items()
+         ]
+         return sorted(scores, key=lambda x: x[1], reverse=True)[:k]
+
+     def search_by_text(
+         self,
+         query_text: str,
+         k: int,
+         distance_measure: Callable = cosine_similarity,
+         return_as_text: bool = False,
+     ) -> List[Tuple[str, float]]:
+         query_vector = self.embedding_model.get_embedding(query_text)
+         results = self.search(query_vector, k, distance_measure)
+         return [result[0] for result in results] if return_as_text else results
+
+     def retrieve_from_key(self, key: str) -> np.ndarray:
+         return self.vectors.get(key, None)
+
+     async def abuild_from_list(self, list_of_text: List[str]) -> "VectorDatabase":
+         embeddings = await self.embedding_model.async_get_embeddings(list_of_text)
+         for text, embedding in zip(list_of_text, embeddings):
+             self.insert(text, np.array(embedding))
+         return self
+
+
+ if __name__ == "__main__":
+     list_of_text = [
+         "I like to eat broccoli and bananas.",
+         "I ate a banana and spinach smoothie for breakfast.",
+         "Chinchillas and kittens are cute.",
+         "My sister adopted a kitten yesterday.",
+         "Look at this cute hamster munching on a piece of broccoli.",
+     ]
+
+     vector_db = VectorDatabase()
+     vector_db = asyncio.run(vector_db.abuild_from_list(list_of_text))
+     k = 2
+
+     searched_vector = vector_db.search_by_text("I think fruit is awesome!", k=k)
+     print(f"Closest {k} vector(s):", searched_vector)
+
+     retrieved_vector = vector_db.retrieve_from_key(
+         "I like to eat broccoli and bananas."
+     )
+     print("Retrieved vector:", retrieved_vector)
+
+     relevant_texts = vector_db.search_by_text(
+         "I think fruit is awesome!", k=k, return_as_text=True
+     )
+     print(f"Closest {k} text(s):", relevant_texts)
app.py ADDED
@@ -0,0 +1,122 @@
+ import os
+ from typing import List
+ from chainlit.types import AskFileResponse
+ from aimakerspace.text_utils import CharacterTextSplitter, PDFFileLoader
+ from aimakerspace.openai_utils.prompts import (
+     UserRolePrompt,
+     SystemRolePrompt,
+     AssistantRolePrompt,
+ )
+ from aimakerspace.openai_utils.embedding import EmbeddingModel
+ from aimakerspace.vectordatabase import VectorDatabase
+ from aimakerspace.openai_utils.chatmodel import ChatOpenAI
+ import chainlit as cl
+ import asyncio
+ import nest_asyncio
+
+ nest_asyncio.apply()
+
+
+ # Load and chunk both source PDFs once, at import time.
+ pdf_loader_NIST = PDFFileLoader("data/NIST.AI.600-1.pdf")
+ pdf_loader_Blueprint = PDFFileLoader("data/Blueprint-for-an-AI-Bill-of-Rights.pdf")
+ documents_NIST = pdf_loader_NIST.load_documents()
+ documents_Blueprint = pdf_loader_Blueprint.load_documents()
+
+ text_splitter = CharacterTextSplitter()
+ split_documents_NIST = text_splitter.split_texts(documents_NIST)
+ split_documents_Blueprint = text_splitter.split_texts(documents_Blueprint)
+
+
+ # query = "What is the NIST definition of AI?"
+ # response = vector_db.search_by_text(query, k=3)
+ # print(response)
+
+ # user_prompt_template = "{content}"
+ # user_role_prompt = UserRolePrompt(user_prompt_template)
+ # system_prompt_template = (
+ #     "You are an expert in {expertise}, you always answer in a kind way."
+ # )
+ # system_role_prompt = SystemRolePrompt(system_prompt_template)
+
+
+ RAG_PROMPT_TEMPLATE = """ \
+ Use the provided context to answer the user's query.
+
+ You may not answer the user's query unless there is specific context in the following text.
+
+ If you do not know the answer, or cannot answer, please respond with "I don't know".
+ """
+
+ rag_prompt = SystemRolePrompt(RAG_PROMPT_TEMPLATE)
+
+ USER_PROMPT_TEMPLATE = """ \
+ Context:
+ {context}
+
+ User Query:
+ {user_query}
+ """
+
+ user_prompt = UserRolePrompt(USER_PROMPT_TEMPLATE)
+
+
+ class RetrievalAugmentedQAPipeline:
+     def __init__(self, llm: ChatOpenAI, vector_db_retriever: VectorDatabase) -> None:
+         self.llm = llm
+         self.vector_db_retriever = vector_db_retriever
+
+     def run_pipeline(self, user_query: str) -> dict:
+         # Retrieve the top-k chunks and concatenate them into a single context block.
+         context_list = self.vector_db_retriever.search_by_text(user_query, k=4)
+
+         context_prompt = ""
+         for context in context_list:
+             context_prompt += context[0] + "\n"
+
+         formatted_system_prompt = rag_prompt.create_message()
+
+         formatted_user_prompt = user_prompt.create_message(
+             user_query=user_query, context=context_prompt
+         )
+
+         return {
+             "response": self.llm.run([formatted_system_prompt, formatted_user_prompt]),
+             "context": context_list,
+         }
+
+
+ # ------------------------------------------------------------
+
+
+ @cl.on_chat_start  # marks a function that will be executed at the start of a user session
+ async def start_chat():
+     # settings = {
+     #     "model": "gpt-3.5-turbo",
+     #     "temperature": 0,
+     #     "max_tokens": 500,
+     #     "top_p": 1,
+     #     "frequency_penalty": 0,
+     #     "presence_penalty": 0,
+     # }
+
+     # Create a dict vector store holding chunks from both documents
+     vector_db = VectorDatabase()
+     vector_db = await vector_db.abuild_from_list(split_documents_NIST)
+     vector_db = await vector_db.abuild_from_list(split_documents_Blueprint)
+
+     chat_openai = ChatOpenAI()
+
+     # Create a chain
+     retrieval_augmented_qa_pipeline = RetrievalAugmentedQAPipeline(
+         vector_db_retriever=vector_db,
+         llm=chat_openai
+     )
+
+     # cl.user_session.set("settings", settings)
+     cl.user_session.set("chain", retrieval_augmented_qa_pipeline)
+
+
+ @cl.on_message  # marks a function that should be run each time the chatbot receives a message from a user
+ async def main(message):
+     chain = cl.user_session.get("chain")
+
+     # run_pipeline is synchronous and returns the full reply, so send it as one message.
+     result = chain.run_pipeline(message.content)
+
+     msg = cl.Message(content=result["response"])
+     await msg.send()
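In normal use the app is served with "chainlit run app.py". For a quick smoke test of the pipeline outside Chainlit, a hypothetical snippet (assumes the two PDFs exist under data/ and OPENAI_API_KEY is set):

import asyncio
from app import RetrievalAugmentedQAPipeline, split_documents_NIST, split_documents_Blueprint
from aimakerspace.vectordatabase import VectorDatabase
from aimakerspace.openai_utils.chatmodel import ChatOpenAI

# Build one vector store over both document sets, then ask a question.
vector_db = VectorDatabase()
asyncio.run(vector_db.abuild_from_list(split_documents_NIST + split_documents_Blueprint))

pipeline = RetrievalAugmentedQAPipeline(llm=ChatOpenAI(), vector_db_retriever=vector_db)
result = pipeline.run_pipeline("What is the NIST definition of AI?")
print(result["response"])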