Megatron17 committed
Commit 5623f53
1 Parent(s): 28b5b24

Upload 20 files

Dockerfile ADDED
@@ -0,0 +1,11 @@
+ FROM python:3.10
+ RUN useradd -m -u 1000 user
+ USER user
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:$PATH
+ WORKDIR $HOME/app
+ COPY --chown=user . $HOME/app
+ COPY ./requirements.txt $HOME/app/requirements.txt
+ RUN pip install -r requirements.txt
+ COPY . .
+ CMD ["chainlit", "run", "app.py", "--port", "7860"]
app.py ADDED
@@ -0,0 +1,83 @@
+ import chainlit as cl
+ from llmops.text_utils import TextFileLoader, CharacterTextSplitter
+ from llmops.vectordatabase import VectorDatabase
+ import asyncio
+ from llmops.retrieval_pipeline import RetrievalAugmentedQAPipeline, WandB_RetrievalAugmentedQAPipeline
+ from llmops.openai_utils.chatmodel import ChatOpenAI
+ import wandb
+ from llmops.openai_utils.prompts import (
+     UserRolePrompt,
+     SystemRolePrompt,
+     AssistantRolePrompt,
+ )
+
+ RAQA_PROMPT_TEMPLATE = """
+ Use the provided context to answer the user's query.
+
+ You may not answer the user's query unless there is specific context in the following text.
+
+ If you do not know the answer, or cannot answer, please respond with "I don't know".
+
+ Context:
+ {context}
+ """
+
+ raqa_prompt = SystemRolePrompt(RAQA_PROMPT_TEMPLATE)
+
+ USER_PROMPT_TEMPLATE = """
+ User Query:
+ {user_query}
+ """
+
+ user_prompt = UserRolePrompt(USER_PROMPT_TEMPLATE)
+
+ @cl.on_chat_start
+ async def on_chat_start():
+     files = None
+
+     # Wait for the user to upload a file
+     while files is None:
+         files = await cl.AskFileMessage(
+             content="Please upload a text file to begin!",
+             accept=["text/plain"],
+             max_size_mb=20,
+             timeout=180,
+         ).send()
+
+     file = files[0]
+
+     msg = cl.Message(
+         content=f"Processing `{file.name}`...", disable_human_feedback=True
+     )
+     await msg.send()
+
+     text_loader = TextFileLoader("data/KingLear.txt")  # note: indexes the bundled text, not the uploaded file
+     documents = text_loader.load_documents()
+
+     text_splitter = CharacterTextSplitter()
+     split_documents = text_splitter.split_texts(documents)
+
+     vector_db = VectorDatabase()
+     vector_db = await vector_db.abuild_from_list(split_documents)  # await directly; asyncio.run() would fail inside the running event loop
+     chat_openai = ChatOpenAI()
+     wandb.init(project="RAQA Example")
+     raqa_retrieval_augmented_qa_pipeline = WandB_RetrievalAugmentedQAPipeline(
+         vector_db_retriever=vector_db,
+         llm=chat_openai,
+         wandb_project="RAQA from Scratch"
+     )
+     # Let the user know that the system is ready
+     msg.content = f"Processing `{file.name}` done. You can now ask questions!"
+     await msg.update()
+
+     cl.user_session.set("chain", raqa_retrieval_augmented_qa_pipeline)
+
+ @cl.on_message
+ async def main(message: str):
+     chain = cl.user_session.get("chain")
+     output = chain.run_pipeline(message, raqa_prompt, user_prompt)
+     print(output)
+     msg = cl.Message(content=f"{output}")
+     # msg.prompt = output
+     await msg.send()
+
llmops/.DS_Store ADDED
Binary file (6.15 kB)
llmops/__init__.py ADDED
File without changes
llmops/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (179 Bytes)
llmops/__pycache__/retrieval_pipeline.cpython-310.pyc ADDED
Binary file (3.03 kB)
llmops/__pycache__/text_utils.cpython-310.pyc ADDED
Binary file (3.74 kB)
llmops/__pycache__/vectordatabase.cpython-310.pyc ADDED
Binary file (3.51 kB)
llmops/openai_utils/__init__.py ADDED
File without changes
llmops/openai_utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (192 Bytes)
llmops/openai_utils/__pycache__/chatmodel.cpython-310.pyc ADDED
Binary file (1.29 kB)
llmops/openai_utils/__pycache__/embedding.cpython-310.pyc ADDED
Binary file (2.92 kB)
llmops/openai_utils/__pycache__/prompts.cpython-310.pyc ADDED
Binary file (3.85 kB)
llmops/openai_utils/chatmodel.py ADDED
@@ -0,0 +1,31 @@
+ import openai
+ from dotenv import load_dotenv
+ import os
+
+ load_dotenv()
+
+ class ChatOpenAI:
+     """
+     Calls the OpenAI chat completions API to generate a response for a list of messages.
+     """
+     def __init__(self, model_name: str = "gpt-3.5-turbo"):
+         self.model_name = model_name
+         self.openai_api_key = os.getenv("OPENAI_API_KEY")
+         if self.openai_api_key is None:
+             raise ValueError("OPENAI_API_KEY is not set")
+
+     def run(self, messages: list, text_only: bool = True):
+         """
+         Takes in a list of messages and returns the model's response.
+         """
+         if not isinstance(messages, list):
+             raise ValueError("Messages must be a list")
+
+         openai.api_key = self.openai_api_key
+         response = openai.ChatCompletion.create(
+             model=self.model_name, messages=messages
+         )
+         if text_only:
+             return response.choices[0].message.content
+
+         return response
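
A minimal usage sketch for ChatOpenAI, assuming OPENAI_API_KEY is available via the environment or a .env file (the example messages are illustrative):

from llmops.openai_utils.chatmodel import ChatOpenAI

chat = ChatOpenAI(model_name="gpt-3.5-turbo")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},   # illustrative prompt
    {"role": "user", "content": "Summarize King Lear in one sentence."},
]
print(chat.run(messages))                       # default text_only=True returns just the reply string
response = chat.run(messages, text_only=False)  # full ChatCompletion object (choices, usage, model)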
llmops/openai_utils/embedding.py ADDED
@@ -0,0 +1,71 @@
+ from dotenv import load_dotenv
+ from openai.embeddings_utils import get_embeddings, aget_embeddings, get_embedding, aget_embedding
+ import openai
+ from typing import List
+ import os
+ import asyncio
+
+ class EmbeddingModel:
+     """
+     Generates embeddings for a single text or a list of texts,
+     either synchronously or asynchronously.
+     """
+     def __init__(self, embeddings_model_name: str = "text-embedding-ada-002"):
+         """
+         Loads the OpenAI API key and sets the embedding model.
+         """
+         load_dotenv()
+         self.openai_api_key = os.getenv("OPENAI_API_KEY")
+
+         if self.openai_api_key is None:
+             raise ValueError("OPENAI_API_KEY environment variable is not set. Please set it to your OpenAI API key.")
+
+         openai.api_key = self.openai_api_key
+         self.embeddings_model_name = embeddings_model_name
+
+     async def async_get_embeddings(self, list_of_text: List[str]) -> List[List[float]]:
+         """
+         Takes a list of strings and asynchronously returns their
+         embeddings via the OpenAI aget_embeddings helper.
+
+         """
+         return await aget_embeddings(
+             list_of_text=list_of_text, engine=self.embeddings_model_name
+         )
+
+     async def async_get_embedding(self, text: str) -> List[float]:
+         """
+         Takes a single string and asynchronously returns its
+         embedding via the OpenAI aget_embedding helper.
+
+         """
+         return await aget_embedding(text=text, engine=self.embeddings_model_name)
+
+     def get_embeddings(self, list_of_text: List[str]) -> List[List[float]]:
+         """
+         Takes a list of strings and synchronously returns their
+         embeddings via the OpenAI get_embeddings helper.
+
+         """
+         return get_embeddings(
+             list_of_text=list_of_text, engine=self.embeddings_model_name
+         )
+
+     def get_embedding(self, text: str) -> List[float]:
+         """
+         Takes a single string and synchronously returns its
+         embedding via the OpenAI get_embedding helper.
+
+         """
+         return get_embedding(text=text, engine=self.embeddings_model_name)
+
+ if __name__ == "__main__":
+     embedding_model = EmbeddingModel()
+     print(embedding_model.get_embedding("Hello, world!"))
+     print(embedding_model.get_embeddings(["Hello, world!", "Goodbye, world!"]))
+     print(asyncio.run(embedding_model.async_get_embedding("Hello, world!")))
+     print(
+         asyncio.run(
+             embedding_model.async_get_embeddings(["Hello, world!", "Goodbye, world!"])
+         )
+     )
llmops/openai_utils/prompts.py ADDED
@@ -0,0 +1,82 @@
+ import re
+
+ class BasePrompt:
+     def __init__(self, prompt):
+         """
+         Initializes the BasePrompt object with a prompt template.
+
+         :param prompt: A string that can contain placeholders within curly braces {}
+         """
+         self.prompt = prompt
+         self._pattern = re.compile(r"\{([^}]+)\}")
+
+     def format_prompt(self, **kwargs):
+         """
+         Formats the prompt string using the keyword arguments provided.
+
+         :param kwargs: The values to substitute into the prompt string
+         :return: The formatted prompt string
+         """
+         matches = self._pattern.findall(self.prompt)
+         return self.prompt.format(**{match: kwargs.get(match, "") for match in matches})
+
+     def get_input_variables(self):
+         """
+         Gets the list of input variable names from the prompt string.
+
+         :return: List of input variable names
+         """
+         return self._pattern.findall(self.prompt)
+
+ class RolePrompt(BasePrompt):
+     def __init__(self, prompt, role: str):
+         """
+         Initializes the RolePrompt object with a prompt template and a role.
+
+         :param prompt: A string that can contain placeholders within curly braces
+         :param role: The role for the message ('system', 'user', or 'assistant')
+         """
+         super().__init__(prompt)
+         self.role = role
+
+     def create_message(self, **kwargs):
+         """
+         Creates a message dictionary with a role and a formatted message.
+
+         :param kwargs: The values to substitute into the prompt string
+         :return: Dictionary containing the role and the formatted message
+         """
+         return {"role": self.role, "content": self.format_prompt(**kwargs)}
+
+
+ class SystemRolePrompt(RolePrompt):
+     """
+     Inherits RolePrompt and fixes the role to "system".
+     """
+     def __init__(self, prompt: str):
+         super().__init__(prompt, "system")
+
+
+ class UserRolePrompt(RolePrompt):
+     """
+     Inherits RolePrompt and fixes the role to "user".
+     """
+     def __init__(self, prompt: str):
+         super().__init__(prompt, "user")
+
+
+ class AssistantRolePrompt(RolePrompt):
+     """
+     Inherits RolePrompt and fixes the role to "assistant".
+     """
+     def __init__(self, prompt: str):
+         super().__init__(prompt, "assistant")
+
+
+ if __name__ == "__main__":
+     prompt = BasePrompt("Hello {name}, you are {age} years old")
+     print(prompt.format_prompt(name="John", age=30))
+
+     prompt = SystemRolePrompt("Hello {name}, you are {age} years old")
+     print(prompt.create_message(name="John", age=30))
+     print(prompt.get_input_variables())
llmops/retrieval_pipeline.py ADDED
@@ -0,0 +1,85 @@
+ from llmops.openai_utils.chatmodel import ChatOpenAI
+ from llmops.vectordatabase import VectorDatabase
+ from llmops.openai_utils.prompts import (
+     UserRolePrompt,
+     SystemRolePrompt,
+     AssistantRolePrompt,
+ )
+ import datetime
+ from wandb.sdk.data_types.trace_tree import Trace
+
+ class RetrievalAugmentedQAPipeline:
+     """
+     Retrieves the most relevant context for a query from the vector database and answers with the LLM.
+     """
+     def __init__(self, llm: ChatOpenAI, vector_db_retriever: VectorDatabase) -> None:
+         self.llm = llm
+         self.vector_db_retriever = vector_db_retriever
+
+     def run_pipeline(self, user_query: str, raqa_prompt: SystemRolePrompt, user_prompt: UserRolePrompt) -> str:
+         context_list = self.vector_db_retriever.search_by_text(user_query, k=4)
+
+         context_prompt = ""
+         for context in context_list:
+             context_prompt += context[0] + "\n"
+
+         formatted_system_prompt = raqa_prompt.create_message(context=context_prompt)
+
+         formatted_user_prompt = user_prompt.create_message(user_query=user_query)
+
+         return self.llm.run([formatted_system_prompt, formatted_user_prompt])
+
+
+ class WandB_RetrievalAugmentedQAPipeline:
+     def __init__(self, llm: ChatOpenAI, vector_db_retriever: VectorDatabase, wandb_project=None) -> None:
+         self.llm = llm
+         self.vector_db_retriever = vector_db_retriever
+         self.wandb_project = wandb_project
+
+     def run_pipeline(self, user_query: str, raqa_prompt: SystemRolePrompt, user_prompt: UserRolePrompt) -> str:
+         context_list = self.vector_db_retriever.search_by_text(user_query, k=4)
+
+         context_prompt = ""
+         for context in context_list:
+             context_prompt += context[0] + "\n"
+
+         formatted_system_prompt = raqa_prompt.create_message(context=context_prompt)
+         formatted_user_prompt = user_prompt.create_message(user_query=user_query)
+         start_time = datetime.datetime.now().timestamp() * 1000
+
+         try:
+             openai_response = self.llm.run([formatted_system_prompt, formatted_user_prompt], text_only=False)
+             end_time = datetime.datetime.now().timestamp() * 1000
+             status = "success"
+             status_message = None
+             response_text = openai_response.choices[0].message.content
+             token_usage = openai_response["usage"].to_dict()
+             model = openai_response["model"]
+
+         except Exception as e:
+             end_time = datetime.datetime.now().timestamp() * 1000
+             status = "error"
+             status_message = str(e)
+             response_text = ""
+             token_usage = {}
+             model = ""
+
+         if self.wandb_project:
+             root_span = Trace(
+                 name="root_span",
+                 kind="llm",
+                 status_code=status,
+                 status_message=status_message,
+                 start_time_ms=start_time,
+                 end_time_ms=end_time,
+                 metadata={
+                     "token_usage": token_usage,
+                     "model_name": model
+                 },
+                 inputs={"system_prompt": formatted_system_prompt, "user_prompt": formatted_user_prompt},
+                 outputs={"response": response_text}
+             )
+
+             root_span.log(name="openai_trace")
+
+         return response_text if response_text else "We ran into an error. Please try again later. Full Error Message: " + str(status_message)
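
A minimal end-to-end sketch of the plain RetrievalAugmentedQAPipeline, assuming OPENAI_API_KEY is set; the sample corpus, prompt templates, and query are illustrative (app.py wires up the W&B variant with a real text file instead):

import asyncio
from llmops.vectordatabase import VectorDatabase
from llmops.retrieval_pipeline import RetrievalAugmentedQAPipeline
from llmops.openai_utils.chatmodel import ChatOpenAI
from llmops.openai_utils.prompts import SystemRolePrompt, UserRolePrompt

# Illustrative corpus; in app.py this comes from chunking a text file.
corpus = [
    "Cordelia is King Lear's youngest daughter.",
    "Lear divides his kingdom between Goneril and Regan.",
]
vector_db = asyncio.run(VectorDatabase().abuild_from_list(corpus))

raqa_prompt = SystemRolePrompt("Use the provided context to answer the user's query.\n\nContext:\n{context}")
user_prompt = UserRolePrompt("User Query:\n{user_query}")

pipeline = RetrievalAugmentedQAPipeline(llm=ChatOpenAI(), vector_db_retriever=vector_db)
print(pipeline.run_pipeline("Who is Cordelia?", raqa_prompt, user_prompt))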
llmops/text_utils.py ADDED
@@ -0,0 +1,92 @@
+ import os
+ from typing import List
+
+ class TextFileLoader:
+     """
+     Loads documents from a text file or a directory of text files.
+     """
+     def __init__(self, path: str, encoding: str = "utf-8") -> None:
+         self.documents = []
+         self.path = path
+         self.encoding = encoding
+
+     def load(self) -> None:
+         """
+         If the path is a directory, walk it and read every .txt file;
+         if the path is a .txt file, read it directly.
+         """
+         if os.path.isdir(self.path):
+             self.load_directory()
+         elif os.path.isfile(self.path) and self.path.endswith(".txt"):
+             self.load_file()
+         else:
+             raise ValueError(
+                 "Provided path is neither a valid directory nor a .txt file"
+             )
+
+     def load_file(self) -> None:
+         """
+         Reads the text file and appends its contents to the document list.
+         """
+         with open(self.path, "r", encoding=self.encoding) as f:
+             self.documents.append(f.read())
+
+     def load_directory(self) -> None:
+         """
+         Reads all the text files in the directory and appends them to the document list.
+         """
+         for root, _, files in os.walk(self.path):
+             for file in files:
+                 if file.endswith(".txt"):
+                     with open(
+                         os.path.join(root, file), "r", encoding=self.encoding
+                     ) as f:
+                         self.documents.append(f.read())
+
+     def load_documents(self):
+         """
+         Calls load(), which reads the data, and returns the documents.
+         """
+         self.load()
+         return self.documents
+
+ class CharacterTextSplitter:
+     """
+     Chunks text documents based on character count.
+     """
+     def __init__(self, chunk_size: int = 1000, chunk_overlap: int = 200):
+         assert chunk_size > chunk_overlap, "Chunk size must be greater than chunk overlap"
+         self.chunk_size = chunk_size
+         self.chunk_overlap = chunk_overlap
+
+     def split(self, text: str) -> List[str]:
+         """
+         Splits a text into overlapping chunks based on character count.
+         """
+         chunks = []
+         for i in range(0, len(text), self.chunk_size - self.chunk_overlap):
+             chunks.append(text[i:i + self.chunk_size])
+         return chunks
+
+     def split_texts(self, texts: List[str]) -> List[str]:
+         """
+         Splits each text in a list into chunks and returns the combined list.
+         """
+         chunks = []
+         for text in texts:
+             chunks.extend(self.split(text))
+         return chunks
+
+ if __name__ == "__main__":
+     loader = TextFileLoader("/Users/shubham.agnihotri/Documents/GitHub/LLM-Ops/RAQA from scratch/data/KingLear.txt")
+     loader.load()
+     splitter = CharacterTextSplitter()
+     chunks = splitter.split_texts(loader.documents)
+     print(len(chunks))
+     print(chunks[0])
+     print("--------")
+     print(chunks[1])
+     print("--------")
+     print(chunks[-2])
+     print("--------")
+     print(chunks[-1])
llmops/vectordatabase.py ADDED
@@ -0,0 +1,55 @@
+ import numpy as np
+ from typing import List, Tuple, Callable
+ from llmops.openai_utils.embedding import EmbeddingModel
+ import asyncio
+
+ def cosine_similarity(vector_a: np.ndarray, vector_b: np.ndarray) -> float:
+     """Computes the cosine similarity between two vectors."""
+     dot_product = np.dot(vector_a, vector_b)
+     norm_a = np.linalg.norm(vector_a)
+     norm_b = np.linalg.norm(vector_b)
+     return dot_product / (norm_a * norm_b)
+
+ class VectorDatabase:
+     def __init__(self, embedding_model: EmbeddingModel = None):
+         self.vectors = {}  # maps text key -> embedding vector
+         self.embedding_model = embedding_model or EmbeddingModel()
+
+     def insert(self, key: str, vector: np.ndarray) -> None:
+         """
+         Adds an entry to the vectors dictionary, keyed by text with the embedding as the value.
+         """
+         self.vectors[key] = vector
+
+     def search(self, query_vector: np.ndarray, k: int, distance_measure: Callable = cosine_similarity) -> List[Tuple[str, float]]:
+         """
+         Scores every vector in the database against the query vector,
+         sorts the results, and returns the top k as (key, score) tuples.
+         """
+         scores = [
+             (key, distance_measure(query_vector, vector)) for key, vector in self.vectors.items()
+         ]
+         return sorted(scores, key=lambda x: x[1], reverse=True)[:k]
+
+     def search_by_text(self, query_text: str, k: int, distance_measure: Callable = cosine_similarity, return_as_text: bool = False) -> List[Tuple[str, float]]:
+         """
+         Converts the text query to an embedding and then calls the search function.
+         """
+         query_vector = self.embedding_model.get_embedding(query_text)
+         results = self.search(query_vector, k, distance_measure)
+         return [result[0] for result in results] if return_as_text else results
+
+     def retrieve_from_key(self, key: str) -> np.ndarray:
+         """
+         Returns the vector stored under the given key, or None if absent.
+         """
+         return self.vectors.get(key, None)
+
+     async def abuild_from_list(self, list_of_text: List[str]) -> "VectorDatabase":
+         """
+         Builds the database from a list of texts: each text is a key and its embedding is the value.
+         """
+         embeddings = await self.embedding_model.async_get_embeddings(list_of_text)
+         for text, embedding in zip(list_of_text, embeddings):
+             self.insert(text, np.array(embedding))
+         return self
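
A minimal usage sketch for VectorDatabase, assuming OPENAI_API_KEY is set (the sample texts and query are illustrative):

import asyncio
from llmops.vectordatabase import VectorDatabase

texts = ["King Lear divides his kingdom.", "Cordelia refuses to flatter her father."]
db = asyncio.run(VectorDatabase().abuild_from_list(texts))
# Returns the top-2 entries as (text, similarity) tuples; pass return_as_text=True for plain strings.
print(db.search_by_text("Who divides the kingdom?", k=2))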
requirements.txt ADDED
@@ -0,0 +1,10 @@
+ numpy==1.25.2
+ openai==0.27.8
+ python-dotenv==1.0.0
+ pandas
+ scikit-learn
+ ipykernel
+ matplotlib
+ plotly
+ wandb
+ chainlit  # imported by app.py and run by the Dockerfile CMD