Omar Solano
committed on
Commit
Β·
66bc7b6
1
Parent(s):
0cfc98f
add custom prompt
Browse files- scripts/cfg.py +62 -0
- scripts/gradio-ui.py +2 -3
scripts/cfg.py
ADDED
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from llama_index.core.llms import ChatMessage, MessageRole
|
2 |
+
from llama_index.core import ChatPromptTemplate
|
3 |
+
|
4 |
+
default = (
|
5 |
+
"Context information is below.\n"
|
6 |
+
"---------------------\n"
|
7 |
+
"{context_str}\n"
|
8 |
+
"---------------------\n"
|
9 |
+
"Given the context information and not prior knowledge, "
|
10 |
+
"answer the question: {query_str}\n"
|
11 |
+
)
|
12 |
+
|
13 |
+
prompt_formatter_cfg = (
|
14 |
+
"You are a witty AI teacher, helpfully answering questions from students of an applied artificial intelligence course on Large Language Models (LLMs or llm). Topics covered include training models, fine tuning models, giving memory to LLMs, prompting, hallucinations and bias, vector databases, transformer architectures, embeddings, Langchain, making LLMs interact with tool use, AI agents, reinforcement learning with human feedback. Questions should be understood with this context."
|
15 |
+
"You are provided information found in the json documentation. "
|
16 |
+
"Only respond with information inside the json documentation. DO NOT use additional information, even if you know the answer. "
|
17 |
+
"If the answer is in the documentation, answer the question (depending on the questions and the variety of relevant information in the json documentation, answer in 5 paragraphs."
|
18 |
+
"If the documentation does not discuss the topic related to the question, kindly respond that you cannot answer the question because it is not part of your knowledge. "
|
19 |
+
"Here is the information you can use in order: \n"
|
20 |
+
"---------------------\n"
|
21 |
+
"{context_str}\n"
|
22 |
+
"---------------------\n"
|
23 |
+
"REMEMBER:\n"
|
24 |
+
"You are a witty AI teacher, helpfully answering questions from students of an applied artificial intelligence course on Large Language Models (LLMs or llm). Topics covered include training models, fine tuning models, giving memory to LLMs, prompting, hallucinations and bias, vector databases, transformer architectures, embeddings, Langchain, making LLMs interact with tool use, AI agents, reinforcement learning with human feedback. Questions should be understood with this context."
|
25 |
+
"You are provided information found in the json documentation. "
|
26 |
+
"Here are the rules you must follow:\n"
|
27 |
+
"* Only respond with information inside the json documentation. DO NOT provide additional information, even if you know the answer. "
|
28 |
+
"* If the answer is in the documentation, answer the question (depending on the questions and the variety of relevant information in the json documentation. Your answer needs to be pertinent and not redundant giving a clear explanation as if you were a teacher. "
|
29 |
+
"* If the documentation does not discuss the topic related to the question, kindly respond that you cannot answer the question because it is not part of your knowledge. "
|
30 |
+
"* Only use information summarized from the json documentation, do not respond otherwise. "
|
31 |
+
"* Do not refer to the json documentation directly, but use the instructions provided within it to answer questions. "
|
32 |
+
"* Do not reference any links, urls or hyperlinks in your answers.\n"
|
33 |
+
"* Make sure to format your answers in Markdown format, including code block and snippets.\n"
|
34 |
+
"* If you do not know the answer to a question, or if it is completely irrelevant to the AI courses, simply reply with:\n"
|
35 |
+
"'I'm sorry, but I am an AI language model trained to assist with questions related to AI. I cannot answer that question as it is not relevant to the topics I'm trained on. Is there anything else I can assist you with?'"
|
36 |
+
"For example:\n"
|
37 |
+
"What is the meaning of life for a qa bot?\n"
|
38 |
+
"I'm sorry, but I am an AI language model trained to assist with questions related to AI. I cannot answer that question as it is not relevant to the topics I'm trained on. Is there anything else I can assist you with?"
|
39 |
+
"Now answer the following question: {query_str}\n"
|
40 |
+
)
|
41 |
+
|
42 |
+
chat_text_qa_msgs: list[ChatMessage] = [
|
43 |
+
ChatMessage(
|
44 |
+
role=MessageRole.SYSTEM,
|
45 |
+
content=(
|
46 |
+
"You are an expert Q&A system that is trusted around the world.\n"
|
47 |
+
"Always answer the query using the provided context information, "
|
48 |
+
"and not prior knowledge.\n"
|
49 |
+
"Some rules to follow:\n"
|
50 |
+
"1. Never directly reference the given context in your answer.\n"
|
51 |
+
"2. Avoid statements like 'Based on the context, ...' or "
|
52 |
+
"'The context information ...' or anything along "
|
53 |
+
"those lines."
|
54 |
+
),
|
55 |
+
),
|
56 |
+
ChatMessage(
|
57 |
+
role=MessageRole.USER,
|
58 |
+
content=prompt_formatter_cfg,
|
59 |
+
),
|
60 |
+
]
|
61 |
+
|
62 |
+
TEXT_QA_TEMPLATE = ChatPromptTemplate(chat_text_qa_msgs)
|
scripts/gradio-ui.py
CHANGED
@@ -19,6 +19,7 @@ from gradio.themes.utils import (
|
|
19 |
)
|
20 |
|
21 |
from utils import init_mongo_db
|
|
|
22 |
|
23 |
logging.getLogger("httpx").setLevel(logging.WARNING)
|
24 |
logger = logging.getLogger(__name__)
|
@@ -67,9 +68,6 @@ index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
|
|
67 |
# Initialize query engine
|
68 |
llm = OpenAI(temperature=0, model="gpt-3.5-turbo-0125", max_tokens=None)
|
69 |
embeds = OpenAIEmbedding(model="text-embedding-3-large", mode="text_search")
|
70 |
-
# query_engine = index.as_query_engine(
|
71 |
-
# llm=llm, similarity_top_k=5, embed_model=embeds, streaming=True
|
72 |
-
# )
|
73 |
|
74 |
|
75 |
def save_completion(completion, history):
|
@@ -200,6 +198,7 @@ def get_answer(history, sources: Optional[list[str]] = None):
|
|
200 |
embed_model=embeds,
|
201 |
streaming=True,
|
202 |
filters=filters,
|
|
|
203 |
)
|
204 |
completion = query_engine.query(user_input)
|
205 |
|
|
|
19 |
)
|
20 |
|
21 |
from utils import init_mongo_db
|
22 |
+
from cfg import TEXT_QA_TEMPLATE
|
23 |
|
24 |
logging.getLogger("httpx").setLevel(logging.WARNING)
|
25 |
logger = logging.getLogger(__name__)
|
|
|
68 |
# Initialize query engine
|
69 |
llm = OpenAI(temperature=0, model="gpt-3.5-turbo-0125", max_tokens=None)
|
70 |
embeds = OpenAIEmbedding(model="text-embedding-3-large", mode="text_search")
|
|
|
|
|
|
|
71 |
|
72 |
|
73 |
def save_completion(completion, history):
|
|
|
198 |
embed_model=embeds,
|
199 |
streaming=True,
|
200 |
filters=filters,
|
201 |
+
text_qa_template=TEXT_QA_TEMPLATE,
|
202 |
)
|
203 |
completion = query_engine.query(user_input)
|
204 |
|