# PreciousPlastic_chatbot / langchain_bot.py
# NOTE(review): the original file began with Hugging Face file-viewer residue
# ("JarvisLabs's picture / Upload 4 files / 4d7c224 verified / raw / history blame / 1.71 kB"),
# which is not valid Python; preserved here as a comment so the module can be imported.
# --- Standard library ---
import json
import os

# --- LangChain / Upstash ---
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain.chains import create_history_aware_retriever, create_retrieval_chain
from langchain.chains.conversational_retrieval.base import ConversationalRetrievalChain
from langchain.memory.buffer_window import ConversationBufferWindowMemory
# BUGFIX: class name was misspelled "VectorStoreRetrieverMemo", which raises
# ImportError at module load time.
from langchain.memory import VectorStoreRetrieverMemory
from upstash_vector import Index
from langchain_community.vectorstores.upstash import UpstashVectorStore

# Load chatbot settings (model name, temperature, prompt template, k, memory key)
# from the bundled JSON config. Use a context manager so the file handle is
# closed deterministically, and pin the encoding.
with open("system.json", "r", encoding="utf-8") as f:
    settings = json.load(f)

# Connect to the Upstash vector index; both credentials are required
# environment variables (KeyError if missing — fail fast by design).
index = Index(os.environ["UPSTASH_VECTOR_REST_URL"], os.environ["UPSTASH_VECTOR_REST_TOKEN"])
# Vector store backed by the Upstash index; embedding=True delegates
# embedding of documents/queries to Upstash's server-side model.
vectorStore = UpstashVectorStore(index=index, embedding=True)

# Retriever returning the top-k most similar chunks; k comes from the config.
_search_kwargs = {"k": settings["k"]}
retriever = vectorStore.as_retriever(search_kwargs=_search_kwargs)

# Chat model configured from settings (model name + sampling temperature).
LLM = ChatOpenAI(
    model=settings["model"],
    temperature=settings["temp"],
)
# Question-answering prompt. The template text is read from the JSON config
# and is expected to contain {context} and {question} placeholders.
QUESTION_PROMPT = PromptTemplate(
    input_variables=["context", "question"],  # variables interpolated into the template
    template=settings["prompt_temp"],         # template string from settings
)
# Sliding-window conversation memory for the chatbot.
_memory_config = dict(
    memory_key=settings["MEMORY_KEY"],  # name of the memory variable (from config)
    output_key="answer",                # which chain output gets saved to memory
    k=8,                                # number of past exchanges to retain
    return_messages=True,               # return history as a list of chat messages
)
memory = ConversationBufferWindowMemory(**_memory_config)