'''
Om Sri Sai Ram
Swami's Chatbot Alpha Version
'''
from langchain.vectorstores import FAISS
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
import textwrap
import gradio as gr
import os

OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
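# OpenAI() and OpenAIEmbeddings() pick up OPENAI_API_KEY from the environment on
# their own; reading it here simply fails fast with a KeyError if the key is missing.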
vectordb = FAISS.load_local("faiss_index OPENAI", OpenAIEmbeddings())
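# "faiss_index OPENAI" is assumed to be a folder created offline with FAISS.save_local().
# A minimal (hypothetical) build step might look like:
#   docs = ...  # load and split the source texts into Documents
#   FAISS.from_documents(docs, OpenAIEmbeddings()).save_local("faiss_index OPENAI")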
# --------------------------------------------------------------------------------
prompt_template = """
Use only the following pieces of context to answer the question at the end.
If you don't know the answer, just say that you don't know; don't try to make one up.
Answer in the same language the question was asked.
{context}
Question: {question}
Answer:"""
PROMPT = PromptTemplate(
    template=prompt_template,
    input_variables=["context", "question"]
)
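# chain_type="stuff" concatenates all retrieved chunks into the {context} slot of the
# prompt above; return_source_documents=True makes the chain return the retrieved
# documents alongside the answer so they can be listed as sources.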
chain = RetrievalQA.from_chain_type(llm=OpenAI(),
                                    chain_type="stuff",
                                    retriever=vectordb.as_retriever(),
                                    chain_type_kwargs={'prompt': PROMPT},
                                    return_source_documents=True,
                                    verbose=False)
# --------------------------------------------------------------------------------
def wrap_text_preserve_newlines(text, width=200):
    # Split the input text into lines based on newline characters
    lines = text.split('\n')
    # Wrap each line individually
    wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
    # Join the wrapped lines back together using newline characters
    wrapped_text = '\n'.join(wrapped_lines)
    return wrapped_text
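# e.g. wrap_text_preserve_newlines("short line\n" + "x" * 500) keeps the existing
# '\n' break and additionally wraps the long line at roughly `width` characters.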
def process_llm_response(llm_response):
    ans = wrap_text_preserve_newlines(llm_response['result'])
    # Turn each source document's file path into a readable name (basename, no extension)
    sources_used = ' \n'.join(
        os.path.splitext(os.path.basename(source.metadata['source']))[0]
        for source in llm_response['source_documents']
    )
    ans = ans + '\n\nSources: \n' + sources_used
    return ans
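# With return_source_documents=True the chain call below returns a dict containing
# 'result' (the answer text) and 'source_documents' (the retrieved Document objects),
# which process_llm_response() unpacks.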
def llm_ans(query):
    llm_response = chain(query)
    ans = process_llm_response(llm_response)
    return ans
def predict(message, history):
    # output = message  # debug mode
    output = str(llm_ans(message))
    return output
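# gr.ChatInterface calls predict(message, history) on every user turn; the chat
# history is ignored here, so each question is answered independently of the
# conversation so far.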
demo = gr.ChatInterface(predict,
                        title='SAI Speaks')

if __name__ == "__main__":
    demo.launch()