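"""Gradio app for chatting with an uploaded PDF.

The PDF is split into chunks, embedded with OpenAI embeddings, and stored in a
Chroma vector store; questions are answered by a ConversationalRetrievalChain
backed by an Anthropic chat model. Loaded data is cleared automatically after
10 minutes of inactivity.
"""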
import gradio as gr
import os
import time
import threading
from langchain.document_loaders import OnlinePDFLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.llms import OpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
# API keys are read from secrets / environment variables with these names
os.environ['ANTHROPIC_API_KEY'] = os.getenv("Your_Anthropic_API_Key", "")
os.environ['OPENAI_API_KEY'] = os.getenv("Your_API_Key", "")
# Global state: last interaction time plus the vector store and QA chain
last_interaction_time = 0
db = None
qa = None

# Initialize the Anthropic chat model (LangChain's ChatAnthropic) to use as the LLM instead of OpenAI.
# NOTE: "claude-2" is an assumed model name; use whichever Claude model your key can access.
from langchain.chat_models import ChatAnthropic
anthropic_model = ChatAnthropic(model="claude-2", temperature=0.2)
def loading_pdf():
return "Working on the upload. Also, pondering the usefulness of sporks..."
def pdf_changes(pdf_doc):
try:
if pdf_doc is None:
return "No PDF uploaded."
loader = OnlinePDFLoader(pdf_doc.name)
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
texts = text_splitter.split_documents(documents)
# Replace this with your appropriate embeddings class
embeddings = OpenAIEmbeddings()
global db
db = Chroma.from_documents(texts, embeddings)
retriever = db.as_retriever()
global qa
qa = ConversationalRetrievalChain.from_llm(
            llm=anthropic_model,  # use the Anthropic chat model initialized above
retriever=retriever,
return_source_documents=False
)
return "Ready"
except Exception as e:
return f"Error processing PDF: {e}"
def clear_data():
global qa
qa = None
return "Data cleared"
def add_text(history, text):
global last_interaction_time
last_interaction_time = time.time()
history = history + [(text, None)]
return history, ""
def bot(history):
    response = infer_anthropic(history[-1][0], history)  # answer the latest user message
    sentences = '.\n'.join(response.split('. '))  # one sentence per line, keeping the periods
formatted_response = f"**Bot:**\n\n{sentences}"
history[-1][1] = formatted_response
return history
def infer_anthropic(question, history):
    """Answer a question about the loaded PDF via the retrieval chain."""
    if qa is None:
        return "Please load a PDF first."
    chat_history = [(human, ai) for human, ai in history[:-1]]
    result = qa({"question": question, "chat_history": chat_history})
    return result['answer']
def auto_clear_data():
global qa, last_interaction_time
if time.time() - last_interaction_time > 600:
qa = None
def periodic_clear():
while True:
auto_clear_data()
time.sleep(60)
threading.Thread(target=periodic_clear, daemon=True).start()
css = """
#col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
"""
title = """
<div style="text-align: center;max-width: 700px;">
<h1>CauseWriter Chat with PDF • Anthropic</h1>
<p style="text-align: center;">Upload a .PDF from your computer, click the "Convert PDF to Magic AI language" button, <br />
and once the status shows "Ready" you can start asking questions about the PDF. <br />
This version stores chat history and uses Anthropic's Claude as the LLM, with OpenAI embeddings for retrieval.</p>
</div>
"""
with gr.Blocks(css=css) as demo:
with gr.Column(elem_id="col-container"):
gr.HTML(title)
with gr.Column():
pdf_doc = gr.File(label="Load a pdf", file_types=['.pdf'], type="file")
with gr.Row():
langchain_status = gr.Textbox(label="Status", placeholder="", interactive=False)
load_pdf = gr.Button("Convert PDF to Magic AI language")
clear_btn = gr.Button("Clear Data")
chatbot = gr.Chatbot([], elem_id="chatbot").style(height=450)
question = gr.Textbox(label="Question", placeholder="Type your question and hit Enter")
submit_btn = gr.Button("Send Message")
load_pdf.click(loading_pdf, None, langchain_status, queue=False)
load_pdf.click(pdf_changes, inputs=[pdf_doc], outputs=[langchain_status], queue=False)
clear_btn.click(clear_data, outputs=[langchain_status], queue=False)
question.submit(add_text, [chatbot, question], [chatbot, question]).then(
bot, chatbot, chatbot
)
submit_btn.click(add_text, [chatbot, question], [chatbot, question]).then(
bot, chatbot, chatbot
)
demo.launch()