|
import langchain
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.chains.question_answering import load_qa_chain
from langchain.document_loaders import UnstructuredPDFLoader, UnstructuredWordDocumentLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.vectorstores import FAISS
from langchain import HuggingFaceHub
from langchain import PromptTemplate
from langchain.chat_models import ChatOpenAI
from zipfile import ZipFile
import gradio as gr
import openpyxl
import os
import shutil
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
import tiktoken
import secrets
import openai
import time
from duckduckgo_search import DDGS
import requests
import tempfile
import pandas as pd
import numpy as np
from openai import OpenAI

from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage

from groq import Groq
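# NOTE (assumption): these imports target the pre-0.1 langchain layout
# (langchain.embeddings / langchain.vectorstores / langchain.document_loaders),
# the openai>=1.0 client (`from openai import OpenAI`), the legacy mistralai
# MistralClient, and the groq SDK. Pin versions accordingly when reproducing.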
|
|
|
|
|
MODEL_LIST = [
    "mistral-tiny",
    "mistral-small",
    "mistral-medium",
]
DEFAULT_MODEL = "mistral-small"
DEFAULT_TEMPERATURE = 0.7

tokenizer = tiktoken.encoding_for_model("gpt-3.5-turbo")
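# Token-aware chunking: chunk size and overlap below are measured in
# gpt-3.5-turbo tokens via tiktoken, not in characters.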
|
|
|
|
|
def tiktoken_len(text):
    tokens = tokenizer.encode(
        text,
        disallowed_special=()
    )
    return len(tokens)


text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=512,
    chunk_overlap=200,
    length_function=tiktoken_len,
    separators=["\n\n", "\n", " ", ""]
)

embeddings = SentenceTransformerEmbeddings(model_name="thenlper/gte-base")
foo = Document(page_content='foo is fou!', metadata={"source": 'foo source'})
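# reset_database removes the session's FAISS folder and its zipped export.
# The 'drive' guard skips deletion when the session id contains "drive",
# presumably to protect a mounted drive folder.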
|
|
|
def reset_database(ui_session_id):
    session_id = f"PDFAISS-{ui_session_id}"
    if 'drive' in session_id:
        print("RESET DATABASE: session_id contains 'drive' !!")
        return None

    try:
        shutil.rmtree(session_id)
    except:
        print(f'no {session_id} directory present')

    try:
        os.remove(f"{session_id}.zip")
    except:
        print(f"no {session_id}.zip present")

    return None
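# is_duplicate sums the FAISS scores of the first (up to 3) chunks against the
# existing index. similarity_search_with_score returns a distance (lower is
# closer), so a near-zero total means the document is already in the database.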
|
|
|
def is_duplicate(split_docs, db):
    epsilon = 0.0
    print(f"DUPLICATE: Treating: {split_docs[0].metadata['source'].split('/')[-1]}")
    for i in range(min(3, len(split_docs))):
        query = split_docs[i].page_content
        docs = db.similarity_search_with_score(query, k=1)
        _, score = docs[0]
        epsilon += score
    print(f"DUPLICATE: epsilon: {epsilon}")
    return epsilon < 0.1
|
|
|
def merge_split_docs_to_db(split_docs, db, progress, progress_step=0.1):
    progress(progress_step, desc="merging docs")
    if len(split_docs) == 0:
        print("MERGE to db: NO docs!!")
        return

    filename = split_docs[0].metadata['source']

    # Build a fresh FAISS index in batches of 10 chunks so progress can be reported.
    batch = 10
    db1 = None
    for i in range(0, len(split_docs), batch):
        progress(i / len(split_docs), desc=f"added {i} chunks of {len(split_docs)} chunks")
        if db1:
            db1.add_documents(split_docs[i:i + batch])
        else:
            db1 = FAISS.from_documents(split_docs[i:i + batch], embeddings)

    db1.save_local(split_docs[-1].metadata["source"].split(".")[-1])

    return db1
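# The merge_*_to_db helpers load a single pdf/docx/txt file, tag each chunk with
# the bare file name as its source, split it with the shared text_splitter, and
# return the per-file FAISS index built by merge_split_docs_to_db.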
|
|
|
def merge_pdf_to_db(filename, session_folder, progress, progress_step=0.1):
    progress_step += 0.05
    progress(progress_step, 'unpacking pdf')
    doc = UnstructuredPDFLoader(filename).load()
    doc[0].metadata['source'] = filename.split('/')[-1]
    split_docs = text_splitter.split_documents(doc)
    progress_step += 0.3
    progress(progress_step, 'pdf unpacked')
    return merge_split_docs_to_db(split_docs, session_folder, progress, progress_step)


def merge_docx_to_db(filename, session_folder, progress, progress_step=0.1):
    progress_step += 0.05
    progress(progress_step, 'unpacking docx')
    doc = UnstructuredWordDocumentLoader(filename).load()
    doc[0].metadata['source'] = filename.split('/')[-1]
    split_docs = text_splitter.split_documents(doc)
    progress_step += 0.3
    progress(progress_step, 'docx unpacked')
    return merge_split_docs_to_db(split_docs, session_folder, progress, progress_step)


def merge_txt_to_db(filename, session_folder, progress, progress_step=0.1):
    progress_step += 0.05
    progress(progress_step, 'unpacking txt')
    with open(filename) as f:
        docs = text_splitter.split_text(f.read())
    split_docs = [Document(page_content=doc, metadata={'source': filename.split('/')[-1]}) for doc in docs]
    progress_step += 0.3
    progress(progress_step, 'txt unpacked')
    return merge_split_docs_to_db(split_docs, session_folder, progress, progress_step)
|
|
|
def unpack_zip_file(filename, db, progress):
    with ZipFile(filename, 'r') as zipObj:
        contents = zipObj.namelist()
    print(f"unpack zip: contents: {contents}")
    tmp_directory = filename.split('/')[-1].split('.')[-2]
    shutil.unpack_archive(filename, tmp_directory)

    if 'index.faiss' in [item.lower() for item in contents]:
        db2 = FAISS.load_local(tmp_directory, embeddings)
        db.merge_from(db2)
        return db

    for file in contents:
        if file.lower().endswith('.docx'):
            db = merge_docx_to_db(f"{tmp_directory}/{file}", db, progress)
        if file.lower().endswith('.pdf'):
            db = merge_pdf_to_db(f"{tmp_directory}/{file}", db, progress)
        if file.lower().endswith('.txt'):
            db = merge_txt_to_db(f"{tmp_directory}/{file}", db, progress)
    return db
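# A session database is shipped to the user as <session_folder>.zip:
# unzip_db restores an uploaded zip into the session folder, and
# add_files_to_zip re-packs the folder (relative paths preserved) for download.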
|
|
|
def unzip_db(filename, ui_session_id):
    with ZipFile(filename, 'r') as zipObj:
        contents = zipObj.namelist()
    print(f"unzip: contents: {contents}")
    tmp_directory = f"PDFAISS-{ui_session_id}"
    shutil.unpack_archive(filename, tmp_directory)


def add_files_to_zip(session_id):
    zip_file_name = f"{session_id}.zip"
    with ZipFile(zip_file_name, "w") as zipObj:
        for root, dirs, files in os.walk(session_id):
            for file_name in files:
                file_path = os.path.join(root, file_name)
                arcname = os.path.relpath(file_path, session_id)
                zipObj.write(file_path, arcname)
|
|
|
|
|
|
|
def search_docs(topic, max_references):
    print(f"SEARCH PDF : {topic}")
    doc_list = []
    with DDGS() as ddgs:
        i = 0
        for r in ddgs.text('{} filetype:pdf'.format(topic), region='wt-wt', safesearch='On', timelimit='n'):
            if i >= max_references:
                break
            doc_list.append("TITLE : " + r['title'] + " -- BODY : " + r['body'] + " -- URL : " + r['href'])
            i += 1
    return gr.update(choices=doc_list)
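# store_files downloads each selected reference (the URL is the last token of the
# dropdown string) into a NamedTemporaryFile with a .pdf suffix and returns either
# the file objects or, with ret_names=True, just their paths.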
|
|
|
|
|
def store_files(references, ret_names=False):
    url_list = []
    temp_files = []
    for ref in references:
        url_list.append(ref.split(" ")[-1])
    for url in url_list:
        response = requests.get(url)
        if response.status_code == 200:
            filename = url.split('/')[-1]
            if filename.split('.')[-1] == 'pdf':
                filename = filename[:-4]
                print('File name.pdf :', filename)
                temp_file = tempfile.NamedTemporaryFile(delete=False, prefix=filename, suffix='.pdf')
            else:
                print('File name :', filename)
                temp_file = tempfile.NamedTemporaryFile(delete=False, prefix=filename, suffix='.pdf')
            temp_file.write(response.content)
            temp_file.close()
            if ret_names:
                temp_files.append(temp_file.name)
            else:
                temp_files.append(temp_file)

    return temp_files
|
|
|
|
|
|
|
|
|
def load_docs(ui_session_id):
    session_id_global_db = f"PDFAISS-{ui_session_id}"
    try:
        db = FAISS.load_local(session_id_global_db, embeddings)
        print("load_docs after loading global db:", session_id_global_db, len(db.index_to_docstore_id))
    except:
        return f"SESSION: {session_id_global_db} database does not exist", "", ""
    docs = []
    for i in range(1, len(db.index_to_docstore_id)):
        docs.append(db.docstore.search(db.index_to_docstore_id[i]))
    return docs
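# summarize_gpt strips filler characters, truncates the document to its first
# `first_tokens` tokens, and asks the chat model for a summary of that excerpt.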
|
|
|
|
|
|
|
def summarize_gpt(doc, system='provide a summary of the following document: ', first_tokens=600):
    doc = doc.replace('\n\n\n', '').replace('---', '').replace('...', '').replace('___', '')
    encoded = tokenizer.encode(doc)
    print("\n TOKENIZED : ", encoded)
    decoded = tokenizer.decode(encoded[:min(first_tokens, len(encoded))])
    print("\n DOC SHORTEN", min(first_tokens, len(encoded)), " : ", decoded)
    # openai>=1.0 module-level interface; uses the key set via openai.api_key.
    completion = openai.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": system},
            {"role": "user", "content": decoded}
        ]
    )
    return completion.choices[0].message.content
|
|
|
|
|
def summarize_docs_generator(apikey_input, session_id):
    openai.api_key = apikey_input
    docs = load_docs(session_id)
    print("################# DOCS LOADED ##################", "docs type : ", type(docs[0]))

    # load_docs returns an error string instead of Documents when the database is missing.
    try:
        fail = docs[0].page_content
    except:
        yield docs[0]
        return

    source = ""
    summaries = ""
    i = 0
    while i < len(docs):
        doc = docs[i]
        unique_doc = ""
        if source != doc.metadata:
            unique_doc = ''.join([doc.page_content for doc in docs[i:i + 3]])
            print("\n\n****Open AI API called****\n\n")
            if i == 0:
                try:
                    summary = summarize_gpt(unique_doc)
                except:
                    yield "ERROR : Try checking the validity of the provided OpenAI API Key"
                    return
            else:
                try:
                    summary = summarize_gpt(unique_doc)
                except:
                    print("ERROR : There was an error but it is not linked with the validity of the API key, taking a 20s nap")
                    yield summaries + "\n\n °°° OpenAI error, please wait 20 sec of cooldown. °°°"
                    time.sleep(20)
                    summary = summarize_gpt(unique_doc)

            print("SUMMARY : ", summary)
            summaries += f"Source : {doc.metadata['source'].split('/')[-1]}\n{summary} \n\n"
            source = doc.metadata
            yield summaries
        i += 1
    yield summaries
|
|
|
|
|
def summarize_docs(apikey_input, session_id):
    gen = summarize_docs_generator(apikey_input, session_id)
    while True:
        try:
            yield str(next(gen))
        except StopIteration:
            return
|
|
|
|
|
|
|
def update_df(ui_session_id):
    df = pd.DataFrame(columns=["File name", "Question 1"])
    session_folder = f"PDFAISS-{ui_session_id}"
    file_names = os.listdir(session_folder)
    for i, file_name in enumerate(file_names):
        # Row keys must match the DataFrame columns declared above.
        df.loc[i] = {'File name': str(file_name), 'Question 1': " "}
    return df
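# embed_files builds one FAISS index per uploaded file under
# PDFAISS-<session_id>/<file name>/, moves the original file into a store/
# subfolder, zips the whole session folder for download, and refreshes the
# question table. Uploading a previously exported zip restores the session.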
|
|
|
def embed_files(files, ui_session_id, progress=gr.Progress(), progress_step=0.05):
    print(files)
    progress(progress_step, desc="Starting...")
    split_docs = []
    if len(ui_session_id) == 0:
        ui_session_id = secrets.token_urlsafe(16)
    session_folder = f"PDFAISS-{ui_session_id}"

    if os.path.exists(session_folder) and os.path.isdir(session_folder):
        databases = os.listdir(session_folder)
    else:
        try:
            os.makedirs(session_folder)
            print(f"The folder '{session_folder}' has been created.")
        except OSError as e:
            print(f"Failed to create the folder '{session_folder}': {e}")

    for file_id, file in enumerate(files):
        print("ID : ", file_id, "FILE : ", file)
        file_type = file.name.split('.')[-1].lower()
        source = file.name.split('/')[-1]
        print(f"current file: {source}")
        progress(file_id / len(files), desc=f"Treating {source}")

        if file_type == 'zip':
            unzip_db(file.name, ui_session_id)
            add_files_to_zip(session_folder)
            return f"{session_folder}.zip", ui_session_id, update_df(ui_session_id)

        db2 = None
        if file_type == 'pdf':
            db2 = merge_pdf_to_db(file.name, session_folder, progress)

        if file_type == 'txt':
            db2 = merge_txt_to_db(file.name, session_folder, progress)

        if file_type == 'docx':
            db2 = merge_docx_to_db(file.name, session_folder, progress)

        if db2 is not None:
            db2.save_local(f"{session_folder}/{source}")

            progress(progress_step, desc='moving file to store')
            directory_path = f"{session_folder}/{source}/store/"
            if not os.path.exists(directory_path):
                os.makedirs(directory_path)
            try:
                shutil.move(file.name, directory_path)
            except:
                pass

    progress(progress_step, desc='loading db')

    progress(progress_step, desc='zipping db for download')
    add_files_to_zip(session_folder)
    print("EMBEDDED: db zipped")
    progress(progress_step, desc='db zipped')

    return f"{session_folder}.zip", ui_session_id, update_df(ui_session_id)
|
|
|
|
|
|
|
def add_to_db(references, ui_session_id):
    files = store_files(references)
    return embed_files(files, ui_session_id)


def export_files(references):
    files = store_files(references, ret_names=True)
    return files
|
|
|
|
|
def display_docs(docs):
    output_str = ''
    for i, doc in enumerate(docs):
        source = doc.metadata['source'].split('/')[-1]
        output_str += f"Ref: {i+1}\n{repr(doc.page_content)}\nSource: {source}\n\n"
    return output_str
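# ask_gpt answers a question against the session-level FAISS index: it retrieves
# the most similar chunks and, if an OpenAI key is provided, runs a "stuff"
# question-answering chain over them; otherwise it only returns the references.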
|
|
|
def ask_gpt(query, apikey, history, ui_session_id):
    session_id = f"PDFAISS-{ui_session_id}"
    try:
        db = FAISS.load_local(session_id, embeddings)
        print("ASKGPT after loading", session_id, len(db.index_to_docstore_id))
    except:
        print(f"SESSION: {session_id} database does not exist")
        return f"SESSION: {session_id} database does not exist", "", ""

    docs = db.similarity_search(query)
    history += f"[query]\n{query}\n[answer]\n"
    if apikey == "":
        history += f"None\n[references]\n{display_docs(docs)}\n\n"
        return "No answer from GPT", display_docs(docs), history
    else:
        llm = ChatOpenAI(temperature=0, model_name='gpt-3.5-turbo', openai_api_key=apikey)
        chain = load_qa_chain(llm, chain_type="stuff")
        answer = chain.run(input_documents=docs, question=query, verbose=True)
        history += f"{answer}\n[references]\n{display_docs(docs)}\n\n"
        return answer, display_docs(docs), history
|
|
|
|
|
|
|
|
|
|
|
|
|
def gpt_answer(api_key, query, model="gpt-3.5-turbo-1106", system_prompt="Use the provided References to answer the user Question. If the provided documents do not contain the elements to answer the user question, just say 'No information.'."):
    if 'gpt' in model:
        client = OpenAI(api_key=api_key)
        chat_completion = client.chat.completions.create(
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": query},
            ],
            model=model,
        )
        return chat_completion.choices[0].message.content

    if 'mistral' in model:
        client = MistralClient(api_key=api_key)
        chat_response = client.chat(
            model=model,
            messages=[
                ChatMessage(role="system", content=system_prompt),
                ChatMessage(role="user", content=query)],
        )
        return chat_response.choices[0].message.content

    if 'groq' in model:
        client = Groq(api_key=api_key)
        chat_completion = client.chat.completions.create(
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": query},
            ],
            # Dropdown values look like "groq:<model-id>"; the Groq API expects the bare model id.
            model=model.split("groq:")[-1],
        )
        return chat_completion.choices[0].message.content
|
|
|
def add_line_breaks(input_string, line_length=100):
    lines = []
    to_break = input_string.split("\n---\n[Sources]")[0]
    for i in range(0, len(to_break), line_length):
        line = to_break[i:i + line_length]
        lines.append(line)
    return '\n'.join(lines) + input_string[len(to_break):]
|
|
|
|
|
|
|
def upload_text_file(content):
    data = {"content": content, "syntax": "text", "expiry_days": 1}
    headers = {"User-Agent": "Sources"}
    r = requests.post("https://dpaste.com/api/", data=data, headers=headers)
    return f"{str(r.text)[:-1]}.txt"
|
|
|
|
|
def ask_df(df, api_key, model, ui_session_id):
    answers = []
    session_folder = f"PDFAISS-{ui_session_id}"
    question_column = df.columns[-1]
    if len(df.at[0, question_column]) < 2:
        return df
    for index, row in df.iterrows():
        question = row.iloc[-1]
        print(f"Question: {question}")
        if len(question) < 2:
            question = df.at[0, question_column].split("\n---\n")[0]
        db_folder = "/".join([session_folder, row["File name"]])
        db = FAISS.load_local(db_folder, embeddings)
        print(f"\n\nQUESTION:\n{question}\n\n")
        docs = db.similarity_search(question)
        references = '\n******************************\n'.join([d.page_content for d in docs])
        print(f"REFERENCES: {references}")
        try:
            source = upload_text_file(references)
        except:
            source = "ERROR WHILE GETTING THE SOURCES FILE"
        query = f"## USER QUESTION:\n{question}\n\n## REFERENCES:\n{references}\n\nANSWER:\n\n"
        try:
            answer = gpt_answer(api_key, query, model)
        except Exception as e:
            answer = "ERROR WHILE ANSWERING THE QUESTION"
            print("ERROR: ", e)
        complete_answer = add_line_breaks("\n---\n".join(["## " + question, answer, "[Sources](" + source + ")"]))
        answers.append(complete_answer)
        print(complete_answer)
    df[question_column] = answers
    return df
|
|
|
def export_df(df, ftype):
    fname = secrets.token_urlsafe(16)
    if ftype == "xlsx":
        df.to_excel(f"{fname}.xlsx", index=False)
        return f"{fname}.xlsx"
    if ftype == "pkl":
        df.to_pickle(f"{fname}.pkl")
        return f"{fname}.pkl"
    if ftype == "csv":
        df.to_csv(f"{fname}.csv", index=False)
        return f"{fname}.csv"
|
|
|
|
|
with gr.Blocks() as demo:
    gr.Markdown("Upload your documents and question them.")
    with gr.Accordion("Open to enter your API key", open=False):
        apikey_input = gr.Textbox(placeholder="Type your API key here to use Summarization and Q&A", label="API Key (OpenAI, Mistral or Groq)", type='password')
        dd_model = gr.Dropdown(["groq:llama-3.3-70b-specdec", "groq:llama-3.3-70b-versatile", "groq:mixtral-8x7b-32768", "groq:llama-3.1-70b-versatile", "groq:llama-3.2-90b-text-preview", "mistral-tiny", "mistral-small", "mistral-medium", "gpt-3.5-turbo-1106", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4-1106-preview", "gpt-4", "gpt-4-32k"], value="gpt-3.5-turbo-1106", label='List of models', allow_custom_value=True, scale=1)

    with gr.Tab("Upload PDF & TXT"):
        with gr.Accordion("Get files from the web", open=False):
            with gr.Column():
                topic_input = gr.Textbox(placeholder="Type your search topic", label="Search")
                with gr.Row():
                    max_files = gr.Slider(1, 30, step=1, value=10, label="Maximum number of files")
                    btn_search = gr.Button("Search")
                dd_documents = gr.Dropdown(label='List of documents', info='Click to remove from selection', multiselect=True)
                with gr.Row():
                    btn_dl = gr.Button("Add these files to the Database")
                    btn_export = gr.Button("⬇ Export selected files ⬇")

        tb_session_id = gr.Textbox(label='session id')
        docs_input = gr.File(file_count="multiple", file_types=[".txt", ".pdf", ".zip", ".docx"])
        db_output = gr.File(label="Download zipped database")
        btn_generate_db = gr.Button("Generate database")
        btn_reset_db = gr.Button("Reset database")
        df_qna = gr.Dataframe(interactive=True, datatype="markdown")
        with gr.Row():
            btn_clear_df = gr.Button("Clear df")
            btn_fill_answers = gr.Button("Fill table with generated answers")
        with gr.Accordion("Export dataframe", open=False):
            with gr.Row():
                btn_export_df = gr.Button("Export df as", scale=1)
                r_format = gr.Radio(["xlsx", "pkl", "csv"], label="File type", value="xlsx", scale=2)
                file_df = gr.File(scale=1)

    btn_clear_df.click(update_df, inputs=[tb_session_id], outputs=df_qna)
    btn_fill_answers.click(ask_df, inputs=[df_qna, apikey_input, dd_model, tb_session_id], outputs=df_qna)
    btn_export_df.click(export_df, inputs=[df_qna, r_format], outputs=[file_df])

    with gr.Tab("Summarize PDF"):
        with gr.Column():
            summary_output = gr.Textbox(label='Summarized files')
            btn_summary = gr.Button("Summarize")

    with gr.Tab("Ask PDF"):
        with gr.Column():
            query_input = gr.Textbox(placeholder="Type your question", label="Question")
            btn_askGPT = gr.Button("Answer")
            answer_output = gr.Textbox(label='GPT 3.5 answer')
            sources = gr.Textbox(label='Sources')
            history = gr.Textbox(label='History')

    topic_input.submit(search_docs, inputs=[topic_input, max_files], outputs=dd_documents)
    btn_search.click(search_docs, inputs=[topic_input, max_files], outputs=dd_documents)
    # add_to_db returns (zip path, session id, dataframe); outputs must match.
    btn_dl.click(add_to_db, inputs=[dd_documents, tb_session_id], outputs=[db_output, tb_session_id, df_qna])
    btn_export.click(export_files, inputs=dd_documents, outputs=docs_input)
    btn_generate_db.click(embed_files, inputs=[docs_input, tb_session_id], outputs=[db_output, tb_session_id, df_qna])
    btn_reset_db.click(reset_database, inputs=[tb_session_id], outputs=[db_output])
    btn_summary.click(summarize_docs, inputs=[apikey_input, tb_session_id], outputs=summary_output)
    btn_askGPT.click(ask_gpt, inputs=[query_input, apikey_input, history, tb_session_id], outputs=[answer_output, sources, history])


demo.launch(debug=False, share=False)