## Setup

# Import the necessary libraries
import os
import json
import uuid
import joblib
import tiktoken
import pandas as pd
import gradio as gr
from pathlib import Path
from openai import OpenAI
from dotenv import load_dotenv
from huggingface_hub import CommitScheduler
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyPDFDirectoryLoader
from langchain_community.embeddings.sentence_transformer import (
    SentenceTransformerEmbeddings
)
from langchain_community.vectorstores import Chroma
# Create the client
# Optionally load the API key from a .env file:
# load_dotenv()
client = OpenAI(
    base_url="https://api.endpoints.anyscale.com/v1",
    api_key=os.environ['Anyscale_Colab_key2']
)
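# Note: the Anyscale Endpoints API is OpenAI-compatible, so the standard
# OpenAI client works against it once base_url and api_key are set.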
# Define the embedding model and the vectorstore
embedding_model = SentenceTransformerEmbeddings(model_name='thenlper/gte-large')

collection_name = 'collection'

# Load the persisted vector DB
vectorstore_persisted = Chroma(
    collection_name=collection_name,
    persist_directory='./proj3_db',
    embedding_function=embedding_model
)

retriever = vectorstore_persisted.as_retriever(
    search_type='similarity',
    search_kwargs={'k': 5}
)
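# A minimal sketch of querying the retriever directly (assumes the persisted
# DB in ./proj3_db was populated beforehand; predict() below instead calls
# similarity_search with a per-company metadata filter):
# docs = retriever.get_relevant_documents("What was the total revenue in 2023?")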
# Prepare the logging functionality
log_file = Path("logs/") / f"data_{uuid.uuid4()}.json"
log_folder = log_file.parent

scheduler = CommitScheduler(
    repo_id="mgchavez/Finsights_Grey",
    repo_type="dataset",
    folder_path=log_folder,
    path_in_repo="data",
    every=2
)
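# CommitScheduler pushes the contents of `log_folder` to the dataset repo in
# the background; `every=2` means roughly every 2 minutes. Writers should hold
# `scheduler.lock` (as done in predict() below) so a commit never captures a
# half-written log line.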
# Define the Q&A system message
qna_system_message = """
User input will have the context required by you to answer user questions.
This context will begin with the token: ###Context
The context contains references to specific portions of a document relevant to the user query.
User questions will begin with the token: ###Question
Please answer only using the context provided in the input. Do not mention anything about the context in your final answer.
If the answer is not found in the context, respond "I don't know".
"""
# Define the user message template
qna_user_message_template = """
###Context
Here are some documents that are relevant to the question mentioned below.
{context}

###Question
{question}
"""
# Define the predict function that runs when 'Submit' is clicked or when an API request is made
def predict(user_input, company):
    # Each company's 10-K report is stored as a separate PDF in the dataset folder
    source_filter = "dataset/" + company + "-10-k-2023.pdf"
    relevant_document_chunks = vectorstore_persisted.similarity_search(
        user_input,
        k=5,
        filter={"source": source_filter}
    )

    # Create context_for_query from the page content of the retrieved chunks
    context_for_query = ". ".join(
        [doc.page_content for doc in relevant_document_chunks]
    )

    # Create messages
    prompt = [
        {'role': 'system', 'content': qna_system_message},
        {'role': 'user', 'content': qna_user_message_template.format(
            context=context_for_query,
            question=user_input
        )}
    ]

    # model_name = 'mlabonne/NeuralHermes-2.5-Mistral-7B'
    model_name = 'mistralai/Mixtral-8x7B-Instruct-v0.1'

    # Get response from the LLM
    try:
        response = client.chat.completions.create(
            model=model_name,
            messages=prompt,
            temperature=0
        )
        prediction = response.choices[0].message.content.strip()
    except Exception as e:
        prediction = f'Sorry, I encountered the following error: \n {e}'

    # Once the prediction is made, log both the inputs and outputs to the local log file.
    # While writing to the log file, hold the commit scheduler's lock to avoid
    # parallel access.
    with scheduler.lock:
        with log_file.open("a") as f:
            f.write(json.dumps(
                {
                    'user_input': user_input,
                    'retrieved_context': context_for_query,
                    'model_response': prediction
                }
            ))
            f.write("\n")

    return prediction
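# A minimal sketch of invoking predict() directly, outside the UI. The
# question text is illustrative; it assumes dataset/Meta-10-k-2023.pdf was
# indexed into the vector DB:
# print(predict("What was Meta's total revenue in 2023?", "Meta"))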
# Set up the Gradio UI
# Add a text box and a radio button to the interface
# The radio button selects the company 10-K report from which the context is retrieved
lst_companies = ['aws', 'google', 'IBM', 'Meta', 'msft']

textbox = gr.Textbox(label='User input')
company = gr.Radio(lst_companies, label='Company')
model_output = gr.Label(label='Model response')

# Create the interface
# For the inputs parameter of Interface, provide [textbox, company]
demo = gr.Interface(
    fn=predict,
    inputs=[textbox, company],
    outputs=model_output,
    title="Finsights Grey: 10-K Q&A",
    description="This API answers questions grounded in the selected company's 10-K filing.",
    allow_flagging="auto",
    concurrency_limit=8
)

demo.queue()
demo.launch()