Update app.py
app.py
CHANGED
@@ -22,19 +22,20 @@ client = OpenAI(
     api_key=os.environ['anyscale_api_key']
 )
 
-model_name = 'mlabonne/NeuralHermes-2.5-Mistral-7B'
+#model_name = 'mlabonne/NeuralHermes-2.5-Mistral-7B'
+model_name = 'mistralai/Mixtral-8x7B-Instruct-v0.1'
 
 # Define the embedding model and the vectorstore
 embedding_model_name = 'thenlper/gte-large'
 embedding_model = SentenceTransformerEmbeddings(model_name=embedding_model_name)
 collection_name_qna = 'report_10K_db'
-persisted_vectordb_location = 'report_10K_db'
+persisted_vectordb_location = './report_10K_db'
 
 # Load the persisted vectorDB
 
 vectorstore_persisted = Chroma(
-    collection_name=
-    persist_directory=
+    collection_name=collection_name_qna,
+    persist_directory=persisted_vectordb_location,
     embedding_function=embedding_model
 )
 
@@ -167,8 +168,8 @@ demo = gr.Interface(
     fn=predict,
     inputs=[textbox,company],
     outputs="text",
-    title="
-    description="
+    title="Insights from 10-K reports",
+    description="AI for extraction, summarization, and analysis of information from the 10-K reports",
     allow_flagging="auto",
     concurrency_limit=12
 )
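Taken together, the changed lines switch the chat model to Mixtral-8x7B-Instruct, point the persisted Chroma collection at an explicit local directory, and complete the previously unfinished Chroma(...) call. A minimal sketch of how these pieces plausibly fit together in app.py follows; the import paths, the Anyscale base_url, and the body of predict are not shown in the diff and are assumptions for illustration only.

import os

from langchain_community.embeddings import SentenceTransformerEmbeddings
from langchain_community.vectorstores import Chroma
from openai import OpenAI

# OpenAI-compatible client for Anyscale Endpoints (base_url is an assumption;
# only the api_key line is visible in the diff above).
client = OpenAI(
    base_url='https://api.endpoints.anyscale.com/v1',
    api_key=os.environ['anyscale_api_key'],
)
model_name = 'mistralai/Mixtral-8x7B-Instruct-v0.1'

# Load the persisted Chroma collection with the same embedding model used at indexing time.
embedding_model = SentenceTransformerEmbeddings(model_name='thenlper/gte-large')
vectorstore_persisted = Chroma(
    collection_name='report_10K_db',
    persist_directory='./report_10K_db',
    embedding_function=embedding_model,
)

def predict(question, company):
    # Hypothetical predict body: retrieve the chunks most relevant to the
    # question and ask the model to answer from that context only.
    docs = vectorstore_persisted.similarity_search(f'{company}: {question}', k=5)
    context = '\n\n'.join(d.page_content for d in docs)
    response = client.chat.completions.create(
        model=model_name,
        messages=[
            {'role': 'system', 'content': 'Answer using only the provided 10-K context.'},
            {'role': 'user', 'content': f'Context:\n{context}\n\nQuestion: {question}'},
        ],
    )
    return response.choices[0].message.content

The gr.Interface(...) block in the second hunk then exposes predict with the new title and description added by this commit.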