v0.0.6 sidebar
Browse files
- app.py +41 -21
- kron/llm_predictor/KronBasetenCamelLLM.py +1 -0
- kron/persistence/dynamodb_request_log.py +1 -1
app.py
CHANGED
@@ -3,6 +3,7 @@ import streamlit as st
|
|
3 |
import os
|
4 |
import re
|
5 |
import sys
|
|
|
6 |
import logging
|
7 |
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
|
8 |
logger = logging.getLogger(__name__)
|
@@ -10,7 +11,7 @@ logger = logging.getLogger(__name__)
|
|
10 |
from dotenv import load_dotenv
|
11 |
load_dotenv()
|
12 |
|
13 |
-
os.environ['AWS_DEFAULT_REGION'] = 'us-west-2'
|
14 |
|
15 |
for key in st.session_state.keys():
|
16 |
#del st.session_state[key]
|
@@ -40,6 +41,39 @@ def set_baseten_key(bs_api_key):
|
|
40 |
|
41 |
set_baseten_key(bs_api_key)
|
42 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
43 |
from llama_index import StorageContext
|
44 |
from llama_index import ServiceContext
|
45 |
from llama_index import load_index_from_storage
|
@@ -188,11 +222,11 @@ def clear_question(query_model):
|
|
188 |
st.session_state.question_input = ''
|
189 |
st.session_state.question_answered = False
|
190 |
st.session_state.answer = ''
|
|
|
191 |
st.session_state.prev_model = query_model
|
192 |
|
193 |
|
194 |
initial_query = ''
|
195 |
-
#st.session_state.prev_model = None
|
196 |
|
197 |
if 'question' not in st.session_state:
|
198 |
st.session_state.question = ''
|
@@ -249,15 +283,10 @@ def submit():
|
|
249 |
st.session_state.question_input = ''
|
250 |
st.session_state.question_answered = False
|
251 |
|
252 |
-
#def submit_rating(query_model, req, resp):
|
253 |
-
# print(f'query model {query_model}')
|
254 |
-
# if 'answer_rating' in st.session_state:
|
255 |
-
# print(f'rating {st.session_state.answer_rating}')
|
256 |
|
257 |
st.write(f'Model, question, answer and rating are logged to help with the improvement of this application.')
|
258 |
question = st.text_input("Enter a question, e.g. What benchmarks can we use for QA?", key='question_input', on_change=submit )
|
259 |
|
260 |
-
# answer_str = None
|
261 |
if(st.session_state.question):
|
262 |
col1, col2 = st.columns([2, 2])
|
263 |
with col1:
|
@@ -277,25 +306,16 @@ if(st.session_state.question):
|
|
277 |
st.write(f' Please rate this answer.')
|
278 |
with col2:
|
279 |
from streamlit_star_rating import st_star_rating
|
280 |
-
stars = st_star_rating("", maxValue=5, defaultValue=3, key="answer_rating"
|
281 |
-
|
282 |
-
# on_change = submit_rating(query_model, st.session_state.question, answer_str)
|
283 |
-
)
|
284 |
-
print(f"------stars {stars}")
|
285 |
except Exception as e:
|
286 |
-
print(e)
|
287 |
-
answer_str =
|
288 |
st.session_state.answer_rating = -1
|
|
|
289 |
finally:
|
290 |
if 'question' in st.session_state:
|
291 |
req = st.session_state.question
|
292 |
-
#st.session_state.question = ''
|
293 |
if(__spaces__):
|
294 |
-
#request_log = get_request_log()
|
295 |
st.session_state.request_log.add_request_log_entry(query_model, req, answer_str, st.session_state.answer_rating)
|
296 |
|
297 |
-
# if "answer_rating" in st.session_state:
|
298 |
-
# if(__spaces__):
|
299 |
-
# print('time to log the rating')
|
300 |
-
# #request_log = get_request_log()
|
301 |
-
# st.session_state.request_log.add_request_log_entry(query_model, req, answer_str, st.session_state.answer_rating)
|
|
|
3 |
import os
|
4 |
import re
|
5 |
import sys
|
6 |
+
import base64
|
7 |
import logging
|
8 |
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
|
9 |
logger = logging.getLogger(__name__)
|
|
|
11 |
from dotenv import load_dotenv
|
12 |
load_dotenv()
|
13 |
|
14 |
+
#os.environ['AWS_DEFAULT_REGION'] = 'us-west-2'
|
15 |
|
16 |
for key in st.session_state.keys():
|
17 |
#del st.session_state[key]
|
|
|
41 |
|
42 |
set_baseten_key(bs_api_key)
|
43 |
|
44 |
+
def autoplay_video(video_path):
|
45 |
+
with open(video_path, "rb") as f:
|
46 |
+
video_content = f.read()
|
47 |
+
|
48 |
+
video_str = f"data:video/mp4;base64,{base64.b64encode(video_content).decode()}"
|
49 |
+
st.markdown(f"""
|
50 |
+
<video style="display: block; margin: auto; width: 140px;" controls loop autoplay width="140" height="180">
|
51 |
+
<source src="{video_str}" type="video/mp4">
|
52 |
+
</video>
|
53 |
+
""", unsafe_allow_html=True)
|
54 |
+
|
55 |
+
# sidebar
|
56 |
+
with st.sidebar:
|
57 |
+
st.header('KG Questions')
|
58 |
+
video, text = st.columns([2, 2])
|
59 |
+
with video:
|
60 |
+
autoplay_video('docs/images/kg_construction.mp4')
|
61 |
+
with text:
|
62 |
+
st.write(
|
63 |
+
f'''
|
64 |
+
###### The construction of a Knowledge Graph is mesmerizing.
|
65 |
+
###### Concepts in the middle are what most are doing. Are we considering anything different? Why? Why not?
|
66 |
+
###### Concepts on the edge are what few are doing. Are we considering that? Why? Why not?
|
67 |
+
'''
|
68 |
+
)
|
69 |
+
st.write(
|
70 |
+
f'''
|
71 |
+
#### How can <what most are doing> help with <what few are doing>?
|
72 |
+
''')
|
73 |
+
|
74 |
+
|
75 |
+
|
76 |
+
|
77 |
from llama_index import StorageContext
|
78 |
from llama_index import ServiceContext
|
79 |
from llama_index import load_index_from_storage
|
|
|
222 |
st.session_state.question_input = ''
|
223 |
st.session_state.question_answered = False
|
224 |
st.session_state.answer = ''
|
225 |
+
st.session_state.answer_rating = 3
|
226 |
st.session_state.prev_model = query_model
|
227 |
|
228 |
|
229 |
initial_query = ''
|
|
|
230 |
|
231 |
if 'question' not in st.session_state:
|
232 |
st.session_state.question = ''
|
|
|
283 |
st.session_state.question_input = ''
|
284 |
st.session_state.question_answered = False
|
285 |
|
|
|
|
|
|
|
|
|
286 |
|
287 |
st.write(f'Model, question, answer and rating are logged to help with the improvement of this application.')
|
288 |
question = st.text_input("Enter a question, e.g. What benchmarks can we use for QA?", key='question_input', on_change=submit )
|
289 |
|
|
|
290 |
if(st.session_state.question):
|
291 |
col1, col2 = st.columns([2, 2])
|
292 |
with col1:
|
|
|
306 |
st.write(f' Please rate this answer.')
|
307 |
with col2:
|
308 |
from streamlit_star_rating import st_star_rating
|
309 |
+
stars = st_star_rating("", maxValue=5, defaultValue=3, key="answer_rating")
|
310 |
+
#print(f"------stars {stars}")
|
|
|
|
|
|
|
311 |
except Exception as e:
|
312 |
+
#print(f'{type(e)}, {e}')
|
313 |
+
answer_str = f'{type(e)}, {e}'
|
314 |
st.session_state.answer_rating = -1
|
315 |
+
st.write(f'An error occurred, please try again. \n{answer_str}')
|
316 |
finally:
|
317 |
if 'question' in st.session_state:
|
318 |
req = st.session_state.question
|
|
|
319 |
if(__spaces__):
|
|
|
320 |
st.session_state.request_log.add_request_log_entry(query_model, req, answer_str, st.session_state.answer_rating)
|
321 |
|
|
|
|
|
|
|
|
|
|
kron/llm_predictor/KronBasetenCamelLLM.py
CHANGED
@@ -27,6 +27,7 @@ class KronBasetenCamelLLM(Baseten):
|
|
27 |
model = baseten.deployed_model_id(self.model)
|
28 |
response = model.predict({"instruction": prompt, **kwargs})
|
29 |
|
|
|
30 |
response_txt = response['completion']
|
31 |
#print(f'\n********{response_txt}')
|
32 |
return response_txt
|
|
|
27 |
model = baseten.deployed_model_id(self.model)
|
28 |
response = model.predict({"instruction": prompt, **kwargs})
|
29 |
|
30 |
+
print(f'baseten response: {response}')
|
31 |
response_txt = response['completion']
|
32 |
#print(f'\n********{response_txt}')
|
33 |
return response_txt
|
kron/persistence/dynamodb_request_log.py
CHANGED
@@ -12,7 +12,7 @@ session = boto3.Session(
|
|
12 |
# aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
|
13 |
)
|
14 |
|
15 |
-
logger.
|
16 |
dynamodb = session.resource('dynamodb')
|
17 |
|
18 |
class RequestLog:
|
|
|
12 |
# aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
|
13 |
)
|
14 |
|
15 |
+
logger.debug(f'region name {session.region_name}')
|
16 |
dynamodb = session.resource('dynamodb')
|
17 |
|
18 |
class RequestLog:
|