Commit a6a4233
Parent(s): 662562e
feat: logging

Files changed:
- app.py +16 -1
- requirements.txt +1 -0
- utils.py +1 -1
- utils_logging.py +28 -0

app.py CHANGED

@@ -1,3 +1,5 @@
+import uuid
+
 import streamlit as st
 from annotated_text import annotated_text
 import pymongo
@@ -11,6 +13,8 @@ from utils import (
 )
 from openai import OpenAI
 
+from utils_logging import send_log, make_log
+
 from const import (
     LIMITATIONS,
     GOAL,
@@ -51,6 +55,7 @@ if "login" not in st.session_state:
     res = col.find_one({"user_token": st.session_state.user_token})
     if res:
         st.session_state.login = True
+        st.session_state.id = str(uuid.uuid4())
     else:
         reject_login()
 
@@ -89,6 +94,11 @@ if clicked:
     job_ad_split, metrics = highlight_text(job_ad_field, st.session_state.data)
     indices = get_indices(metrics)
     openai_params = construct_params(job_ad_field)
+    st.session_state.openai_params = openai_params
+    st.session_state.job_ad = job_ad_field
+    st.session_state.metrics = metrics
+    st.session_state.job_ad_split = job_ad_split
+    st.session_state.indices = indices
 
     with col2:
         with st.container(border=True):
@@ -128,9 +138,14 @@ if clicked:
 
     with placeholder:
        with st.spinner("Performing analysis..."):
-            response = get_response(openai_params)
+            response, usage = get_response(openai_params)
+            st.session_state.response = response
+            st.session_state.usage = usage
            with st.container():
                cleaned_response = process_response(response)
+                st.session_state.cleaned_response = cleaned_response
+                send_log(make_log(st.session_state))
+                #print([choice.message.content for choice in st.session_state.response])
 
                minicols2[0].metric("Diversity Statement", cleaned_response["answer_4"], None)
                minicols2[1].metric("Generation Appeal", cleaned_response["answer_6"], None)

requirements.txt CHANGED

@@ -3,3 +3,4 @@ openai
 pymongo
 st-annotated-text
 tiktoken
+uuid

utils.py CHANGED

@@ -42,7 +42,7 @@ def get_response(params):
         st.error("We are sorry, we are unable to connect to our AI provider.")
         return []
 
-    return list(comp.choices)
+    return list(comp.choices), dict(comp.usage)
 
 
 def process_response(choices):

utils_logging.py ADDED

@@ -0,0 +1,28 @@
+import pymongo
+import streamlit as st
+import datetime
+
+def make_log(session_state):
+    log = {
+        "user_token": session_state.user_token,
+        "date": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+        "id": session_state.id,
+        "job_ad": session_state.job_ad,
+        "metrics": session_state.metrics,
+        "indices": session_state.indices,
+        "job_ad_split": session_state.job_ad_split,
+        "ai": {"params": session_state.openai_params,
+               "openai_response": [choice.message.content for choice in session_state.response],
+               "cleaned_response": session_state.cleaned_response,
+               "usage": session_state.usage},
+    }
+    return log
+
+
+def send_log(log):
+    try:
+        client = pymongo.MongoClient(st.secrets["mongo_login"])
+        col = client["Fairplay"]["logging"]
+        col.insert_one(log)
+    except Exception as e:
+        st.toast("Had an issue contacting our server...")