Commit · df0d9e0
1 Parent(s): fe62e29
feature: Minor edits, transition to 4o
app.py
CHANGED
@@ -1,9 +1,6 @@
 import streamlit as st
 from annotated_text import annotated_text
 import pymongo
-
-# import language_tool_python
-import re
 from utils import (
     load_files,
     highlight_text,
@@ -13,15 +10,15 @@ from utils import (
     process_response,
 )
 from openai import OpenAI
-import json

 from const import (
     LIMITATIONS,
     GOAL,
     femi_color,
-    masc_color,
     femi_pro_color,
     masc_pro_color,
+    ageism_color,
+    ableism_color,
 )


@@ -60,7 +57,7 @@ if "login" not in st.session_state:
 else:
     reject_login()

-cols = st.columns([
+cols = st.columns([2, 3, 2])

 with cols[1]:
     st.header("Fairplay: bias advisor / attractiveness rating in job ads")
@@ -68,26 +65,31 @@ with cols[1]:
     for line in LIMITATIONS:
         st.write(line)

+    st.divider()
+
     col1, col2, col3 = st.columns([1, 3, 1])
     with col2:
+        st.write("**Please enter your job ad text below** [including a job title]:")
         job_ad_field = st.text_area(
             label="Job ad text",
             value="Please paste here your input",
             max_chars=10000,
             key="input",
             height=500,
+            label_visibility="collapsed",
         )
         clicked = st.button("Analyze", key="analyze")

 if clicked:
     st.divider()
-
-    col1, col2, col3 = st.columns([1,
+
+    col1, col2, col3 = st.columns([1, 2, 1])
     job_ad_split, metrics = highlight_text(job_ad_field, st.session_state.data)
     indices = get_indices(metrics)
     openai_params = construct_params(job_ad_field)

     with col2:
+        st.header("Results:")
         with st.container(border=True):
             minicols = st.columns([1, 1, 1, 2, 4])

@@ -97,7 +99,7 @@ if clicked:

     placeholder = st.empty()

-    col1, col2, col3 = st.columns([1,
+    col1, col2, col3 = st.columns([1, 2, 1])

     with col2:
         with st.container(border=True):
@@ -105,20 +107,18 @@ if clicked:
     with col3:
         with st.container(border=True):
             st.subheader("Color explanations:")
-            st.write("**Misspelled**")
-            annotated_text(("Misspelled", "", "#cccccc"))
             st.write(
                 "**Gendered Language** according to [https://psycnet.apa.org/record/2011-04642-001]"
             )
-            annotated_text(("
-            annotated_text(("Female root", "F", femi_pro_color))
+            annotated_text(("Gender significant words", "F/M", femi_color))
             st.write("**Gendered pronouns**")
-            annotated_text(("Male Pronoun", "",
-
-
-
-
-
+            annotated_text(("Female Pronoun", "", femi_pro_color), "/", ("Male Pronoun", "", masc_pro_color))
+            st.write("**Age discriminatory language** [In the near future]")
+            annotated_text(("Ageism", "", ageism_color))
+            st.write("**Ableist discriminatory language** [In the near future]")
+            annotated_text(("Ableism", "", ableism_color))
+            st.write("**Misspelled** [In the near future]")
+            annotated_text(("Misspelled", "", "#cccccc"))

     with placeholder.container(border=True):
         with st.spinner("Wait for it..."):
@@ -128,30 +128,29 @@ if clicked:

             minicols[2].metric("Diversity Statement", cleaned_response["answer_4"], None)
             minicols[3].metric("Target Age Group", cleaned_response["answer_6"], None)
-
+            # Gendered jobtitles
             st.write(f"**Presence of gendered jobtitles:** {cleaned_response['answer_3']}")
             if cleaned_response["answer_3"] == "Yes":
                 st.write(cleaned_response["longer_answer_3"])
-
+            # Explicit length of experience
             st.write(f"**Explicit strict requirements:** {cleaned_response['answer_1']}")
             if cleaned_response["answer_1"] == "Yes":
                 st.write(f'\t{cleaned_response["longer_answer_1"]}')
-
+            # Explicit length of experience
             st.write(f"**Family life challenges:** {cleaned_response['answer_2']}")
             if cleaned_response["answer_2"] == "Yes":
                 st.write(f'\t{cleaned_response["longer_answer_2"]}')

-
+            # Text level equivalent
             st.write(f"**Text level equivalent:** {cleaned_response['answer_5']}")

-
+            # Target Age group
             st.write(f"**Target Age group:** {cleaned_response['longer_answer_6']}")

-
+            # Diversity statement
             if cleaned_response["answer_4"] == "No":
                 st.write(
                     f"\t**Diversity statement**: This ad could benefit from a diversity statement."
                 )
-
-
-            st.write(cleaned_response["short_report"])
+            # Short report
+            st.write(f'**Short report:** {cleaned_response["short_report"]}')
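For reference, a minimal standalone sketch of the layout pattern this commit settles on: `st.columns` with weight ratios such as `[1, 2, 1]` leaves empty gutter columns so content in the middle column appears centered, and `label_visibility="collapsed"` hides the text area's built-in label now that a separate `st.write` line introduces it. Only the column ratios, widget arguments, and texts shown in the diff are taken from the commit; the rest is an illustrative, self-contained assumption.

```python
import streamlit as st

# 1:2:1 weights -> empty side columns, content centered in the middle column
col1, col2, col3 = st.columns([1, 2, 1])
with col2:
    # the bold prompt replaces the widget's own label, which is collapsed below
    st.write("**Please enter your job ad text below** [including a job title]:")
    job_ad_field = st.text_area(
        label="Job ad text",            # still set for accessibility, but hidden
        value="Please paste here your input",
        max_chars=10000,
        height=500,
        label_visibility="collapsed",
    )
    clicked = st.button("Analyze", key="analyze")
```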
const.py
CHANGED
@@ -12,5 +12,7 @@ LIMITATIONS = [

 masc_color = "#f6cefc"
 femi_color = "#f6cefc"
-masc_pro_color = "#
-femi_pro_color = "#
+masc_pro_color = "#FFAC1C"
+femi_pro_color = "#FFAC1C"
+ageism_color = "#ADD8E6"
+ableism_color = "#E97451"
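The new constants are plain hex background colors consumed by the color legend in app.py. A small hedged sketch of how such legend chips render with the st-annotated-text component (constant names and values are from this diff; the surrounding app and installed packages are assumptions):

```python
import streamlit as st
from annotated_text import annotated_text

# values introduced in this commit
masc_pro_color = "#FFAC1C"
femi_pro_color = "#FFAC1C"
ageism_color = "#ADD8E6"
ableism_color = "#E97451"

# each tuple is (text, label, background color); plain strings pass through as-is
annotated_text(("Female Pronoun", "", femi_pro_color), "/", ("Male Pronoun", "", masc_pro_color))
annotated_text(("Ageism", "", ageism_color))
annotated_text(("Ableism", "", ableism_color))
```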
utils.py
CHANGED
@@ -26,8 +26,8 @@ def _construct_message(job_ad_text) -> list:
 def construct_params(job_ad_text) -> dict:
     messages = _construct_message(job_ad_text)
     return {
-        "model": "gpt-3.5-turbo-0125",
-
+        #"model": "gpt-3.5-turbo-0125",
+        "model": "gpt-4o",
         "messages": messages,
         "response_format": {"type": "json_object"},
         "n": 1,
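The only functional change here is the model swap from gpt-3.5-turbo-0125 to gpt-4o; the dict returned by construct_params is presumably unpacked into a chat-completions call elsewhere. A hedged sketch of that call with the openai 1.x client (the actual call site, message contents, and client setup are not part of this diff and are assumptions):

```python
import json
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# shape of the params dict built by construct_params (messages here are placeholders)
params = {
    "model": "gpt-4o",
    "messages": [
        {"role": "system", "content": "You rate job ads. Respond in JSON."},
        {"role": "user", "content": "Analyze this job ad: ..."},
    ],
    "response_format": {"type": "json_object"},  # requires 'JSON' to be mentioned in the prompt
    "n": 1,
}

response = client.chat.completions.create(**params)
cleaned_response = json.loads(response.choices[0].message.content)
```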