Update app.py
app.py CHANGED
@@ -9,26 +9,23 @@ import plotly.express as px
 import plotly.graph_objects as go
 import streamlit as st
 import nltk
-import json
 import tempfile

-#
+# Process of getting credentials
 def get_credentials():
-    creds_json_str = os.getenv("JSONSTR")
+    creds_json_str = os.getenv("JSONSTR") # Get json credentials stored as a string
     if creds_json_str is None:
         raise ValueError("GOOGLE_APPLICATION_CREDENTIALS_JSON not found in environment")

-    #
+    # Create a temporary file
     with tempfile.NamedTemporaryFile(mode="w+", delete=False, suffix=".json") as temp:
-        temp.write(creds_json_str)
-        temp_filename = temp.name
+        temp.write(creds_json_str) # Write in json format
+        temp_filename = temp.name

     return temp_filename
-
-# pass
-os.environ["GOOGLE_APPLICATION_CREDENTIALS"]= get_credentials()
-

+# Set the credentials
+os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = get_credentials()

 max_seq_length = 2048
 dtype = None
@@ -42,7 +39,6 @@ except LookupError:

 text_split_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')

-
 def predict_custom_trained_model_sample(
     project: str,
     endpoint_id: str,
@@ -50,10 +46,6 @@ def predict_custom_trained_model_sample(
     location: str = "us-east4",
     api_endpoint: str = "us-east4-aiplatform.googleapis.com",
 ) -> List[str]:
-    """
-    `instances` can be either single instance of type dict or a list
-    of instances.
-    """
     client_options = {"api_endpoint": api_endpoint}
     client = aiplatform.gapic.PredictionServiceClient(client_options=client_options)
     instances = instances if isinstance(instances, list) else [instances]
@@ -76,36 +68,30 @@ def predict_custom_trained_model_sample(
             split_predictions = clean_prediction.split()
             predictions_list.extend(split_predictions)
         else:
-            print("
+            print("prediction (unknown type, skipping):", prediction)
     return [emotion for emotion in predictions_list if emotion in d_emotion.values()]

-
 d_emotion = {0: 'admiration', 1: 'amusement', 2: 'anger', 3: 'annoyance', 4: 'approval', 5: 'caring', 6: 'confusion',
              7: 'curiosity', 8: 'desire', 9: 'disappointment', 10: 'disapproval', 11: 'disgust', 12: 'embarrassment',
              13: 'excitement', 14: 'fear', 15: 'gratitude', 16: 'grief', 17: 'joy', 18: 'love', 19: 'nervousness',
              20: 'optimism', 21: 'pride', 22: 'realization', 23: 'relief', 24: 'remorse', 25: 'sadness', 26: 'surprise',
              27: 'neutral'}

-st.write(" ")
-
-
+st.write("Write or paste any number of document texts to analyze the emotion percentage within your document")
+
+# Define user_input outside the conditional block
+user_input = ""

 # Add button to fill in sample text
 if st.button("Use Sample Text"):
-    user_input =
+    user_input = "Once, in a small village nestled in the rolling hills of Tuscany, lived an elderly woman named Isabella. She had spent her entire life in this village, raising her children and caring for her garden, which was the most beautiful in the region. Her husband, Marco, had passed away many years ago, leaving her with a heart full of memories and a small, quaint house that overlooked the lush vineyards."
 else:
     user_input = st.text_area('Enter Text to Analyze')

-
-# user_input = st.text_input(label, value=ur_input, height=None, max_chars=None, key=None, help=None, on_change=None, args=None, kwargs=None, *, placeholder=None, disabled=False, label_visibility="visible")
-
 button = st.button("Analyze")

-
-
-
 if user_input and button:
-    alpaca_prompt = """Below is a conversation between a human and an AI agent.
+    alpaca_prompt = """Below is a conversation between a human and an AI agent. Write a response based on the input.
 ### Instruction:
 predict the emotion word or words
 ### Input:
@@ -156,11 +142,9 @@ if user_input and button:

     fig_bar = get_emotion_chart(predictions)

-
     @st.cache_data
     def get_emotion_heatmap(predictions):
         # Create a matrix for heatmap
-
         # Count occurrences of each emotion
         emotion_counts = pd.Series(predictions).value_counts().reset_index()
         emotion_counts.columns = ['Emotion', 'Count']
@@ -179,15 +163,13 @@ if user_input and button:
         ))
         fig.update_layout(title='Emotion Heatmap', xaxis_title='Predicted Emotion', yaxis_title='Predicted Emotion')
         return fig
-
-
-
-    tab1, tab2, tab3
+
+    fig_heatmap = get_emotion_heatmap(predictions)
+
+    tab1, tab2, tab3 = st.tabs(["Emotion Analysis", "Emotion Counts Distribution", "Heatmap"])
     with tab1:
         st.plotly_chart(fig_pie)
     with tab2:
         st.plotly_chart(fig_bar)
     with tab3:
-        st.plotly_chart(
-
-
+        st.plotly_chart(fig_heatmap)
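For orientation, here is a minimal sketch of how the pieces touched by this commit could be exercised outside the Streamlit UI. Everything in it is illustrative: the project number, endpoint ID, and payload shape are placeholders, and the parameter between endpoint_id and location is hidden by the diff (the function body and the removed docstring suggest it is named instances), so treat this as an assumption-laden usage example rather than code from the repo.

# Hypothetical smoke test for the Vertex AI helper in app.py.
# All IDs and the payload shape below are placeholders, not values from the repo.
import os
from app import get_credentials, predict_custom_trained_model_sample  # importing app also runs its Streamlit calls

# app.py already sets this at import time; repeated here only to show the flow.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = get_credentials()

emotions = predict_custom_trained_model_sample(
    project="123456789012",                 # placeholder GCP project number
    endpoint_id="1234567890123456789",      # placeholder Vertex AI endpoint ID
    instances={"inputs": "I can't believe how lucky we got today!"},  # assumed parameter name and payload key
    location="us-east4",
    api_endpoint="us-east4-aiplatform.googleapis.com",
)
print(emotions)  # a list of emotion words filtered against d_emotion.values()

The temp-file pattern used by get_credentials keeps the service-account JSON out of the repo (only the JSONSTR secret is stored) while still giving the Google client library a file path to read.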