Spaces:
Runtime error
Runtime error
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Streamlit demo: capture a webcam photo, detect faces with a Haar cascade,
# and predict age, gender and emotion for each face using three pretrained
# Keras models loaded from local .h5 files.
from keras.models import load_model
from PIL import Image
import numpy as np
import cv2

# the following are to do with this interactive notebook code
from matplotlib import pyplot as plt  # this lets you draw inline pictures in the notebooks
import pylab  # this allows you to control figure size
pylab.rcParams['figure.figsize'] = (10.0, 8.0)  # this controls figure size in the notebook

###loading model###
age_model = load_model('age_model_pretrained.h5')
gender_model = load_model('gender_model_pretrained.h5')
emotion_model = load_model('emotion_model_pretrained.h5')

# Labels on Age, Gender and Emotion to be predicted
age_ranges = ['1-5', '6-10', '11-15', '16-20', '21-25', '26-30', '31-35', '36-40', '41-50', '51-60', '61-70', '71-80', '81-90']
gender_ranges = ['male', 'female']
emotion_ranges = ['positive', 'negative', 'neutral']

############################
import io  # NOTE(review): unused here, kept in case another revision reads raw bytes
import streamlit as st

img_file_buffer = st.camera_input("Take a picture")
if img_file_buffer is not None:
    # Force 3-channel RGB: camera frames may arrive as RGBA or greyscale,
    # which would make the cvtColor call below fail.
    test_image = Image.open(img_file_buffer).convert('RGB')
    st.image(test_image, use_column_width=True)
    st.write(type(test_image))
    test_image = np.asarray(test_image)  # RGB ndarray of shape (H, W, 3)

    # BUG FIX: PIL delivers RGB, not BGR — the original COLOR_BGR2GRAY code
    # applied the luminance weights to the wrong channels.
    gray = cv2.cvtColor(test_image, cv2.COLOR_RGB2GRAY)
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    # One pass per detected face; i is the 1-based face label drawn on the image.
    for i, (x, y, w, h) in enumerate(faces, start=1):
        cv2.rectangle(test_image, (x, y), (x + w, y + h), (203, 12, 255), 2)

        img_gray = gray[y:y + h, x:x + w]  # greyscale face crop

        # Emotion model: 48x48 greyscale input, batch axis prepended.
        # NOTE(review): no trailing channel axis is added (shape (1, 48, 48));
        # confirm this matches the model's input spec — the age model below
        # explicitly uses (-1, 200, 200, 1).
        emotion_img = cv2.resize(img_gray, (48, 48), interpolation=cv2.INTER_AREA)
        emotion_input = np.expand_dims(np.array(emotion_img), axis=0)
        output_emotion = emotion_ranges[np.argmax(emotion_model.predict(emotion_input))]

        # Gender model: 100x100 greyscale input.
        gender_img = cv2.resize(img_gray, (100, 100), interpolation=cv2.INTER_AREA)
        gender_input = np.expand_dims(np.array(gender_img), axis=0)
        output_gender = gender_ranges[np.argmax(gender_model.predict(gender_input))]

        # Age model: 200x200 greyscale input with an explicit channel axis.
        age_image = cv2.resize(img_gray, (200, 200), interpolation=cv2.INTER_AREA)
        age_input = age_image.reshape(-1, 200, 200, 1)
        output_age = age_ranges[np.argmax(age_model.predict(age_input))]

        output_str = str(i) + ": " + output_gender + ', ' + output_age + ', ' + output_emotion
        st.write(output_str)

        col = (0, 255, 0)
        cv2.putText(test_image, str(i), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, col, 2)

    # BUG FIX: the array is already RGB, so the original COLOR_BGR2RGB call
    # displayed the annotated photo with red and blue swapped; show it as-is.
    st.image(test_image)
else:
    # BUG FIX: the original guarded st.stop() with `bytes_data is None`, but
    # bytes_data was initialised to None and never assigned, so the guard was
    # always true. Stop the script run only when no photo has been taken yet.
    st.stop()