awacke1 committed
Commit f7fe1c1 · Parent: 8bf3ab6

Upload 2 files

Files changed (2)
  1. app.py +316 -0
  2. requirements.txt +6 -0
app.py ADDED
@@ -0,0 +1,316 @@
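# Gradio demo app: face analysis from a webcam, with tabs for photo/video
# capture, MediaPipe face mesh / face detection overlays, and dlib-based
# head-orientation estimation.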
import cv2
import gradio as gr
import mediapipe as mp
import dlib
import imutils
import numpy as np


mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_face_mesh = mp.solutions.face_mesh
mp_face_detection = mp.solutions.face_detection


def apply_media_pipe_face_detection(image):
    with mp_face_detection.FaceDetection(
            model_selection=1, min_detection_confidence=0.5) as face_detection:
        results = face_detection.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        if not results.detections:
            return image
        annotated_image = image.copy()
        for detection in results.detections:
            mp_drawing.draw_detection(annotated_image, detection)
        return annotated_image


def apply_media_pipe_facemesh(image):
    with mp_face_mesh.FaceMesh(
            static_image_mode=True,
            max_num_faces=1,
            refine_landmarks=True,
            min_detection_confidence=0.5) as face_mesh:
        results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        if not results.multi_face_landmarks:
            return image
        annotated_image = image.copy()
        for face_landmarks in results.multi_face_landmarks:
            mp_drawing.draw_landmarks(
                image=annotated_image,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_TESSELATION,
                landmark_drawing_spec=None,
                connection_drawing_spec=mp_drawing_styles
                .get_default_face_mesh_tesselation_style())
            mp_drawing.draw_landmarks(
                image=annotated_image,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_CONTOURS,
                landmark_drawing_spec=None,
                connection_drawing_spec=mp_drawing_styles
                .get_default_face_mesh_contours_style())
            mp_drawing.draw_landmarks(
                image=annotated_image,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_IRISES,
                landmark_drawing_spec=None,
                connection_drawing_spec=mp_drawing_styles
                .get_default_face_mesh_iris_connections_style())
        return annotated_image


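# Head-orientation estimation with dlib + OpenCV: detect the face, locate 68
# facial landmarks, match six of them to a generic 3D head model, recover the
# head's rotation/translation with cv2.solvePnP, and project 3D rectangle
# corners back into the image to draw an orientation box.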
class FaceOrientation(object):
    def __init__(self):
        self.detect = dlib.get_frontal_face_detector()
        # The 68-point landmark model is not part of this commit; it must be
        # downloaded separately and placed under model/.
        self.predict = dlib.shape_predictor("model/shape_predictor_68_face_landmarks.dat")

    def create_orientation(self, frame):
        draw_rect1 = True
        draw_rect2 = True
        draw_lines = True

        frame = imutils.resize(frame, width=800)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        subjects = self.detect(gray, 0)

        for subject in subjects:
            landmarks = self.predict(gray, subject)
            size = frame.shape

            # 2D image points. If you change the image, you need to change this vector.
            image_points = np.array([
                (landmarks.part(33).x, landmarks.part(33).y),  # Nose tip
                (landmarks.part(8).x, landmarks.part(8).y),    # Chin
                (landmarks.part(36).x, landmarks.part(36).y),  # Left eye left corner
                (landmarks.part(45).x, landmarks.part(45).y),  # Right eye right corner
                (landmarks.part(48).x, landmarks.part(48).y),  # Left mouth corner
                (landmarks.part(54).x, landmarks.part(54).y)   # Right mouth corner
            ], dtype="double")

            # 3D model points of a generic head.
            model_points = np.array([
                (0.0, 0.0, 0.0),           # Nose tip
                (0.0, -330.0, -65.0),      # Chin
                (-225.0, 170.0, -135.0),   # Left eye left corner
                (225.0, 170.0, -135.0),    # Right eye right corner
                (-150.0, -150.0, -125.0),  # Left mouth corner
                (150.0, -150.0, -125.0)    # Right mouth corner
            ])

            # Camera internals: approximate the focal length by the image width
            # and put the optical center at the image center.
            focal_length = size[1]
            center = (size[1] / 2, size[0] / 2)
            camera_matrix = np.array(
                [[focal_length, 0, center[0]],
                 [0, focal_length, center[1]],
                 [0, 0, 1]], dtype="double"
            )

            dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
            (success, rotation_vector, translation_vector) = cv2.solvePnP(
                model_points, image_points, camera_matrix, dist_coeffs)

            # Project the corners of two rectangles, one in the plane of the
            # face (z=0) and one 400 units in front of it along the model
            # z-axis, back into the image to visualize the head orientation.
            (b1, jacobian) = cv2.projectPoints(np.array([(350.0, 270.0, 0.0)]), rotation_vector,
                                               translation_vector, camera_matrix, dist_coeffs)
            (b2, jacobian) = cv2.projectPoints(np.array([(-350.0, -270.0, 0.0)]), rotation_vector,
                                               translation_vector, camera_matrix, dist_coeffs)
            (b3, jacobian) = cv2.projectPoints(np.array([(-350.0, 270.0, 0.0)]), rotation_vector,
                                               translation_vector, camera_matrix, dist_coeffs)
            (b4, jacobian) = cv2.projectPoints(np.array([(350.0, -270.0, 0.0)]), rotation_vector,
                                               translation_vector, camera_matrix, dist_coeffs)

            (b11, jacobian) = cv2.projectPoints(np.array([(450.0, 350.0, 400.0)]), rotation_vector,
                                                translation_vector, camera_matrix, dist_coeffs)
            (b12, jacobian) = cv2.projectPoints(np.array([(-450.0, -350.0, 400.0)]), rotation_vector,
                                                translation_vector, camera_matrix, dist_coeffs)
            (b13, jacobian) = cv2.projectPoints(np.array([(-450.0, 350.0, 400.0)]), rotation_vector,
                                                translation_vector, camera_matrix, dist_coeffs)
            (b14, jacobian) = cv2.projectPoints(np.array([(450.0, -350.0, 400.0)]), rotation_vector,
                                                translation_vector, camera_matrix, dist_coeffs)

            b1 = (int(b1[0][0][0]), int(b1[0][0][1]))
            b2 = (int(b2[0][0][0]), int(b2[0][0][1]))
            b3 = (int(b3[0][0][0]), int(b3[0][0][1]))
            b4 = (int(b4[0][0][0]), int(b4[0][0][1]))

            b11 = (int(b11[0][0][0]), int(b11[0][0][1]))
            b12 = (int(b12[0][0][0]), int(b12[0][0][1]))
            b13 = (int(b13[0][0][0]), int(b13[0][0][1]))
            b14 = (int(b14[0][0][0]), int(b14[0][0][1]))

            if draw_rect1:
                cv2.line(frame, b1, b3, (255, 255, 0), 10)
                cv2.line(frame, b3, b2, (255, 255, 0), 10)
                cv2.line(frame, b2, b4, (255, 255, 0), 10)
                cv2.line(frame, b4, b1, (255, 255, 0), 10)

            if draw_rect2:
                cv2.line(frame, b11, b13, (255, 255, 0), 10)
                cv2.line(frame, b13, b12, (255, 255, 0), 10)
                cv2.line(frame, b12, b14, (255, 255, 0), 10)
                cv2.line(frame, b14, b11, (255, 255, 0), 10)

            if draw_lines:
                cv2.line(frame, b11, b1, (0, 255, 0), 10)
                cv2.line(frame, b13, b3, (0, 255, 0), 10)
                cv2.line(frame, b12, b2, (0, 255, 0), 10)
                cv2.line(frame, b14, b4, (0, 255, 0), 10)

        return frame


face_orientation_obj = FaceOrientation()


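# Gradio UI: a tabbed Blocks interface that wires webcam photo/video capture,
# the MediaPipe helpers, and the dlib head-orientation estimator to buttons
# and a live stream.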
class FaceProcessing(object):
    def __init__(self, ui_obj):
        self.name = "Face Image Processing"
        self.description = "Face image and video processing"
        self.ui_obj = ui_obj

    def take_webcam_photo(self, image):
        return image

    def take_webcam_video(self, images):
        return images

    def mp_webcam_photo(self, image):
        return image

    def mp_webcam_face_mesh(self, image):
        mesh_image = apply_media_pipe_facemesh(image)
        return mesh_image

    def mp_webcam_face_detection(self, image):
        face_detection_img = apply_media_pipe_face_detection(image)
        return face_detection_img

    def dlib_apply_face_orientation(self, image):
        image = face_orientation_obj.create_orientation(image)
        return image

    def webcam_stream_update(self, video_frame):
        video_out = face_orientation_obj.create_orientation(video_frame)
        return video_out

    def create_ui(self):
        with self.ui_obj:
            gr.Markdown("Face Analysis with Webcam/Video")
            with gr.Tabs():
                with gr.TabItem("Playing with Webcam"):
                    with gr.Row():
                        webcam_image_in = gr.Image(label="Webcam Image Input", source="webcam")
                        webcam_video_in = gr.Video(label="Webcam Video Input", source="webcam")
                    with gr.Row():
                        webcam_photo_action = gr.Button("Take the Photo")
                        webcam_video_action = gr.Button("Take the Video")
                    with gr.Row():
                        webcam_photo_out = gr.Image(label="Webcam Photo Output")
                        webcam_video_out = gr.Video(label="Webcam Video Output")
                with gr.TabItem("Mediapipe Facemesh with Webcam"):
                    with gr.Row():
                        with gr.Column():
                            mp_image_in = gr.Image(label="Webcam Image Input", source="webcam")
                        with gr.Column():
                            mp_photo_action = gr.Button("Take the Photo")
                            mp_apply_fm_action = gr.Button("Apply Face Mesh to the Photo")
                            mp_apply_landmarks_action = gr.Button("Apply Face Landmarks to the Photo")
                    with gr.Row():
                        mp_photo_out = gr.Image(label="Webcam Photo Output")
                        mp_fm_photo_out = gr.Image(label="Face Mesh Photo Output")
                        mp_lm_photo_out = gr.Image(label="Face Landmarks Photo Output")
                with gr.TabItem("DLib Based Face Orientation"):
                    with gr.Row():
                        with gr.Column():
                            dlib_image_in = gr.Image(label="Webcam Image Input", source="webcam")
                        with gr.Column():
                            dlib_photo_action = gr.Button("Take the Photo")
                            dlib_apply_orientation_action = gr.Button("Apply Face Orientation to the Photo")
                    with gr.Row():
                        dlib_photo_out = gr.Image(label="Webcam Photo Output")
                        dlib_orientation_photo_out = gr.Image(label="Face Orientation Photo Output")
                with gr.TabItem("Face Orientation on Live Webcam Stream"):
                    with gr.Row():
                        webcam_stream_in = gr.Image(label="Webcam Stream Input",
                                                    source="webcam",
                                                    streaming=True)
                        webcam_stream_out = gr.Image(label="Webcam Stream Output")
                        webcam_stream_in.change(
                            self.webcam_stream_update,
                            inputs=webcam_stream_in,
                            outputs=webcam_stream_out
                        )

            # Button wiring. Note that the "Face Landmarks" button reuses the
            # MediaPipe face-detection helper, which draws six key points per face.
            dlib_photo_action.click(self.mp_webcam_photo,
                                    [dlib_image_in],
                                    [dlib_photo_out])
            dlib_apply_orientation_action.click(self.dlib_apply_face_orientation,
                                                [dlib_image_in],
                                                [dlib_orientation_photo_out])
            mp_photo_action.click(self.mp_webcam_photo,
                                  [mp_image_in],
                                  [mp_photo_out])
            mp_apply_fm_action.click(self.mp_webcam_face_mesh,
                                     [mp_image_in],
                                     [mp_fm_photo_out])
            mp_apply_landmarks_action.click(self.mp_webcam_face_detection,
                                            [mp_image_in],
                                            [mp_lm_photo_out])
            webcam_photo_action.click(self.take_webcam_photo,
                                      [webcam_image_in],
                                      [webcam_photo_out])
            webcam_video_action.click(self.take_webcam_video,
                                      [webcam_video_in],
                                      [webcam_video_out])

    def launch_ui(self):
        self.ui_obj.launch()


if __name__ == '__main__':
    my_app = gr.Blocks()
    face_ui = FaceProcessing(my_app)
    face_ui.create_ui()
    face_ui.launch_ui()
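The core of FaceOrientation.create_orientation is a standard perspective-n-point (PnP) head-pose estimate: six 2D landmark positions are matched against six 3D points of a generic head model, cv2.solvePnP recovers the head's rotation and translation, and cv2.projectPoints maps 3D rectangle corners back into the image. Below is a minimal, self-contained sketch of that math; the 2D image points are made-up values standing in for real dlib landmarks:

# Minimal PnP head-pose sketch (standalone). The image points below are
# hypothetical stand-ins for dlib landmarks in an 800x600 frame.
import cv2
import numpy as np

# Generic 3D head model points, same convention as app.py.
model_points = np.array([
    (0.0, 0.0, 0.0),           # Nose tip
    (0.0, -330.0, -65.0),      # Chin
    (-225.0, 170.0, -135.0),   # Left eye left corner
    (225.0, 170.0, -135.0),    # Right eye right corner
    (-150.0, -150.0, -125.0),  # Left mouth corner
    (150.0, -150.0, -125.0)    # Right mouth corner
])

image_points = np.array([
    (400.0, 300.0),  # Nose tip
    (400.0, 450.0),  # Chin
    (330.0, 240.0),  # Left eye left corner
    (470.0, 240.0),  # Right eye right corner
    (350.0, 370.0),  # Left mouth corner
    (450.0, 370.0)   # Right mouth corner
], dtype="double")

# Pinhole camera approximation: focal length ~ image width, center at midpoint.
w, h = 800, 600
camera_matrix = np.array([[w, 0, w / 2],
                          [0, w, h / 2],
                          [0, 0, 1]], dtype="double")
dist_coeffs = np.zeros((4, 1))  # assume no lens distortion

ok, rotation_vector, translation_vector = cv2.solvePnP(
    model_points, image_points, camera_matrix, dist_coeffs)

# Project a 3D point 400 units in front of the nose tip; the segment from the
# nose to this point shows which way the head is facing.
nose_end, _ = cv2.projectPoints(np.array([(0.0, 0.0, 400.0)]),
                                rotation_vector, translation_vector,
                                camera_matrix, dist_coeffs)
print("pose solved:", ok, "forward point:", nose_end[0][0])

app.py projects the eight corners of two such rectangles (at z=0 and z=400) rather than a single forward point, which is what produces the wireframe box drawn on the frame.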
requirements.txt ADDED
@@ -0,0 +1,6 @@
opencv-python
dlib
imutils
numpy
gradio
mediapipe
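Note: the dlib tabs also require model/shape_predictor_68_face_landmarks.dat, which is not pip-installable and is not included in this commit; dlib distributes it at http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2, and it should be unpacked into a model/ directory next to app.py. Also, app.py uses the Gradio 3.x source="webcam" argument of gr.Image/gr.Video, so with these unpinned requirements a Gradio 3.x release may be needed (Gradio 4 renamed the parameter to sources).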