awacke1 committed on
Commit
15314a9
·
1 Parent(s): 4e9b87a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -119
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import cv2
2
  import gradio as gr
3
  import mediapipe as mp
4
- import dlib
5
  import imutils
6
  import numpy as np
7
 
@@ -58,110 +58,6 @@ def apply_media_pipe_facemesh(image):
58
  .get_default_face_mesh_iris_connections_style())
59
  return annotated_image
60
 
61
-
62
- class FaceOrientation(object):
63
- def __init__(self):
64
- self.detect = dlib.get_frontal_face_detector()
65
- self.predict = dlib.shape_predictor("model/shape_predictor_68_face_landmarks.dat")
66
-
67
- def create_orientation(self, frame):
68
- draw_rect1 = True
69
- draw_rect2 = True
70
- draw_lines = True
71
-
72
- frame = imutils.resize(frame, width=800)
73
- gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
74
- subjects = self.detect(gray, 0)
75
-
76
- for subject in subjects:
77
- landmarks = self.predict(gray, subject)
78
- size = frame.shape
79
-
80
- # 2D image points. If you change the image, you need to change vector
81
- image_points = np.array([
82
- (landmarks.part(33).x, landmarks.part(33).y), # Nose tip
83
- (landmarks.part(8).x, landmarks.part(8).y), # Chin
84
- (landmarks.part(36).x, landmarks.part(36).y), # Left eye left corner
85
- (landmarks.part(45).x, landmarks.part(45).y), # Right eye right corne
86
- (landmarks.part(48).x, landmarks.part(48).y), # Left Mouth corner
87
- (landmarks.part(54).x, landmarks.part(54).y) # Right mouth corner
88
- ], dtype="double")
89
-
90
- # 3D model points.
91
- model_points = np.array([
92
- (0.0, 0.0, 0.0), # Nose tip
93
- (0.0, -330.0, -65.0), # Chin
94
- (-225.0, 170.0, -135.0), # Left eye left corner
95
- (225.0, 170.0, -135.0), # Right eye right corne
96
- (-150.0, -150.0, -125.0), # Left Mouth corner
97
- (150.0, -150.0, -125.0) # Right mouth corner
98
-
99
- ])
100
- # Camera internals
101
- focal_length = size[1]
102
- center = (size[1] / 2, size[0] / 2)
103
- camera_matrix = np.array(
104
- [[focal_length, 0, center[0]],
105
- [0, focal_length, center[1]],
106
- [0, 0, 1]], dtype="double"
107
- )
108
-
109
- dist_coeffs = np.zeros((4, 1)) # Assuming no lens distortion
110
- (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix,
111
- dist_coeffs)
112
-
113
- (b1, jacobian) = cv2.projectPoints(np.array([(350.0, 270.0, 0.0)]), rotation_vector, translation_vector,
114
- camera_matrix, dist_coeffs)
115
- (b2, jacobian) = cv2.projectPoints(np.array([(-350.0, -270.0, 0.0)]), rotation_vector,
116
- translation_vector, camera_matrix, dist_coeffs)
117
- (b3, jacobian) = cv2.projectPoints(np.array([(-350.0, 270, 0.0)]), rotation_vector, translation_vector,
118
- camera_matrix, dist_coeffs)
119
- (b4, jacobian) = cv2.projectPoints(np.array([(350.0, -270.0, 0.0)]), rotation_vector,
120
- translation_vector, camera_matrix, dist_coeffs)
121
-
122
- (b11, jacobian) = cv2.projectPoints(np.array([(450.0, 350.0, 400.0)]), rotation_vector,
123
- translation_vector, camera_matrix, dist_coeffs)
124
- (b12, jacobian) = cv2.projectPoints(np.array([(-450.0, -350.0, 400.0)]), rotation_vector,
125
- translation_vector, camera_matrix, dist_coeffs)
126
- (b13, jacobian) = cv2.projectPoints(np.array([(-450.0, 350, 400.0)]), rotation_vector,
127
- translation_vector, camera_matrix, dist_coeffs)
128
- (b14, jacobian) = cv2.projectPoints(np.array([(450.0, -350.0, 400.0)]), rotation_vector,
129
- translation_vector, camera_matrix, dist_coeffs)
130
-
131
- b1 = (int(b1[0][0][0]), int(b1[0][0][1]))
132
- b2 = (int(b2[0][0][0]), int(b2[0][0][1]))
133
- b3 = (int(b3[0][0][0]), int(b3[0][0][1]))
134
- b4 = (int(b4[0][0][0]), int(b4[0][0][1]))
135
-
136
- b11 = (int(b11[0][0][0]), int(b11[0][0][1]))
137
- b12 = (int(b12[0][0][0]), int(b12[0][0][1]))
138
- b13 = (int(b13[0][0][0]), int(b13[0][0][1]))
139
- b14 = (int(b14[0][0][0]), int(b14[0][0][1]))
140
-
141
- if draw_rect1 == True:
142
- cv2.line(frame, b1, b3, (255, 255, 0), 10)
143
- cv2.line(frame, b3, b2, (255, 255, 0), 10)
144
- cv2.line(frame, b2, b4, (255, 255, 0), 10)
145
- cv2.line(frame, b4, b1, (255, 255, 0), 10)
146
-
147
- if draw_rect2 == True:
148
- cv2.line(frame, b11, b13, (255, 255, 0), 10)
149
- cv2.line(frame, b13, b12, (255, 255, 0), 10)
150
- cv2.line(frame, b12, b14, (255, 255, 0), 10)
151
- cv2.line(frame, b14, b11, (255, 255, 0), 10)
152
-
153
- if draw_lines == True:
154
- cv2.line(frame, b11, b1, (0, 255, 0), 10)
155
- cv2.line(frame, b13, b3, (0, 255, 0), 10)
156
- cv2.line(frame, b12, b2, (0, 255, 0), 10)
157
- cv2.line(frame, b14, b4, (0, 255, 0), 10)
158
-
159
- return frame
160
-
161
-
162
- face_orientation_obj = FaceOrientation()
163
-
164
-
165
  class FaceProcessing(object):
166
  def __init__(self, ui_obj):
167
  self.name = "Face Image Processing"
@@ -185,10 +81,6 @@ class FaceProcessing(object):
185
  face_detection_img = apply_media_pipe_face_detection(image)
186
  return face_detection_img
187
 
188
- def dlib_apply_face_orientation(self, image):
189
- image = face_orientation_obj.create_orientation(image)
190
- return image
191
-
192
  def webcam_stream_update(self, video_frame):
193
  video_out = face_orientation_obj.create_orientation(video_frame)
194
  return video_out
@@ -219,16 +111,6 @@ class FaceProcessing(object):
219
  mp_photo_out = gr.Image(label="Webcam Photo Output")
220
  mp_fm_photo_out = gr.Image(label="Face Mesh Photo Output")
221
  mp_lm_photo_out = gr.Image(label="Face Landmarks Photo Output")
222
- with gr.TabItem("DLib Based Face Orientation"):
223
- with gr.Row():
224
- with gr.Column():
225
- dlib_image_in = gr.Image(label="Webcam Image Input", source="webcam")
226
- with gr.Column():
227
- dlib_photo_action = gr.Button("Take the Photo")
228
- dlib_apply_orientation_action = gr.Button("Apply Face Mesh the Photo")
229
- with gr.Row():
230
- dlib_photo_out = gr.Image(label="Webcam Photo Output")
231
- dlib_orientation_photo_out = gr.Image(label="Face Mesh Photo Output")
232
  with gr.TabItem("Face Orientation on Live Webcam Stream"):
233
  with gr.Row():
234
  webcam_stream_in = gr.Image(label="Webcam Stream Input",
 
1
  import cv2
2
  import gradio as gr
3
  import mediapipe as mp
4
+ #import dlib
5
  import imutils
6
  import numpy as np
7
 
 
58
  .get_default_face_mesh_iris_connections_style())
59
  return annotated_image
60
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
  class FaceProcessing(object):
62
  def __init__(self, ui_obj):
63
  self.name = "Face Image Processing"
 
81
  face_detection_img = apply_media_pipe_face_detection(image)
82
  return face_detection_img
83
 
 
 
 
 
84
  def webcam_stream_update(self, video_frame):
85
  video_out = face_orientation_obj.create_orientation(video_frame)
86
  return video_out
 
111
  mp_photo_out = gr.Image(label="Webcam Photo Output")
112
  mp_fm_photo_out = gr.Image(label="Face Mesh Photo Output")
113
  mp_lm_photo_out = gr.Image(label="Face Landmarks Photo Output")
 
 
 
 
 
 
 
 
 
 
114
  with gr.TabItem("Face Orientation on Live Webcam Stream"):
115
  with gr.Row():
116
  webcam_stream_in = gr.Image(label="Webcam Stream Input",