Akjava committed on
Commit
ff3c171
·
1 Parent(s): 24e5920
Files changed (3) hide show
  1. app.py +135 -49
  2. demo_header.html +2 -1
  3. mp_estimate.py +0 -2
app.py CHANGED
@@ -26,7 +26,7 @@ from numpy.typing import NDArray
26
  iris_mask_blur - final iris edge blur
27
  '''
28
 
29
- def process_images(image,base_image,
30
  double_check_offset_center,center_index,
31
  draw_mediapipe_mesh,z_multiply=0.8,draw_mediapipe_angle=False,draw_hozizontal_line=False,draw_vertical_line=False,draw_faceratio_line=False,
32
  progress=gr.Progress(track_tqdm=True)):
@@ -118,9 +118,9 @@ def process_images(image,base_image,
118
 
119
  if draw_hozizontal_line:
120
  for cordinates in h_cordinates:
121
- print(cordinates)
122
  points = normalized_to_pixel(cordinates,w,h)
123
- print(points)
124
  plot_points(cv2_image,points[:2],False,5,(255,0,0),3)#last one is middle point on horizontal
125
 
126
 
@@ -179,6 +179,7 @@ def process_images(image,base_image,
179
  im_with_pose = cv2_image
180
  mediapipe_text = None
181
 
 
182
  def face_landmarker_result_to_angle_label(face_landmarker_result,order="yxz"):
183
  if len(face_landmarker_result.facial_transformation_matrixes)>0:
184
 
@@ -195,16 +196,16 @@ def process_images(image,base_image,
195
  r = R.from_matrix(rotation_matrix)
196
  euler_angles = r.as_euler(order, degrees=True)
197
  #label = f"Media pipe {order}-Euler Angles [x,y,z] (degrees): [{euler_angles[1]:.2f},{euler_angles[0]:.2f},{euler_angles[2]:.2f}]"
198
- label = f"[x:{euler_angles[1]:.2f},y:{-euler_angles[0]:.2f},z:{-euler_angles[2]:.2f}]"
199
 
200
  return label,rotation_matrix,scaled_translation_vector
201
 
202
  if first_landmarker_result != None:
203
- mediapipe_first_text,_,_ = face_landmarker_result_to_angle_label(first_landmarker_result)
204
  else:
205
  mediapipe_first_text = ""
206
 
207
- mediapipe_second_text,rotation_matrix,scaled_translation_vector = face_landmarker_result_to_angle_label(face_landmarker_result)
208
 
209
  rotation_vector, _ = cv2.Rodrigues(rotation_matrix)
210
  translation_vector = scaled_translation_vector
@@ -222,9 +223,9 @@ def process_images(image,base_image,
222
 
223
  r = R.from_matrix(rotation_matrix)
224
  euler_angles = r.as_euler("yxz", degrees=False)
225
- print(r.as_euler("yxz", degrees=True))
226
- draw_cordinate1=rotate_point_euler((0,0,-100),[-euler_angles[1],euler_angles[0],euler_angles[2]],"yzx")
227
- draw_cordinate2=rotate_point_euler((0,0,-200),[-euler_angles[1],euler_angles[0],euler_angles[2]],"yzx")
228
 
229
  plot_points(im_with_pose,[root_cordinate[:2]+draw_cordinate1[:2],root_cordinate[:2]+draw_cordinate2[:2],root_cordinate[:2]],False,5,(0,128,0),3,(0,255,0))
230
 
@@ -233,9 +234,9 @@ def process_images(image,base_image,
233
  face_ratio_infos = []
234
 
235
 
236
- print("landmark",[landmarks[37],landmarks[267]])
237
- print("numpy",np.array([landmarks[37],landmarks[267]]))
238
- print("mean",np.mean(np.array([landmarks[37],landmarks[267]]),axis=0))
239
  v_cordinates=[
240
  ["philtrum",landmarks[175],landmarks[13],np.mean((landmarks[164],landmarks[2]),axis=0).tolist()],
241
  ["straight",landmarks[175],landmarks[94],landmarks[9]],
@@ -258,7 +259,7 @@ def process_images(image,base_image,
258
  return cv2.cvtColor(im_with_pose,cv2.COLOR_BGR2RGB),mediapipe_first_text,mediapipe_second_text,z_angle_text,y_ratio_text,x_ratio_text,z_angle_detail,y_ratio_detail,x_ratio_detail,face_ratio_info
259
 
260
 
261
-
262
  def find_nearest_weighted_euclidean_2d(target_angles_full, all_angles_full, weights):
263
  target_angles = target_angles_full[:5] # 最初の3つの角度を使用
264
  all_angles = all_angles_full[:, :5] # 最初の3列を使用
@@ -268,8 +269,13 @@ def find_nearest_weighted_euclidean_2d(target_angles_full, all_angles_full, weig
268
  nearest_index = np.argmin(distances)
269
  return nearest_index, all_angles_full[nearest_index]
270
 
 
271
  from mp_estimate import estimate_horizontal_points ,estimate_vertical_points,estimate_rotations_v2
272
- def find_angles(image):
 
 
 
 
273
  if image is None:
274
  raise gr.Error("need image")
275
  cv2_image = pil_to_bgr_image(image)
@@ -280,6 +286,7 @@ def find_angles(image):
280
  features_text = estimate_rotations_v2(face_landmarker_result)
281
  features_value_origin = [float(value) for value in features_text.split(",")]
282
  features_value = features_value_origin.copy()
 
283
  #print(features_value)
284
  #weights = np.array([0.2, 0.2,0.3,0.3])
285
 
@@ -296,31 +303,67 @@ def find_angles(image):
296
  [np.hstack([features_value[ 3:5],features_value[ 6:-x_ratios]])]
297
  #[features_value[:-x_ratios]]
298
  ]
299
- import joblib
300
- def estimate(model_path,scaler_path,features_values):
301
- scalers = joblib.load("models/"+scaler_path)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
302
  if not isinstance(scalers,list):
303
  scalers=(scalers,scalers,scalers)
304
  for i,scaler in enumerate(scalers):
305
- print(i,scaler)
306
  features_values[i] = scaler.transform(features_values[i].copy())
307
 
308
 
309
  result_preds=[]
310
- models = joblib.load("models/"+model_path)
311
- for i,model in enumerate(models):
312
- y_pred = model.predict(features_values[i])
313
- result_preds.append(y_pred.round(2))
314
- return result_preds
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
315
  def estimate2(model_key,features_values):
316
  model_path=f"models/{model_key}.joblib"
317
  scaler_path=f"models/{model_key}_scaler.joblib"
318
  polynomial_path=f"models/{model_key}_polynomial_features.joblib"
319
  selectkbest_path=f"models/{model_key}_selectkbest.joblib"
320
- model = joblib.load(model_path)
321
- scaler = joblib.load(scaler_path)
322
- polynomial = joblib.load(polynomial_path)
323
- selectkbest = joblib.load(selectkbest_path)
 
 
324
  result_preds=[]
325
  for i in range(3):
326
  x = polynomial[i].transform(features_values[i].copy())
@@ -328,7 +371,20 @@ def find_angles(image):
328
  x = scaler[i].transform(x)
329
  y_pred = model[i].predict(x)
330
  result_preds.append(y_pred.round(2))
331
- return result_preds
 
 
 
 
 
 
 
 
 
 
 
 
 
332
 
333
 
334
  #short_result = estimate('linear-svr-xyz_5.joblib','linear-svr-xyz_5_scaler.joblib',features_values)
@@ -359,30 +415,57 @@ def find_angles(image):
359
  middle_result2 = estimate(f'{e1_key}.joblib',f'{e1_key}_scaler.joblib',features_values.copy())
360
  e1_key="lgbm-optimizer_90_random"
361
  long_result2 = estimate(f'{e1_key}.joblib',f'{e1_key}_scaler.joblib',features_values.copy())
362
- def flatten_for(lst):
363
- return [round(item, 3) for sublist in lst for item in sublist]
364
-
 
 
 
365
  def average(values):
366
  flat_values=[]
367
  for value in values:
368
  flat_values += [flatten_for(value)]
369
- print(np.mean(flat_values,axis=0))
370
 
371
  import average
372
  data={
373
  "hgbr-15":flatten_for(short_result),
374
  "hgbr-45":flatten_for(middle_result),
375
  "hgbr-90":flatten_for(long_result),
376
- "lgbm-15dart":flatten_for(short_result2a),
377
- "lgbm-15":flatten_for(short_result2),
378
- "lgbm-45":flatten_for(middle_result2),
379
- "lgbm-90":flatten_for(long_result2),
380
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
381
  #print(data)
382
- average_data=average.analyze_3d_data(data.values())
383
- print(average_data)
384
  #average((short_result,middle_result,long_result,short_result2a,short_result2,middle_result2,long_result2))
385
- return average_data['trimmed_mean'],flatten_for(short_result),flatten_for(middle_result),flatten_for(long_result),flatten_for(short_result2a),flatten_for(short_result2),flatten_for(middle_result2),flatten_for(long_result2)
 
386
 
387
  css="""
388
  #col-left {
@@ -426,7 +509,8 @@ with gr.Blocks(css=css, elem_id="demo-container") as demo:
426
  with gr.Row(elem_id="prompt-container", equal_height=False):
427
  with gr.Row():
428
  btn = gr.Button("Head-Pose Estimate", elem_id="run_button",variant="primary")
429
-
 
430
 
431
 
432
  with gr.Accordion(label="Advanced Settings", open=True):
@@ -435,7 +519,7 @@ with gr.Blocks(css=css, elem_id="demo-container") as demo:
435
 
436
  with gr.Row( equal_height=True):
437
 
438
- double_check = gr.Checkbox(label="Double Check",value=True,info="move center and detect again(usually more accurate).recommend choose 195")
439
  center_index = gr.Slider(info="center-index",
440
  label="Center-index",
441
  minimum=0,
@@ -460,8 +544,8 @@ with gr.Blocks(css=css, elem_id="demo-container") as demo:
460
  with gr.Column():
461
  result_image = gr.Image(height=760,label="Result", elem_id="output-animation",image_mode='RGB')
462
  with gr.Row( equal_height=True):
463
- mediapipe_last_text = gr.Textbox(label="2nd or last mediapipe result(yzx-eulder[x,y,z])")
464
- mediapipe_first_text = gr.Textbox(label="first mediapipe result(yzx-eulder[x,y,z])")
465
 
466
  with gr.Row( equal_height=True):
467
  z_angle_text = gr.Textbox(label="Z angle by horizontal-line",info="start with 0,exactly Z-Angle")
@@ -474,23 +558,25 @@ with gr.Blocks(css=css, elem_id="demo-container") as demo:
474
  x_ratio_detail = gr.TextArea(label="X-ratio detail",value="")
475
  with gr.Row( equal_height=True):
476
  face_ratio_info = gr.Text(label="Face Ratio",info="Average philtrum:1.82(std 0.13),straight:0.82(std 0.04),face:0.91(std 0.02),r-eyes:0.86(std 0.03),r-contour:0.77(std 0.05),l-eyes:0.86(std 0.03),l-contour:0.75(std 0.05),lips:1.43(std 0.16),mouth-eye:1.21(std 0.07)")
477
- gr.HTML("<h5>For Rotation sometime differenct to mediapipe's result(Especially X usually minus 4-7)</h5>")
478
- bt_test = gr.Button("Model-Estimate")
479
- gr.HTML("<p>YXZ-Euler [x,y,z] hgbr is stable,lgbm is accurate(dart is more).trimmed works well on small angles</p>")
 
 
480
  with gr.Row( equal_height=True):
481
- average_result = gr.Text(label="trimmed-mean")
482
  short_result = gr.Text(label="hgbr-15")
483
  middle_result = gr.Text(label="hgbr-45")
484
  long_result = gr.Text(label="hgbr-90")
 
485
  with gr.Row( equal_height=True):
486
  short_result2a = gr.Text(label="lgbm-15dart")
487
  short_result2 = gr.Text(label="lgbm-15")
488
  middle_result2 = gr.Text(label="lgbm-45")
489
  long_result2 = gr.Text(label="lgbm-90")
490
  #,
491
- bt_test.click(fn=find_angles,inputs=image,outputs=[average_result,short_result,middle_result,long_result,short_result2a,short_result2,middle_result2,long_result2])
492
 
493
- btn.click(fn=process_images, inputs=[image,base_image,
494
  double_check,center_index,
495
  draw_mediapipe_mesh,z_multiply,draw_mediapipe_angle,draw_hozizontal_line,draw_vertical_line,draw_faceratio_line,
496
  ],outputs=[result_image,mediapipe_first_text,mediapipe_last_text,z_angle_text,y_ratio_text,x_ratio_text,z_angle_detail_text,y_ratio_detail,x_ratio_detail,face_ratio_info] ,api_name='infer')
 
26
  iris_mask_blur - final iris edge blur
27
  '''
28
 
29
+ def process_images(image,base_image,order,
30
  double_check_offset_center,center_index,
31
  draw_mediapipe_mesh,z_multiply=0.8,draw_mediapipe_angle=False,draw_hozizontal_line=False,draw_vertical_line=False,draw_faceratio_line=False,
32
  progress=gr.Progress(track_tqdm=True)):
 
118
 
119
  if draw_hozizontal_line:
120
  for cordinates in h_cordinates:
121
+ #print(cordinates)
122
  points = normalized_to_pixel(cordinates,w,h)
123
+ #print(points)
124
  plot_points(cv2_image,points[:2],False,5,(255,0,0),3)#last one is middle point on horizontal
125
 
126
 
 
179
  im_with_pose = cv2_image
180
  mediapipe_text = None
181
 
182
+
183
  def face_landmarker_result_to_angle_label(face_landmarker_result,order="yxz"):
184
  if len(face_landmarker_result.facial_transformation_matrixes)>0:
185
 
 
196
  r = R.from_matrix(rotation_matrix)
197
  euler_angles = r.as_euler(order, degrees=True)
198
  #label = f"Media pipe {order}-Euler Angles [x,y,z] (degrees): [{euler_angles[1]:.2f},{euler_angles[0]:.2f},{euler_angles[2]:.2f}]"
199
+ label = f"[{order[0]}:{euler_angles[0]:.2f},{order[1]}:{-euler_angles[1]:.2f},{order[2]}:{-euler_angles[2]:.2f}]"
200
 
201
  return label,rotation_matrix,scaled_translation_vector
202
 
203
  if first_landmarker_result != None:
204
+ mediapipe_first_text,_,_ = face_landmarker_result_to_angle_label(first_landmarker_result,order)
205
  else:
206
  mediapipe_first_text = ""
207
 
208
+ mediapipe_second_text,rotation_matrix,scaled_translation_vector = face_landmarker_result_to_angle_label(face_landmarker_result,order)
209
 
210
  rotation_vector, _ = cv2.Rodrigues(rotation_matrix)
211
  translation_vector = scaled_translation_vector
 
223
 
224
  r = R.from_matrix(rotation_matrix)
225
  euler_angles = r.as_euler("yxz", degrees=False)
226
+ #print(r.as_euler("yxz", degrees=True))
227
+ draw_cordinate1=rotate_point_euler((0,0,-100),[-euler_angles[1],euler_angles[0],euler_angles[2]],"yxz")
228
+ draw_cordinate2=rotate_point_euler((0,0,-200),[-euler_angles[1],euler_angles[0],euler_angles[2]],"yxz")
229
 
230
  plot_points(im_with_pose,[root_cordinate[:2]+draw_cordinate1[:2],root_cordinate[:2]+draw_cordinate2[:2],root_cordinate[:2]],False,5,(0,128,0),3,(0,255,0))
231
 
 
234
  face_ratio_infos = []
235
 
236
 
237
+ #print("landmark",[landmarks[37],landmarks[267]])
238
+ #print("numpy",np.array([landmarks[37],landmarks[267]]))
239
+ #print("mean",np.mean(np.array([landmarks[37],landmarks[267]]),axis=0))
240
  v_cordinates=[
241
  ["philtrum",landmarks[175],landmarks[13],np.mean((landmarks[164],landmarks[2]),axis=0).tolist()],
242
  ["straight",landmarks[175],landmarks[94],landmarks[9]],
 
259
  return cv2.cvtColor(im_with_pose,cv2.COLOR_BGR2RGB),mediapipe_first_text,mediapipe_second_text,z_angle_text,y_ratio_text,x_ratio_text,z_angle_detail,y_ratio_detail,x_ratio_detail,face_ratio_info
260
 
261
 
262
+ #deprecated
263
  def find_nearest_weighted_euclidean_2d(target_angles_full, all_angles_full, weights):
264
  target_angles = target_angles_full[:5] # 最初の3つの角度を使用
265
  all_angles = all_angles_full[:, :5] # 最初の3列を使用
 
269
  nearest_index = np.argmin(distances)
270
  return nearest_index, all_angles_full[nearest_index]
271
 
272
+ import math
273
  from mp_estimate import estimate_horizontal_points ,estimate_vertical_points,estimate_rotations_v2
274
+
275
+ import joblib
276
+ stacking8_model = joblib.load(f"models/stacking8.joblib")
277
+ cached_models = {}
278
+ def find_angles(image,order):
279
  if image is None:
280
  raise gr.Error("need image")
281
  cv2_image = pil_to_bgr_image(image)
 
286
  features_text = estimate_rotations_v2(face_landmarker_result)
287
  features_value_origin = [float(value) for value in features_text.split(",")]
288
  features_value = features_value_origin.copy()
289
+ print("features x-angle",math.degrees(features_value[3])-90)
290
  #print(features_value)
291
  #weights = np.array([0.2, 0.2,0.3,0.3])
292
 
 
303
  [np.hstack([features_value[ 3:5],features_value[ 6:-x_ratios]])]
304
  #[features_value[:-x_ratios]]
305
  ]
306
+
307
+
308
+ from scipy.spatial.transform import Rotation as R
309
+ def flatten_for(lst):
310
+ return [round(item, 3) for sublist in lst for item in sublist]
311
+ def change_euler_order(orderd_array,from_order,to_order,degrees=True):
312
+ r = R.from_euler(from_order,orderd_array,degrees=degrees)
313
+ result = r.as_euler(to_order,degrees=degrees)
314
+ return np.round(result,2).tolist()
315
+
316
+ def load_joblib(path):
317
+ if path in cached_models:
318
+ return cached_models[path]
319
+ else:
320
+ model = joblib.load(path)
321
+ cached_models[path] = model
322
+ return model
323
+
324
+ def estimate(model_path,scaler_path,features_values,multi=True):
325
+
326
+ scalers = load_joblib("models/"+scaler_path)
327
  if not isinstance(scalers,list):
328
  scalers=(scalers,scalers,scalers)
329
  for i,scaler in enumerate(scalers):
330
+ #print(i,scaler)
331
  features_values[i] = scaler.transform(features_values[i].copy())
332
 
333
 
334
  result_preds=[]
335
+ models = load_joblib("models/"+model_path)
336
+
337
+ if multi:
338
+ for i,model in enumerate(models):
339
+ y_pred = model.predict(features_values[i])
340
+ result_preds.append(y_pred.round(2))
341
+ result_preds=flatten_for(result_preds)
342
+ yxz =[result_preds[1],result_preds[0],result_preds[2]]
343
+ else:
344
+ result_preds=models.predict(features_values[0])
345
+ result_preds=flatten_for(result_preds)
346
+ #yxz=flatten_for(yxz)
347
+ #yxz =[yxz[1],yxz[0],yxz[2]]
348
+
349
+ #zyx = change_euler_order(yxz,"yxz","zyx")
350
+ #return [round(zyx[2],2),round(zyx[1],2),round(zyx[0],2)]#
351
+ return result_preds # yxz-orderd x,y,z
352
+
353
+
354
+
355
+
356
  def estimate2(model_key,features_values):
357
  model_path=f"models/{model_key}.joblib"
358
  scaler_path=f"models/{model_key}_scaler.joblib"
359
  polynomial_path=f"models/{model_key}_polynomial_features.joblib"
360
  selectkbest_path=f"models/{model_key}_selectkbest.joblib"
361
+
362
+ model = load_joblib(model_path)
363
+ scaler = load_joblib(scaler_path)
364
+ polynomial = load_joblib(polynomial_path)
365
+ selectkbest = load_joblib(selectkbest_path)
366
+
367
  result_preds=[]
368
  for i in range(3):
369
  x = polynomial[i].transform(features_values[i].copy())
 
371
  x = scaler[i].transform(x)
372
  y_pred = model[i].predict(x)
373
  result_preds.append(y_pred.round(2))
374
+ return result_preds # yxz-orderd x,y,z
375
+ import onnxruntime as ort
376
+ def estimate3(model_key,features_values):
377
+ model_path=f"models/{model_key}.onnx"
378
+ ort_session = ort.InferenceSession(model_path)
379
+
380
+ #result_preds=[]
381
+ #result_preds=models.predict(features_values[0])
382
+ #result_preds=flatten_for(result_preds)
383
+ input_name = ort_session.get_inputs()[0].name
384
+ input_data = features_values.astype(np.float32)
385
+ result_preds = ort_session.run(None, {input_name: input_data})
386
+ #print((result_preds))
387
+ return result_preds[0] # yxz-orderd x,y,z
388
 
389
 
390
  #short_result = estimate('linear-svr-xyz_5.joblib','linear-svr-xyz_5_scaler.joblib',features_values)
 
415
  middle_result2 = estimate(f'{e1_key}.joblib',f'{e1_key}_scaler.joblib',features_values.copy())
416
  e1_key="lgbm-optimizer_90_random"
417
  long_result2 = estimate(f'{e1_key}.joblib',f'{e1_key}_scaler.joblib',features_values.copy())
418
+
419
+ e1_key="etr_90"
420
+ long_result3 = estimate(f'{e1_key}.joblib',f'{e1_key}_scaler.joblib',features_values.copy(),False)
421
+ #long_result3 = estimate3(e1_key,np.array([features_value]))#single
422
+ #long_result3 = flatten_for(long_result3)
423
+ #long_result3 = long_result2
424
  def average(values):
425
  flat_values=[]
426
  for value in values:
427
  flat_values += [flatten_for(value)]
428
+ #print(np.mean(flat_values,axis=0))
429
 
430
  import average
431
  data={
432
  "hgbr-15":flatten_for(short_result),
433
  "hgbr-45":flatten_for(middle_result),
434
  "hgbr-90":flatten_for(long_result),
435
+ "lgbm-15dart":(short_result2a),
436
+ "lgbm-15":(short_result2),
437
+ "lgbm-45":(middle_result2),
438
+ "lgbm-90":(long_result2),
439
  }
440
+
441
+
442
+ stack_x = short_result2a+short_result2+middle_result2+long_result2+flatten_for(short_result)+flatten_for(middle_result)+flatten_for(long_result)+long_result3
443
+
444
+ #average_data=estimate3("stacking8",np.array([stack_x]))#onnx not
445
+ average_data=stacking8_model.predict(np.array([stack_x]))
446
+
447
+ #change order
448
+
449
+ #all data train with yxz-order x,y,z
450
+ def yxz_xyz_to_yxz(euler):
451
+ return [euler[1],euler[0],euler[2]]
452
+
453
+ average_data = change_euler_order(yxz_xyz_to_yxz(flatten_for(average_data)),"yxz",order)
454
+ short_result = change_euler_order(yxz_xyz_to_yxz(flatten_for(short_result)),"yxz",order)
455
+ middle_result = change_euler_order(yxz_xyz_to_yxz(flatten_for(middle_result)),"yxz",order)
456
+ long_result = change_euler_order(yxz_xyz_to_yxz(flatten_for(long_result)),"yxz",order)
457
+ short_result2a = change_euler_order(yxz_xyz_to_yxz(short_result2a),"yxz",order)
458
+ short_result2 = change_euler_order(yxz_xyz_to_yxz(short_result2),"yxz",order)
459
+ middle_result2 = change_euler_order(yxz_xyz_to_yxz(middle_result2),"yxz",order)
460
+ long_result2 = change_euler_order(yxz_xyz_to_yxz(long_result2),"yxz",order)
461
+ long_result3 = change_euler_order(yxz_xyz_to_yxz(long_result3),"yxz",order)
462
+
463
  #print(data)
464
+ #average_data=average.analyze_3d_data(data.values())
465
+ #print(average_data)
466
  #average((short_result,middle_result,long_result,short_result2a,short_result2,middle_result2,long_result2))
467
+ return average_data,short_result,middle_result,long_result,(short_result2a),(short_result2),(middle_result2),(long_result2),long_result3
468
+ #return average_data['trimmed_mean'],flatten_for(short_result),flatten_for(middle_result),flatten_for(long_result),(short_result2a),(short_result2),(middle_result2),(long_result2)
469
 
470
  css="""
471
  #col-left {
 
509
  with gr.Row(elem_id="prompt-container", equal_height=False):
510
  with gr.Row():
511
  btn = gr.Button("Head-Pose Estimate", elem_id="run_button",variant="primary")
512
+ order = gr.Dropdown(label="Order",value="xyz",choices=["xyz","xzy","yxz","yzx","zxy","zyx"],info="returened array order is same as label")
513
+
514
 
515
 
516
  with gr.Accordion(label="Advanced Settings", open=True):
 
519
 
520
  with gr.Row( equal_height=True):
521
 
522
+ double_check = gr.Checkbox(label="Double Check",value=True,info="move center-index and detect again(usually more accurate).recommend choose 195")
523
  center_index = gr.Slider(info="center-index",
524
  label="Center-index",
525
  minimum=0,
 
544
  with gr.Column():
545
  result_image = gr.Image(height=760,label="Result", elem_id="output-animation",image_mode='RGB')
546
  with gr.Row( equal_height=True):
547
+ mediapipe_last_text = gr.Textbox(label=f"2nd or last mediapipe result",)
548
+ mediapipe_first_text = gr.Textbox(label=f"first mediapipe result")
549
 
550
  with gr.Row( equal_height=True):
551
  z_angle_text = gr.Textbox(label="Z angle by horizontal-line",info="start with 0,exactly Z-Angle")
 
558
  x_ratio_detail = gr.TextArea(label="X-ratio detail",value="")
559
  with gr.Row( equal_height=True):
560
  face_ratio_info = gr.Text(label="Face Ratio",info="Average philtrum:1.82(std 0.13),straight:0.82(std 0.04),face:0.91(std 0.02),r-eyes:0.86(std 0.03),r-contour:0.77(std 0.05),l-eyes:0.86(std 0.03),l-contour:0.75(std 0.05),lips:1.43(std 0.16),mouth-eye:1.21(std 0.07)")
561
+ gr.HTML("<h5>For Rotation sometime differenct to mediapipe's result</h5>")
562
+ with gr.Row( equal_height=True):
563
+ bt_test = gr.Button("Estimate by Models")
564
+ average_result = gr.Text(label="stacking")
565
+ gr.HTML("<p>number is max training angle,usually stacking is works well.slow because of etr</p>")
566
  with gr.Row( equal_height=True):
 
567
  short_result = gr.Text(label="hgbr-15")
568
  middle_result = gr.Text(label="hgbr-45")
569
  long_result = gr.Text(label="hgbr-90")
570
+ long_result3 = gr.Text(label="etr-90")
571
  with gr.Row( equal_height=True):
572
  short_result2a = gr.Text(label="lgbm-15dart")
573
  short_result2 = gr.Text(label="lgbm-15")
574
  middle_result2 = gr.Text(label="lgbm-45")
575
  long_result2 = gr.Text(label="lgbm-90")
576
  #,
577
+ bt_test.click(fn=find_angles,inputs=[image,order],outputs=[average_result,short_result,middle_result,long_result,short_result2a,short_result2,middle_result2,long_result2,long_result3])
578
 
579
+ btn.click(fn=process_images, inputs=[image,base_image,order,
580
  double_check,center_index,
581
  draw_mediapipe_mesh,z_multiply,draw_mediapipe_angle,draw_hozizontal_line,draw_vertical_line,draw_faceratio_line,
582
  ],outputs=[result_image,mediapipe_first_text,mediapipe_last_text,z_angle_text,y_ratio_text,x_ratio_text,z_angle_detail_text,y_ratio_detail,x_ratio_detail,face_ratio_info] ,api_name='infer')
demo_header.html CHANGED
@@ -12,7 +12,8 @@
12
  Accurate detection in MediaPipe requires correct positioning, but I don't know the exact position needed. This is a known <a href="https://github.com/google-ai-edge/mediapipe/issues/4759">issue</a><br>
13
  I start to test hgbr and lgbm models,this estimate result help to make aligned image.<br>
14
  center index see <a href="https://github.com/google-ai-edge/mediapipe/blob/a908d668c730da128dfa8d9f6bd25d519d006692/mediapipe/modules/face_geometry/data/canonical_face_model_uv_visualization.png">mediapipe face index image</a><br>
15
- Recently Choose center index and mediapipe-estimate + models trained with face-features extracted by mediapipe
 
16
  </p>
17
  </div>
18
 
 
12
  Accurate detection in MediaPipe requires correct positioning, but I don't know the exact position needed. This is a known <a href="https://github.com/google-ai-edge/mediapipe/issues/4759">issue</a><br>
13
  I start to test hgbr and lgbm models,this estimate result help to make aligned image.<br>
14
  center index see <a href="https://github.com/google-ai-edge/mediapipe/blob/a908d668c730da128dfa8d9f6bd25d519d006692/mediapipe/modules/face_geometry/data/canonical_face_model_uv_visualization.png">mediapipe face index image</a><br>
15
+ Recently: choose a center index, then use the MediaPipe estimate plus models trained on face features extracted by MediaPipe<br>
16
+ I'll share the model and dataset soon.
17
  </p>
18
  </div>
19
 
mp_estimate.py CHANGED
@@ -114,9 +114,7 @@ def get_feature_ratios_cordinate(face_landmarks,ratios=feature_ratios_indices):
114
  def ratios_cordinates(cordinates):
115
 
116
  distance_a = calculate_distance(cordinates[0],cordinates[1])
117
- print(distance_a)
118
  distance_b = calculate_distance(cordinates[-2],cordinates[-1])
119
- print(distance_b)
120
  if distance_a == 0 or distance_b == 0:
121
  return 0
122
  else:
 
114
  def ratios_cordinates(cordinates):
115
 
116
  distance_a = calculate_distance(cordinates[0],cordinates[1])
 
117
  distance_b = calculate_distance(cordinates[-2],cordinates[-1])
 
118
  if distance_a == 0 or distance_b == 0:
119
  return 0
120
  else: