Liusuthu committed
Commit 077653d · verified · 1 Parent(s): 46390ae

Update app_utils.py

Files changed (1):
  1. app_utils.py +3 -154
app_utils.py CHANGED
@@ -87,6 +87,9 @@ def preprocess_video_and_predict(video):
 
 
 #to return scores
+
+
+###########################################################################################################################
 def video_score(video):
 
     cap = cv2.VideoCapture(video)
@@ -244,160 +247,6 @@ def video_score(video):
     my_audio_clip.write_audiofile("data/audio.wav",ffmpeg_params=["-ac","1"])
 
     return stat,scores_str,"data/audio.wav"
-
-###########################################################################################################################
-# def video_score(video):
-#     cap = cv2.VideoCapture(video)
-#     w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-#     h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-#     fps = np.round(cap.get(cv2.CAP_PROP_FPS))
-
-#     path_save_video_face = 'result_face.mp4'
-#     vid_writer_face = cv2.VideoWriter(path_save_video_face, cv2.VideoWriter_fourcc(*'mp4v'), fps, (224, 224))
-
-#     # path_save_video_hm = 'result_hm.mp4'
-#     # vid_writer_hm = cv2.VideoWriter(path_save_video_hm, cv2.VideoWriter_fourcc(*'mp4v'), fps, (224, 224))
-
-#     lstm_features = []
-#     count_frame = 1
-#     count_face = 0
-#     probs = []
-#     frames = []
-#     last_output = None
-#     last_heatmap = None
-#     cur_face = None
-
-#     with mp_face_mesh.FaceMesh(
-#             max_num_faces=1,
-#             refine_landmarks=False,
-#             min_detection_confidence=0.5,
-#             min_tracking_confidence=0.5) as face_mesh:
-
-#         while cap.isOpened():
-#             _, frame = cap.read()
-#             if frame is None: break
-
-#             frame_copy = frame.copy()
-#             frame_copy.flags.writeable = False
-#             frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
-#             results = face_mesh.process(frame_copy)
-#             frame_copy.flags.writeable = True
-
-#             if results.multi_face_landmarks:
-#                 for fl in results.multi_face_landmarks:
-#                     startX, startY, endX, endY = get_box(fl, w, h)
-#                     cur_face = frame_copy[startY:endY, startX: endX]
-
-#                     if count_face%config_data.FRAME_DOWNSAMPLING == 0:
-#                         cur_face_copy = pth_processing(Image.fromarray(cur_face))
-#                         with torch.no_grad():
-#                             features = torch.nn.functional.relu(pth_model_static.extract_features(cur_face_copy)).detach().numpy()
-
-#                         # grayscale_cam = cam(input_tensor=cur_face_copy)
-#                         # grayscale_cam = grayscale_cam[0, :]
-#                         # cur_face_hm = cv2.resize(cur_face,(224,224), interpolation = cv2.INTER_AREA)
-#                         # cur_face_hm = np.float32(cur_face_hm) / 255
-#                         # heatmap = show_cam_on_image(cur_face_hm, grayscale_cam, use_rgb=False)
-#                         # last_heatmap = heatmap
-
-#                         if len(lstm_features) == 0:
-#                             lstm_features = [features]*10
-#                         else:
-#                             lstm_features = lstm_features[1:] + [features]
-
-#                         lstm_f = torch.from_numpy(np.vstack(lstm_features))
-#                         lstm_f = torch.unsqueeze(lstm_f, 0)
-#                         with torch.no_grad():
-#                             output = pth_model_dynamic(lstm_f).detach().numpy()
-#                         last_output = output
-
-#                         if count_face == 0:
-#                             count_face += 1
-
-#                     else:
-#                         if last_output is not None:
-#                             output = last_output
-#                             # heatmap = last_heatmap
-
-#                         elif last_output is None:
-#                             output = np.empty((1, 7))
-#                             output[:] = np.nan
-
-#                     probs.append(output[0])
-#                     frames.append(count_frame)
-#             else:
-#                 if last_output is not None:
-#                     lstm_features = []
-#                     empty = np.empty((7))
-#                     empty[:] = np.nan
-#                     probs.append(empty)
-#                     frames.append(count_frame)
-
-#             if cur_face is not None:
-#                 # heatmap_f = display_info(heatmap, 'Frame: {}'.format(count_frame), box_scale=.3)
-
-#                 cur_face = cv2.cvtColor(cur_face, cv2.COLOR_RGB2BGR)
-#                 cur_face = cv2.resize(cur_face, (224,224), interpolation = cv2.INTER_AREA)
-#                 cur_face = display_info(cur_face, 'Frame: {}'.format(count_frame), box_scale=.3)
-#                 vid_writer_face.write(cur_face)
-#                 # vid_writer_hm.write(heatmap_f)
-
-#             count_frame += 1
-#             if count_face != 0:
-#                 count_face += 1
-
-#     vid_writer_face.release()
-#     # vid_writer_hm.release()
-
-#     stat = statistics_plot(frames, probs)
-
-#     if not stat:
-#         return None, None
-
-#     #for debug
-#     print(type(frames))
-#     print(frames)
-#     print(type(probs))
-#     print(probs)
-#     # to calculate scores
-#     nan=float('nan')
-#     s1 = 0
-#     s2 = 0
-#     s3 = 0
-#     s4 = 0
-#     s5 = 0
-#     s6 = 0
-#     s7 = 0
-#     frames_len=len(frames)
-#     for i in range(frames_len):
-#         if np.isnan(probs[i][0]):
-#             frames_len=frames_len-1
-#         else:
-#             s1=s1+probs[i][0]
-#             s2=s2+probs[i][1]
-#             s3=s3+probs[i][2]
-#             s4=s4+probs[i][3]
-#             s5=s5+probs[i][4]
-#             s6=s6+probs[i][5]
-#             s7=s7+probs[i][6]
-#     s1=s1/frames_len
-#     s2=s2/frames_len
-#     s3=s3/frames_len
-#     s4=s4/frames_len
-#     s5=s5/frames_len
-#     s6=s6/frames_len
-#     s7=s7/frames_len
-#     prob=[s1,s2,s3,s4,s5,s6,s7]
-#     prob_str=str(prob)
-#     with open("local_data/data.txt",'a', encoding="utf8") as f:
-#         f.write(prob_str+'\n')
-
-#     with open("local_data/data.txt",'r', encoding="utf8") as f:
-#         for i in f:
-#             print(i)
-#     #平衡点值为零,越正越负面 (balance point is zero; the more positive, the more negative)
-#     score1=0*prob[0]-8*prob[1]+4*prob[2]+0*prob[3]+2*prob[4]+2*prob[5]+4*prob[6]
-#     print("score1=",score1)
 
 # #trans the audio file
 # my_audio_clip = AudioFileClip(video)
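Note for reviewers: the block removed above is a commented-out earlier copy of video_score. It appears to mirror the scoring path of the retained function: average the per-frame 7-class emotion probabilities (frames with no detected face are NaN and are skipped) and collapse the averages into one weighted value, score1, whose balance point is zero and which grows more positive as the overall emotion gets more negative (per the translated Chinese comment). Below is a minimal standalone sketch of that calculation; the helper name weighted_emotion_score and the example input are illustrative only and are not part of app_utils.py.

import numpy as np

# Weights copied from the removed score1 line:
#   score1 = 0*p0 - 8*p1 + 4*p2 + 0*p3 + 2*p4 + 2*p5 + 4*p6
# Balance point is zero; the more positive the result, the more negative the emotion.
WEIGHTS = np.array([0.0, -8.0, 4.0, 0.0, 2.0, 2.0, 4.0])

def weighted_emotion_score(probs):
    """Average the 7-class probabilities over valid frames, then apply the weights."""
    probs = np.asarray(probs, dtype=float)
    valid = probs[~np.isnan(probs[:, 0])]   # drop frames where no face was detected
    if len(valid) == 0:
        return float("nan")
    mean_prob = valid.mean(axis=0)          # equivalent to s1..s7 divided by frames_len
    return float(WEIGHTS @ mean_prob)       # equivalent to score1

# Illustrative input: three frames, the middle one with no detected face.
example = [
    [0.6, 0.1, 0.1, 0.05, 0.05, 0.05, 0.05],
    [float("nan")] * 7,
    [0.5, 0.05, 0.2, 0.05, 0.1, 0.05, 0.05],
]
print(weighted_emotion_score(example))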