linhcuem commited on
Commit
c0e60fc
1 Parent(s): 6e5dd7b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +56 -2
app.py CHANGED
@@ -4,6 +4,7 @@ import yolov5
4
  from PIL import Image
5
  from huggingface_hub import hf_hub_download
6
  import os
 
7
 
8
 
9
  app_title = "Detect san pham VSK"
@@ -40,7 +41,55 @@ def predict(image, threshold=0.25, model_id=None):
40
  output_image = Image.fromarray(numpy_image)
41
  return output_image, count_result
42
 
43
- gr.Interface(
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
  title=app_title,
45
  description="DO ANH DAT",
46
  fn=predict,
@@ -52,4 +101,9 @@ gr.Interface(
52
  outputs=[gr.Image(type="pil"),gr.Textbox(show_label=False)],
53
  examples=examples,
54
  cache_examples=True if examples else Fale,
55
- ).launch(enable_queue=True)
 
 
 
 
 
 
4
  from PIL import Image
5
  from huggingface_hub import hf_hub_download
6
  import os
7
+ import cv2
8
 
9
 
10
  app_title = "Detect san pham VSK"
 
41
  output_image = Image.fromarray(numpy_image)
42
  return output_image, count_result
43
 
44
def show_pred_vid(
    video_path: str = None,
    model_path: str = None,
    image_size: int = 640,  # fixed: was `image_size: 640` — a bare annotation with no default, a SyntaxError
    conf_threshold: float = 0.25,
    iou_threshold: float = 0.45,
):
    """Run per-frame YOLO inference over a video file and display annotated frames.

    Args:
        video_path: Path to the input video file.
        model_path: Path or hub id of the YOLO weights to load.
        image_size: Inference image size. NOTE(review): currently unused in the
            body — presumably meant to be passed to `model.predict`; confirm.
        conf_threshold: Confidence threshold applied via `model.overrides`.
        iou_threshold: NMS IoU threshold applied via `model.overrides`.

    Returns:
        None. NOTE(review): the Gradio output component for this fn is an
        Image(type="filepath"), so the UI output will always be empty —
        the function should probably write/return an annotated video path.
    """
    # NOTE(review): `YOLO` is not imported anywhere in the visible file
    # (only `yolov5` is) — this will raise NameError at call time; verify.
    # Fixed: the model is now loaded ONCE, not re-instantiated per frame.
    model = YOLO(model_path)
    model.overrides['conf'] = conf_threshold
    model.overrides['iou'] = iou_threshold
    model.overrides['agnostic_nms'] = False
    model.overrides['max_det'] = 1000

    cap = cv2.VideoCapture(video_path)
    while cap.isOpened():
        success, frame = cap.read()
        if not success:
            # End of stream or read failure — stop processing.
            break
        results = model.predict(frame)
        # Annotated frame is computed but never shown/saved — TODO: confirm
        # whether cv2.imshow or a VideoWriter was intended here.
        annotated_frame = results[0].plot()
        # Allow early exit with 'q' (only meaningful with a GUI window).
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cap.release()
    cv2.destroyAllWindows()
71
+
72
# Input widgets for the video-inference tab.
# NOTE(review): this mixes two Gradio API generations — `gr.components.Video`
# (newer) alongside the deprecated `gr.inputs.*` wrappers. It only runs on an
# old gradio release that still ships `gr.inputs`; TODO unify on gr.components.
inputs_vid = [
    gr.components.Video(type="filepath", label="Input Video"),
    gr.inputs.Dropdown(["linhcuem/yolov5_chamdiem_ver13"], default="linhcuem/yolov5_chamdiem_ver13", label="Model"),
    gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label= "Image Size"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
]

# NOTE(review): labelled "Output Video" but declared as an Image component,
# and `show_pred_vid` returns nothing — the output will stay empty. Presumably
# a `gr.outputs.Video` fed by a returned file path was intended; confirm.
outputs_vid = gr.outputs.Image(type="filepath", label="Output Video")
81
# Video-inference tab: wires show_pred_vid to the video input/output widgets.
interface_vid = gr.Interface(
    title=app_title,
    # NOTE(review): `description` must be defined earlier in app.py — it is
    # not visible in this chunk; verify it exists (the image tab uses a literal).
    description=description,
    fn=show_pred_vid,
    inputs=inputs_vid,
    outputs=outputs_vid,
    theme='huggingface',
    cache_examples=False,
)
90
+
91
+
92
# Image-inference tab: wraps the existing `predict` function.
# NOTE(review): the diff view elides the unchanged `inputs=` lines (original
# lines 96-100) between these hunks — they remain part of this call.
interface_image = gr.Interface(
    title=app_title,
    description="DO ANH DAT",
    fn=predict,
    outputs=[gr.Image(type="pil"), gr.Textbox(show_label=False)],
    examples=examples,
    # Fixed: was `True if examples else Fale` — `Fale` is an undefined name
    # (NameError as soon as `examples` is falsy); `bool(examples)` is the
    # idiomatic equivalent of the intended conditional.
    cache_examples=bool(examples),
)
105
+
106
# Combine the image and video interfaces into one tabbed app and launch it.
# NOTE(review): the pre-commit version launched with enable_queue=True;
# queueing is not re-enabled here — confirm that is intentional.
gr.TabbedInterface(
    [interface_image, interface_vid],
    # Fixed user-facing typo: "Image inferece" -> "Image inference".
    tab_names=['Image inference', 'Video inference'],
).launch()