linhcuem committed on
Commit
28862c0
1 Parent(s): e31efcc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +75 -68
app.py CHANGED
@@ -39,11 +39,11 @@ image_path = [['test_images/2a998cfb0901db5f8210.jpg','cham_diem_yolov8', 640, 0
39
 
40
  ###################################################
41
  def yolov8_img_inference(
42
- image = None,
43
- model_path = None,
44
- image_size = 640,
45
- conf_threshold = 0.25,
46
- iou_threshold = 0.45,
47
  ):
48
  # model = YOLO(model_path)
49
  model = YOLO(model_path)
@@ -78,82 +78,89 @@ def yolov8_img_inference(
78
  return render, {names[k]: v for k, v in present_objects.items()}
79
 
80
 
81
- def yolov8_vid_inference(video_path):
82
- cap = cv2.VideoCapture(video_path)
83
- while cap.isOpened():
84
- success, frame = cap.read()
85
-
86
- if success:
87
- frame_copy = frame.copy()
88
- outputs = model.predict(source=frame)
89
- results = outputs[0].cpu().numpy()
90
- for i, det in enumerate(results.boxes.xyxy):
91
- cv2.rectangle(
92
- frame_copy,
93
- (int(det[0]), int(det[1])),
94
- (int(det[2]), int(det[3])),
95
- color=(0, 0, 255),
96
- thickness=2,
97
- lineType=cv2.LINE_AA
98
- )
99
- yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
100
 
101
 
102
- inputs_vid = [
103
- gr.components.Video(type="filepath", label="Input Video"),
104
- ]
105
 
106
- outputs_vid = [
107
- gr.components.Image(type="numpy", label="Output Image"),
108
- ]
109
 
110
- interface_vid = gr.Interface(
111
- fn=yolov8_vid_inference,
112
- inputs = inputs_vid,
113
- outputs = outputs_vid,
114
- title = "Detect Thiên Việt productions",
115
- cache_examples = False,
116
 
117
- )
118
 
119
- # inputs_image = [
120
- # # gr.inputs.Image(type="filepath", label="Input Image"),
121
- # gr.Image(type="pil"),
122
- # # gr.Dropdown(["linhcuem/linhcuem/chamdiemgianhang_yolov8_ver1"],
123
- # default="linhcuem/chamdiemgianhang_yolov8_ver1", label="Model"),
124
-
125
- # gr.Slider(maximum=1280, step=32, value = 640, label="Image Size"),
126
- # gr.Slider(maximum=1.0 , step=0.05, value = 0.25, label="Confidence Threshold"),
127
- # gr.Slider(maximum=1.0, step=0.05, value = 0.45, label="IOU Threshold"),
128
-
129
-
130
- # ]
131
 
132
- # outputs_image =gr.outputs.Image(type="filepath", label="Output Image")
133
  # count_obj = gr.Textbox(show_label=False)
134
 
135
  title = "Detect Thiên Việt productions"
136
 
137
- interface_image = gr.Interface(
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
138
  fn=yolov8_img_inference,
139
- inputs=[
140
- gr.Image(type='pil'),
141
- gr.Dropdown(["linhcuem/checker_TB_yolov8_ver1", "linhcuem/chamdiemgianhang_yolov8_ver21"],
142
- default="linhcuem/checker_TB_yolov8_ver1", label="Model"),
143
- gr.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
144
- gr.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
145
- gr.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
146
- ],
147
- outputs=[gr.Image(type="pil"),gr.Textbox(show_label=False)],
148
  title=title,
149
- examples=image_path,
150
- cache_examples=True if image_path else False,
151
-
152
  )
153
-
154
- gr.TabbedInterface(
155
- [interface_image, interface_vid],
156
- tab_names=['Image inference', 'Video inference']
157
- ).queue().launch()
158
 
159
  # interface_image.launch(debug=True, enable_queue=True)
 
39
 
40
  ###################################################
41
  def yolov8_img_inference(
42
+ image: gr.inputs.Image = None,
43
+ model_path: gr.inputs.Dropdown = None,
44
+ image_size: gr.inputs.Slider = 640,
45
+ conf_threshold: gr.inputs.Slider = 0.25,
46
+ iou_threshold: gr.inputs.Slider = 0.45,
47
  ):
48
  # model = YOLO(model_path)
49
  model = YOLO(model_path)
 
78
  return render, {names[k]: v for k, v in present_objects.items()}
79
 
80
 
81
+ # def yolov8_vid_inference(video_path):
82
+ # cap = cv2.VideoCapture(video_path)
83
+ # while cap.isOpened():
84
+ # success, frame = cap.read()
85
+
86
+ # if success:
87
+ # frame_copy = frame.copy()
88
+ # outputs = model.predict(source=frame)
89
+ # results = outputs[0].cpu().numpy()
90
+ # for i, det in enumerate(results.boxes.xyxy):
91
+ # cv2.rectangle(
92
+ # frame_copy,
93
+ # (int(det[0]), int(det[1])),
94
+ # (int(det[2]), int(det[3])),
95
+ # color=(0, 0, 255),
96
+ # thickness=2,
97
+ # lineType=cv2.LINE_AA
98
+ # )
99
+ # yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
100
 
101
 
102
+ # inputs_vid = [
103
+ # gr.components.Video(type="filepath", label="Input Video"),
104
+ # ]
105
 
106
+ # outputs_vid = [
107
+ # gr.components.Image(type="numpy", label="Output Image"),
108
+ # ]
109
 
110
+ # interface_vid = gr.Interface(
111
+ # fn=yolov8_vid_inference,
112
+ # inputs = inputs_vid,
113
+ # outputs = outputs_vid,
114
+ # title = "Detect Thiên Việt productions",
115
+ # cache_examples = False,
116
 
117
+ # )
118
 
119
+ inputs = [
120
+ gr.inputs.Image(type="filepath", label="Input Image"),
121
+ gr.inputs.Dropdown(["linhcuem/checker_TB_yolov8_ver1", "linhcuem/chamdiemgianhang_yolov8_ver21"],
122
+ default="linhcuem/checker_TB_yolov8_ver1", label="Model"),
123
+ gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
124
+ gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
125
+ gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
126
+ ]
 
 
 
 
127
 
128
+ outputs_image =gr.outputs.Image(type="filepath", label="Output Image")
129
  # count_obj = gr.Textbox(show_label=False)
130
 
131
  title = "Detect Thiên Việt productions"
132
 
133
+ # interface_image = gr.Interface(
134
+ # fn=yolov8_img_inference,
135
+ # inputs=[
136
+ # gr.Image(type='pil'),
137
+ # gr.Dropdown(["linhcuem/checker_TB_yolov8_ver1", "linhcuem/chamdiemgianhang_yolov8_ver21"],
138
+ # default="linhcuem/checker_TB_yolov8_ver1", label="Model"),
139
+ # gr.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
140
+ # gr.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
141
+ # gr.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
142
+ # ],
143
+ # outputs=[gr.Image(type="pil"),gr.Textbox(show_label=False)],
144
+ # title=title,
145
+ # examples=image_path,
146
+ # cache_examples=True if image_path else False,
147
+
148
+ # )
149
+
150
+ # gr.TabbedInterface(
151
+ # [interface_image, interface_vid],
152
+ # tab_names=['Image inference', 'Video inference']
153
+ # ).queue().launch()
154
+
155
+ demo_app = gr.Interface(
156
  fn=yolov8_img_inference,
157
+ inputs=inputs,
158
+ outputs=outputs,
 
 
 
 
 
 
 
159
  title=title,
160
+ examples=examples,
161
+ cache_examples=True,
162
+ theme='huggingface',
163
  )
164
+ demo_app.launch(debug=True, enable_queue=True)
 
 
 
 
165
 
166
  # interface_image.launch(debug=True, enable_queue=True)