linhcuem commited on
Commit
a5c8542
·
1 Parent(s): ba0ecd2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +43 -7
app.py CHANGED
@@ -5,6 +5,7 @@ import torch
5
  import os
6
  import requests
7
  import json
 
8
 
9
  from PIL import Image
10
  from huggingface_hub import hf_hub_download
@@ -103,9 +104,44 @@ def yolov8_img_inference(
103
  # output_image = visualize_object_predictions(image=image, object_prediction_list=object_prediction_list)
104
  # return output_image['image']
105
  # render = render_result(model=model, image=image, result=results[0])
106
-
107
 
108
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
109
 
110
  inputs_image = [
111
  # gr.inputs.Image(type="filepath", label="Input Image"),
@@ -141,9 +177,9 @@ interface_image = gr.Interface(
141
 
142
  )
143
 
144
- # gr.TabbedInterface(
145
- # [interface_image],
146
- # tab_names=['Image inference']
147
- # ).queue().launch()
148
 
149
- interface_image.launch(debug=True, enable_queue=True)
 
5
  import os
6
  import requests
7
  import json
8
+ import cv2
9
 
10
  from PIL import Image
11
  from huggingface_hub import hf_hub_download
 
104
  # output_image = visualize_object_predictions(image=image, object_prediction_list=object_prediction_list)
105
  # return output_image['image']
106
  # render = render_result(model=model, image=image, result=results[0])
 
107
 
108
def yolov8_vid_inference(video_path):
    """Run YOLOv8 detection frame-by-frame over a video, yielding annotated frames.

    Args:
        video_path: Filesystem path to the input video.

    Yields:
        numpy.ndarray: RGB frame with red bounding boxes drawn around detections.
    """
    cap = cv2.VideoCapture(video_path)
    try:
        while cap.isOpened():
            success, frame = cap.read()
            if not success:
                # End of stream (or read error): the original looped forever here
                # because cap.isOpened() stays True after the last frame.
                break
            frame_copy = frame.copy()
            outputs = model.predict(source=frame)
            results = outputs[0].cpu().numpy()
            for det in results.boxes.xyxy:
                # det is [x1, y1, x2, y2]; draw a red (BGR) box, 2 px, anti-aliased.
                cv2.rectangle(
                    frame_copy,
                    (int(det[0]), int(det[1])),
                    (int(det[2]), int(det[3])),
                    color=(0, 0, 255),
                    thickness=2,
                    lineType=cv2.LINE_AA,
                )
            # OpenCV decodes frames as BGR; Gradio expects RGB.
            yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
    finally:
        # Always release the capture handle, even if prediction raises
        # (the original leaked the VideoCapture resource).
        cap.release()
127
+
128
+
129
# Gradio wiring for the video tab: a single video file in, a stream of
# annotated RGB frames out (yolov8_vid_inference is a generator).
inputs_vid = [
    gr.components.Video(type="filepath", label="Input Video"),
]

outputs_vid = [
    gr.components.Image(type="numpy", label="Output Image"),
]

interface_vid = gr.Interface(
    fn=yolov8_vid_inference,
    inputs=inputs_vid,
    outputs=outputs_vid,
    title=app_title,
    cache_examples=False,
)
145
 
146
  inputs_image = [
147
  # gr.inputs.Image(type="filepath", label="Input Image"),
 
177
 
178
  )
179
 
180
# Bundle the image and video interfaces into one tabbed app and serve it
# with request queueing enabled.
tabbed_app = gr.TabbedInterface(
    [interface_image, interface_vid],
    tab_names=['Image inference', 'Video inference'],
)
tabbed_app.queue().launch()