linhcuem committed on
Commit
023264e
·
verified ·
1 Parent(s): dac2e50

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -44
app.py CHANGED
@@ -31,9 +31,9 @@ image_path = [['test_images/2a998cfb0901db5f8210.jpg','cham_diem_yolov8', 640, 0
31
  # model = YOLO('linhcuem/chamdiemgianhang_yolov8_300epochs')
32
  # model = YOLO('linhcuem/chamdiemgianhang_yolov8_ver21')
33
  # model = YOLO('linhcuem/cham_diem_yolov8_ver20')
34
- model_ids = ['linhcuem/checker_TB_yolov8_ver1', 'linhcuem/cham_diem_yolov8', 'linhcuem/chamdiemgianhang_yolov8_300epochs', 'linhcuem/cham_diem_yolov8_ver20', 'linhcuem/chamdiemgianhang_yolov8_ver21']
35
- current_model_id = model_ids[-1]
36
- model = YOLO(current_model_id)
37
 
38
 
39
  ###################################################
@@ -44,7 +44,7 @@ def yolov8_img_inference(
44
  conf_threshold= 0.25,
45
  iou_threshold = 0.45,
46
  ):
47
- # model = YOLO(model_path)
48
  model.conf = conf_threshold
49
  model.iou = iou_threshold
50
  # model.overrides['conf'] = conf_threshold
@@ -56,14 +56,7 @@ def yolov8_img_inference(
56
  render = render_result(model=model, image=image, result=results[0])
57
  # get the model names list
58
  names = model.names
59
- # get the 'obj' class id
60
- # obj_id = list(names)[list(names.values()).index('lo_ytv')]
61
- # ('hop_dln','hop_jn','hop_vtg','hop_ytv','lo_kids', 'lo_ytv','loc_dln','loc_jn','loc_kids','loc_ytv')]
62
- # obj_id = list(names)[list(names.values()).index([0])]
63
- # count 'car' objects in the results
64
- # count_result = results[0].boxes.cls[0].item()
65
- #count_result = results[0]boxes.cls[0].tolist()
66
-
67
  object_counts = {x: 0 for x in names}
68
  for r in results:
69
  for c in r.boxes.cls:
@@ -81,35 +74,7 @@ def yolov8_img_inference(
81
 
82
 
83
  return render, {names[k]: v for k, v in present_objects.items()}
84
- # results = model.predict(image, imgsz=image_size, return_outputs=True)
85
- # results = model.predict(image)
86
- # object_prediction_list = []
87
- # for _, image_results in enumerate(results):
88
- # if len(image_results)!=0:
89
- # image_predictions_in_xyxy_format = image_results['det']
90
- # for pred in image_predictions_in_xyxy_format:
91
- # x1, y1, x2, y2 = (
92
- # int(pred[0]),
93
- # int(pred[1]),
94
- # int(pred[2]),
95
- # int(pred[3]),
96
- # )
97
- # bbox = [x1, y1, x2, y2]
98
- # score = pred[4]
99
- # category_name = model.model.names[int(pred[5])]
100
- # category_id = pred[5]
101
- # object_prediction = ObjectPrediction(
102
- # bbox=bbox,
103
- # category_id=int(category_id),
104
- # score=score,
105
- # category_name=category_name,
106
- # )
107
- # object_prediction_list.append(object_prediction)
108
-
109
- # image = read_image(image)
110
- # output_image = visualize_object_predictions(image=image, object_prediction_list=object_prediction_list)
111
- # return output_image['image']
112
- # render = render_result(model=model, image=image, result=results[0])
113
 
114
  def yolov8_vid_inference(video_path):
115
  cap = cv2.VideoCapture(video_path)
@@ -171,9 +136,9 @@ interface_image = gr.Interface(
171
  fn=yolov8_img_inference,
172
  inputs=[
173
  gr.Image(type='pil'),
174
- # gr.Dropdown(["linhcuem/chamdiemgianhang_yolov8_ver1"],
175
- # default="linhcuem/chamdiemgianhang_yolov8_ver1"),
176
- gr.Dropdown(model_ids, value=model_ids[-1]),
177
  gr.Slider(maximum=1280, step=32, value=640),
178
  gr.Slider(maximum=1.0, step=0.05, value=0.25),
179
  gr.Slider(maximum=1.0, step=0.05, value=0.45),
 
31
  # model = YOLO('linhcuem/chamdiemgianhang_yolov8_300epochs')
32
  # model = YOLO('linhcuem/chamdiemgianhang_yolov8_ver21')
33
  # model = YOLO('linhcuem/cham_diem_yolov8_ver20')
34
+ # model_ids = ['linhcuem/checker_TB_yolov8_ver1', 'linhcuem/cham_diem_yolov8', 'linhcuem/chamdiemgianhang_yolov8_300epochs', 'linhcuem/cham_diem_yolov8_ver20', 'linhcuem/chamdiemgianhang_yolov8_ver21']
35
+ # current_model_id = model_ids[-1]
36
+ # model = YOLO(current_model_id)
37
 
38
 
39
  ###################################################
 
44
  conf_threshold= 0.25,
45
  iou_threshold = 0.45,
46
  ):
47
+ model = YOLO(model_path)
48
  model.conf = conf_threshold
49
  model.iou = iou_threshold
50
  # model.overrides['conf'] = conf_threshold
 
56
  render = render_result(model=model, image=image, result=results[0])
57
  # get the model names list
58
  names = model.names
59
+
 
 
 
 
 
 
 
60
  object_counts = {x: 0 for x in names}
61
  for r in results:
62
  for c in r.boxes.cls:
 
74
 
75
 
76
  return render, {names[k]: v for k, v in present_objects.items()}
77
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
 
79
  def yolov8_vid_inference(video_path):
80
  cap = cv2.VideoCapture(video_path)
 
136
  fn=yolov8_img_inference,
137
  inputs=[
138
  gr.Image(type='pil'),
139
+ gr.Dropdown(["linhcuem/checker_TB_yolov8_ver1","linhcuem/cham_diem_yolov8","linhcuem/chamdiemgianhang_yolov8_300epochs", "linhcuem/chamdiemgianhang_yolov8_ver21", "linhcuem/cham_diem_yolov8_ver20" ],
140
+ default="linhcuem/checker_TB_yolov8_ver1", label="Model"),
141
+
142
  gr.Slider(maximum=1280, step=32, value=640),
143
  gr.Slider(maximum=1.0, step=0.05, value=0.25),
144
  gr.Slider(maximum=1.0, step=0.05, value=0.45),