import gradio as gr

from ultralyticsplus import YOLO, render_result

DEFAULT_MODEL = 'linhcuem/cham_diem_yolov8'

# Example inputs for the demo: (image, model, image size, confidence threshold, IoU threshold).
image_path = [[f, DEFAULT_MODEL, 640, 0.25, 0.45] for f in [
    'test_images/2a998cfb0901db5f8210.jpg', 'test_images/2ce19ce0191acb44920b.jpg',
    'test_images/2daab6ea3310e14eb801.jpg', 'test_images/4a137deefb14294a7005 (1).jpg',
    'test_images/7e77c596436c9132c87d.jpg', 'test_images/170f914014bac6e49fab.jpg',
    'test_images/3355ec3269c8bb96e2d9.jpg', 'test_images/546306a88052520c0b43.jpg',
    'test_images/33148464019ed3c08a8f.jpg', 'test_images/a17a992a1cd0ce8e97c1.jpg',
    'test_images/b5db5e42d8b80ae653a9 (1).jpg', 'test_images/b8ee1f5299a84bf612b9.jpg',
    'test_images/b272fec7783daa63f32c.jpg', 'test_images/bb202b3eaec47c9a25d5.jpg',
    'test_images/bf1e22b0a44a76142f5b.jpg', 'test_images/ea5473c5f53f27617e2e.jpg',
    'test_images/ee106392e56837366e79.jpg', 'test_images/f88d2214a4ee76b02fff.jpg',
]]

# Load the default YOLO model once at startup (the inference function
# reloads whichever model is selected in the UI).
model = YOLO(DEFAULT_MODEL)
# Alternative checkpoint: YOLO('linhcuem/cham_diem_yolov8_ver20')
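
# Optional sanity check (a minimal sketch, assuming the example image exists
# on disk; uncomment to run one prediction outside the Gradio UI):
# preview = model.predict('test_images/2a998cfb0901db5f8210.jpg', imgsz=640)
# print(preview[0].boxes.cls)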

###################################################
def yolov8_img_inference(
    image: str = None,
    model_path: str = None,
    image_size: int = 640,
    conf_threshold: float = 0.25,
    iou_threshold: float = 0.45,
):
    """Run YOLOv8 on one image; return the rendered image and per-class counts."""
    model = YOLO(model_path)
    model.overrides['conf'] = conf_threshold  # NMS confidence threshold
    model.overrides['iou'] = iou_threshold    # NMS IoU threshold
    model.overrides['agnostic_nms'] = False   # class-aware NMS
    model.overrides['max_det'] = 1000         # maximum detections per image
    results = model.predict(image, imgsz=image_size)
    render = render_result(model=model, image=image, result=results[0])
    # boxes.cls holds numeric class ids; map them through model.names before
    # counting (counting the literal string 'name' always returned 0).
    counts = {}
    for class_id in results[0].boxes.cls.tolist():
        name = model.names[int(class_id)]
        counts[name] = counts.get(name, 0) + 1
    count_result = ', '.join(f'{name}: {n}' for name, n in counts.items())
    return render, count_result
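
# Direct call (a sketch; assumes the example image exists locally):
# rendered, counts = yolov8_img_inference(
#     'test_images/2a998cfb0901db5f8210.jpg',
#     'linhcuem/cham_diem_yolov8', 640, 0.25, 0.45)
# print(counts)  # comma-separated per-class counts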
    

inputs_image = [
    gr.Image(type="filepath", label="Input Image"),
    gr.Dropdown([DEFAULT_MODEL], value=DEFAULT_MODEL, label="Model"),
    gr.Slider(minimum=320, maximum=1280, value=640, step=32, label="Image Size"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IoU Threshold"),
]

outputs_image = gr.Image(type="pil", label="Output Image")
count_obj = gr.Textbox(label="Object Counts")

title = "Tất cả do anh Đạt"

interface_image = gr.Interface(
    fn=yolov8_img_inference,
    inputs=inputs_image,
    outputs=[outputs_image, count_obj],
    title=title,
    examples=image_path,
    cache_examples=True,
)

interface_image.queue().launch(debug=True)
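
# Programmatic client call (a sketch, not part of the app; assumes the
# gradio_client package is installed and the app is running on the default
# local port; adjust the URL for a hosted Space):
# from gradio_client import Client
# client = Client("http://127.0.0.1:7860")
# rendered, counts = client.predict(
#     "test_images/2a998cfb0901db5f8210.jpg",  # input image
#     "linhcuem/cham_diem_yolov8",             # model
#     640, 0.25, 0.45)                         # image size, conf, IoU
# print(counts)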