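"""Gradio demo: YOLOv8 object detection with the ultralyticsplus wrapper.

Loads a YOLOv8 checkpoint from the Hugging Face Hub (default:
linhcuem/chamdiem_yolov8_ver10) and renders the predicted bounding boxes
on an uploaded image.
"""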
import gradio as gr
from ultralyticsplus import YOLO, render_result

DEFAULT_MODEL = 'linhcuem/chamdiem_yolov8_ver10'

# Example inputs for the demo: [image path, model repo, image size, conf threshold, IoU threshold]
example_images = [
    'test_images/2a998cfb0901db5f8210.jpg',
    'test_images/2ce19ce0191acb44920b.jpg',
    'test_images/2daab6ea3310e14eb801.jpg',
    'test_images/4a137deefb14294a7005 (1).jpg',
    'test_images/7e77c596436c9132c87d.jpg',
    'test_images/170f914014bac6e49fab.jpg',
    'test_images/3355ec3269c8bb96e2d9.jpg',
    'test_images/546306a88052520c0b43.jpg',
    'test_images/33148464019ed3c08a8f.jpg',
    'test_images/a17a992a1cd0ce8e97c1.jpg',
    'test_images/b5db5e42d8b80ae653a9 (1).jpg',
    'test_images/b8ee1f5299a84bf612b9.jpg',
    'test_images/b272fec7783daa63f32c.jpg',
    'test_images/bb202b3eaec47c9a25d5.jpg',
    'test_images/bf1e22b0a44a76142f5b.jpg',
    'test_images/ea5473c5f53f27617e2e.jpg',
    'test_images/ee106392e56837366e79.jpg',
    'test_images/f88d2214a4ee76b02fff.jpg',
]
image_path = [[img, DEFAULT_MODEL, 640, 0.25, 0.45] for img in example_images]

def yolov8_img_inference(
    image: str,
    model_path: str = DEFAULT_MODEL,
    image_size: int = 640,
    conf_threshold: float = 0.25,
    iou_threshold: float = 0.45,
):
    """Run YOLOv8 on one image and return the rendered detections."""
    # Load the checkpoint from the Hugging Face Hub (weights are cached after the first call).
    model = YOLO(model_path)
    # Apply the NMS thresholds via ultralytics' overrides so predict() actually uses them.
    model.overrides['conf'] = conf_threshold
    model.overrides['iou'] = iou_threshold
    results = model.predict(image, imgsz=image_size)
    # render_result draws the predicted boxes and labels on the image and returns a PIL image.
    render = render_result(model=model, image=image, result=results[0])
    return render
        
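# Quick sanity check without the UI (the output filename is illustrative, not part of the app):
#   img = yolov8_img_inference('test_images/2a998cfb0901db5f8210.jpg')
#   img.save('prediction.jpg')  # render_result returns a PIL image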

inputs_image = [
    gr.Image(type="filepath", label="Input Image"),
    gr.Dropdown([DEFAULT_MODEL], value=DEFAULT_MODEL, label="Model"),
    gr.Slider(minimum=320, maximum=1280, value=640, step=32, label="Image Size"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IoU Threshold"),
]
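# Note: the order of these components must match the positional parameters of
# yolov8_img_inference, since gr.Interface maps inputs to arguments by position.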

# render_result returns a PIL image, so the output component uses type="pil".
outputs_image = gr.Image(type="pil", label="Output Image")
title = "All thanks to Đạt"

interface_image = gr.Interface(
    fn=yolov8_img_inference,
    inputs=inputs_image,
    outputs=outputs_image,
    title=title,
    examples=image_path,
    cache_examples=False,
)

gr.TabbedInterface(
    [interface_image],
    tab_names=['Image inference']
).queue().launch()
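# queue() serializes incoming requests so long-running predictions do not time out;
# pass share=True to launch() for a temporary public URL if needed.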

# Single-interface alternative: interface_image.queue().launch(debug=True)