import gradio as gr
from ultralyticsplus import YOLO, render_result
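# Each example row matches the interface inputs defined below:
# [image path, model id, image size, confidence threshold, IOU threshold].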
image_path = [['test_images/2a998cfb0901db5f8210.jpg','linhcuem/cham_diem_yolov8', 640, 0.25, 0.45],['test_images/2ce19ce0191acb44920b.jpg','linhcuem/cham_diem_yolov8', 640, 0.25, 0.45],
['test_images/2daab6ea3310e14eb801.jpg','linhcuem/cham_diem_yolov8', 640, 0.25, 0.45], ['test_images/4a137deefb14294a7005 (1).jpg','linhcuem/cham_diem_yolov8', 640, 0.25, 0.45],
['test_images/7e77c596436c9132c87d.jpg','linhcuem/cham_diem_yolov8', 640, 0.25, 0.45], ['test_images/170f914014bac6e49fab.jpg','linhcuem/cham_diem_yolov8', 640, 0.25, 0.45],
['test_images/3355ec3269c8bb96e2d9.jpg','linhcuem/cham_diem_yolov8', 640, 0.25, 0.45], ['test_images/546306a88052520c0b43.jpg','linhcuem/cham_diem_yolov8', 640, 0.25, 0.45],
['test_images/33148464019ed3c08a8f.jpg','linhcuem/cham_diem_yolov8', 640, 0.25, 0.45], ['test_images/a17a992a1cd0ce8e97c1.jpg','linhcuem/cham_diem_yolov8', 640, 0.25, 0.45],
['test_images/b5db5e42d8b80ae653a9 (1).jpg','linhcuem/cham_diem_yolov8', 640, 0.25, 0.45],['test_images/b8ee1f5299a84bf612b9.jpg','linhcuem/cham_diem_yolov8', 640, 0.25, 0.45],
['test_images/b272fec7783daa63f32c.jpg','linhcuem/cham_diem_yolov8', 640, 0.25, 0.45],['test_images/bb202b3eaec47c9a25d5.jpg','linhcuem/cham_diem_yolov8', 640, 0.25, 0.45],
['test_images/bf1e22b0a44a76142f5b.jpg','linhcuem/cham_diem_yolov8', 640, 0.25, 0.45], ['test_images/ea5473c5f53f27617e2e.jpg','linhcuem/cham_diem_yolov8', 640, 0.25, 0.45],
['test_images/ee106392e56837366e79.jpg','linhcuem/cham_diem_yolov8', 640, 0.25, 0.45], ['test_images/f88d2214a4ee76b02fff.jpg','linhcuem/cham_diem_yolov8', 640, 0.25, 0.45]]
###################################################
def yolov8_img_inference(
    image=None,
    model_path='linhcuem/cham_diem_yolov8',
    image_size=640,
    conf_threshold=0.25,
    iou_threshold=0.45,
):
    # Load the selected model and apply the thresholds chosen in the UI.
    model = YOLO(model_path)
    model.overrides['conf'] = conf_threshold
    model.overrides['iou'] = iou_threshold
    model.overrides['agnostic_nms'] = False
    model.overrides['max_det'] = 1000
    # Run inference at the requested size and draw the detections on the image.
    results = model.predict(image, imgsz=image_size)
    render = render_result(model=model, image=image, result=results[0])
    # model.names maps class ids to class names,
    # e.g. {0: 'hop_dln', 1: 'hop_jn', ..., 5: 'lo_ytv', ...}.
    names = model.names
    # Count detections per class name.
    object_counts = {name: 0 for name in names.values()}
    for r in results:
        for c in r.boxes.cls:
            object_counts[names[int(c)]] += 1
    # Keep only the classes that were actually detected.
    present_objects = {name: count for name, count in object_counts.items() if count > 0}
    return render, present_objects
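# A minimal sketch of calling the handler directly, outside Gradio (assumes the
# bundled test image from the examples list exists; model id and thresholds are
# the defaults above):
#
#   from PIL import Image
#   img = Image.open('test_images/2a998cfb0901db5f8210.jpg')
#   annotated, counts = yolov8_img_inference(img, 'linhcuem/cham_diem_yolov8', 640, 0.25, 0.45)
#   annotated.save('annotated.jpg')  # render_result returns a PIL image
#   print(counts)                    # e.g. {'lo_ytv': 3}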
inputs_image = [
    gr.Image(type="pil", label="Input Image"),
    gr.Dropdown(["linhcuem/cham_diem_yolov8"],
                value="linhcuem/cham_diem_yolov8", label="Model"),
    gr.Slider(minimum=320, maximum=1280, value=640, step=32, label="Image Size"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold"),
]
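# The input order above must match the parameter order of yolov8_img_inference.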
title = "All thanks to Mr. Đạt"
interface_image = gr.Interface(
    fn=yolov8_img_inference,
    inputs=inputs_image,
    outputs=[gr.Image(type="pil"), gr.Textbox(show_label=False)],
    title=title,
    examples=image_path,
    cache_examples=True,
)
interface_image.queue().launch(debug=True)