import cv2
import numpy as np
import pandas as pd
import gradio as gr
from PIL import Image
from ultralytics import YOLO

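# Load the pretrained YOLO11n oriented-bounding-box (OBB) model
# (the nano checkpoint Ultralytics ships trained on the DOTA aerial dataset).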
model = YOLO('yolo11n-obb.pt')

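# Class ids predicted by the OBB model, mapped to (English, Persian) display labels.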
class_names = {
    0: ('plane', 'هواپیما'),
    1: ('ship', 'کشتی'),
    2: ('storage tank', 'مخزن ذخیره'),
    3: ('baseball diamond', 'زمین بیسبال'),
    4: ('tennis court', 'زمین تنیس'),
    5: ('basketball court', 'زمین بسکتبال'),
    6: ('ground track field', 'زمین دو و میدانی'),
    7: ('harbor', 'بندرگاه'),
    8: ('bridge', 'پل'),
    9: ('large vehicle', 'خودرو بزرگ'),
    10: ('small vehicle', 'خودرو کوچک'),
    11: ('helicopter', 'هلیکوپتر'),
    12: ('roundabout', 'میدان'),
    13: ('soccer ball field', 'زمین فوتبال'),
    14: ('swimming pool', 'استخر شنا')
}

# English-to-Persian label lookup for the results tables below; class_names is keyed
# by integer class id, so indexing it with an English label string would never match.
en_to_fa = {en: fa for en, fa in class_names.values()}

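# One BGR color per class id, used by the OpenCV drawing calls.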
colors = {
    0: (255, 0, 0),
    1: (0, 255, 0),
    2: (0, 0, 255),
    3: (255, 255, 0),
    4: (255, 0, 255),
    5: (0, 255, 255),
    6: (128, 0, 128),
    7: (255, 165, 0),
    8: (0, 128, 0),
    9: (128, 128, 0),
    10: (128, 0, 0),
    11: (0, 128, 128),
    12: (0, 0, 128),
    13: (75, 0, 130),
    14: (199, 21, 133)
}


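# Detect objects in a single aerial image and return the annotated image
# together with a per-class count table.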
def detect_and_draw_image(input_image):
    # Run OBB inference on the uploaded image; Ultralytics accepts PIL images directly,
    # so no manual channel conversion is needed before prediction.
    results = model.predict(source=input_image, conf=0.3)
    obb_results = results[0].obb

    # No detections: return the original image together with an empty count table.
    if obb_results is None or len(obb_results.data) == 0:
        print("هیچ شیء شناسایی نشده است.")  # "No objects were detected."
        df = pd.DataFrame({
            'Label (English)': [],
            'Label (Persian)': [],
            'Object Count': []
        })
        return input_image, df

    # Work on a contiguous BGR copy so the OpenCV drawing calls below can write into it.
    image_np = cv2.cvtColor(np.array(input_image.convert('RGB')), cv2.COLOR_RGB2BGR)

    counts = {}
    for obb, conf, cls in zip(obb_results.data.cpu().numpy(),
                              obb_results.conf.cpu().numpy(),
                              obb_results.cls.cpu().numpy()):
        # Each OBB row starts with (x_center, y_center, width, height, rotation); rotation is in radians.
        x_center, y_center, width, height, rotation = obb[:5]
        class_id = int(cls)
        confidence = float(conf)

        label_en, label_fa = class_names.get(class_id, ('unknown', 'ناشناخته'))
        counts[label_en] = counts.get(label_en, 0) + 1

        # Convert the rotated box to its four corner points; OpenCV expects the angle in degrees.
        rect = ((x_center, y_center), (width, height), rotation * 180.0 / np.pi)
        box = cv2.boxPoints(rect)
        box = np.intp(box)  # np.int0 was removed in NumPy 2.0
        color = colors.get(class_id, (0, 255, 0))
        cv2.drawContours(image_np, [box], 0, color, 2)
        cv2.putText(image_np, f'{label_en}: {confidence:.2f}', (int(x_center), int(y_center)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1, cv2.LINE_AA)

    # Convert back to RGB for display in Gradio.
    image_rgb = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
    output_image = Image.fromarray(image_rgb)

    # Per-class counts with both English and Persian labels.
    df = pd.DataFrame({
        'Label (English)': list(counts.keys()),
        'Label (Persian)': [en_to_fa.get(k, 'ناشناخته') for k in counts.keys()],
        'Object Count': list(counts.values())
    })

    return output_image, df


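# Detect objects in a video frame by frame, count each object once using a simple
# overlap check against previously seen boxes, and return the annotated clip plus a count table.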
def detect_and_draw_video(video_path):
    cap = cv2.VideoCapture(video_path)
    frames = []
    overall_counts = {}
    seen_objects = []

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Resize each frame to 640x480 (the size the video writer below expects) and run inference.
        frame = cv2.resize(frame, (640, 480))
        results = model.predict(source=frame, conf=0.3)
        obb_results = results[0].obb

        if obb_results is not None and len(obb_results.data) > 0:
            for obb, conf, cls in zip(obb_results.data.cpu().numpy(),
                                      obb_results.conf.cpu().numpy(),
                                      obb_results.cls.cpu().numpy()):
                x_center, y_center, width, height, rotation = obb[:5]
                class_id = int(cls)
                confidence = float(conf)
                label_en, label_fa = class_names.get(class_id, ('unknown', 'ناشناخته'))
                # Axis-aligned extent of the detection, used only for the duplicate check below.
                current_object = (label_en,
                                  int(x_center - width / 2), int(y_center - height / 2),
                                  int(x_center + width / 2), int(y_center + height / 2))

                # Count the object only if no previously seen box of the same class overlaps it.
                if not any(existing[0] == label_en and
                           existing[1] < current_object[3] and existing[3] > current_object[1] and
                           existing[2] < current_object[4] and existing[4] > current_object[2]
                           for existing in seen_objects):
                    seen_objects.append(current_object)
                    overall_counts[label_en] = overall_counts.get(label_en, 0) + 1

                # Draw the rotated box and a confidence label on the frame.
                rect = ((x_center, y_center), (width, height), rotation * 180.0 / np.pi)
                box = cv2.boxPoints(rect)
                box = np.intp(box)  # np.int0 was removed in NumPy 2.0
                color = colors.get(class_id, (0, 255, 0))
                cv2.drawContours(frame, [box], 0, color, 2)
                cv2.putText(frame, f"{label_en}: {confidence:.2f}", (int(x_center), int(y_center)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

        frames.append(frame)

    cap.release()

    # Re-encode the annotated frames as an MP4 clip at a fixed 20 fps.
    output_path = 'output.mp4'
    out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), 20.0, (640, 480))
    for frame in frames:
        out.write(frame)
    out.release()

    # Per-class counts accumulated over the whole video.
    df = pd.DataFrame({
        'Label (English)': list(overall_counts.keys()),
        'Label (Persian)': [en_to_fa.get(k, 'ناشناخته') for k in overall_counts.keys()],
        'Object Count': list(overall_counts.values())
    })

    return output_path, df


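# Image tab ("Object detection in aerial images"): upload an aerial image to see
# the detected objects and their counts. UI labels are in Persian.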
image_interface = gr.Interface(
    fn=detect_and_draw_image,
    inputs=gr.Image(type="pil", label="بارگذاری تصویر"),
    outputs=[gr.Image(type="pil"), gr.Dataframe(label="تعداد اشیاء")],
    title="تشخیص اشیاء در تصاویر هوایی",
    description="یک تصویر هوایی بارگذاری کنید تا اشیاء شناسایی شده و تعداد آنها را ببینید.",
    examples=['Examples/images/areial_car.jpg', 'Examples/images/arieal_car_1.jpg', 'Examples/images/t.jpg']
)

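# Video tab ("Object detection in videos"): upload a video to see
# the detected objects and their counts.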
video_interface = gr.Interface(
    fn=detect_and_draw_video,
    inputs=gr.Video(label="بارگذاری ویدئو"),
    outputs=[gr.Video(label="ویدئوی پردازش شده"), gr.Dataframe(label="تعداد اشیاء")],
    title="تشخیص اشیاء در ویدئوها",
    description="یک ویدئو بارگذاری کنید تا اشیاء شناسایی شده و تعداد آنها را ببینید.",
    examples=['Examples/video/city.mp4', 'Examples/video/airplane.mp4']
)

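# Combine both interfaces into one tabbed app ("Image detection" / "Video detection")
# and launch it with a public share link.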
app = gr.TabbedInterface([image_interface, video_interface], ["تشخیص تصویر", "تشخیص ویدئو"])
app.launch(debug=True, share=True)