ayberkuckun committed
Commit: 5efbd91
Parent(s): 269d1a9

try
Files changed:
- .gitattributes +1 -0
- .gitignore +1 -0
- app.py +28 -0
- configs.yaml +9 -0
- data/Video.mp4 +3 -0
- main2.py +185 -0
- requirements.txt +7 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.mp4 filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
+.idea/
app.py ADDED
@@ -0,0 +1,28 @@
+import argparse
+
+import cv2
+import gradio as gr
+import hopsworks
+
+from main2 import main
+
+project = hopsworks.login()
+
+dataset_api = project.get_dataset_api()
+
+dataset_api.download("Resources/aqi/images/df_next_7_days.png", overwrite=True)
+
+
+def count_vehicles(video):
+    # Build the argument namespace that main() expects.
+    args = argparse.Namespace()
+    args.config_path = "configs.yaml"  # the config file added at the repo root
+    args.source = video  # path entered in the textbox
+    # main() yields BGR frames (OpenCV convention); convert to RGB for Gradio.
+    for frame in main(args):
+        yield cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+
+
+demo = gr.Interface(count_vehicles, inputs=gr.Textbox("./data/Video.mp4", label="Path to the video file."), outputs="image")
+
+demo.launch()
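For a quick local check of the streaming pipeline without Gradio or the Hopsworks download, a minimal sketch (not part of the commit; the paths are the repo defaults) that drains the same generator:

    import argparse

    from main2 import main

    # Same namespace app.py builds, pointed at the bundled sample video.
    args = argparse.Namespace(config_path="configs.yaml", source="./data/Video.mp4")

    # main() yields one annotated frame per video frame; count them.
    n_frames = sum(1 for _ in main(args))
    print(f"processed {n_frames} frames")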
configs.yaml ADDED
@@ -0,0 +1,9 @@
+"min_class_confidence": 0.1
+"model": "yolov8n.pt"
+"vehicle_classes": ["bicycle", "bus", "car", "motorbike"]
+"class_colours": {
+  "bicycle": [0, 0, 255],
+  "bus": [0, 255, 0],
+  "car": [255, 0, 0],
+  "motorbike": [255, 255, 0]
+}
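The colour triples here are consumed by OpenCV drawing calls in main2.py, so they are in BGR order ([0, 0, 255] draws bicycles in red). A minimal sketch (hypothetical usage, assuming the file sits at the repo root) of reading these keys back:

    import yaml

    with open("configs.yaml") as file:
        configs = yaml.safe_load(file)

    # Detector weights and the BGR colour used to draw bicycles.
    print(configs["model"])                      # yolov8n.pt
    print(configs["class_colours"]["bicycle"])   # [0, 0, 255] -> red in BGR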
data/Video.mp4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa6acb40e4468657fc9aaeabb000739f9045b8970af20e9daaa0b2b4bffe9e9d
+size 6648163
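Only this Git LFS pointer is versioned in the repo; the ~6.6 MB video itself lives in LFS storage, addressed by the SHA-256 above. A small sketch (hypothetical helper, not part of the commit) that splits such a pointer into its fields:

    # Split a Git LFS pointer file into its key/value fields (hypothetical helper).
    def parse_lfs_pointer(text):
        # Each pointer line is "<key> <value>".
        fields = {}
        for line in text.strip().splitlines():
            key, _, value = line.strip().partition(" ")
            fields[key] = value
        return fields

    pointer_text = """version https://git-lfs.github.com/spec/v1
    oid sha256:aa6acb40e4468657fc9aaeabb000739f9045b8970af20e9daaa0b2b4bffe9e9d
    size 6648163"""

    fields = parse_lfs_pointer(pointer_text)
    print(fields["oid"], fields["size"])  # sha256:aa6acb... 6648163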
main2.py ADDED
@@ -0,0 +1,185 @@
+import argparse
+import os
+from pathlib import Path
+
+import cv2
+import numpy as np
+import yaml
+from boxmot import DeepOCSORT
+from ultralytics import YOLO
+from ultralytics.utils.checks import check_imgsz
+
+
+def main(args):
+    # Get configs.
+    try:
+        with open(args.config_path) as file:
+            configs = yaml.safe_load(file)
+    except OSError as exc:
+        raise FileNotFoundError("Please give the correct path to the configs YAML file.") from exc
+
+    videoSrcPath = args.source
+    if not os.path.exists(videoSrcPath):
+        print(f"Exiting, as the video path {videoSrcPath} doesn't exist.")
+        return
+    cap = cv2.VideoCapture(videoSrcPath)
+    frames_count, fps, width, height = cap.get(cv2.CAP_PROP_FRAME_COUNT), cap.get(cv2.CAP_PROP_FPS), cap.get(
+        cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
+    width = int(width)
+    height = int(height)
+    configs["width"] = width
+    configs["height"] = height
+
+    print(f"Input video: #frames={frames_count}, fps={fps}, width={width}, height={height}")
+
+    frame_number = 0
+    cv2.namedWindow("output", cv2.WINDOW_AUTOSIZE)
+
+    # Make sure the inference size is valid for the model's stride.
+    imgsz = check_imgsz([width, height], min_dim=2)
+
+    model = YOLO(configs["model"])
+
+    # from deep_sort_realtime.deepsort_tracker import DeepSort
+    # tracker = DeepSort()
+
+    tracker = DeepOCSORT(
+        model_weights=Path('osnet_x0_25_msmt17.pt'),  # which ReID model to use
+        device='cpu:0',
+        fp16=False,
+    )
+
+    tracked_vehicles = {}
+    totalOutgoing = 0
+    totalIncoming = 0
+
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if ret:
+            results = model(frame, imgsz=imgsz)  # predict on an image
+            configs["classes"] = results[0].names
+            object_classes = results[0].boxes.cls.numpy()  # .cpu()
+            object_confidences = results[0].boxes.conf.numpy()  # .cpu()
+            object_coordinates = results[0].boxes.xyxy.numpy()  # .cpu()
+            detections = []
+
+            for i in range(len(object_classes)):
+                if configs["classes"][object_classes[i]] in configs["vehicle_classes"]:
+                    if object_confidences[i] > configs["min_class_confidence"]:
+                        startX, startY = (int(object_coordinates[i][0]), int(object_coordinates[i][1]))
+                        endX, endY = (int(object_coordinates[i][2]), int(object_coordinates[i][3]))
+
+                        detections.append([startX, startY, endX, endY, object_confidences[i], object_classes[i]])
+                        # detections.append(([startX, startY, endX - startX, endY - startY], object_confidences[i], object_classes[i]))
+
+            # Guard against frames with no detections (np.array([]) would be 1-D).
+            detections = np.array(detections) if detections else np.empty((0, 6))
+            tracks = tracker.update(detections, frame)
+            # tracks = tracker.update_tracks(detections, frame=frame)
+
+            # Counting lines for the vehicles in the frame.
+            cv2.line(
+                frame, (0, configs["height"] - 150), (configs["width"], configs["height"] - 150), (0, 255, 255), 2
+            )
+
+            cv2.line(
+                frame, (0, configs["height"] - 325), (configs["width"], configs["height"] - 325), (0, 255, 255), 2
+            )
+
+            for track in tracks:
+                (startX, startY, endX, endY, objectID, confidence, class_idx, _) = track
+                # startX, startY, endX, endY = track.original_ltwh[0], track.original_ltwh[1], track.original_ltwh[0] + \
+                #                              track.original_ltwh[2], track.original_ltwh[1] + track.original_ltwh[3]
+                # objectID = track.track_id
+                # confidence = track.det_conf
+                # class_idx = track.det_class
+
+                centroid = (int((startX + endX) / 2.0), int((startY + endY) / 2.0))
+                tracked_vehicle = tracked_vehicles.get(objectID, None)
+                if tracked_vehicle is None:
+                    tracked_vehicle = TrackedVehicle(objectID, centroid)
+
+                else:
+                    # Vertical drift relative to the mean of the track's past centroids:
+                    # negative means the vehicle is moving up the frame.
+                    direction = centroid[1] - np.mean([c[1] for c in tracked_vehicle.centroids])
+                    tracked_vehicle.centroids.append(centroid)
+
+                    if not tracked_vehicle.counted:
+                        if direction < 0 and configs["height"] - 325 < centroid[1] < configs["height"] - 150:
+                            totalOutgoing += 1
+                            tracked_vehicle.direction = "Outgoing"
+                            tracked_vehicle.counted = True
+
+                        elif direction > 0 and configs["height"] - 325 < centroid[1] < configs["height"] - 150:
+                            totalIncoming += 1
+                            tracked_vehicle.direction = "Incoming"
+                            tracked_vehicle.counted = True
+
+                tracked_vehicles[objectID] = tracked_vehicle
+
+                y = startY - 10 if startY - 10 > 10 else startY + 10
+                text = f'id: {int(objectID)}, class: {configs["classes"][class_idx]}, conf: {confidence * 100:.1f}%'
+
+                frame = cv2.rectangle(frame, (int(startX), int(startY)), (int(endX), int(endY)),
+                                      configs["class_colours"][configs["classes"][class_idx]], 2)
+
+                frame = cv2.circle(frame, (centroid[0], centroid[1]), 4,
+                                   configs["class_colours"][configs["classes"][class_idx]], -1)
+
+                frame = cv2.putText(frame, text, (int(startX), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.45,
+                                    configs["class_colours"][configs["classes"][class_idx]], 2)
+
+            frame = cv2.putText(frame, "Frame#: " + str(frame_number), (0, configs["height"] - 30),
+                                cv2.FONT_HERSHEY_SIMPLEX, 1, (2, 10, 200), 2)
+
+            frame = cv2.putText(frame, "Incoming#: " + str(totalIncoming), (0, 30),
+                                cv2.FONT_HERSHEY_SIMPLEX, 1, (2, 10, 200), 2)
+
+            frame = cv2.putText(frame, "Outgoing#: " + str(totalOutgoing), (configs["width"] - 300, 30),
+                                cv2.FONT_HERSHEY_SIMPLEX, 1, (2, 10, 200), 2)
+
+            cv2.imshow('output', frame)
+            key = cv2.waitKey(1)
+            # Quit when 'q' is pressed; pause when 'k' is pressed.
+            if key == ord('q'):
+                break
+            elif key == ord('k'):
+                cv2.waitKey(0)
+
+            frame_number = frame_number + 1
+
+            yield frame
+
+
+class TrackedVehicle:
+    def __init__(self, objectID, centroid):
+        # store the object ID, then initialize a list of centroids
+        # using the current centroid
+        self.objectID = objectID
+        self.centroids = [centroid]
+        self.direction = None
+        # initialize a boolean used to indicate if the object has
+        # already been counted or not
+        self.counted = False
+
+
+def get_arguments():
+    parser = argparse.ArgumentParser(description='program to open a video and display it; ',
+                                     formatter_class=argparse.RawTextHelpFormatter,
+                                     usage='\n #1: open a single video: >> python3 main2.py -s "videoname.MP4" -c configs.yaml')
+
+    parser.add_argument('--source', "-s", type=str, required=True, help='source')  # file/folder, 0 for webcam
+    parser.add_argument('--device', default='0', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')  # not used, 640 for now
+    parser.add_argument("--config_path", "-c", required=True, help="Path to user-defined configs", type=str)
+
+    return parser.parse_args()
+
+
+if __name__ == "__main__":
+    args = get_arguments()
+    print(args)
+    # main() is a generator (it yields annotated frames), so drain it to run the loop.
+    for _ in main(args):
+        pass
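The counting rule in main2.py reduces to: a track is counted once, the first time its centroid sits inside the band between the two horizontal lines, with the sign of its vertical drift deciding Incoming versus Outgoing. A standalone sketch of that rule (hypothetical helper, simplified from the loop above; the numeric band assumes a 720-pixel-high frame):

    # Sketch: the counting rule from main2.py in isolation (hypothetical helper).
    def classify_crossing(history, lower_y, upper_y):
        """Return "Outgoing", "Incoming", or None for a track's centroid-y history.

        lower_y/upper_y bound the counting band (upper_y < lower_y in image coords).
        """
        if len(history) < 2:
            return None
        current = history[-1]
        mean_prev = sum(history[:-1]) / len(history[:-1])
        if not (upper_y < current < lower_y):
            return None  # centroid not inside the counting band
        if current < mean_prev:
            return "Outgoing"  # moving up the frame
        if current > mean_prev:
            return "Incoming"  # moving down the frame
        return None

    # A track moving up through the band between y=395 (=720-325) and y=570 (=720-150):
    print(classify_crossing([600, 560, 500], lower_y=570, upper_y=395))  # Outgoing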
requirements.txt ADDED
@@ -0,0 +1,7 @@
+matplotlib==3.5.3
+opencv-python==4.7.0.72
+scipy==1.7.3
+
+ultralytics
+boxmot
+pyyaml