Spaces · Runtime error
Gemma-sciling committed
Commit · edad70f
1 Parent(s): 04295ff
Upload 49 files
Browse files
- models/__init__.py +0 -0
- models/__pycache__/__init__.cpython-39.pyc +0 -0
- models/__pycache__/common.cpython-39.pyc +0 -0
- models/__pycache__/experimental.cpython-39.pyc +0 -0
- models/__pycache__/yolo.cpython-39.pyc +0 -0
- models/blazeface.yaml +33 -0
- models/blazeface_fpn.yaml +38 -0
- models/common.py +457 -0
- models/experimental.py +133 -0
- models/yolo.py +345 -0
- models/yolov5l.yaml +47 -0
- models/yolov5l6.yaml +60 -0
- models/yolov5m.yaml +47 -0
- models/yolov5m6.yaml +60 -0
- models/yolov5n-0.5.yaml +46 -0
- models/yolov5n.yaml +46 -0
- models/yolov5n6.yaml +58 -0
- models/yolov5s.yaml +47 -0
- models/yolov5s6.yaml +60 -0
- utils/__init__.py +0 -0
- utils/__pycache__/__init__.cpython-39.pyc +0 -0
- utils/__pycache__/autoanchor.cpython-39.pyc +0 -0
- utils/__pycache__/datasets.cpython-39.pyc +0 -0
- utils/__pycache__/general.cpython-39.pyc +0 -0
- utils/__pycache__/google_utils.cpython-39.pyc +0 -0
- utils/__pycache__/metrics.cpython-39.pyc +0 -0
- utils/__pycache__/plots.cpython-39.pyc +0 -0
- utils/__pycache__/torch_utils.cpython-39.pyc +0 -0
- utils/activations.py +72 -0
- utils/autoanchor.py +155 -0
- utils/aws/__init__.py +0 -0
- utils/aws/mime.sh +26 -0
- utils/aws/resume.py +37 -0
- utils/aws/userdata.sh +27 -0
- utils/datasets.py +1019 -0
- utils/face_datasets.py +834 -0
- utils/general.py +646 -0
- utils/google_app_engine/Dockerfile +25 -0
- utils/google_app_engine/additional_requirements.txt +4 -0
- utils/google_app_engine/app.yaml +14 -0
- utils/google_utils.py +122 -0
- utils/infer_utils.py +36 -0
- utils/loss.py +304 -0
- utils/metrics.py +200 -0
- utils/plots.py +413 -0
- utils/torch_utils.py +294 -0
- utils/wandb_logging/__init__.py +0 -0
- utils/wandb_logging/log_dataset.py +24 -0
- utils/wandb_logging/wandb_utils.py +306 -0
models/__init__.py
ADDED
File without changes
models/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (143 Bytes)
models/__pycache__/common.cpython-39.pyc
ADDED
Binary file (18.9 kB)
models/__pycache__/experimental.cpython-39.pyc
ADDED
Binary file (5.62 kB)
models/__pycache__/yolo.cpython-39.pyc
ADDED
Binary file (12.7 kB)
models/blazeface.yaml
ADDED
@@ -0,0 +1,33 @@
# parameters
nc: 1  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 1.0  # layer channel multiple

# anchors
anchors:
  - [5,6, 10,13, 21,26]  # P3/8
  - [55,72, 225,304, 438,553]  # P4/16

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [24, 3, 2]],  # 0-P1/2
   [-1, 2, BlazeBlock, [24]],  # 1
   [-1, 1, BlazeBlock, [48, None, 2]],  # 2-P2/4
   [-1, 2, BlazeBlock, [48]],  # 3
   [-1, 1, DoubleBlazeBlock, [96, 24, 2]],  # 4-P3/8
   [-1, 2, DoubleBlazeBlock, [96, 24]],  # 5
   [-1, 1, DoubleBlazeBlock, [96, 24, 2]],  # 6-P4/16
   [-1, 2, DoubleBlazeBlock, [96, 24]],  # 7
  ]

# YOLOv5 head
head:
  [[-1, 1, Conv, [64, 1, 1]],  # 8 (P4/16-large)
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 5], 1, Concat, [1]],  # cat backbone P3
   [-1, 1, Conv, [64, 1, 1]],  # 11 (P3/8-medium)

   [[11, 8], 1, Detect, [nc, anchors]],  # Detect(P3, P4)
  ]
models/blazeface_fpn.yaml
ADDED
@@ -0,0 +1,38 @@
# parameters
nc: 1  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 1.0  # layer channel multiple

# anchors
anchors:
  - [5,6, 10,13, 21,26]  # P3/8
  - [55,72, 225,304, 438,553]  # P4/16

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [24, 3, 2]],  # 0-P1/2
   [-1, 2, BlazeBlock, [24]],  # 1
   [-1, 1, BlazeBlock, [48, None, 2]],  # 2-P2/4
   [-1, 2, BlazeBlock, [48]],  # 3
   [-1, 1, DoubleBlazeBlock, [96, 24, 2]],  # 4-P3/8
   [-1, 2, DoubleBlazeBlock, [96, 24]],  # 5
   [-1, 1, DoubleBlazeBlock, [96, 24, 2]],  # 6-P4/16
   [-1, 2, DoubleBlazeBlock, [96, 24]],  # 7
  ]

# YOLOv5 head
head:
  [[-1, 1, Conv, [48, 1, 1]],  # 8
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 5], 1, Concat, [1]],  # cat backbone P3
   [-1, 1, Conv, [48, 1, 1]],  # 11 (P3/8-medium)

   [-1, 1, nn.MaxPool2d, [3, 2, 1]],  # 12
   [[-1, 7], 1, Concat, [1]],  # cat backbone P4
   [-1, 1, Conv, [48, 1, 1]],  # 14 (P4/16-large)

   [[11, 14], 1, Detect, [nc, anchors]],  # Detect(P3, P4)
  ]
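In both configs, anchors lists three (width, height) pairs per output level; Detect in models/yolo.py recovers the per-level anchor count from that flat layout. A small sketch of the same bookkeeping:

# Sketch: anchor bookkeeping as performed in Detect.__init__ (models/yolo.py).
import torch

anchors = [[5, 6, 10, 13, 21, 26],        # P3/8
           [55, 72, 225, 304, 438, 553]]  # P4/16
nl = len(anchors)          # number of detection layers -> 2
na = len(anchors[0]) // 2  # anchors per layer -> 3
a = torch.tensor(anchors).float().view(nl, -1, 2)  # shape (2, 3, 2): one (w, h) per anchor
print(nl, na, a.shape)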
models/common.py
ADDED
@@ -0,0 +1,457 @@
# This file contains modules common to various models

import math
import warnings

import numpy as np
import requests
import torch
import torch.nn as nn
from PIL import Image, ImageDraw

from utils.datasets import letterbox
from utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh
from utils.plots import color_list


def autopad(k, p=None):  # kernel, padding
    # Pad to 'same'
    if p is None:
        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
    return p


def channel_shuffle(x, groups):
    batchsize, num_channels, height, width = x.data.size()
    channels_per_group = num_channels // groups

    # reshape
    x = x.view(batchsize, groups, channels_per_group, height, width)
    x = torch.transpose(x, 1, 2).contiguous()

    # flatten
    x = x.view(batchsize, -1, height, width)
    return x


def DWConv(c1, c2, k=1, s=1, act=True):
    # Depthwise convolution
    return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)


class Conv(nn.Module):
    # Standard convolution
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Conv, self).__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
        # self.act = nn.LeakyReLU(0.1, inplace=True) if act is True else (act if isinstance(act, nn.Module) else nn.Identity())

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))

    def fuseforward(self, x):
        return self.act(self.conv(x))


class StemBlock(nn.Module):
    def __init__(self, c1, c2, k=3, s=2, p=None, g=1, act=True):
        super(StemBlock, self).__init__()
        self.stem_1 = Conv(c1, c2, k, s, p, g, act)
        self.stem_2a = Conv(c2, c2 // 2, 1, 1, 0)
        self.stem_2b = Conv(c2 // 2, c2, 3, 2, 1)
        self.stem_2p = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.stem_3 = Conv(c2 * 2, c2, 1, 1, 0)

    def forward(self, x):
        stem_1_out = self.stem_1(x)
        stem_2a_out = self.stem_2a(stem_1_out)
        stem_2b_out = self.stem_2b(stem_2a_out)
        stem_2p_out = self.stem_2p(stem_1_out)
        out = self.stem_3(torch.cat((stem_2b_out, stem_2p_out), 1))
        return out


class Bottleneck(nn.Module):
    # Standard bottleneck
    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
        super(Bottleneck, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_, c2, 3, 1, g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))


class BottleneckCSP(nn.Module):
    # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super(BottleneckCSP, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
        self.cv4 = Conv(2 * c_, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
        self.act = nn.LeakyReLU(0.1, inplace=True)
        self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])

    def forward(self, x):
        y1 = self.cv3(self.m(self.cv1(x)))
        y2 = self.cv2(x)
        return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))


class C3(nn.Module):
    # CSP Bottleneck with 3 convolutions
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super(C3, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c1, c_, 1, 1)
        self.cv3 = Conv(2 * c_, c2, 1)  # act=FReLU(c2)
        self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])

    def forward(self, x):
        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))


class ShuffleV2Block(nn.Module):
    def __init__(self, inp, oup, stride):
        super(ShuffleV2Block, self).__init__()

        if not (1 <= stride <= 3):
            raise ValueError('illegal stride value')
        self.stride = stride

        branch_features = oup // 2
        assert (self.stride != 1) or (inp == branch_features << 1)

        if self.stride > 1:
            self.branch1 = nn.Sequential(
                self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
                nn.BatchNorm2d(inp),
                nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(branch_features),
                nn.SiLU(),
            )
        else:
            self.branch1 = nn.Sequential()

        self.branch2 = nn.Sequential(
            nn.Conv2d(inp if (self.stride > 1) else branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(branch_features),
            nn.SiLU(),
            self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),
            nn.BatchNorm2d(branch_features),
            nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(branch_features),
            nn.SiLU(),
        )

    @staticmethod
    def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
        return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)

    def forward(self, x):
        if self.stride == 1:
            x1, x2 = x.chunk(2, dim=1)
            out = torch.cat((x1, self.branch2(x2)), dim=1)
        else:
            out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
        out = channel_shuffle(out, 2)
        return out


class BlazeBlock(nn.Module):
    def __init__(self, in_channels, out_channels, mid_channels=None, stride=1):
        super(BlazeBlock, self).__init__()
        mid_channels = mid_channels or in_channels
        assert stride in [1, 2]
        if stride > 1:
            self.use_pool = True
        else:
            self.use_pool = False

        self.branch1 = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=mid_channels, kernel_size=5, stride=stride, padding=2, groups=in_channels),
            nn.BatchNorm2d(mid_channels),
            nn.Conv2d(in_channels=mid_channels, out_channels=out_channels, kernel_size=1, stride=1),
            nn.BatchNorm2d(out_channels),
        )

        if self.use_pool:
            self.shortcut = nn.Sequential(
                nn.MaxPool2d(kernel_size=stride, stride=stride),
                nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1),
                nn.BatchNorm2d(out_channels),
            )

        self.relu = nn.SiLU(inplace=True)

    def forward(self, x):
        branch1 = self.branch1(x)
        out = (branch1 + self.shortcut(x)) if self.use_pool else (branch1 + x)
        return self.relu(out)


class DoubleBlazeBlock(nn.Module):
    def __init__(self, in_channels, out_channels, mid_channels=None, stride=1):
        super(DoubleBlazeBlock, self).__init__()
        mid_channels = mid_channels or in_channels
        assert stride in [1, 2]
        if stride > 1:
            self.use_pool = True
        else:
            self.use_pool = False

        self.branch1 = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=5, stride=stride, padding=2, groups=in_channels),
            nn.BatchNorm2d(in_channels),
            nn.Conv2d(in_channels=in_channels, out_channels=mid_channels, kernel_size=1, stride=1),
            nn.BatchNorm2d(mid_channels),
            nn.SiLU(inplace=True),
            nn.Conv2d(in_channels=mid_channels, out_channels=mid_channels, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(mid_channels),
            nn.Conv2d(in_channels=mid_channels, out_channels=out_channels, kernel_size=1, stride=1),
            nn.BatchNorm2d(out_channels),
        )

        if self.use_pool:
            self.shortcut = nn.Sequential(
                nn.MaxPool2d(kernel_size=stride, stride=stride),
                nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1),
                nn.BatchNorm2d(out_channels),
            )

        self.relu = nn.SiLU(inplace=True)

    def forward(self, x):
        branch1 = self.branch1(x)
        out = (branch1 + self.shortcut(x)) if self.use_pool else (branch1 + x)
        return self.relu(out)


class SPP(nn.Module):
    # Spatial pyramid pooling layer used in YOLOv3-SPP
    def __init__(self, c1, c2, k=(5, 9, 13)):
        super(SPP, self).__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
        self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])

    def forward(self, x):
        x = self.cv1(x)
        return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))


class SPPF(nn.Module):
    # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
    def __init__(self, c1, c2, k=5):  # equivalent to SPP(k=(5, 9, 13))
        super().__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * 4, c2, 1, 1)
        self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)

    def forward(self, x):
        x = self.cv1(x)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')  # suppress torch 1.9.0 max_pool2d() warning
            y1 = self.m(x)
            y2 = self.m(y1)
            return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))


class Focus(nn.Module):
    # Focus wh information into c-space
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Focus, self).__init__()
        self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
        # self.contract = Contract(gain=2)

    def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
        return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
        # return self.conv(self.contract(x))


class Contract(nn.Module):
    # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, x):
        N, C, H, W = x.size()  # assert (H / s == 0) and (W / s == 0), 'Indivisible gain'
        s = self.gain
        x = x.view(N, C, H // s, s, W // s, s)  # x(1,64,40,2,40,2)
        x = x.permute(0, 3, 5, 1, 2, 4).contiguous()  # x(1,2,2,64,40,40)
        return x.view(N, C * s * s, H // s, W // s)  # x(1,256,40,40)


class Expand(nn.Module):
    # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, x):
        N, C, H, W = x.size()  # assert C / s ** 2 == 0, 'Indivisible gain'
        s = self.gain
        x = x.view(N, s, s, C // s ** 2, H, W)  # x(1,2,2,16,80,80)
        x = x.permute(0, 3, 4, 1, 5, 2).contiguous()  # x(1,16,80,2,80,2)
        return x.view(N, C // s ** 2, H * s, W * s)  # x(1,16,160,160)


class Concat(nn.Module):
    # Concatenate a list of tensors along dimension
    def __init__(self, dimension=1):
        super(Concat, self).__init__()
        self.d = dimension

    def forward(self, x):
        return torch.cat(x, self.d)


class NMS(nn.Module):
    # Non-Maximum Suppression (NMS) module
    conf = 0.25  # confidence threshold
    iou = 0.45  # IoU threshold
    classes = None  # (optional list) filter by class

    def __init__(self):
        super(NMS, self).__init__()

    def forward(self, x):
        return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)


class autoShape(nn.Module):
    # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
    img_size = 640  # inference size (pixels)
    conf = 0.25  # NMS confidence threshold
    iou = 0.45  # NMS IoU threshold
    classes = None  # (optional list) filter by class

    def __init__(self, model):
        super(autoShape, self).__init__()
        self.model = model.eval()

    def autoshape(self):
        print('autoShape already enabled, skipping... ')  # model already converted to model.autoshape()
        return self

    def forward(self, imgs, size=640, augment=False, profile=False):
        # Inference from various sources. For height=720, width=1280, RGB images example inputs are:
        #   filename:  imgs = 'data/samples/zidane.jpg'
        #   URI:            = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
        #   OpenCV:         = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(720,1280,3)
        #   PIL:            = Image.open('image.jpg')  # HWC x(720,1280,3)
        #   numpy:          = np.zeros((720,1280,3))  # HWC
        #   torch:          = torch.zeros(16,3,720,1280)  # BCHW
        #   multiple:       = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  # list of images

        p = next(self.model.parameters())  # for device and type
        if isinstance(imgs, torch.Tensor):  # torch
            return self.model(imgs.to(p.device).type_as(p), augment, profile)  # inference

        # Pre-process
        n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs])  # number of images, list of images
        shape0, shape1 = [], []  # image and inference shapes
        for i, im in enumerate(imgs):
            if isinstance(im, str):  # filename or uri
                im = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)  # open
            im = np.array(im)  # to numpy
            if im.shape[0] < 5:  # image in CHW
                im = im.transpose((1, 2, 0))  # reverse dataloader .transpose(2, 0, 1)
            im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3)  # enforce 3ch input
            s = im.shape[:2]  # HWC
            shape0.append(s)  # image shape
            g = (size / max(s))  # gain
            shape1.append([y * g for y in s])
            imgs[i] = im  # update
        shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)]  # inference shape
        x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs]  # pad
        x = np.stack(x, 0) if n > 1 else x[0][None]  # stack
        x = np.ascontiguousarray(x.transpose((0, 3, 1, 2)))  # BHWC to BCHW
        x = torch.from_numpy(x).to(p.device).type_as(p) / 255.  # uint8 to fp16/32

        # Inference
        with torch.no_grad():
            y = self.model(x, augment, profile)[0]  # forward
        y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)  # NMS

        # Post-process
        for i in range(n):
            scale_coords(shape1, y[i][:, :4], shape0[i])

        return Detections(imgs, y, self.names)


class Detections:
    # detections class for YOLOv5 inference results
    def __init__(self, imgs, pred, names=None):
        super(Detections, self).__init__()
        d = pred[0].device  # device
        gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs]  # normalizations
        self.imgs = imgs  # list of images as numpy arrays
        self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
        self.names = names  # class names
        self.xyxy = pred  # xyxy pixels
        self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
        self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
        self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized
        self.n = len(self.pred)

    def display(self, pprint=False, show=False, save=False, render=False):
        colors = color_list()
        for i, (img, pred) in enumerate(zip(self.imgs, self.pred)):
            str = f'Image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} '
            if pred is not None:
                for c in pred[:, -1].unique():
                    n = (pred[:, -1] == c).sum()  # detections per class
                    if len(self.names) > int(c):
                        str += f'{n} {self.names[int(c)]}s, '  # add to string
                if show or save or render:
                    img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img  # from np
                    for *box, conf, cls in pred:  # xyxy, confidence, class
                        # str += '%s %.2f, ' % (names[int(cls)], conf)  # label
                        ImageDraw.Draw(img).rectangle(box, width=4, outline=colors[int(cls) % 10])  # plot
            if pprint:
                print(str)
            if show:
                img.show(f'Image {i}')  # show
            if save:
                f = f'results{i}.jpg'
                str += f"saved to '{f}'"
                img.save(f)  # save
            if render:
                self.imgs[i] = np.asarray(img)

    def print(self):
        self.display(pprint=True)  # print results

    def show(self):
        self.display(show=True)  # show results

    def save(self):
        self.display(save=True)  # save results

    def render(self):
        self.display(render=True)  # render results
        return self.imgs

    def __len__(self):
        return self.n

    def tolist(self):
        # return a list of Detections objects, i.e. 'for result in results.tolist():'
        x = [Detections([self.imgs[i]], [self.pred[i]], self.names) for i in range(self.n)]
        for d in x:
            for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
                setattr(d, k, getattr(d, k)[0])  # pop out of list
        return x


class Classify(nn.Module):
    # Classification head, i.e. x(b,c1,20,20) to x(b,c2)
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Classify, self).__init__()
        self.aap = nn.AdaptiveAvgPool2d(1)  # to x(b,c1,1,1)
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g)  # to x(b,c2,1,1)
        self.flat = nn.Flatten()

    def forward(self, x):
        z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1)  # cat if list
        return self.flat(self.conv(z))  # flatten to x(b,c2)
models/experimental.py
ADDED
@@ -0,0 +1,133 @@
# This file contains experimental modules

import numpy as np
import torch
import torch.nn as nn

from models.common import Conv, DWConv
from utils.google_utils import attempt_download


class CrossConv(nn.Module):
    # Cross Convolution Downsample
    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
        # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
        super(CrossConv, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, (1, k), (1, s))
        self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))


class Sum(nn.Module):
    # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
    def __init__(self, n, weight=False):  # n: number of inputs
        super(Sum, self).__init__()
        self.weight = weight  # apply weights boolean
        self.iter = range(n - 1)  # iter object
        if weight:
            self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True)  # layer weights

    def forward(self, x):
        y = x[0]  # no weight
        if self.weight:
            w = torch.sigmoid(self.w) * 2
            for i in self.iter:
                y = y + x[i + 1] * w[i]
        else:
            for i in self.iter:
                y = y + x[i + 1]
        return y


class GhostConv(nn.Module):
    # Ghost Convolution https://github.com/huawei-noah/ghostnet
    def __init__(self, c1, c2, k=1, s=1, g=1, act=True):  # ch_in, ch_out, kernel, stride, groups
        super(GhostConv, self).__init__()
        c_ = c2 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, k, s, None, g, act)
        self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)

    def forward(self, x):
        y = self.cv1(x)
        return torch.cat([y, self.cv2(y)], 1)


class GhostBottleneck(nn.Module):
    # Ghost Bottleneck https://github.com/huawei-noah/ghostnet
    def __init__(self, c1, c2, k, s):
        super(GhostBottleneck, self).__init__()
        c_ = c2 // 2
        self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1),  # pw
                                  DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(),  # dw
                                  GhostConv(c_, c2, 1, 1, act=False))  # pw-linear
        self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),
                                      Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()

    def forward(self, x):
        return self.conv(x) + self.shortcut(x)


class MixConv2d(nn.Module):
    # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595
    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
        super(MixConv2d, self).__init__()
        groups = len(k)
        if equal_ch:  # equal c_ per group
            i = torch.linspace(0, groups - 1E-6, c2).floor()  # c2 indices
            c_ = [(i == g).sum() for g in range(groups)]  # intermediate channels
        else:  # equal weight.numel() per group
            b = [c2] + [0] * groups
            a = np.eye(groups + 1, groups, k=-1)
            a -= np.roll(a, 1, axis=1)
            a *= np.array(k) ** 2
            a[0] = 1
            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b

        self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.LeakyReLU(0.1, inplace=True)

    def forward(self, x):
        return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))


class Ensemble(nn.ModuleList):
    # Ensemble of models
    def __init__(self):
        super(Ensemble, self).__init__()

    def forward(self, x, augment=False):
        y = []
        for module in self:
            y.append(module(x, augment)[0])
        # y = torch.stack(y).max(0)[0]  # max ensemble
        # y = torch.stack(y).mean(0)  # mean ensemble
        y = torch.cat(y, 1)  # nms ensemble
        return y, None  # inference, train output


def attempt_load(weights, map_location=None):
    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
    model = Ensemble()
    for w in weights if isinstance(weights, list) else [weights]:
        attempt_download(w)
        model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval())  # load FP32 model

    # Compatibility updates
    for m in model.modules():
        if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
            m.inplace = True  # pytorch 1.7.0 compatibility
        elif type(m) is Conv:
            m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility

    if len(model) == 1:
        return model[-1]  # return model
    else:
        print('Ensemble created with %s\n' % weights)
        for k in ['names', 'stride']:
            setattr(model, k, getattr(model[-1], k))
        return model  # return ensemble
models/yolo.py
ADDED
@@ -0,0 +1,345 @@
import argparse
import logging
import math
import sys
from copy import deepcopy
from pathlib import Path

import torch
import torch.nn as nn

sys.path.append('./')  # to run '$ python *.py' files in subdirectories
logger = logging.getLogger(__name__)

from models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, C3, ShuffleV2Block, Concat, NMS, autoShape, StemBlock, BlazeBlock, DoubleBlazeBlock
from models.experimental import MixConv2d, CrossConv
from utils.autoanchor import check_anchor_order
from utils.general import make_divisible, check_file, set_logging
from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
    select_device, copy_attr

try:
    import thop  # for FLOPS computation
except ImportError:
    thop = None


class Detect(nn.Module):
    stride = None  # strides computed during build
    export_cat = False  # onnx export cat output

    def __init__(self, nc=80, anchors=(), ch=()):  # detection layer
        super(Detect, self).__init__()
        self.nc = nc  # number of classes
        # self.no = nc + 5  # number of outputs per anchor
        self.no = nc + 5 + 10  # number of outputs per anchor

        self.nl = len(anchors)  # number of detection layers
        self.na = len(anchors[0]) // 2  # number of anchors
        self.grid = [torch.zeros(1)] * self.nl  # init grid
        a = torch.tensor(anchors).float().view(self.nl, -1, 2)
        self.register_buffer('anchors', a)  # shape(nl,na,2)
        self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2))  # shape(nl,1,na,1,1,2)
        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv

    def forward(self, x):
        # x = x.copy()  # for profiling
        z = []  # inference output
        if self.export_cat:
            for i in range(self.nl):
                x[i] = self.m[i](x[i])  # conv
                bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
                x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()

                if self.grid[i].shape[2:4] != x[i].shape[2:4]:
                    # self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
                    self.grid[i], self.anchor_grid[i] = self._make_grid_new(nx, ny, i)

                y = torch.full_like(x[i], 0)
                y = y + torch.cat((x[i][:, :, :, :, 0:5].sigmoid(), torch.cat((x[i][:, :, :, :, 5:15], x[i][:, :, :, :, 15:15 + self.nc].sigmoid()), 4)), 4)

                box_xy = (y[:, :, :, :, 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i]  # xy
                box_wh = (y[:, :, :, :, 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
                # box_conf = torch.cat((box_xy, torch.cat((box_wh, y[:, :, :, :, 4:5]), 4)), 4)

                landm1 = y[:, :, :, :, 5:7] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x1 y1
                landm2 = y[:, :, :, :, 7:9] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x2 y2
                landm3 = y[:, :, :, :, 9:11] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x3 y3
                landm4 = y[:, :, :, :, 11:13] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x4 y4
                landm5 = y[:, :, :, :, 13:15] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x5 y5
                # landm = torch.cat((landm1, torch.cat((landm2, torch.cat((landm3, torch.cat((landm4, landm5), 4)), 4)), 4)), 4)
                # y = torch.cat((box_conf, torch.cat((landm, y[:, :, :, :, 15:15+self.nc]), 4)), 4)
                y = torch.cat([box_xy, box_wh, y[:, :, :, :, 4:5], landm1, landm2, landm3, landm4, landm5, y[:, :, :, :, 15:15 + self.nc]], -1)

                z.append(y.view(bs, -1, self.no))
            return torch.cat(z, 1)

        for i in range(self.nl):
            x[i] = self.m[i](x[i])  # conv
            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()

            if not self.training:  # inference
                if self.grid[i].shape[2:4] != x[i].shape[2:4]:
                    self.grid[i] = self._make_grid(nx, ny).to(x[i].device)

                y = torch.full_like(x[i], 0)
                class_range = list(range(5)) + list(range(15, 15 + self.nc))
                y[..., class_range] = x[i][..., class_range].sigmoid()
                y[..., 5:15] = x[i][..., 5:15]
                # y = x[i].sigmoid()

                y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i]  # xy
                y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh

                # y[..., 5:15] = y[..., 5:15] * 8 - 4
                y[..., 5:7] = y[..., 5:7] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x1 y1
                y[..., 7:9] = y[..., 7:9] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x2 y2
                y[..., 9:11] = y[..., 9:11] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x3 y3
                y[..., 11:13] = y[..., 11:13] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x4 y4
                y[..., 13:15] = y[..., 13:15] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]  # landmark x5 y5

                # y[..., 5:7] = (y[..., 5:7] * 2 - 1) * self.anchor_grid[i]  # landmark x1 y1
                # y[..., 7:9] = (y[..., 7:9] * 2 - 1) * self.anchor_grid[i]  # landmark x2 y2
                # y[..., 9:11] = (y[..., 9:11] * 2 - 1) * self.anchor_grid[i]  # landmark x3 y3
                # y[..., 11:13] = (y[..., 11:13] * 2 - 1) * self.anchor_grid[i]  # landmark x4 y4
                # y[..., 13:15] = (y[..., 13:15] * 2 - 1) * self.anchor_grid[i]  # landmark x5 y5

                z.append(y.view(bs, -1, self.no))

        return x if self.training else (torch.cat(z, 1), x)

    @staticmethod
    def _make_grid(nx=20, ny=20):
        yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
        return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()

    def _make_grid_new(self, nx=20, ny=20, i=0):
        d = self.anchors[i].device
        if '1.10.0' in torch.__version__:  # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility
            yv, xv = torch.meshgrid([torch.arange(ny).to(d), torch.arange(nx).to(d)], indexing='ij')
        else:
            yv, xv = torch.meshgrid([torch.arange(ny).to(d), torch.arange(nx).to(d)])
        grid = torch.stack((xv, yv), 2).expand((1, self.na, ny, nx, 2)).float()
        anchor_grid = (self.anchors[i].clone() * self.stride[i]).view((1, self.na, 1, 1, 2)).expand((1, self.na, ny, nx, 2)).float()
        return grid, anchor_grid


class Model(nn.Module):
    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None):  # model, input channels, number of classes
        super(Model, self).__init__()
        if isinstance(cfg, dict):
            self.yaml = cfg  # model dict
        else:  # is *.yaml
            import yaml  # for torch hub
            self.yaml_file = Path(cfg).name
            with open(cfg) as f:
                self.yaml = yaml.load(f, Loader=yaml.FullLoader)  # model dict

        # Define model
        ch = self.yaml['ch'] = self.yaml.get('ch', ch)  # input channels
        if nc and nc != self.yaml['nc']:
            logger.info('Overriding model.yaml nc=%g with nc=%g' % (self.yaml['nc'], nc))
            self.yaml['nc'] = nc  # override yaml value
        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
        self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
        # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])

        # Build strides, anchors
        m = self.model[-1]  # Detect()
        if isinstance(m, Detect):
            s = 128  # 2x min stride
            m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])  # forward
            m.anchors /= m.stride.view(-1, 1, 1)
            check_anchor_order(m)
            self.stride = m.stride
            self._initialize_biases()  # only run once
            # print('Strides: %s' % m.stride.tolist())

        # Init weights, biases
        initialize_weights(self)
        self.info()
        logger.info('')

    def forward(self, x, augment=False, profile=False):
        if augment:
            img_size = x.shape[-2:]  # height, width
            s = [1, 0.83, 0.67]  # scales
            f = [None, 3, None]  # flips (2-ud, 3-lr)
            y = []  # outputs
            for si, fi in zip(s, f):
                xi = scale_img(x.flip(fi) if fi else x, si)
                yi = self.forward_once(xi)[0]  # forward
                # cv2.imwrite('img%g.jpg' % s, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1])  # save
                yi[..., :4] /= si  # de-scale
                if fi == 2:
                    yi[..., 1] = img_size[0] - yi[..., 1]  # de-flip ud
                elif fi == 3:
                    yi[..., 0] = img_size[1] - yi[..., 0]  # de-flip lr
                y.append(yi)
            return torch.cat(y, 1), None  # augmented inference, train
        else:
            return self.forward_once(x, profile)  # single-scale inference, train

    def forward_once(self, x, profile=False):
        y, dt = [], []  # outputs
        for m in self.model:
            if m.f != -1:  # if not from previous layer
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers

            if profile:
                o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPS
                t = time_synchronized()
                for _ in range(10):
                    _ = m(x)
                dt.append((time_synchronized() - t) * 100)
                print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))

            x = m(x)  # run
            y.append(x if m.i in self.save else None)  # save output

        if profile:
            print('%.1fms total' % sum(dt))
        return x

    def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
        # https://arxiv.org/abs/1708.02002 section 3.3
        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
        m = self.model[-1]  # Detect() module
        for mi, s in zip(m.m, m.stride):  # from
            b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
            b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
            b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())  # cls
            mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)

    def _print_biases(self):
        m = self.model[-1]  # Detect() module
        for mi in m.m:  # from
            b = mi.bias.detach().view(m.na, -1).T  # conv.bias(255) to (3,85)
            print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))

    # def _print_weights(self):
    #     for m in self.model.modules():
    #         if type(m) is Bottleneck:
    #             print('%10.3g' % (m.w.detach().sigmoid() * 2))  # shortcut weights

    def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
        print('Fusing layers... ')
        for m in self.model.modules():
            if type(m) is Conv and hasattr(m, 'bn'):
                m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
                delattr(m, 'bn')  # remove batchnorm
                m.forward = m.fuseforward  # update forward
            elif type(m) is nn.Upsample:
                m.recompute_scale_factor = None  # torch 1.11.0 compatibility
        self.info()
        return self

    def nms(self, mode=True):  # add or remove NMS module
        present = type(self.model[-1]) is NMS  # last layer is NMS
        if mode and not present:
            print('Adding NMS... ')
            m = NMS()  # module
            m.f = -1  # from
            m.i = self.model[-1].i + 1  # index
            self.model.add_module(name='%s' % m.i, module=m)  # add
            self.eval()
        elif not mode and present:
            print('Removing NMS... ')
            self.model = self.model[:-1]  # remove
        return self

    def autoshape(self):  # add autoShape module
        print('Adding autoShape... ')
        m = autoShape(self)  # wrap model
        copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=())  # copy attributes
        return m

    def info(self, verbose=False, img_size=640):  # print model information
        model_info(self, verbose, img_size)


def parse_model(d, ch):  # model_dict, input_channels(3)
    logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
    anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)

    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
        m = eval(m) if isinstance(m, str) else m  # eval strings
        for j, a in enumerate(args):
            try:
                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
            except:
                pass

        n = max(round(n * gd), 1) if n > 1 else n  # depth gain
        if m in [Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3, ShuffleV2Block, StemBlock, BlazeBlock, DoubleBlazeBlock]:
            c1, c2 = ch[f], args[0]

            # Normal
            # if i > 0 and args[0] != no:  # channel expansion factor
            #     ex = 1.75  # exponential (default 2.0)
            #     e = math.log(c2 / ch[1]) / math.log(2)
            #     c2 = int(ch[1] * ex ** e)
            # if m != Focus:

            c2 = make_divisible(c2 * gw, 8) if c2 != no else c2

            # Experimental
            # if i > 0 and args[0] != no:  # channel expansion factor
            #     ex = 1 + gw  # exponential (default 2.0)
            #     ch1 = 32  # ch[1]
            #     e = math.log(c2 / ch1) / math.log(2)  # level 1-n
            #     c2 = int(ch1 * ex ** e)
            # if m != Focus:
            #     c2 = make_divisible(c2, 8) if c2 != no else c2

            args = [c1, c2, *args[1:]]
            if m in [BottleneckCSP, C3]:
                args.insert(2, n)
                n = 1
        elif m is nn.BatchNorm2d:
            args = [ch[f]]
        elif m is Concat:
            c2 = sum([ch[-1 if x == -1 else x + 1] for x in f])
        elif m is Detect:
            args.append([ch[x + 1] for x in f])
            if isinstance(args[1], int):  # number of anchors
                args[1] = [list(range(args[1] * 2))] * len(f)
        else:
            c2 = ch[f]

        m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args)  # module
        t = str(m)[8:-2].replace('__main__.', '')  # module type
        np = sum([x.numel() for x in m_.parameters()])  # number params
        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
        logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args))  # print
        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
        layers.append(m_)
        ch.append(c2)
    return nn.Sequential(*layers), sorted(save)


from thop import profile
from thop import clever_format

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    opt = parser.parse_args()
    opt.cfg = check_file(opt.cfg)  # check file
    set_logging()
    device = select_device(opt.device)

    # Create model
    model = Model(opt.cfg).to(device)
    stride = model.stride.max()
    if stride == 32:
        input = torch.Tensor(1, 3, 480, 640).to(device)
    else:
        input = torch.Tensor(1, 3, 512, 640).to(device)
    model.train()
    print(model)
    flops, params = profile(model, inputs=(input,))
    flops, params = clever_format([flops, params], "%.3f")
    print('Flops:', flops, ', Params:', params)
models/yolov5l.yaml
ADDED
@@ -0,0 +1,47 @@
# parameters
nc: 1  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 1.0  # layer channel multiple

# anchors
anchors:
  - [4,5, 8,10, 13,16]  # P3/8
  - [23,29, 43,55, 73,105]  # P4/16
  - [146,217, 231,300, 335,433]  # P5/32

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, StemBlock, [64, 3, 2]],  # 0-P1/2
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 2-P3/8
   [-1, 9, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 4-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 6-P5/32
   [-1, 1, SPP, [1024, [3,5,7]]],
   [-1, 3, C3, [1024, False]],  # 8
  ]

# YOLOv5 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 5], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 12

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 3], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 16 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 13], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 19 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 9], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 22 (P5/32-large)

   [[16, 19, 22], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
models/yolov5l6.yaml
ADDED
@@ -0,0 +1,60 @@
# parameters
nc: 1  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 1.0  # layer channel multiple

# anchors
anchors:
  - [6,7, 9,11, 13,16]  # P3/8
  - [18,23, 26,33, 37,47]  # P4/16
  - [54,67, 77,104, 112,154]  # P5/32
  - [174,238, 258,355, 445,568]  # P6/64

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [ [ -1, 1, StemBlock, [ 64, 3, 2 ] ],  # 0-P1/2
    [ -1, 3, C3, [ 128 ] ],
    [ -1, 1, Conv, [ 256, 3, 2 ] ],  # 2-P3/8
    [ -1, 9, C3, [ 256 ] ],
    [ -1, 1, Conv, [ 512, 3, 2 ] ],  # 4-P4/16
    [ -1, 9, C3, [ 512 ] ],
    [ -1, 1, Conv, [ 768, 3, 2 ] ],  # 6-P5/32
    [ -1, 3, C3, [ 768 ] ],
    [ -1, 1, Conv, [ 1024, 3, 2 ] ],  # 8-P6/64
    [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ],
    [ -1, 3, C3, [ 1024, False ] ],  # 10
  ]

# YOLOv5 head
head:
  [ [ -1, 1, Conv, [ 768, 1, 1 ] ],
    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
    [ [ -1, 7 ], 1, Concat, [ 1 ] ],  # cat backbone P5
    [ -1, 3, C3, [ 768, False ] ],  # 14

    [ -1, 1, Conv, [ 512, 1, 1 ] ],
    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
    [ [ -1, 5 ], 1, Concat, [ 1 ] ],  # cat backbone P4
    [ -1, 3, C3, [ 512, False ] ],  # 18

    [ -1, 1, Conv, [ 256, 1, 1 ] ],
    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
    [ [ -1, 3 ], 1, Concat, [ 1 ] ],  # cat backbone P3
    [ -1, 3, C3, [ 256, False ] ],  # 22 (P3/8-small)

    [ -1, 1, Conv, [ 256, 3, 2 ] ],
    [ [ -1, 19 ], 1, Concat, [ 1 ] ],  # cat head P4
    [ -1, 3, C3, [ 512, False ] ],  # 25 (P4/16-medium)

    [ -1, 1, Conv, [ 512, 3, 2 ] ],
    [ [ -1, 15 ], 1, Concat, [ 1 ] ],  # cat head P5
    [ -1, 3, C3, [ 768, False ] ],  # 28 (P5/32-large)

    [ -1, 1, Conv, [ 768, 3, 2 ] ],
    [ [ -1, 11 ], 1, Concat, [ 1 ] ],  # cat head P6
    [ -1, 3, C3, [ 1024, False ] ],  # 31 (P6/64-xlarge)

    [ [ 22, 25, 28, 31 ], 1, Detect, [ nc, anchors ] ],  # Detect(P3, P4, P5, P6)
  ]
models/yolov5m.yaml
ADDED
@@ -0,0 +1,47 @@
# parameters
nc: 1  # number of classes
depth_multiple: 0.67  # model depth multiple
width_multiple: 0.75  # layer channel multiple

# anchors
anchors:
  - [4,5, 8,10, 13,16]  # P3/8
  - [23,29, 43,55, 73,105]  # P4/16
  - [146,217, 231,300, 335,433]  # P5/32

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, StemBlock, [64, 3, 2]],  # 0-P1/2
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 2-P3/8
   [-1, 9, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 4-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 6-P5/32
   [-1, 1, SPP, [1024, [3,5,7]]],
   [-1, 3, C3, [1024, False]],  # 8
  ]

# YOLOv5 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 5], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 12

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 3], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 16 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 13], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 19 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 9], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 22 (P5/32-large)

   [[16, 19, 22], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
models/yolov5m6.yaml
ADDED
@@ -0,0 +1,60 @@
# parameters
nc: 1  # number of classes
depth_multiple: 0.67  # model depth multiple
width_multiple: 0.75  # layer channel multiple

# anchors
anchors:
  - [6,7, 9,11, 13,16]  # P3/8
  - [18,23, 26,33, 37,47]  # P4/16
  - [54,67, 77,104, 112,154]  # P5/32
  - [174,238, 258,355, 445,568]  # P6/64

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [ [ -1, 1, StemBlock, [ 64, 3, 2 ] ],  # 0-P1/2
    [ -1, 3, C3, [ 128 ] ],
    [ -1, 1, Conv, [ 256, 3, 2 ] ],  # 2-P3/8
    [ -1, 9, C3, [ 256 ] ],
    [ -1, 1, Conv, [ 512, 3, 2 ] ],  # 4-P4/16
    [ -1, 9, C3, [ 512 ] ],
    [ -1, 1, Conv, [ 768, 3, 2 ] ],  # 6-P5/32
    [ -1, 3, C3, [ 768 ] ],
    [ -1, 1, Conv, [ 1024, 3, 2 ] ],  # 8-P6/64
    [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ],
    [ -1, 3, C3, [ 1024, False ] ],  # 10
  ]

# YOLOv5 head
head:
  [ [ -1, 1, Conv, [ 768, 1, 1 ] ],
    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
    [ [ -1, 7 ], 1, Concat, [ 1 ] ],  # cat backbone P5
    [ -1, 3, C3, [ 768, False ] ],  # 14

    [ -1, 1, Conv, [ 512, 1, 1 ] ],
    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
    [ [ -1, 5 ], 1, Concat, [ 1 ] ],  # cat backbone P4
    [ -1, 3, C3, [ 512, False ] ],  # 18

    [ -1, 1, Conv, [ 256, 1, 1 ] ],
    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
    [ [ -1, 3 ], 1, Concat, [ 1 ] ],  # cat backbone P3
    [ -1, 3, C3, [ 256, False ] ],  # 22 (P3/8-small)

    [ -1, 1, Conv, [ 256, 3, 2 ] ],
    [ [ -1, 19 ], 1, Concat, [ 1 ] ],  # cat head P4
    [ -1, 3, C3, [ 512, False ] ],  # 25 (P4/16-medium)

    [ -1, 1, Conv, [ 512, 3, 2 ] ],
    [ [ -1, 15 ], 1, Concat, [ 1 ] ],  # cat head P5
    [ -1, 3, C3, [ 768, False ] ],  # 28 (P5/32-large)

    [ -1, 1, Conv, [ 768, 3, 2 ] ],
    [ [ -1, 11 ], 1, Concat, [ 1 ] ],  # cat head P6
    [ -1, 3, C3, [ 1024, False ] ],  # 31 (P6/64-xlarge)

    [ [ 22, 25, 28, 31 ], 1, Detect, [ nc, anchors ] ],  # Detect(P3, P4, P5, P6)
  ]
models/yolov5n-0.5.yaml
ADDED
@@ -0,0 +1,46 @@
# parameters
nc: 1  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 0.5  # layer channel multiple

# anchors
anchors:
  - [4,5, 8,10, 13,16]  # P3/8
  - [23,29, 43,55, 73,105]  # P4/16
  - [146,217, 231,300, 335,433]  # P5/32

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, StemBlock, [32, 3, 2]],  # 0-P2/4
   [-1, 1, ShuffleV2Block, [128, 2]],  # 1-P3/8
   [-1, 3, ShuffleV2Block, [128, 1]],  # 2
   [-1, 1, ShuffleV2Block, [256, 2]],  # 3-P4/16
   [-1, 7, ShuffleV2Block, [256, 1]],  # 4
   [-1, 1, ShuffleV2Block, [512, 2]],  # 5-P5/32
   [-1, 3, ShuffleV2Block, [512, 1]],  # 6
  ]

# YOLOv5 head
head:
  [[-1, 1, Conv, [128, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P4
   [-1, 1, C3, [128, False]],  # 10

   [-1, 1, Conv, [128, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 2], 1, Concat, [1]],  # cat backbone P3
   [-1, 1, C3, [128, False]],  # 14 (P3/8-small)

   [-1, 1, Conv, [128, 3, 2]],
   [[-1, 11], 1, Concat, [1]],  # cat head P4
   [-1, 1, C3, [128, False]],  # 17 (P4/16-medium)

   [-1, 1, Conv, [128, 3, 2]],
   [[-1, 7], 1, Concat, [1]],  # cat head P5
   [-1, 1, C3, [128, False]],  # 20 (P5/32-large)

   [[14, 17, 20], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
models/yolov5n.yaml
ADDED
@@ -0,0 +1,46 @@
# parameters
nc: 1  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 1.0  # layer channel multiple

# anchors
anchors:
  - [4,5, 8,10, 13,16]  # P3/8
  - [23,29, 43,55, 73,105]  # P4/16
  - [146,217, 231,300, 335,433]  # P5/32

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, StemBlock, [32, 3, 2]],  # 0-P2/4
   [-1, 1, ShuffleV2Block, [128, 2]],  # 1-P3/8
   [-1, 3, ShuffleV2Block, [128, 1]],  # 2
   [-1, 1, ShuffleV2Block, [256, 2]],  # 3-P4/16
   [-1, 7, ShuffleV2Block, [256, 1]],  # 4
   [-1, 1, ShuffleV2Block, [512, 2]],  # 5-P5/32
   [-1, 3, ShuffleV2Block, [512, 1]],  # 6
  ]

# YOLOv5 head
head:
  [[-1, 1, Conv, [128, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P4
   [-1, 1, C3, [128, False]],  # 10

   [-1, 1, Conv, [128, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 2], 1, Concat, [1]],  # cat backbone P3
   [-1, 1, C3, [128, False]],  # 14 (P3/8-small)

   [-1, 1, Conv, [128, 3, 2]],
   [[-1, 11], 1, Concat, [1]],  # cat head P4
   [-1, 1, C3, [128, False]],  # 17 (P4/16-medium)

   [-1, 1, Conv, [128, 3, 2]],
   [[-1, 7], 1, Concat, [1]],  # cat head P5
   [-1, 1, C3, [128, False]],  # 20 (P5/32-large)

   [[14, 17, 20], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
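Note: the n-variants replace the C3 backbone with ShuffleV2Block (defined in models/common.py, not shown in this excerpt). The characteristic ingredient of a ShuffleNetV2 unit is the channel shuffle that mixes its two branch halves; a minimal self-contained sketch of that op:

import torch

def channel_shuffle(x, groups=2):
    # (b, c, h, w) -> (b, groups, c//groups, h, w) -> swap group/channel dims -> flatten,
    # so channels from the two branches interleave and information crosses branches
    b, c, h, w = x.shape
    x = x.view(b, groups, c // groups, h, w).transpose(1, 2).contiguous()
    return x.view(b, c, h, w)

print(channel_shuffle(torch.randn(1, 8, 4, 4)).shape)  # torch.Size([1, 8, 4, 4])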
models/yolov5n6.yaml
ADDED
@@ -0,0 +1,58 @@
# parameters
nc: 1  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 1.0  # layer channel multiple

# anchors
anchors:
  - [6,7, 9,11, 13,16]  # P3/8
  - [18,23, 26,33, 37,47]  # P4/16
  - [54,67, 77,104, 112,154]  # P5/32
  - [174,238, 258,355, 445,568]  # P6/64

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, StemBlock, [32, 3, 2]],  # 0-P2/4
   [-1, 1, ShuffleV2Block, [128, 2]],  # 1-P3/8
   [-1, 3, ShuffleV2Block, [128, 1]],  # 2
   [-1, 1, ShuffleV2Block, [256, 2]],  # 3-P4/16
   [-1, 7, ShuffleV2Block, [256, 1]],  # 4
   [-1, 1, ShuffleV2Block, [384, 2]],  # 5-P5/32
   [-1, 3, ShuffleV2Block, [384, 1]],  # 6
   [-1, 1, ShuffleV2Block, [512, 2]],  # 7-P6/64
   [-1, 3, ShuffleV2Block, [512, 1]],  # 8
  ]

# YOLOv5 head
head:
  [[-1, 1, Conv, [128, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P5
   [-1, 1, C3, [128, False]],  # 12

   [-1, 1, Conv, [128, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P4
   [-1, 1, C3, [128, False]],  # 16 (P4/16)

   [-1, 1, Conv, [128, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 2], 1, Concat, [1]],  # cat backbone P3
   [-1, 1, C3, [128, False]],  # 20 (P3/8-small)

   [-1, 1, Conv, [128, 3, 2]],
   [[-1, 17], 1, Concat, [1]],  # cat head P4
   [-1, 1, C3, [128, False]],  # 23 (P4/16-medium)

   [-1, 1, Conv, [128, 3, 2]],
   [[-1, 13], 1, Concat, [1]],  # cat head P5
   [-1, 1, C3, [128, False]],  # 26 (P5/32-large)

   [-1, 1, Conv, [128, 3, 2]],
   [[-1, 9], 1, Concat, [1]],  # cat head P6
   [-1, 1, C3, [128, False]],  # 29 (P6/64-large)

   [[20, 23, 26, 29], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)
  ]
models/yolov5s.yaml
ADDED
@@ -0,0 +1,47 @@
# parameters
nc: 1  # number of classes
depth_multiple: 0.33  # model depth multiple
width_multiple: 0.5  # layer channel multiple

# anchors
anchors:
  - [4,5, 8,10, 13,16]  # P3/8
  - [23,29, 43,55, 73,105]  # P4/16
  - [146,217, 231,300, 335,433]  # P5/32

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, StemBlock, [64, 3, 2]],  # 0-P1/2
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 2-P3/8
   [-1, 9, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 4-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 6-P5/32
   [-1, 1, SPP, [1024, [3,5,7]]],
   [-1, 3, C3, [1024, False]],  # 8
  ]

# YOLOv5 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 5], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 12

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 3], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 16 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 13], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 19 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 9], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 22 (P5/32-large)

   [[16, 19, 22], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
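Note: depth_multiple and width_multiple are what differentiate the otherwise-identical s/m/l files: repeat counts and channel widths are scaled when the YAML is parsed. A sketch of the scaling rules as implemented upstream (parse_model() rounds repeats and keeps channels divisible by 8):

import math

def scale(n, c, gd, gw):  # repeats, channels, depth_multiple, width_multiple
    n = max(round(n * gd), 1) if n > 1 else n  # scale module repeats
    c = math.ceil(c * gw / 8) * 8              # scale channels, keep 8-divisible
    return n, c

print(scale(9, 512, 0.33, 0.5))   # yolov5s: a '9x C3 [512]' row becomes (3, 256)
print(scale(9, 512, 0.67, 0.75))  # yolov5m: (6, 384)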
models/yolov5s6.yaml
ADDED
@@ -0,0 +1,60 @@
# parameters
nc: 1  # number of classes
depth_multiple: 0.33  # model depth multiple
width_multiple: 0.50  # layer channel multiple

# anchors
anchors:
  - [6,7, 9,11, 13,16]  # P3/8
  - [18,23, 26,33, 37,47]  # P4/16
  - [54,67, 77,104, 112,154]  # P5/32
  - [174,238, 258,355, 445,568]  # P6/64

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [ [ -1, 1, StemBlock, [ 64, 3, 2 ] ],  # 0-P1/2
    [ -1, 3, C3, [ 128 ] ],
    [ -1, 1, Conv, [ 256, 3, 2 ] ],  # 2-P3/8
    [ -1, 9, C3, [ 256 ] ],
    [ -1, 1, Conv, [ 512, 3, 2 ] ],  # 4-P4/16
    [ -1, 9, C3, [ 512 ] ],
    [ -1, 1, Conv, [ 768, 3, 2 ] ],  # 6-P5/32
    [ -1, 3, C3, [ 768 ] ],
    [ -1, 1, Conv, [ 1024, 3, 2 ] ],  # 8-P6/64
    [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ],
    [ -1, 3, C3, [ 1024, False ] ],  # 10
  ]

# YOLOv5 head
head:
  [ [ -1, 1, Conv, [ 768, 1, 1 ] ],
    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
    [ [ -1, 7 ], 1, Concat, [ 1 ] ],  # cat backbone P5
    [ -1, 3, C3, [ 768, False ] ],  # 14

    [ -1, 1, Conv, [ 512, 1, 1 ] ],
    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
    [ [ -1, 5 ], 1, Concat, [ 1 ] ],  # cat backbone P4
    [ -1, 3, C3, [ 512, False ] ],  # 18

    [ -1, 1, Conv, [ 256, 1, 1 ] ],
    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
    [ [ -1, 3 ], 1, Concat, [ 1 ] ],  # cat backbone P3
    [ -1, 3, C3, [ 256, False ] ],  # 22 (P3/8-small)

    [ -1, 1, Conv, [ 256, 3, 2 ] ],
    [ [ -1, 19 ], 1, Concat, [ 1 ] ],  # cat head P4
    [ -1, 3, C3, [ 512, False ] ],  # 25 (P4/16-medium)

    [ -1, 1, Conv, [ 512, 3, 2 ] ],
    [ [ -1, 15 ], 1, Concat, [ 1 ] ],  # cat head P5
    [ -1, 3, C3, [ 768, False ] ],  # 28 (P5/32-large)

    [ -1, 1, Conv, [ 768, 3, 2 ] ],
    [ [ -1, 11 ], 1, Concat, [ 1 ] ],  # cat head P6
    [ -1, 3, C3, [ 1024, False ] ],  # 31 (P6/64-xlarge)

    [ [ 22, 25, 28, 31 ], 1, Detect, [ nc, anchors ] ],  # Detect(P3, P4, P5, P6)
  ]
utils/__init__.py
ADDED
File without changes
utils/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (142 Bytes)
utils/__pycache__/autoanchor.cpython-39.pyc
ADDED
Binary file (6 kB)
utils/__pycache__/datasets.cpython-39.pyc
ADDED
Binary file (31.2 kB)
utils/__pycache__/general.cpython-39.pyc
ADDED
Binary file (19.6 kB)
utils/__pycache__/google_utils.cpython-39.pyc
ADDED
Binary file (3.35 kB)
utils/__pycache__/metrics.cpython-39.pyc
ADDED
Binary file (6.72 kB)
utils/__pycache__/plots.cpython-39.pyc
ADDED
Binary file (15.1 kB)
utils/__pycache__/torch_utils.cpython-39.pyc
ADDED
Binary file (11 kB)
utils/activations.py
ADDED
@@ -0,0 +1,72 @@
# Activation functions

import torch
import torch.nn as nn
import torch.nn.functional as F


# SiLU https://arxiv.org/pdf/1606.08415.pdf ----------------------------------------------------------------------------
class SiLU(nn.Module):  # export-friendly version of nn.SiLU()
    @staticmethod
    def forward(x):
        return x * torch.sigmoid(x)


class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
    @staticmethod
    def forward(x):
        # return x * F.hardsigmoid(x)  # for torchscript and CoreML
        return x * F.hardtanh(x + 3, 0., 6.) / 6.  # for torchscript, CoreML and ONNX


class MemoryEfficientSwish(nn.Module):
    class F(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return x * torch.sigmoid(x)

        @staticmethod
        def backward(ctx, grad_output):
            x = ctx.saved_tensors[0]
            sx = torch.sigmoid(x)
            return grad_output * (sx * (1 + x * (1 - sx)))

    def forward(self, x):
        return self.F.apply(x)


# Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
class Mish(nn.Module):
    @staticmethod
    def forward(x):
        return x * F.softplus(x).tanh()


class MemoryEfficientMish(nn.Module):
    class F(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))

        @staticmethod
        def backward(ctx, grad_output):
            x = ctx.saved_tensors[0]
            sx = torch.sigmoid(x)
            fx = F.softplus(x).tanh()
            return grad_output * (fx + x * sx * (1 - fx * fx))

    def forward(self, x):
        return self.F.apply(x)


# FReLU https://arxiv.org/abs/2007.11824 -------------------------------------------------------------------------------
class FReLU(nn.Module):
    def __init__(self, c1, k=3):  # ch_in, kernel
        super().__init__()
        self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
        self.bn = nn.BatchNorm2d(c1)

    def forward(self, x):
        return torch.max(x, self.bn(self.conv(x)))
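Note: SiLU and Hardswish above exist so exported graphs (TorchScript, CoreML, ONNX) avoid the native ops; numerically they should match the torch built-ins. A quick sanity check, assuming this file is importable as utils.activations:

import torch
import torch.nn as nn
from utils.activations import SiLU, Hardswish

x = torch.randn(1000)
print(torch.allclose(SiLU()(x), nn.SiLU()(x), atol=1e-6))            # True
print(torch.allclose(Hardswish()(x), nn.Hardswish()(x), atol=1e-6))  # True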
utils/autoanchor.py
ADDED
@@ -0,0 +1,155 @@
# Auto-anchor utils

import numpy as np
import torch
import yaml
from scipy.cluster.vq import kmeans
from tqdm import tqdm

from utils.general import colorstr


def check_anchor_order(m):
    # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
    a = m.anchor_grid.prod(-1).view(-1)  # anchor area
    da = a[-1] - a[0]  # delta a
    ds = m.stride[-1] - m.stride[0]  # delta s
    if da.sign() != ds.sign():  # same order
        print('Reversing anchor order')
        m.anchors[:] = m.anchors.flip(0)
        m.anchor_grid[:] = m.anchor_grid.flip(0)


def check_anchors(dataset, model, thr=4.0, imgsz=640):
    # Check anchor fit to data, recompute if necessary
    prefix = colorstr('autoanchor: ')
    print(f'\n{prefix}Analyzing anchors... ', end='')
    m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1]  # Detect()
    shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1))  # augment scale
    wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float()  # wh

    def metric(k):  # compute metric
        r = wh[:, None] / k[None]
        x = torch.min(r, 1. / r).min(2)[0]  # ratio metric
        best = x.max(1)[0]  # best_x
        aat = (x > 1. / thr).float().sum(1).mean()  # anchors above threshold
        bpr = (best > 1. / thr).float().mean()  # best possible recall
        return bpr, aat

    bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2))
    print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='')
    if bpr < 0.98:  # threshold to recompute
        print('. Attempting to improve anchors, please wait...')
        na = m.anchor_grid.numel() // 2  # number of anchors
        new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
        new_bpr = metric(new_anchors.reshape(-1, 2))[0]
        if new_bpr > bpr:  # replace anchors
            new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors)
            m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid)  # for inference
            m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1)  # loss
            check_anchor_order(m)
            print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.')
        else:
            print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.')
    print('')  # newline


def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
    """ Creates kmeans-evolved anchors from training dataset

        Arguments:
            path: path to dataset *.yaml, or a loaded dataset
            n: number of anchors
            img_size: image size used for training
            thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
            gen: generations to evolve anchors using genetic algorithm
            verbose: print all results

        Return:
            k: kmeans evolved anchors

        Usage:
            from utils.autoanchor import *; _ = kmean_anchors()
    """
    thr = 1. / thr
    prefix = colorstr('autoanchor: ')

    def metric(k, wh):  # compute metrics
        r = wh[:, None] / k[None]
        x = torch.min(r, 1. / r).min(2)[0]  # ratio metric
        # x = wh_iou(wh, torch.tensor(k))  # iou metric
        return x, x.max(1)[0]  # x, best_x

    def anchor_fitness(k):  # mutation fitness
        _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
        return (best * (best > thr).float()).mean()  # fitness

    def print_results(k):
        k = k[np.argsort(k.prod(1))]  # sort small to large
        x, best = metric(k, wh0)
        bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n  # best possible recall, anch > thr
        print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr')
        print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, '
              f'past_thr={x[x > thr].mean():.3f}-mean: ', end='')
        for i, x in enumerate(k):
            print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n')  # use in *.cfg
        return k

    if isinstance(path, str):  # *.yaml file
        with open(path) as f:
            data_dict = yaml.load(f, Loader=yaml.SafeLoader)  # model dict
        from utils.datasets import LoadImagesAndLabels
        dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
    else:
        dataset = path  # dataset

    # Get label wh
    shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)])  # wh

    # Filter
    i = (wh0 < 3.0).any(1).sum()
    if i:
        print(f'{prefix}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size.')
    wh = wh0[(wh0 >= 2.0).any(1)]  # filter > 2 pixels
    # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1)  # multiply by random scale 0-1

    # Kmeans calculation
    print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...')
    s = wh.std(0)  # sigmas for whitening
    k, dist = kmeans(wh / s, n, iter=30)  # points, mean distance
    k *= s
    wh = torch.tensor(wh, dtype=torch.float32)  # filtered
    wh0 = torch.tensor(wh0, dtype=torch.float32)  # unfiltered
    k = print_results(k)

    # Plot
    # k, d = [None] * 20, [None] * 20
    # for i in tqdm(range(1, 21)):
    #     k[i-1], d[i-1] = kmeans(wh / s, i)  # points, mean distance
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True)
    # ax = ax.ravel()
    # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))  # plot wh
    # ax[0].hist(wh[wh[:, 0]<100, 0], 400)
    # ax[1].hist(wh[wh[:, 1]<100, 1], 400)
    # fig.savefig('wh.png', dpi=200)

    # Evolve
    npr = np.random
    f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1  # fitness, shape, mutation prob, sigma
    pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:')  # progress bar
    for _ in pbar:
        v = np.ones(sh)
        while (v == 1).all():  # mutate until a change occurs (prevent duplicates)
            v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
        kg = (k.copy() * v).clip(min=2.0)
        fg = anchor_fitness(kg)
        if fg > f:
            f, k = fg, kg.copy()
            pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}'
            if verbose:
                print_results(k)

    return print_results(k)
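Note: the ratio metric above scores an anchor by the worse of its width and height ratios against a label box, symmetrized so that 1.0 is a perfect fit; a label counts toward BPR if its best anchor clears 1/thr. A toy worked example:

import torch

wh = torch.tensor([[30., 40.]])               # one label box (w, h) in pixels
k = torch.tensor([[23., 29.], [146., 217.]])  # two candidate anchors
r = wh[:, None] / k[None]                     # per-anchor w and h ratios
x = torch.min(r, 1. / r).min(2)[0]            # symmetric ratio metric in (0, 1]
print(x)                                      # tensor([[0.7250, 0.1843]])
print(x.max(1)[0] > 1. / 4.0)                 # best anchor clears thr=4.0 -> tensor([True])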
utils/aws/__init__.py
ADDED
File without changes
utils/aws/mime.sh
ADDED
@@ -0,0 +1,26 @@
# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/
# This script will run on every instance restart, not only on first start
# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---

Content-Type: multipart/mixed; boundary="//"
MIME-Version: 1.0

--//
Content-Type: text/cloud-config; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="cloud-config.txt"

#cloud-config
cloud_final_modules:
- [scripts-user, always]

--//
Content-Type: text/x-shellscript; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="userdata.txt"

#!/bin/bash
# --- paste contents of userdata.sh here ---
--//
utils/aws/resume.py
ADDED
@@ -0,0 +1,37 @@
# Resume all interrupted trainings in yolov5/ dir including DDP trainings
# Usage: $ python utils/aws/resume.py

import os
import sys
from pathlib import Path

import torch
import yaml

sys.path.append('./')  # to run '$ python *.py' files in subdirectories

port = 0  # --master_port
path = Path('').resolve()
for last in path.rglob('*/**/last.pt'):
    ckpt = torch.load(last)
    if ckpt['optimizer'] is None:
        continue

    # Load opt.yaml
    with open(last.parent.parent / 'opt.yaml') as f:
        opt = yaml.load(f, Loader=yaml.SafeLoader)

    # Get device count
    d = opt['device'].split(',')  # devices
    nd = len(d)  # number of devices
    ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1)  # distributed data parallel

    if ddp:  # multi-GPU
        port += 1
        cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
    else:  # single-GPU
        cmd = f'python train.py --resume {last}'

    cmd += ' > /dev/null 2>&1 &'  # redirect output to dev/null and run in daemon thread
    print(cmd)
    os.system(cmd)
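Note: the emitted command depends on the saved opt.yaml; for a run trained with opt['device'] = '0,1,2,3' the loop above would print something of the form (illustrative only, with a hypothetical checkpoint path):

python -m torch.distributed.launch --nproc_per_node 4 --master_port 1 train.py --resume runs/train/exp/weights/last.pt > /dev/null 2>&1 &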
utils/aws/userdata.sh
ADDED
@@ -0,0 +1,27 @@
#!/bin/bash
# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
# This script will run only once on first instance start (for a re-start script see mime.sh)
# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir
# Use >300 GB SSD

cd home/ubuntu
if [ ! -d yolov5 ]; then
  echo "Running first-time script."  # install dependencies, download COCO, pull Docker
  git clone https://github.com/ultralytics/yolov5 && sudo chmod -R 777 yolov5
  cd yolov5
  bash data/scripts/get_coco.sh && echo "Data done." &
  sudo docker pull ultralytics/yolov5:latest && echo "Docker done." &
  python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." &
  wait && echo "All tasks done."  # finish background tasks
else
  echo "Running re-start script."  # resume interrupted runs
  i=0
  list=$(sudo docker ps -qa)  # container list i.e. $'one\ntwo\nthree\nfour'
  while IFS= read -r id; do
    ((i++))
    echo "restarting container $i: $id"
    sudo docker start $id
    # sudo docker exec -it $id python train.py --resume  # single-GPU
    sudo docker exec -d $id python utils/aws/resume.py  # multi-scenario
  done <<<"$list"
fi
utils/datasets.py
ADDED
@@ -0,0 +1,1019 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Dataset utils and dataloaders
|
2 |
+
|
3 |
+
import glob
|
4 |
+
import logging
|
5 |
+
import math
|
6 |
+
import os
|
7 |
+
import random
|
8 |
+
import shutil
|
9 |
+
import time
|
10 |
+
from itertools import repeat
|
11 |
+
from multiprocessing.pool import ThreadPool
|
12 |
+
from pathlib import Path
|
13 |
+
from threading import Thread
|
14 |
+
|
15 |
+
import cv2
|
16 |
+
import numpy as np
|
17 |
+
import torch
|
18 |
+
import torch.nn.functional as F
|
19 |
+
from PIL import Image, ExifTags
|
20 |
+
from torch.utils.data import Dataset
|
21 |
+
from tqdm import tqdm
|
22 |
+
|
23 |
+
from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, clean_str
|
24 |
+
from utils.torch_utils import torch_distributed_zero_first
|
25 |
+
|
26 |
+
# Parameters
|
27 |
+
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
|
28 |
+
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng'] # acceptable image suffixes
|
29 |
+
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
|
30 |
+
logger = logging.getLogger(__name__)
|
31 |
+
|
32 |
+
# Get orientation exif tag
|
33 |
+
for orientation in ExifTags.TAGS.keys():
|
34 |
+
if ExifTags.TAGS[orientation] == 'Orientation':
|
35 |
+
break
|
36 |
+
|
37 |
+
|
38 |
+
def get_hash(files):
|
39 |
+
# Returns a single hash value of a list of files
|
40 |
+
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
|
41 |
+
|
42 |
+
|
43 |
+
def exif_size(img):
|
44 |
+
# Returns exif-corrected PIL size
|
45 |
+
s = img.size # (width, height)
|
46 |
+
try:
|
47 |
+
rotation = dict(img._getexif().items())[orientation]
|
48 |
+
if rotation == 6: # rotation 270
|
49 |
+
s = (s[1], s[0])
|
50 |
+
elif rotation == 8: # rotation 90
|
51 |
+
s = (s[1], s[0])
|
52 |
+
except:
|
53 |
+
pass
|
54 |
+
|
55 |
+
return s
|
56 |
+
|
57 |
+
|
58 |
+
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
|
59 |
+
rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):
|
60 |
+
# Make sure only the first process in DDP process the dataset first, and the following others can use the cache
|
61 |
+
with torch_distributed_zero_first(rank):
|
62 |
+
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
|
63 |
+
augment=augment, # augment images
|
64 |
+
hyp=hyp, # augmentation hyperparameters
|
65 |
+
rect=rect, # rectangular training
|
66 |
+
cache_images=cache,
|
67 |
+
single_cls=opt.single_cls,
|
68 |
+
stride=int(stride),
|
69 |
+
pad=pad,
|
70 |
+
image_weights=image_weights,
|
71 |
+
prefix=prefix)
|
72 |
+
|
73 |
+
batch_size = min(batch_size, len(dataset))
|
74 |
+
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
|
75 |
+
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
|
76 |
+
loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
|
77 |
+
# Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
|
78 |
+
dataloader = loader(dataset,
|
79 |
+
batch_size=batch_size,
|
80 |
+
num_workers=nw,
|
81 |
+
sampler=sampler,
|
82 |
+
pin_memory=True,
|
83 |
+
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
|
84 |
+
return dataloader, dataset
|
85 |
+
|
86 |
+
|
87 |
+
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
|
88 |
+
""" Dataloader that reuses workers
|
89 |
+
|
90 |
+
Uses same syntax as vanilla DataLoader
|
91 |
+
"""
|
92 |
+
|
93 |
+
def __init__(self, *args, **kwargs):
|
94 |
+
super().__init__(*args, **kwargs)
|
95 |
+
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
|
96 |
+
self.iterator = super().__iter__()
|
97 |
+
|
98 |
+
def __len__(self):
|
99 |
+
return len(self.batch_sampler.sampler)
|
100 |
+
|
101 |
+
def __iter__(self):
|
102 |
+
for i in range(len(self)):
|
103 |
+
yield next(self.iterator)
|
104 |
+
|
105 |
+
|
106 |
+
class _RepeatSampler(object):
|
107 |
+
""" Sampler that repeats forever
|
108 |
+
|
109 |
+
Args:
|
110 |
+
sampler (Sampler)
|
111 |
+
"""
|
112 |
+
|
113 |
+
def __init__(self, sampler):
|
114 |
+
self.sampler = sampler
|
115 |
+
|
116 |
+
def __iter__(self):
|
117 |
+
while True:
|
118 |
+
yield from iter(self.sampler)
|
119 |
+
|
120 |
+
|
121 |
+
class LoadImages: # for inference
|
122 |
+
def __init__(self, path, img_size=640):
|
123 |
+
p = str(Path(path)) # os-agnostic
|
124 |
+
p = os.path.abspath(p) # absolute path
|
125 |
+
if '*' in p:
|
126 |
+
files = sorted(glob.glob(p, recursive=True)) # glob
|
127 |
+
elif os.path.isdir(p):
|
128 |
+
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
|
129 |
+
elif os.path.isfile(p):
|
130 |
+
files = [p] # files
|
131 |
+
else:
|
132 |
+
raise Exception(f'ERROR: {p} does not exist')
|
133 |
+
|
134 |
+
images = [x for x in files if x.split('.')[-1].lower() in img_formats]
|
135 |
+
videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
|
136 |
+
ni, nv = len(images), len(videos)
|
137 |
+
|
138 |
+
self.img_size = img_size
|
139 |
+
self.files = images + videos
|
140 |
+
self.nf = ni + nv # number of files
|
141 |
+
self.video_flag = [False] * ni + [True] * nv
|
142 |
+
self.mode = 'image'
|
143 |
+
if any(videos):
|
144 |
+
self.new_video(videos[0]) # new video
|
145 |
+
else:
|
146 |
+
self.cap = None
|
147 |
+
assert self.nf > 0, f'No images or videos found in {p}. ' \
|
148 |
+
f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}'
|
149 |
+
|
150 |
+
def __iter__(self):
|
151 |
+
self.count = 0
|
152 |
+
return self
|
153 |
+
|
154 |
+
def __next__(self):
|
155 |
+
if self.count == self.nf:
|
156 |
+
raise StopIteration
|
157 |
+
path = self.files[self.count]
|
158 |
+
|
159 |
+
if self.video_flag[self.count]:
|
160 |
+
# Read video
|
161 |
+
self.mode = 'video'
|
162 |
+
ret_val, img0 = self.cap.read()
|
163 |
+
if not ret_val:
|
164 |
+
self.count += 1
|
165 |
+
self.cap.release()
|
166 |
+
if self.count == self.nf: # last video
|
167 |
+
raise StopIteration
|
168 |
+
else:
|
169 |
+
path = self.files[self.count]
|
170 |
+
self.new_video(path)
|
171 |
+
ret_val, img0 = self.cap.read()
|
172 |
+
|
173 |
+
self.frame += 1
|
174 |
+
print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='')
|
175 |
+
|
176 |
+
else:
|
177 |
+
# Read image
|
178 |
+
self.count += 1
|
179 |
+
img0 = cv2.imread(path) # BGR
|
180 |
+
assert img0 is not None, 'Image Not Found ' + path
|
181 |
+
print(f'image {self.count}/{self.nf} {path}: ', end='')
|
182 |
+
|
183 |
+
# Padded resize
|
184 |
+
img = letterbox(img0, new_shape=self.img_size)[0]
|
185 |
+
|
186 |
+
# Convert
|
187 |
+
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
|
188 |
+
img = np.ascontiguousarray(img)
|
189 |
+
|
190 |
+
return path, img, img0, self.cap
|
191 |
+
|
192 |
+
def new_video(self, path):
|
193 |
+
self.frame = 0
|
194 |
+
self.cap = cv2.VideoCapture(path)
|
195 |
+
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
196 |
+
|
197 |
+
def __len__(self):
|
198 |
+
return self.nf # number of files
|
199 |
+
|
200 |
+
|
201 |
+
class LoadWebcam: # for inference
|
202 |
+
def __init__(self, pipe='0', img_size=640):
|
203 |
+
self.img_size = img_size
|
204 |
+
|
205 |
+
if pipe.isnumeric():
|
206 |
+
pipe = eval(pipe) # local camera
|
207 |
+
# pipe = 'rtsp://192.168.1.64/1' # IP camera
|
208 |
+
# pipe = 'rtsp://username:[email protected]/1' # IP camera with login
|
209 |
+
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
|
210 |
+
|
211 |
+
self.pipe = pipe
|
212 |
+
self.cap = cv2.VideoCapture(pipe) # video capture object
|
213 |
+
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
|
214 |
+
|
215 |
+
def __iter__(self):
|
216 |
+
self.count = -1
|
217 |
+
return self
|
218 |
+
|
219 |
+
def __next__(self):
|
220 |
+
self.count += 1
|
221 |
+
if cv2.waitKey(1) == ord('q'): # q to quit
|
222 |
+
self.cap.release()
|
223 |
+
cv2.destroyAllWindows()
|
224 |
+
raise StopIteration
|
225 |
+
|
226 |
+
# Read frame
|
227 |
+
if self.pipe == 0: # local camera
|
228 |
+
ret_val, img0 = self.cap.read()
|
229 |
+
img0 = cv2.flip(img0, 1) # flip left-right
|
230 |
+
else: # IP camera
|
231 |
+
n = 0
|
232 |
+
while True:
|
233 |
+
n += 1
|
234 |
+
self.cap.grab()
|
235 |
+
if n % 30 == 0: # skip frames
|
236 |
+
ret_val, img0 = self.cap.retrieve()
|
237 |
+
if ret_val:
|
238 |
+
break
|
239 |
+
|
240 |
+
# Print
|
241 |
+
assert ret_val, f'Camera Error {self.pipe}'
|
242 |
+
img_path = 'webcam.jpg'
|
243 |
+
print(f'webcam {self.count}: ', end='')
|
244 |
+
|
245 |
+
# Padded resize
|
246 |
+
img = letterbox(img0, new_shape=self.img_size)[0]
|
247 |
+
|
248 |
+
# Convert
|
249 |
+
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
|
250 |
+
img = np.ascontiguousarray(img)
|
251 |
+
|
252 |
+
return img_path, img, img0, None
|
253 |
+
|
254 |
+
def __len__(self):
|
255 |
+
return 0
|
256 |
+
|
257 |
+
|
258 |
+
class LoadStreams: # multiple IP or RTSP cameras
|
259 |
+
def __init__(self, sources='streams.txt', img_size=640):
|
260 |
+
self.mode = 'stream'
|
261 |
+
self.img_size = img_size
|
262 |
+
|
263 |
+
if os.path.isfile(sources):
|
264 |
+
with open(sources, 'r') as f:
|
265 |
+
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
|
266 |
+
else:
|
267 |
+
sources = [sources]
|
268 |
+
|
269 |
+
n = len(sources)
|
270 |
+
self.imgs = [None] * n
|
271 |
+
self.sources = [clean_str(x) for x in sources] # clean source names for later
|
272 |
+
for i, s in enumerate(sources):
|
273 |
+
# Start the thread to read frames from the video stream
|
274 |
+
print(f'{i + 1}/{n}: {s}... ', end='')
|
275 |
+
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
|
276 |
+
assert cap.isOpened(), f'Failed to open {s}'
|
277 |
+
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
|
278 |
+
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
|
279 |
+
fps = cap.get(cv2.CAP_PROP_FPS) % 100
|
280 |
+
_, self.imgs[i] = cap.read() # guarantee first frame
|
281 |
+
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
|
282 |
+
print(f' success ({w}x{h} at {fps:.2f} FPS).')
|
283 |
+
thread.start()
|
284 |
+
print('') # newline
|
285 |
+
|
286 |
+
# check for common shapes
|
287 |
+
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
|
288 |
+
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
|
289 |
+
if not self.rect:
|
290 |
+
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
|
291 |
+
|
292 |
+
def update(self, index, cap):
|
293 |
+
# Read next stream frame in a daemon thread
|
294 |
+
n = 0
|
295 |
+
while cap.isOpened():
|
296 |
+
n += 1
|
297 |
+
# _, self.imgs[index] = cap.read()
|
298 |
+
cap.grab()
|
299 |
+
if n == 4: # read every 4th frame
|
300 |
+
_, self.imgs[index] = cap.retrieve()
|
301 |
+
n = 0
|
302 |
+
time.sleep(0.01) # wait time
|
303 |
+
|
304 |
+
def __iter__(self):
|
305 |
+
self.count = -1
|
306 |
+
return self
|
307 |
+
|
308 |
+
def __next__(self):
|
309 |
+
self.count += 1
|
310 |
+
img0 = self.imgs.copy()
|
311 |
+
if cv2.waitKey(1) == ord('q'): # q to quit
|
312 |
+
cv2.destroyAllWindows()
|
313 |
+
raise StopIteration
|
314 |
+
|
315 |
+
# Letterbox
|
316 |
+
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
|
317 |
+
|
318 |
+
# Stack
|
319 |
+
img = np.stack(img, 0)
|
320 |
+
|
321 |
+
# Convert
|
322 |
+
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
|
323 |
+
img = np.ascontiguousarray(img)
|
324 |
+
|
325 |
+
return self.sources, img, img0, None
|
326 |
+
|
327 |
+
def __len__(self):
|
328 |
+
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
|
329 |
+
|
330 |
+
|
331 |
+
def img2label_paths(img_paths):
|
332 |
+
# Define label paths as a function of image paths
|
333 |
+
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
|
334 |
+
return [x.replace(sa, sb, 1).replace('.' + x.split('.')[-1], '.txt') for x in img_paths]
|
335 |
+
|
336 |
+
|
337 |
+
class LoadImagesAndLabels(Dataset): # for training/testing
|
338 |
+
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
|
339 |
+
cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
|
340 |
+
self.img_size = img_size
|
341 |
+
self.augment = augment
|
342 |
+
self.hyp = hyp
|
343 |
+
self.image_weights = image_weights
|
344 |
+
self.rect = False if image_weights else rect
|
345 |
+
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
|
346 |
+
self.mosaic_border = [-img_size // 2, -img_size // 2]
|
347 |
+
self.stride = stride
|
348 |
+
|
349 |
+
try:
|
350 |
+
f = [] # image files
|
351 |
+
for p in path if isinstance(path, list) else [path]:
|
352 |
+
p = Path(p) # os-agnostic
|
353 |
+
if p.is_dir(): # dir
|
354 |
+
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
|
355 |
+
elif p.is_file(): # file
|
356 |
+
with open(p, 'r') as t:
|
357 |
+
t = t.read().strip().splitlines()
|
358 |
+
parent = str(p.parent) + os.sep
|
359 |
+
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
|
360 |
+
else:
|
361 |
+
raise Exception(f'{prefix}{p} does not exist')
|
362 |
+
self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
|
363 |
+
assert self.img_files, f'{prefix}No images found'
|
364 |
+
except Exception as e:
|
365 |
+
raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}')
|
366 |
+
|
367 |
+
# Check cache
|
368 |
+
self.label_files = img2label_paths(self.img_files) # labels
|
369 |
+
cache_path = Path(self.label_files[0]).parent.with_suffix('.cache') # cached labels
|
370 |
+
if cache_path.is_file():
|
371 |
+
cache = torch.load(cache_path) # load
|
372 |
+
if cache['hash'] != get_hash(self.label_files + self.img_files) or 'results' not in cache: # changed
|
373 |
+
cache = self.cache_labels(cache_path, prefix) # re-cache
|
374 |
+
else:
|
375 |
+
cache = self.cache_labels(cache_path, prefix) # cache
|
376 |
+
|
377 |
+
# Display cache
|
378 |
+
[nf, nm, ne, nc, n] = cache.pop('results') # found, missing, empty, corrupted, total
|
379 |
+
desc = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
|
380 |
+
tqdm(None, desc=prefix + desc, total=n, initial=n)
|
381 |
+
assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'
|
382 |
+
|
383 |
+
# Read cache
|
384 |
+
cache.pop('hash') # remove hash
|
385 |
+
labels, shapes = zip(*cache.values())
|
386 |
+
self.labels = list(labels)
|
387 |
+
self.shapes = np.array(shapes, dtype=np.float64)
|
388 |
+
self.img_files = list(cache.keys()) # update
|
389 |
+
self.label_files = img2label_paths(cache.keys()) # update
|
390 |
+
if single_cls:
|
391 |
+
for x in self.labels:
|
392 |
+
x[:, 0] = 0
|
393 |
+
|
394 |
+
n = len(shapes) # number of images
|
395 |
+
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
|
396 |
+
nb = bi[-1] + 1 # number of batches
|
397 |
+
self.batch = bi # batch index of image
|
398 |
+
self.n = n
|
399 |
+
self.indices = range(n)
|
400 |
+
|
401 |
+
# Rectangular Training
|
402 |
+
if self.rect:
|
403 |
+
# Sort by aspect ratio
|
404 |
+
s = self.shapes # wh
|
405 |
+
ar = s[:, 1] / s[:, 0] # aspect ratio
|
406 |
+
irect = ar.argsort()
|
407 |
+
self.img_files = [self.img_files[i] for i in irect]
|
408 |
+
self.label_files = [self.label_files[i] for i in irect]
|
409 |
+
self.labels = [self.labels[i] for i in irect]
|
410 |
+
self.shapes = s[irect] # wh
|
411 |
+
ar = ar[irect]
|
412 |
+
|
413 |
+
# Set training image shapes
|
414 |
+
shapes = [[1, 1]] * nb
|
415 |
+
for i in range(nb):
|
416 |
+
ari = ar[bi == i]
|
417 |
+
mini, maxi = ari.min(), ari.max()
|
418 |
+
if maxi < 1:
|
419 |
+
shapes[i] = [maxi, 1]
|
420 |
+
elif mini > 1:
|
421 |
+
shapes[i] = [1, 1 / mini]
|
422 |
+
|
423 |
+
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
|
424 |
+
|
425 |
+
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
|
426 |
+
self.imgs = [None] * n
|
427 |
+
if cache_images:
|
428 |
+
gb = 0 # Gigabytes of cached images
|
429 |
+
self.img_hw0, self.img_hw = [None] * n, [None] * n
|
430 |
+
results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads
|
431 |
+
pbar = tqdm(enumerate(results), total=n)
|
432 |
+
for i, x in pbar:
|
433 |
+
self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)
|
434 |
+
gb += self.imgs[i].nbytes
|
435 |
+
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
|
436 |
+
|
437 |
+
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
|
438 |
+
# Cache dataset labels, check images and read shapes
|
439 |
+
x = {} # dict
|
440 |
+
nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate
|
441 |
+
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
|
442 |
+
for i, (im_file, lb_file) in enumerate(pbar):
|
443 |
+
try:
|
444 |
+
# verify images
|
445 |
+
im = Image.open(im_file)
|
446 |
+
im.verify() # PIL verify
|
447 |
+
shape = exif_size(im) # image size
|
448 |
+
assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
|
449 |
+
|
450 |
+
# verify labels
|
451 |
+
if os.path.isfile(lb_file):
|
452 |
+
nf += 1 # label found
|
453 |
+
with open(lb_file, 'r') as f:
|
454 |
+
l = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
|
455 |
+
if len(l):
|
456 |
+
assert l.shape[1] == 5, 'labels require 5 columns each'
|
457 |
+
assert (l >= 0).all(), 'negative labels'
|
458 |
+
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
|
459 |
+
assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
|
460 |
+
else:
|
461 |
+
ne += 1 # label empty
|
462 |
+
l = np.zeros((0, 5), dtype=np.float32)
|
463 |
+
else:
|
464 |
+
nm += 1 # label missing
|
465 |
+
l = np.zeros((0, 5), dtype=np.float32)
|
466 |
+
x[im_file] = [l, shape]
|
467 |
+
except Exception as e:
|
468 |
+
nc += 1
|
469 |
+
print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}')
|
470 |
+
|
471 |
+
pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' for images and labels... " \
|
472 |
+
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
|
473 |
+
|
474 |
+
if nf == 0:
|
475 |
+
print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')
|
476 |
+
|
477 |
+
x['hash'] = get_hash(self.label_files + self.img_files)
|
478 |
+
x['results'] = [nf, nm, ne, nc, i + 1]
|
479 |
+
torch.save(x, path) # save for next time
|
480 |
+
logging.info(f'{prefix}New cache created: {path}')
|
481 |
+
return x
|
482 |
+
|
    def __len__(self):
        return len(self.img_files)

    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self

    def __getitem__(self, index):
        index = self.indices[index]  # linear, shuffled, or image_weights

        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp['mosaic']
        if mosaic:
            # Load mosaic
            img, labels = load_mosaic(self, index)
            shapes = None

            # MixUp https://arxiv.org/pdf/1710.09412.pdf
            if random.random() < hyp['mixup']:
                img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1))
                r = np.random.beta(8.0, 8.0)  # mixup ratio, alpha=beta=8.0
                img = (img * r + img2 * (1 - r)).astype(np.uint8)
                labels = np.concatenate((labels, labels2), 0)

        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)

            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

            labels = self.labels[index].copy()
            if labels.size:  # normalized xywh to pixel xyxy format
                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])

        if self.augment:
            # Augment imagespace
            if not mosaic:
                img, labels = random_perspective(img, labels,
                                                 degrees=hyp['degrees'],
                                                 translate=hyp['translate'],
                                                 scale=hyp['scale'],
                                                 shear=hyp['shear'],
                                                 perspective=hyp['perspective'])

            # Augment colorspace
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

            # Apply cutouts
            # if random.random() < 0.9:
            #     labels = cutout(img, labels)

        nL = len(labels)  # number of labels
        if nL:
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])  # convert xyxy to xywh
            labels[:, [2, 4]] /= img.shape[0]  # normalized height 0-1
            labels[:, [1, 3]] /= img.shape[1]  # normalized width 0-1

        if self.augment:
            # flip up-down
            if random.random() < hyp['flipud']:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]

            # flip left-right
            if random.random() < hyp['fliplr']:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]

        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.img_files[index], shapes

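    # Note on the tensor returned above: labels_out has shape (nL, 6) laid out as
    # [image_index, class, x, y, w, h] with xywh normalized to 0-1. Column 0 is
    # left at zero here and filled in by collate_fn below, so build_targets()
    # can tell which image of the batch each target belongs to.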
    @staticmethod
    def collate_fn(batch):
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes

    @staticmethod
    def collate_fn4(batch):
        img, label, path, shapes = zip(*batch)  # transposed
        n = len(shapes) // 4
        img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]

        ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
        wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
        s = torch.tensor([[1, 1, .5, .5, .5, .5]])  # scale
        for i in range(n):  # zidane torch.zeros(16,3,720,1280)  # BCHW
            i *= 4
            if random.random() < 0.5:
                im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
                    0].type(img[i].type())
                l = label[i]
            else:
                im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
                l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
            img4.append(im)
            label4.append(l)

        for i, l in enumerate(label4):
            l[:, 0] = i  # add target image index for build_targets()

        return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4

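# How collate_fn4 reshuffles a batch: it consumes images in groups of 4 and, per
# group, either (a) bilinearly upscales the first image 2x and keeps its labels,
# or (b) tiles the 4 images into one doubled canvas. In case (b) the offsets
# ho/wo shift normalized y/x by +1 for the bottom/right tiles and
# s = [1, 1, .5, .5, .5, .5] rescales xywh into the doubled canvas, e.g. a
# bottom-tile label y becomes (y + 1) / 2. The numbers here only restate the
# constants defined in the function.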

# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
    # loads 1 image from dataset, returns img, original hw, resized hw
    img = self.imgs[index]
    if img is None:  # not cached
        path = self.img_files[index]
        img = cv2.imread(path)  # BGR
        assert img is not None, 'Image Not Found ' + path
        h0, w0 = img.shape[:2]  # orig hw
        r = self.img_size / max(h0, w0)  # resize image to img_size
        if r != 1:  # always resize down, only resize up if training with augmentation
            interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
            img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
        return img, (h0, w0), img.shape[:2]  # img, hw_original, hw_resized
    else:
        return self.imgs[index], self.img_hw0[index], self.img_hw[index]  # img, hw_original, hw_resized

def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8

    x = np.arange(0, 256, dtype=np.int16)
    lut_hue = ((x * r[0]) % 180).astype(dtype)
    lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
    lut_val = np.clip(x * r[2], 0, 255).astype(dtype)

    img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed

    # Histogram equalization
    # if random.random() < 0.2:
    #     for i in range(3):
    #         img[:, :, i] = cv2.equalizeHist(img[:, :, i])

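# Background on the LUT trick above: the random gains r are drawn in
# [1 - gain, 1 + gain] per channel and applied through 256-entry lookup tables
# instead of per-pixel float math. Hue wraps with % 180 because OpenCV stores
# uint8 hue in the range 0-179; saturation and value are clipped to 0-255.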
def load_mosaic(self, index):
    # loads images in a 4-mosaic

    labels4 = []
    s = self.img_size
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)]  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels
        labels = self.labels[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format
        labels4.append(labels)

    # Concat/clip labels
    if len(labels4):
        labels4 = np.concatenate(labels4, 0)
        np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:])  # use with random_perspective
        # img4, labels4 = replicate(img4, labels4)  # replicate

    # Augment
    img4, labels4 = random_perspective(img4, labels4,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img4, labels4

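# Geometry of the 4-mosaic above: with mosaic_border = [-s//2, -s//2], the
# center (xc, yc) is sampled from uniform(s/2, 1.5s) inside the 2s x 2s grey
# canvas, so the four tiles rarely meet at the exact middle. For each tile,
# (x1a, y1a, x2a, y2a) is the region it occupies on the canvas and
# (x1b, y1b, x2b, y2b) the matching crop of the source image; padw/padh are the
# per-tile offsets that map label pixel coordinates into canvas space.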
def load_mosaic9(self, index):
    # loads images in a 9-mosaic

    labels9 = []
    s = self.img_size
    indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(8)]  # 8 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img9
        if i == 0:  # center
            img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            h0, w0 = h, w
            c = s, s, s + w, s + h  # xmin, ymin, xmax, ymax (base) coordinates
        elif i == 1:  # top
            c = s, s - h, s + w, s
        elif i == 2:  # top right
            c = s + wp, s - h, s + wp + w, s
        elif i == 3:  # right
            c = s + w0, s, s + w0 + w, s + h
        elif i == 4:  # bottom right
            c = s + w0, s + hp, s + w0 + w, s + hp + h
        elif i == 5:  # bottom
            c = s + w0 - w, s + h0, s + w0, s + h0 + h
        elif i == 6:  # bottom left
            c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
        elif i == 7:  # left
            c = s - w, s + h0 - h, s, s + h0
        elif i == 8:  # top left
            c = s - w, s + h0 - hp - h, s, s + h0 - hp

        padx, pady = c[:2]
        x1, y1, x2, y2 = [max(x, 0) for x in c]  # allocate coords

        # Labels
        labels = self.labels[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady)  # normalized xywh to pixel xyxy format
        labels9.append(labels)

        # Image
        img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:]  # img9[ymin:ymax, xmin:xmax]
        hp, wp = h, w  # height, width previous

    # Offset
    yc, xc = [int(random.uniform(0, s)) for x in self.mosaic_border]  # mosaic center x, y
    img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]

    # Concat/clip labels
    if len(labels9):
        labels9 = np.concatenate(labels9, 0)
        labels9[:, [1, 3]] -= xc
        labels9[:, [2, 4]] -= yc

        np.clip(labels9[:, 1:], 0, 2 * s, out=labels9[:, 1:])  # use with random_perspective
        # img9, labels9 = replicate(img9, labels9)  # replicate

    # Augment
    img9, labels9 = random_perspective(img9, labels9,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img9, labels9


def replicate(img, labels):
    # Replicate labels
    h, w = img.shape[:2]
    boxes = labels[:, 1:].astype(int)
    x1, y1, x2, y2 = boxes.T
    s = ((x2 - x1) + (y2 - y1)) / 2  # side length (pixels)
    for i in s.argsort()[:round(s.size * 0.5)]:  # smallest indices
        x1b, y1b, x2b, y2b = boxes[i]
        bh, bw = y2b - y1b, x2b - x1b
        yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw))  # offset x, y
        x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
        img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)

    return img, labels


def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 64), np.mod(dh, 64)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)

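# Worked example for letterbox() (values computed from the code above; the
# input array is hypothetical):
#
#   im = np.zeros((720, 1280, 3), dtype=np.uint8)
#   out, ratio, (dw, dh) = letterbox(im, new_shape=640)
#   # r = min(640/720, 640/1280) = 0.5, so the image is resized to 640x360;
#   # auto=True then pads height only up to the next multiple of 64:
#   # dh = 280 % 64 = 24, split as 12 px top / 12 px bottom.
#   # out.shape == (384, 640, 3), ratio == (0.5, 0.5), (dw, dh) == (0.0, 12.0)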
def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]

    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2

    # Center
    C = np.eye(3)
    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine
            img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(img[:, :, ::-1])  # base
    # ax[1].imshow(img2[:, :, ::-1])  # warped

    # Transform label coordinates
    n = len(targets)
    if n:
        # warp points
        xy = np.ones((n * 4, 3))
        xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = xy @ M.T  # transform
        if perspective:
            xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8)  # rescale
        else:  # affine
            xy = xy[:, :2].reshape(n, 8)

        # create new boxes
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

        # # apply angle-based reduction of bounding boxes
        # radians = a * math.pi / 180
        # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
        # x = (xy[:, 2] + xy[:, 0]) / 2
        # y = (xy[:, 3] + xy[:, 1]) / 2
        # w = (xy[:, 2] - xy[:, 0]) * reduction
        # h = (xy[:, 3] - xy[:, 1]) * reduction
        # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T

        # clip boxes
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)

        # filter candidates
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
        targets = targets[i]
        targets[:, 1:5] = xy[i]

    return img, targets

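# Reading the transform above: M = T @ S @ R @ P @ C applies right-to-left,
# i.e. the image is first re-centered about the origin (C), then perspective-
# skewed (P), rotated and scaled (R), sheared (S) and finally translated back
# into the output frame (T). Boxes are warped by sending all four corners of
# each box through M and re-axis-aligning with per-box min/max, which is why a
# rotated box grows slightly before box_candidates() filters degenerate ones.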
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16):  # box1(4,n), box2(4,n)
    # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
    ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))  # aspect ratio
    return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr)  # candidates


def cutout(image, labels):
    # Applies image cutout augmentation https://arxiv.org/abs/1708.04552
    h, w = image.shape[:2]

    def bbox_ioa(box1, box2):
        # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
        box2 = box2.transpose()

        # Get the coordinates of bounding boxes
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]

        # Intersection area
        inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
                     (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)

        # box2 area
        box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16

        # Intersection over box2 area
        return inter_area / box2_area

    # create random masks
    scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16  # image size fraction
    for s in scales:
        mask_h = random.randint(1, int(h * s))
        mask_w = random.randint(1, int(w * s))

        # box
        xmin = max(0, random.randint(0, w) - mask_w // 2)
        ymin = max(0, random.randint(0, h) - mask_h // 2)
        xmax = min(w, xmin + mask_w)
        ymax = min(h, ymin + mask_h)

        # apply random color mask
        image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]

        # return unobscured labels
        if len(labels) and s > 0.03:
            box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area
            labels = labels[ioa < 0.60]  # remove >60% obscured labels

    return labels


def create_folder(path='./new'):
    # Create folder
    if os.path.exists(path):
        shutil.rmtree(path)  # delete output folder
    os.makedirs(path)  # make new output folder


def flatten_recursive(path='../coco128'):
    # Flatten a recursive directory by bringing all files to top level
    new_path = Path(path + '_flat')
    create_folder(new_path)
    for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
        shutil.copyfile(file, new_path / Path(file).name)


def extract_boxes(path='../coco128/'):  # from utils.datasets import *; extract_boxes('../coco128')
    # Convert detection dataset into classification dataset, with one directory per class

    path = Path(path)  # images dir
    shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None  # remove existing
    files = list(path.rglob('*.*'))
    n = len(files)  # number of files
    for im_file in tqdm(files, total=n):
        if im_file.suffix[1:] in img_formats:
            # image
            im = cv2.imread(str(im_file))[..., ::-1]  # BGR to RGB
            h, w = im.shape[:2]

            # labels
            lb_file = Path(img2label_paths([str(im_file)])[0])
            if Path(lb_file).exists():
                with open(lb_file, 'r') as f:
                    lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels

                for j, x in enumerate(lb):
                    c = int(x[0])  # class
                    f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg'  # new filename
                    if not f.parent.is_dir():
                        f.parent.mkdir(parents=True)

                    b = x[1:] * [w, h, w, h]  # box
                    # b[2:] = b[2:].max()  # rectangle to square
                    b[2:] = b[2:] * 1.2 + 3  # pad
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)

                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                    assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'


def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)):  # from utils.datasets import *; autosplit('../coco128')
    """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
    # Arguments
        path:       Path to images directory
        weights:    Train, val, test weights (list)
    """
    path = Path(path)  # images dir
    files = list(path.rglob('*.*'))
    n = len(files)  # number of files
    indices = random.choices([0, 1, 2], weights=weights, k=n)  # assign each image to a split
    txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']  # 3 txt files
    [(path / x).unlink() for x in txt if (path / x).exists()]  # remove existing
    for i, img in tqdm(zip(indices, files), total=n):
        if img.suffix[1:] in img_formats:
            with open(path / txt[i], 'a') as f:
                f.write(str(img) + '\n')  # add image to txt file
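# Hypothetical usage of autosplit() (the path is only a placeholder): write
# autosplit_train.txt / autosplit_val.txt next to an image directory with a
# 90/10/0 train/val/test split:
#
#   autosplit('../coco128', weights=(0.9, 0.1, 0.0))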
utils/face_datasets.py
ADDED
@@ -0,0 +1,834 @@
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm

from utils.general import xyxy2xywh, xywh2xyxy, clean_str
from utils.torch_utils import torch_distributed_zero_first


# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng']  # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv']  # acceptable video suffixes
logger = logging.getLogger(__name__)

# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break

def get_hash(files):
    # Returns a single hash value of a list of files
    return sum(os.path.getsize(f) for f in files if os.path.isfile(f))

def img2label_paths(img_paths):
    # Define label paths as a function of image paths
    sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep  # /images/, /labels/ substrings
    return [x.replace(sa, sb, 1).replace('.' + x.split('.')[-1], '.txt') for x in img_paths]

def exif_size(img):
    # Returns exif-corrected PIL size
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        if rotation == 6:  # rotation 270
            s = (s[1], s[0])
        elif rotation == 8:  # rotation 90
            s = (s[1], s[0])
    except:
        pass

    return s

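# Example of the path convention assumed by img2label_paths() (paths are
# hypothetical): the first '/images/' component is swapped for '/labels/' and
# the suffix becomes .txt, e.g.
#   '/data/widerface/images/train/0_Parade_0.jpg'
#     -> '/data/widerface/labels/train/0_Parade_0.txt'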
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
                      rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):
    # Make sure only the first process in DDP process the dataset first, and the following others can use the cache
    with torch_distributed_zero_first(rank):
        dataset = LoadFaceImagesAndLabels(path, imgsz, batch_size,
                                          augment=augment,  # augment images
                                          hyp=hyp,  # augmentation hyperparameters
                                          rect=rect,  # rectangular training
                                          cache_images=cache,
                                          single_cls=opt.single_cls,
                                          stride=int(stride),
                                          pad=pad,
                                          image_weights=image_weights,
                                          )

    batch_size = min(batch_size, len(dataset))
    nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers])  # number of workers
    sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
    loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
    # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
    dataloader = loader(dataset,
                        batch_size=batch_size,
                        num_workers=nw,
                        sampler=sampler,
                        pin_memory=True,
                        collate_fn=LoadFaceImagesAndLabels.collate_fn4 if quad else LoadFaceImagesAndLabels.collate_fn)
    return dataloader, dataset

class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
    """ Dataloader that reuses workers

    Uses same syntax as vanilla DataLoader
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        self.iterator = super().__iter__()

    def __len__(self):
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        for i in range(len(self)):
            yield next(self.iterator)

class _RepeatSampler(object):
    """ Sampler that repeats forever

    Args:
        sampler (Sampler)
    """

    def __init__(self, sampler):
        self.sampler = sampler

    def __iter__(self):
        while True:
            yield from iter(self.sampler)

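# Why the two helpers above exist: a vanilla DataLoader tears down its worker
# processes at the end of every epoch. _RepeatSampler makes the batch sampler
# yield forever, and InfiniteDataLoader serves exactly len(batch_sampler)
# batches per __iter__ call from one persistent iterator, so workers survive
# across epochs and the per-epoch startup cost is paid only once.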
class LoadFaceImagesAndLabels(Dataset):  # for training/testing
    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1):
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect
        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
        self.mosaic_border = [-img_size // 2, -img_size // 2]
        self.stride = stride

        try:
            f = []  # image files
            for p in path if isinstance(path, list) else [path]:
                p = Path(p)  # os-agnostic
                if p.is_dir():  # dir
                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)
                elif p.is_file():  # file
                    with open(p, 'r') as t:
                        t = t.read().strip().splitlines()
                        parent = str(p.parent) + os.sep
                        f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path
                else:
                    raise Exception('%s does not exist' % p)
            self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
            assert self.img_files, 'No images found'
        except Exception as e:
            raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))

        # Check cache
        self.label_files = img2label_paths(self.img_files)  # labels
        cache_path = Path(self.label_files[0]).parent.with_suffix('.cache')  # cached labels
        if cache_path.is_file():
            cache = torch.load(cache_path)  # load
            if cache['hash'] != get_hash(self.label_files + self.img_files) or 'results' not in cache:  # changed
                cache = self.cache_labels(cache_path)  # re-cache
        else:
            cache = self.cache_labels(cache_path)  # cache

        # Display cache
        [nf, nm, ne, nc, n] = cache.pop('results')  # found, missing, empty, corrupted, total
        desc = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
        tqdm(None, desc=desc, total=n, initial=n)
        assert nf > 0 or not augment, f'No labels found in {cache_path}. Can not train without labels. See {help_url}'

        # Read cache
        cache.pop('hash')  # remove hash
        labels, shapes = zip(*cache.values())
        self.labels = list(labels)
        self.shapes = np.array(shapes, dtype=np.float64)
        self.img_files = list(cache.keys())  # update
        self.label_files = img2label_paths(cache.keys())  # update
        if single_cls:
            for x in self.labels:
                x[:, 0] = 0

        n = len(shapes)  # number of images
        bi = np.floor(np.arange(n) / batch_size).astype(np.int)  # batch index
        nb = bi[-1] + 1  # number of batches
        self.batch = bi  # batch index of image
        self.n = n
        self.indices = range(n)

        # Rectangular Training
        if self.rect:
            # Sort by aspect ratio
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.labels = [self.labels[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]

            # Set training image shapes
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]

            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride

        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        self.imgs = [None] * n
        if cache_images:
            gb = 0  # Gigabytes of cached images
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))  # 8 threads
            pbar = tqdm(enumerate(results), total=n)
            for i, x in pbar:
                self.imgs[i], self.img_hw0[i], self.img_hw[i] = x  # img, hw_original, hw_resized = load_image(self, i)
                gb += self.imgs[i].nbytes
                pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)

    def cache_labels(self, path=Path('./labels.cache')):
        # Cache dataset labels, check images and read shapes
        x = {}  # dict
        nm, nf, ne, nc = 0, 0, 0, 0  # number missing, found, empty, corrupted
        pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
        for i, (im_file, lb_file) in enumerate(pbar):
            try:
                # verify images
                im = Image.open(im_file)
                im.verify()  # PIL verify
                shape = exif_size(im)  # image size
                assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'

                # verify labels
                if os.path.isfile(lb_file):
                    nf += 1  # label found
                    with open(lb_file, 'r') as f:
                        l = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels
                    if len(l):
                        assert l.shape[1] == 15, 'labels require 15 columns each'
                        assert (l >= -1).all(), 'negative labels'
                        assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
                        assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
                    else:
                        ne += 1  # label empty
                        l = np.zeros((0, 15), dtype=np.float32)
                else:
                    nm += 1  # label missing
                    l = np.zeros((0, 15), dtype=np.float32)
                x[im_file] = [l, shape]
            except Exception as e:
                nc += 1
                print('WARNING: Ignoring corrupted image and/or label %s: %s' % (im_file, e))

            pbar.desc = f"Scanning '{path.parent / path.stem}' for images and labels... " \
                        f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"

        if nf == 0:
            print(f'WARNING: No labels found in {path}. See {help_url}')

        x['hash'] = get_hash(self.label_files + self.img_files)
        x['results'] = [nf, nm, ne, nc, i + 1]
        torch.save(x, path)  # save for next time
        logging.info(f"New cache created: {path}")
        return x

    def __len__(self):
        return len(self.img_files)

    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self

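    # Label layout enforced by cache_labels() above: each row has 15 columns,
    # [cls, x, y, w, h, x1, y1, x2, y2, x3, y3, x4, y4, x5, y5], all normalized
    # to 0-1, where the five (xi, yi) pairs are facial landmarks. Judging from
    # the flip logic below, they are ordered left eye, right eye, nose tip,
    # left mouth corner, right mouth corner, with -1 marking an invisible point.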
    def __getitem__(self, index):
        index = self.indices[index]  # linear, shuffled, or image_weights

        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp['mosaic']
        if mosaic:
            # Load mosaic
            img, labels = load_mosaic_face(self, index)
            shapes = None

            # MixUp https://arxiv.org/pdf/1710.09412.pdf
            if random.random() < hyp['mixup']:
                img2, labels2 = load_mosaic_face(self, random.randint(0, self.n - 1))
                r = np.random.beta(8.0, 8.0)  # mixup ratio, alpha=beta=8.0
                img = (img * r + img2 * (1 - r)).astype(np.uint8)
                labels = np.concatenate((labels, labels2), 0)

        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)

            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

            # Load labels
            labels = []
            x = self.labels[index]
            if x.size > 0:
                # Normalized xywh to pixel xyxy format
                labels = x.copy()
                labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0]  # pad width
                labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1]  # pad height
                labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
                labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]

                # Landmarks: visible points (> 0) are mapped into letterboxed pixels;
                # invisible points stay at -1 via the mask * value + (mask - 1) identity.
                #labels[:, 5] = ratio[0] * w * x[:, 5] + pad[0]  # pad width
                labels[:, 5] = np.array(x[:, 5] > 0, dtype=np.int32) * (ratio[0] * w * x[:, 5] + pad[0]) + (
                        np.array(x[:, 5] > 0, dtype=np.int32) - 1)
                labels[:, 6] = np.array(x[:, 6] > 0, dtype=np.int32) * (ratio[1] * h * x[:, 6] + pad[1]) + (
                        np.array(x[:, 6] > 0, dtype=np.int32) - 1)
                labels[:, 7] = np.array(x[:, 7] > 0, dtype=np.int32) * (ratio[0] * w * x[:, 7] + pad[0]) + (
                        np.array(x[:, 7] > 0, dtype=np.int32) - 1)
                labels[:, 8] = np.array(x[:, 8] > 0, dtype=np.int32) * (ratio[1] * h * x[:, 8] + pad[1]) + (
                        np.array(x[:, 8] > 0, dtype=np.int32) - 1)
                labels[:, 9] = np.array(x[:, 9] > 0, dtype=np.int32) * (ratio[0] * w * x[:, 9] + pad[0]) + (
                        np.array(x[:, 9] > 0, dtype=np.int32) - 1)
                labels[:, 10] = np.array(x[:, 10] > 0, dtype=np.int32) * (ratio[1] * h * x[:, 10] + pad[1]) + (
                        np.array(x[:, 10] > 0, dtype=np.int32) - 1)
                labels[:, 11] = np.array(x[:, 11] > 0, dtype=np.int32) * (ratio[0] * w * x[:, 11] + pad[0]) + (
                        np.array(x[:, 11] > 0, dtype=np.int32) - 1)
                labels[:, 12] = np.array(x[:, 12] > 0, dtype=np.int32) * (ratio[1] * h * x[:, 12] + pad[1]) + (
                        np.array(x[:, 12] > 0, dtype=np.int32) - 1)
                labels[:, 13] = np.array(x[:, 13] > 0, dtype=np.int32) * (ratio[0] * w * x[:, 13] + pad[0]) + (
                        np.array(x[:, 13] > 0, dtype=np.int32) - 1)
                labels[:, 14] = np.array(x[:, 14] > 0, dtype=np.int32) * (ratio[1] * h * x[:, 14] + pad[1]) + (
                        np.array(x[:, 14] > 0, dtype=np.int32) - 1)

        if self.augment:
            # Augment imagespace
            if not mosaic:
                img, labels = random_perspective(img, labels,
                                                 degrees=hyp['degrees'],
                                                 translate=hyp['translate'],
                                                 scale=hyp['scale'],
                                                 shear=hyp['shear'],
                                                 perspective=hyp['perspective'])

            # Augment colorspace
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

            # Apply cutouts
            # if random.random() < 0.9:
            #     labels = cutout(img, labels)

        nL = len(labels)  # number of labels
        if nL:
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])  # convert xyxy to xywh
            labels[:, [2, 4]] /= img.shape[0]  # normalized height 0-1
            labels[:, [1, 3]] /= img.shape[1]  # normalized width 0-1

            labels[:, [5, 7, 9, 11, 13]] /= img.shape[1]  # normalized landmark x 0-1
            labels[:, [5, 7, 9, 11, 13]] = np.where(labels[:, [5, 7, 9, 11, 13]] < 0, -1, labels[:, [5, 7, 9, 11, 13]])
            labels[:, [6, 8, 10, 12, 14]] /= img.shape[0]  # normalized landmark y 0-1
            labels[:, [6, 8, 10, 12, 14]] = np.where(labels[:, [6, 8, 10, 12, 14]] < 0, -1, labels[:, [6, 8, 10, 12, 14]])

        if self.augment:
            # flip up-down
            if random.random() < hyp['flipud']:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]

                    labels[:, 6] = np.where(labels[:, 6] < 0, -1, 1 - labels[:, 6])
                    labels[:, 8] = np.where(labels[:, 8] < 0, -1, 1 - labels[:, 8])
                    labels[:, 10] = np.where(labels[:, 10] < 0, -1, 1 - labels[:, 10])
                    labels[:, 12] = np.where(labels[:, 12] < 0, -1, 1 - labels[:, 12])
                    labels[:, 14] = np.where(labels[:, 14] < 0, -1, 1 - labels[:, 14])

            # flip left-right
            if random.random() < hyp['fliplr']:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]

                    labels[:, 5] = np.where(labels[:, 5] < 0, -1, 1 - labels[:, 5])
                    labels[:, 7] = np.where(labels[:, 7] < 0, -1, 1 - labels[:, 7])
                    labels[:, 9] = np.where(labels[:, 9] < 0, -1, 1 - labels[:, 9])
                    labels[:, 11] = np.where(labels[:, 11] < 0, -1, 1 - labels[:, 11])
                    labels[:, 13] = np.where(labels[:, 13] < 0, -1, 1 - labels[:, 13])

                    # After mirroring, left/right eyes and left/right mouth corners are
                    # indistinguishable, so swap them to keep a consistent ordering for the network.
                    eye_left = np.copy(labels[:, [5, 6]])
                    mouth_left = np.copy(labels[:, [11, 12]])
                    labels[:, [5, 6]] = labels[:, [7, 8]]
                    labels[:, [7, 8]] = eye_left
                    labels[:, [11, 12]] = labels[:, [13, 14]]
                    labels[:, [13, 14]] = mouth_left

        labels_out = torch.zeros((nL, 16))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)
            # showlabels(img, labels[:, 1:5], labels[:, 5:15])

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)
        # print(index, ' --- labels_out: ', labels_out)
        # if nL:
        #     print(' : landmarks : ', torch.max(labels_out[:, 5:15]), ' --- ', torch.min(labels_out[:, 5:15]))
        return torch.from_numpy(img), labels_out, self.img_files[index], shapes

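    # The mask identity used above, spelled out: with m = (v > 0) as 0/1,
    # m * (scale * v + pad) + (m - 1) gives
    #   v = 0.25, scale = 640, pad = 16  ->  1 * 176.0 + 0   = 176.0  (visible)
    #   v = -1                           ->  0 * (...) + (-1) = -1    (invisible)
    # so invisible landmarks pass through every geometric stage pinned at -1.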
    @staticmethod
    def collate_fn(batch):
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes


def showlabels(img, boxs, landmarks):
    for box in boxs:
        x, y, w, h = box[0] * img.shape[1], box[1] * img.shape[0], box[2] * img.shape[1], box[3] * img.shape[0]
        # cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.rectangle(img, (int(x - w / 2), int(y - h / 2)), (int(x + w / 2), int(y + h / 2)), (0, 255, 0), 2)

    for landmark in landmarks:
        # cv2.circle(img, (60, 60), 30, (0, 0, 255))
        for i in range(5):
            cv2.circle(img, (int(landmark[2 * i] * img.shape[1]), int(landmark[2 * i + 1] * img.shape[0])), 3, (0, 0, 255), -1)
    cv2.imshow('test', img)
    cv2.waitKey(0)


def load_mosaic_face(self, index):
    # loads images in a mosaic
    labels4 = []
    s = self.img_size
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)]  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels
        x = self.labels[index]
        labels = x.copy()
        if x.size > 0:  # Normalized xywh to pixel xyxy format
            # box, x1,y1,x2,y2
            labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
            labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
            labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
            labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
            # 10 landmarks

            labels[:, 5] = np.array(x[:, 5] > 0, dtype=np.int32) * (w * x[:, 5] + padw) + (np.array(x[:, 5] > 0, dtype=np.int32) - 1)
            labels[:, 6] = np.array(x[:, 6] > 0, dtype=np.int32) * (h * x[:, 6] + padh) + (np.array(x[:, 6] > 0, dtype=np.int32) - 1)
            labels[:, 7] = np.array(x[:, 7] > 0, dtype=np.int32) * (w * x[:, 7] + padw) + (np.array(x[:, 7] > 0, dtype=np.int32) - 1)
            labels[:, 8] = np.array(x[:, 8] > 0, dtype=np.int32) * (h * x[:, 8] + padh) + (np.array(x[:, 8] > 0, dtype=np.int32) - 1)
            labels[:, 9] = np.array(x[:, 9] > 0, dtype=np.int32) * (w * x[:, 9] + padw) + (np.array(x[:, 9] > 0, dtype=np.int32) - 1)
            labels[:, 10] = np.array(x[:, 10] > 0, dtype=np.int32) * (h * x[:, 10] + padh) + (np.array(x[:, 10] > 0, dtype=np.int32) - 1)
            labels[:, 11] = np.array(x[:, 11] > 0, dtype=np.int32) * (w * x[:, 11] + padw) + (np.array(x[:, 11] > 0, dtype=np.int32) - 1)
            labels[:, 12] = np.array(x[:, 12] > 0, dtype=np.int32) * (h * x[:, 12] + padh) + (np.array(x[:, 12] > 0, dtype=np.int32) - 1)
            labels[:, 13] = np.array(x[:, 13] > 0, dtype=np.int32) * (w * x[:, 13] + padw) + (np.array(x[:, 13] > 0, dtype=np.int32) - 1)
            labels[:, 14] = np.array(x[:, 14] > 0, dtype=np.int32) * (h * x[:, 14] + padh) + (np.array(x[:, 14] > 0, dtype=np.int32) - 1)
        labels4.append(labels)

    # Concat/clip labels
    if len(labels4):
        labels4 = np.concatenate(labels4, 0)
        np.clip(labels4[:, 1:5], 0, 2 * s, out=labels4[:, 1:5])  # use with random_perspective
        # img4, labels4 = replicate(img4, labels4)  # replicate

        # landmarks
        labels4[:, 5:] = np.where(labels4[:, 5:] < 0, -1, labels4[:, 5:])
        labels4[:, 5:] = np.where(labels4[:, 5:] > 2 * s, -1, labels4[:, 5:])

        labels4[:, 5] = np.where(labels4[:, 6] == -1, -1, labels4[:, 5])
        labels4[:, 6] = np.where(labels4[:, 5] == -1, -1, labels4[:, 6])

        labels4[:, 7] = np.where(labels4[:, 8] == -1, -1, labels4[:, 7])
        labels4[:, 8] = np.where(labels4[:, 7] == -1, -1, labels4[:, 8])

        labels4[:, 9] = np.where(labels4[:, 10] == -1, -1, labels4[:, 9])
        labels4[:, 10] = np.where(labels4[:, 9] == -1, -1, labels4[:, 10])

        labels4[:, 11] = np.where(labels4[:, 12] == -1, -1, labels4[:, 11])
        labels4[:, 12] = np.where(labels4[:, 11] == -1, -1, labels4[:, 12])

        labels4[:, 13] = np.where(labels4[:, 14] == -1, -1, labels4[:, 13])
        labels4[:, 14] = np.where(labels4[:, 13] == -1, -1, labels4[:, 14])

    # Augment
    img4, labels4 = random_perspective(img4, labels4,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove
    return img4, labels4

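# Landmark bookkeeping after the mosaic above: points that fall outside the
# 2s x 2s canvas are marked -1 rather than clipped (a clipped landmark would no
# longer sit on the face), and each x/y pair is then forced consistent so that
# if either coordinate is -1 the whole point counts as invisible.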
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
    # loads 1 image from dataset, returns img, original hw, resized hw
    img = self.imgs[index]
    if img is None:  # not cached
        path = self.img_files[index]
        img = cv2.imread(path)  # BGR
        assert img is not None, 'Image Not Found ' + path
        h0, w0 = img.shape[:2]  # orig hw
        r = self.img_size / max(h0, w0)  # resize image to img_size
        if r != 1:  # always resize down, only resize up if training with augmentation
            interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
            img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
        return img, (h0, w0), img.shape[:2]  # img, hw_original, hw_resized
    else:
        return self.imgs[index], self.img_hw0[index], self.img_hw[index]  # img, hw_original, hw_resized

def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8

    x = np.arange(0, 256, dtype=np.int16)
    lut_hue = ((x * r[0]) % 180).astype(dtype)
    lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
    lut_val = np.clip(x * r[2], 0, 255).astype(dtype)

    img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed

    # Histogram equalization
    # if random.random() < 0.2:
    #     for i in range(3):
    #         img[:, :, i] = cv2.equalizeHist(img[:, :, i])

def replicate(img, labels):
    # Replicate labels
    h, w = img.shape[:2]
    boxes = labels[:, 1:].astype(int)
    x1, y1, x2, y2 = boxes.T
    s = ((x2 - x1) + (y2 - y1)) / 2  # side length (pixels)
    for i in s.argsort()[:round(s.size * 0.5)]:  # smallest indices
        x1b, y1b, x2b, y2b = boxes[i]
        bh, bw = y2b - y1b, x2b - x1b
        yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw))  # offset x, y
        x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
        img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)

    return img, labels

def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 64), np.mod(dh, 64)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)

def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]

    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2

    # Center
    C = np.eye(3)
    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine
            img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(img[:, :, ::-1])  # base
    # ax[1].imshow(img2[:, :, ::-1])  # warped

    # Transform label coordinates
    n = len(targets)
    if n:
        # warp points
        # xy = np.ones((n * 4, 3))
        xy = np.ones((n * 9, 3))
        xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]].reshape(n * 9, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = xy @ M.T  # transform
        if perspective:
            xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 18)  # rescale
        else:  # affine
            xy = xy[:, :2].reshape(n, 18)

        # create new boxes
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]

        landmarks = xy[:, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17]]
        mask = np.array(targets[:, 5:] > 0, dtype=np.int32)
        landmarks = landmarks * mask
        landmarks = landmarks + mask - 1

        landmarks = np.where(landmarks < 0, -1, landmarks)
        landmarks[:, [0, 2, 4, 6, 8]] = np.where(landmarks[:, [0, 2, 4, 6, 8]] > width, -1, landmarks[:, [0, 2, 4, 6, 8]])
        landmarks[:, [1, 3, 5, 7, 9]] = np.where(landmarks[:, [1, 3, 5, 7, 9]] > height, -1, landmarks[:, [1, 3, 5, 7, 9]])

        landmarks[:, 0] = np.where(landmarks[:, 1] == -1, -1, landmarks[:, 0])
        landmarks[:, 1] = np.where(landmarks[:, 0] == -1, -1, landmarks[:, 1])

        landmarks[:, 2] = np.where(landmarks[:, 3] == -1, -1, landmarks[:, 2])
        landmarks[:, 3] = np.where(landmarks[:, 2] == -1, -1, landmarks[:, 3])

        landmarks[:, 4] = np.where(landmarks[:, 5] == -1, -1, landmarks[:, 4])
        landmarks[:, 5] = np.where(landmarks[:, 4] == -1, -1, landmarks[:, 5])

        landmarks[:, 6] = np.where(landmarks[:, 7] == -1, -1, landmarks[:, 6])
        landmarks[:, 7] = np.where(landmarks[:, 6] == -1, -1, landmarks[:, 7])

        landmarks[:, 8] = np.where(landmarks[:, 9] == -1, -1, landmarks[:, 8])
        landmarks[:, 9] = np.where(landmarks[:, 8] == -1, -1, landmarks[:, 9])

        targets[:, 5:] = landmarks

        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

        # # apply angle-based reduction of bounding boxes
        # radians = a * math.pi / 180
        # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
        # x = (xy[:, 2] + xy[:, 0]) / 2
        # y = (xy[:, 3] + xy[:, 1]) / 2
        # w = (xy[:, 2] - xy[:, 0]) * reduction
        # h = (xy[:, 3] - xy[:, 1]) * reduction
        # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T

        # clip boxes
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)

        # filter candidates
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
        targets = targets[i]
        targets[:, 1:5] = xy[i]

    return img, targets

715 |
+
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1): # box1(4,n), box2(4,n)
|
716 |
+
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
|
717 |
+
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
|
718 |
+
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
|
719 |
+
ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio
|
720 |
+
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr) # candidates
|
721 |
+
|
722 |
+
|
723 |
+
def cutout(image, labels):
|
724 |
+
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
|
725 |
+
h, w = image.shape[:2]
|
726 |
+
|
727 |
+
def bbox_ioa(box1, box2):
|
728 |
+
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
|
729 |
+
box2 = box2.transpose()
|
730 |
+
|
731 |
+
# Get the coordinates of bounding boxes
|
732 |
+
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
|
733 |
+
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
|
734 |
+
|
735 |
+
# Intersection area
|
736 |
+
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
|
737 |
+
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
|
738 |
+
|
739 |
+
# box2 area
|
740 |
+
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
|
741 |
+
|
742 |
+
# Intersection over box2 area
|
743 |
+
return inter_area / box2_area
|
744 |
+
|
745 |
+
# create random masks
|
746 |
+
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
|
747 |
+
for s in scales:
|
748 |
+
mask_h = random.randint(1, int(h * s))
|
749 |
+
mask_w = random.randint(1, int(w * s))
|
750 |
+
|
751 |
+
# box
|
752 |
+
xmin = max(0, random.randint(0, w) - mask_w // 2)
|
753 |
+
ymin = max(0, random.randint(0, h) - mask_h // 2)
|
754 |
+
xmax = min(w, xmin + mask_w)
|
755 |
+
ymax = min(h, ymin + mask_h)
|
756 |
+
|
757 |
+
# apply random color mask
|
758 |
+
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
|
759 |
+
|
760 |
+
# return unobscured labels
|
761 |
+
if len(labels) and s > 0.03:
|
762 |
+
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
|
763 |
+
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
|
764 |
+
labels = labels[ioa < 0.60] # remove >60% obscured labels
|
765 |
+
|
766 |
+
return labels
|
767 |
+
|
768 |
+
|
769 |
+
def create_folder(path='./new'):
|
770 |
+
# Create folder
|
771 |
+
if os.path.exists(path):
|
772 |
+
shutil.rmtree(path) # delete output folder
|
773 |
+
os.makedirs(path) # make new output folder
|
774 |
+
|
775 |
+
|
776 |
+
def flatten_recursive(path='../coco128'):
|
777 |
+
# Flatten a recursive directory by bringing all files to top level
|
778 |
+
new_path = Path(path + '_flat')
|
779 |
+
create_folder(new_path)
|
780 |
+
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
|
781 |
+
shutil.copyfile(file, new_path / Path(file).name)
|
782 |
+
|
783 |
+
|
784 |
+
def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128')
|
785 |
+
# Convert detection dataset into classification dataset, with one directory per class
|
786 |
+
|
787 |
+
path = Path(path) # images dir
|
788 |
+
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
|
789 |
+
files = list(path.rglob('*.*'))
|
790 |
+
n = len(files) # number of files
|
791 |
+
for im_file in tqdm(files, total=n):
|
792 |
+
if im_file.suffix[1:] in img_formats:
|
793 |
+
# image
|
794 |
+
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
|
795 |
+
h, w = im.shape[:2]
|
796 |
+
|
797 |
+
# labels
|
798 |
+
lb_file = Path(img2label_paths([str(im_file)])[0])
|
799 |
+
if Path(lb_file).exists():
|
800 |
+
with open(lb_file, 'r') as f:
|
801 |
+
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
|
802 |
+
|
803 |
+
for j, x in enumerate(lb):
|
804 |
+
c = int(x[0]) # class
|
805 |
+
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
|
806 |
+
if not f.parent.is_dir():
|
807 |
+
f.parent.mkdir(parents=True)
|
808 |
+
|
809 |
+
b = x[1:] * [w, h, w, h] # box
|
810 |
+
# b[2:] = b[2:].max() # rectangle to square
|
811 |
+
b[2:] = b[2:] * 1.2 + 3 # pad
|
812 |
+
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
|
813 |
+
|
814 |
+
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
|
815 |
+
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
|
816 |
+
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
|
817 |
+
|
818 |
+
|
819 |
+
def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)): # from utils.datasets import *; autosplit('../coco128')
|
820 |
+
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
|
821 |
+
# Arguments
|
822 |
+
path: Path to images directory
|
823 |
+
weights: Train, val, test weights (list)
|
824 |
+
"""
|
825 |
+
path = Path(path) # images dir
|
826 |
+
files = list(path.rglob('*.*'))
|
827 |
+
n = len(files) # number of files
|
828 |
+
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
|
829 |
+
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
|
830 |
+
[(path / x).unlink() for x in txt if (path / x).exists()] # remove existing
|
831 |
+
for i, img in tqdm(zip(indices, files), total=n):
|
832 |
+
if img.suffix[1:] in img_formats:
|
833 |
+
with open(path / txt[i], 'a') as f:
|
834 |
+
f.write(str(img) + '\n') # add image to txt file
|
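A minimal sanity sketch for the two standalone helpers above (illustrative only, not part of the upload; the dummy image and box values are assumptions, and utils.face_datasets is assumed importable):

# Illustrative sanity check (not part of the upload).
import numpy as np
from utils.face_datasets import cutout, box_candidates

img = np.full((640, 640, 3), 114, dtype=np.uint8)   # dummy grey image (assumption)
labels = np.array([[0, 100., 100., 300., 300.]])    # one box: [cls, x1, y1, x2, y2]
labels = cutout(img, labels)                        # paints random masks, drops >60%-obscured boxes
print(len(labels), 'label(s) survive cutout')

before = np.array([[100., 100., 300., 300.]]).T     # box1(4,n): boxes before augmentation
after = np.array([[110., 105., 290., 295.]]).T      # box2(4,n): the same boxes after warping
print(box_candidates(box1=before, box2=after))      # [ True] -> the warped box is still a valid candidate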
utils/general.py
ADDED
@@ -0,0 +1,646 @@
# General utils

import glob
import logging
import math
import os
import random
import re
import subprocess
import time
from pathlib import Path

import cv2
import numpy as np
import torch
import torchvision
import yaml

from utils.google_utils import gsutil_getsize
from utils.metrics import fitness
from utils.torch_utils import init_torch_seeds

# Settings
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8))  # NumExpr max threads


def set_logging(rank=-1):
    logging.basicConfig(
        format="%(message)s",
        level=logging.INFO if rank in [-1, 0] else logging.WARN)


def init_seeds(seed=0):
    # Initialize random number generator (RNG) seeds
    random.seed(seed)
    np.random.seed(seed)
    init_torch_seeds(seed)


def get_latest_run(search_dir='.'):
    # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
    last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
    return max(last_list, key=os.path.getctime) if last_list else ''


def check_online():
    # Check internet connectivity
    import socket
    try:
        socket.create_connection(("1.1.1.1", 53))  # check host accessibility
        return True
    except OSError:
        return False


def check_git_status():
    # Recommend 'git pull' if code is out of date
    print(colorstr('github: '), end='')
    try:
        assert Path('.git').exists(), 'skipping check (not a git repository)'
        assert not Path('/workspace').exists(), 'skipping check (Docker image)'  # not Path('/.dockerenv').exists()
        assert check_online(), 'skipping check (offline)'

        cmd = 'git fetch && git config --get remote.origin.url'  # github repo url
        url = subprocess.check_output(cmd, shell=True).decode()[:-1]
        cmd = 'git rev-list $(git rev-parse --abbrev-ref HEAD)..origin/master --count'  # commits behind
        n = int(subprocess.check_output(cmd, shell=True))
        if n > 0:
            print(f"⚠️ WARNING: code is out of date by {n} {'commits' if n > 1 else 'commit'}. "
                  f"Use 'git pull' to update or 'git clone {url}' to download latest.")
        else:
            print(f'up to date with {url} ✅')
    except Exception as e:
        print(e)


def check_requirements(file='requirements.txt'):
    # Check installed dependencies meet requirements
    import pkg_resources
    requirements = pkg_resources.parse_requirements(Path(file).open())
    requirements = [x.name + ''.join(*x.specs) if len(x.specs) else x.name for x in requirements]
    pkg_resources.require(requirements)  # DistributionNotFound or VersionConflict exception if requirements not met


def check_img_size(img_size, s=32):
    # Verify img_size is a multiple of stride s
    new_size = make_divisible(img_size, int(s))  # ceil gs-multiple
    if new_size != img_size:
        print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
    return new_size


def check_file(file):
    # Search for file if not found
    if os.path.isfile(file) or file == '':
        return file
    else:
        files = glob.glob('./**/' + file, recursive=True)  # find file
        assert len(files), 'File Not Found: %s' % file  # assert file was found
        assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files)  # assert unique
        return files[0]  # return file


def check_dataset(dict):
    # Download dataset if not found locally
    val, s = dict.get('val'), dict.get('download')
    if val and len(val):
        val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])]  # val path
        if not all(x.exists() for x in val):
            print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
            if s and len(s):  # download script
                print('Downloading %s ...' % s)
                if s.startswith('http') and s.endswith('.zip'):  # URL
                    f = Path(s).name  # filename
                    torch.hub.download_url_to_file(s, f)
                    r = os.system('unzip -q %s -d ../ && rm %s' % (f, f))  # unzip
                else:  # bash script
                    r = os.system(s)
                print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure'))  # analyze return value
            else:
                raise Exception('Dataset not found.')


def make_divisible(x, divisor):
    # Returns x evenly divisible by divisor
    return math.ceil(x / divisor) * divisor


def clean_str(s):
    # Cleans a string by replacing special characters with underscore _
    return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)


def one_cycle(y1=0.0, y2=1.0, steps=100):
    # lambda function for sinusoidal ramp from y1 to y2
    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1


def colorstr(*input):
    # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
    *args, string = input if len(input) > 1 else ('blue', 'bold', input[0])  # color arguments, string
    colors = {'black': '\033[30m',  # basic colors
              'red': '\033[31m',
              'green': '\033[32m',
              'yellow': '\033[33m',
              'blue': '\033[34m',
              'magenta': '\033[35m',
              'cyan': '\033[36m',
              'white': '\033[37m',
              'bright_black': '\033[90m',  # bright colors
              'bright_red': '\033[91m',
              'bright_green': '\033[92m',
              'bright_yellow': '\033[93m',
              'bright_blue': '\033[94m',
              'bright_magenta': '\033[95m',
              'bright_cyan': '\033[96m',
              'bright_white': '\033[97m',
              'end': '\033[0m',  # misc
              'bold': '\033[1m',
              'underline': '\033[4m'}
    return ''.join(colors[x] for x in args) + f'{string}' + colors['end']


def labels_to_class_weights(labels, nc=80):
    # Get class weights (inverse frequency) from training labels
    if labels[0] is None:  # no labels loaded
        return torch.Tensor()

    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(int)  # labels = [class xywh]; np.int is deprecated
    weights = np.bincount(classes, minlength=nc)  # occurrences per class

    # Prepend gridpoint count (for uCE training)
    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image
    # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start

    weights[weights == 0] = 1  # replace empty bins with 1
    weights = 1 / weights  # number of targets per class
    weights /= weights.sum()  # normalize
    return torch.from_numpy(weights)


def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    # Produces image weights based on class_weights and image contents
    class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
    # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
    return image_weights


def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
    # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
    # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
         35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
         64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
    return x


def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y


def xywhn2xyxy(x, w=640, h=640, padw=32, padh=32):
    # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw  # top left x
    y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh  # top left y
    y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw  # bottom right x
    y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh  # bottom right y
    return y


def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords


def clip_coords(boxes, img_shape):
    # Clip xyxy bounding boxes to image shape (height, width)
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2


def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):
    # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
    box2 = box2.T

    # Get the coordinates of bounding boxes
    if x1y1x2y2:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
    else:  # transform from xywh to xyxy
        b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
        b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
        b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
        b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2

    # Intersection area
    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)

    # Union Area
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
    union = w1 * h1 + w2 * h2 - inter + eps

    iou = inter / union
    if GIoU or DIoU or CIoU:
        cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
        ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
        if CIoU or DIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            c2 = cw ** 2 + ch ** 2 + eps  # convex diagonal squared
            rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
                    (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4  # center distance squared
            if DIoU:
                return iou - rho2 / c2  # DIoU
            elif CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
                with torch.no_grad():
                    alpha = v / ((1 + eps) - iou + v)
                return iou - (rho2 / c2 + v * alpha)  # CIoU
        else:  # GIoU https://arxiv.org/pdf/1902.09630.pdf
            c_area = cw * ch + eps  # convex area
            return iou - (c_area - union) / c_area  # GIoU
    else:
        return iou  # IoU


def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) -
             torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    # iou = inter / (area1 + area2 - inter)
    return inter / (area1[:, None] + area2 - inter)


def wh_iou(wh1, wh2):
    # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
    wh1 = wh1[:, None]  # [N,1,2]
    wh2 = wh2[None]  # [1,M,2]
    inter = torch.min(wh1, wh2).prod(2)  # [N,M]
    # iou = inter / (area1 + area2 - inter)
    return inter / (wh1.prod(2) + wh2.prod(2) - inter)


def intersect(box_a, box_b):
    # Batched pairwise intersection areas of xyxy boxes: box_a [n,A,4], box_b [n,B,4] -> [n,A,B].
    # NOTE: jaccard_diou() below calls intersect(), but no definition was included in this
    # upload; this is the standard YOLACT-style helper, added here as an assumption.
    n, A, B = box_a.size(0), box_a.size(1), box_b.size(1)
    max_xy = torch.min(box_a[:, :, 2:].unsqueeze(2).expand(n, A, B, 2),
                       box_b[:, :, 2:].unsqueeze(1).expand(n, A, B, 2))
    min_xy = torch.max(box_a[:, :, :2].unsqueeze(2).expand(n, A, B, 2),
                       box_b[:, :, :2].unsqueeze(1).expand(n, A, B, 2))
    inter = torch.clamp(max_xy - min_xy, min=0)
    return inter[:, :, :, 0] * inter[:, :, :, 1]


def jaccard_diou(box_a, box_b, iscrowd: bool = False):
    use_batch = True
    if box_a.dim() == 2:
        use_batch = False
        box_a = box_a[None, ...]
        box_b = box_b[None, ...]

    inter = intersect(box_a, box_b)
    area_a = ((box_a[:, :, 2] - box_a[:, :, 0]) *
              (box_a[:, :, 3] - box_a[:, :, 1])).unsqueeze(2).expand_as(inter)  # [A,B]
    area_b = ((box_b[:, :, 2] - box_b[:, :, 0]) *
              (box_b[:, :, 3] - box_b[:, :, 1])).unsqueeze(1).expand_as(inter)  # [A,B]
    union = area_a + area_b - inter
    x1 = ((box_a[:, :, 2] + box_a[:, :, 0]) / 2).unsqueeze(2).expand_as(inter)
    y1 = ((box_a[:, :, 3] + box_a[:, :, 1]) / 2).unsqueeze(2).expand_as(inter)
    x2 = ((box_b[:, :, 2] + box_b[:, :, 0]) / 2).unsqueeze(1).expand_as(inter)
    y2 = ((box_b[:, :, 3] + box_b[:, :, 1]) / 2).unsqueeze(1).expand_as(inter)

    t1 = box_a[:, :, 1].unsqueeze(2).expand_as(inter)
    b1 = box_a[:, :, 3].unsqueeze(2).expand_as(inter)
    l1 = box_a[:, :, 0].unsqueeze(2).expand_as(inter)
    r1 = box_a[:, :, 2].unsqueeze(2).expand_as(inter)

    t2 = box_b[:, :, 1].unsqueeze(1).expand_as(inter)
    b2 = box_b[:, :, 3].unsqueeze(1).expand_as(inter)
    l2 = box_b[:, :, 0].unsqueeze(1).expand_as(inter)
    r2 = box_b[:, :, 2].unsqueeze(1).expand_as(inter)

    cr = torch.max(r1, r2)
    cl = torch.min(l1, l2)
    ct = torch.min(t1, t2)
    cb = torch.max(b1, b2)
    D = (((x2 - x1) ** 2 + (y2 - y1) ** 2) / ((cr - cl) ** 2 + (cb - ct) ** 2 + 1e-7))
    out = inter / area_a if iscrowd else inter / (union + 1e-7) - D ** 0.7
    return out if use_batch else out.squeeze(0)


def non_max_suppression_face(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):
    """Performs Non-Maximum Suppression (NMS) on inference results
    Returns:
        detections with shape: nx16 (x1, y1, x2, y2, conf, 10 landmark coords, cls)
    """

    nc = prediction.shape[2] - 15  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 16), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            l = labels[xi]
            v = torch.zeros((len(l), nc + 15), device=x.device)
            v[:, :4] = l[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(l)), l[:, 0].long() + 15] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 15:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx16 (xyxy, conf, landmarks, cls)
        if multi_label:
            i, j = (x[:, 15:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 15, None], x[i, 5:15], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 15:].max(1, keepdim=True)
            x = torch.cat((box, conf, x[:, 5:15], j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class (class column is index 15 here, after the 10 landmark values)
        if classes is not None:
            x = x[(x[:, 15:16] == torch.tensor(classes, device=x.device)).any(1)]

        # If none remain process next image
        n = x.shape[0]  # number of boxes
        if not n:
            continue

        # Batched NMS
        c = x[:, 15:16] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        # if i.shape[0] > max_det:  # limit detections
        #     i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            break  # time limit exceeded

    return output


def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):
    """Performs Non-Maximum Suppression (NMS) on inference results

    Returns:
        detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
    """

    nc = prediction.shape[2] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    # max_det = 300  # maximum number of detections per image
    # max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            l = labels[xi]
            v = torch.zeros((len(l), nc + 5), device=x.device)
            v[:, :4] = l[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        # elif n > max_nms:  # excess boxes
        #     x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence
        x = x[x[:, 4].argsort(descending=True)]  # sort by confidence

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        # if i.shape[0] > max_det:  # limit detections
        #     i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            print(f'WARNING: NMS time limit {time_limit}s exceeded')
            break  # time limit exceeded

    return output


def strip_optimizer(f='weights/best.pt', s=''):  # from utils.general import *; strip_optimizer()
    # Strip optimizer from 'f' to finalize training, optionally save as 's'
    x = torch.load(f, map_location=torch.device('cpu'))
    for key in 'optimizer', 'training_results', 'wandb_id':
        x[key] = None
    x['epoch'] = -1
    x['model'].half()  # to FP16
    for p in x['model'].parameters():
        p.requires_grad = False
    torch.save(x, s or f)
    mb = os.path.getsize(s or f) / 1E6  # filesize
    print('Optimizer stripped from %s,%s %.1fMB' % (f, (' saved as %s,' % s) if s else '', mb))


def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
    # Print mutation results to evolve.txt (for use with train.py --evolve)
    a = '%10s' * len(hyp) % tuple(hyp.keys())  # hyperparam keys
    b = '%10.3g' * len(hyp) % tuple(hyp.values())  # hyperparam values
    c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
    print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))

    if bucket:
        url = 'gs://%s/evolve.txt' % bucket
        if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0):
            os.system('gsutil cp %s .' % url)  # download evolve.txt if larger than local

    with open('evolve.txt', 'a') as f:  # append result
        f.write(c + b + '\n')
    x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0)  # load unique rows
    x = x[np.argsort(-fitness(x))]  # sort
    np.savetxt('evolve.txt', x, '%10.3g')  # save sort by fitness

    # Save yaml
    for i, k in enumerate(hyp.keys()):
        hyp[k] = float(x[0, i + 7])
    with open(yaml_file, 'w') as f:
        results = tuple(x[0, :7])
        c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
        f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
        yaml.dump(hyp, f, sort_keys=False)

    if bucket:
        os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket))  # upload


def apply_classifier(x, model, img, im0):
    # applies a second stage classifier to yolo outputs
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()

            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                # cv2.imwrite('test%i.jpg' % j, cutout)

                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255.0  # 0 - 255 to 0.0 - 1.0
                ims.append(im)

            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections

    return x


def increment_path(path, exist_ok=True, sep=''):
    # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc.
    path = Path(path)  # os-agnostic
    if (path.exists() and exist_ok) or (not path.exists()):
        return str(path)
    else:
        dirs = glob.glob(f"{path}{sep}*")  # similar paths
        matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
        i = [int(m.groups()[0]) for m in matches if m]  # indices
        n = max(i) + 1 if i else 2  # increment number
        return f"{path}{sep}{n}"  # update path
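A small sanity sketch for the converters and NMS above (illustrative only, not part of the upload; the fake prediction tensor and thresholds are assumptions):

# Illustrative sanity check (not part of the upload).
import torch
from utils.general import xyxy2xywh, xywh2xyxy, non_max_suppression

boxes = torch.tensor([[10., 20., 110., 220.]])                 # x1y1x2y2
assert torch.allclose(xywh2xyxy(xyxy2xywh(boxes)), boxes)      # conversion round-trips exactly

# fake prediction: 1 image, 2 near-duplicate candidates, layout [x, y, w, h, obj, cls0]
pred = torch.tensor([[[60., 120., 100., 200., 0.9, 0.8],
                      [62., 118., 100., 200., 0.6, 0.7]]])
out = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)
print(out[0])  # nx6 tensor: x1, y1, x2, y2, conf, cls -- the duplicate is suppressed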
utils/google_app_engine/Dockerfile
ADDED
@@ -0,0 +1,25 @@
FROM gcr.io/google-appengine/python

# Create a virtualenv for dependencies. This isolates these packages from
# system-level packages.
# Use -p python3 or -p python3.7 to select python version. Default is version 2.
RUN virtualenv /env -p python3

# Setting these environment variables is the same as running
# source /env/bin/activate.
ENV VIRTUAL_ENV /env
ENV PATH /env/bin:$PATH

RUN apt-get update && apt-get install -y python-opencv

# Copy the application's requirements.txt and run pip to install all
# dependencies into the virtualenv.
ADD requirements.txt /app/requirements.txt
RUN pip install -r /app/requirements.txt

# Add the application source code.
ADD . /app

# Run a WSGI server to serve the application. gunicorn must be declared as
# a dependency in requirements.txt.
CMD gunicorn -b :$PORT main:app
utils/google_app_engine/additional_requirements.txt
ADDED
@@ -0,0 +1,4 @@
# add these requirements in your app on top of the existing ones
pip==18.1
Flask==1.0.2
gunicorn==19.9.0
utils/google_app_engine/app.yaml
ADDED
@@ -0,0 +1,14 @@
runtime: custom
env: flex

service: yolov5app

liveness_check:
  initial_delay_sec: 600

manual_scaling:
  instances: 1
resources:
  cpu: 1
  memory_gb: 4
  disk_size_gb: 20
utils/google_utils.py
ADDED
@@ -0,0 +1,122 @@
# Google utils: https://cloud.google.com/storage/docs/reference/libraries

import os
import platform
import subprocess
import time
from pathlib import Path

import requests
import torch


def gsutil_getsize(url=''):
    # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du
    s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8')
    return eval(s.split(' ')[0]) if len(s) else 0  # bytes


def attempt_download(file, repo='ultralytics/yolov5'):
    # Attempt file download if does not exist
    file = Path(str(file).strip().replace("'", '').lower())

    if not file.exists():
        try:
            response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json()  # github api
            assets = [x['name'] for x in response['assets']]  # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...]
            tag = response['tag_name']  # i.e. 'v1.0'
        except Exception:  # fallback plan
            # NOTE: the uploaded fallback list repeated 'yolov5.pt' twice; the s/m names are assumed here
            assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']
            tag = subprocess.check_output('git tag', shell=True).decode('utf-8').split('\n')[-2]

        name = file.name
        if name in assets:
            msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/'
            redundant = False  # second download option
            try:  # GitHub
                url = f'https://github.com/{repo}/releases/download/{tag}/{name}'
                print(f'Downloading {url} to {file}...')
                torch.hub.download_url_to_file(url, file)
                assert file.exists() and file.stat().st_size > 1E6  # check
            except Exception as e:  # GCP
                print(f'Download error: {e}')
                assert redundant, 'No secondary mirror'
                url = f'https://storage.googleapis.com/{repo}/ckpt/{name}'
                print(f'Downloading {url} to {file}...')
                os.system(f'curl -L {url} -o {file}')  # torch.hub.download_url_to_file(url, weights)
            finally:
                if not file.exists() or file.stat().st_size < 1E6:  # check
                    file.unlink(missing_ok=True)  # remove partial downloads
                    print(f'ERROR: Download failure: {msg}')
                print('')
                return


def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'):
    # Downloads a file from Google Drive. from yolov5.utils.google_utils import *; gdrive_download()
    t = time.time()
    file = Path(file)
    cookie = Path('cookie')  # gdrive cookie
    print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='')
    file.unlink(missing_ok=True)  # remove existing file
    cookie.unlink(missing_ok=True)  # remove existing cookie

    # Attempt file download
    out = "NUL" if platform.system() == "Windows" else "/dev/null"
    os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}')
    if os.path.exists('cookie'):  # large file
        s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}'
    else:  # small file
        s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"'
    r = os.system(s)  # execute, capture return
    cookie.unlink(missing_ok=True)  # remove existing cookie

    # Error check
    if r != 0:
        file.unlink(missing_ok=True)  # remove partial
        print('Download error ')  # raise Exception('Download error')
        return r

    # Unzip if archive
    if file.suffix == '.zip':
        print('unzipping... ', end='')
        os.system(f'unzip -q {file}')  # unzip
        file.unlink()  # remove zip to free space

    print(f'Done ({time.time() - t:.1f}s)')
    return r


def get_token(cookie="./cookie"):
    with open(cookie) as f:
        for line in f:
            if "download" in line:
                return line.split()[-1]
    return ""

# def upload_blob(bucket_name, source_file_name, destination_blob_name):
#     # Uploads a file to a bucket
#     # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
#
#     storage_client = storage.Client()
#     bucket = storage_client.get_bucket(bucket_name)
#     blob = bucket.blob(destination_blob_name)
#
#     blob.upload_from_filename(source_file_name)
#
#     print('File {} uploaded to {}.'.format(
#         source_file_name,
#         destination_blob_name))
#
#
# def download_blob(bucket_name, source_blob_name, destination_file_name):
#     # Downloads a blob from a bucket
#     storage_client = storage.Client()
#     bucket = storage_client.get_bucket(bucket_name)
#     blob = bucket.blob(source_blob_name)
#
#     blob.download_to_filename(destination_file_name)
#
#     print('Blob {} downloaded to {}.'.format(
#         source_blob_name,
#         destination_file_name))
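A usage sketch for the downloader above (illustrative only, not part of the upload; the checkpoint name is an assumption, and the call reaches the network when the file is absent):

# Illustrative usage (not part of the upload).
from utils.google_utils import attempt_download

attempt_download('yolov5s.pt')  # no-op when the file already exists locally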
utils/infer_utils.py
ADDED
@@ -0,0 +1,36 @@
import torch


def decode_infer(output, stride, gt_per_grid, numclass):
    # NOTE: as uploaded this free function referenced self.gt_per_grid / self.numclass
    # (it was evidently lifted from a method); both values are taken as explicit
    # arguments here so the code can run.
    # logging.info(torch.tensor(output.shape[0]))
    # logging.info(output.shape)
    # # bz is batch-size
    # bz = tuple(torch.tensor(output.shape[0]))
    # gridsize = tuple(torch.tensor(output.shape[-1]))
    # logging.info(gridsize)
    sh = torch.tensor(output.shape)
    bz = sh[0]
    gridsize = sh[-1]

    output = output.permute(0, 2, 3, 1)
    output = output.view(bz, gridsize, gridsize, gt_per_grid, 5 + numclass)
    x1y1, x2y2, conf, prob = torch.split(output, [2, 2, 1, numclass], dim=4)

    shiftx = torch.arange(0, gridsize, dtype=torch.float32)
    shifty = torch.arange(0, gridsize, dtype=torch.float32)
    shifty, shiftx = torch.meshgrid([shiftx, shifty])
    shiftx = shiftx.unsqueeze(-1).repeat(bz, 1, 1, gt_per_grid)
    shifty = shifty.unsqueeze(-1).repeat(bz, 1, 1, gt_per_grid)

    xy_grid = torch.stack([shiftx, shifty], dim=4).cuda()
    x1y1 = (xy_grid + 0.5 - torch.exp(x1y1)) * stride
    x2y2 = (xy_grid + 0.5 + torch.exp(x2y2)) * stride

    xyxy = torch.cat((x1y1, x2y2), dim=4)
    conf = torch.sigmoid(conf)
    prob = torch.sigmoid(prob)
    output = torch.cat((xyxy, conf, prob), 4)
    output = output.view(bz, -1, 5 + numclass)
    return output
ADDED
@@ -0,0 +1,304 @@
|
1 |
+
# Loss functions
|
2 |
+
|
3 |
+
import torch
|
4 |
+
import torch.nn as nn
|
5 |
+
import numpy as np
|
6 |
+
from utils.general import bbox_iou
|
7 |
+
from utils.torch_utils import is_parallel
|
8 |
+
|
9 |
+
|
10 |
+
def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
|
11 |
+
# return positive, negative label smoothing BCE targets
|
12 |
+
return 1.0 - 0.5 * eps, 0.5 * eps
|
13 |
+
|
14 |
+
|
15 |
+
class BCEBlurWithLogitsLoss(nn.Module):
|
16 |
+
# BCEwithLogitLoss() with reduced missing label effects.
|
17 |
+
def __init__(self, alpha=0.05):
|
18 |
+
super(BCEBlurWithLogitsLoss, self).__init__()
|
19 |
+
self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss()
|
20 |
+
self.alpha = alpha
|
21 |
+
|
22 |
+
def forward(self, pred, true):
|
23 |
+
loss = self.loss_fcn(pred, true)
|
24 |
+
pred = torch.sigmoid(pred) # prob from logits
|
25 |
+
dx = pred - true # reduce only missing label effects
|
26 |
+
# dx = (pred - true).abs() # reduce missing label and false label effects
|
27 |
+
alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
|
28 |
+
loss *= alpha_factor
|
29 |
+
return loss.mean()
|
30 |
+
|
31 |
+
|
32 |
+
class FocalLoss(nn.Module):
|
33 |
+
# Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
|
34 |
+
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
|
35 |
+
super(FocalLoss, self).__init__()
|
36 |
+
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
|
37 |
+
self.gamma = gamma
|
38 |
+
self.alpha = alpha
|
39 |
+
self.reduction = loss_fcn.reduction
|
40 |
+
self.loss_fcn.reduction = 'none' # required to apply FL to each element
|
41 |
+
|
42 |
+
def forward(self, pred, true):
|
43 |
+
loss = self.loss_fcn(pred, true)
|
44 |
+
# p_t = torch.exp(-loss)
|
45 |
+
# loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability
|
46 |
+
|
47 |
+
# TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
|
48 |
+
pred_prob = torch.sigmoid(pred) # prob from logits
|
49 |
+
p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
|
50 |
+
alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
|
51 |
+
modulating_factor = (1.0 - p_t) ** self.gamma
|
52 |
+
loss *= alpha_factor * modulating_factor
|
53 |
+
|
54 |
+
if self.reduction == 'mean':
|
55 |
+
return loss.mean()
|
56 |
+
elif self.reduction == 'sum':
|
57 |
+
return loss.sum()
|
58 |
+
else: # 'none'
|
59 |
+
return loss
|
60 |
+
|
61 |
+
|
62 |
+
class QFocalLoss(nn.Module):
|
63 |
+
# Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
|
64 |
+
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
|
65 |
+
super(QFocalLoss, self).__init__()
|
66 |
+
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
|
67 |
+
self.gamma = gamma
|
68 |
+
self.alpha = alpha
|
69 |
+
self.reduction = loss_fcn.reduction
|
70 |
+
self.loss_fcn.reduction = 'none' # required to apply FL to each element
|
71 |
+
|
72 |
+
def forward(self, pred, true):
|
73 |
+
loss = self.loss_fcn(pred, true)
|
74 |
+
|
75 |
+
pred_prob = torch.sigmoid(pred) # prob from logits
|
76 |
+
alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
|
77 |
+
modulating_factor = torch.abs(true - pred_prob) ** self.gamma
|
78 |
+
loss *= alpha_factor * modulating_factor
|
79 |
+
|
80 |
+
if self.reduction == 'mean':
|
81 |
+
return loss.mean()
|
82 |
+
elif self.reduction == 'sum':
|
83 |
+
return loss.sum()
|
84 |
+
else: # 'none'
|
85 |
+
return loss
|
86 |
+
|
87 |
+
class WingLoss(nn.Module):
|
88 |
+
def __init__(self, w=10, e=2):
|
89 |
+
super(WingLoss, self).__init__()
|
90 |
+
# https://arxiv.org/pdf/1711.06753v4.pdf Figure 5
|
91 |
+
self.w = w
|
92 |
+
self.e = e
|
93 |
+
self.C = self.w - self.w * np.log(1 + self.w / self.e)
|
94 |
+
|
95 |
+
def forward(self, x, t, sigma=1):
|
96 |
+
weight = torch.ones_like(t)
|
97 |
+
weight[torch.where(t==-1)] = 0
|
98 |
+
diff = weight * (x - t)
|
99 |
+
abs_diff = diff.abs()
|
100 |
+
flag = (abs_diff.data < self.w).float()
|
101 |
+
y = flag * self.w * torch.log(1 + abs_diff / self.e) + (1 - flag) * (abs_diff - self.C)
|
102 |
+
return y.sum()
|
103 |
+
|


class LandmarksLoss(nn.Module):
    # Wing loss over landmark coordinates; entries zeroed in `mask` (missing landmarks) contribute nothing.
    def __init__(self, alpha=1.0):
        super(LandmarksLoss, self).__init__()
        self.loss_fcn = WingLoss()  # nn.SmoothL1Loss(reduction='sum')
        self.alpha = alpha

    def forward(self, pred, truel, mask):
        loss = self.loss_fcn(pred * mask, truel * mask)
        return loss / (torch.sum(mask) + 10e-14)


def compute_loss(p, targets, model):  # predictions, targets, model
    device = targets.device
    lcls, lbox, lobj, lmark = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
    tcls, tbox, indices, anchors, tlandmarks, lmks_mask = build_targets(p, targets, model)  # targets
    h = model.hyp  # hyperparameters

    # Define criteria
    BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))  # weight=model.class_weights
    BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))

    landmarks_loss = LandmarksLoss(1.0)

    # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
    cp, cn = smooth_BCE(eps=0.0)

    # Focal loss
    g = h['fl_gamma']  # focal loss gamma
    if g > 0:
        BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

    # Losses
    nt = 0  # number of targets
    no = len(p)  # number of outputs
    balance = [4.0, 1.0, 0.4] if no == 3 else [4.0, 1.0, 0.4, 0.1]  # P3-5 or P3-6
    for i, pi in enumerate(p):  # layer index, layer predictions
        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
        tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj

        n = b.shape[0]  # number of targets
        if n:
            nt += n  # cumulative targets
            ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

            # Regression
            pxy = ps[:, :2].sigmoid() * 2. - 0.5
            pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
            pbox = torch.cat((pxy, pwh), 1)  # predicted box
            iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True)  # iou(prediction, target)
            lbox += (1.0 - iou).mean()  # iou loss

            # Objectness
            tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * iou.detach().clamp(0).type(tobj.dtype)  # iou ratio

            # Classification
            if model.nc > 1:  # cls loss (only if multiple classes)
                t = torch.full_like(ps[:, 15:], cn, device=device)  # targets
                t[range(n), tcls[i]] = cp
                lcls += BCEcls(ps[:, 15:], t)  # BCE

            # Append targets to text file
            # with open('targets.txt', 'a') as file:
            #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

            # Landmarks loss
            # plandmarks = ps[:, 5:15].sigmoid() * 8. - 4.
            plandmarks = ps[:, 5:15]

            plandmarks[:, 0:2] = plandmarks[:, 0:2] * anchors[i]
            plandmarks[:, 2:4] = plandmarks[:, 2:4] * anchors[i]
            plandmarks[:, 4:6] = plandmarks[:, 4:6] * anchors[i]
            plandmarks[:, 6:8] = plandmarks[:, 6:8] * anchors[i]
            plandmarks[:, 8:10] = plandmarks[:, 8:10] * anchors[i]

            lmark += landmarks_loss(plandmarks, tlandmarks[i], lmks_mask[i])

        lobj += BCEobj(pi[..., 4], tobj) * balance[i]  # obj loss

    s = 3 / no  # output count scaling
    lbox *= h['box'] * s
    lobj *= h['obj'] * s * (1.4 if no == 4 else 1.)
    lcls *= h['cls'] * s
    lmark *= h['landmark'] * s

    bs = tobj.shape[0]  # batch size

    loss = lbox + lobj + lcls + lmark
    return loss * bs, torch.cat((lbox, lobj, lcls, lmark, loss)).detach()
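
# Illustrative sketch (not part of the original file): how compute_loss is typically consumed
# inside a training step. `model`, `imgs`, `targets` and `device` are assumed to come from the
# usual train.py loop (targets as an (n, 16) tensor: image index, class, box xywh, 10 landmark
# coordinates); only the call pattern is shown.
#
#     pred = model(imgs)                                    # list of per-scale predictions
#     loss, loss_items = compute_loss(pred, targets.to(device), model)
#     loss.backward()                                       # loss is already scaled by batch size
#     # loss_items = (lbox, lobj, lcls, lmark, total) for logging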


def build_targets(p, targets, model):
    # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
    det = model.module.model[-1] if is_parallel(model) else model.model[-1]  # Detect() module
    na, nt = det.na, targets.shape[0]  # number of anchors, targets
    tcls, tbox, indices, anch, landmarks, lmks_mask = [], [], [], [], [], []
    # gain = torch.ones(7, device=targets.device)  # normalized to gridspace gain
    gain = torch.ones(17, device=targets.device)
    ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
    targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices

    g = 0.5  # bias
    off = torch.tensor([[0, 0],
                        [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
                        # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
                        ], device=targets.device).float() * g  # offsets

    for i in range(det.nl):
        anchors = det.anchors[i]
        gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain
        # landmarks: 10 coordinate values
        gain[6:16] = torch.tensor(p[i].shape)[[3, 2, 3, 2, 3, 2, 3, 2, 3, 2]]  # xyxy gain

        # Match targets to anchors
        t = targets * gain
        if nt:
            # Matches
            r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
            j = torch.max(r, 1. / r).max(2)[0] < model.hyp['anchor_t']  # compare
            # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
            t = t[j]  # filter

            # Offsets
            gxy = t[:, 2:4]  # grid xy
            gxi = gain[[2, 3]] - gxy  # inverse
            j, k = ((gxy % 1. < g) & (gxy > 1.)).T
            l, m = ((gxi % 1. < g) & (gxi > 1.)).T
            j = torch.stack((torch.ones_like(j), j, k, l, m))
            t = t.repeat((5, 1, 1))[j]
            offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
        else:
            t = targets[0]
            offsets = 0

        # Define
        b, c = t[:, :2].long().T  # image, class
        gxy = t[:, 2:4]  # grid xy
        gwh = t[:, 4:6]  # grid wh
        gij = (gxy - offsets).long()
        gi, gj = gij.T  # grid xy indices

        # Append
        a = t[:, 16].long()  # anchor indices
        indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
        tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
        anch.append(anchors[a])  # anchors
        tcls.append(c)  # class

        # Landmarks
        lks = t[:, 6:16]
        # lks_mask = lks > 0
        # lks_mask = lks_mask.float()
        lks_mask = torch.where(lks < 0, torch.full_like(lks, 0.), torch.full_like(lks, 1.0))

        # Landmark coordinates should really be divided by the anchor width/height so the model can learn
        # a consistent encoding; using gwh would encode each landmark differently, with no common reference scale.

        lks[:, [0, 1]] = (lks[:, [0, 1]] - gij)
        lks[:, [2, 3]] = (lks[:, [2, 3]] - gij)
        lks[:, [4, 5]] = (lks[:, [4, 5]] - gij)
        lks[:, [6, 7]] = (lks[:, [6, 7]] - gij)
        lks[:, [8, 9]] = (lks[:, [8, 9]] - gij)

        '''
        # anch_w = torch.ones(5, device=targets.device).fill_(anchors[0][0])
        # anch_wh = torch.ones(5, device=targets.device)
        anch_f_0 = (a == 0).unsqueeze(1).repeat(1, 5)
        anch_f_1 = (a == 1).unsqueeze(1).repeat(1, 5)
        anch_f_2 = (a == 2).unsqueeze(1).repeat(1, 5)
        lks[:, [0, 2, 4, 6, 8]] = torch.where(anch_f_0, lks[:, [0, 2, 4, 6, 8]] / anchors[0][0], lks[:, [0, 2, 4, 6, 8]])
        lks[:, [0, 2, 4, 6, 8]] = torch.where(anch_f_1, lks[:, [0, 2, 4, 6, 8]] / anchors[1][0], lks[:, [0, 2, 4, 6, 8]])
        lks[:, [0, 2, 4, 6, 8]] = torch.where(anch_f_2, lks[:, [0, 2, 4, 6, 8]] / anchors[2][0], lks[:, [0, 2, 4, 6, 8]])

        lks[:, [1, 3, 5, 7, 9]] = torch.where(anch_f_0, lks[:, [1, 3, 5, 7, 9]] / anchors[0][1], lks[:, [1, 3, 5, 7, 9]])
        lks[:, [1, 3, 5, 7, 9]] = torch.where(anch_f_1, lks[:, [1, 3, 5, 7, 9]] / anchors[1][1], lks[:, [1, 3, 5, 7, 9]])
        lks[:, [1, 3, 5, 7, 9]] = torch.where(anch_f_2, lks[:, [1, 3, 5, 7, 9]] / anchors[2][1], lks[:, [1, 3, 5, 7, 9]])

        # new_lks = lks[lks_mask > 0]
        # print('new_lks: min --- ', torch.min(new_lks), ' max --- ', torch.max(new_lks))

        lks_mask_1 = torch.where(lks < -3, torch.full_like(lks, 0.), torch.full_like(lks, 1.0))
        lks_mask_2 = torch.where(lks > 3, torch.full_like(lks, 0.), torch.full_like(lks, 1.0))

        lks_mask_new = lks_mask * lks_mask_1 * lks_mask_2
        lks_mask_new[:, 0] = lks_mask_new[:, 0] * lks_mask_new[:, 1]
        lks_mask_new[:, 1] = lks_mask_new[:, 0] * lks_mask_new[:, 1]
        lks_mask_new[:, 2] = lks_mask_new[:, 2] * lks_mask_new[:, 3]
        lks_mask_new[:, 3] = lks_mask_new[:, 2] * lks_mask_new[:, 3]
        lks_mask_new[:, 4] = lks_mask_new[:, 4] * lks_mask_new[:, 5]
        lks_mask_new[:, 5] = lks_mask_new[:, 4] * lks_mask_new[:, 5]
        lks_mask_new[:, 6] = lks_mask_new[:, 6] * lks_mask_new[:, 7]
        lks_mask_new[:, 7] = lks_mask_new[:, 6] * lks_mask_new[:, 7]
        lks_mask_new[:, 8] = lks_mask_new[:, 8] * lks_mask_new[:, 9]
        lks_mask_new[:, 9] = lks_mask_new[:, 8] * lks_mask_new[:, 9]
        '''
        lks_mask_new = lks_mask
        lmks_mask.append(lks_mask_new)
        landmarks.append(lks)
        # print('lks: ', lks.size())

    return tcls, tbox, indices, anch, landmarks, lmks_mask
utils/metrics.py
ADDED
@@ -0,0 +1,200 @@
# Model validation metrics

from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
import torch

from . import general


def fitness(x):
    # Model fitness as a weighted combination of metrics
    w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return (x[:, :4] * w).sum(1)
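
# Illustrative sketch (not part of the original file): fitness() reduces each row of
# [P, R, mAP@0.5, mAP@0.5:0.95] to one scalar, weighting mAP@0.5:0.95 most heavily.
# The numbers are made up for the example.
#
#     x = np.array([[0.9, 0.8, 0.7, 0.5]])
#     fitness(x)  # -> array([0.52]) = 0.1 * 0.7 + 0.9 * 0.5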


def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision-recall_curve.png', names=[]):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    # Arguments
        tp:  True positives (nparray, nx1 or nx10).
        conf:  Objectness value from 0-1 (nparray).
        pred_cls:  Predicted object classes (nparray).
        target_cls:  True object classes (nparray).
        plot:  Plot precision-recall curve at mAP@0.5
        save_dir:  Plot save directory
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Sort by objectness
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]

    # Find unique classes
    unique_classes = np.unique(target_cls)

    # Create Precision-Recall curve and compute AP for each class
    px, py = np.linspace(0, 1, 1000), []  # for plotting
    pr_score = 0.1  # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
    s = [unique_classes.shape[0], tp.shape[1]]  # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
    ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c
        n_l = (target_cls == c).sum()  # number of labels
        n_p = i.sum()  # number of predictions

        if n_p == 0 or n_l == 0:
            continue
        else:
            # Accumulate FPs and TPs
            fpc = (1 - tp[i]).cumsum(0)
            tpc = tp[i].cumsum(0)

            # Recall
            recall = tpc / (n_l + 1e-16)  # recall curve
            r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0])  # r at pr_score, negative x, xp because xp decreases

            # Precision
            precision = tpc / (tpc + fpc)  # precision curve
            p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0])  # p at pr_score

            # AP from recall-precision curve
            for j in range(tp.shape[1]):
                ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
                if plot and (j == 0):
                    py.append(np.interp(px, mrec, mpre))  # precision at mAP@0.5

    # Compute F1 score (harmonic mean of precision and recall)
    f1 = 2 * p * r / (p + r + 1e-16)

    if plot:
        plot_pr_curve(px, py, ap, save_dir, names)

    return p, r, ap, f1, unique_classes.astype('int32')


def compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves
    # Arguments
        recall:    The recall curve (list)
        precision: The precision curve (list)
    # Returns
        Average precision, precision curve, recall curve
    """

    # Append sentinel values to beginning and end
    mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01]))
    mpre = np.concatenate(([1.], precision, [0.]))

    # Compute the precision envelope
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
    else:  # 'continuous'
        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve

    return ap, mpre, mrec
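
# Illustrative sketch (not part of the original file): compute_ap on a tiny hand-made
# precision/recall curve. The values are assumptions for the example; with the 'interp'
# method the result is the 101-point interpolated area under the precision envelope.
#
#     recall = np.array([0.2, 0.5, 1.0])
#     precision = np.array([1.0, 0.8, 0.6])
#     ap, mpre, mrec = compute_ap(recall, precision)  # ap is a float in [0, 1]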


class ConfusionMatrix:
    # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix
    def __init__(self, nc, conf=0.25, iou_thres=0.45):
        self.matrix = np.zeros((nc + 1, nc + 1))
        self.nc = nc  # number of classes
        self.conf = conf
        self.iou_thres = iou_thres

    def process_batch(self, detections, labels):
        """
        Update the confusion matrix for one batch of detections and labels.
        Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
        Arguments:
            detections (Array[N, 6]), x1, y1, x2, y2, conf, class
            labels (Array[M, 5]), class, x1, y1, x2, y2
        Returns:
            None, updates confusion matrix accordingly
        """
        detections = detections[detections[:, 4] > self.conf]
        gt_classes = labels[:, 0].int()
        detection_classes = detections[:, 5].int()
        iou = general.box_iou(labels[:, 1:], detections[:, :4])

        x = torch.where(iou > self.iou_thres)
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
            if x[0].shape[0] > 1:
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
        else:
            matches = np.zeros((0, 3))

        n = matches.shape[0] > 0
        m0, m1, _ = matches.transpose().astype(np.int16)
        for i, gc in enumerate(gt_classes):
            j = m0 == i
            if n and sum(j) == 1:
                self.matrix[gc, detection_classes[m1[j]]] += 1  # correct
            else:
                self.matrix[gc, self.nc] += 1  # background FP

        if n:
            for i, dc in enumerate(detection_classes):
                if not any(m1 == i):
                    self.matrix[self.nc, dc] += 1  # background FN

    def matrix(self):
        return self.matrix

    def plot(self, save_dir='', names=()):
        try:
            import seaborn as sn

            array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6)  # normalize
            array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)

            fig = plt.figure(figsize=(12, 9), tight_layout=True)
            sn.set(font_scale=1.0 if self.nc < 50 else 0.8)  # for label size
            labels = (0 < len(names) < 99) and len(names) == self.nc  # apply names to ticklabels
            sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True,
                       xticklabels=names + ['background FN'] if labels else "auto",
                       yticklabels=names + ['background FP'] if labels else "auto").set_facecolor((1, 1, 1))
            fig.axes[0].set_xlabel('True')
            fig.axes[0].set_ylabel('Predicted')
            fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
        except Exception as e:
            pass

    def print(self):
        for i in range(self.nc + 1):
            print(' '.join(map(str, self.matrix[i])))
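
# Illustrative sketch (not part of the original file): the expected usage pattern for
# ConfusionMatrix during validation. The tensors below are dummy assumptions: one
# ground-truth box of class 0 and one matching detection.
def _confusion_matrix_demo():
    cm = ConfusionMatrix(nc=1)
    labels = torch.tensor([[0, 10., 10., 50., 50.]])            # class, x1, y1, x2, y2
    detections = torch.tensor([[11., 11., 49., 49., 0.9, 0.]])  # x1, y1, x2, y2, conf, class
    cm.process_batch(detections, labels)
    cm.print()                                                  # 2x2 matrix incl. background row/col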


# Plots ----------------------------------------------------------------------------------------------------------------

def plot_pr_curve(px, py, ap, save_dir='.', names=()):
    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
    py = np.stack(py, axis=1)

    if 0 < len(names) < 21:  # display per-class legend if < 21 classes
        for i, y in enumerate(py.T):
            ax.plot(px, y, linewidth=1, label=f'{names[i]} %.3f' % ap[i, 0])  # plot(recall, precision)
    else:
        ax.plot(px, py, linewidth=1, color='grey')  # plot(recall, precision)

    ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())
    ax.set_xlabel('Recall')
    ax.set_ylabel('Precision')
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    fig.savefig(Path(save_dir) / 'precision_recall_curve.png', dpi=250)
utils/plots.py
ADDED
@@ -0,0 +1,413 @@
# Plotting utils

import glob
import math
import os
import random
from copy import copy
from pathlib import Path

import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
import yaml
from PIL import Image, ImageDraw
from scipy.signal import butter, filtfilt

from utils.general import xywh2xyxy, xyxy2xywh
from utils.metrics import fitness

# Settings
matplotlib.rc('font', **{'size': 11})
matplotlib.use('Agg')  # for writing to files only


def color_list():
    # Return first 10 plt colors as (r,g,b) https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
    def hex2rgb(h):
        return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))

    return [hex2rgb(h) for h in plt.rcParams['axes.prop_cycle'].by_key()['color']]


def hist2d(x, y, n=100):
    # 2d histogram used in labels.png and evolve.png
    xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
    hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
    xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
    yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
    return np.log(hist[xidx, yidx])


def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
    # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
    def butter_lowpass(cutoff, fs, order):
        nyq = 0.5 * fs
        normal_cutoff = cutoff / nyq
        return butter(order, normal_cutoff, btype='low', analog=False)

    b, a = butter_lowpass(cutoff, fs, order=order)
    return filtfilt(b, a, data)  # forward-backward filter


def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    # Plots one bounding box on image img
    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
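
# Illustrative sketch (not part of the original file): drawing a single labelled box with
# plot_one_box on a blank canvas. The canvas size, box and label are assumptions for the demo.
def _plot_one_box_demo():
    canvas = np.zeros((320, 320, 3), dtype=np.uint8)  # black BGR image
    plot_one_box([40, 40, 200, 180], canvas, label='face 0.91', color=(0, 255, 0))
    cv2.imwrite('plot_one_box_demo.jpg', canvas)      # writes the annotated canvas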


def plot_wh_methods():  # from utils.plots import *; plot_wh_methods()
    # Compares the two methods for width-height anchor multiplication
    # https://github.com/ultralytics/yolov3/issues/168
    x = np.arange(-4.0, 4.0, .1)
    ya = np.exp(x)
    yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2

    fig = plt.figure(figsize=(6, 3), tight_layout=True)
    plt.plot(x, ya, '.-', label='YOLOv3')
    plt.plot(x, yb ** 2, '.-', label='YOLOv5 ^2')
    plt.plot(x, yb ** 1.6, '.-', label='YOLOv5 ^1.6')
    plt.xlim(left=-4, right=4)
    plt.ylim(bottom=0, top=6)
    plt.xlabel('input')
    plt.ylabel('output')
    plt.grid()
    plt.legend()
    fig.savefig('comparison.png', dpi=200)


def output_to_target(output):
    # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
    targets = []
    for i, o in enumerate(output):
        for *box, conf, cls in o.cpu().numpy():
            targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])
    return np.array(targets)


def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
    # Plot image grid with labels

    if isinstance(images, torch.Tensor):
        images = images.cpu().float().numpy()
    if isinstance(targets, torch.Tensor):
        targets = targets.cpu().numpy()

    # un-normalise
    if np.max(images[0]) <= 1:
        images *= 255

    tl = 3  # line thickness
    tf = max(tl - 1, 1)  # font thickness
    bs, _, h, w = images.shape  # batch size, _, height, width
    bs = min(bs, max_subplots)  # limit plot images
    ns = np.ceil(bs ** 0.5)  # number of subplots (square)

    # Check if we should resize
    scale_factor = max_size / max(h, w)
    if scale_factor < 1:
        h = math.ceil(scale_factor * h)
        w = math.ceil(scale_factor * w)

    # colors = color_list()  # list of colors
    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)  # init
    for i, img in enumerate(images):
        if i == max_subplots:  # if last batch has fewer images than we expect
            break

        block_x = int(w * (i // ns))
        block_y = int(h * (i % ns))

        img = img.transpose(1, 2, 0)
        if scale_factor < 1:
            img = cv2.resize(img, (w, h))

        mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
        if len(targets) > 0:
            image_targets = targets[targets[:, 0] == i]
            boxes = xywh2xyxy(image_targets[:, 2:6]).T
            classes = image_targets[:, 1].astype('int')
            labels = image_targets.shape[1] == 6  # labels if no conf column
            conf = None if labels else image_targets[:, 6]  # check for confidence presence (label vs pred)

            if boxes.shape[1]:
                if boxes.max() <= 1.01:  # if normalized with tolerance 0.01
                    boxes[[0, 2]] *= w  # scale to pixels
                    boxes[[1, 3]] *= h
                elif scale_factor < 1:  # absolute coords need scale if image scales
                    boxes *= scale_factor
            boxes[[0, 2]] += block_x
            boxes[[1, 3]] += block_y
            for j, box in enumerate(boxes.T):
                cls = int(classes[j])
                # color = colors[cls % len(colors)]
                cls = names[cls] if names else cls
                if labels or conf[j] > 0.25:  # 0.25 conf thresh
                    label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])
                    plot_one_box(box, mosaic, label=label, color=None, line_thickness=tl)

        # Draw image filename labels
        if paths:
            label = Path(paths[i]).name[:40]  # trim to 40 char
            t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
            cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
                        lineType=cv2.LINE_AA)

        # Image border
        cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)

    if fname:
        r = min(1280. / max(h, w) / ns, 1.0)  # ratio to limit image size
        mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)
        # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB))  # cv2 save
        Image.fromarray(mosaic).save(fname)  # PIL save
    return mosaic


def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
    # Plot LR simulating training for full epochs
    optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
    y = []
    for _ in range(epochs):
        scheduler.step()
        y.append(optimizer.param_groups[0]['lr'])
    plt.plot(y, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.grid()
    plt.xlim(0, epochs)
    plt.ylim(0)
    plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
    plt.close()


def plot_test_txt():  # from utils.plots import *; plot_test()
    # Plot test.txt histograms
    x = np.loadtxt('test.txt', dtype=np.float32)
    box = xyxy2xywh(x[:, :4])
    cx, cy = box[:, 0], box[:, 1]

    fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
    ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
    ax.set_aspect('equal')
    plt.savefig('hist2d.png', dpi=300)

    fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
    ax[0].hist(cx, bins=600)
    ax[1].hist(cy, bins=600)
    plt.savefig('hist1d.png', dpi=200)


def plot_targets_txt():  # from utils.plots import *; plot_targets_txt()
    # Plot targets.txt histograms
    x = np.loadtxt('targets.txt', dtype=np.float32).T
    s = ['x targets', 'y targets', 'width targets', 'height targets']
    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    for i in range(4):
        ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
        ax[i].legend()
        ax[i].set_title(s[i])
    plt.savefig('targets.jpg', dpi=200)


def plot_study_txt(path='study/', x=None):  # from utils.plots import *; plot_study_txt()
    # Plot study.txt generated by test.py
    fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
    ax = ax.ravel()

    fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
    for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']]:
        y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
        x = np.arange(y.shape[1]) if x is None else np.array(x)
        s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
        for i in range(7):
            ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
            ax[i].set_title(s[i])

        j = y[3].argmax() + 1
        ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8,
                 label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))

    ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
             'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')

    ax2.grid()
    ax2.set_yticks(np.arange(30, 60, 5))
    ax2.set_xlim(0, 30)
    ax2.set_ylim(29, 51)
    ax2.set_xlabel('GPU Speed (ms/img)')
    ax2.set_ylabel('COCO AP val')
    ax2.legend(loc='lower right')
    plt.savefig('test_study.png', dpi=300)


def plot_labels(labels, save_dir=Path(''), loggers=None):
    # plot dataset labels
    print('Plotting labels... ')
    c, b = labels[:, 0], labels[:, 1:5].transpose()  # classes, boxes
    nc = int(c.max() + 1)  # number of classes
    colors = color_list()
    x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])

    # seaborn correlogram
    sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
    plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
    plt.close()

    # matplotlib labels
    matplotlib.use('svg')  # faster
    ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
    ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
    ax[0].set_xlabel('classes')
    sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
    sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)

    # rectangles
    labels[:, 1:3] = 0.5  # center
    labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
    img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
    # for cls, *box in labels[:1000]:
    #     ImageDraw.Draw(img).rectangle(box, width=1, outline=colors[int(cls) % 10])  # plot
    ax[1].imshow(img)
    ax[1].axis('off')

    for a in [0, 1, 2, 3]:
        for s in ['top', 'right', 'left', 'bottom']:
            ax[a].spines[s].set_visible(False)

    plt.savefig(save_dir / 'labels.jpg', dpi=200)
    matplotlib.use('Agg')
    plt.close()

    # loggers
    for k, v in loggers.items() or {}:
        if k == 'wandb' and v:
            v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]})


def plot_evolution(yaml_file='data/hyp.finetune.yaml'):  # from utils.plots import *; plot_evolution()
    # Plot hyperparameter evolution results in evolve.txt
    with open(yaml_file) as f:
        hyp = yaml.load(f, Loader=yaml.SafeLoader)
    x = np.loadtxt('evolve.txt', ndmin=2)
    f = fitness(x)
    # weights = (f - f.min()) ** 2  # for weighted results
    plt.figure(figsize=(10, 12), tight_layout=True)
    matplotlib.rc('font', **{'size': 8})
    for i, (k, v) in enumerate(hyp.items()):
        y = x[:, i + 7]
        # mu = (y * weights).sum() / weights.sum()  # best weighted result
        mu = y[f.argmax()]  # best single result
        plt.subplot(6, 5, i + 1)
        plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
        plt.plot(mu, f.max(), 'k+', markersize=15)
        plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9})  # limit to 40 characters
        if i % 5 != 0:
            plt.yticks([])
        print('%15s: %.3g' % (k, mu))
    plt.savefig('evolve.png', dpi=200)
    print('\nPlot saved as evolve.png')


def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
    # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()
    ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
    s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']
    files = list(Path(save_dir).glob('frames*.txt'))
    for fi, f in enumerate(files):
        try:
            results = np.loadtxt(f, ndmin=2).T[:, 90:-30]  # clip first and last rows
            n = results.shape[1]  # number of rows
            x = np.arange(start, min(stop, n) if stop else n)
            results = results[:, x]
            t = (results[0] - results[0].min())  # set t0=0s
            results[0] = x
            for i, a in enumerate(ax):
                if i < len(results):
                    label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
                    a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)
                    a.set_title(s[i])
                    a.set_xlabel('time (s)')
                    # if fi == len(files) - 1:
                    #     a.set_ylim(bottom=0)
                    for side in ['top', 'right']:
                        a.spines[side].set_visible(False)
                else:
                    a.remove()
        except Exception as e:
            print('Warning: Plotting error for %s; %s' % (f, e))

    ax[1].legend()
    plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)


def plot_results_overlay(start=0, stop=0):  # from utils.plots import *; plot_results_overlay()
    # Plot training 'results*.txt', overlaying train and val losses
    s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95']  # legends
    t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1']  # titles
    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
        results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
        n = results.shape[1]  # number of rows
        x = range(start, min(stop, n) if stop else n)
        fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
        ax = ax.ravel()
        for i in range(5):
            for j in [i, i + 5]:
                y = results[j, x]
                ax[i].plot(x, y, marker='.', label=s[j])
                # y_smooth = butter_lowpass_filtfilt(y)
                # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])

            ax[i].set_title(t[i])
            ax[i].legend()
            ax[i].set_ylabel(f) if i == 0 else None  # add filename
        fig.savefig(f.replace('.txt', '.png'), dpi=200)


def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):
    # Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp')
    fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
    ax = ax.ravel()
    s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall',
         'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
    if bucket:
        # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
        files = ['results%g.txt' % x for x in id]
        c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id)
        os.system(c)
    else:
        files = list(Path(save_dir).glob('results*.txt'))
    assert len(files), 'No results.txt files found in %s, nothing to plot.' % os.path.abspath(save_dir)
    for fi, f in enumerate(files):
        try:
            results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
            n = results.shape[1]  # number of rows
            x = range(start, min(stop, n) if stop else n)
            for i in range(10):
                y = results[i, x]
                if i in [0, 1, 2, 5, 6, 7]:
                    y[y == 0] = np.nan  # don't show zero loss values
                    # y /= y[0]  # normalize
                label = labels[fi] if len(labels) else f.stem
                ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)
                ax[i].set_title(s[i])
                # if i in [5, 6, 7]:  # share train and val loss y axes
                #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
        except Exception as e:
            print('Warning: Plotting error for %s; %s' % (f, e))

    ax[1].legend()
    fig.savefig(Path(save_dir) / 'results.png', dpi=200)
utils/torch_utils.py
ADDED
@@ -0,0 +1,294 @@
# PyTorch utils

import logging
import math
import os
import subprocess
import time
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path

import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torchvision

try:
    import thop  # for FLOPS computation
except ImportError:
    thop = None
logger = logging.getLogger(__name__)


@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """
    Decorator to make all processes in distributed training wait for each local_master to do something.
    """
    if local_rank not in [-1, 0]:
        torch.distributed.barrier()
    yield
    if local_rank == 0:
        torch.distributed.barrier()


def init_torch_seeds(seed=0):
    # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html
    torch.manual_seed(seed)
    if seed == 0:  # slower, more reproducible
        cudnn.benchmark, cudnn.deterministic = False, True
    else:  # faster, less reproducible
        cudnn.benchmark, cudnn.deterministic = True, False


def git_describe():
    # return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe
    if Path('.git').exists():
        return subprocess.check_output('git describe --tags --long --always', shell=True).decode('utf-8')[:-1]
    else:
        return ''


def select_device(device='', batch_size=None):
    # device = 'cpu' or '0' or '0,1,2,3'
    s = f'YOLOv5 {git_describe()} torch {torch.__version__} '  # string
    cpu = device.lower() == 'cpu'
    if cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force torch.cuda.is_available() = False
    elif device:  # non-cpu device requested
        os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable
        assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested'  # check availability

    cuda = not cpu and torch.cuda.is_available()
    if cuda:
        n = torch.cuda.device_count()
        if n > 1 and batch_size:  # check that batch_size is compatible with device_count
            assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
        space = ' ' * len(s)
        for i, d in enumerate(device.split(',') if device else range(n)):
            p = torch.cuda.get_device_properties(i)
            s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n"  # bytes to MB
    else:
        s += 'CPU\n'

    logger.info(s)  # skip a line
    return torch.device('cuda:0' if cuda else 'cpu')
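
# Illustrative sketch (not part of the original file): typical select_device calls. The
# device strings are the documented forms above; the logger lists each visible GPU.
#
#     device = select_device('')                    # first CUDA device if available, else CPU
#     device = select_device('cpu')                 # force CPU
#     device = select_device('0,1', batch_size=16)  # two GPUs; batch size must divide evenly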


def time_synchronized():
    # pytorch-accurate time
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    return time.time()


def profile(x, ops, n=100, device=None):
    # profile a pytorch module or list of modules. Example usage:
    #     x = torch.randn(16, 3, 640, 640)  # input
    #     m1 = lambda x: x * torch.sigmoid(x)
    #     m2 = nn.SiLU()
    #     profile(x, [m1, m2], n=100)  # profile speed over 100 iterations

    device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    x = x.to(device)
    x.requires_grad = True
    print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '')
    print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}")
    for m in ops if isinstance(ops, list) else [ops]:
        m = m.to(device) if hasattr(m, 'to') else m  # device
        m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m  # type
        dtf, dtb, t = 0., 0., [0., 0., 0.]  # dt forward, backward
        try:
            flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # GFLOPS
        except:
            flops = 0

        for _ in range(n):
            t[0] = time_synchronized()
            y = m(x)
            t[1] = time_synchronized()
            try:
                _ = y.sum().backward()
                t[2] = time_synchronized()
            except:  # no backward method
                t[2] = float('nan')
            dtf += (t[1] - t[0]) * 1000 / n  # ms per op forward
            dtb += (t[2] - t[1]) * 1000 / n  # ms per op backward

        s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
        s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
        p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0  # parameters
        print(f'{p:12.4g}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}')


def is_parallel(model):
    return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)


def intersect_dicts(da, db, exclude=()):
    # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
    return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}


def initialize_weights(model):
    for m in model.modules():
        t = type(m)
        if t is nn.Conv2d:
            pass  # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif t is nn.BatchNorm2d:
            m.eps = 1e-3
            m.momentum = 0.03
        elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
            m.inplace = True


def find_modules(model, mclass=nn.Conv2d):
    # Finds layer indices matching module class 'mclass'
    return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]


def sparsity(model):
    # Return global model sparsity
    a, b = 0., 0.
    for p in model.parameters():
        a += p.numel()
        b += (p == 0).sum()
    return b / a


def prune(model, amount=0.3):
    # Prune model to requested global sparsity
    import torch.nn.utils.prune as prune
    print('Pruning model... ', end='')
    for name, m in model.named_modules():
        if isinstance(m, nn.Conv2d):
            prune.l1_unstructured(m, name='weight', amount=amount)  # prune
            prune.remove(m, 'weight')  # make permanent
    print(' %.3g global sparsity' % sparsity(model))


def fuse_conv_and_bn(conv, bn):
    # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
    fusedconv = nn.Conv2d(conv.in_channels,
                          conv.out_channels,
                          kernel_size=conv.kernel_size,
                          stride=conv.stride,
                          padding=conv.padding,
                          groups=conv.groups,
                          bias=True).requires_grad_(False).to(conv.weight.device)

    # prepare filters
    w_conv = conv.weight.clone().view(conv.out_channels, -1)
    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
    fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))

    # prepare spatial bias
    b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
    fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)

    return fusedconv
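
# Illustrative sketch (not part of the original file): a quick numerical check that
# fuse_conv_and_bn preserves the eval-mode Conv2d + BatchNorm2d output. Layer sizes
# are arbitrary assumptions for the demo.
def _fuse_demo():
    conv = nn.Conv2d(3, 8, 3, bias=False).eval()
    bn = nn.BatchNorm2d(8).eval()
    x = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        before = bn(conv(x))
        after = fuse_conv_and_bn(conv, bn)(x)
    return torch.allclose(before, after, atol=1e-5)  # expected: True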


def model_info(model, verbose=False, img_size=640):
    # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320]
    n_p = sum(x.numel() for x in model.parameters())  # number parameters
    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients
    if verbose:
        print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
        for i, (name, p) in enumerate(model.named_parameters()):
            name = name.replace('module_list.', '')
            print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
                  (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))

    try:  # FLOPS
        from thop import profile
        stride = int(model.stride.max()) if hasattr(model, 'stride') else 32
        img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device)  # input
        flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2  # stride GFLOPS
        img_size = img_size if isinstance(img_size, list) else [img_size, img_size]  # expand if int/float
        fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride)  # 640x640 GFLOPS
    except (ImportError, Exception):
        fs = ''

    logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")


def load_classifier(name='resnet101', n=2):
    # Loads a pretrained model reshaped to n-class output
    model = torchvision.models.__dict__[name](pretrained=True)

    # ResNet model properties
    # input_size = [3, 224, 224]
    # input_space = 'RGB'
    # input_range = [0, 1]
    # mean = [0.485, 0.456, 0.406]
    # std = [0.229, 0.224, 0.225]

    # Reshape output to n classes
    filters = model.fc.weight.shape[1]
    model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
    model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)
    model.fc.out_features = n
    return model


def scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,416)
    # scales img(bs,3,y,x) by ratio constrained to gs-multiple
    if ratio == 1.0:
        return img
    else:
        h, w = img.shape[2:]
        s = (int(h * ratio), int(w * ratio))  # new size
        img = F.interpolate(img, size=s, mode='bilinear', align_corners=False)  # resize
        if not same_shape:  # pad/crop img
            h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
        return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447)  # value = imagenet mean


def copy_attr(a, b, include=(), exclude=()):
    # Copy attributes from b to a, options to only include [...] and to exclude [...]
    for k, v in b.__dict__.items():
        if (len(include) and k not in include) or k.startswith('_') or k in exclude:
            continue
        else:
            setattr(a, k, v)


class ModelEMA:
    """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
    Keep a moving average of everything in the model state_dict (parameters and buffers).
    This is intended to allow functionality like
    https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
    A smoothed version of the weights is necessary for some training schemes to perform well.
    This class is sensitive where it is initialized in the sequence of model init,
    GPU assignment and distributed training wrappers.
    """

    def __init__(self, model, decay=0.9999, updates=0):
        # Create EMA
        self.ema = deepcopy(model.module if is_parallel(model) else model).eval()  # FP32 EMA
        # if next(model.parameters()).device.type != 'cpu':
        #     self.ema.half()  # FP16 EMA
        self.updates = updates  # number of EMA updates
        self.decay = lambda x: decay * (1 - math.exp(-x / 2000))  # decay exponential ramp (to help early epochs)
        for p in self.ema.parameters():
            p.requires_grad_(False)

    def update(self, model):
        # Update EMA parameters
        with torch.no_grad():
            self.updates += 1
            d = self.decay(self.updates)

            msd = model.module.state_dict() if is_parallel(model) else model.state_dict()  # model state_dict
            for k, v in self.ema.state_dict().items():
                if v.dtype.is_floating_point:
                    v *= d
                    v += (1. - d) * msd[k].detach()

    def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
        # Update EMA attributes
        copy_attr(self.ema, model, include, exclude)
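
# Illustrative sketch (not part of the original file): how ModelEMA plugs into a training
# loop. A tiny nn.Sequential stands in for the real detection model; the optimizer step is
# shown only to mark where update() belongs.
def _model_ema_demo():
    model = nn.Sequential(nn.Linear(4, 4))
    ema = ModelEMA(model)
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    for _ in range(3):                         # stand-in for the batch loop
        loss = model(torch.randn(2, 4)).sum()
        opt.zero_grad(); loss.backward(); opt.step()
        ema.update(model)                      # EMA weights track the live model
    return ema.ema                             # evaluate/checkpoint the smoothed model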
utils/wandb_logging/__init__.py
ADDED
File without changes
utils/wandb_logging/log_dataset.py
ADDED
@@ -0,0 +1,24 @@
import argparse

import yaml

from wandb_utils import WandbLogger

WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'


def create_dataset_artifact(opt):
    with open(opt.data) as f:
        data = yaml.load(f, Loader=yaml.SafeLoader)  # data dict
    logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation')


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
    parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
    parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project')
    opt = parser.parse_args()
    opt.resume = False  # Explicitly disallow resume check for dataset upload job

    create_dataset_artifact(opt)
utils/wandb_logging/wandb_utils.py
ADDED
@@ -0,0 +1,306 @@
import json
import sys
from pathlib import Path

import torch
import yaml
from tqdm import tqdm

sys.path.append(str(Path(__file__).parent.parent.parent))  # add utils/ to path
from utils.datasets import LoadImagesAndLabels
from utils.datasets import img2label_paths
from utils.general import colorstr, xywh2xyxy, check_dataset

try:
    import wandb
    from wandb import init, finish
except ImportError:
    wandb = None

WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'


def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX):
    return from_string[len(prefix):]


def check_wandb_config_file(data_config_file):
    wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1))  # updated data.yaml path
    if Path(wandb_config).is_file():
        return wandb_config
    return data_config_file


def get_run_info(run_path):
    run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX))
    run_id = run_path.stem
    project = run_path.parent.stem
    model_artifact_name = 'run_' + run_id + '_model'
    return run_id, project, model_artifact_name


def check_wandb_resume(opt):
    process_wandb_config_ddp_mode(opt) if opt.global_rank not in [-1, 0] else None
    if isinstance(opt.resume, str):
        if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
            if opt.global_rank not in [-1, 0]:  # For resuming DDP runs
                run_id, project, model_artifact_name = get_run_info(opt.resume)
                api = wandb.Api()
                artifact = api.artifact(project + '/' + model_artifact_name + ':latest')
                modeldir = artifact.download()
                opt.weights = str(Path(modeldir) / "last.pt")
            return True
    return None


def process_wandb_config_ddp_mode(opt):
    with open(opt.data) as f:
        data_dict = yaml.load(f, Loader=yaml.SafeLoader)  # data dict
    train_dir, val_dir = None, None
    if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX):
        api = wandb.Api()
        train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias)
        train_dir = train_artifact.download()
        train_path = Path(train_dir) / 'data/images/'
        data_dict['train'] = str(train_path)

    if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX):
        api = wandb.Api()
        val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias)
        val_dir = val_artifact.download()
        val_path = Path(val_dir) / 'data/images/'
        data_dict['val'] = str(val_path)
    if train_dir or val_dir:
        ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml')
        with open(ddp_data_path, 'w') as f:
            yaml.dump(data_dict, f)
        opt.data = ddp_data_path


class WandbLogger():
    def __init__(self, opt, name, run_id, data_dict, job_type='Training'):
        # Pre-training routine --
        self.job_type = job_type
        self.wandb, self.wandb_run, self.data_dict = wandb, None if not wandb else wandb.run, data_dict
        # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the
        # WandbLogger's wandb.init call
        if isinstance(opt.resume, str):  # checks resume from artifact
            if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
                run_id, project, model_artifact_name = get_run_info(opt.resume)
                model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name
                assert wandb, 'install wandb to resume wandb runs'
                # Resume wandb-artifact:// runs here; workaround for not overwriting wandb.config
                self.wandb_run = wandb.init(id=run_id, project=project, resume='allow')
                opt.resume = model_artifact_name
        elif self.wandb:
            self.wandb_run = wandb.init(config=opt,
                                        resume="allow",
                                        project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
                                        name=name,
                                        job_type=job_type,
                                        id=run_id) if not wandb.run else wandb.run
        if self.wandb_run:
            if self.job_type == 'Training':
                if not opt.resume:
                    wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict
                    # Info useful for resuming from artifacts
                    self.wandb_run.config.opt = vars(opt)
                    self.wandb_run.config.data_dict = wandb_data_dict
                self.data_dict = self.setup_training(opt, data_dict)
            if self.job_type == 'Dataset Creation':
                self.data_dict = self.check_and_upload_dataset(opt)
        else:
            prefix = colorstr('wandb: ')
            print(f"{prefix}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)")

    def check_and_upload_dataset(self, opt):
        assert wandb, 'Install wandb to upload dataset'
        check_dataset(self.data_dict)
        config_path = self.log_dataset_artifact(opt.data,
                                                opt.single_cls,
                                                'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem)
        print("Created dataset config file ", config_path)
        with open(config_path) as f:
            wandb_data_dict = yaml.load(f, Loader=yaml.SafeLoader)
        return wandb_data_dict

    def setup_training(self, opt, data_dict):
        self.log_dict, self.current_epoch, self.log_imgs = {}, 0, 16  # Logging Constants
        self.bbox_interval = opt.bbox_interval
        if isinstance(opt.resume, str):
            modeldir, _ = self.download_model_artifact(opt)
            if modeldir:
                self.weights = Path(modeldir) / "last.pt"
                config = self.wandb_run.config
                opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str(
                    self.weights), config.save_period, config.total_batch_size, config.bbox_interval, config.epochs, \
                    config.opt['hyp']
            data_dict = dict(self.wandb_run.config.data_dict)  # eliminates the need for config file to resume
        if 'val_artifact' not in self.__dict__:  # If --upload_dataset is set, use the existing artifact, don't download
            self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'),
                                                                                           opt.artifact_alias)
|
141 |
+
self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'),
|
142 |
+
opt.artifact_alias)
|
143 |
+
self.result_artifact, self.result_table, self.val_table, self.weights = None, None, None, None
|
144 |
+
if self.train_artifact_path is not None:
|
145 |
+
train_path = Path(self.train_artifact_path) / 'data/images/'
|
146 |
+
data_dict['train'] = str(train_path)
|
147 |
+
if self.val_artifact_path is not None:
|
148 |
+
val_path = Path(self.val_artifact_path) / 'data/images/'
|
149 |
+
data_dict['val'] = str(val_path)
|
150 |
+
self.val_table = self.val_artifact.get("val")
|
151 |
+
self.map_val_table_path()
|
152 |
+
if self.val_artifact is not None:
|
153 |
+
self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
|
154 |
+
self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"])
|
155 |
+
if opt.bbox_interval == -1:
|
156 |
+
self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1
|
157 |
+
return data_dict
|
158 |
+
|
159 |
+
def download_dataset_artifact(self, path, alias):
|
160 |
+
if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX):
|
161 |
+
dataset_artifact = wandb.use_artifact(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias)
|
162 |
+
assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'"
|
163 |
+
datadir = dataset_artifact.download()
|
164 |
+
return datadir, dataset_artifact
|
165 |
+
return None, None
|
166 |
+
|
167 |
+
def download_model_artifact(self, opt):
|
168 |
+
if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
|
169 |
+
model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest")
|
170 |
+
assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist'
|
171 |
+
modeldir = model_artifact.download()
|
172 |
+
epochs_trained = model_artifact.metadata.get('epochs_trained')
|
173 |
+
total_epochs = model_artifact.metadata.get('total_epochs')
|
174 |
+
assert epochs_trained < total_epochs, 'training to %g epochs is finished, nothing to resume.' % (
|
175 |
+
total_epochs)
|
176 |
+
return modeldir, model_artifact
|
177 |
+
return None, None
|
178 |
+
|
179 |
+
def log_model(self, path, opt, epoch, fitness_score, best_model=False):
|
180 |
+
model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={
|
181 |
+
'original_url': str(path),
|
182 |
+
'epochs_trained': epoch + 1,
|
183 |
+
'save period': opt.save_period,
|
184 |
+
'project': opt.project,
|
185 |
+
'total_epochs': opt.epochs,
|
186 |
+
'fitness_score': fitness_score
|
187 |
+
})
|
188 |
+
model_artifact.add_file(str(path / 'last.pt'), name='last.pt')
|
189 |
+
wandb.log_artifact(model_artifact,
|
190 |
+
aliases=['latest', 'epoch ' + str(self.current_epoch), 'best' if best_model else ''])
|
191 |
+
print("Saving model artifact on epoch ", epoch + 1)
|
192 |
+
|
193 |
+
def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
|
194 |
+
with open(data_file) as f:
|
195 |
+
data = yaml.load(f, Loader=yaml.SafeLoader) # data dict
|
196 |
+
nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
|
197 |
+
names = {k: v for k, v in enumerate(names)} # to index dictionary
|
198 |
+
self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(
|
199 |
+
data['train']), names, name='train') if data.get('train') else None
|
200 |
+
self.val_artifact = self.create_dataset_table(LoadImagesAndLabels(
|
201 |
+
data['val']), names, name='val') if data.get('val') else None
|
202 |
+
if data.get('train'):
|
203 |
+
data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train')
|
204 |
+
if data.get('val'):
|
205 |
+
data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val')
|
206 |
+
path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1)) # updated data.yaml path
|
207 |
+
data.pop('download', None)
|
208 |
+
with open(path, 'w') as f:
|
209 |
+
yaml.dump(data, f)
|
210 |
+
|
211 |
+
if self.job_type == 'Training': # builds correct artifact pipeline graph
|
212 |
+
self.wandb_run.use_artifact(self.val_artifact)
|
213 |
+
self.wandb_run.use_artifact(self.train_artifact)
|
214 |
+
self.val_artifact.wait()
|
215 |
+
self.val_table = self.val_artifact.get('val')
|
216 |
+
self.map_val_table_path()
|
217 |
+
else:
|
218 |
+
self.wandb_run.log_artifact(self.train_artifact)
|
219 |
+
self.wandb_run.log_artifact(self.val_artifact)
|
220 |
+
return path
|
221 |
+
|
222 |
+
def map_val_table_path(self):
|
223 |
+
self.val_table_map = {}
|
224 |
+
print("Mapping dataset")
|
225 |
+
for i, data in enumerate(tqdm(self.val_table.data)):
|
226 |
+
self.val_table_map[data[3]] = data[0]
|
227 |
+
|
228 |
+
def create_dataset_table(self, dataset, class_to_id, name='dataset'):
|
229 |
+
# TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging
|
230 |
+
artifact = wandb.Artifact(name=name, type="dataset")
|
231 |
+
img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None
|
232 |
+
img_files = tqdm(dataset.img_files) if not img_files else img_files
|
233 |
+
for img_file in img_files:
|
234 |
+
if Path(img_file).is_dir():
|
235 |
+
artifact.add_dir(img_file, name='data/images')
|
236 |
+
labels_path = 'labels'.join(dataset.path.rsplit('images', 1))
|
237 |
+
artifact.add_dir(labels_path, name='data/labels')
|
238 |
+
else:
|
239 |
+
artifact.add_file(img_file, name='data/images/' + Path(img_file).name)
|
240 |
+
label_file = Path(img2label_paths([img_file])[0])
|
241 |
+
artifact.add_file(str(label_file),
|
242 |
+
name='data/labels/' + label_file.name) if label_file.exists() else None
|
243 |
+
table = wandb.Table(columns=["id", "train_image", "Classes", "name"])
|
244 |
+
class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()])
|
245 |
+
for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)):
|
246 |
+
height, width = shapes[0]
|
247 |
+
labels[:, 2:] = (xywh2xyxy(labels[:, 2:].view(-1, 4))) * torch.Tensor([width, height, width, height])
|
248 |
+
box_data, img_classes = [], {}
|
249 |
+
for cls, *xyxy in labels[:, 1:].tolist():
|
250 |
+
cls = int(cls)
|
251 |
+
box_data.append({"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
|
252 |
+
"class_id": cls,
|
253 |
+
"box_caption": "%s" % (class_to_id[cls]),
|
254 |
+
"scores": {"acc": 1},
|
255 |
+
"domain": "pixel"})
|
256 |
+
img_classes[cls] = class_to_id[cls]
|
257 |
+
boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space
|
258 |
+
table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes),
|
259 |
+
Path(paths).name)
|
260 |
+
artifact.add(table, name)
|
261 |
+
return artifact
|
262 |
+
|
263 |
+
def log_training_progress(self, predn, path, names):
|
264 |
+
if self.val_table and self.result_table:
|
265 |
+
class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()])
|
266 |
+
box_data = []
|
267 |
+
total_conf = 0
|
268 |
+
for *xyxy, conf, cls in predn.tolist():
|
269 |
+
if conf >= 0.25:
|
270 |
+
box_data.append(
|
271 |
+
{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
|
272 |
+
"class_id": int(cls),
|
273 |
+
"box_caption": "%s %.3f" % (names[cls], conf),
|
274 |
+
"scores": {"class_score": conf},
|
275 |
+
"domain": "pixel"})
|
276 |
+
total_conf = total_conf + conf
|
277 |
+
boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
|
278 |
+
id = self.val_table_map[Path(path).name]
|
279 |
+
self.result_table.add_data(self.current_epoch,
|
280 |
+
id,
|
281 |
+
wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set),
|
282 |
+
total_conf / max(1, len(box_data))
|
283 |
+
)
|
284 |
+
|
285 |
+
def log(self, log_dict):
|
286 |
+
if self.wandb_run:
|
287 |
+
for key, value in log_dict.items():
|
288 |
+
self.log_dict[key] = value
|
289 |
+
|
290 |
+
def end_epoch(self, best_result=False):
|
291 |
+
if self.wandb_run:
|
292 |
+
wandb.log(self.log_dict)
|
293 |
+
self.log_dict = {}
|
294 |
+
if self.result_artifact:
|
295 |
+
train_results = wandb.JoinedTable(self.val_table, self.result_table, "id")
|
296 |
+
self.result_artifact.add(train_results, 'result')
|
297 |
+
wandb.log_artifact(self.result_artifact, aliases=['latest', 'epoch ' + str(self.current_epoch),
|
298 |
+
('best' if best_result else '')])
|
299 |
+
self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"])
|
300 |
+
self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
|
301 |
+
|
302 |
+
def finish_run(self):
|
303 |
+
if self.wandb_run:
|
304 |
+
if self.log_dict:
|
305 |
+
wandb.log(self.log_dict)
|
306 |
+
wandb.run.finish()
|
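A minimal usage sketch (not part of the commit) of how a training script typically drives WandbLogger. It assumes wandb is installed and you are logged in; the Namespace fields mirror the attributes this module reads from opt, while the dataset paths, class names, and metric key are placeholders:

import argparse
from pathlib import Path

from utils.wandb_logging.wandb_utils import WandbLogger

# Hypothetical opt: only the fields WandbLogger actually touches are filled in.
opt = argparse.Namespace(resume=False, project='runs/train', upload_dataset=False,
                         bbox_interval=-1, epochs=100, save_period=-1,
                         artifact_alias='latest', global_rank=-1,
                         data='data/widerface.yaml', single_cls=False)
data_dict = {'train': 'data/train', 'val': 'data/val', 'nc': 1, 'names': ['face']}  # normally parsed from opt.data

wandb_logger = WandbLogger(opt, Path(opt.project).stem, None, data_dict, job_type='Training')
for epoch in range(opt.epochs):
    wandb_logger.current_epoch = epoch + 1
    wandb_logger.log({'train/box_loss': 0.05})  # buffer metrics for this epoch
    wandb_logger.end_epoch(best_result=False)   # flush buffered metrics to the W&B run
wandb_logger.finish_run()                       # final wandb.log + wandb.run.finish()

Because the 'train' and 'val' paths above are plain directories rather than wandb-artifact:// URIs, download_dataset_artifact returns (None, None) and no artifacts are pulled; if wandb is not installed, the constructor only prints the install hint and every later call is a no-op.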