# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmcv.utils import IS_CUDA_AVAILABLE, IS_MLU_AVAILABLE

_USING_PARROTS = True
try:
    from parrots.autograd import gradcheck
except ImportError:
    from torch.autograd import gradcheck
    _USING_PARROTS = False

# torch.set_printoptions(precision=8, threshold=100)
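
# Each test case pairs an N x C logits matrix with the N ground-truth class
# indices used as the focal loss target.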
inputs = [
    ([[1., 0], [0, 1.]], [0, 1]),
    ([[1., 0, -1.], [0, 1., 2.]], [2, 1]),
    ([[1e-6, 2e-6, 3e-6], [4e-6, 5e-5, 6e-4], [7e-3, 8e-2, 9e-1]], [1, 2, 0]),
]

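# Expected (loss, input gradient) values for each case above, precomputed
# with alpha=0.25 and gamma=2.0, the settings used in every test below.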
softmax_outputs = [(0.00566451, [[-0.00657264, 0.00657264],
                                 [0.00657264, -0.00657264]]),
                   (0.34956908, [[0.10165970, 0.03739851, -0.13905823],
                                 [0.01227554, -0.10298023, 0.09070466]]),
                   (0.15754992, [[0.02590877, -0.05181759, 0.02590882],
                                 [0.02589641, 0.02589760, -0.05179400],
                                 [-0.07307514, 0.02234372, 0.05073142]])]

sigmoid_outputs = [(0.13562961, [[-0.00657264, 0.11185755],
                                 [0.11185755, -0.00657264]]),
                   (1.10251057, [[0.28808805, 0.11185755, -0.09602935],
                                 [0.11185755, -0.00657264, 0.40376765]]),
                   (0.42287254, [[0.07457182, -0.02485716, 0.07457201],
                                 [0.07457211, 0.07457669, -0.02483728],
                                 [-0.02462499, 0.08277918, 0.18050370]])]


class TestFocalLoss:
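    """Forward and gradient tests for the mmcv.ops focal loss operators."""
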
    def _test_softmax(self, dtype=torch.float):
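        # Compare the forward loss and backward input gradient against the
        # precomputed softmax fixtures; runs only when CUDA is available.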
        if not torch.cuda.is_available():
            return
        from mmcv.ops import softmax_focal_loss
        alpha = 0.25
        gamma = 2.0
        for case, output in zip(inputs, softmax_outputs):
            np_x = np.array(case[0])
            np_y = np.array(case[1])
            np_x_grad = np.array(output[1])
            x = torch.from_numpy(np_x).cuda().type(dtype)
            x.requires_grad_()
            y = torch.from_numpy(np_y).cuda().long()
            loss = softmax_focal_loss(x, y, gamma, alpha, None, 'mean')
            loss.backward()
            assert np.allclose(loss.data.cpu().numpy(), output[0], 1e-2)
            assert np.allclose(x.grad.data.cpu(), np_x_grad, 1e-2)

    def _test_sigmoid(self, device, dtype=torch.float):
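        # Same forward/backward comparison for sigmoid focal loss, which
        # also runs on non-CUDA backends such as MLU.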
        from mmcv.ops import sigmoid_focal_loss
        alpha = 0.25
        gamma = 2.0
        for case, output in zip(inputs, sigmoid_outputs):
            np_x = np.array(case[0])
            np_y = np.array(case[1])
            np_x_grad = np.array(output[1])
            x = torch.from_numpy(np_x).to(device).type(dtype)
            x.requires_grad_()
            y = torch.from_numpy(np_y).to(device).long()
            loss = sigmoid_focal_loss(x, y, gamma, alpha, None, 'mean')
            loss.backward()
            assert np.allclose(loss.data.cpu().numpy(), output[0], 1e-2)
            assert np.allclose(x.grad.data.cpu(), np_x_grad, 1e-2)

    def _test_grad_softmax(self, dtype=torch.float):
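        # Numerically verify SoftmaxFocalLoss gradients with gradcheck.
        # Under parrots the check is currently disabled; the commented-out
        # call below shows the parrots variant, which takes no_grads for
        # the non-differentiable integer labels.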
        if not torch.cuda.is_available():
            return
        from mmcv.ops import SoftmaxFocalLoss
        alpha = 0.25
        gamma = 2.0
        for case in inputs:
            np_x = np.array(case[0])
            np_y = np.array(case[1])
            x = torch.from_numpy(np_x).cuda().type(dtype)
            x.requires_grad_()
            y = torch.from_numpy(np_y).cuda().long()
            floss = SoftmaxFocalLoss(gamma, alpha)
            if _USING_PARROTS:
                # gradcheck(floss, (x, y),
                #           no_grads=[y])
                pass
            else:
                gradcheck(floss, (x, y), eps=1e-2, atol=1e-2)

    def _test_grad_sigmoid(self, dtype=torch.float):
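        # Same numerical gradient check for SigmoidFocalLoss.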
        if not torch.cuda.is_available():
            return
        from mmcv.ops import SigmoidFocalLoss
        alpha = 0.25
        gamma = 2.0
        for case in inputs:
            np_x = np.array(case[0])
            np_y = np.array(case[1])
            x = torch.from_numpy(np_x).cuda().type(dtype)
            x.requires_grad_()
            y = torch.from_numpy(np_y).cuda().long()
            floss = SigmoidFocalLoss(gamma, alpha)
            if _USING_PARROTS:
                # gradcheck(floss, (x, y),
                #           no_grads=[y])
                pass
            else:
                gradcheck(floss, (x, y), eps=1e-2, atol=1e-2)

    def test_softmax_float(self):
        self._test_softmax(dtype=torch.float)

    def test_softmax_half(self):
        self._test_softmax(dtype=torch.half)

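    # The sigmoid tests are parametrized over all available backends, since
    # sigmoid_focal_loss has both CUDA and MLU implementations.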
    @pytest.mark.parametrize('device', [
        pytest.param(
            'cuda',
            marks=pytest.mark.skipif(
                not IS_CUDA_AVAILABLE, reason='requires CUDA support')),
        pytest.param(
            'mlu',
            marks=pytest.mark.skipif(
                not IS_MLU_AVAILABLE, reason='requires MLU support'))
    ])
    def test_sigmoid_float(self, device):
        self._test_sigmoid(device=device, dtype=torch.float)

    @pytest.mark.parametrize('device', [
        pytest.param(
            'cuda',
            marks=pytest.mark.skipif(
                not IS_CUDA_AVAILABLE, reason='requires CUDA support')),
        pytest.param(
            'mlu',
            marks=pytest.mark.skipif(
                not IS_MLU_AVAILABLE, reason='requires MLU support'))
    ])
    def test_sigmoid_half(self, device):
        self._test_sigmoid(device, dtype=torch.half)

    def test_grad_softmax_float(self):
        self._test_grad_softmax(dtype=torch.float)

    def test_grad_sigmoid_float(self):
        self._test_grad_sigmoid(dtype=torch.float)