prompt
stringlengths 94
42.6k
| completion
stringlengths 6
120
| api
stringlengths 14
68
|
---|---|---|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
    """Return the model name of the first GPU reported by ``nvidia-smi``.

    Returns the literal string ``"None"`` when ``nvidia-smi`` is missing,
    fails, or produces undecodable output (e.g. on a CPU-only machine).
    """
    try:
        gpu_info = subprocess.check_output(
            ["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
        )
        # One GPU name per output line; keep only the first device.
        gpu_info = gpu_info.decode("ascii").split("\n")[0]
    except (OSError, subprocess.SubprocessError, UnicodeDecodeError):
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit. OSError covers a missing binary,
        # SubprocessError covers a non-zero exit status.
        gpu_info = "None"
    return gpu_info
def get_cpu_name():
    """Return the CPU model name parsed from ``/proc/cpuinfo``.

    Falls back to the literal string ``"None"`` when ``/proc/cpuinfo`` is
    unreadable (e.g. non-Linux platforms). If the file is readable but no
    "model name" line is found, the full cpuinfo text is returned — this
    quirk is preserved from the original behavior.
    """
    cpu_info = "None"
    try:
        cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
        for line in cpu_info.split("\n"):
            if "model name" in line:
                # `count` must be a keyword argument: the positional form
                # is deprecated since Python 3.13.
                return re.sub(".*model name.*:", "", line, count=1).strip()
    except (OSError, subprocess.SubprocessError, UnicodeDecodeError):
        # Narrowed from a bare `except:`; keep the best-effort fallback.
        pass
    return cpu_info
def get_xpu_name():
    """Name of the accelerator in use: the GPU when CUDA is available,
    otherwise the host CPU."""
    return get_gpu_name() if mge.is_cuda_available() else get_cpu_name()
class MnistNet(Module):
    """Small LeNet-style convnet used as the correctness-test workload.

    Produces (N, 10) class logits; the fc0 input size (20 * 4 * 4) implies
    a 28x28 single-channel input — presumably MNIST images, TODO confirm
    against the dumped test data in the checkpoint.

    NOTE(review): attribute names (conv0, bn0, fc0, ...) determine the
    state-dict keys stored in the reference checkpoints; do not rename.
    """

    def __init__(self, has_bn=True):
        super().__init__()
        self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
        self.pool0 = AvgPool2d(2)
        self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
        self.pool1 = AvgPool2d(2)
        # 20 channels * 4x4 spatial extent left after two conv+pool stages.
        self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
        self.fc1 = Linear(500, 10, bias=True)
        # BatchNorm layers are optional so the same topology can be tested
        # both with and without BN.
        self.bn0 = None
        self.bn1 = None
        if has_bn:
            self.bn0 = BatchNorm2d(20)
            self.bn1 = BatchNorm2d(20)

    def forward(self, x):
        x = self.conv0(x)
        if self.bn0:
            x = self.bn0(x)
        x = F.relu(x)
        x = self.pool0(x)
        x = self.conv1(x)
        if self.bn1:
            x = self.bn1(x)
        x = F.relu(x)
        x = self.pool1(x)
        # Collapse (N, C, H, W) to (N, C*H*W) for the fully-connected head.
        x = F.flatten(x, 1)
        x = self.fc0(x)
        x = F.relu(x)
        x = self.fc1(x)
        return x
def train(data, label, net, opt, gm):
    """Run one full training step and return the cross-entropy loss.

    Clears gradients, records the forward pass under the GradManager *gm*,
    backpropagates the loss, then applies one optimizer step. Callers must
    not repeat clear_grad()/step() around this function.
    """
    opt.clear_grad()
    with gm:
        pred = net(data)
        loss = F.nn.cross_entropy(pred, label)
        gm.backward(loss)
    opt.step()
    return loss
def update_model(model_path):
    """
    Update the dumped model with test cases for new reference values.

    The model with pre-trained weights is trained for one iter with the
    test data attached; the loss and updated net state dict are dumped
    back into *model_path*.

    .. code-block:: python

        from test_dp_correctness import update_model
        update_model('mnist_model_with_test.mge')      # for gpu
        update_model('mnist_model_with_test_cpu.mge')  # for cpu
    """
    net = MnistNet(has_bn=True)
    checkpoint = mge.load(model_path)
    net.load_state_dict(checkpoint["net_init"])
    lr = checkpoint["sgd_lr"]
    opt = SGD(net.parameters(), lr=lr)
    gm = ad.GradManager().attach(
        net.parameters(), callbacks=[dist.make_allreduce_cb("MEAN", dist.WORLD)]
    )
    data = Tensor(checkpoint["data"], dtype=np.float32)
    label = Tensor(checkpoint["label"], dtype=np.int32)

    # BUG FIX: train() requires the GradManager (`gm`) — the old call
    # omitted it and raised TypeError. train() also performs clear_grad()
    # and step() itself, so the duplicated opt.clear_grad()/opt.step()
    # around the call have been removed.
    loss = train(data, label, net=net, opt=opt, gm=gm)

    xpu_name = get_xpu_name()
    checkpoint.update(
        {"net_updated": net.state_dict(), "loss": loss.numpy(), "xpu": xpu_name}
    )
    mge.serialization.save(checkpoint, model_path)
def run_test(
model_path, use_jit, use_symbolic, sublinear_memory_config=None, max_err=None,
):
"""
Load the model with test cases and run the training for one iter.
The loss and updated weights are compared with reference value to verify the correctness.
Dump a new file with updated result by calling update_model
if you think the test fails due to numerical rounding errors instead of bugs.
Please think twice before you do so.
"""
checkpoint = mge.load(model_path)
data = checkpoint["data"]
label = checkpoint["label"]
@dist.launcher
def worker(max_err):
net = MnistNet(has_bn=True)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
gm = ad.GradManager().attach(
net.parameters(), callbacks=[dist.make_allreduce_cb("MEAN", dist.WORLD)]
)
# use same data and label for all gpu's
# such that the result does not depend on number of gpu
data_train = | Tensor(data) | megengine.tensor.Tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
self.pool1 = AvgPool2d(2)
self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
self.fc1 = Linear(500, 10, bias=True)
self.bn0 = None
self.bn1 = None
if has_bn:
self.bn0 = BatchNorm2d(20)
self.bn1 = BatchNorm2d(20)
def forward(self, x):
x = self.conv0(x)
if self.bn0:
x = self.bn0(x)
x = F.relu(x)
x = self.pool0(x)
x = self.conv1(x)
if self.bn1:
x = self.bn1(x)
x = F.relu(x)
x = self.pool1(x)
x = F.flatten(x, 1)
x = self.fc0(x)
x = F.relu(x)
x = self.fc1(x)
return x
def train(data, label, net, opt, gm):
opt.clear_grad()
with gm:
pred = net(data)
loss = F.nn.cross_entropy(pred, label)
gm.backward(loss)
opt.step()
return loss
def update_model(model_path):
"""
Update the dumped model with test cases for new reference values.
The model with pre-trained weights is trained for one iter with the test data attached.
The loss and updated net state dict is dumped.
.. code-block:: python
from test_dp_correctness import update_model
update_model('mnist_model_with_test.mge') # for gpu
update_model('mnist_model_with_test_cpu.mge') # for cpu
"""
net = MnistNet(has_bn=True)
checkpoint = mge.load(model_path)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
gm = ad.GradManager().attach(
net.parameters(), callbacks=[dist.make_allreduce_cb("MEAN", dist.WORLD)]
)
data = Tensor(checkpoint["data"], dtype=np.float32)
label = Tensor(checkpoint["label"], dtype=np.int32)
opt.clear_grad()
loss = train(data, label, net=net, opt=opt)
opt.step()
xpu_name = get_xpu_name()
checkpoint.update(
{"net_updated": net.state_dict(), "loss": loss.numpy(), "xpu": xpu_name}
)
mge.serialization.save(checkpoint, model_path)
def run_test(
model_path, use_jit, use_symbolic, sublinear_memory_config=None, max_err=None,
):
"""
Load the model with test cases and run the training for one iter.
The loss and updated weights are compared with reference value to verify the correctness.
Dump a new file with updated result by calling update_model
if you think the test fails due to numerical rounding errors instead of bugs.
Please think twice before you do so.
"""
checkpoint = mge.load(model_path)
data = checkpoint["data"]
label = checkpoint["label"]
@dist.launcher
def worker(max_err):
net = MnistNet(has_bn=True)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
gm = ad.GradManager().attach(
net.parameters(), callbacks=[dist.make_allreduce_cb("MEAN", dist.WORLD)]
)
# use same data and label for all gpu's
# such that the result does not depend on number of gpu
data_train = Tensor(data)
label_train = | Tensor(label) | megengine.tensor.Tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
self.pool1 = AvgPool2d(2)
self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
self.fc1 = Linear(500, 10, bias=True)
self.bn0 = None
self.bn1 = None
if has_bn:
self.bn0 = BatchNorm2d(20)
self.bn1 = BatchNorm2d(20)
def forward(self, x):
x = self.conv0(x)
if self.bn0:
x = self.bn0(x)
x = F.relu(x)
x = self.pool0(x)
x = self.conv1(x)
if self.bn1:
x = self.bn1(x)
x = F.relu(x)
x = self.pool1(x)
x = F.flatten(x, 1)
x = self.fc0(x)
x = F.relu(x)
x = self.fc1(x)
return x
def train(data, label, net, opt, gm):
opt.clear_grad()
with gm:
pred = net(data)
loss = F.nn.cross_entropy(pred, label)
gm.backward(loss)
opt.step()
return loss
def update_model(model_path):
"""
Update the dumped model with test cases for new reference values.
The model with pre-trained weights is trained for one iter with the test data attached.
The loss and updated net state dict is dumped.
.. code-block:: python
from test_dp_correctness import update_model
update_model('mnist_model_with_test.mge') # for gpu
update_model('mnist_model_with_test_cpu.mge') # for cpu
"""
net = MnistNet(has_bn=True)
checkpoint = mge.load(model_path)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
gm = ad.GradManager().attach(
net.parameters(), callbacks=[dist.make_allreduce_cb("MEAN", dist.WORLD)]
)
data = Tensor(checkpoint["data"], dtype=np.float32)
label = Tensor(checkpoint["label"], dtype=np.int32)
opt.clear_grad()
loss = train(data, label, net=net, opt=opt)
opt.step()
xpu_name = get_xpu_name()
checkpoint.update(
{"net_updated": net.state_dict(), "loss": loss.numpy(), "xpu": xpu_name}
)
mge.serialization.save(checkpoint, model_path)
def run_test(
model_path, use_jit, use_symbolic, sublinear_memory_config=None, max_err=None,
):
"""
Load the model with test cases and run the training for one iter.
The loss and updated weights are compared with reference value to verify the correctness.
Dump a new file with updated result by calling update_model
if you think the test fails due to numerical rounding errors instead of bugs.
Please think twice before you do so.
"""
checkpoint = mge.load(model_path)
data = checkpoint["data"]
label = checkpoint["label"]
@dist.launcher
def worker(max_err):
net = MnistNet(has_bn=True)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
gm = ad.GradManager().attach(
net.parameters(), callbacks=[dist.make_allreduce_cb("MEAN", dist.WORLD)]
)
# use same data and label for all gpu's
# such that the result does not depend on number of gpu
data_train = Tensor(data)
label_train = Tensor(label)
loss = train(data_train, label_train, net, opt, gm)
np.testing.assert_allclose(loss.numpy(), checkpoint["loss"], atol=max_err)
if | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
self.pool1 = AvgPool2d(2)
self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
self.fc1 = Linear(500, 10, bias=True)
self.bn0 = None
self.bn1 = None
if has_bn:
self.bn0 = | BatchNorm2d(20) | megengine.module.BatchNorm2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
self.pool1 = AvgPool2d(2)
self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
self.fc1 = Linear(500, 10, bias=True)
self.bn0 = None
self.bn1 = None
if has_bn:
self.bn0 = BatchNorm2d(20)
self.bn1 = | BatchNorm2d(20) | megengine.module.BatchNorm2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
self.pool1 = AvgPool2d(2)
self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
self.fc1 = Linear(500, 10, bias=True)
self.bn0 = None
self.bn1 = None
if has_bn:
self.bn0 = BatchNorm2d(20)
self.bn1 = BatchNorm2d(20)
def forward(self, x):
x = self.conv0(x)
if self.bn0:
x = self.bn0(x)
x = F.relu(x)
x = self.pool0(x)
x = self.conv1(x)
if self.bn1:
x = self.bn1(x)
x = F.relu(x)
x = self.pool1(x)
x = F.flatten(x, 1)
x = self.fc0(x)
x = F.relu(x)
x = self.fc1(x)
return x
def train(data, label, net, opt, gm):
opt.clear_grad()
with gm:
pred = net(data)
loss = F.nn.cross_entropy(pred, label)
gm.backward(loss)
opt.step()
return loss
def update_model(model_path):
"""
Update the dumped model with test cases for new reference values.
The model with pre-trained weights is trained for one iter with the test data attached.
The loss and updated net state dict is dumped.
.. code-block:: python
from test_dp_correctness import update_model
update_model('mnist_model_with_test.mge') # for gpu
update_model('mnist_model_with_test_cpu.mge') # for cpu
"""
net = MnistNet(has_bn=True)
checkpoint = mge.load(model_path)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
gm = | ad.GradManager() | megengine.autodiff.GradManager |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
self.pool1 = AvgPool2d(2)
self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
self.fc1 = Linear(500, 10, bias=True)
self.bn0 = None
self.bn1 = None
if has_bn:
self.bn0 = BatchNorm2d(20)
self.bn1 = BatchNorm2d(20)
def forward(self, x):
x = self.conv0(x)
if self.bn0:
x = self.bn0(x)
x = F.relu(x)
x = self.pool0(x)
x = self.conv1(x)
if self.bn1:
x = self.bn1(x)
x = F.relu(x)
x = self.pool1(x)
x = F.flatten(x, 1)
x = self.fc0(x)
x = F.relu(x)
x = self.fc1(x)
return x
def train(data, label, net, opt, gm):
opt.clear_grad()
with gm:
pred = net(data)
loss = F.nn.cross_entropy(pred, label)
gm.backward(loss)
opt.step()
return loss
def update_model(model_path):
"""
Update the dumped model with test cases for new reference values.
The model with pre-trained weights is trained for one iter with the test data attached.
The loss and updated net state dict is dumped.
.. code-block:: python
from test_dp_correctness import update_model
update_model('mnist_model_with_test.mge') # for gpu
update_model('mnist_model_with_test_cpu.mge') # for cpu
"""
net = MnistNet(has_bn=True)
checkpoint = mge.load(model_path)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
gm = ad.GradManager().attach(
net.parameters(), callbacks=[ | dist.make_allreduce_cb("MEAN", dist.WORLD) | megengine.distributed.make_allreduce_cb |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
self.pool1 = AvgPool2d(2)
self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
self.fc1 = Linear(500, 10, bias=True)
self.bn0 = None
self.bn1 = None
if has_bn:
self.bn0 = BatchNorm2d(20)
self.bn1 = BatchNorm2d(20)
def forward(self, x):
x = self.conv0(x)
if self.bn0:
x = self.bn0(x)
x = F.relu(x)
x = self.pool0(x)
x = self.conv1(x)
if self.bn1:
x = self.bn1(x)
x = F.relu(x)
x = self.pool1(x)
x = F.flatten(x, 1)
x = self.fc0(x)
x = F.relu(x)
x = self.fc1(x)
return x
def train(data, label, net, opt, gm):
opt.clear_grad()
with gm:
pred = net(data)
loss = F.nn.cross_entropy(pred, label)
gm.backward(loss)
opt.step()
return loss
def update_model(model_path):
"""
Update the dumped model with test cases for new reference values.
The model with pre-trained weights is trained for one iter with the test data attached.
The loss and updated net state dict is dumped.
.. code-block:: python
from test_dp_correctness import update_model
update_model('mnist_model_with_test.mge') # for gpu
update_model('mnist_model_with_test_cpu.mge') # for cpu
"""
net = MnistNet(has_bn=True)
checkpoint = mge.load(model_path)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
gm = ad.GradManager().attach(
net.parameters(), callbacks=[dist.make_allreduce_cb("MEAN", dist.WORLD)]
)
data = Tensor(checkpoint["data"], dtype=np.float32)
label = Tensor(checkpoint["label"], dtype=np.int32)
opt.clear_grad()
loss = train(data, label, net=net, opt=opt)
opt.step()
xpu_name = get_xpu_name()
checkpoint.update(
{"net_updated": net.state_dict(), "loss": loss.numpy(), "xpu": xpu_name}
)
mge.serialization.save(checkpoint, model_path)
def run_test(
model_path, use_jit, use_symbolic, sublinear_memory_config=None, max_err=None,
):
"""
Load the model with test cases and run the training for one iter.
The loss and updated weights are compared with reference value to verify the correctness.
Dump a new file with updated result by calling update_model
if you think the test fails due to numerical rounding errors instead of bugs.
Please think twice before you do so.
"""
checkpoint = mge.load(model_path)
data = checkpoint["data"]
label = checkpoint["label"]
@dist.launcher
def worker(max_err):
net = MnistNet(has_bn=True)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
gm = | ad.GradManager() | megengine.autodiff.GradManager |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
self.pool1 = AvgPool2d(2)
self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
self.fc1 = Linear(500, 10, bias=True)
self.bn0 = None
self.bn1 = None
if has_bn:
self.bn0 = BatchNorm2d(20)
self.bn1 = BatchNorm2d(20)
def forward(self, x):
x = self.conv0(x)
if self.bn0:
x = self.bn0(x)
x = F.relu(x)
x = self.pool0(x)
x = self.conv1(x)
if self.bn1:
x = self.bn1(x)
x = F.relu(x)
x = self.pool1(x)
x = F.flatten(x, 1)
x = self.fc0(x)
x = F.relu(x)
x = self.fc1(x)
return x
def train(data, label, net, opt, gm):
opt.clear_grad()
with gm:
pred = net(data)
loss = F.nn.cross_entropy(pred, label)
gm.backward(loss)
opt.step()
return loss
def update_model(model_path):
"""
Update the dumped model with test cases for new reference values.
The model with pre-trained weights is trained for one iter with the test data attached.
The loss and updated net state dict is dumped.
.. code-block:: python
from test_dp_correctness import update_model
update_model('mnist_model_with_test.mge') # for gpu
update_model('mnist_model_with_test_cpu.mge') # for cpu
"""
net = MnistNet(has_bn=True)
checkpoint = mge.load(model_path)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
gm = ad.GradManager().attach(
net.parameters(), callbacks=[dist.make_allreduce_cb("MEAN", dist.WORLD)]
)
data = Tensor(checkpoint["data"], dtype=np.float32)
label = Tensor(checkpoint["label"], dtype=np.int32)
opt.clear_grad()
loss = train(data, label, net=net, opt=opt)
opt.step()
xpu_name = get_xpu_name()
checkpoint.update(
{"net_updated": net.state_dict(), "loss": loss.numpy(), "xpu": xpu_name}
)
mge.serialization.save(checkpoint, model_path)
def run_test(
model_path, use_jit, use_symbolic, sublinear_memory_config=None, max_err=None,
):
"""
Load the model with test cases and run the training for one iter.
The loss and updated weights are compared with reference value to verify the correctness.
Dump a new file with updated result by calling update_model
if you think the test fails due to numerical rounding errors instead of bugs.
Please think twice before you do so.
"""
checkpoint = mge.load(model_path)
data = checkpoint["data"]
label = checkpoint["label"]
@dist.launcher
def worker(max_err):
net = MnistNet(has_bn=True)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
gm = ad.GradManager().attach(
net.parameters(), callbacks=[ | dist.make_allreduce_cb("MEAN", dist.WORLD) | megengine.distributed.make_allreduce_cb |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
    """Create a temporary file, yield its path, and delete it on exit."""
    fd, path = tempfile.mkstemp()
    try:
        os.close(fd)  # callers only need the path, not the open descriptor
        yield path
    finally:
        os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = | mgb.load_comp_graph_from_file(fpath) | megengine._internal.load_comp_graph_from_file |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = | mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy") | megengine._internal.cgtools.get_dep_vars |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
    """Load a dumped computing graph from ``fpath`` and compile it into a
    callable taking the graph's Host2DeviceCopy inputs (sorted by name)."""
    cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
    inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
    inputs = sorted(inputs, key=lambda i: i.name)
    outputs = list(map(mgb.copy_output, outputs))
    # Unwrap a single output so the compiled function returns it directly.
    if len(outputs) == 1:
        (outputs,) = outputs
    return cg.compile(inputs, outputs)
def test_symbolic():
@ | jit.trace(symbolic=False) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@ | jit.trace(symbolic=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
    """symbolic=False evaluates eagerly, so the failing assert_equal raises
    at trace time; symbolic=True only builds the graph and must not raise."""
    @jit.trace(symbolic=False)
    def f(x):
        # x == x + 1 is always false, so assert_equal must fail when executed.
        return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
    with pytest.raises(mgb.exc.MegBrainError):
        f.trace(0)
    @jit.trace(symbolic=True)
    def f(x):
        return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
    f.trace(0)  # must not raise under symbolic=True
def test_dump():
@ | jit.trace(symbolic=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
f.trace(0)
def test_dump():
    """A traced function can be dumped to a file, reloaded and re-run."""
    @jit.trace(symbolic=True)
    def f(x, y):
        return x * y
    f.trace(0, 0)
    with mkstemp() as out:
        f.dump(out)
        g = load_and_compile(out)
    np.testing.assert_allclose(g([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
@ | jit.trace(symbolic=True, opt_level=0) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
f.trace(0)
def test_dump():
@jit.trace(symbolic=True)
def f(x, y):
return x * y
f.trace(0, 0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
@jit.trace(symbolic=True, opt_level=0)
def f(x):
return x / x
@ | jit.trace(symbolic=True, opt_level=1) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
f.trace(0)
def test_dump():
@jit.trace(symbolic=True)
def f(x, y):
return x * y
f.trace(0, 0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
    """opt_level=0 leaves 0/0 as NaN; opt_level=1 folds x / x to 1."""
    @jit.trace(symbolic=True, opt_level=0)
    def f(x):
        return x / x
    @jit.trace(symbolic=True, opt_level=1)
    def g(x):
        return x / x
    out = f([0.0]).numpy()
    # out is nan
    # NOTE(review): NaN != NaN, so `out == out` is False when out is NaN.
    # The bare `raise` here would produce RuntimeError ("No active exception
    # to re-raise"); `assert np.isnan(out)` would fail more descriptively.
    if out == out:
        raise
    # with gopt, x / x returns 1
    out = g([0.0]).numpy()
    assert out == 1
def test_json_prof():
@ | jit.trace(symbolic=True, profiling=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
f.trace(0)
def test_dump():
@jit.trace(symbolic=True)
def f(x, y):
return x * y
f.trace(0, 0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
@jit.trace(symbolic=True, opt_level=0)
def f(x):
return x / x
@jit.trace(symbolic=True, opt_level=1)
def g(x):
return x / x
out = f([0.0]).numpy()
# out is nan
if out == out:
raise
# with gopt, x / x returns 1
out = g([0.0]).numpy()
assert out == 1
def test_json_prof():
    """profiling=True makes the trace record profiler output."""
    @jit.trace(symbolic=True, profiling=True)
    def f(x):
        return x * x
    f([0.0])
    out = f.get_profile()
    assert out.get("profiler")  # profile dict must contain profiler data
def test_capture_dump():
p = | tensor(7) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
f.trace(0)
def test_dump():
@jit.trace(symbolic=True)
def f(x, y):
return x * y
f.trace(0, 0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
@jit.trace(symbolic=True, opt_level=0)
def f(x):
return x / x
@jit.trace(symbolic=True, opt_level=1)
def g(x):
return x / x
out = f([0.0]).numpy()
# out is nan
if out == out:
raise
# with gopt, x / x returns 1
out = g([0.0]).numpy()
assert out == 1
def test_json_prof():
@jit.trace(symbolic=True, profiling=True)
def f(x):
return x * x
f([0.0])
out = f.get_profile()
assert out.get("profiler")
def test_capture_dump():
p = tensor(7)
@ | jit.trace(symbolic=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
f.trace(0)
def test_dump():
@jit.trace(symbolic=True)
def f(x, y):
return x * y
f.trace(0, 0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
@jit.trace(symbolic=True, opt_level=0)
def f(x):
return x / x
@jit.trace(symbolic=True, opt_level=1)
def g(x):
return x / x
out = f([0.0]).numpy()
# out is nan
if out == out:
raise
# with gopt, x / x returns 1
out = g([0.0]).numpy()
assert out == 1
def test_json_prof():
@jit.trace(symbolic=True, profiling=True)
def f(x):
return x * x
f([0.0])
out = f.get_profile()
assert out.get("profiler")
def test_capture_dump():
    """A tensor captured from the enclosing scope is preserved by dump."""
    p = tensor(7)
    @jit.trace(symbolic=True)
    def f(x):
        return x * p  # p is captured from the closure, not an explicit input
    f.trace(0)
    with mkstemp() as out:
        f.dump(out)
        g = load_and_compile(out)
    np.testing.assert_allclose(g([1, 2, 3]), [7, 14, 21])
def test_dump_volatile():
p = | tensor(7) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
f.trace(0)
def test_dump():
@jit.trace(symbolic=True)
def f(x, y):
return x * y
f.trace(0, 0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
@jit.trace(symbolic=True, opt_level=0)
def f(x):
return x / x
@jit.trace(symbolic=True, opt_level=1)
def g(x):
return x / x
out = f([0.0]).numpy()
# out is nan
if out == out:
raise
# with gopt, x / x returns 1
out = g([0.0]).numpy()
assert out == 1
def test_json_prof():
@jit.trace(symbolic=True, profiling=True)
def f(x):
return x * x
f([0.0])
out = f.get_profile()
assert out.get("profiler")
def test_capture_dump():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3]), [7, 14, 21])
def test_dump_volatile():
p = tensor(7)
@ | jit.trace(symbolic=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
f.trace(0)
def test_dump():
@jit.trace(symbolic=True)
def f(x, y):
return x * y
f.trace(0, 0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
@jit.trace(symbolic=True, opt_level=0)
def f(x):
return x / x
@jit.trace(symbolic=True, opt_level=1)
def g(x):
return x / x
out = f([0.0]).numpy()
# out is nan
if out == out:
raise
# with gopt, x / x returns 1
out = g([0.0]).numpy()
assert out == 1
def test_json_prof():
@jit.trace(symbolic=True, profiling=True)
def f(x):
return x * x
f([0.0])
out = f.get_profile()
assert out.get("profiler")
def test_capture_dump():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3]), [7, 14, 21])
def test_dump_volatile():
    """A captured tensor is dumped as a SharedDeviceTensor graph node."""
    p = tensor(7)
    @jit.trace(symbolic=True)
    def f(x):
        return x * p
    f.trace(0)
    with mkstemp() as out:
        f.dump(out)
        cg, _, outputs = mgb.load_comp_graph_from_file(out)
    (out,) = outputs
    # The second input of the output op is the captured constant.
    assert mgb.cgtools.get_type(mgb.cgtools.get_inputs(out)[1]) == "SharedDeviceTensor"
def test_shape_tracing():
    """Shape components can be used as ordinary values in both trace modes."""
    for symbolic in [False, True]:
        @jit.trace(symbolic=symbolic)
        def f(x):
            a, b = x.shape
            return a * b
        assert f(np.zeros([4, 3], dtype="float32")).item() == 12
        assert f(np.zeros([6, 4], dtype="float32")).item() == 24
def test_shape_infer():
@ | jit.trace(symbolic=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
f.trace(0)
def test_dump():
@jit.trace(symbolic=True)
def f(x, y):
return x * y
f.trace(0, 0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
@jit.trace(symbolic=True, opt_level=0)
def f(x):
return x / x
@jit.trace(symbolic=True, opt_level=1)
def g(x):
return x / x
out = f([0.0]).numpy()
# out is nan
if out == out:
raise
# with gopt, x / x returns 1
out = g([0.0]).numpy()
assert out == 1
def test_json_prof():
@jit.trace(symbolic=True, profiling=True)
def f(x):
return x * x
f([0.0])
out = f.get_profile()
assert out.get("profiler")
def test_capture_dump():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3]), [7, 14, 21])
def test_dump_volatile():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
cg, _, outputs = mgb.load_comp_graph_from_file(out)
(out,) = outputs
assert mgb.cgtools.get_type(mgb.cgtools.get_inputs(out)[1]) == "SharedDeviceTensor"
def test_shape_tracing():
for symbolic in [False, True]:
@jit.trace(symbolic=symbolic)
def f(x):
a, b = x.shape
return a * b
assert f(np.zeros([4, 3], dtype="float32")).item() == 12
assert f(np.zeros([6, 4], dtype="float32")).item() == 24
def test_shape_infer():
    """The Python-level loop bound is fixed at first trace: a (4, 10)
    input is still reduced over only the first 3 rows."""
    @jit.trace(symbolic=True)
    def f(x):
        a, b = x.shape
        return sum(x[i] for i in range(a))
    x = np.random.randn(3, 10).astype("float32")
    assertTensorClose(f(x), x.sum(0))
    x = np.random.randn(4, 10).astype("float32")
    # Shape captured during the first call (a == 3) is reused here.
    assertTensorClose(f(x), x[:3].sum(0))
def test_dump_bn_fused():
class ConvBNReLU(M.Sequential):
def __init__(self):
super(ConvBNReLU, self).__init__(
M.Conv2d(3, 4, 3, 1, 1, groups=1, bias=False),
M.BatchNorm2d(4),
M.ReLU(),
)
net = ConvBNReLU()
net.eval()
@ | jit.trace(symbolic=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
f.trace(0)
def test_dump():
@jit.trace(symbolic=True)
def f(x, y):
return x * y
f.trace(0, 0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
@jit.trace(symbolic=True, opt_level=0)
def f(x):
return x / x
@jit.trace(symbolic=True, opt_level=1)
def g(x):
return x / x
out = f([0.0]).numpy()
# out is nan
if out == out:
raise
# with gopt, x / x returns 1
out = g([0.0]).numpy()
assert out == 1
def test_json_prof():
@jit.trace(symbolic=True, profiling=True)
def f(x):
return x * x
f([0.0])
out = f.get_profile()
assert out.get("profiler")
def test_capture_dump():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3]), [7, 14, 21])
def test_dump_volatile():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
cg, _, outputs = mgb.load_comp_graph_from_file(out)
(out,) = outputs
assert mgb.cgtools.get_type(mgb.cgtools.get_inputs(out)[1]) == "SharedDeviceTensor"
def test_shape_tracing():
for symbolic in [False, True]:
@jit.trace(symbolic=symbolic)
def f(x):
a, b = x.shape
return a * b
assert f(np.zeros([4, 3], dtype="float32")).item() == 12
assert f(np.zeros([6, 4], dtype="float32")).item() == 24
def test_shape_infer():
@jit.trace(symbolic=True)
def f(x):
a, b = x.shape
return sum(x[i] for i in range(a))
x = np.random.randn(3, 10).astype("float32")
assertTensorClose(f(x), x.sum(0))
x = np.random.randn(4, 10).astype("float32")
assertTensorClose(f(x), x[:3].sum(0))
def test_dump_bn_fused():
class ConvBNReLU(M.Sequential):
def __init__(self):
super(ConvBNReLU, self).__init__(
M.Conv2d(3, 4, 3, 1, 1, groups=1, bias=False),
M.BatchNorm2d(4),
M.ReLU(),
)
net = ConvBNReLU()
net.eval()
@jit.trace(symbolic=True)
def fun(data):
return net(data)
data = np.random.random([1, 3, 224, 224]).astype(np.float32)
fun.trace(data)
with mkstemp() as out:
fun.dump(out, optimize_for_inference=True)
cg, _, outputs = mgb.load_comp_graph_from_file(out)
(out,) = outputs
inputs = | mgb.cgtools.get_inputs(out) | megengine._internal.cgtools.get_inputs |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
    """Yield the path of a fresh temporary file; remove it on exit."""
    handle, fname = tempfile.mkstemp()
    try:
        os.close(handle)  # the consumer only needs the path
        yield fname
    finally:
        os.remove(fname)
def load_and_compile(fpath):
    """Load a serialized computing graph and compile it into a callable.

    The callable takes the graph's Host2DeviceCopy inputs (sorted by
    name) and returns the copied output(s); a single output is returned
    directly rather than as a 1-element list.
    """
    cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
    inputs = sorted(
        mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy"), key=lambda v: v.name
    )
    outputs = [mgb.copy_output(var) for var in outputs]
    if len(outputs) == 1:
        (outputs,) = outputs
    return cg.compile(inputs, outputs)
def test_symbolic():
    """Eager tracing (symbolic=False) runs the failing assert_equal and
    raises; symbolic=True only builds the graph and must not raise."""
    @jit.trace(symbolic=False)
    def eager(x):
        return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))

    with pytest.raises(mgb.exc.MegBrainError):
        eager.trace(0)

    @jit.trace(symbolic=True)
    def lazy(x):
        return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))

    lazy.trace(0)
def test_dump():
    """A traced multiply can be dumped, reloaded and re-run identically."""
    @jit.trace(symbolic=True)
    def mul(a, b):
        return a * b

    mul.trace(0, 0)
    with mkstemp() as fpath:
        mul.dump(fpath)
        compiled = load_and_compile(fpath)
    np.testing.assert_allclose(compiled([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
    """Check that graph optimization (opt_level) folds x / x to 1.

    Without optimization 0 / 0 evaluates to NaN; with opt_level=1 the
    x / x expression is algebraically simplified to 1.
    """
    @jit.trace(symbolic=True, opt_level=0)
    def f(x):
        return x / x

    @jit.trace(symbolic=True, opt_level=1)
    def g(x):
        return x / x

    out = f([0.0]).numpy()
    # Fix: the original used `if out == out: raise`, where the bare
    # `raise` outside an except block yields RuntimeError ("No active
    # exception to re-raise") instead of a descriptive test failure.
    assert np.isnan(out), "expected NaN from 0/0 without optimization"
    # With graph optimization, x / x is folded to 1.
    out = g([0.0]).numpy()
    assert out == 1
def test_json_prof():
    """Tracing with profiling=True records profiler data."""
    @jit.trace(symbolic=True, profiling=True)
    def square(x):
        return x * x

    square([0.0])
    profile = square.get_profile()
    assert profile.get("profiler")
def test_capture_dump():
    """A tensor captured from the enclosing scope survives dump/reload."""
    factor = tensor(7)

    @jit.trace(symbolic=True)
    def scale(x):
        return x * factor

    scale.trace(0)
    with mkstemp() as fpath:
        scale.dump(fpath)
        compiled = load_and_compile(fpath)
    np.testing.assert_allclose(compiled([1, 2, 3]), [7, 14, 21])
def test_dump_volatile():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
cg, _, outputs = mgb.load_comp_graph_from_file(out)
(out,) = outputs
assert mgb.cgtools.get_type(mgb.cgtools.get_inputs(out)[1]) == "SharedDeviceTensor"
def test_shape_tracing():
for symbolic in [False, True]:
@jit.trace(symbolic=symbolic)
def f(x):
a, b = x.shape
return a * b
assert f(np.zeros([4, 3], dtype="float32")).item() == 12
assert f(np.zeros([6, 4], dtype="float32")).item() == 24
def test_shape_infer():
@jit.trace(symbolic=True)
def f(x):
a, b = x.shape
return sum(x[i] for i in range(a))
x = np.random.randn(3, 10).astype("float32")
assertTensorClose(f(x), x.sum(0))
x = np.random.randn(4, 10).astype("float32")
assertTensorClose(f(x), x[:3].sum(0))
def test_dump_bn_fused():
class ConvBNReLU(M.Sequential):
def __init__(self):
super(ConvBNReLU, self).__init__(
M.Conv2d(3, 4, 3, 1, 1, groups=1, bias=False),
M.BatchNorm2d(4),
M.ReLU(),
)
net = ConvBNReLU()
net.eval()
@jit.trace(symbolic=True)
def fun(data):
return net(data)
data = np.random.random([1, 3, 224, 224]).astype(np.float32)
fun.trace(data)
with mkstemp() as out:
fun.dump(out, optimize_for_inference=True)
cg, _, outputs = mgb.load_comp_graph_from_file(out)
(out,) = outputs
inputs = mgb.cgtools.get_inputs(out)
assert len(inputs) == 2 and (
mgb.cgtools.get_type(inputs[0]) == "MultipleDeviceTensorHolder"
and mgb.cgtools.get_type(inputs[1]) == "ConvolutionForward"
)
# Simply verify the options passed down
def test_sublinear():
config = | SublinearMemoryConfig(genetic_nr_iter=10) | megengine.jit.SublinearMemoryConfig |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
f.trace(0)
def test_dump():
@jit.trace(symbolic=True)
def f(x, y):
return x * y
f.trace(0, 0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
@jit.trace(symbolic=True, opt_level=0)
def f(x):
return x / x
@jit.trace(symbolic=True, opt_level=1)
def g(x):
return x / x
out = f([0.0]).numpy()
# out is nan
if out == out:
raise
# with gopt, x / x returns 1
out = g([0.0]).numpy()
assert out == 1
def test_json_prof():
@jit.trace(symbolic=True, profiling=True)
def f(x):
return x * x
f([0.0])
out = f.get_profile()
assert out.get("profiler")
def test_capture_dump():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3]), [7, 14, 21])
def test_dump_volatile():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
cg, _, outputs = mgb.load_comp_graph_from_file(out)
(out,) = outputs
assert mgb.cgtools.get_type(mgb.cgtools.get_inputs(out)[1]) == "SharedDeviceTensor"
def test_shape_tracing():
for symbolic in [False, True]:
@jit.trace(symbolic=symbolic)
def f(x):
a, b = x.shape
return a * b
assert f(np.zeros([4, 3], dtype="float32")).item() == 12
assert f(np.zeros([6, 4], dtype="float32")).item() == 24
def test_shape_infer():
@jit.trace(symbolic=True)
def f(x):
a, b = x.shape
return sum(x[i] for i in range(a))
x = np.random.randn(3, 10).astype("float32")
assertTensorClose(f(x), x.sum(0))
x = np.random.randn(4, 10).astype("float32")
assertTensorClose(f(x), x[:3].sum(0))
def test_dump_bn_fused():
class ConvBNReLU(M.Sequential):
def __init__(self):
super(ConvBNReLU, self).__init__(
M.Conv2d(3, 4, 3, 1, 1, groups=1, bias=False),
M.BatchNorm2d(4),
M.ReLU(),
)
net = ConvBNReLU()
net.eval()
@jit.trace(symbolic=True)
def fun(data):
return net(data)
data = np.random.random([1, 3, 224, 224]).astype(np.float32)
fun.trace(data)
with mkstemp() as out:
fun.dump(out, optimize_for_inference=True)
cg, _, outputs = mgb.load_comp_graph_from_file(out)
(out,) = outputs
inputs = mgb.cgtools.get_inputs(out)
assert len(inputs) == 2 and (
mgb.cgtools.get_type(inputs[0]) == "MultipleDeviceTensorHolder"
and mgb.cgtools.get_type(inputs[1]) == "ConvolutionForward"
)
# Simply verify the options passed down
def test_sublinear():
config = SublinearMemoryConfig(genetic_nr_iter=10)
@ | jit.trace(symbolic=True, sublinear_memory_config=config) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
f.trace(0)
def test_dump():
@jit.trace(symbolic=True)
def f(x, y):
return x * y
f.trace(0, 0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
@jit.trace(symbolic=True, opt_level=0)
def f(x):
return x / x
@jit.trace(symbolic=True, opt_level=1)
def g(x):
return x / x
out = f([0.0]).numpy()
# out is nan
if out == out:
raise
# with gopt, x / x returns 1
out = g([0.0]).numpy()
assert out == 1
def test_json_prof():
@jit.trace(symbolic=True, profiling=True)
def f(x):
return x * x
f([0.0])
out = f.get_profile()
assert out.get("profiler")
def test_capture_dump():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3]), [7, 14, 21])
def test_dump_volatile():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
cg, _, outputs = | mgb.load_comp_graph_from_file(out) | megengine._internal.load_comp_graph_from_file |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
f.trace(0)
def test_dump():
@jit.trace(symbolic=True)
def f(x, y):
return x * y
f.trace(0, 0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
@jit.trace(symbolic=True, opt_level=0)
def f(x):
return x / x
@jit.trace(symbolic=True, opt_level=1)
def g(x):
return x / x
out = f([0.0]).numpy()
# out is nan
if out == out:
raise
# with gopt, x / x returns 1
out = g([0.0]).numpy()
assert out == 1
def test_json_prof():
@jit.trace(symbolic=True, profiling=True)
def f(x):
return x * x
f([0.0])
out = f.get_profile()
assert out.get("profiler")
def test_capture_dump():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3]), [7, 14, 21])
def test_dump_volatile():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
cg, _, outputs = mgb.load_comp_graph_from_file(out)
(out,) = outputs
assert mgb.cgtools.get_type(mgb.cgtools.get_inputs(out)[1]) == "SharedDeviceTensor"
def test_shape_tracing():
for symbolic in [False, True]:
@ | jit.trace(symbolic=symbolic) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
f.trace(0)
def test_dump():
@jit.trace(symbolic=True)
def f(x, y):
return x * y
f.trace(0, 0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
@jit.trace(symbolic=True, opt_level=0)
def f(x):
return x / x
@jit.trace(symbolic=True, opt_level=1)
def g(x):
return x / x
out = f([0.0]).numpy()
# out is nan
if out == out:
raise
# with gopt, x / x returns 1
out = g([0.0]).numpy()
assert out == 1
def test_json_prof():
@jit.trace(symbolic=True, profiling=True)
def f(x):
return x * x
f([0.0])
out = f.get_profile()
assert out.get("profiler")
def test_capture_dump():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3]), [7, 14, 21])
def test_dump_volatile():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
cg, _, outputs = mgb.load_comp_graph_from_file(out)
(out,) = outputs
assert mgb.cgtools.get_type(mgb.cgtools.get_inputs(out)[1]) == "SharedDeviceTensor"
def test_shape_tracing():
for symbolic in [False, True]:
@jit.trace(symbolic=symbolic)
def f(x):
a, b = x.shape
return a * b
assert f(np.zeros([4, 3], dtype="float32")).item() == 12
assert f(np.zeros([6, 4], dtype="float32")).item() == 24
def test_shape_infer():
@jit.trace(symbolic=True)
def f(x):
a, b = x.shape
return sum(x[i] for i in range(a))
x = np.random.randn(3, 10).astype("float32")
assertTensorClose(f(x), x.sum(0))
x = np.random.randn(4, 10).astype("float32")
assertTensorClose(f(x), x[:3].sum(0))
def test_dump_bn_fused():
class ConvBNReLU(M.Sequential):
def __init__(self):
super(ConvBNReLU, self).__init__(
M.Conv2d(3, 4, 3, 1, 1, groups=1, bias=False),
M.BatchNorm2d(4),
M.ReLU(),
)
net = ConvBNReLU()
net.eval()
@jit.trace(symbolic=True)
def fun(data):
return net(data)
data = np.random.random([1, 3, 224, 224]).astype(np.float32)
fun.trace(data)
with mkstemp() as out:
fun.dump(out, optimize_for_inference=True)
cg, _, outputs = | mgb.load_comp_graph_from_file(out) | megengine._internal.load_comp_graph_from_file |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor( | mgb.opr.assert_equal(x._symvar, x._symvar + 1) | megengine._internal.opr.assert_equal |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor( | mgb.opr.assert_equal(x._symvar, x._symvar + 1) | megengine._internal.opr.assert_equal |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
f.trace(0)
def test_dump():
@jit.trace(symbolic=True)
def f(x, y):
return x * y
f.trace(0, 0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
@jit.trace(symbolic=True, opt_level=0)
def f(x):
return x / x
@jit.trace(symbolic=True, opt_level=1)
def g(x):
return x / x
out = f([0.0]).numpy()
# out is nan
if out == out:
raise
# with gopt, x / x returns 1
out = g([0.0]).numpy()
assert out == 1
def test_json_prof():
@jit.trace(symbolic=True, profiling=True)
def f(x):
return x * x
f([0.0])
out = f.get_profile()
assert out.get("profiler")
def test_capture_dump():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3]), [7, 14, 21])
def test_dump_volatile():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
cg, _, outputs = mgb.load_comp_graph_from_file(out)
(out,) = outputs
assert mgb.cgtools.get_type( | mgb.cgtools.get_inputs(out) | megengine._internal.cgtools.get_inputs |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
f.trace(0)
def test_dump():
@jit.trace(symbolic=True)
def f(x, y):
return x * y
f.trace(0, 0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
@jit.trace(symbolic=True, opt_level=0)
def f(x):
return x / x
@jit.trace(symbolic=True, opt_level=1)
def g(x):
return x / x
out = f([0.0]).numpy()
# out is nan
if out == out:
raise
# with gopt, x / x returns 1
out = g([0.0]).numpy()
assert out == 1
def test_json_prof():
@jit.trace(symbolic=True, profiling=True)
def f(x):
return x * x
f([0.0])
out = f.get_profile()
assert out.get("profiler")
def test_capture_dump():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3]), [7, 14, 21])
def test_dump_volatile():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
cg, _, outputs = mgb.load_comp_graph_from_file(out)
(out,) = outputs
assert mgb.cgtools.get_type(mgb.cgtools.get_inputs(out)[1]) == "SharedDeviceTensor"
def test_shape_tracing():
for symbolic in [False, True]:
@jit.trace(symbolic=symbolic)
def f(x):
a, b = x.shape
return a * b
assert f(np.zeros([4, 3], dtype="float32")).item() == 12
assert f(np.zeros([6, 4], dtype="float32")).item() == 24
def test_shape_infer():
@jit.trace(symbolic=True)
def f(x):
a, b = x.shape
return sum(x[i] for i in range(a))
x = np.random.randn(3, 10).astype("float32")
assertTensorClose(f(x), x.sum(0))
x = np.random.randn(4, 10).astype("float32")
assertTensorClose(f(x), x[:3].sum(0))
def test_dump_bn_fused():
class ConvBNReLU(M.Sequential):
def __init__(self):
super(ConvBNReLU, self).__init__(
| M.Conv2d(3, 4, 3, 1, 1, groups=1, bias=False) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
f.trace(0)
def test_dump():
@jit.trace(symbolic=True)
def f(x, y):
return x * y
f.trace(0, 0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
@jit.trace(symbolic=True, opt_level=0)
def f(x):
return x / x
@jit.trace(symbolic=True, opt_level=1)
def g(x):
return x / x
out = f([0.0]).numpy()
# out is nan
if out == out:
raise
# with gopt, x / x returns 1
out = g([0.0]).numpy()
assert out == 1
def test_json_prof():
@jit.trace(symbolic=True, profiling=True)
def f(x):
return x * x
f([0.0])
out = f.get_profile()
assert out.get("profiler")
def test_capture_dump():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3]), [7, 14, 21])
def test_dump_volatile():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
cg, _, outputs = mgb.load_comp_graph_from_file(out)
(out,) = outputs
assert mgb.cgtools.get_type(mgb.cgtools.get_inputs(out)[1]) == "SharedDeviceTensor"
def test_shape_tracing():
for symbolic in [False, True]:
@jit.trace(symbolic=symbolic)
def f(x):
a, b = x.shape
return a * b
assert f(np.zeros([4, 3], dtype="float32")).item() == 12
assert f(np.zeros([6, 4], dtype="float32")).item() == 24
def test_shape_infer():
@jit.trace(symbolic=True)
def f(x):
a, b = x.shape
return sum(x[i] for i in range(a))
x = np.random.randn(3, 10).astype("float32")
assertTensorClose(f(x), x.sum(0))
x = np.random.randn(4, 10).astype("float32")
assertTensorClose(f(x), x[:3].sum(0))
def test_dump_bn_fused():
class ConvBNReLU(M.Sequential):
def __init__(self):
super(ConvBNReLU, self).__init__(
M.Conv2d(3, 4, 3, 1, 1, groups=1, bias=False),
| M.BatchNorm2d(4) | megengine.module.BatchNorm2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
f.trace(0)
def test_dump():
@jit.trace(symbolic=True)
def f(x, y):
return x * y
f.trace(0, 0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
@jit.trace(symbolic=True, opt_level=0)
def f(x):
return x / x
@jit.trace(symbolic=True, opt_level=1)
def g(x):
return x / x
out = f([0.0]).numpy()
# out is nan
if out == out:
raise
# with gopt, x / x returns 1
out = g([0.0]).numpy()
assert out == 1
def test_json_prof():
@jit.trace(symbolic=True, profiling=True)
def f(x):
return x * x
f([0.0])
out = f.get_profile()
assert out.get("profiler")
def test_capture_dump():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3]), [7, 14, 21])
def test_dump_volatile():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
cg, _, outputs = mgb.load_comp_graph_from_file(out)
(out,) = outputs
assert mgb.cgtools.get_type(mgb.cgtools.get_inputs(out)[1]) == "SharedDeviceTensor"
def test_shape_tracing():
for symbolic in [False, True]:
@jit.trace(symbolic=symbolic)
def f(x):
a, b = x.shape
return a * b
assert f(np.zeros([4, 3], dtype="float32")).item() == 12
assert f(np.zeros([6, 4], dtype="float32")).item() == 24
def test_shape_infer():
@jit.trace(symbolic=True)
def f(x):
a, b = x.shape
return sum(x[i] for i in range(a))
x = np.random.randn(3, 10).astype("float32")
assertTensorClose(f(x), x.sum(0))
x = np.random.randn(4, 10).astype("float32")
assertTensorClose(f(x), x[:3].sum(0))
def test_dump_bn_fused():
class ConvBNReLU(M.Sequential):
def __init__(self):
super(ConvBNReLU, self).__init__(
M.Conv2d(3, 4, 3, 1, 1, groups=1, bias=False),
M.BatchNorm2d(4),
| M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
f.trace(0)
def test_dump():
@jit.trace(symbolic=True)
def f(x, y):
return x * y
f.trace(0, 0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
@jit.trace(symbolic=True, opt_level=0)
def f(x):
return x / x
@jit.trace(symbolic=True, opt_level=1)
def g(x):
return x / x
out = f([0.0]).numpy()
# out is nan
if out == out:
raise
# with gopt, x / x returns 1
out = g([0.0]).numpy()
assert out == 1
def test_json_prof():
@jit.trace(symbolic=True, profiling=True)
def f(x):
return x * x
f([0.0])
out = f.get_profile()
assert out.get("profiler")
def test_capture_dump():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3]), [7, 14, 21])
def test_dump_volatile():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
cg, _, outputs = mgb.load_comp_graph_from_file(out)
(out,) = outputs
assert mgb.cgtools.get_type(mgb.cgtools.get_inputs(out)[1]) == "SharedDeviceTensor"
def test_shape_tracing():
for symbolic in [False, True]:
@jit.trace(symbolic=symbolic)
def f(x):
a, b = x.shape
return a * b
assert f(np.zeros([4, 3], dtype="float32")).item() == 12
assert f(np.zeros([6, 4], dtype="float32")).item() == 24
def test_shape_infer():
@jit.trace(symbolic=True)
def f(x):
a, b = x.shape
return sum(x[i] for i in range(a))
x = np.random.randn(3, 10).astype("float32")
assertTensorClose(f(x), x.sum(0))
x = np.random.randn(4, 10).astype("float32")
assertTensorClose(f(x), x[:3].sum(0))
def test_dump_bn_fused():
class ConvBNReLU(M.Sequential):
def __init__(self):
super(ConvBNReLU, self).__init__(
M.Conv2d(3, 4, 3, 1, 1, groups=1, bias=False),
M.BatchNorm2d(4),
M.ReLU(),
)
net = ConvBNReLU()
net.eval()
@jit.trace(symbolic=True)
def fun(data):
return net(data)
data = np.random.random([1, 3, 224, 224]).astype(np.float32)
fun.trace(data)
with mkstemp() as out:
fun.dump(out, optimize_for_inference=True)
cg, _, outputs = mgb.load_comp_graph_from_file(out)
(out,) = outputs
inputs = mgb.cgtools.get_inputs(out)
assert len(inputs) == 2 and (
| mgb.cgtools.get_type(inputs[0]) | megengine._internal.cgtools.get_type |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
f.trace(0)
def test_dump():
@jit.trace(symbolic=True)
def f(x, y):
return x * y
f.trace(0, 0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
@jit.trace(symbolic=True, opt_level=0)
def f(x):
return x / x
@jit.trace(symbolic=True, opt_level=1)
def g(x):
return x / x
out = f([0.0]).numpy()
# out is nan
if out == out:
raise
# with gopt, x / x returns 1
out = g([0.0]).numpy()
assert out == 1
def test_json_prof():
@jit.trace(symbolic=True, profiling=True)
def f(x):
return x * x
f([0.0])
out = f.get_profile()
assert out.get("profiler")
def test_capture_dump():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3]), [7, 14, 21])
def test_dump_volatile():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
cg, _, outputs = mgb.load_comp_graph_from_file(out)
(out,) = outputs
assert mgb.cgtools.get_type(mgb.cgtools.get_inputs(out)[1]) == "SharedDeviceTensor"
def test_shape_tracing():
for symbolic in [False, True]:
@jit.trace(symbolic=symbolic)
def f(x):
a, b = x.shape
return a * b
assert f(np.zeros([4, 3], dtype="float32")).item() == 12
assert f(np.zeros([6, 4], dtype="float32")).item() == 24
def test_shape_infer():
@jit.trace(symbolic=True)
def f(x):
a, b = x.shape
return sum(x[i] for i in range(a))
x = np.random.randn(3, 10).astype("float32")
assertTensorClose(f(x), x.sum(0))
x = np.random.randn(4, 10).astype("float32")
assertTensorClose(f(x), x[:3].sum(0))
def test_dump_bn_fused():
class ConvBNReLU(M.Sequential):
def __init__(self):
super(ConvBNReLU, self).__init__(
M.Conv2d(3, 4, 3, 1, 1, groups=1, bias=False),
M.BatchNorm2d(4),
M.ReLU(),
)
net = ConvBNReLU()
net.eval()
@jit.trace(symbolic=True)
def fun(data):
return net(data)
data = np.random.random([1, 3, 224, 224]).astype(np.float32)
fun.trace(data)
with mkstemp() as out:
fun.dump(out, optimize_for_inference=True)
cg, _, outputs = mgb.load_comp_graph_from_file(out)
(out,) = outputs
inputs = mgb.cgtools.get_inputs(out)
assert len(inputs) == 2 and (
mgb.cgtools.get_type(inputs[0]) == "MultipleDeviceTensorHolder"
and | mgb.cgtools.get_type(inputs[1]) | megengine._internal.cgtools.get_type |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing
import time
# pylint: disable=import-error
import model as snet_model
import megengine
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
logging = | megengine.logger.get_logger() | megengine.logger.get_logger |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing
import time
# pylint: disable=import-error
import model as snet_model
import megengine
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x1_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-m", "--model", metavar="PKL", default=None, help="path to model checkpoint"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
_, valid_dataloader = build_dataset(args)
# build model
model = snet_model.__dict__[args.arch](pretrained=args.model is None)
if args.model is not None:
logging.info("load from checkpoint %s", args.model)
checkpoint = megengine.load(args.model)
if "state_dict" in checkpoint:
state_dict = checkpoint["state_dict"]
model.load_state_dict(state_dict)
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
model.eval()
_, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
logging.info(
"Test Acc@1 %.3f, Acc@5 %.3f",
valid_acc1,
valid_acc5,
)
def valid(func, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
n = image.shape[0]
loss, acc1, acc5 = func(image, label)
objs.update(loss.item(), n)
top1.update(100 * acc1.item(), n)
top5.update(100 * acc5.item(), n)
clck.update(time.time() - t, n)
t = time.time()
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info("Test step %d, %s %s %s %s", step, objs, top1, top5, clck)
return objs.avg, top1.avg, top5.avg
def build_dataset(args):
train_dataloader = None
valid_dataset = | data.dataset.ImageNet(args.data, train=False) | megengine.data.dataset.ImageNet |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing
import time
# pylint: disable=import-error
import model as snet_model
import megengine
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x1_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-m", "--model", metavar="PKL", default=None, help="path to model checkpoint"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = | dist.Server(port=args.dist_port) | megengine.distributed.Server |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing
import time
# pylint: disable=import-error
import model as snet_model
import megengine
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x1_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-m", "--model", metavar="PKL", default=None, help="path to model checkpoint"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
_, valid_dataloader = build_dataset(args)
# build model
model = snet_model.__dict__[args.arch](pretrained=args.model is None)
if args.model is not None:
logging.info("load from checkpoint %s", args.model)
checkpoint = | megengine.load(args.model) | megengine.load |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing
import time
# pylint: disable=import-error
import model as snet_model
import megengine
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x1_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-m", "--model", metavar="PKL", default=None, help="path to model checkpoint"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
_, valid_dataloader = build_dataset(args)
# build model
model = snet_model.__dict__[args.arch](pretrained=args.model is None)
if args.model is not None:
logging.info("load from checkpoint %s", args.model)
checkpoint = megengine.load(args.model)
if "state_dict" in checkpoint:
state_dict = checkpoint["state_dict"]
model.load_state_dict(state_dict)
def valid_step(image, label):
logits = model(image)
loss = | F.nn.cross_entropy(logits, label) | megengine.functional.nn.cross_entropy |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing
import time
# pylint: disable=import-error
import model as snet_model
import megengine
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x1_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-m", "--model", metavar="PKL", default=None, help="path to model checkpoint"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
_, valid_dataloader = build_dataset(args)
# build model
model = snet_model.__dict__[args.arch](pretrained=args.model is None)
if args.model is not None:
logging.info("load from checkpoint %s", args.model)
checkpoint = megengine.load(args.model)
if "state_dict" in checkpoint:
state_dict = checkpoint["state_dict"]
model.load_state_dict(state_dict)
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = | F.topk_accuracy(logits, label, topk=(1, 5)) | megengine.functional.topk_accuracy |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing
import time
# pylint: disable=import-error
import model as snet_model
import megengine
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x1_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-m", "--model", metavar="PKL", default=None, help="path to model checkpoint"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
_, valid_dataloader = build_dataset(args)
# build model
model = snet_model.__dict__[args.arch](pretrained=args.model is None)
if args.model is not None:
logging.info("load from checkpoint %s", args.model)
checkpoint = megengine.load(args.model)
if "state_dict" in checkpoint:
state_dict = checkpoint["state_dict"]
model.load_state_dict(state_dict)
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
model.eval()
_, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
logging.info(
"Test Acc@1 %.3f, Acc@5 %.3f",
valid_acc1,
valid_acc5,
)
def valid(func, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
image = | megengine.tensor(image, dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing
import time
# pylint: disable=import-error
import model as snet_model
import megengine
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x1_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-m", "--model", metavar="PKL", default=None, help="path to model checkpoint"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
_, valid_dataloader = build_dataset(args)
# build model
model = snet_model.__dict__[args.arch](pretrained=args.model is None)
if args.model is not None:
logging.info("load from checkpoint %s", args.model)
checkpoint = megengine.load(args.model)
if "state_dict" in checkpoint:
state_dict = checkpoint["state_dict"]
model.load_state_dict(state_dict)
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
model.eval()
_, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
logging.info(
"Test Acc@1 %.3f, Acc@5 %.3f",
valid_acc1,
valid_acc5,
)
def valid(func, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
image = megengine.tensor(image, dtype="float32")
label = | megengine.tensor(label, dtype="int32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing
import time
# pylint: disable=import-error
import model as snet_model
import megengine
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x1_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-m", "--model", metavar="PKL", default=None, help="path to model checkpoint"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing
import time
# pylint: disable=import-error
import model as snet_model
import megengine
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
logging = megengine.logger.get_logger()
def main():
    """Parse CLI arguments and launch one validation worker per local device.

    The master node (rank 0) also starts the distributed rendezvous server
    that all workers connect to.
    """
    parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
    parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
    parser.add_argument(
        "-a",
        "--arch",
        default="shufflenet_v2_x1_0",
        help="model architecture (default: shufflenet_v2_x1_0)",
    )
    parser.add_argument(
        "-n",
        "--ngpus",
        default=None,
        type=int,
        help="number of GPUs per node (default: None, use all available GPUs)",
    )
    parser.add_argument(
        "-m", "--model", metavar="PKL", default=None, help="path to model checkpoint"
    )
    parser.add_argument("-j", "--workers", default=2, type=int)
    parser.add_argument(
        "-p",
        "--print-freq",
        default=20,
        type=int,
        metavar="N",
        # fix: help text said "(default: 10)" but the actual default is 20
        help="print frequency (default: 20)",
    )
    parser.add_argument("--dist-addr", default="localhost")
    parser.add_argument("--dist-port", default=23456, type=int)
    parser.add_argument("--world-size", default=1, type=int)
    parser.add_argument("--rank", default=0, type=int)
    args = parser.parse_args()
    # create server if is master
    if args.rank <= 0:
        server = dist.Server(port=args.dist_port)  # pylint: disable=unused-variable  # noqa: F841
    # query device count in a helper subprocess (presumably to keep the parent
    # process free of device initialization before forking — TODO confirm)
    with multiprocessing.Pool(1) as pool:
        ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
    if args.ngpus:
        ngpus_per_node = args.ngpus
    # launch one worker process per local device; global rank is derived from
    # the node rank and the local device index
    procs = []
    for local_rank in range(ngpus_per_node):
        p = multiprocessing.Process(
            target=worker,
            kwargs=dict(
                rank=args.rank * ngpus_per_node + local_rank,
                world_size=args.world_size * ngpus_per_node,
                ngpus_per_node=ngpus_per_node,
                args=args,
            ),
        )
        p.start()
        procs.append(p)
    # join processes
    for p in procs:
        p.join()
def worker(rank, world_size, ngpus_per_node, args):
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), | dist.get_world_size() | megengine.distributed.get_world_size |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing
import time
# pylint: disable=import-error
import model as snet_model
import megengine
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
logging = megengine.logger.get_logger()
def main():
    """Parse CLI arguments and launch one validation worker per local device.

    The master node (rank 0) also starts the distributed rendezvous server
    that all workers connect to.
    """
    parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
    parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
    parser.add_argument(
        "-a",
        "--arch",
        default="shufflenet_v2_x1_0",
        help="model architecture (default: shufflenet_v2_x1_0)",
    )
    parser.add_argument(
        "-n",
        "--ngpus",
        default=None,
        type=int,
        help="number of GPUs per node (default: None, use all available GPUs)",
    )
    parser.add_argument(
        "-m", "--model", metavar="PKL", default=None, help="path to model checkpoint"
    )
    parser.add_argument("-j", "--workers", default=2, type=int)
    parser.add_argument(
        "-p",
        "--print-freq",
        default=20,
        type=int,
        metavar="N",
        # fix: help text said "(default: 10)" but the actual default is 20
        help="print frequency (default: 20)",
    )
    parser.add_argument("--dist-addr", default="localhost")
    parser.add_argument("--dist-port", default=23456, type=int)
    parser.add_argument("--world-size", default=1, type=int)
    parser.add_argument("--rank", default=0, type=int)
    args = parser.parse_args()
    # create server if is master
    if args.rank <= 0:
        server = dist.Server(port=args.dist_port)  # pylint: disable=unused-variable  # noqa: F841
    # query device count in a helper subprocess (presumably to keep the parent
    # process free of device initialization before forking — TODO confirm)
    with multiprocessing.Pool(1) as pool:
        ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
    if args.ngpus:
        ngpus_per_node = args.ngpus
    # launch one worker process per local device; global rank is derived from
    # the node rank and the local device index
    procs = []
    for local_rank in range(ngpus_per_node):
        p = multiprocessing.Process(
            target=worker,
            kwargs=dict(
                rank=args.rank * ngpus_per_node + local_rank,
                world_size=args.world_size * ngpus_per_node,
                ngpus_per_node=ngpus_per_node,
                args=args,
            ),
        )
        p.start()
        procs.append(p)
    # join processes
    for p in procs:
        p.join()
def worker(rank, world_size, ngpus_per_node, args):
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
_, valid_dataloader = build_dataset(args)
# build model
model = snet_model.__dict__[args.arch](pretrained=args.model is None)
if args.model is not None:
logging.info("load from checkpoint %s", args.model)
checkpoint = megengine.load(args.model)
if "state_dict" in checkpoint:
state_dict = checkpoint["state_dict"]
model.load_state_dict(state_dict)
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = | F.distributed.all_reduce_sum(loss) | megengine.functional.distributed.all_reduce_sum |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing
import time
# pylint: disable=import-error
import model as snet_model
import megengine
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
logging = megengine.logger.get_logger()
def main():
    """Parse CLI arguments and launch one validation worker per local device.

    The master node (rank 0) also starts the distributed rendezvous server
    that all workers connect to.
    """
    parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
    parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
    parser.add_argument(
        "-a",
        "--arch",
        default="shufflenet_v2_x1_0",
        help="model architecture (default: shufflenet_v2_x1_0)",
    )
    parser.add_argument(
        "-n",
        "--ngpus",
        default=None,
        type=int,
        help="number of GPUs per node (default: None, use all available GPUs)",
    )
    parser.add_argument(
        "-m", "--model", metavar="PKL", default=None, help="path to model checkpoint"
    )
    parser.add_argument("-j", "--workers", default=2, type=int)
    parser.add_argument(
        "-p",
        "--print-freq",
        default=20,
        type=int,
        metavar="N",
        # fix: help text said "(default: 10)" but the actual default is 20
        help="print frequency (default: 20)",
    )
    parser.add_argument("--dist-addr", default="localhost")
    parser.add_argument("--dist-port", default=23456, type=int)
    parser.add_argument("--world-size", default=1, type=int)
    parser.add_argument("--rank", default=0, type=int)
    args = parser.parse_args()
    # create server if is master
    if args.rank <= 0:
        server = dist.Server(port=args.dist_port)  # pylint: disable=unused-variable  # noqa: F841
    # query device count in a helper subprocess (presumably to keep the parent
    # process free of device initialization before forking — TODO confirm)
    with multiprocessing.Pool(1) as pool:
        ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
    if args.ngpus:
        ngpus_per_node = args.ngpus
    # launch one worker process per local device; global rank is derived from
    # the node rank and the local device index
    procs = []
    for local_rank in range(ngpus_per_node):
        p = multiprocessing.Process(
            target=worker,
            kwargs=dict(
                rank=args.rank * ngpus_per_node + local_rank,
                world_size=args.world_size * ngpus_per_node,
                ngpus_per_node=ngpus_per_node,
                args=args,
            ),
        )
        p.start()
        procs.append(p)
    # join processes
    for p in procs:
        p.join()
def worker(rank, world_size, ngpus_per_node, args):
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
_, valid_dataloader = build_dataset(args)
# build model
model = snet_model.__dict__[args.arch](pretrained=args.model is None)
if args.model is not None:
logging.info("load from checkpoint %s", args.model)
checkpoint = megengine.load(args.model)
if "state_dict" in checkpoint:
state_dict = checkpoint["state_dict"]
model.load_state_dict(state_dict)
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = | F.distributed.all_reduce_sum(acc1) | megengine.functional.distributed.all_reduce_sum |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing
import time
# pylint: disable=import-error
import model as snet_model
import megengine
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
logging = megengine.logger.get_logger()
def main():
    """Parse CLI arguments and launch one validation worker per local device.

    The master node (rank 0) also starts the distributed rendezvous server
    that all workers connect to.
    """
    parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
    parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
    parser.add_argument(
        "-a",
        "--arch",
        default="shufflenet_v2_x1_0",
        help="model architecture (default: shufflenet_v2_x1_0)",
    )
    parser.add_argument(
        "-n",
        "--ngpus",
        default=None,
        type=int,
        help="number of GPUs per node (default: None, use all available GPUs)",
    )
    parser.add_argument(
        "-m", "--model", metavar="PKL", default=None, help="path to model checkpoint"
    )
    parser.add_argument("-j", "--workers", default=2, type=int)
    parser.add_argument(
        "-p",
        "--print-freq",
        default=20,
        type=int,
        metavar="N",
        # fix: help text said "(default: 10)" but the actual default is 20
        help="print frequency (default: 20)",
    )
    parser.add_argument("--dist-addr", default="localhost")
    parser.add_argument("--dist-port", default=23456, type=int)
    parser.add_argument("--world-size", default=1, type=int)
    parser.add_argument("--rank", default=0, type=int)
    args = parser.parse_args()
    # create server if is master
    if args.rank <= 0:
        server = dist.Server(port=args.dist_port)  # pylint: disable=unused-variable  # noqa: F841
    # query device count in a helper subprocess (presumably to keep the parent
    # process free of device initialization before forking — TODO confirm)
    with multiprocessing.Pool(1) as pool:
        ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
    if args.ngpus:
        ngpus_per_node = args.ngpus
    # launch one worker process per local device; global rank is derived from
    # the node rank and the local device index
    procs = []
    for local_rank in range(ngpus_per_node):
        p = multiprocessing.Process(
            target=worker,
            kwargs=dict(
                rank=args.rank * ngpus_per_node + local_rank,
                world_size=args.world_size * ngpus_per_node,
                ngpus_per_node=ngpus_per_node,
                args=args,
            ),
        )
        p.start()
        procs.append(p)
    # join processes
    for p in procs:
        p.join()
def worker(rank, world_size, ngpus_per_node, args):
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
_, valid_dataloader = build_dataset(args)
# build model
model = snet_model.__dict__[args.arch](pretrained=args.model is None)
if args.model is not None:
logging.info("load from checkpoint %s", args.model)
checkpoint = megengine.load(args.model)
if "state_dict" in checkpoint:
state_dict = checkpoint["state_dict"]
model.load_state_dict(state_dict)
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = | F.distributed.all_reduce_sum(acc5) | megengine.functional.distributed.all_reduce_sum |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing
import time
# pylint: disable=import-error
import model as snet_model
import megengine
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
logging = megengine.logger.get_logger()
def main():
    """Parse CLI arguments and launch one validation worker per local device.

    The master node (rank 0) also starts the distributed rendezvous server
    that all workers connect to.
    """
    parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
    parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
    parser.add_argument(
        "-a",
        "--arch",
        default="shufflenet_v2_x1_0",
        help="model architecture (default: shufflenet_v2_x1_0)",
    )
    parser.add_argument(
        "-n",
        "--ngpus",
        default=None,
        type=int,
        help="number of GPUs per node (default: None, use all available GPUs)",
    )
    parser.add_argument(
        "-m", "--model", metavar="PKL", default=None, help="path to model checkpoint"
    )
    parser.add_argument("-j", "--workers", default=2, type=int)
    parser.add_argument(
        "-p",
        "--print-freq",
        default=20,
        type=int,
        metavar="N",
        # fix: help text said "(default: 10)" but the actual default is 20
        help="print frequency (default: 20)",
    )
    parser.add_argument("--dist-addr", default="localhost")
    parser.add_argument("--dist-port", default=23456, type=int)
    parser.add_argument("--world-size", default=1, type=int)
    parser.add_argument("--rank", default=0, type=int)
    args = parser.parse_args()
    # create server if is master
    if args.rank <= 0:
        server = dist.Server(port=args.dist_port)  # pylint: disable=unused-variable  # noqa: F841
    # query device count in a helper subprocess (presumably to keep the parent
    # process free of device initialization before forking — TODO confirm)
    with multiprocessing.Pool(1) as pool:
        ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
    if args.ngpus:
        ngpus_per_node = args.ngpus
    # launch one worker process per local device; global rank is derived from
    # the node rank and the local device index
    procs = []
    for local_rank in range(ngpus_per_node):
        p = multiprocessing.Process(
            target=worker,
            kwargs=dict(
                rank=args.rank * ngpus_per_node + local_rank,
                world_size=args.world_size * ngpus_per_node,
                ngpus_per_node=ngpus_per_node,
                args=args,
            ),
        )
        p.start()
        procs.append(p)
    # join processes
    for p in procs:
        p.join()
def worker(rank, world_size, ngpus_per_node, args):
    """Per-process validation entry point.

    Initializes the process group (multi-process runs only), builds the
    validation dataloader and model, optionally restores a checkpoint, then
    evaluates and logs top-1/top-5 accuracy.

    Args:
        rank: global rank of this process.
        world_size: total number of processes.
        ngpus_per_node: devices per node; used to pick the local device.
        args: parsed command-line namespace from ``main``.
    """
    # init process group
    if world_size > 1:
        dist.init_process_group(
            master_ip=args.dist_addr,
            port=args.dist_port,
            world_size=world_size,
            rank=rank,
            device=rank % ngpus_per_node,
            backend="nccl",
        )
        logging.info(
            "init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
        )
    # build dataset
    _, valid_dataloader = build_dataset(args)
    # build model; download pretrained weights only when no checkpoint is given
    model = snet_model.__dict__[args.arch](pretrained=args.model is None)
    if args.model is not None:
        logging.info("load from checkpoint %s", args.model)
        checkpoint = megengine.load(args.model)
        # fix: `state_dict` was unbound (NameError) when the checkpoint had no
        # "state_dict" key; fall back to treating the whole file as a state dict.
        state_dict = checkpoint["state_dict"] if "state_dict" in checkpoint else checkpoint
        model.load_state_dict(state_dict)

    def valid_step(image, label):
        # one forward pass; returns (loss, top1, top5) averaged across ranks
        logits = model(image)
        loss = F.nn.cross_entropy(logits, label)
        acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
        # calculate mean values over all processes
        if world_size > 1:
            loss = F.distributed.all_reduce_sum(loss) / world_size
            acc1 = F.distributed.all_reduce_sum(acc1) / world_size
            acc5 = F.distributed.all_reduce_sum(acc5) / world_size
        return loss, acc1, acc5

    model.eval()
    _, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
    logging.info(
        "Test Acc@1 %.3f, Acc@5 %.3f",
        valid_acc1,
        valid_acc5,
    )
def valid(func, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
n = image.shape[0]
loss, acc1, acc5 = func(image, label)
objs.update(loss.item(), n)
top1.update(100 * acc1.item(), n)
top5.update(100 * acc5.item(), n)
clck.update(time.time() - t, n)
t = time.time()
if step % args.print_freq == 0 and | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing
import time
# pylint: disable=import-error
import model as snet_model
import megengine
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
logging = megengine.logger.get_logger()
def main():
    """Parse CLI arguments and launch one validation worker per local device.

    The master node (rank 0) also starts the distributed rendezvous server
    that all workers connect to.
    """
    parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
    parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
    parser.add_argument(
        "-a",
        "--arch",
        default="shufflenet_v2_x1_0",
        help="model architecture (default: shufflenet_v2_x1_0)",
    )
    parser.add_argument(
        "-n",
        "--ngpus",
        default=None,
        type=int,
        help="number of GPUs per node (default: None, use all available GPUs)",
    )
    parser.add_argument(
        "-m", "--model", metavar="PKL", default=None, help="path to model checkpoint"
    )
    parser.add_argument("-j", "--workers", default=2, type=int)
    parser.add_argument(
        "-p",
        "--print-freq",
        default=20,
        type=int,
        metavar="N",
        # fix: help text said "(default: 10)" but the actual default is 20
        help="print frequency (default: 20)",
    )
    parser.add_argument("--dist-addr", default="localhost")
    parser.add_argument("--dist-port", default=23456, type=int)
    parser.add_argument("--world-size", default=1, type=int)
    parser.add_argument("--rank", default=0, type=int)
    args = parser.parse_args()
    # create server if is master
    if args.rank <= 0:
        server = dist.Server(port=args.dist_port)  # pylint: disable=unused-variable  # noqa: F841
    # query device count in a helper subprocess (presumably to keep the parent
    # process free of device initialization before forking — TODO confirm)
    with multiprocessing.Pool(1) as pool:
        ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
    if args.ngpus:
        ngpus_per_node = args.ngpus
    # launch one worker process per local device; global rank is derived from
    # the node rank and the local device index
    procs = []
    for local_rank in range(ngpus_per_node):
        p = multiprocessing.Process(
            target=worker,
            kwargs=dict(
                rank=args.rank * ngpus_per_node + local_rank,
                world_size=args.world_size * ngpus_per_node,
                ngpus_per_node=ngpus_per_node,
                args=args,
            ),
        )
        p.start()
        procs.append(p)
    # join processes
    for p in procs:
        p.join()
def worker(rank, world_size, ngpus_per_node, args):
    """Per-process validation entry point.

    Initializes the process group (multi-process runs only), builds the
    validation dataloader and model, optionally restores a checkpoint, then
    evaluates and logs top-1/top-5 accuracy.

    Args:
        rank: global rank of this process.
        world_size: total number of processes.
        ngpus_per_node: devices per node; used to pick the local device.
        args: parsed command-line namespace from ``main``.
    """
    # init process group
    if world_size > 1:
        dist.init_process_group(
            master_ip=args.dist_addr,
            port=args.dist_port,
            world_size=world_size,
            rank=rank,
            device=rank % ngpus_per_node,
            backend="nccl",
        )
        logging.info(
            "init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
        )
    # build dataset
    _, valid_dataloader = build_dataset(args)
    # build model; download pretrained weights only when no checkpoint is given
    model = snet_model.__dict__[args.arch](pretrained=args.model is None)
    if args.model is not None:
        logging.info("load from checkpoint %s", args.model)
        checkpoint = megengine.load(args.model)
        # fix: `state_dict` was unbound (NameError) when the checkpoint had no
        # "state_dict" key; fall back to treating the whole file as a state dict.
        state_dict = checkpoint["state_dict"] if "state_dict" in checkpoint else checkpoint
        model.load_state_dict(state_dict)

    def valid_step(image, label):
        # one forward pass; returns (loss, top1, top5) averaged across ranks
        logits = model(image)
        loss = F.nn.cross_entropy(logits, label)
        acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
        # calculate mean values over all processes
        if world_size > 1:
            loss = F.distributed.all_reduce_sum(loss) / world_size
            acc1 = F.distributed.all_reduce_sum(acc1) / world_size
            acc5 = F.distributed.all_reduce_sum(acc5) / world_size
        return loss, acc1, acc5

    model.eval()
    _, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
    logging.info(
        "Test Acc@1 %.3f, Acc@5 %.3f",
        valid_acc1,
        valid_acc5,
    )
def valid(func, data_queue, args):
    """Run one full pass over ``data_queue`` with ``func``.

    Returns:
        Tuple of running averages ``(loss, acc1, acc5)`` over all samples.
    """
    objs = AverageMeter("Loss")
    top1 = AverageMeter("Acc@1")
    top5 = AverageMeter("Acc@5")
    clck = AverageMeter("Time")
    tick = time.time()
    for step, (raw_image, raw_label) in enumerate(data_queue):
        batch_image = megengine.tensor(raw_image, dtype="float32")
        batch_label = megengine.tensor(raw_label, dtype="int32")
        batch_size = batch_image.shape[0]
        loss, acc1, acc5 = func(batch_image, batch_label)
        # accumulate sample-weighted running averages
        objs.update(loss.item(), batch_size)
        top1.update(100 * acc1.item(), batch_size)
        top5.update(100 * acc5.item(), batch_size)
        clck.update(time.time() - tick, batch_size)
        tick = time.time()
        # only rank 0 logs, every `print_freq` steps
        if step % args.print_freq == 0 and dist.get_rank() == 0:
            logging.info("Test step %d, %s %s %s %s", step, objs, top1, top5, clck)
    return objs.avg, top1.avg, top5.avg
def build_dataset(args):
train_dataloader = None
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_dataloader = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[
| T.Resize(256) | megengine.data.transform.Resize |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing
import time
# pylint: disable=import-error
import model as snet_model
import megengine
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
logging = megengine.logger.get_logger()
def main():
    """Parse CLI arguments and launch one validation worker per local device.

    The master node (rank 0) also starts the distributed rendezvous server
    that all workers connect to.
    """
    parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
    parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
    parser.add_argument(
        "-a",
        "--arch",
        default="shufflenet_v2_x1_0",
        help="model architecture (default: shufflenet_v2_x1_0)",
    )
    parser.add_argument(
        "-n",
        "--ngpus",
        default=None,
        type=int,
        help="number of GPUs per node (default: None, use all available GPUs)",
    )
    parser.add_argument(
        "-m", "--model", metavar="PKL", default=None, help="path to model checkpoint"
    )
    parser.add_argument("-j", "--workers", default=2, type=int)
    parser.add_argument(
        "-p",
        "--print-freq",
        default=20,
        type=int,
        metavar="N",
        # fix: help text said "(default: 10)" but the actual default is 20
        help="print frequency (default: 20)",
    )
    parser.add_argument("--dist-addr", default="localhost")
    parser.add_argument("--dist-port", default=23456, type=int)
    parser.add_argument("--world-size", default=1, type=int)
    parser.add_argument("--rank", default=0, type=int)
    args = parser.parse_args()
    # create server if is master
    if args.rank <= 0:
        server = dist.Server(port=args.dist_port)  # pylint: disable=unused-variable  # noqa: F841
    # query device count in a helper subprocess (presumably to keep the parent
    # process free of device initialization before forking — TODO confirm)
    with multiprocessing.Pool(1) as pool:
        ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
    if args.ngpus:
        ngpus_per_node = args.ngpus
    # launch one worker process per local device; global rank is derived from
    # the node rank and the local device index
    procs = []
    for local_rank in range(ngpus_per_node):
        p = multiprocessing.Process(
            target=worker,
            kwargs=dict(
                rank=args.rank * ngpus_per_node + local_rank,
                world_size=args.world_size * ngpus_per_node,
                ngpus_per_node=ngpus_per_node,
                args=args,
            ),
        )
        p.start()
        procs.append(p)
    # join processes
    for p in procs:
        p.join()
def worker(rank, world_size, ngpus_per_node, args):
    """Per-process validation entry point.

    Initializes the process group (multi-process runs only), builds the
    validation dataloader and model, optionally restores a checkpoint, then
    evaluates and logs top-1/top-5 accuracy.

    Args:
        rank: global rank of this process.
        world_size: total number of processes.
        ngpus_per_node: devices per node; used to pick the local device.
        args: parsed command-line namespace from ``main``.
    """
    # init process group
    if world_size > 1:
        dist.init_process_group(
            master_ip=args.dist_addr,
            port=args.dist_port,
            world_size=world_size,
            rank=rank,
            device=rank % ngpus_per_node,
            backend="nccl",
        )
        logging.info(
            "init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
        )
    # build dataset
    _, valid_dataloader = build_dataset(args)
    # build model; download pretrained weights only when no checkpoint is given
    model = snet_model.__dict__[args.arch](pretrained=args.model is None)
    if args.model is not None:
        logging.info("load from checkpoint %s", args.model)
        checkpoint = megengine.load(args.model)
        # fix: `state_dict` was unbound (NameError) when the checkpoint had no
        # "state_dict" key; fall back to treating the whole file as a state dict.
        state_dict = checkpoint["state_dict"] if "state_dict" in checkpoint else checkpoint
        model.load_state_dict(state_dict)

    def valid_step(image, label):
        # one forward pass; returns (loss, top1, top5) averaged across ranks
        logits = model(image)
        loss = F.nn.cross_entropy(logits, label)
        acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
        # calculate mean values over all processes
        if world_size > 1:
            loss = F.distributed.all_reduce_sum(loss) / world_size
            acc1 = F.distributed.all_reduce_sum(acc1) / world_size
            acc5 = F.distributed.all_reduce_sum(acc5) / world_size
        return loss, acc1, acc5

    model.eval()
    _, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
    logging.info(
        "Test Acc@1 %.3f, Acc@5 %.3f",
        valid_acc1,
        valid_acc5,
    )
def valid(func, data_queue, args):
    """Run one full pass over ``data_queue`` with ``func``.

    Returns:
        Tuple of running averages ``(loss, acc1, acc5)`` over all samples.
    """
    objs = AverageMeter("Loss")
    top1 = AverageMeter("Acc@1")
    top5 = AverageMeter("Acc@5")
    clck = AverageMeter("Time")
    tick = time.time()
    for step, (raw_image, raw_label) in enumerate(data_queue):
        batch_image = megengine.tensor(raw_image, dtype="float32")
        batch_label = megengine.tensor(raw_label, dtype="int32")
        batch_size = batch_image.shape[0]
        loss, acc1, acc5 = func(batch_image, batch_label)
        # accumulate sample-weighted running averages
        objs.update(loss.item(), batch_size)
        top1.update(100 * acc1.item(), batch_size)
        top5.update(100 * acc5.item(), batch_size)
        clck.update(time.time() - tick, batch_size)
        tick = time.time()
        # only rank 0 logs, every `print_freq` steps
        if step % args.print_freq == 0 and dist.get_rank() == 0:
            logging.info("Test step %d, %s %s %s %s", step, objs, top1, top5, clck)
    return objs.avg, top1.avg, top5.avg
def build_dataset(args):
train_dataloader = None
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_dataloader = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[
T.Resize(256),
| T.CenterCrop(224) | megengine.data.transform.CenterCrop |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing
import time
# pylint: disable=import-error
import model as snet_model
import megengine
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
logging = megengine.logger.get_logger()
def main():
    """Parse CLI arguments and launch one validation worker per local device.

    The master node (rank 0) also starts the distributed rendezvous server
    that all workers connect to.
    """
    parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
    parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
    parser.add_argument(
        "-a",
        "--arch",
        default="shufflenet_v2_x1_0",
        help="model architecture (default: shufflenet_v2_x1_0)",
    )
    parser.add_argument(
        "-n",
        "--ngpus",
        default=None,
        type=int,
        help="number of GPUs per node (default: None, use all available GPUs)",
    )
    parser.add_argument(
        "-m", "--model", metavar="PKL", default=None, help="path to model checkpoint"
    )
    parser.add_argument("-j", "--workers", default=2, type=int)
    parser.add_argument(
        "-p",
        "--print-freq",
        default=20,
        type=int,
        metavar="N",
        # fix: help text said "(default: 10)" but the actual default is 20
        help="print frequency (default: 20)",
    )
    parser.add_argument("--dist-addr", default="localhost")
    parser.add_argument("--dist-port", default=23456, type=int)
    parser.add_argument("--world-size", default=1, type=int)
    parser.add_argument("--rank", default=0, type=int)
    args = parser.parse_args()
    # create server if is master
    if args.rank <= 0:
        server = dist.Server(port=args.dist_port)  # pylint: disable=unused-variable  # noqa: F841
    # query device count in a helper subprocess (presumably to keep the parent
    # process free of device initialization before forking — TODO confirm)
    with multiprocessing.Pool(1) as pool:
        ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
    if args.ngpus:
        ngpus_per_node = args.ngpus
    # launch one worker process per local device; global rank is derived from
    # the node rank and the local device index
    procs = []
    for local_rank in range(ngpus_per_node):
        p = multiprocessing.Process(
            target=worker,
            kwargs=dict(
                rank=args.rank * ngpus_per_node + local_rank,
                world_size=args.world_size * ngpus_per_node,
                ngpus_per_node=ngpus_per_node,
                args=args,
            ),
        )
        p.start()
        procs.append(p)
    # join processes
    for p in procs:
        p.join()
def worker(rank, world_size, ngpus_per_node, args):
    """Per-process validation entry point.

    Initializes the process group (multi-process runs only), builds the
    validation dataloader and model, optionally restores a checkpoint, then
    evaluates and logs top-1/top-5 accuracy.

    Args:
        rank: global rank of this process.
        world_size: total number of processes.
        ngpus_per_node: devices per node; used to pick the local device.
        args: parsed command-line namespace from ``main``.
    """
    # init process group
    if world_size > 1:
        dist.init_process_group(
            master_ip=args.dist_addr,
            port=args.dist_port,
            world_size=world_size,
            rank=rank,
            device=rank % ngpus_per_node,
            backend="nccl",
        )
        logging.info(
            "init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
        )
    # build dataset
    _, valid_dataloader = build_dataset(args)
    # build model; download pretrained weights only when no checkpoint is given
    model = snet_model.__dict__[args.arch](pretrained=args.model is None)
    if args.model is not None:
        logging.info("load from checkpoint %s", args.model)
        checkpoint = megengine.load(args.model)
        # fix: `state_dict` was unbound (NameError) when the checkpoint had no
        # "state_dict" key; fall back to treating the whole file as a state dict.
        state_dict = checkpoint["state_dict"] if "state_dict" in checkpoint else checkpoint
        model.load_state_dict(state_dict)

    def valid_step(image, label):
        # one forward pass; returns (loss, top1, top5) averaged across ranks
        logits = model(image)
        loss = F.nn.cross_entropy(logits, label)
        acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
        # calculate mean values over all processes
        if world_size > 1:
            loss = F.distributed.all_reduce_sum(loss) / world_size
            acc1 = F.distributed.all_reduce_sum(acc1) / world_size
            acc5 = F.distributed.all_reduce_sum(acc5) / world_size
        return loss, acc1, acc5

    model.eval()
    _, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
    logging.info(
        "Test Acc@1 %.3f, Acc@5 %.3f",
        valid_acc1,
        valid_acc5,
    )
def valid(func, data_queue, args):
    """Run one full pass over ``data_queue`` with ``func``.

    Returns:
        Tuple of running averages ``(loss, acc1, acc5)`` over all samples.
    """
    objs = AverageMeter("Loss")
    top1 = AverageMeter("Acc@1")
    top5 = AverageMeter("Acc@5")
    clck = AverageMeter("Time")
    tick = time.time()
    for step, (raw_image, raw_label) in enumerate(data_queue):
        batch_image = megengine.tensor(raw_image, dtype="float32")
        batch_label = megengine.tensor(raw_label, dtype="int32")
        batch_size = batch_image.shape[0]
        loss, acc1, acc5 = func(batch_image, batch_label)
        # accumulate sample-weighted running averages
        objs.update(loss.item(), batch_size)
        top1.update(100 * acc1.item(), batch_size)
        top5.update(100 * acc5.item(), batch_size)
        clck.update(time.time() - tick, batch_size)
        tick = time.time()
        # only rank 0 logs, every `print_freq` steps
        if step % args.print_freq == 0 and dist.get_rank() == 0:
            logging.info("Test step %d, %s %s %s %s", step, objs, top1, top5, clck)
    return objs.avg, top1.avg, top5.avg
def build_dataset(args):
train_dataloader = None
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_dataloader = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[
T.Resize(256),
T.CenterCrop(224),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
| T.ToMode("CHW") | megengine.data.transform.ToMode |
import math
import numpy
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .layer_norm import LayerNorm
class DecoderLayer(M.Module):
"""Single decoder layer module."""
    def __init__(
        self,
        size,
        self_attn,
        src_attn,
        feed_forward,
        dropout_rate,
        normalize_before=True,
    ):
        """Construct an DecoderLayer object.

        Args:
            size (int): feature dimension of the layer's input and output.
            self_attn: module used as self-attention over the target sequence
                (called as ``self_attn(q, k, v, mask)`` in ``forward``).
            src_attn: module used as encoder-decoder attention over the
                encoder memory.
            feed_forward: position-wise feed-forward module.
            dropout_rate (float): dropout probability applied to the output
                of each sub-layer before the residual addition.
            normalize_before (bool): if True, apply layer normalization
                before each sub-layer (pre-norm); otherwise after (post-norm).
        """
        super(DecoderLayer, self).__init__()
        self.size = size
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward = feed_forward
        # one LayerNorm per sub-layer: self-attn, src-attn, feed-forward
        self.norm1 = LayerNorm(size)
        self.norm2 = LayerNorm(size)
        self.norm3 = LayerNorm(size)
        # a single dropout module shared by all three sub-layer outputs
        self.dropout = M.dropout.Dropout(dropout_rate)
        self.normalize_before = normalize_before
def forward(self, tgt, tgt_mask, memory, memory_mask, cache=None):
"""Compute decoded features.
Args:
tgt (megengine.Tensor): decoded previous target features (batch, max_time_out, size)
tgt_mask (megengine.Tensor): mask for x (batch, max_time_out)
memory (megengine.Tensor): encoded source features (batch, max_time_in, size)
memory_mask (megengine.Tensor): mask for memory (batch, max_time_in)
cache (megengine.Tensor): cached output (batch, max_time_out-1, size)
"""
residual = tgt
if self.normalize_before:
tgt = self.norm1(tgt)
if cache is None:
tgt_q = tgt
tgt_q_mask = tgt_mask
else:
# compute only the last frame query keeping dim: max_time_out -> 1
assert cache.shape == (
tgt.shape[0],
tgt.shape[1] - 1,
self.size,
), f"{cache.shape} == {(tgt.shape[0], tgt.shape[1] - 1, self.size)}"
tgt_q = tgt[:, -1:, :]
residual = residual[:, -1:, :]
tgt_q_mask = None
if tgt_mask is not None:
tgt_q_mask = tgt_mask[:, -1:, :]
x = residual + self.dropout(self.self_attn(tgt_q, tgt, tgt, tgt_q_mask))
if not self.normalize_before:
x = self.norm1(x)
residual = x
if self.normalize_before:
x = self.norm2(x)
x = residual + self.dropout(self.src_attn(x, memory, memory, memory_mask))
if not self.normalize_before:
x = self.norm2(x)
residual = x
if self.normalize_before:
x = self.norm3(x)
x = residual + self.dropout(self.feed_forward(x))
if not self.normalize_before:
x = self.norm3(x)
if cache is not None:
x = | F.concat([cache, x], axis=1) | megengine.functional.concat |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import argparse
from loguru import logger
import multiprocessing as mp
import megengine as mge
import megengine.distributed as dist
from yolox.core import Trainer
from yolox.exp import get_exp
from yolox.utils import configure_nccl
def make_parser():
    """Create the command-line argument parser for YOLOX training.

    Returns:
        argparse.ArgumentParser: parser covering experiment selection,
        batch/device configuration, checkpointing and distributed options.
    """
    parser = argparse.ArgumentParser("YOLOX train parser")
    parser.add_argument("-expn", "--experiment-name", type=str, default=None)
    parser.add_argument("-n", "--name", type=str, default=None, help="model name")
    parser.add_argument("-b", "--batch-size", type=int, default=64, help="batch size")
    parser.add_argument(
        "-d", "--devices", default=None, type=int, help="device for training"
    )
    parser.add_argument(
        "-f",
        "--exp_file",
        default=None,
        type=str,
        # typo fixed: was "plz input your expriment description file"
        help="please input your experiment description file",
    )
    parser.add_argument(
        "--resume", default=False, action="store_true", help="resume training"
    )
    parser.add_argument("-c", "--ckpt", default=None, type=str, help="checkpoint file")
    parser.add_argument(
        "--num_machine", default=1, type=int, help="num of node for training"
    )
    parser.add_argument(
        "--machine_rank", default=0, type=int, help="node rank for multi-node training"
    )
    parser.add_argument(
        "--sync_level", type=int, default=None, help="config sync level, use 0 to debug"
    )
    # trailing positional: everything left over is forwarded as config overrides
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    return parser
@logger.catch
def main(exp, args):
if not args.experiment_name:
args.experiment_name = exp.exp_name
# set environment variables for distributed training
configure_nccl()
# enable dtr to avoid CUDA OOM
| mge.dtr.enable() | megengine.dtr.enable |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import argparse
from loguru import logger
import multiprocessing as mp
import megengine as mge
import megengine.distributed as dist
from yolox.core import Trainer
from yolox.exp import get_exp
from yolox.utils import configure_nccl
def make_parser():
    """Create the command-line argument parser for YOLOX training.

    Returns:
        argparse.ArgumentParser: parser covering experiment selection,
        batch/device configuration, checkpointing and distributed options.
    """
    parser = argparse.ArgumentParser("YOLOX train parser")
    parser.add_argument("-expn", "--experiment-name", type=str, default=None)
    parser.add_argument("-n", "--name", type=str, default=None, help="model name")
    parser.add_argument("-b", "--batch-size", type=int, default=64, help="batch size")
    parser.add_argument(
        "-d", "--devices", default=None, type=int, help="device for training"
    )
    parser.add_argument(
        "-f",
        "--exp_file",
        default=None,
        type=str,
        # typo fixed: was "plz input your expriment description file"
        help="please input your experiment description file",
    )
    parser.add_argument(
        "--resume", default=False, action="store_true", help="resume training"
    )
    parser.add_argument("-c", "--ckpt", default=None, type=str, help="checkpoint file")
    parser.add_argument(
        "--num_machine", default=1, type=int, help="num of node for training"
    )
    parser.add_argument(
        "--machine_rank", default=0, type=int, help="node rank for multi-node training"
    )
    parser.add_argument(
        "--sync_level", type=int, default=None, help="config sync level, use 0 to debug"
    )
    # trailing positional: everything left over is forwarded as config overrides
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    return parser
@logger.catch
def main(exp, args):
    """Entry point for a single YOLOX training process.

    Args:
        exp: experiment description object (provides ``exp_name`` and config).
        args: parsed command-line arguments from ``make_parser``.
    """
    if not args.experiment_name:
        args.experiment_name = exp.exp_name
    # set environment variables for distributed training
    configure_nccl()
    # enable dtr to avoid CUDA OOM
    mge.dtr.enable()
    if args.sync_level is not None:
        # NOTE: use sync_level = 0 to debug mge error
        from megengine.core._imperative_rt.core2 import config_async_level

        # typo fixed in log message: was "Using aysnc_level {}"
        logger.info("Using async_level {}".format(args.sync_level))
        config_async_level(args.sync_level)
    trainer = Trainer(exp, args)
    trainer.train()
if __name__ == "__main__":
args = make_parser().parse_args()
exp = get_exp(args.exp_file, args.name)
exp.merge(args.opts)
mp.set_start_method("spawn")
num_gpus = | dist.helper.get_device_count_by_fork("gpu") | megengine.distributed.helper.get_device_count_by_fork |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import argparse
from loguru import logger
import multiprocessing as mp
import megengine as mge
import megengine.distributed as dist
from yolox.core import Trainer
from yolox.exp import get_exp
from yolox.utils import configure_nccl
def make_parser():
    """Create the command-line argument parser for YOLOX training.

    Returns:
        argparse.ArgumentParser: parser covering experiment selection,
        batch/device configuration, checkpointing and distributed options.
    """
    parser = argparse.ArgumentParser("YOLOX train parser")
    parser.add_argument("-expn", "--experiment-name", type=str, default=None)
    parser.add_argument("-n", "--name", type=str, default=None, help="model name")
    parser.add_argument("-b", "--batch-size", type=int, default=64, help="batch size")
    parser.add_argument(
        "-d", "--devices", default=None, type=int, help="device for training"
    )
    parser.add_argument(
        "-f",
        "--exp_file",
        default=None,
        type=str,
        # typo fixed: was "plz input your expriment description file"
        help="please input your experiment description file",
    )
    parser.add_argument(
        "--resume", default=False, action="store_true", help="resume training"
    )
    parser.add_argument("-c", "--ckpt", default=None, type=str, help="checkpoint file")
    parser.add_argument(
        "--num_machine", default=1, type=int, help="num of node for training"
    )
    parser.add_argument(
        "--machine_rank", default=0, type=int, help="node rank for multi-node training"
    )
    parser.add_argument(
        "--sync_level", type=int, default=None, help="config sync level, use 0 to debug"
    )
    # trailing positional: everything left over is forwarded as config overrides
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    return parser
@logger.catch
def main(exp, args):
if not args.experiment_name:
args.experiment_name = exp.exp_name
# set environment variables for distributed training
configure_nccl()
# enable dtr to avoid CUDA OOM
mge.dtr.enable()
if args.sync_level is not None:
# NOTE: use sync_level = 0 to debug mge error
from megengine.core._imperative_rt.core2 import config_async_level
logger.info("Using aysnc_level {}".format(args.sync_level))
| config_async_level(args.sync_level) | megengine.core._imperative_rt.core2.config_async_level |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import argparse
from loguru import logger
import multiprocessing as mp
import megengine as mge
import megengine.distributed as dist
from yolox.core import Trainer
from yolox.exp import get_exp
from yolox.utils import configure_nccl
def make_parser():
    """Create the command-line argument parser for YOLOX training.

    Returns:
        argparse.ArgumentParser: parser covering experiment selection,
        batch/device configuration, checkpointing and distributed options.
    """
    parser = argparse.ArgumentParser("YOLOX train parser")
    parser.add_argument("-expn", "--experiment-name", type=str, default=None)
    parser.add_argument("-n", "--name", type=str, default=None, help="model name")
    parser.add_argument("-b", "--batch-size", type=int, default=64, help="batch size")
    parser.add_argument(
        "-d", "--devices", default=None, type=int, help="device for training"
    )
    parser.add_argument(
        "-f",
        "--exp_file",
        default=None,
        type=str,
        # typo fixed: was "plz input your expriment description file"
        help="please input your experiment description file",
    )
    parser.add_argument(
        "--resume", default=False, action="store_true", help="resume training"
    )
    parser.add_argument("-c", "--ckpt", default=None, type=str, help="checkpoint file")
    parser.add_argument(
        "--num_machine", default=1, type=int, help="num of node for training"
    )
    parser.add_argument(
        "--machine_rank", default=0, type=int, help="node rank for multi-node training"
    )
    parser.add_argument(
        "--sync_level", type=int, default=None, help="config sync level, use 0 to debug"
    )
    # trailing positional: everything left over is forwarded as config overrides
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    return parser
@logger.catch
def main(exp, args):
    """Entry point for a single YOLOX training process.

    Args:
        exp: experiment description object (provides ``exp_name`` and config).
        args: parsed command-line arguments from ``make_parser``.
    """
    if not args.experiment_name:
        args.experiment_name = exp.exp_name
    # set environment variables for distributed training
    configure_nccl()
    # enable dtr to avoid CUDA OOM
    mge.dtr.enable()
    if args.sync_level is not None:
        # NOTE: use sync_level = 0 to debug mge error
        from megengine.core._imperative_rt.core2 import config_async_level

        # typo fixed in log message: was "Using aysnc_level {}"
        logger.info("Using async_level {}".format(args.sync_level))
        config_async_level(args.sync_level)
    trainer = Trainer(exp, args)
    trainer.train()
if __name__ == "__main__":
args = make_parser().parse_args()
exp = get_exp(args.exp_file, args.name)
exp.merge(args.opts)
mp.set_start_method("spawn")
num_gpus = dist.helper.get_device_count_by_fork("gpu")
if args.devices is None:
args.devices = num_gpus
assert args.devices <= num_gpus
if args.devices > 1:
train = | dist.launcher(main, n_gpus=args.devices) | megengine.distributed.launcher |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
    """Assemble the FINet training losses.

    Args:
        endpoints: dict of network outputs; reads "all_pose_pair",
            "all_R_feats", "all_t_feats", "all_dropout_R_feats" and
            "all_dropout_t_feats".
        params: config object providing loss_type, loss_alpha1..loss_alpha4
            and per-iteration margin values.

    Returns:
        dict of named scalar losses, with the grand sum under "total".

    Raises:
        NotImplementedError: if ``params.loss_type`` is not "finet".
    """
    if params.loss_type != "finet":
        raise NotImplementedError
    loss = {}
    triplet_loss = {}
    for idx, pose_pair in enumerate(endpoints["all_pose_pair"]):
        # regression loss on the pose: L1 on quaternion, L2 on translation
        loss[f"quat_{idx}"] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
        loss[f"translate_{idx}"] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
        if idx < 2:
            # transformation sensitivity loss (TSL), first two iterations only
            R_feats = endpoints["all_R_feats"][idx]
            t_feats = endpoints["all_t_feats"][idx]
            # rotation-feature triplet terms
            r_pos = F.nn.square_loss(t_feats[0], t_feats[1])
            r_neg = F.nn.square_loss(R_feats[0], R_feats[1])
            triplet_loss[f"R_feats_triplet_pos_{idx}"] = r_pos
            triplet_loss[f"R_feats_triplet_neg_{idx}"] = r_neg
            loss[f"R_feats_triplet_{idx}"] = (
                F.clip(-r_neg + params.margin[idx], lower=0.0) + r_pos
            ) * params.loss_alpha3
            # translation-feature triplet terms
            t_pos = F.nn.square_loss(R_feats[0], R_feats[2])
            t_neg = F.nn.square_loss(t_feats[0], t_feats[2])
            triplet_loss[f"t_feats_triplet_pos_{idx}"] = t_pos
            triplet_loss[f"t_feats_triplet_neg_{idx}"] = t_neg
            loss[f"t_feats_triplet_{idx}"] = (
                F.clip(-t_neg + params.margin[idx], lower=0.0) + t_pos
            ) * params.loss_alpha3
        # point-wise feature dropout loss (PFDL)
        drop_R = endpoints["all_dropout_R_feats"][idx]
        drop_t = endpoints["all_dropout_t_feats"][idx]
        loss[f"src_R_feats_dropout_{idx}"] = F.nn.square_loss(drop_R[0], drop_R[1]) * params.loss_alpha4
        loss[f"ref_R_feats_dropout_{idx}"] = F.nn.square_loss(drop_R[2], drop_R[3]) * params.loss_alpha4
        loss[f"src_t_feats_dropout_{idx}"] = F.nn.square_loss(drop_t[0], drop_t[1]) * params.loss_alpha4
        loss[f"ref_t_feats_dropout_{idx}"] = F.nn.square_loss(drop_t[2], drop_t[3]) * params.loss_alpha4
    # grand total over every recorded loss term (insertion order preserved)
    loss["total"] = F.sum(F.concat(list(loss.values())))
    return loss
def compute_metrics(endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = | F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1) | megengine.functional.mean |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
    """Assemble the FINet training losses.

    Args:
        endpoints: dict of network outputs; reads "all_pose_pair",
            "all_R_feats", "all_t_feats", "all_dropout_R_feats" and
            "all_dropout_t_feats".
        params: config object providing loss_type, loss_alpha1..loss_alpha4
            and per-iteration margin values.

    Returns:
        dict of named scalar losses, with the grand sum under "total".

    Raises:
        NotImplementedError: if ``params.loss_type`` is not "finet".
    """
    if params.loss_type != "finet":
        raise NotImplementedError
    loss = {}
    triplet_loss = {}
    for idx, pose_pair in enumerate(endpoints["all_pose_pair"]):
        # regression loss on the pose: L1 on quaternion, L2 on translation
        loss[f"quat_{idx}"] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
        loss[f"translate_{idx}"] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
        if idx < 2:
            # transformation sensitivity loss (TSL), first two iterations only
            R_feats = endpoints["all_R_feats"][idx]
            t_feats = endpoints["all_t_feats"][idx]
            # rotation-feature triplet terms
            r_pos = F.nn.square_loss(t_feats[0], t_feats[1])
            r_neg = F.nn.square_loss(R_feats[0], R_feats[1])
            triplet_loss[f"R_feats_triplet_pos_{idx}"] = r_pos
            triplet_loss[f"R_feats_triplet_neg_{idx}"] = r_neg
            loss[f"R_feats_triplet_{idx}"] = (
                F.clip(-r_neg + params.margin[idx], lower=0.0) + r_pos
            ) * params.loss_alpha3
            # translation-feature triplet terms
            t_pos = F.nn.square_loss(R_feats[0], R_feats[2])
            t_neg = F.nn.square_loss(t_feats[0], t_feats[2])
            triplet_loss[f"t_feats_triplet_pos_{idx}"] = t_pos
            triplet_loss[f"t_feats_triplet_neg_{idx}"] = t_neg
            loss[f"t_feats_triplet_{idx}"] = (
                F.clip(-t_neg + params.margin[idx], lower=0.0) + t_pos
            ) * params.loss_alpha3
        # point-wise feature dropout loss (PFDL)
        drop_R = endpoints["all_dropout_R_feats"][idx]
        drop_t = endpoints["all_dropout_t_feats"][idx]
        loss[f"src_R_feats_dropout_{idx}"] = F.nn.square_loss(drop_R[0], drop_R[1]) * params.loss_alpha4
        loss[f"ref_R_feats_dropout_{idx}"] = F.nn.square_loss(drop_R[2], drop_R[3]) * params.loss_alpha4
        loss[f"src_t_feats_dropout_{idx}"] = F.nn.square_loss(drop_t[0], drop_t[1]) * params.loss_alpha4
        loss[f"ref_t_feats_dropout_{idx}"] = F.nn.square_loss(drop_t[2], drop_t[3]) * params.loss_alpha4
    # grand total over every recorded loss term (insertion order preserved)
    loss["total"] = F.sum(F.concat(list(loss.values())))
    return loss
def compute_metrics(endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = | F.mean((t_gt - t_pred)**2, axis=1) | megengine.functional.mean |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
    """Assemble the FINet training losses.

    Args:
        endpoints: dict of network outputs; reads "all_pose_pair",
            "all_R_feats", "all_t_feats", "all_dropout_R_feats" and
            "all_dropout_t_feats".
        params: config object providing loss_type, loss_alpha1..loss_alpha4
            and per-iteration margin values.

    Returns:
        dict of named scalar losses, with the grand sum under "total".

    Raises:
        NotImplementedError: if ``params.loss_type`` is not "finet".
    """
    if params.loss_type != "finet":
        raise NotImplementedError
    loss = {}
    triplet_loss = {}
    for idx, pose_pair in enumerate(endpoints["all_pose_pair"]):
        # regression loss on the pose: L1 on quaternion, L2 on translation
        loss[f"quat_{idx}"] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
        loss[f"translate_{idx}"] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
        if idx < 2:
            # transformation sensitivity loss (TSL), first two iterations only
            R_feats = endpoints["all_R_feats"][idx]
            t_feats = endpoints["all_t_feats"][idx]
            # rotation-feature triplet terms
            r_pos = F.nn.square_loss(t_feats[0], t_feats[1])
            r_neg = F.nn.square_loss(R_feats[0], R_feats[1])
            triplet_loss[f"R_feats_triplet_pos_{idx}"] = r_pos
            triplet_loss[f"R_feats_triplet_neg_{idx}"] = r_neg
            loss[f"R_feats_triplet_{idx}"] = (
                F.clip(-r_neg + params.margin[idx], lower=0.0) + r_pos
            ) * params.loss_alpha3
            # translation-feature triplet terms
            t_pos = F.nn.square_loss(R_feats[0], R_feats[2])
            t_neg = F.nn.square_loss(t_feats[0], t_feats[2])
            triplet_loss[f"t_feats_triplet_pos_{idx}"] = t_pos
            triplet_loss[f"t_feats_triplet_neg_{idx}"] = t_neg
            loss[f"t_feats_triplet_{idx}"] = (
                F.clip(-t_neg + params.margin[idx], lower=0.0) + t_pos
            ) * params.loss_alpha3
        # point-wise feature dropout loss (PFDL)
        drop_R = endpoints["all_dropout_R_feats"][idx]
        drop_t = endpoints["all_dropout_t_feats"][idx]
        loss[f"src_R_feats_dropout_{idx}"] = F.nn.square_loss(drop_R[0], drop_R[1]) * params.loss_alpha4
        loss[f"ref_R_feats_dropout_{idx}"] = F.nn.square_loss(drop_R[2], drop_R[3]) * params.loss_alpha4
        loss[f"src_t_feats_dropout_{idx}"] = F.nn.square_loss(drop_t[0], drop_t[1]) * params.loss_alpha4
        loss[f"ref_t_feats_dropout_{idx}"] = F.nn.square_loss(drop_t[2], drop_t[3]) * params.loss_alpha4
    # grand total over every recorded loss term (insertion order preserved)
    loss["total"] = F.sum(F.concat(list(loss.values())))
    return loss
def compute_metrics(endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = F.mean((t_gt - t_pred)**2, axis=1)
t_mae = F.mean(F.abs(t_gt - t_pred), axis=1)
r_mse = | F.mean(r_mse) | megengine.functional.mean |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
    """Assemble the FINet training losses.

    Args:
        endpoints: dict of network outputs; reads "all_pose_pair",
            "all_R_feats", "all_t_feats", "all_dropout_R_feats" and
            "all_dropout_t_feats".
        params: config object providing loss_type, loss_alpha1..loss_alpha4
            and per-iteration margin values.

    Returns:
        dict of named scalar losses, with the grand sum under "total".

    Raises:
        NotImplementedError: if ``params.loss_type`` is not "finet".
    """
    if params.loss_type != "finet":
        raise NotImplementedError
    loss = {}
    triplet_loss = {}
    for idx, pose_pair in enumerate(endpoints["all_pose_pair"]):
        # regression loss on the pose: L1 on quaternion, L2 on translation
        loss[f"quat_{idx}"] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
        loss[f"translate_{idx}"] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
        if idx < 2:
            # transformation sensitivity loss (TSL), first two iterations only
            R_feats = endpoints["all_R_feats"][idx]
            t_feats = endpoints["all_t_feats"][idx]
            # rotation-feature triplet terms
            r_pos = F.nn.square_loss(t_feats[0], t_feats[1])
            r_neg = F.nn.square_loss(R_feats[0], R_feats[1])
            triplet_loss[f"R_feats_triplet_pos_{idx}"] = r_pos
            triplet_loss[f"R_feats_triplet_neg_{idx}"] = r_neg
            loss[f"R_feats_triplet_{idx}"] = (
                F.clip(-r_neg + params.margin[idx], lower=0.0) + r_pos
            ) * params.loss_alpha3
            # translation-feature triplet terms
            t_pos = F.nn.square_loss(R_feats[0], R_feats[2])
            t_neg = F.nn.square_loss(t_feats[0], t_feats[2])
            triplet_loss[f"t_feats_triplet_pos_{idx}"] = t_pos
            triplet_loss[f"t_feats_triplet_neg_{idx}"] = t_neg
            loss[f"t_feats_triplet_{idx}"] = (
                F.clip(-t_neg + params.margin[idx], lower=0.0) + t_pos
            ) * params.loss_alpha3
        # point-wise feature dropout loss (PFDL)
        drop_R = endpoints["all_dropout_R_feats"][idx]
        drop_t = endpoints["all_dropout_t_feats"][idx]
        loss[f"src_R_feats_dropout_{idx}"] = F.nn.square_loss(drop_R[0], drop_R[1]) * params.loss_alpha4
        loss[f"ref_R_feats_dropout_{idx}"] = F.nn.square_loss(drop_R[2], drop_R[3]) * params.loss_alpha4
        loss[f"src_t_feats_dropout_{idx}"] = F.nn.square_loss(drop_t[0], drop_t[1]) * params.loss_alpha4
        loss[f"ref_t_feats_dropout_{idx}"] = F.nn.square_loss(drop_t[2], drop_t[3]) * params.loss_alpha4
    # grand total over every recorded loss term (insertion order preserved)
    loss["total"] = F.sum(F.concat(list(loss.values())))
    return loss
def compute_metrics(endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = F.mean((t_gt - t_pred)**2, axis=1)
t_mae = F.mean(F.abs(t_gt - t_pred), axis=1)
r_mse = F.mean(r_mse)
t_mse = | F.mean(t_mse) | megengine.functional.mean |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
    """Assemble the FINet training losses.

    Args:
        endpoints: dict of network outputs; reads "all_pose_pair",
            "all_R_feats", "all_t_feats", "all_dropout_R_feats" and
            "all_dropout_t_feats".
        params: config object providing loss_type, loss_alpha1..loss_alpha4
            and per-iteration margin values.

    Returns:
        dict of named scalar losses, with the grand sum under "total".

    Raises:
        NotImplementedError: if ``params.loss_type`` is not "finet".
    """
    if params.loss_type != "finet":
        raise NotImplementedError
    loss = {}
    triplet_loss = {}
    for idx, pose_pair in enumerate(endpoints["all_pose_pair"]):
        # regression loss on the pose: L1 on quaternion, L2 on translation
        loss[f"quat_{idx}"] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
        loss[f"translate_{idx}"] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
        if idx < 2:
            # transformation sensitivity loss (TSL), first two iterations only
            R_feats = endpoints["all_R_feats"][idx]
            t_feats = endpoints["all_t_feats"][idx]
            # rotation-feature triplet terms
            r_pos = F.nn.square_loss(t_feats[0], t_feats[1])
            r_neg = F.nn.square_loss(R_feats[0], R_feats[1])
            triplet_loss[f"R_feats_triplet_pos_{idx}"] = r_pos
            triplet_loss[f"R_feats_triplet_neg_{idx}"] = r_neg
            loss[f"R_feats_triplet_{idx}"] = (
                F.clip(-r_neg + params.margin[idx], lower=0.0) + r_pos
            ) * params.loss_alpha3
            # translation-feature triplet terms
            t_pos = F.nn.square_loss(R_feats[0], R_feats[2])
            t_neg = F.nn.square_loss(t_feats[0], t_feats[2])
            triplet_loss[f"t_feats_triplet_pos_{idx}"] = t_pos
            triplet_loss[f"t_feats_triplet_neg_{idx}"] = t_neg
            loss[f"t_feats_triplet_{idx}"] = (
                F.clip(-t_neg + params.margin[idx], lower=0.0) + t_pos
            ) * params.loss_alpha3
        # point-wise feature dropout loss (PFDL)
        drop_R = endpoints["all_dropout_R_feats"][idx]
        drop_t = endpoints["all_dropout_t_feats"][idx]
        loss[f"src_R_feats_dropout_{idx}"] = F.nn.square_loss(drop_R[0], drop_R[1]) * params.loss_alpha4
        loss[f"ref_R_feats_dropout_{idx}"] = F.nn.square_loss(drop_R[2], drop_R[3]) * params.loss_alpha4
        loss[f"src_t_feats_dropout_{idx}"] = F.nn.square_loss(drop_t[0], drop_t[1]) * params.loss_alpha4
        loss[f"ref_t_feats_dropout_{idx}"] = F.nn.square_loss(drop_t[2], drop_t[3]) * params.loss_alpha4
    # grand total over every recorded loss term (insertion order preserved)
    loss["total"] = F.sum(F.concat(list(loss.values())))
    return loss
def compute_metrics(endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = F.mean((t_gt - t_pred)**2, axis=1)
t_mae = F.mean(F.abs(t_gt - t_pred), axis=1)
r_mse = F.mean(r_mse)
t_mse = F.mean(t_mse)
r_mae = | F.mean(r_mae) | megengine.functional.mean |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
    """Assemble the FINet training losses.

    Args:
        endpoints: dict of network outputs; reads "all_pose_pair",
            "all_R_feats", "all_t_feats", "all_dropout_R_feats" and
            "all_dropout_t_feats".
        params: config object providing loss_type, loss_alpha1..loss_alpha4
            and per-iteration margin values.

    Returns:
        dict of named scalar losses, with the grand sum under "total".

    Raises:
        NotImplementedError: if ``params.loss_type`` is not "finet".
    """
    if params.loss_type != "finet":
        raise NotImplementedError
    loss = {}
    triplet_loss = {}
    for idx, pose_pair in enumerate(endpoints["all_pose_pair"]):
        # regression loss on the pose: L1 on quaternion, L2 on translation
        loss[f"quat_{idx}"] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
        loss[f"translate_{idx}"] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
        if idx < 2:
            # transformation sensitivity loss (TSL), first two iterations only
            R_feats = endpoints["all_R_feats"][idx]
            t_feats = endpoints["all_t_feats"][idx]
            # rotation-feature triplet terms
            r_pos = F.nn.square_loss(t_feats[0], t_feats[1])
            r_neg = F.nn.square_loss(R_feats[0], R_feats[1])
            triplet_loss[f"R_feats_triplet_pos_{idx}"] = r_pos
            triplet_loss[f"R_feats_triplet_neg_{idx}"] = r_neg
            loss[f"R_feats_triplet_{idx}"] = (
                F.clip(-r_neg + params.margin[idx], lower=0.0) + r_pos
            ) * params.loss_alpha3
            # translation-feature triplet terms
            t_pos = F.nn.square_loss(R_feats[0], R_feats[2])
            t_neg = F.nn.square_loss(t_feats[0], t_feats[2])
            triplet_loss[f"t_feats_triplet_pos_{idx}"] = t_pos
            triplet_loss[f"t_feats_triplet_neg_{idx}"] = t_neg
            loss[f"t_feats_triplet_{idx}"] = (
                F.clip(-t_neg + params.margin[idx], lower=0.0) + t_pos
            ) * params.loss_alpha3
        # point-wise feature dropout loss (PFDL)
        drop_R = endpoints["all_dropout_R_feats"][idx]
        drop_t = endpoints["all_dropout_t_feats"][idx]
        loss[f"src_R_feats_dropout_{idx}"] = F.nn.square_loss(drop_R[0], drop_R[1]) * params.loss_alpha4
        loss[f"ref_R_feats_dropout_{idx}"] = F.nn.square_loss(drop_R[2], drop_R[3]) * params.loss_alpha4
        loss[f"src_t_feats_dropout_{idx}"] = F.nn.square_loss(drop_t[0], drop_t[1]) * params.loss_alpha4
        loss[f"ref_t_feats_dropout_{idx}"] = F.nn.square_loss(drop_t[2], drop_t[3]) * params.loss_alpha4
    # grand total over every recorded loss term (insertion order preserved)
    loss["total"] = F.sum(F.concat(list(loss.values())))
    return loss
def compute_metrics(endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = F.mean((t_gt - t_pred)**2, axis=1)
t_mae = F.mean(F.abs(t_gt - t_pred), axis=1)
r_mse = F.mean(r_mse)
t_mse = F.mean(t_mse)
r_mae = F.mean(r_mae)
t_mae = | F.mean(t_mae) | megengine.functional.mean |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
    """Assemble the FINet training losses.

    Args:
        endpoints: dict of network outputs; reads "all_pose_pair",
            "all_R_feats", "all_t_feats", "all_dropout_R_feats" and
            "all_dropout_t_feats".
        params: config object providing loss_type, loss_alpha1..loss_alpha4
            and per-iteration margin values.

    Returns:
        dict of named scalar losses, with the grand sum under "total".

    Raises:
        NotImplementedError: if ``params.loss_type`` is not "finet".
    """
    if params.loss_type != "finet":
        raise NotImplementedError
    loss = {}
    triplet_loss = {}
    for idx, pose_pair in enumerate(endpoints["all_pose_pair"]):
        # regression loss on the pose: L1 on quaternion, L2 on translation
        loss[f"quat_{idx}"] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
        loss[f"translate_{idx}"] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
        if idx < 2:
            # transformation sensitivity loss (TSL), first two iterations only
            R_feats = endpoints["all_R_feats"][idx]
            t_feats = endpoints["all_t_feats"][idx]
            # rotation-feature triplet terms
            r_pos = F.nn.square_loss(t_feats[0], t_feats[1])
            r_neg = F.nn.square_loss(R_feats[0], R_feats[1])
            triplet_loss[f"R_feats_triplet_pos_{idx}"] = r_pos
            triplet_loss[f"R_feats_triplet_neg_{idx}"] = r_neg
            loss[f"R_feats_triplet_{idx}"] = (
                F.clip(-r_neg + params.margin[idx], lower=0.0) + r_pos
            ) * params.loss_alpha3
            # translation-feature triplet terms
            t_pos = F.nn.square_loss(R_feats[0], R_feats[2])
            t_neg = F.nn.square_loss(t_feats[0], t_feats[2])
            triplet_loss[f"t_feats_triplet_pos_{idx}"] = t_pos
            triplet_loss[f"t_feats_triplet_neg_{idx}"] = t_neg
            loss[f"t_feats_triplet_{idx}"] = (
                F.clip(-t_neg + params.margin[idx], lower=0.0) + t_pos
            ) * params.loss_alpha3
        # point-wise feature dropout loss (PFDL)
        drop_R = endpoints["all_dropout_R_feats"][idx]
        drop_t = endpoints["all_dropout_t_feats"][idx]
        loss[f"src_R_feats_dropout_{idx}"] = F.nn.square_loss(drop_R[0], drop_R[1]) * params.loss_alpha4
        loss[f"ref_R_feats_dropout_{idx}"] = F.nn.square_loss(drop_R[2], drop_R[3]) * params.loss_alpha4
        loss[f"src_t_feats_dropout_{idx}"] = F.nn.square_loss(drop_t[0], drop_t[1]) * params.loss_alpha4
        loss[f"ref_t_feats_dropout_{idx}"] = F.nn.square_loss(drop_t[2], drop_t[3]) * params.loss_alpha4
    # grand total over every recorded loss term (insertion order preserved)
    loss["total"] = F.sum(F.concat(list(loss.values())))
    return loss
def compute_metrics(endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = F.mean((t_gt - t_pred)**2, axis=1)
t_mae = F.mean(F.abs(t_gt - t_pred), axis=1)
r_mse = F.mean(r_mse)
t_mse = F.mean(t_mse)
r_mae = F.mean(r_mae)
t_mae = F.mean(t_mae)
# Rotation, translation errors (isotropic, i.e. doesn"t depend on error
# direction, which is more representative of the actual error)
concatenated = se3.mge_concatenate(se3.mge_inverse(gt_transforms), pred_transforms)
rot_trace = concatenated[:, 0, 0] + concatenated[:, 1, 1] + concatenated[:, 2, 2]
residual_rotdeg = F.acos(F.clip(0.5 * (rot_trace - 1), -1.0, 1.0)) * 180.0 / np.pi
residual_transmag = | F.norm(concatenated[:, :, 3], axis=-1) | megengine.functional.norm |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
    """Compute the FINet training losses.

    Args:
        endpoints: dict of network outputs; reads "all_pose_pair",
            "all_R_feats", "all_t_feats", "all_dropout_R_feats" and
            "all_dropout_t_feats".
        params: configuration providing ``loss_type``, the weights
            ``loss_alpha1`` .. ``loss_alpha4`` and the per-iteration
            triplet ``margin`` sequence.

    Returns:
        dict mapping loss names to scalar tensors; ``loss["total"]`` is
        the sum of every individual term.

    Raises:
        NotImplementedError: for any ``params.loss_type`` other than "finet".
    """
    loss = {}
    if params.loss_type == "finet":
        num_iter = len(endpoints["all_pose_pair"])
        for i in range(num_iter):
            # Pose regression loss: quaternion (first 4 components) with L1,
            # translation (remaining components) with squared error.
            pose_pair = endpoints["all_pose_pair"][i]
            loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
            loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
            # Transformation sensitivity loss (TSL), first two iterations only.
            if i < 2:
                all_R_feats = endpoints["all_R_feats"][i]
                all_t_feats = endpoints["all_t_feats"][i]
                # R-feats triplet: hinge pushes the negative distance above
                # params.margin[i] while the positive distance is minimized.
                R_feats_pos = F.nn.square_loss(all_t_feats[0], all_t_feats[1])
                R_feats_neg = F.nn.square_loss(all_R_feats[0], all_R_feats[1])
                loss["R_feats_triplet_{}".format(i)] = (F.clip(-R_feats_neg + params.margin[i], lower=0.0) +
                                                        R_feats_pos) * params.loss_alpha3
                # t-feats triplet, symmetric to the R-feats one.
                t_feats_pos = F.nn.square_loss(all_R_feats[0], all_R_feats[2])
                t_feats_neg = F.nn.square_loss(all_t_feats[0], all_t_feats[2])
                loss["t_feats_triplet_{}".format(i)] = (F.clip(-t_feats_neg + params.margin[i], lower=0.0) +
                                                        t_feats_pos) * params.loss_alpha3
                # Point-wise feature dropout loss (PFDL): features should stay
                # consistent under point dropout, for source and reference.
                all_dropout_R_feats = endpoints["all_dropout_R_feats"][i]
                all_dropout_t_feats = endpoints["all_dropout_t_feats"][i]
                loss["src_R_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_R_feats[0], all_dropout_R_feats[1]) * params.loss_alpha4
                loss["ref_R_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_R_feats[2], all_dropout_R_feats[3]) * params.loss_alpha4
                loss["src_t_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_t_feats[0], all_dropout_t_feats[1]) * params.loss_alpha4
                loss["ref_t_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_t_feats[2], all_dropout_t_feats[3]) * params.loss_alpha4
        # Total loss: sum of every term registered above.
        # (Removed the dead `triplet_loss` dict the original built but never
        # read or returned, and replaced the manual append loop with values().)
        loss["total"] = F.sum(F.concat(list(loss.values())))
    else:
        raise NotImplementedError
    return loss
def compute_metrics(endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = F.mean((t_gt - t_pred)**2, axis=1)
t_mae = F.mean(F.abs(t_gt - t_pred), axis=1)
r_mse = F.mean(r_mse)
t_mse = F.mean(t_mse)
r_mae = F.mean(r_mae)
t_mae = F.mean(t_mae)
    # Rotation, translation errors (isotropic, i.e. doesn't depend on error
    # direction, which is more representative of the actual error)
concatenated = se3.mge_concatenate(se3.mge_inverse(gt_transforms), pred_transforms)
rot_trace = concatenated[:, 0, 0] + concatenated[:, 1, 1] + concatenated[:, 2, 2]
residual_rotdeg = F.acos(F.clip(0.5 * (rot_trace - 1), -1.0, 1.0)) * 180.0 / np.pi
residual_transmag = F.norm(concatenated[:, :, 3], axis=-1)
err_r = | F.mean(residual_rotdeg) | megengine.functional.mean |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
    """Compute the FINet training losses.

    Args:
        endpoints: dict of network outputs; reads "all_pose_pair",
            "all_R_feats", "all_t_feats", "all_dropout_R_feats" and
            "all_dropout_t_feats".
        params: configuration providing ``loss_type``, the weights
            ``loss_alpha1`` .. ``loss_alpha4`` and the per-iteration
            triplet ``margin`` sequence.

    Returns:
        dict mapping loss names to scalar tensors; ``loss["total"]`` is
        the sum of every individual term.

    Raises:
        NotImplementedError: for any ``params.loss_type`` other than "finet".
    """
    loss = {}
    if params.loss_type == "finet":
        num_iter = len(endpoints["all_pose_pair"])
        for i in range(num_iter):
            # Pose regression loss: quaternion (first 4 components) with L1,
            # translation (remaining components) with squared error.
            pose_pair = endpoints["all_pose_pair"][i]
            loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
            loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
            # Transformation sensitivity loss (TSL), first two iterations only.
            if i < 2:
                all_R_feats = endpoints["all_R_feats"][i]
                all_t_feats = endpoints["all_t_feats"][i]
                # R-feats triplet: hinge pushes the negative distance above
                # params.margin[i] while the positive distance is minimized.
                R_feats_pos = F.nn.square_loss(all_t_feats[0], all_t_feats[1])
                R_feats_neg = F.nn.square_loss(all_R_feats[0], all_R_feats[1])
                loss["R_feats_triplet_{}".format(i)] = (F.clip(-R_feats_neg + params.margin[i], lower=0.0) +
                                                        R_feats_pos) * params.loss_alpha3
                # t-feats triplet, symmetric to the R-feats one.
                t_feats_pos = F.nn.square_loss(all_R_feats[0], all_R_feats[2])
                t_feats_neg = F.nn.square_loss(all_t_feats[0], all_t_feats[2])
                loss["t_feats_triplet_{}".format(i)] = (F.clip(-t_feats_neg + params.margin[i], lower=0.0) +
                                                        t_feats_pos) * params.loss_alpha3
                # Point-wise feature dropout loss (PFDL): features should stay
                # consistent under point dropout, for source and reference.
                all_dropout_R_feats = endpoints["all_dropout_R_feats"][i]
                all_dropout_t_feats = endpoints["all_dropout_t_feats"][i]
                loss["src_R_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_R_feats[0], all_dropout_R_feats[1]) * params.loss_alpha4
                loss["ref_R_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_R_feats[2], all_dropout_R_feats[3]) * params.loss_alpha4
                loss["src_t_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_t_feats[0], all_dropout_t_feats[1]) * params.loss_alpha4
                loss["ref_t_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_t_feats[2], all_dropout_t_feats[3]) * params.loss_alpha4
        # Total loss: sum of every term registered above.
        # (Removed the dead `triplet_loss` dict the original built but never
        # read or returned, and replaced the manual append loop with values().)
        loss["total"] = F.sum(F.concat(list(loss.values())))
    else:
        raise NotImplementedError
    return loss
def compute_metrics(endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = F.mean((t_gt - t_pred)**2, axis=1)
t_mae = F.mean(F.abs(t_gt - t_pred), axis=1)
r_mse = F.mean(r_mse)
t_mse = F.mean(t_mse)
r_mae = F.mean(r_mae)
t_mae = F.mean(t_mae)
    # Rotation, translation errors (isotropic, i.e. doesn't depend on error
    # direction, which is more representative of the actual error)
concatenated = se3.mge_concatenate(se3.mge_inverse(gt_transforms), pred_transforms)
rot_trace = concatenated[:, 0, 0] + concatenated[:, 1, 1] + concatenated[:, 2, 2]
residual_rotdeg = F.acos(F.clip(0.5 * (rot_trace - 1), -1.0, 1.0)) * 180.0 / np.pi
residual_transmag = F.norm(concatenated[:, :, 3], axis=-1)
err_r = F.mean(residual_rotdeg)
err_t = | F.mean(residual_transmag) | megengine.functional.mean |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
    """Compute the FINet training losses.

    Args:
        endpoints: dict of network outputs; reads "all_pose_pair",
            "all_R_feats", "all_t_feats", "all_dropout_R_feats" and
            "all_dropout_t_feats".
        params: configuration providing ``loss_type``, the weights
            ``loss_alpha1`` .. ``loss_alpha4`` and the per-iteration
            triplet ``margin`` sequence.

    Returns:
        dict mapping loss names to scalar tensors; ``loss["total"]`` is
        the sum of every individual term.

    Raises:
        NotImplementedError: for any ``params.loss_type`` other than "finet".
    """
    loss = {}
    if params.loss_type == "finet":
        num_iter = len(endpoints["all_pose_pair"])
        for i in range(num_iter):
            # Pose regression loss: quaternion (first 4 components) with L1,
            # translation (remaining components) with squared error.
            pose_pair = endpoints["all_pose_pair"][i]
            loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
            loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
            # Transformation sensitivity loss (TSL), first two iterations only.
            if i < 2:
                all_R_feats = endpoints["all_R_feats"][i]
                all_t_feats = endpoints["all_t_feats"][i]
                # R-feats triplet: hinge pushes the negative distance above
                # params.margin[i] while the positive distance is minimized.
                R_feats_pos = F.nn.square_loss(all_t_feats[0], all_t_feats[1])
                R_feats_neg = F.nn.square_loss(all_R_feats[0], all_R_feats[1])
                loss["R_feats_triplet_{}".format(i)] = (F.clip(-R_feats_neg + params.margin[i], lower=0.0) +
                                                        R_feats_pos) * params.loss_alpha3
                # t-feats triplet, symmetric to the R-feats one.
                t_feats_pos = F.nn.square_loss(all_R_feats[0], all_R_feats[2])
                t_feats_neg = F.nn.square_loss(all_t_feats[0], all_t_feats[2])
                loss["t_feats_triplet_{}".format(i)] = (F.clip(-t_feats_neg + params.margin[i], lower=0.0) +
                                                        t_feats_pos) * params.loss_alpha3
                # Point-wise feature dropout loss (PFDL): features should stay
                # consistent under point dropout, for source and reference.
                all_dropout_R_feats = endpoints["all_dropout_R_feats"][i]
                all_dropout_t_feats = endpoints["all_dropout_t_feats"][i]
                loss["src_R_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_R_feats[0], all_dropout_R_feats[1]) * params.loss_alpha4
                loss["ref_R_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_R_feats[2], all_dropout_R_feats[3]) * params.loss_alpha4
                loss["src_t_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_t_feats[0], all_dropout_t_feats[1]) * params.loss_alpha4
                loss["ref_t_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_t_feats[2], all_dropout_t_feats[3]) * params.loss_alpha4
        # Total loss: sum of every term registered above.
        # (Removed the dead `triplet_loss` dict the original built but never
        # read or returned, and replaced the manual append loop with values().)
        loss["total"] = F.sum(F.concat(list(loss.values())))
    else:
        raise NotImplementedError
    return loss
def compute_metrics(endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean( | F.abs(r_gt_euler_deg - r_pred_euler_deg) | megengine.functional.abs |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
    """Compute the FINet training losses.

    Args:
        endpoints: dict of network outputs; reads "all_pose_pair",
            "all_R_feats", "all_t_feats", "all_dropout_R_feats" and
            "all_dropout_t_feats".
        params: configuration providing ``loss_type``, the weights
            ``loss_alpha1`` .. ``loss_alpha4`` and the per-iteration
            triplet ``margin`` sequence.

    Returns:
        dict mapping loss names to scalar tensors; ``loss["total"]`` is
        the sum of every individual term.

    Raises:
        NotImplementedError: for any ``params.loss_type`` other than "finet".
    """
    loss = {}
    if params.loss_type == "finet":
        num_iter = len(endpoints["all_pose_pair"])
        for i in range(num_iter):
            # Pose regression loss: quaternion (first 4 components) with L1,
            # translation (remaining components) with squared error.
            pose_pair = endpoints["all_pose_pair"][i]
            loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
            loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
            # Transformation sensitivity loss (TSL), first two iterations only.
            if i < 2:
                all_R_feats = endpoints["all_R_feats"][i]
                all_t_feats = endpoints["all_t_feats"][i]
                # R-feats triplet: hinge pushes the negative distance above
                # params.margin[i] while the positive distance is minimized.
                R_feats_pos = F.nn.square_loss(all_t_feats[0], all_t_feats[1])
                R_feats_neg = F.nn.square_loss(all_R_feats[0], all_R_feats[1])
                loss["R_feats_triplet_{}".format(i)] = (F.clip(-R_feats_neg + params.margin[i], lower=0.0) +
                                                        R_feats_pos) * params.loss_alpha3
                # t-feats triplet, symmetric to the R-feats one.
                t_feats_pos = F.nn.square_loss(all_R_feats[0], all_R_feats[2])
                t_feats_neg = F.nn.square_loss(all_t_feats[0], all_t_feats[2])
                loss["t_feats_triplet_{}".format(i)] = (F.clip(-t_feats_neg + params.margin[i], lower=0.0) +
                                                        t_feats_pos) * params.loss_alpha3
                # Point-wise feature dropout loss (PFDL): features should stay
                # consistent under point dropout, for source and reference.
                all_dropout_R_feats = endpoints["all_dropout_R_feats"][i]
                all_dropout_t_feats = endpoints["all_dropout_t_feats"][i]
                loss["src_R_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_R_feats[0], all_dropout_R_feats[1]) * params.loss_alpha4
                loss["ref_R_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_R_feats[2], all_dropout_R_feats[3]) * params.loss_alpha4
                loss["src_t_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_t_feats[0], all_dropout_t_feats[1]) * params.loss_alpha4
                loss["ref_t_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_t_feats[2], all_dropout_t_feats[3]) * params.loss_alpha4
        # Total loss: sum of every term registered above.
        # (Removed the dead `triplet_loss` dict the original built but never
        # read or returned, and replaced the manual append loop with values().)
        loss["total"] = F.sum(F.concat(list(loss.values())))
    else:
        raise NotImplementedError
    return loss
def compute_metrics(endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = F.mean((t_gt - t_pred)**2, axis=1)
t_mae = F.mean( | F.abs(t_gt - t_pred) | megengine.functional.abs |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
loss = {}
# compute losses
if params.loss_type == "finet":
num_iter = len(endpoints["all_pose_pair"])
triplet_loss = {}
for i in range(num_iter):
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
# transformation sensitivity loss (TSL)
if i < 2:
all_R_feats = endpoints["all_R_feats"][i]
all_t_feats = endpoints["all_t_feats"][i]
# R feats triplet loss
R_feats_pos = F.nn.square_loss(all_t_feats[0], all_t_feats[1])
R_feats_neg = F.nn.square_loss(all_R_feats[0], all_R_feats[1])
triplet_loss["R_feats_triplet_pos_{}".format(i)] = R_feats_pos
triplet_loss["R_feats_triplet_neg_{}".format(i)] = R_feats_neg
loss["R_feats_triplet_{}".format(i)] = (F.clip(-R_feats_neg + params.margin[i], lower=0.0) +
R_feats_pos) * params.loss_alpha3
# t feats triplet loss
t_feats_pos = F.nn.square_loss(all_R_feats[0], all_R_feats[2])
t_feats_neg = F.nn.square_loss(all_t_feats[0], all_t_feats[2])
triplet_loss["t_feats_triplet_pos_{}".format(i)] = t_feats_pos
triplet_loss["t_feats_triplet_neg_{}".format(i)] = t_feats_neg
loss["t_feats_triplet_{}".format(i)] = (F.clip(-t_feats_neg + params.margin[i], lower=0.0) +
t_feats_pos) * params.loss_alpha3
# point-wise feature dropout loss (PFDL)
all_dropout_R_feats = endpoints["all_dropout_R_feats"][i]
all_dropout_t_feats = endpoints["all_dropout_t_feats"][i]
loss["src_R_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_R_feats[0], all_dropout_R_feats[1]) * params.loss_alpha4
loss["ref_R_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_R_feats[2], all_dropout_R_feats[3]) * params.loss_alpha4
loss["src_t_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_t_feats[0], all_dropout_t_feats[1]) * params.loss_alpha4
loss["ref_t_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_t_feats[2], all_dropout_t_feats[3]) * params.loss_alpha4
# total loss
total_losses = []
for k in loss:
total_losses.append(loss[k])
loss["total"] = F.sum( | F.concat(total_losses) | megengine.functional.concat |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
loss = {}
# compute losses
if params.loss_type == "finet":
num_iter = len(endpoints["all_pose_pair"])
triplet_loss = {}
for i in range(num_iter):
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = | F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) | megengine.functional.nn.l1_loss |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
loss = {}
# compute losses
if params.loss_type == "finet":
num_iter = len(endpoints["all_pose_pair"])
triplet_loss = {}
for i in range(num_iter):
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = | F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) | megengine.functional.nn.square_loss |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
loss = {}
# compute losses
if params.loss_type == "finet":
num_iter = len(endpoints["all_pose_pair"])
triplet_loss = {}
for i in range(num_iter):
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
# transformation sensitivity loss (TSL)
if i < 2:
all_R_feats = endpoints["all_R_feats"][i]
all_t_feats = endpoints["all_t_feats"][i]
# R feats triplet loss
R_feats_pos = | F.nn.square_loss(all_t_feats[0], all_t_feats[1]) | megengine.functional.nn.square_loss |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
loss = {}
# compute losses
if params.loss_type == "finet":
num_iter = len(endpoints["all_pose_pair"])
triplet_loss = {}
for i in range(num_iter):
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
# transformation sensitivity loss (TSL)
if i < 2:
all_R_feats = endpoints["all_R_feats"][i]
all_t_feats = endpoints["all_t_feats"][i]
# R feats triplet loss
R_feats_pos = F.nn.square_loss(all_t_feats[0], all_t_feats[1])
R_feats_neg = | F.nn.square_loss(all_R_feats[0], all_R_feats[1]) | megengine.functional.nn.square_loss |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
loss = {}
# compute losses
if params.loss_type == "finet":
num_iter = len(endpoints["all_pose_pair"])
triplet_loss = {}
for i in range(num_iter):
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
# transformation sensitivity loss (TSL)
if i < 2:
all_R_feats = endpoints["all_R_feats"][i]
all_t_feats = endpoints["all_t_feats"][i]
# R feats triplet loss
R_feats_pos = F.nn.square_loss(all_t_feats[0], all_t_feats[1])
R_feats_neg = F.nn.square_loss(all_R_feats[0], all_R_feats[1])
triplet_loss["R_feats_triplet_pos_{}".format(i)] = R_feats_pos
triplet_loss["R_feats_triplet_neg_{}".format(i)] = R_feats_neg
loss["R_feats_triplet_{}".format(i)] = (F.clip(-R_feats_neg + params.margin[i], lower=0.0) +
R_feats_pos) * params.loss_alpha3
# t feats triplet loss
t_feats_pos = | F.nn.square_loss(all_R_feats[0], all_R_feats[2]) | megengine.functional.nn.square_loss |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
loss = {}
# compute losses
if params.loss_type == "finet":
num_iter = len(endpoints["all_pose_pair"])
triplet_loss = {}
for i in range(num_iter):
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
# transformation sensitivity loss (TSL)
if i < 2:
all_R_feats = endpoints["all_R_feats"][i]
all_t_feats = endpoints["all_t_feats"][i]
# R feats triplet loss
R_feats_pos = F.nn.square_loss(all_t_feats[0], all_t_feats[1])
R_feats_neg = F.nn.square_loss(all_R_feats[0], all_R_feats[1])
triplet_loss["R_feats_triplet_pos_{}".format(i)] = R_feats_pos
triplet_loss["R_feats_triplet_neg_{}".format(i)] = R_feats_neg
loss["R_feats_triplet_{}".format(i)] = (F.clip(-R_feats_neg + params.margin[i], lower=0.0) +
R_feats_pos) * params.loss_alpha3
# t feats triplet loss
t_feats_pos = F.nn.square_loss(all_R_feats[0], all_R_feats[2])
t_feats_neg = | F.nn.square_loss(all_t_feats[0], all_t_feats[2]) | megengine.functional.nn.square_loss |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
loss = {}
# compute losses
if params.loss_type == "finet":
num_iter = len(endpoints["all_pose_pair"])
triplet_loss = {}
for i in range(num_iter):
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
# transformation sensitivity loss (TSL)
if i < 2:
all_R_feats = endpoints["all_R_feats"][i]
all_t_feats = endpoints["all_t_feats"][i]
# R feats triplet loss
R_feats_pos = F.nn.square_loss(all_t_feats[0], all_t_feats[1])
R_feats_neg = F.nn.square_loss(all_R_feats[0], all_R_feats[1])
triplet_loss["R_feats_triplet_pos_{}".format(i)] = R_feats_pos
triplet_loss["R_feats_triplet_neg_{}".format(i)] = R_feats_neg
loss["R_feats_triplet_{}".format(i)] = (F.clip(-R_feats_neg + params.margin[i], lower=0.0) +
R_feats_pos) * params.loss_alpha3
# t feats triplet loss
t_feats_pos = F.nn.square_loss(all_R_feats[0], all_R_feats[2])
t_feats_neg = F.nn.square_loss(all_t_feats[0], all_t_feats[2])
triplet_loss["t_feats_triplet_pos_{}".format(i)] = t_feats_pos
triplet_loss["t_feats_triplet_neg_{}".format(i)] = t_feats_neg
loss["t_feats_triplet_{}".format(i)] = (F.clip(-t_feats_neg + params.margin[i], lower=0.0) +
t_feats_pos) * params.loss_alpha3
# point-wise feature dropout loss (PFDL)
all_dropout_R_feats = endpoints["all_dropout_R_feats"][i]
all_dropout_t_feats = endpoints["all_dropout_t_feats"][i]
loss["src_R_feats_dropout_{}".format(i)] = | F.nn.square_loss(all_dropout_R_feats[0], all_dropout_R_feats[1]) | megengine.functional.nn.square_loss |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
loss = {}
# compute losses
if params.loss_type == "finet":
num_iter = len(endpoints["all_pose_pair"])
triplet_loss = {}
for i in range(num_iter):
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
# transformation sensitivity loss (TSL)
if i < 2:
all_R_feats = endpoints["all_R_feats"][i]
all_t_feats = endpoints["all_t_feats"][i]
# R feats triplet loss
R_feats_pos = F.nn.square_loss(all_t_feats[0], all_t_feats[1])
R_feats_neg = F.nn.square_loss(all_R_feats[0], all_R_feats[1])
triplet_loss["R_feats_triplet_pos_{}".format(i)] = R_feats_pos
triplet_loss["R_feats_triplet_neg_{}".format(i)] = R_feats_neg
loss["R_feats_triplet_{}".format(i)] = (F.clip(-R_feats_neg + params.margin[i], lower=0.0) +
R_feats_pos) * params.loss_alpha3
# t feats triplet loss
t_feats_pos = F.nn.square_loss(all_R_feats[0], all_R_feats[2])
t_feats_neg = F.nn.square_loss(all_t_feats[0], all_t_feats[2])
triplet_loss["t_feats_triplet_pos_{}".format(i)] = t_feats_pos
triplet_loss["t_feats_triplet_neg_{}".format(i)] = t_feats_neg
loss["t_feats_triplet_{}".format(i)] = (F.clip(-t_feats_neg + params.margin[i], lower=0.0) +
t_feats_pos) * params.loss_alpha3
# point-wise feature dropout loss (PFDL)
all_dropout_R_feats = endpoints["all_dropout_R_feats"][i]
all_dropout_t_feats = endpoints["all_dropout_t_feats"][i]
loss["src_R_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_R_feats[0], all_dropout_R_feats[1]) * params.loss_alpha4
loss["ref_R_feats_dropout_{}".format(i)] = | F.nn.square_loss(all_dropout_R_feats[2], all_dropout_R_feats[3]) | megengine.functional.nn.square_loss |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
loss = {}
# compute losses
if params.loss_type == "finet":
num_iter = len(endpoints["all_pose_pair"])
triplet_loss = {}
for i in range(num_iter):
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
# transformation sensitivity loss (TSL)
if i < 2:
all_R_feats = endpoints["all_R_feats"][i]
all_t_feats = endpoints["all_t_feats"][i]
# R feats triplet loss
R_feats_pos = F.nn.square_loss(all_t_feats[0], all_t_feats[1])
R_feats_neg = F.nn.square_loss(all_R_feats[0], all_R_feats[1])
triplet_loss["R_feats_triplet_pos_{}".format(i)] = R_feats_pos
triplet_loss["R_feats_triplet_neg_{}".format(i)] = R_feats_neg
loss["R_feats_triplet_{}".format(i)] = (F.clip(-R_feats_neg + params.margin[i], lower=0.0) +
R_feats_pos) * params.loss_alpha3
# t feats triplet loss
t_feats_pos = F.nn.square_loss(all_R_feats[0], all_R_feats[2])
t_feats_neg = F.nn.square_loss(all_t_feats[0], all_t_feats[2])
triplet_loss["t_feats_triplet_pos_{}".format(i)] = t_feats_pos
triplet_loss["t_feats_triplet_neg_{}".format(i)] = t_feats_neg
loss["t_feats_triplet_{}".format(i)] = (F.clip(-t_feats_neg + params.margin[i], lower=0.0) +
t_feats_pos) * params.loss_alpha3
# point-wise feature dropout loss (PFDL)
all_dropout_R_feats = endpoints["all_dropout_R_feats"][i]
all_dropout_t_feats = endpoints["all_dropout_t_feats"][i]
loss["src_R_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_R_feats[0], all_dropout_R_feats[1]) * params.loss_alpha4
loss["ref_R_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_R_feats[2], all_dropout_R_feats[3]) * params.loss_alpha4
loss["src_t_feats_dropout_{}".format(i)] = | F.nn.square_loss(all_dropout_t_feats[0], all_dropout_t_feats[1]) | megengine.functional.nn.square_loss |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
loss = {}
# compute losses
if params.loss_type == "finet":
num_iter = len(endpoints["all_pose_pair"])
triplet_loss = {}
for i in range(num_iter):
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
# transformation sensitivity loss (TSL)
if i < 2:
all_R_feats = endpoints["all_R_feats"][i]
all_t_feats = endpoints["all_t_feats"][i]
# R feats triplet loss
R_feats_pos = F.nn.square_loss(all_t_feats[0], all_t_feats[1])
R_feats_neg = F.nn.square_loss(all_R_feats[0], all_R_feats[1])
triplet_loss["R_feats_triplet_pos_{}".format(i)] = R_feats_pos
triplet_loss["R_feats_triplet_neg_{}".format(i)] = R_feats_neg
loss["R_feats_triplet_{}".format(i)] = (F.clip(-R_feats_neg + params.margin[i], lower=0.0) +
R_feats_pos) * params.loss_alpha3
# t feats triplet loss
t_feats_pos = F.nn.square_loss(all_R_feats[0], all_R_feats[2])
t_feats_neg = F.nn.square_loss(all_t_feats[0], all_t_feats[2])
triplet_loss["t_feats_triplet_pos_{}".format(i)] = t_feats_pos
triplet_loss["t_feats_triplet_neg_{}".format(i)] = t_feats_neg
loss["t_feats_triplet_{}".format(i)] = (F.clip(-t_feats_neg + params.margin[i], lower=0.0) +
t_feats_pos) * params.loss_alpha3
# point-wise feature dropout loss (PFDL)
all_dropout_R_feats = endpoints["all_dropout_R_feats"][i]
all_dropout_t_feats = endpoints["all_dropout_t_feats"][i]
loss["src_R_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_R_feats[0], all_dropout_R_feats[1]) * params.loss_alpha4
loss["ref_R_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_R_feats[2], all_dropout_R_feats[3]) * params.loss_alpha4
loss["src_t_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_t_feats[0], all_dropout_t_feats[1]) * params.loss_alpha4
loss["ref_t_feats_dropout_{}".format(i)] = | F.nn.square_loss(all_dropout_t_feats[2], all_dropout_t_feats[3]) | megengine.functional.nn.square_loss |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
    """Compute the FINet training losses.

    Args:
        endpoints: dict of network outputs; reads "all_pose_pair",
            "all_R_feats", "all_t_feats", "all_dropout_R_feats" and
            "all_dropout_t_feats".
        params: configuration providing ``loss_type``, the weights
            ``loss_alpha1`` .. ``loss_alpha4`` and the per-iteration
            triplet ``margin`` sequence.

    Returns:
        dict mapping loss names to scalar tensors; ``loss["total"]`` is
        the sum of every individual term.

    Raises:
        NotImplementedError: for any ``params.loss_type`` other than "finet".
    """
    loss = {}
    if params.loss_type == "finet":
        num_iter = len(endpoints["all_pose_pair"])
        for i in range(num_iter):
            # Pose regression loss: quaternion (first 4 components) with L1,
            # translation (remaining components) with squared error.
            pose_pair = endpoints["all_pose_pair"][i]
            loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
            loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
            # Transformation sensitivity loss (TSL), first two iterations only.
            if i < 2:
                all_R_feats = endpoints["all_R_feats"][i]
                all_t_feats = endpoints["all_t_feats"][i]
                # R-feats triplet: hinge pushes the negative distance above
                # params.margin[i] while the positive distance is minimized.
                R_feats_pos = F.nn.square_loss(all_t_feats[0], all_t_feats[1])
                R_feats_neg = F.nn.square_loss(all_R_feats[0], all_R_feats[1])
                loss["R_feats_triplet_{}".format(i)] = (F.clip(-R_feats_neg + params.margin[i], lower=0.0) +
                                                        R_feats_pos) * params.loss_alpha3
                # t-feats triplet, symmetric to the R-feats one.
                t_feats_pos = F.nn.square_loss(all_R_feats[0], all_R_feats[2])
                t_feats_neg = F.nn.square_loss(all_t_feats[0], all_t_feats[2])
                loss["t_feats_triplet_{}".format(i)] = (F.clip(-t_feats_neg + params.margin[i], lower=0.0) +
                                                        t_feats_pos) * params.loss_alpha3
                # Point-wise feature dropout loss (PFDL): features should stay
                # consistent under point dropout, for source and reference.
                all_dropout_R_feats = endpoints["all_dropout_R_feats"][i]
                all_dropout_t_feats = endpoints["all_dropout_t_feats"][i]
                loss["src_R_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_R_feats[0], all_dropout_R_feats[1]) * params.loss_alpha4
                loss["ref_R_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_R_feats[2], all_dropout_R_feats[3]) * params.loss_alpha4
                loss["src_t_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_t_feats[0], all_dropout_t_feats[1]) * params.loss_alpha4
                loss["ref_t_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_t_feats[2], all_dropout_t_feats[3]) * params.loss_alpha4
        # Total loss: sum of every term registered above.
        # (Removed the dead `triplet_loss` dict the original built but never
        # read or returned, and replaced the manual append loop with values().)
        loss["total"] = F.sum(F.concat(list(loss.values())))
    else:
        raise NotImplementedError
    return loss
def compute_metrics(endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = F.mean((t_gt - t_pred)**2, axis=1)
t_mae = F.mean(F.abs(t_gt - t_pred), axis=1)
r_mse = F.mean(r_mse)
t_mse = F.mean(t_mse)
r_mae = F.mean(r_mae)
t_mae = F.mean(t_mae)
# Rotation, translation errors (isotropic, i.e. doesn"t depend on error
# direction, which is more representative of the actual error)
concatenated = se3.mge_concatenate(se3.mge_inverse(gt_transforms), pred_transforms)
rot_trace = concatenated[:, 0, 0] + concatenated[:, 1, 1] + concatenated[:, 2, 2]
residual_rotdeg = F.acos( | F.clip(0.5 * (rot_trace - 1), -1.0, 1.0) | megengine.functional.clip |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
loss = {}
# compute losses
if params.loss_type == "finet":
num_iter = len(endpoints["all_pose_pair"])
triplet_loss = {}
for i in range(num_iter):
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
# transformation sensitivity loss (TSL)
if i < 2:
all_R_feats = endpoints["all_R_feats"][i]
all_t_feats = endpoints["all_t_feats"][i]
# R feats triplet loss
R_feats_pos = F.nn.square_loss(all_t_feats[0], all_t_feats[1])
R_feats_neg = F.nn.square_loss(all_R_feats[0], all_R_feats[1])
triplet_loss["R_feats_triplet_pos_{}".format(i)] = R_feats_pos
triplet_loss["R_feats_triplet_neg_{}".format(i)] = R_feats_neg
loss["R_feats_triplet_{}".format(i)] = ( | F.clip(-R_feats_neg + params.margin[i], lower=0.0) | megengine.functional.clip |
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
loss = {}
# compute losses
if params.loss_type == "finet":
num_iter = len(endpoints["all_pose_pair"])
triplet_loss = {}
for i in range(num_iter):
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
# transformation sensitivity loss (TSL)
if i < 2:
all_R_feats = endpoints["all_R_feats"][i]
all_t_feats = endpoints["all_t_feats"][i]
# R feats triplet loss
R_feats_pos = F.nn.square_loss(all_t_feats[0], all_t_feats[1])
R_feats_neg = F.nn.square_loss(all_R_feats[0], all_R_feats[1])
triplet_loss["R_feats_triplet_pos_{}".format(i)] = R_feats_pos
triplet_loss["R_feats_triplet_neg_{}".format(i)] = R_feats_neg
loss["R_feats_triplet_{}".format(i)] = (F.clip(-R_feats_neg + params.margin[i], lower=0.0) +
R_feats_pos) * params.loss_alpha3
# t feats triplet loss
t_feats_pos = F.nn.square_loss(all_R_feats[0], all_R_feats[2])
t_feats_neg = F.nn.square_loss(all_t_feats[0], all_t_feats[2])
triplet_loss["t_feats_triplet_pos_{}".format(i)] = t_feats_pos
triplet_loss["t_feats_triplet_neg_{}".format(i)] = t_feats_neg
loss["t_feats_triplet_{}".format(i)] = ( | F.clip(-t_feats_neg + params.margin[i], lower=0.0) | megengine.functional.clip |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
@ | dist.launcher(n_gpus=2) | megengine.distributed.launcher |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
    """Each of two ranks contributes one tensor; all_gather must hand every
    rank the concatenation of both contributions along ``axis``."""

    @dist.launcher(n_gpus=2)
    def worker(per_rank_inputs, per_rank_expected):
        rank = dist.get_rank()
        local = tensor(per_rank_inputs[rank])

        def run():
            return all_gather(local, axis=axis)

        # Exercise both the symbolic and the imperative trace paths.
        result = trace(symbolic=symbolic)(run)()
        assert np.allclose(result.numpy(), per_rank_expected[rank])

    a = np.random.random_sample(shape).astype("float32")
    b = np.random.random_sample(shape).astype("float32")
    gathered = np.concatenate((a, b), axis=axis)
    worker((a, b), (gathered, gathered))
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), False), ((2, 4, 6, 8), True)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum(shape, symbolic, axis):
@ | dist.launcher(n_gpus=2) | megengine.distributed.launcher |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
    """Check that all_gather on two ranks yields the concatenation of both
    ranks' tensors along ``axis``, in both trace modes."""
    @dist.launcher(n_gpus=2)
    def worker(data, expect):
        rank = dist.get_rank()
        inp = tensor(data[rank])  # this rank's contribution
        def func():
            output = all_gather(inp, axis=axis)
            return output
        # Compile (or not) according to the `symbolic` parametrization.
        func = trace(symbolic=symbolic)(func)
        output = func()
        assert np.allclose(output.numpy(), expect[rank])
    x = np.random.random_sample(shape).astype("float32")
    y = np.random.random_sample(shape).astype("float32")
    # Every rank should end up with rank0's and rank1's data concatenated.
    z = np.concatenate((x, y), axis=axis)
    data = (x, y)
    expect = (z, z)
    worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
    "shape,symbolic", [((2, 4, 6, 8), False), ((2, 4, 6, 8), True)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum(shape, symbolic, axis):
    """reduce_scatter_sum must sum both ranks' tensors and deliver to each
    rank its own half of the sum, split along ``axis``."""

    @dist.launcher(n_gpus=2)
    def worker(per_rank_inputs, per_rank_expected):
        rank = dist.get_rank()
        local = tensor(per_rank_inputs[rank])

        def run():
            return reduce_scatter_sum(local, axis=axis)

        result = trace(symbolic=symbolic)(run)()
        assert np.allclose(result.numpy(), per_rank_expected[rank])

    a = np.random.random_sample(shape).astype("float32")
    b = np.random.random_sample(shape).astype("float32")
    # Split the elementwise sum in two along `axis`; rank i receives piece i.
    halves = np.split(a + b, 2, axis=axis)
    stacked = np.concatenate(halves, axis=0)
    mid = stacked.shape[0] // 2
    worker((a, b), (stacked[:mid], stacked[mid:]))
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), True), ((2, 4, 6, 8), False)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_scatter(shape, symbolic, axis):
@ | dist.launcher(n_gpus=2) | megengine.distributed.launcher |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
    """Check that all_gather on two ranks yields the concatenation of both
    ranks' tensors along ``axis``, in both trace modes."""
    @dist.launcher(n_gpus=2)
    def worker(data, expect):
        rank = dist.get_rank()
        inp = tensor(data[rank])  # this rank's contribution
        def func():
            output = all_gather(inp, axis=axis)
            return output
        # Compile (or not) according to the `symbolic` parametrization.
        func = trace(symbolic=symbolic)(func)
        output = func()
        assert np.allclose(output.numpy(), expect[rank])
    x = np.random.random_sample(shape).astype("float32")
    y = np.random.random_sample(shape).astype("float32")
    # Every rank should end up with rank0's and rank1's data concatenated.
    z = np.concatenate((x, y), axis=axis)
    data = (x, y)
    expect = (z, z)
    worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
    "shape,symbolic", [((2, 4, 6, 8), False), ((2, 4, 6, 8), True)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum(shape, symbolic, axis):
    """Check that reduce_scatter_sum sums both ranks' tensors and gives each
    rank its own chunk of the sum, split along ``axis``."""
    @dist.launcher(n_gpus=2)
    def worker(data, expect):
        rank = dist.get_rank()
        inp = tensor(data[rank])
        def func():
            output = reduce_scatter_sum(inp, axis=axis)
            return output
        func = trace(symbolic=symbolic)(func)
        output = func()
        assert np.allclose(output.numpy(), expect[rank])
    x = np.random.random_sample(shape).astype("float32")
    y = np.random.random_sample(shape).astype("float32")
    z = x + y  # the reduced (summed) result
    data = (x, y)
    # Split the sum in two along `axis`; rank i should receive piece i.
    z = np.split(z, 2, axis=axis)
    z = np.concatenate(z, axis=0)
    expect = (z[: z.shape[0] // 2], z[z.shape[0] // 2 :])
    worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
    "shape,symbolic", [((2, 4, 6, 8), True), ((2, 4, 6, 8), False)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_scatter(shape, symbolic, axis):
    """scatter must split rank 0's tensor along ``axis`` and deliver piece i
    to rank i; rank 1's own input is ignored."""

    @dist.launcher(n_gpus=2)
    def worker(per_rank_inputs, per_rank_expected):
        rank = dist.get_rank()
        local = tensor(per_rank_inputs[rank])

        def run():
            return scatter(local, axis=axis)

        result = trace(symbolic=symbolic)(run)()
        assert np.allclose(result.numpy(), per_rank_expected[rank])

    a = np.random.random_sample(shape).astype("float32")
    # Only rank 0's data matters for scatter; rank 1 gets a dummy tensor.
    pieces = np.concatenate(np.split(a, 2, axis=axis), axis=0)
    mid = pieces.shape[0] // 2
    worker((a, a + 1), (pieces[:mid], pieces[mid:]))
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 4, 6, 8)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize(
"split_axis,concat_axis", [(0, 1), (1, 0), (2, 0), (0, 2), (2, 3)], ids=str
)
@pytest.mark.isolated_distributed
def test_all_to_all(shape, symbolic, split_axis, concat_axis):
@ | dist.launcher(n_gpus=2) | megengine.distributed.launcher |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = | tensor(data[rank]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
    """Check that all_gather on two ranks yields the concatenation of both
    ranks' tensors along ``axis``, in both trace modes."""
    @dist.launcher(n_gpus=2)
    def worker(data, expect):
        rank = dist.get_rank()
        inp = tensor(data[rank])  # this rank's contribution
        def func():
            output = all_gather(inp, axis=axis)
            return output
        # Compile (or not) according to the `symbolic` parametrization.
        func = trace(symbolic=symbolic)(func)
        output = func()
        assert np.allclose(output.numpy(), expect[rank])
    x = np.random.random_sample(shape).astype("float32")
    y = np.random.random_sample(shape).astype("float32")
    # Every rank should end up with rank0's and rank1's data concatenated.
    z = np.concatenate((x, y), axis=axis)
    data = (x, y)
    expect = (z, z)
    worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), False), ((2, 4, 6, 8), True)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
    """Check that all_gather on two ranks yields the concatenation of both
    ranks' tensors along ``axis``, in both trace modes."""
    @dist.launcher(n_gpus=2)
    def worker(data, expect):
        rank = dist.get_rank()
        inp = tensor(data[rank])  # this rank's contribution
        def func():
            output = all_gather(inp, axis=axis)
            return output
        # Compile (or not) according to the `symbolic` parametrization.
        func = trace(symbolic=symbolic)(func)
        output = func()
        assert np.allclose(output.numpy(), expect[rank])
    x = np.random.random_sample(shape).astype("float32")
    y = np.random.random_sample(shape).astype("float32")
    # Every rank should end up with rank0's and rank1's data concatenated.
    z = np.concatenate((x, y), axis=axis)
    data = (x, y)
    expect = (z, z)
    worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), False), ((2, 4, 6, 8), True)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = | tensor(data[rank]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
    """Check that all_gather on two ranks yields the concatenation of both
    ranks' tensors along ``axis``, in both trace modes."""
    @dist.launcher(n_gpus=2)
    def worker(data, expect):
        rank = dist.get_rank()
        inp = tensor(data[rank])  # this rank's contribution
        def func():
            output = all_gather(inp, axis=axis)
            return output
        # Compile (or not) according to the `symbolic` parametrization.
        func = trace(symbolic=symbolic)(func)
        output = func()
        assert np.allclose(output.numpy(), expect[rank])
    x = np.random.random_sample(shape).astype("float32")
    y = np.random.random_sample(shape).astype("float32")
    # Every rank should end up with rank0's and rank1's data concatenated.
    z = np.concatenate((x, y), axis=axis)
    data = (x, y)
    expect = (z, z)
    worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
    "shape,symbolic", [((2, 4, 6, 8), False), ((2, 4, 6, 8), True)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum(shape, symbolic, axis):
    """Check that reduce_scatter_sum sums both ranks' tensors and gives each
    rank its own chunk of the sum, split along ``axis``."""
    @dist.launcher(n_gpus=2)
    def worker(data, expect):
        rank = dist.get_rank()
        inp = tensor(data[rank])
        def func():
            output = reduce_scatter_sum(inp, axis=axis)
            return output
        func = trace(symbolic=symbolic)(func)
        output = func()
        assert np.allclose(output.numpy(), expect[rank])
    x = np.random.random_sample(shape).astype("float32")
    y = np.random.random_sample(shape).astype("float32")
    z = x + y  # the reduced (summed) result
    data = (x, y)
    # Split the sum in two along `axis`; rank i should receive piece i.
    z = np.split(z, 2, axis=axis)
    z = np.concatenate(z, axis=0)
    expect = (z[: z.shape[0] // 2], z[z.shape[0] // 2 :])
    worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), True), ((2, 4, 6, 8), False)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_scatter(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
    """Check that all_gather on two ranks yields the concatenation of both
    ranks' tensors along ``axis``, in both trace modes."""
    @dist.launcher(n_gpus=2)
    def worker(data, expect):
        rank = dist.get_rank()
        inp = tensor(data[rank])  # this rank's contribution
        def func():
            output = all_gather(inp, axis=axis)
            return output
        # Compile (or not) according to the `symbolic` parametrization.
        func = trace(symbolic=symbolic)(func)
        output = func()
        assert np.allclose(output.numpy(), expect[rank])
    x = np.random.random_sample(shape).astype("float32")
    y = np.random.random_sample(shape).astype("float32")
    # Every rank should end up with rank0's and rank1's data concatenated.
    z = np.concatenate((x, y), axis=axis)
    data = (x, y)
    expect = (z, z)
    worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
    "shape,symbolic", [((2, 4, 6, 8), False), ((2, 4, 6, 8), True)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum(shape, symbolic, axis):
    """Check that reduce_scatter_sum sums both ranks' tensors and gives each
    rank its own chunk of the sum, split along ``axis``."""
    @dist.launcher(n_gpus=2)
    def worker(data, expect):
        rank = dist.get_rank()
        inp = tensor(data[rank])
        def func():
            output = reduce_scatter_sum(inp, axis=axis)
            return output
        func = trace(symbolic=symbolic)(func)
        output = func()
        assert np.allclose(output.numpy(), expect[rank])
    x = np.random.random_sample(shape).astype("float32")
    y = np.random.random_sample(shape).astype("float32")
    z = x + y  # the reduced (summed) result
    data = (x, y)
    # Split the sum in two along `axis`; rank i should receive piece i.
    z = np.split(z, 2, axis=axis)
    z = np.concatenate(z, axis=0)
    expect = (z[: z.shape[0] // 2], z[z.shape[0] // 2 :])
    worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), True), ((2, 4, 6, 8), False)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_scatter(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = | tensor(data[rank]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
    """Check that all_gather on two ranks yields the concatenation of both
    ranks' tensors along ``axis``, in both trace modes."""
    @dist.launcher(n_gpus=2)
    def worker(data, expect):
        rank = dist.get_rank()
        inp = tensor(data[rank])  # this rank's contribution
        def func():
            output = all_gather(inp, axis=axis)
            return output
        # Compile (or not) according to the `symbolic` parametrization.
        func = trace(symbolic=symbolic)(func)
        output = func()
        assert np.allclose(output.numpy(), expect[rank])
    x = np.random.random_sample(shape).astype("float32")
    y = np.random.random_sample(shape).astype("float32")
    # Every rank should end up with rank0's and rank1's data concatenated.
    z = np.concatenate((x, y), axis=axis)
    data = (x, y)
    expect = (z, z)
    worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
    "shape,symbolic", [((2, 4, 6, 8), False), ((2, 4, 6, 8), True)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum(shape, symbolic, axis):
    """Check that reduce_scatter_sum sums both ranks' tensors and gives each
    rank its own chunk of the sum, split along ``axis``."""
    @dist.launcher(n_gpus=2)
    def worker(data, expect):
        rank = dist.get_rank()
        inp = tensor(data[rank])
        def func():
            output = reduce_scatter_sum(inp, axis=axis)
            return output
        func = trace(symbolic=symbolic)(func)
        output = func()
        assert np.allclose(output.numpy(), expect[rank])
    x = np.random.random_sample(shape).astype("float32")
    y = np.random.random_sample(shape).astype("float32")
    z = x + y  # the reduced (summed) result
    data = (x, y)
    # Split the sum in two along `axis`; rank i should receive piece i.
    z = np.split(z, 2, axis=axis)
    z = np.concatenate(z, axis=0)
    expect = (z[: z.shape[0] // 2], z[z.shape[0] // 2 :])
    worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
    "shape,symbolic", [((2, 4, 6, 8), True), ((2, 4, 6, 8), False)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_scatter(shape, symbolic, axis):
    """Check that scatter splits rank 0's tensor along ``axis`` and delivers
    piece i to rank i; rank 1's own input must be ignored."""
    @dist.launcher(n_gpus=2)
    def worker(data, expect):
        rank = dist.get_rank()
        inp = tensor(data[rank])
        def func():
            output = scatter(inp, axis=axis)
            return output
        func = trace(symbolic=symbolic)(func)
        output = func()
        assert np.allclose(output.numpy(), expect[rank])
    x = np.random.random_sample(shape).astype("float32")
    y = x + 1  # dummy payload for rank 1; scatter only reads rank 0's input
    data = (x, y)
    _x = np.split(x, 2, axis=axis)
    _x = np.concatenate(_x, axis=0)
    expect = (_x[: _x.shape[0] // 2], _x[_x.shape[0] // 2 :])
    worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 4, 6, 8)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize(
"split_axis,concat_axis", [(0, 1), (1, 0), (2, 0), (0, 2), (2, 3)], ids=str
)
@pytest.mark.isolated_distributed
def test_all_to_all(shape, symbolic, split_axis, concat_axis):
@dist.launcher(n_gpus=2)
def worker(data):
rank = | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
    """Check that all_gather on two ranks yields the concatenation of both
    ranks' tensors along ``axis``, in both trace modes."""
    @dist.launcher(n_gpus=2)
    def worker(data, expect):
        rank = dist.get_rank()
        inp = tensor(data[rank])  # this rank's contribution
        def func():
            output = all_gather(inp, axis=axis)
            return output
        # Compile (or not) according to the `symbolic` parametrization.
        func = trace(symbolic=symbolic)(func)
        output = func()
        assert np.allclose(output.numpy(), expect[rank])
    x = np.random.random_sample(shape).astype("float32")
    y = np.random.random_sample(shape).astype("float32")
    # Every rank should end up with rank0's and rank1's data concatenated.
    z = np.concatenate((x, y), axis=axis)
    data = (x, y)
    expect = (z, z)
    worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
    "shape,symbolic", [((2, 4, 6, 8), False), ((2, 4, 6, 8), True)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum(shape, symbolic, axis):
    """Check that reduce_scatter_sum sums both ranks' tensors and gives each
    rank its own chunk of the sum, split along ``axis``."""
    @dist.launcher(n_gpus=2)
    def worker(data, expect):
        rank = dist.get_rank()
        inp = tensor(data[rank])
        def func():
            output = reduce_scatter_sum(inp, axis=axis)
            return output
        func = trace(symbolic=symbolic)(func)
        output = func()
        assert np.allclose(output.numpy(), expect[rank])
    x = np.random.random_sample(shape).astype("float32")
    y = np.random.random_sample(shape).astype("float32")
    z = x + y  # the reduced (summed) result
    data = (x, y)
    # Split the sum in two along `axis`; rank i should receive piece i.
    z = np.split(z, 2, axis=axis)
    z = np.concatenate(z, axis=0)
    expect = (z[: z.shape[0] // 2], z[z.shape[0] // 2 :])
    worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
    "shape,symbolic", [((2, 4, 6, 8), True), ((2, 4, 6, 8), False)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_scatter(shape, symbolic, axis):
    """Check that scatter splits rank 0's tensor along ``axis`` and delivers
    piece i to rank i; rank 1's own input must be ignored."""
    @dist.launcher(n_gpus=2)
    def worker(data, expect):
        rank = dist.get_rank()
        inp = tensor(data[rank])
        def func():
            output = scatter(inp, axis=axis)
            return output
        func = trace(symbolic=symbolic)(func)
        output = func()
        assert np.allclose(output.numpy(), expect[rank])
    x = np.random.random_sample(shape).astype("float32")
    y = x + 1  # dummy payload for rank 1; scatter only reads rank 0's input
    data = (x, y)
    _x = np.split(x, 2, axis=axis)
    _x = np.concatenate(_x, axis=0)
    expect = (_x[: _x.shape[0] // 2], _x[_x.shape[0] // 2 :])
    worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 4, 6, 8)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize(
"split_axis,concat_axis", [(0, 1), (1, 0), (2, 0), (0, 2), (2, 3)], ids=str
)
@pytest.mark.isolated_distributed
def test_all_to_all(shape, symbolic, split_axis, concat_axis):
@dist.launcher(n_gpus=2)
def worker(data):
rank = dist.get_rank()
inp = | tensor(data[rank]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = | all_gather(inp, axis=axis) | megengine.distributed.functional.all_gather |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = all_gather(inp, axis=axis)
return output
func = | trace(symbolic=symbolic) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = all_gather(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = np.random.random_sample(shape).astype("float32")
z = np.concatenate((x, y), axis=axis)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), False), ((2, 4, 6, 8), True)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = | reduce_scatter_sum(inp, axis=axis) | megengine.distributed.functional.reduce_scatter_sum |