prompt
stringlengths 94
42.6k
| completion
stringlengths 6
120
| api
stringlengths 14
68
|
---|---|---|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
from megengine.core import Tensor
from official.vision.detection import layers
def get_focal_loss(
logits: Tensor,
labels: Tensor,
ignore_label: int = -1,
background: int = 0,
alpha: float = 0.5,
gamma: float = 0,
norm_type: str = "fg",
) -> Tensor:
r"""Focal Loss for Dense Object Detection:
<https://arxiv.org/pdf/1708.02002.pdf>
.. math::
FL(p_t) = -\alpha_t(1-p_t)^\gamma \log(p_t)
Args:
logits (Tensor):
the predicted logits with the shape of :math:`(B, A, C)`
labels (Tensor):
the assigned labels of boxes with shape of :math:`(B, A)`
ignore_label (int):
the value of ignore class. Default: -1
background (int):
the value of background class. Default: 0
alpha (float):
parameter to mitigate class imbalance. Default: 0.5
gamma (float):
parameter to mitigate easy/hard loss imbalance. Default: 0
norm_type (str): current support "fg", "none":
"fg": loss will be normalized by number of fore-ground samples
"none": not norm
Returns:
the calculated focal loss.
"""
class_range = | F.arange(1, logits.shape[2] + 1) | megengine.functional.arange |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
from megengine.core import Tensor
from official.vision.detection import layers
def get_focal_loss(
logits: Tensor,
labels: Tensor,
ignore_label: int = -1,
background: int = 0,
alpha: float = 0.5,
gamma: float = 0,
norm_type: str = "fg",
) -> Tensor:
r"""Focal Loss for Dense Object Detection:
<https://arxiv.org/pdf/1708.02002.pdf>
.. math::
FL(p_t) = -\alpha_t(1-p_t)^\gamma \log(p_t)
Args:
logits (Tensor):
the predicted logits with the shape of :math:`(B, A, C)`
labels (Tensor):
the assigned labels of boxes with shape of :math:`(B, A)`
ignore_label (int):
the value of ignore class. Default: -1
background (int):
the value of background class. Default: 0
alpha (float):
parameter to mitigate class imbalance. Default: 0.5
gamma (float):
parameter to mitigate easy/hard loss imbalance. Default: 0
norm_type (str): current support "fg", "none":
"fg": loss will be normalized by number of fore-ground samples
"none": not norm
Returns:
the calculated focal loss.
"""
class_range = F.arange(1, logits.shape[2] + 1)
labels = | F.add_axis(labels, axis=2) | megengine.functional.add_axis |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
from megengine.core import Tensor
from official.vision.detection import layers
def get_focal_loss(
logits: Tensor,
labels: Tensor,
ignore_label: int = -1,
background: int = 0,
alpha: float = 0.5,
gamma: float = 0,
norm_type: str = "fg",
) -> Tensor:
r"""Focal Loss for Dense Object Detection:
<https://arxiv.org/pdf/1708.02002.pdf>
.. math::
FL(p_t) = -\alpha_t(1-p_t)^\gamma \log(p_t)
Args:
logits (Tensor):
the predicted logits with the shape of :math:`(B, A, C)`
labels (Tensor):
the assigned labels of boxes with shape of :math:`(B, A)`
ignore_label (int):
the value of ignore class. Default: -1
background (int):
the value of background class. Default: 0
alpha (float):
parameter to mitigate class imbalance. Default: 0.5
gamma (float):
parameter to mitigate easy/hard loss imbalance. Default: 0
norm_type (str): current support "fg", "none":
"fg": loss will be normalized by number of fore-ground samples
"none": not norm
Returns:
the calculated focal loss.
"""
class_range = F.arange(1, logits.shape[2] + 1)
labels = F.add_axis(labels, axis=2)
scores = | F.sigmoid(logits) | megengine.functional.sigmoid |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
from megengine.core import Tensor
from official.vision.detection import layers
def get_focal_loss(
logits: Tensor,
labels: Tensor,
ignore_label: int = -1,
background: int = 0,
alpha: float = 0.5,
gamma: float = 0,
norm_type: str = "fg",
) -> Tensor:
r"""Focal Loss for Dense Object Detection:
<https://arxiv.org/pdf/1708.02002.pdf>
.. math::
FL(p_t) = -\alpha_t(1-p_t)^\gamma \log(p_t)
Args:
logits (Tensor):
the predicted logits with the shape of :math:`(B, A, C)`
labels (Tensor):
the assigned labels of boxes with shape of :math:`(B, A)`
ignore_label (int):
the value of ignore class. Default: -1
background (int):
the value of background class. Default: 0
alpha (float):
parameter to mitigate class imbalance. Default: 0.5
gamma (float):
parameter to mitigate easy/hard loss imbalance. Default: 0
norm_type (str): current support "fg", "none":
"fg": loss will be normalized by number of fore-ground samples
"none": not norm
Returns:
the calculated focal loss.
"""
class_range = F.arange(1, logits.shape[2] + 1)
labels = F.add_axis(labels, axis=2)
scores = F.sigmoid(logits)
pos_part = (1 - scores) ** gamma * layers.logsigmoid(logits)
neg_part = scores ** gamma * layers.logsigmoid(-logits)
pos_loss = -(labels == class_range) * pos_part * alpha
neg_loss = (
-(labels != class_range) * (labels != ignore_label) * neg_part * (1 - alpha)
)
loss = (pos_loss + neg_loss).sum()
if norm_type == "fg":
fg_mask = (labels != background) * (labels != ignore_label)
return loss / F.maximum(fg_mask.sum(), 1)
elif norm_type == "none":
return loss
else:
raise NotImplementedError
def get_smooth_l1_loss(
pred_bbox: Tensor,
gt_bbox: Tensor,
labels: Tensor,
beta: int = 1,
background: int = 0,
ignore_label: int = -1,
norm_type: str = "fg",
) -> Tensor:
r"""Smooth l1 loss used in RetinaNet.
Args:
pred_bbox (Tensor):
the predicted bbox with the shape of :math:`(B, A, 4)`
gt_bbox (Tensor):
the ground-truth bbox with the shape of :math:`(B, A, 4)`
labels (Tensor):
the assigned labels of boxes with shape of :math:`(B, A)`
beta (int):
the parameter of smooth l1 loss. Default: 1
background (int):
the value of background class. Default: 0
ignore_label (int):
the value of ignore class. Default: -1
norm_type (str): current support "fg", "all", "none":
"fg": loss will be normalized by number of fore-ground samples
"all": loss will be normalized by number of all samples
"none": not norm
Returns:
the calculated smooth l1 loss.
"""
pred_bbox = pred_bbox.reshape(-1, 4)
gt_bbox = gt_bbox.reshape(-1, 4)
labels = labels.reshape(-1)
fg_mask = (labels != background) * (labels != ignore_label)
loss = get_smooth_l1_base(pred_bbox, gt_bbox, beta)
loss = (loss.sum(axis=1) * fg_mask).sum()
if norm_type == "fg":
loss = loss / F.maximum(fg_mask.sum(), 1)
elif norm_type == "all":
all_mask = labels != ignore_label
loss = loss / F.maximum(all_mask.sum(), 1)
elif norm_type == "none":
return loss
else:
raise NotImplementedError
return loss
def get_smooth_l1_base(pred_bbox: Tensor, gt_bbox: Tensor, beta: float) -> Tensor:
r"""
Args:
pred_bbox (Tensor):
the predicted bbox with the shape of :math:`(N, 4)`
gt_bbox (Tensor):
the ground-truth bbox with the shape of :math:`(N, 4)`
beta (int):
the parameter of smooth l1 loss.
Returns:
the calculated smooth l1 loss.
"""
x = pred_bbox - gt_bbox
abs_x = | F.abs(x) | megengine.functional.abs |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
from megengine.core import Tensor
from official.vision.detection import layers
def get_focal_loss(
logits: Tensor,
labels: Tensor,
ignore_label: int = -1,
background: int = 0,
alpha: float = 0.5,
gamma: float = 0,
norm_type: str = "fg",
) -> Tensor:
r"""Focal Loss for Dense Object Detection:
<https://arxiv.org/pdf/1708.02002.pdf>
.. math::
FL(p_t) = -\alpha_t(1-p_t)^\gamma \log(p_t)
Args:
logits (Tensor):
the predicted logits with the shape of :math:`(B, A, C)`
labels (Tensor):
the assigned labels of boxes with shape of :math:`(B, A)`
ignore_label (int):
the value of ignore class. Default: -1
background (int):
the value of background class. Default: 0
alpha (float):
parameter to mitigate class imbalance. Default: 0.5
gamma (float):
parameter to mitigate easy/hard loss imbalance. Default: 0
norm_type (str): current support "fg", "none":
"fg": loss will be normalized by number of fore-ground samples
"none": not norm
Returns:
the calculated focal loss.
"""
class_range = F.arange(1, logits.shape[2] + 1)
labels = F.add_axis(labels, axis=2)
scores = F.sigmoid(logits)
pos_part = (1 - scores) ** gamma * layers.logsigmoid(logits)
neg_part = scores ** gamma * layers.logsigmoid(-logits)
pos_loss = -(labels == class_range) * pos_part * alpha
neg_loss = (
-(labels != class_range) * (labels != ignore_label) * neg_part * (1 - alpha)
)
loss = (pos_loss + neg_loss).sum()
if norm_type == "fg":
fg_mask = (labels != background) * (labels != ignore_label)
return loss / F.maximum(fg_mask.sum(), 1)
elif norm_type == "none":
return loss
else:
raise NotImplementedError
def get_smooth_l1_loss(
pred_bbox: Tensor,
gt_bbox: Tensor,
labels: Tensor,
beta: int = 1,
background: int = 0,
ignore_label: int = -1,
norm_type: str = "fg",
) -> Tensor:
r"""Smooth l1 loss used in RetinaNet.
Args:
pred_bbox (Tensor):
the predicted bbox with the shape of :math:`(B, A, 4)`
gt_bbox (Tensor):
the ground-truth bbox with the shape of :math:`(B, A, 4)`
labels (Tensor):
the assigned labels of boxes with shape of :math:`(B, A)`
beta (int):
the parameter of smooth l1 loss. Default: 1
background (int):
the value of background class. Default: 0
ignore_label (int):
the value of ignore class. Default: -1
norm_type (str): current support "fg", "all", "none":
"fg": loss will be normalized by number of fore-ground samples
"all": loss will be normalized by number of all samples
"none": not norm
Returns:
the calculated smooth l1 loss.
"""
pred_bbox = pred_bbox.reshape(-1, 4)
gt_bbox = gt_bbox.reshape(-1, 4)
labels = labels.reshape(-1)
fg_mask = (labels != background) * (labels != ignore_label)
loss = get_smooth_l1_base(pred_bbox, gt_bbox, beta)
loss = (loss.sum(axis=1) * fg_mask).sum()
if norm_type == "fg":
loss = loss / F.maximum(fg_mask.sum(), 1)
elif norm_type == "all":
all_mask = labels != ignore_label
loss = loss / F.maximum(all_mask.sum(), 1)
elif norm_type == "none":
return loss
else:
raise NotImplementedError
return loss
def get_smooth_l1_base(pred_bbox: Tensor, gt_bbox: Tensor, beta: float) -> Tensor:
r"""
Args:
pred_bbox (Tensor):
the predicted bbox with the shape of :math:`(N, 4)`
gt_bbox (Tensor):
the ground-truth bbox with the shape of :math:`(N, 4)`
beta (int):
the parameter of smooth l1 loss.
Returns:
the calculated smooth l1 loss.
"""
x = pred_bbox - gt_bbox
abs_x = F.abs(x)
if beta < 1e-5:
loss = abs_x
else:
in_loss = 0.5 * x ** 2 / beta
out_loss = abs_x - 0.5 * beta
# FIXME: F.where cannot handle 0-shape tensor yet
# loss = F.where(abs_x < beta, in_loss, out_loss)
in_mask = abs_x < beta
loss = in_loss * in_mask + out_loss * (1 - in_mask)
return loss
def softmax_loss(scores: Tensor, labels: Tensor, ignore_label: int = -1) -> Tensor:
max_scores = F.zero_grad(scores.max(axis=1, keepdims=True))
scores -= max_scores
log_prob = scores - F.log( | F.exp(scores) | megengine.functional.exp |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import megengine as mge
import megengine.module as M
import numpy as np
import pytest
from basecls.models.repvgg import RepVGGBlock
@pytest.mark.parametrize("w_in", [32, 64])
@pytest.mark.parametrize("w_out", [64])
@pytest.mark.parametrize("stride", [1, 2])
@pytest.mark.parametrize("groups", [1, 2, 4])
@pytest.mark.parametrize("se_r", [0.0, 0.25])
@pytest.mark.parametrize("act_name", ["relu"])
def test_block(w_in, w_out, stride, groups, se_r, act_name):
m = RepVGGBlock(w_in, w_out, stride, groups, se_r, act_name, deploy=False)
assert isinstance(m, M.Module)
m.eval()
x = | mge.random.uniform(size=(2, w_in, 8, 8)) | megengine.random.uniform |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = | M.Linear(input_size, 3 * hidden_size, bias=bias) | megengine.module.Linear |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = | M.Linear(hidden_size, 3 * hidden_size, bias=bias) | megengine.module.Linear |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = | F.reshape(x, (-1, x.shape[1])) | megengine.functional.reshape |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = | F.split(gate_x, 3, axis=1) | megengine.functional.split |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = | F.split(gate_h, 3, axis=1) | megengine.functional.split |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = | F.sigmoid(i_r + h_r) | megengine.functional.sigmoid |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = | F.sigmoid(i_i + h_i) | megengine.functional.sigmoid |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
# ================================= LSTM Implementation ==========================================================
class LSTMCell(M.Module):
"""
An implementation of LSTMCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = | M.Linear(input_size, 4 * hidden_size, bias=bias) | megengine.module.Linear |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
# ================================= LSTM Implementation ==========================================================
class LSTMCell(M.Module):
"""
An implementation of LSTMCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias)
self.h2h = | M.Linear(hidden_size, 4 * hidden_size, bias=bias) | megengine.module.Linear |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
# ================================= LSTM Implementation ==========================================================
class LSTMCell(M.Module):
"""
An implementation of LSTMCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias)
self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
hx, cx = hidden
x = | F.reshape(x, (-1, x.shape[1])) | megengine.functional.reshape |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
# ================================= LSTM Implementation ==========================================================
class LSTMCell(M.Module):
"""
An implementation of LSTMCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias)
self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
hx, cx = hidden
x = F.reshape(x, (-1, x.shape[1]))
gates = self.x2h(x) + self.h2h(hx)
ingate, forgetgate, cellgate, outgate = | F.split(gates, 4, axis=1) | megengine.functional.split |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
# ================================= LSTM Implementation ==========================================================
class LSTMCell(M.Module):
"""
An implementation of LSTMCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias)
self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
hx, cx = hidden
x = F.reshape(x, (-1, x.shape[1]))
gates = self.x2h(x) + self.h2h(hx)
ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1)
ingate = | F.sigmoid(ingate) | megengine.functional.sigmoid |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
# ================================= LSTM Implementation ==========================================================
class LSTMCell(M.Module):
"""
An implementation of LSTMCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias)
self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
hx, cx = hidden
x = F.reshape(x, (-1, x.shape[1]))
gates = self.x2h(x) + self.h2h(hx)
ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1)
ingate = F.sigmoid(ingate)
forgetgate = | F.sigmoid(forgetgate) | megengine.functional.sigmoid |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
# ================================= LSTM Implementation ==========================================================
class LSTMCell(M.Module):
"""
An implementation of LSTMCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias)
self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
hx, cx = hidden
x = F.reshape(x, (-1, x.shape[1]))
gates = self.x2h(x) + self.h2h(hx)
ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = | F.tanh(cellgate) | megengine.functional.tanh |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
# ================================= LSTM Implementation ==========================================================
class LSTMCell(M.Module):
"""
An implementation of LSTMCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias)
self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
hx, cx = hidden
x = F.reshape(x, (-1, x.shape[1]))
gates = self.x2h(x) + self.h2h(hx)
ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = F.tanh(cellgate)
outgate = | F.sigmoid(outgate) | megengine.functional.sigmoid |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
| M.init.uniform_(w, -std, std) | megengine.module.init.uniform_ |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = | F.zeros((self.num_layers, batch, self.hidden_size)) | megengine.functional.zeros |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = | F.stack(outs, axis=1) | megengine.functional.stack |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = | F.stack(outs, axis=0) | megengine.functional.stack |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
# ================================= LSTM Implementation ==========================================================
class LSTMCell(M.Module):
"""
An implementation of LSTMCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias)
self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
| M.init.uniform_(w, -std, std) | megengine.module.init.uniform_ |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
# ================================= LSTM Implementation ==========================================================
class LSTMCell(M.Module):
"""
An implementation of LSTMCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias)
self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
hx, cx = hidden
x = F.reshape(x, (-1, x.shape[1]))
gates = self.x2h(x) + self.h2h(hx)
ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = F.tanh(cellgate)
outgate = F.sigmoid(outgate)
cy = | F.mul(cx, forgetgate) | megengine.functional.mul |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
# ================================= LSTM Implementation ==========================================================
class LSTMCell(M.Module):
"""
An implementation of LSTMCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias)
self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
hx, cx = hidden
x = F.reshape(x, (-1, x.shape[1]))
gates = self.x2h(x) + self.h2h(hx)
ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = F.tanh(cellgate)
outgate = F.sigmoid(outgate)
cy = F.mul(cx, forgetgate) + | F.mul(ingate, cellgate) | megengine.functional.mul |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
# ================================= LSTM Implementation ==========================================================
class LSTMCell(M.Module):
"""
An implementation of LSTMCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias)
self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
hx, cx = hidden
x = F.reshape(x, (-1, x.shape[1]))
gates = self.x2h(x) + self.h2h(hx)
ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = F.tanh(cellgate)
outgate = F.sigmoid(outgate)
cy = F.mul(cx, forgetgate) + F.mul(ingate, cellgate)
hy = F.mul(outgate, | F.tanh(cy) | megengine.functional.tanh |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
# ================================= LSTM Implementation ==========================================================
class LSTMCell(M.Module):
"""
An implementation of LSTMCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias)
self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
hx, cx = hidden
x = F.reshape(x, (-1, x.shape[1]))
gates = self.x2h(x) + self.h2h(hx)
ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = F.tanh(cellgate)
outgate = F.sigmoid(outgate)
cy = F.mul(cx, forgetgate) + F.mul(ingate, cellgate)
hy = F.mul(outgate, F.tanh(cy))
return (hy, cy)
class LSTM(M.Module):
"""
An implementation of LSTMModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(
LSTMCell(self.input_size, self.hidden_size, self.bias)
)
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
LSTMCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = | F.zeros((self.num_layers, batch, self.hidden_size)) | megengine.functional.zeros |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
# ================================= LSTM Implementation ==========================================================
class LSTMCell(M.Module):
"""
An implementation of LSTMCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias)
self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
hx, cx = hidden
x = F.reshape(x, (-1, x.shape[1]))
gates = self.x2h(x) + self.h2h(hx)
ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = F.tanh(cellgate)
outgate = F.sigmoid(outgate)
cy = F.mul(cx, forgetgate) + F.mul(ingate, cellgate)
hy = F.mul(outgate, F.tanh(cy))
return (hy, cy)
class LSTM(M.Module):
"""
An implementation of LSTMModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(
LSTMCell(self.input_size, self.hidden_size, self.bias)
)
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
LSTMCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
c0 = | F.zeros((self.num_layers, batch, self.hidden_size)) | megengine.functional.zeros |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
# ================================= LSTM Implementation ==========================================================
class LSTMCell(M.Module):
"""
An implementation of LSTMCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias)
self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
hx, cx = hidden
x = F.reshape(x, (-1, x.shape[1]))
gates = self.x2h(x) + self.h2h(hx)
ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = F.tanh(cellgate)
outgate = F.sigmoid(outgate)
cy = F.mul(cx, forgetgate) + F.mul(ingate, cellgate)
hy = F.mul(outgate, F.tanh(cy))
return (hy, cy)
class LSTM(M.Module):
"""
An implementation of LSTMModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(
LSTMCell(self.input_size, self.hidden_size, self.bias)
)
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
LSTMCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
c0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx[0]
c0 = hx[1]
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append((h0[layer, :, :], c0[layer, :, :]))
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
inp = input[:, t, :] if self.batch_first else input[t, :, :]
hidden_l = self.rnn_cell_list[layer](
inp, (hidden[layer][0], hidden[layer][1])
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1][0], (hidden[layer][0], hidden[layer][1])
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = (
F.dropout(hidden_l[0], self.dropout),
F.dropout(hidden_l[1], self.dropout),
)
hidden[layer] = hidden_l
outs.append(hidden_l[0])
if self.batch_first:
output = | F.stack(outs, axis=1) | megengine.functional.stack |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
# ================================= LSTM Implementation ==========================================================
class LSTMCell(M.Module):
"""
An implementation of LSTMCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias)
self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
hx, cx = hidden
x = F.reshape(x, (-1, x.shape[1]))
gates = self.x2h(x) + self.h2h(hx)
ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = F.tanh(cellgate)
outgate = F.sigmoid(outgate)
cy = F.mul(cx, forgetgate) + F.mul(ingate, cellgate)
hy = F.mul(outgate, F.tanh(cy))
return (hy, cy)
class LSTM(M.Module):
"""
An implementation of LSTMModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(
LSTMCell(self.input_size, self.hidden_size, self.bias)
)
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
LSTMCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
c0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx[0]
c0 = hx[1]
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append((h0[layer, :, :], c0[layer, :, :]))
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
inp = input[:, t, :] if self.batch_first else input[t, :, :]
hidden_l = self.rnn_cell_list[layer](
inp, (hidden[layer][0], hidden[layer][1])
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1][0], (hidden[layer][0], hidden[layer][1])
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = (
F.dropout(hidden_l[0], self.dropout),
F.dropout(hidden_l[1], self.dropout),
)
hidden[layer] = hidden_l
outs.append(hidden_l[0])
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = | F.stack(outs, axis=0) | megengine.functional.stack |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = | F.dropout(hidden_l, self.dropout) | megengine.functional.dropout |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
# ================================= LSTM Implementation ==========================================================
class LSTMCell(M.Module):
"""
An implementation of LSTMCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias)
self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
hx, cx = hidden
x = F.reshape(x, (-1, x.shape[1]))
gates = self.x2h(x) + self.h2h(hx)
ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = F.tanh(cellgate)
outgate = F.sigmoid(outgate)
cy = F.mul(cx, forgetgate) + F.mul(ingate, cellgate)
hy = F.mul(outgate, F.tanh(cy))
return (hy, cy)
class LSTM(M.Module):
"""
An implementation of LSTMModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(
LSTMCell(self.input_size, self.hidden_size, self.bias)
)
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
LSTMCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
c0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx[0]
c0 = hx[1]
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append((h0[layer, :, :], c0[layer, :, :]))
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
inp = input[:, t, :] if self.batch_first else input[t, :, :]
hidden_l = self.rnn_cell_list[layer](
inp, (hidden[layer][0], hidden[layer][1])
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1][0], (hidden[layer][0], hidden[layer][1])
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = (
| F.dropout(hidden_l[0], self.dropout) | megengine.functional.dropout |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
# ================================= LSTM Implementation ==========================================================
class LSTMCell(M.Module):
"""
An implementation of LSTMCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias)
self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
hx, cx = hidden
x = F.reshape(x, (-1, x.shape[1]))
gates = self.x2h(x) + self.h2h(hx)
ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = F.tanh(cellgate)
outgate = F.sigmoid(outgate)
cy = F.mul(cx, forgetgate) + F.mul(ingate, cellgate)
hy = F.mul(outgate, F.tanh(cy))
return (hy, cy)
class LSTM(M.Module):
"""
An implementation of LSTMModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(
LSTMCell(self.input_size, self.hidden_size, self.bias)
)
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
LSTMCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
c0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx[0]
c0 = hx[1]
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append((h0[layer, :, :], c0[layer, :, :]))
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
inp = input[:, t, :] if self.batch_first else input[t, :, :]
hidden_l = self.rnn_cell_list[layer](
inp, (hidden[layer][0], hidden[layer][1])
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1][0], (hidden[layer][0], hidden[layer][1])
)
if self.dropout and (layer is not self.num_layers - 1):
hidden_l = (
F.dropout(hidden_l[0], self.dropout),
| F.dropout(hidden_l[1], self.dropout) | megengine.functional.dropout |
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = | M.Conv2d(input_dim, hidden_dim, 3, padding=1) | megengine.module.Conv2d |
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = | M.Conv2d(hidden_dim, 2, 3, padding=1) | megengine.module.Conv2d |
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = | M.ReLU() | megengine.module.ReLU |
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = M.ReLU()
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class SepConvGRU(M.Module):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convr1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convq1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convz2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convr2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convq2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
def forward(self, h, x):
# horizontal
hx = | F.concat([h, x], axis=1) | megengine.functional.concat |
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = M.ReLU()
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class SepConvGRU(M.Module):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convr1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convq1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convz2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convr2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convq2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
def forward(self, h, x):
# horizontal
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz1(hx))
r = F.sigmoid(self.convr1(hx))
q = F.tanh(self.convq1(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
# vertical
hx = | F.concat([h, x], axis=1) | megengine.functional.concat |
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = M.ReLU()
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class SepConvGRU(M.Module):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convr1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convq1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convz2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convr2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convq2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
def forward(self, h, x):
# horizontal
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz1(hx))
r = F.sigmoid(self.convr1(hx))
q = F.tanh(self.convq1(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
# vertical
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz2(hx))
r = F.sigmoid(self.convr2(hx))
q = F.tanh(self.convq2(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
return h
class BasicMotionEncoder(M.Module):
def __init__(self, cor_planes):
super(BasicMotionEncoder, self).__init__()
self.convc1 = | M.Conv2d(cor_planes, 256, 1, padding=0) | megengine.module.Conv2d |
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = M.ReLU()
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class SepConvGRU(M.Module):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convr1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convq1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convz2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convr2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convq2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
def forward(self, h, x):
# horizontal
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz1(hx))
r = F.sigmoid(self.convr1(hx))
q = F.tanh(self.convq1(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
# vertical
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz2(hx))
r = F.sigmoid(self.convr2(hx))
q = F.tanh(self.convq2(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
return h
class BasicMotionEncoder(M.Module):
def __init__(self, cor_planes):
super(BasicMotionEncoder, self).__init__()
self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0)
self.convc2 = | M.Conv2d(256, 192, 3, padding=1) | megengine.module.Conv2d |
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = M.ReLU()
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class SepConvGRU(M.Module):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convr1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convq1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convz2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convr2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convq2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
def forward(self, h, x):
# horizontal
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz1(hx))
r = F.sigmoid(self.convr1(hx))
q = F.tanh(self.convq1(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
# vertical
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz2(hx))
r = F.sigmoid(self.convr2(hx))
q = F.tanh(self.convq2(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
return h
class BasicMotionEncoder(M.Module):
def __init__(self, cor_planes):
super(BasicMotionEncoder, self).__init__()
self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0)
self.convc2 = M.Conv2d(256, 192, 3, padding=1)
self.convf1 = | M.Conv2d(2, 128, 7, padding=3) | megengine.module.Conv2d |
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = M.ReLU()
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class SepConvGRU(M.Module):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convr1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convq1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convz2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convr2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convq2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
def forward(self, h, x):
# horizontal
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz1(hx))
r = F.sigmoid(self.convr1(hx))
q = F.tanh(self.convq1(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
# vertical
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz2(hx))
r = F.sigmoid(self.convr2(hx))
q = F.tanh(self.convq2(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
return h
class BasicMotionEncoder(M.Module):
def __init__(self, cor_planes):
super(BasicMotionEncoder, self).__init__()
self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0)
self.convc2 = M.Conv2d(256, 192, 3, padding=1)
self.convf1 = M.Conv2d(2, 128, 7, padding=3)
self.convf2 = | M.Conv2d(128, 64, 3, padding=1) | megengine.module.Conv2d |
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = M.ReLU()
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class SepConvGRU(M.Module):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convr1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convq1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convz2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convr2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convq2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
def forward(self, h, x):
# horizontal
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz1(hx))
r = F.sigmoid(self.convr1(hx))
q = F.tanh(self.convq1(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
# vertical
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz2(hx))
r = F.sigmoid(self.convr2(hx))
q = F.tanh(self.convq2(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
return h
class BasicMotionEncoder(M.Module):
def __init__(self, cor_planes):
super(BasicMotionEncoder, self).__init__()
self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0)
self.convc2 = M.Conv2d(256, 192, 3, padding=1)
self.convf1 = M.Conv2d(2, 128, 7, padding=3)
self.convf2 = M.Conv2d(128, 64, 3, padding=1)
self.conv = | M.Conv2d(64 + 192, 128 - 2, 3, padding=1) | megengine.module.Conv2d |
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = M.ReLU()
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class SepConvGRU(M.Module):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convr1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convq1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convz2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convr2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convq2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
def forward(self, h, x):
# horizontal
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz1(hx))
r = F.sigmoid(self.convr1(hx))
q = F.tanh(self.convq1(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
# vertical
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz2(hx))
r = F.sigmoid(self.convr2(hx))
q = F.tanh(self.convq2(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
return h
class BasicMotionEncoder(M.Module):
def __init__(self, cor_planes):
super(BasicMotionEncoder, self).__init__()
self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0)
self.convc2 = M.Conv2d(256, 192, 3, padding=1)
self.convf1 = M.Conv2d(2, 128, 7, padding=3)
self.convf2 = M.Conv2d(128, 64, 3, padding=1)
self.conv = M.Conv2d(64 + 192, 128 - 2, 3, padding=1)
def forward(self, flow, corr):
cor = F.relu(self.convc1(corr))
cor = F.relu(self.convc2(cor))
flo = F.relu(self.convf1(flow))
flo = F.relu(self.convf2(flo))
cor_flo = | F.concat([cor, flo], axis=1) | megengine.functional.concat |
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = M.ReLU()
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class SepConvGRU(M.Module):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convr1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convq1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convz2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convr2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convq2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
def forward(self, h, x):
# horizontal
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz1(hx))
r = F.sigmoid(self.convr1(hx))
q = F.tanh(self.convq1(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
# vertical
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz2(hx))
r = F.sigmoid(self.convr2(hx))
q = F.tanh(self.convq2(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
return h
class BasicMotionEncoder(M.Module):
def __init__(self, cor_planes):
super(BasicMotionEncoder, self).__init__()
self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0)
self.convc2 = M.Conv2d(256, 192, 3, padding=1)
self.convf1 = M.Conv2d(2, 128, 7, padding=3)
self.convf2 = M.Conv2d(128, 64, 3, padding=1)
self.conv = M.Conv2d(64 + 192, 128 - 2, 3, padding=1)
def forward(self, flow, corr):
cor = F.relu(self.convc1(corr))
cor = F.relu(self.convc2(cor))
flo = F.relu(self.convf1(flow))
flo = F.relu(self.convf2(flo))
cor_flo = F.concat([cor, flo], axis=1)
out = F.relu(self.conv(cor_flo))
return | F.concat([out, flow], axis=1) | megengine.functional.concat |
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = M.ReLU()
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class SepConvGRU(M.Module):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convr1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convq1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convz2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convr2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convq2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
def forward(self, h, x):
# horizontal
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz1(hx))
r = F.sigmoid(self.convr1(hx))
q = F.tanh(self.convq1(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
# vertical
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz2(hx))
r = F.sigmoid(self.convr2(hx))
q = F.tanh(self.convq2(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
return h
class BasicMotionEncoder(M.Module):
def __init__(self, cor_planes):
super(BasicMotionEncoder, self).__init__()
self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0)
self.convc2 = M.Conv2d(256, 192, 3, padding=1)
self.convf1 = M.Conv2d(2, 128, 7, padding=3)
self.convf2 = M.Conv2d(128, 64, 3, padding=1)
self.conv = M.Conv2d(64 + 192, 128 - 2, 3, padding=1)
def forward(self, flow, corr):
cor = F.relu(self.convc1(corr))
cor = F.relu(self.convc2(cor))
flo = F.relu(self.convf1(flow))
flo = F.relu(self.convf2(flo))
cor_flo = F.concat([cor, flo], axis=1)
out = F.relu(self.conv(cor_flo))
return F.concat([out, flow], axis=1)
class BasicUpdateBlock(M.Module):
def __init__(self, hidden_dim, cor_planes, mask_size=8):
super(BasicUpdateBlock, self).__init__()
self.encoder = BasicMotionEncoder(cor_planes)
self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128 + hidden_dim)
self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
self.mask = M.Sequential(
M.Conv2d(128, 256, 3, padding=1),
M.ReLU(),
M.Conv2d(256, mask_size**2 * 9, 1, padding=0),
)
def forward(self, net, inp, corr, flow, upsample=True):
motion_features = self.encoder(flow, corr)
inp = | F.concat([inp, motion_features], axis=1) | megengine.functional.concat |
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = M.ReLU()
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class SepConvGRU(M.Module):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convr1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convq1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convz2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convr2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convq2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
def forward(self, h, x):
# horizontal
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz1(hx))
r = F.sigmoid(self.convr1(hx))
q = F.tanh(self.convq1(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
# vertical
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz2(hx))
r = F.sigmoid(self.convr2(hx))
q = F.tanh(self.convq2(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
return h
class BasicMotionEncoder(M.Module):
def __init__(self, cor_planes):
super(BasicMotionEncoder, self).__init__()
self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0)
self.convc2 = M.Conv2d(256, 192, 3, padding=1)
self.convf1 = M.Conv2d(2, 128, 7, padding=3)
self.convf2 = M.Conv2d(128, 64, 3, padding=1)
self.conv = M.Conv2d(64 + 192, 128 - 2, 3, padding=1)
def forward(self, flow, corr):
cor = F.relu(self.convc1(corr))
cor = F.relu(self.convc2(cor))
flo = F.relu(self.convf1(flow))
flo = F.relu(self.convf2(flo))
cor_flo = F.concat([cor, flo], axis=1)
out = F.relu(self.conv(cor_flo))
return F.concat([out, flow], axis=1)
class BasicUpdateBlock(M.Module):
def __init__(self, hidden_dim, cor_planes, mask_size=8):
super(BasicUpdateBlock, self).__init__()
self.encoder = BasicMotionEncoder(cor_planes)
self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128 + hidden_dim)
self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
self.mask = M.Sequential(
| M.Conv2d(128, 256, 3, padding=1) | megengine.module.Conv2d |
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = M.ReLU()
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class SepConvGRU(M.Module):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convr1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convq1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convz2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convr2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convq2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
def forward(self, h, x):
# horizontal
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz1(hx))
r = F.sigmoid(self.convr1(hx))
q = F.tanh(self.convq1(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
# vertical
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz2(hx))
r = F.sigmoid(self.convr2(hx))
q = F.tanh(self.convq2(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
return h
class BasicMotionEncoder(M.Module):
def __init__(self, cor_planes):
super(BasicMotionEncoder, self).__init__()
self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0)
self.convc2 = M.Conv2d(256, 192, 3, padding=1)
self.convf1 = M.Conv2d(2, 128, 7, padding=3)
self.convf2 = M.Conv2d(128, 64, 3, padding=1)
self.conv = M.Conv2d(64 + 192, 128 - 2, 3, padding=1)
def forward(self, flow, corr):
cor = F.relu(self.convc1(corr))
cor = F.relu(self.convc2(cor))
flo = F.relu(self.convf1(flow))
flo = F.relu(self.convf2(flo))
cor_flo = F.concat([cor, flo], axis=1)
out = F.relu(self.conv(cor_flo))
return F.concat([out, flow], axis=1)
class BasicUpdateBlock(M.Module):
def __init__(self, hidden_dim, cor_planes, mask_size=8):
super(BasicUpdateBlock, self).__init__()
self.encoder = BasicMotionEncoder(cor_planes)
self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128 + hidden_dim)
self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
self.mask = M.Sequential(
M.Conv2d(128, 256, 3, padding=1),
| M.ReLU() | megengine.module.ReLU |
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = M.ReLU()
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class SepConvGRU(M.Module):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convr1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convq1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convz2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convr2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convq2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
def forward(self, h, x):
# horizontal
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz1(hx))
r = F.sigmoid(self.convr1(hx))
q = F.tanh(self.convq1(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
# vertical
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz2(hx))
r = F.sigmoid(self.convr2(hx))
q = F.tanh(self.convq2(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
return h
class BasicMotionEncoder(M.Module):
def __init__(self, cor_planes):
super(BasicMotionEncoder, self).__init__()
self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0)
self.convc2 = M.Conv2d(256, 192, 3, padding=1)
self.convf1 = M.Conv2d(2, 128, 7, padding=3)
self.convf2 = M.Conv2d(128, 64, 3, padding=1)
self.conv = M.Conv2d(64 + 192, 128 - 2, 3, padding=1)
def forward(self, flow, corr):
cor = F.relu(self.convc1(corr))
cor = F.relu(self.convc2(cor))
flo = F.relu(self.convf1(flow))
flo = F.relu(self.convf2(flo))
cor_flo = F.concat([cor, flo], axis=1)
out = F.relu(self.conv(cor_flo))
return F.concat([out, flow], axis=1)
class BasicUpdateBlock(M.Module):
def __init__(self, hidden_dim, cor_planes, mask_size=8):
super(BasicUpdateBlock, self).__init__()
self.encoder = BasicMotionEncoder(cor_planes)
self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128 + hidden_dim)
self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
self.mask = M.Sequential(
M.Conv2d(128, 256, 3, padding=1),
M.ReLU(),
| M.Conv2d(256, mask_size**2 * 9, 1, padding=0) | megengine.module.Conv2d |
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = M.ReLU()
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class SepConvGRU(M.Module):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convr1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convq1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convz2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convr2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convq2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
def forward(self, h, x):
# horizontal
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz1(hx))
r = F.sigmoid(self.convr1(hx))
q = F.tanh(self.convq1( | F.concat([r * h, x], axis=1) | megengine.functional.concat |
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = M.ReLU()
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class SepConvGRU(M.Module):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convr1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convq1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convz2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convr2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convq2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
def forward(self, h, x):
# horizontal
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz1(hx))
r = F.sigmoid(self.convr1(hx))
q = F.tanh(self.convq1(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
# vertical
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz2(hx))
r = F.sigmoid(self.convr2(hx))
q = F.tanh(self.convq2( | F.concat([r * h, x], axis=1) | megengine.functional.concat |
#!/usr/bin/env python3
from dataset import SIDDValData
from model import UNetD
import megengine.data as data
from utils import batch_PSNR
from tqdm import tqdm
import argparse
import pickle
import megengine
def test(args):
valid_dataset = SIDDValData(args.data)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=1, drop_last=False
)
valid_dataloader = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
num_workers=8,
)
model = UNetD(3)
with open(args.checkpoint, "rb") as f:
state = pickle.load(f)
model.load_state_dict(state["state_dict"])
model.eval()
def valid_step(image, label):
pred = model(image)
pred = image - pred
psnr_it = batch_PSNR(pred, label)
return psnr_it
def valid(func, data_queue):
psnr_v = 0.
for step, (image, label) in tqdm(enumerate(data_queue)):
image = | megengine.tensor(image) | megengine.tensor |
#!/usr/bin/env python3
from dataset import SIDDValData
from model import UNetD
import megengine.data as data
from utils import batch_PSNR
from tqdm import tqdm
import argparse
import pickle
import megengine
def test(args):
valid_dataset = SIDDValData(args.data)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=1, drop_last=False
)
valid_dataloader = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
num_workers=8,
)
model = UNetD(3)
with open(args.checkpoint, "rb") as f:
state = pickle.load(f)
model.load_state_dict(state["state_dict"])
model.eval()
def valid_step(image, label):
pred = model(image)
pred = image - pred
psnr_it = batch_PSNR(pred, label)
return psnr_it
def valid(func, data_queue):
psnr_v = 0.
for step, (image, label) in tqdm(enumerate(data_queue)):
image = megengine.tensor(image)
label = | megengine.tensor(label) | megengine.tensor |
import os
import numpy as np
import collections
import megengine.module as M
import megengine.functional as F
import megengine as mge
from megengine.data.dataset import Dataset
from megengine.data import DataLoader
import hparams as hp
from megengine.data import Collator
class AsrDataset(Dataset):
def __init__(self, data_set="train"):
"""
Args:
root_dir (string): Directory with all the spectrograms.
"""
self.metas = self.load_metas(hp.dataset_root, data_set)
def load_metas(self, root, data_set): # fix a bug
metas = []
with open(os.path.join(root, f"{data_set}.txt")) as f:
for line in f.readlines():
info = line.split("|")
metas.append(
{
"mel_path": os.path.join(root, info[0]),
"frames": info[1],
"token_ids_str": info[2],
"speaker": info[3],
}
)
return metas
def __len__(self):
return len(self.metas)
def __getitem__(self, idx):
meta = self.metas[idx]
token_ids = [int(i) for i in meta["token_ids_str"].split(" ")]
text = np.array(token_ids, dtype=np.int32)
mel = np.load(meta["mel_path"])
text_input = text[:-1]
text_output = text[1:]
text_length = text_input.shape[0]
pos_text = np.arange(1, text_length + 1)
pos_mel = np.arange(1, mel.shape[0] + 1)
return {
"text": text,
"text_input": text_input,
"text_output": text_output,
"text_length": text_length,
"mel": mel,
"pos_mel": pos_mel,
"pos_text": pos_text,
}
class AsrCollator(Collator):
def __init__(self, pad_value: float = 0.0):
super().__init__()
self.pad_value = pad_value
def apply(self, batch):
# Puts each data field into a tensor with outer dimension batch size
if isinstance(batch[0], collections.Mapping):
text = [d["text"] for d in batch]
text_input = [d["text_input"] for d in batch]
text_output = [d["text_output"] for d in batch]
text_length = [d["text_length"] for d in batch]
mel = [d["mel"] for d in batch]
mel_length = [d["mel"].shape[0] for d in batch]
pos_mel = [d["pos_mel"] for d in batch]
pos_text = [d["pos_text"] for d in batch]
text = [
i
for i, _ in sorted(
zip(text, mel_length), key=lambda x: x[1], reverse=True
)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [
i
for i, _ in sorted(
zip(mel, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_text = [
i
for i, _ in sorted(
zip(pos_text, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_mel = [
i
for i, _ in sorted(
zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True
)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel)
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
mge.Tensor(mel),
mge.Tensor(pos_text),
mge.Tensor(pos_mel),
mge.Tensor(text_length),
mge.Tensor(mel_length),
)
raise TypeError(
(
"batch must contain tensors, numbers, dicts or lists; found {}".format(
type(batch[0])
)
)
)
def collate_fn_transformer_test(batch):
# Puts each data field into a tensor with outer dimension batch size
# if isinstance(batch[0], collections.Mapping):
text = [batch["text"]] # for d in batch]
text_input = batch["text_input"]
text_output = batch["text_output"]
text_length = batch["text_length"]
mel = [batch["mel"]]
mel_length = [batch["mel"].shape[1]]
pos_mel = batch["pos_mel"]
pos_text = batch["pos_text"]
text = [
i for i, _ in sorted(zip(text, mel_length), key=lambda x: x[1], reverse=True)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [i for i, _ in sorted(zip(mel, mel_length), key=lambda x: x[1], reverse=True)]
pos_text = [
i
for i, _ in sorted(zip(pos_text, mel_length), key=lambda x: x[1], reverse=True)
]
pos_mel = [
i for i, _ in sorted(zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel[0])
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
| mge.Tensor(text_input) | megengine.Tensor |
import os
import numpy as np
import collections
import megengine.module as M
import megengine.functional as F
import megengine as mge
from megengine.data.dataset import Dataset
from megengine.data import DataLoader
import hparams as hp
from megengine.data import Collator
class AsrDataset(Dataset):
def __init__(self, data_set="train"):
"""
Args:
root_dir (string): Directory with all the spectrograms.
"""
self.metas = self.load_metas(hp.dataset_root, data_set)
def load_metas(self, root, data_set): # fix a bug
metas = []
with open(os.path.join(root, f"{data_set}.txt")) as f:
for line in f.readlines():
info = line.split("|")
metas.append(
{
"mel_path": os.path.join(root, info[0]),
"frames": info[1],
"token_ids_str": info[2],
"speaker": info[3],
}
)
return metas
def __len__(self):
return len(self.metas)
def __getitem__(self, idx):
meta = self.metas[idx]
token_ids = [int(i) for i in meta["token_ids_str"].split(" ")]
text = np.array(token_ids, dtype=np.int32)
mel = np.load(meta["mel_path"])
text_input = text[:-1]
text_output = text[1:]
text_length = text_input.shape[0]
pos_text = np.arange(1, text_length + 1)
pos_mel = np.arange(1, mel.shape[0] + 1)
return {
"text": text,
"text_input": text_input,
"text_output": text_output,
"text_length": text_length,
"mel": mel,
"pos_mel": pos_mel,
"pos_text": pos_text,
}
class AsrCollator(Collator):
def __init__(self, pad_value: float = 0.0):
super().__init__()
self.pad_value = pad_value
def apply(self, batch):
# Puts each data field into a tensor with outer dimension batch size
if isinstance(batch[0], collections.Mapping):
text = [d["text"] for d in batch]
text_input = [d["text_input"] for d in batch]
text_output = [d["text_output"] for d in batch]
text_length = [d["text_length"] for d in batch]
mel = [d["mel"] for d in batch]
mel_length = [d["mel"].shape[0] for d in batch]
pos_mel = [d["pos_mel"] for d in batch]
pos_text = [d["pos_text"] for d in batch]
text = [
i
for i, _ in sorted(
zip(text, mel_length), key=lambda x: x[1], reverse=True
)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [
i
for i, _ in sorted(
zip(mel, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_text = [
i
for i, _ in sorted(
zip(pos_text, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_mel = [
i
for i, _ in sorted(
zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True
)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel)
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
mge.Tensor(mel),
mge.Tensor(pos_text),
mge.Tensor(pos_mel),
mge.Tensor(text_length),
mge.Tensor(mel_length),
)
raise TypeError(
(
"batch must contain tensors, numbers, dicts or lists; found {}".format(
type(batch[0])
)
)
)
def collate_fn_transformer_test(batch):
# Puts each data field into a tensor with outer dimension batch size
# if isinstance(batch[0], collections.Mapping):
text = [batch["text"]] # for d in batch]
text_input = batch["text_input"]
text_output = batch["text_output"]
text_length = batch["text_length"]
mel = [batch["mel"]]
mel_length = [batch["mel"].shape[1]]
pos_mel = batch["pos_mel"]
pos_text = batch["pos_text"]
text = [
i for i, _ in sorted(zip(text, mel_length), key=lambda x: x[1], reverse=True)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [i for i, _ in sorted(zip(mel, mel_length), key=lambda x: x[1], reverse=True)]
pos_text = [
i
for i, _ in sorted(zip(pos_text, mel_length), key=lambda x: x[1], reverse=True)
]
pos_mel = [
i for i, _ in sorted(zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel[0])
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
| mge.Tensor(text_output) | megengine.Tensor |
import os
import numpy as np
import collections
import megengine.module as M
import megengine.functional as F
import megengine as mge
from megengine.data.dataset import Dataset
from megengine.data import DataLoader
import hparams as hp
from megengine.data import Collator
class AsrDataset(Dataset):
def __init__(self, data_set="train"):
"""
Args:
root_dir (string): Directory with all the spectrograms.
"""
self.metas = self.load_metas(hp.dataset_root, data_set)
def load_metas(self, root, data_set): # fix a bug
metas = []
with open(os.path.join(root, f"{data_set}.txt")) as f:
for line in f.readlines():
info = line.split("|")
metas.append(
{
"mel_path": os.path.join(root, info[0]),
"frames": info[1],
"token_ids_str": info[2],
"speaker": info[3],
}
)
return metas
def __len__(self):
return len(self.metas)
def __getitem__(self, idx):
meta = self.metas[idx]
token_ids = [int(i) for i in meta["token_ids_str"].split(" ")]
text = np.array(token_ids, dtype=np.int32)
mel = np.load(meta["mel_path"])
text_input = text[:-1]
text_output = text[1:]
text_length = text_input.shape[0]
pos_text = np.arange(1, text_length + 1)
pos_mel = np.arange(1, mel.shape[0] + 1)
return {
"text": text,
"text_input": text_input,
"text_output": text_output,
"text_length": text_length,
"mel": mel,
"pos_mel": pos_mel,
"pos_text": pos_text,
}
class AsrCollator(Collator):
def __init__(self, pad_value: float = 0.0):
super().__init__()
self.pad_value = pad_value
def apply(self, batch):
# Puts each data field into a tensor with outer dimension batch size
if isinstance(batch[0], collections.Mapping):
text = [d["text"] for d in batch]
text_input = [d["text_input"] for d in batch]
text_output = [d["text_output"] for d in batch]
text_length = [d["text_length"] for d in batch]
mel = [d["mel"] for d in batch]
mel_length = [d["mel"].shape[0] for d in batch]
pos_mel = [d["pos_mel"] for d in batch]
pos_text = [d["pos_text"] for d in batch]
text = [
i
for i, _ in sorted(
zip(text, mel_length), key=lambda x: x[1], reverse=True
)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [
i
for i, _ in sorted(
zip(mel, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_text = [
i
for i, _ in sorted(
zip(pos_text, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_mel = [
i
for i, _ in sorted(
zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True
)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel)
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
mge.Tensor(mel),
mge.Tensor(pos_text),
mge.Tensor(pos_mel),
mge.Tensor(text_length),
mge.Tensor(mel_length),
)
raise TypeError(
(
"batch must contain tensors, numbers, dicts or lists; found {}".format(
type(batch[0])
)
)
)
def collate_fn_transformer_test(batch):
# Puts each data field into a tensor with outer dimension batch size
# if isinstance(batch[0], collections.Mapping):
text = [batch["text"]] # for d in batch]
text_input = batch["text_input"]
text_output = batch["text_output"]
text_length = batch["text_length"]
mel = [batch["mel"]]
mel_length = [batch["mel"].shape[1]]
pos_mel = batch["pos_mel"]
pos_text = batch["pos_text"]
text = [
i for i, _ in sorted(zip(text, mel_length), key=lambda x: x[1], reverse=True)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [i for i, _ in sorted(zip(mel, mel_length), key=lambda x: x[1], reverse=True)]
pos_text = [
i
for i, _ in sorted(zip(pos_text, mel_length), key=lambda x: x[1], reverse=True)
]
pos_mel = [
i for i, _ in sorted(zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel[0])
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
| mge.Tensor(mel) | megengine.Tensor |
import os
import numpy as np
import collections
import megengine.module as M
import megengine.functional as F
import megengine as mge
from megengine.data.dataset import Dataset
from megengine.data import DataLoader
import hparams as hp
from megengine.data import Collator
class AsrDataset(Dataset):
def __init__(self, data_set="train"):
"""
Args:
root_dir (string): Directory with all the spectrograms.
"""
self.metas = self.load_metas(hp.dataset_root, data_set)
def load_metas(self, root, data_set): # fix a bug
metas = []
with open(os.path.join(root, f"{data_set}.txt")) as f:
for line in f.readlines():
info = line.split("|")
metas.append(
{
"mel_path": os.path.join(root, info[0]),
"frames": info[1],
"token_ids_str": info[2],
"speaker": info[3],
}
)
return metas
def __len__(self):
return len(self.metas)
def __getitem__(self, idx):
meta = self.metas[idx]
token_ids = [int(i) for i in meta["token_ids_str"].split(" ")]
text = np.array(token_ids, dtype=np.int32)
mel = np.load(meta["mel_path"])
text_input = text[:-1]
text_output = text[1:]
text_length = text_input.shape[0]
pos_text = np.arange(1, text_length + 1)
pos_mel = np.arange(1, mel.shape[0] + 1)
return {
"text": text,
"text_input": text_input,
"text_output": text_output,
"text_length": text_length,
"mel": mel,
"pos_mel": pos_mel,
"pos_text": pos_text,
}
class AsrCollator(Collator):
def __init__(self, pad_value: float = 0.0):
super().__init__()
self.pad_value = pad_value
def apply(self, batch):
# Puts each data field into a tensor with outer dimension batch size
if isinstance(batch[0], collections.Mapping):
text = [d["text"] for d in batch]
text_input = [d["text_input"] for d in batch]
text_output = [d["text_output"] for d in batch]
text_length = [d["text_length"] for d in batch]
mel = [d["mel"] for d in batch]
mel_length = [d["mel"].shape[0] for d in batch]
pos_mel = [d["pos_mel"] for d in batch]
pos_text = [d["pos_text"] for d in batch]
text = [
i
for i, _ in sorted(
zip(text, mel_length), key=lambda x: x[1], reverse=True
)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [
i
for i, _ in sorted(
zip(mel, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_text = [
i
for i, _ in sorted(
zip(pos_text, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_mel = [
i
for i, _ in sorted(
zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True
)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel)
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
mge.Tensor(mel),
mge.Tensor(pos_text),
mge.Tensor(pos_mel),
mge.Tensor(text_length),
mge.Tensor(mel_length),
)
raise TypeError(
(
"batch must contain tensors, numbers, dicts or lists; found {}".format(
type(batch[0])
)
)
)
def collate_fn_transformer_test(batch):
# Puts each data field into a tensor with outer dimension batch size
# if isinstance(batch[0], collections.Mapping):
text = [batch["text"]] # for d in batch]
text_input = batch["text_input"]
text_output = batch["text_output"]
text_length = batch["text_length"]
mel = [batch["mel"]]
mel_length = [batch["mel"].shape[1]]
pos_mel = batch["pos_mel"]
pos_text = batch["pos_text"]
text = [
i for i, _ in sorted(zip(text, mel_length), key=lambda x: x[1], reverse=True)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [i for i, _ in sorted(zip(mel, mel_length), key=lambda x: x[1], reverse=True)]
pos_text = [
i
for i, _ in sorted(zip(pos_text, mel_length), key=lambda x: x[1], reverse=True)
]
pos_mel = [
i for i, _ in sorted(zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel[0])
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
mge.Tensor(mel),
| mge.Tensor(pos_text) | megengine.Tensor |
import os
import numpy as np
import collections
import megengine.module as M
import megengine.functional as F
import megengine as mge
from megengine.data.dataset import Dataset
from megengine.data import DataLoader
import hparams as hp
from megengine.data import Collator
class AsrDataset(Dataset):
def __init__(self, data_set="train"):
"""
Args:
root_dir (string): Directory with all the spectrograms.
"""
self.metas = self.load_metas(hp.dataset_root, data_set)
def load_metas(self, root, data_set): # fix a bug
metas = []
with open(os.path.join(root, f"{data_set}.txt")) as f:
for line in f.readlines():
info = line.split("|")
metas.append(
{
"mel_path": os.path.join(root, info[0]),
"frames": info[1],
"token_ids_str": info[2],
"speaker": info[3],
}
)
return metas
def __len__(self):
return len(self.metas)
def __getitem__(self, idx):
meta = self.metas[idx]
token_ids = [int(i) for i in meta["token_ids_str"].split(" ")]
text = np.array(token_ids, dtype=np.int32)
mel = np.load(meta["mel_path"])
text_input = text[:-1]
text_output = text[1:]
text_length = text_input.shape[0]
pos_text = np.arange(1, text_length + 1)
pos_mel = np.arange(1, mel.shape[0] + 1)
return {
"text": text,
"text_input": text_input,
"text_output": text_output,
"text_length": text_length,
"mel": mel,
"pos_mel": pos_mel,
"pos_text": pos_text,
}
class AsrCollator(Collator):
def __init__(self, pad_value: float = 0.0):
super().__init__()
self.pad_value = pad_value
def apply(self, batch):
# Puts each data field into a tensor with outer dimension batch size
if isinstance(batch[0], collections.Mapping):
text = [d["text"] for d in batch]
text_input = [d["text_input"] for d in batch]
text_output = [d["text_output"] for d in batch]
text_length = [d["text_length"] for d in batch]
mel = [d["mel"] for d in batch]
mel_length = [d["mel"].shape[0] for d in batch]
pos_mel = [d["pos_mel"] for d in batch]
pos_text = [d["pos_text"] for d in batch]
text = [
i
for i, _ in sorted(
zip(text, mel_length), key=lambda x: x[1], reverse=True
)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [
i
for i, _ in sorted(
zip(mel, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_text = [
i
for i, _ in sorted(
zip(pos_text, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_mel = [
i
for i, _ in sorted(
zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True
)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel)
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
mge.Tensor(mel),
mge.Tensor(pos_text),
mge.Tensor(pos_mel),
mge.Tensor(text_length),
mge.Tensor(mel_length),
)
raise TypeError(
(
"batch must contain tensors, numbers, dicts or lists; found {}".format(
type(batch[0])
)
)
)
def collate_fn_transformer_test(batch):
# Puts each data field into a tensor with outer dimension batch size
# if isinstance(batch[0], collections.Mapping):
text = [batch["text"]] # for d in batch]
text_input = batch["text_input"]
text_output = batch["text_output"]
text_length = batch["text_length"]
mel = [batch["mel"]]
mel_length = [batch["mel"].shape[1]]
pos_mel = batch["pos_mel"]
pos_text = batch["pos_text"]
text = [
i for i, _ in sorted(zip(text, mel_length), key=lambda x: x[1], reverse=True)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [i for i, _ in sorted(zip(mel, mel_length), key=lambda x: x[1], reverse=True)]
pos_text = [
i
for i, _ in sorted(zip(pos_text, mel_length), key=lambda x: x[1], reverse=True)
]
pos_mel = [
i for i, _ in sorted(zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel[0])
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
mge.Tensor(mel),
mge.Tensor(pos_text),
| mge.Tensor(pos_mel) | megengine.Tensor |
import os
import numpy as np
import collections
import megengine.module as M
import megengine.functional as F
import megengine as mge
from megengine.data.dataset import Dataset
from megengine.data import DataLoader
import hparams as hp
from megengine.data import Collator
class AsrDataset(Dataset):
def __init__(self, data_set="train"):
"""
Args:
root_dir (string): Directory with all the spectrograms.
"""
self.metas = self.load_metas(hp.dataset_root, data_set)
def load_metas(self, root, data_set): # fix a bug
metas = []
with open(os.path.join(root, f"{data_set}.txt")) as f:
for line in f.readlines():
info = line.split("|")
metas.append(
{
"mel_path": os.path.join(root, info[0]),
"frames": info[1],
"token_ids_str": info[2],
"speaker": info[3],
}
)
return metas
def __len__(self):
return len(self.metas)
def __getitem__(self, idx):
meta = self.metas[idx]
token_ids = [int(i) for i in meta["token_ids_str"].split(" ")]
text = np.array(token_ids, dtype=np.int32)
mel = np.load(meta["mel_path"])
text_input = text[:-1]
text_output = text[1:]
text_length = text_input.shape[0]
pos_text = np.arange(1, text_length + 1)
pos_mel = np.arange(1, mel.shape[0] + 1)
return {
"text": text,
"text_input": text_input,
"text_output": text_output,
"text_length": text_length,
"mel": mel,
"pos_mel": pos_mel,
"pos_text": pos_text,
}
class AsrCollator(Collator):
def __init__(self, pad_value: float = 0.0):
super().__init__()
self.pad_value = pad_value
def apply(self, batch):
# Puts each data field into a tensor with outer dimension batch size
if isinstance(batch[0], collections.Mapping):
text = [d["text"] for d in batch]
text_input = [d["text_input"] for d in batch]
text_output = [d["text_output"] for d in batch]
text_length = [d["text_length"] for d in batch]
mel = [d["mel"] for d in batch]
mel_length = [d["mel"].shape[0] for d in batch]
pos_mel = [d["pos_mel"] for d in batch]
pos_text = [d["pos_text"] for d in batch]
text = [
i
for i, _ in sorted(
zip(text, mel_length), key=lambda x: x[1], reverse=True
)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [
i
for i, _ in sorted(
zip(mel, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_text = [
i
for i, _ in sorted(
zip(pos_text, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_mel = [
i
for i, _ in sorted(
zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True
)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel)
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
mge.Tensor(mel),
mge.Tensor(pos_text),
mge.Tensor(pos_mel),
mge.Tensor(text_length),
mge.Tensor(mel_length),
)
raise TypeError(
(
"batch must contain tensors, numbers, dicts or lists; found {}".format(
type(batch[0])
)
)
)
def collate_fn_transformer_test(batch):
# Puts each data field into a tensor with outer dimension batch size
# if isinstance(batch[0], collections.Mapping):
text = [batch["text"]] # for d in batch]
text_input = batch["text_input"]
text_output = batch["text_output"]
text_length = batch["text_length"]
mel = [batch["mel"]]
mel_length = [batch["mel"].shape[1]]
pos_mel = batch["pos_mel"]
pos_text = batch["pos_text"]
text = [
i for i, _ in sorted(zip(text, mel_length), key=lambda x: x[1], reverse=True)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [i for i, _ in sorted(zip(mel, mel_length), key=lambda x: x[1], reverse=True)]
pos_text = [
i
for i, _ in sorted(zip(pos_text, mel_length), key=lambda x: x[1], reverse=True)
]
pos_mel = [
i for i, _ in sorted(zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel[0])
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
mge.Tensor(mel),
mge.Tensor(pos_text),
mge.Tensor(pos_mel),
| mge.Tensor(text_length) | megengine.Tensor |
import os
import numpy as np
import collections
import megengine.module as M
import megengine.functional as F
import megengine as mge
from megengine.data.dataset import Dataset
from megengine.data import DataLoader
import hparams as hp
from megengine.data import Collator
class AsrDataset(Dataset):
def __init__(self, data_set="train"):
"""
Args:
root_dir (string): Directory with all the spectrograms.
"""
self.metas = self.load_metas(hp.dataset_root, data_set)
def load_metas(self, root, data_set): # fix a bug
metas = []
with open(os.path.join(root, f"{data_set}.txt")) as f:
for line in f.readlines():
info = line.split("|")
metas.append(
{
"mel_path": os.path.join(root, info[0]),
"frames": info[1],
"token_ids_str": info[2],
"speaker": info[3],
}
)
return metas
def __len__(self):
return len(self.metas)
def __getitem__(self, idx):
meta = self.metas[idx]
token_ids = [int(i) for i in meta["token_ids_str"].split(" ")]
text = np.array(token_ids, dtype=np.int32)
mel = np.load(meta["mel_path"])
text_input = text[:-1]
text_output = text[1:]
text_length = text_input.shape[0]
pos_text = np.arange(1, text_length + 1)
pos_mel = np.arange(1, mel.shape[0] + 1)
return {
"text": text,
"text_input": text_input,
"text_output": text_output,
"text_length": text_length,
"mel": mel,
"pos_mel": pos_mel,
"pos_text": pos_text,
}
class AsrCollator(Collator):
def __init__(self, pad_value: float = 0.0):
super().__init__()
self.pad_value = pad_value
def apply(self, batch):
# Puts each data field into a tensor with outer dimension batch size
if isinstance(batch[0], collections.Mapping):
text = [d["text"] for d in batch]
text_input = [d["text_input"] for d in batch]
text_output = [d["text_output"] for d in batch]
text_length = [d["text_length"] for d in batch]
mel = [d["mel"] for d in batch]
mel_length = [d["mel"].shape[0] for d in batch]
pos_mel = [d["pos_mel"] for d in batch]
pos_text = [d["pos_text"] for d in batch]
text = [
i
for i, _ in sorted(
zip(text, mel_length), key=lambda x: x[1], reverse=True
)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [
i
for i, _ in sorted(
zip(mel, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_text = [
i
for i, _ in sorted(
zip(pos_text, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_mel = [
i
for i, _ in sorted(
zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True
)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel)
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
mge.Tensor(mel),
mge.Tensor(pos_text),
mge.Tensor(pos_mel),
mge.Tensor(text_length),
mge.Tensor(mel_length),
)
raise TypeError(
(
"batch must contain tensors, numbers, dicts or lists; found {}".format(
type(batch[0])
)
)
)
def collate_fn_transformer_test(batch):
# Puts each data field into a tensor with outer dimension batch size
# if isinstance(batch[0], collections.Mapping):
text = [batch["text"]] # for d in batch]
text_input = batch["text_input"]
text_output = batch["text_output"]
text_length = batch["text_length"]
mel = [batch["mel"]]
mel_length = [batch["mel"].shape[1]]
pos_mel = batch["pos_mel"]
pos_text = batch["pos_text"]
text = [
i for i, _ in sorted(zip(text, mel_length), key=lambda x: x[1], reverse=True)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [i for i, _ in sorted(zip(mel, mel_length), key=lambda x: x[1], reverse=True)]
pos_text = [
i
for i, _ in sorted(zip(pos_text, mel_length), key=lambda x: x[1], reverse=True)
]
pos_mel = [
i for i, _ in sorted(zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel[0])
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
mge.Tensor(mel),
mge.Tensor(pos_text),
mge.Tensor(pos_mel),
mge.Tensor(text_length),
| mge.Tensor(mel_length) | megengine.Tensor |
import os
import numpy as np
import collections
import megengine.module as M
import megengine.functional as F
import megengine as mge
from megengine.data.dataset import Dataset
from megengine.data import DataLoader
import hparams as hp
from megengine.data import Collator
class AsrDataset(Dataset):
def __init__(self, data_set="train"):
"""
Args:
root_dir (string): Directory with all the spectrograms.
"""
self.metas = self.load_metas(hp.dataset_root, data_set)
def load_metas(self, root, data_set): # fix a bug
metas = []
with open(os.path.join(root, f"{data_set}.txt")) as f:
for line in f.readlines():
info = line.split("|")
metas.append(
{
"mel_path": os.path.join(root, info[0]),
"frames": info[1],
"token_ids_str": info[2],
"speaker": info[3],
}
)
return metas
def __len__(self):
return len(self.metas)
def __getitem__(self, idx):
meta = self.metas[idx]
token_ids = [int(i) for i in meta["token_ids_str"].split(" ")]
text = np.array(token_ids, dtype=np.int32)
mel = np.load(meta["mel_path"])
text_input = text[:-1]
text_output = text[1:]
text_length = text_input.shape[0]
pos_text = np.arange(1, text_length + 1)
pos_mel = np.arange(1, mel.shape[0] + 1)
return {
"text": text,
"text_input": text_input,
"text_output": text_output,
"text_length": text_length,
"mel": mel,
"pos_mel": pos_mel,
"pos_text": pos_text,
}
class AsrCollator(Collator):
def __init__(self, pad_value: float = 0.0):
super().__init__()
self.pad_value = pad_value
def apply(self, batch):
# Puts each data field into a tensor with outer dimension batch size
if isinstance(batch[0], collections.Mapping):
text = [d["text"] for d in batch]
text_input = [d["text_input"] for d in batch]
text_output = [d["text_output"] for d in batch]
text_length = [d["text_length"] for d in batch]
mel = [d["mel"] for d in batch]
mel_length = [d["mel"].shape[0] for d in batch]
pos_mel = [d["pos_mel"] for d in batch]
pos_text = [d["pos_text"] for d in batch]
text = [
i
for i, _ in sorted(
zip(text, mel_length), key=lambda x: x[1], reverse=True
)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [
i
for i, _ in sorted(
zip(mel, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_text = [
i
for i, _ in sorted(
zip(pos_text, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_mel = [
i
for i, _ in sorted(
zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True
)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel)
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
| mge.Tensor(text_input) | megengine.Tensor |
import os
import numpy as np
import collections
import megengine.module as M
import megengine.functional as F
import megengine as mge
from megengine.data.dataset import Dataset
from megengine.data import DataLoader
import hparams as hp
from megengine.data import Collator
class AsrDataset(Dataset):
def __init__(self, data_set="train"):
"""
Args:
root_dir (string): Directory with all the spectrograms.
"""
self.metas = self.load_metas(hp.dataset_root, data_set)
def load_metas(self, root, data_set): # fix a bug
metas = []
with open(os.path.join(root, f"{data_set}.txt")) as f:
for line in f.readlines():
info = line.split("|")
metas.append(
{
"mel_path": os.path.join(root, info[0]),
"frames": info[1],
"token_ids_str": info[2],
"speaker": info[3],
}
)
return metas
def __len__(self):
return len(self.metas)
def __getitem__(self, idx):
meta = self.metas[idx]
token_ids = [int(i) for i in meta["token_ids_str"].split(" ")]
text = np.array(token_ids, dtype=np.int32)
mel = np.load(meta["mel_path"])
text_input = text[:-1]
text_output = text[1:]
text_length = text_input.shape[0]
pos_text = np.arange(1, text_length + 1)
pos_mel = np.arange(1, mel.shape[0] + 1)
return {
"text": text,
"text_input": text_input,
"text_output": text_output,
"text_length": text_length,
"mel": mel,
"pos_mel": pos_mel,
"pos_text": pos_text,
}
class AsrCollator(Collator):
def __init__(self, pad_value: float = 0.0):
super().__init__()
self.pad_value = pad_value
def apply(self, batch):
# Puts each data field into a tensor with outer dimension batch size
if isinstance(batch[0], collections.Mapping):
text = [d["text"] for d in batch]
text_input = [d["text_input"] for d in batch]
text_output = [d["text_output"] for d in batch]
text_length = [d["text_length"] for d in batch]
mel = [d["mel"] for d in batch]
mel_length = [d["mel"].shape[0] for d in batch]
pos_mel = [d["pos_mel"] for d in batch]
pos_text = [d["pos_text"] for d in batch]
text = [
i
for i, _ in sorted(
zip(text, mel_length), key=lambda x: x[1], reverse=True
)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [
i
for i, _ in sorted(
zip(mel, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_text = [
i
for i, _ in sorted(
zip(pos_text, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_mel = [
i
for i, _ in sorted(
zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True
)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel)
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
| mge.Tensor(text_output) | megengine.Tensor |
import os
import numpy as np
import collections
import megengine.module as M
import megengine.functional as F
import megengine as mge
from megengine.data.dataset import Dataset
from megengine.data import DataLoader
import hparams as hp
from megengine.data import Collator
class AsrDataset(Dataset):
def __init__(self, data_set="train"):
"""
Args:
root_dir (string): Directory with all the spectrograms.
"""
self.metas = self.load_metas(hp.dataset_root, data_set)
def load_metas(self, root, data_set): # fix a bug
metas = []
with open(os.path.join(root, f"{data_set}.txt")) as f:
for line in f.readlines():
info = line.split("|")
metas.append(
{
"mel_path": os.path.join(root, info[0]),
"frames": info[1],
"token_ids_str": info[2],
"speaker": info[3],
}
)
return metas
def __len__(self):
return len(self.metas)
def __getitem__(self, idx):
meta = self.metas[idx]
token_ids = [int(i) for i in meta["token_ids_str"].split(" ")]
text = np.array(token_ids, dtype=np.int32)
mel = np.load(meta["mel_path"])
text_input = text[:-1]
text_output = text[1:]
text_length = text_input.shape[0]
pos_text = np.arange(1, text_length + 1)
pos_mel = np.arange(1, mel.shape[0] + 1)
return {
"text": text,
"text_input": text_input,
"text_output": text_output,
"text_length": text_length,
"mel": mel,
"pos_mel": pos_mel,
"pos_text": pos_text,
}
class AsrCollator(Collator):
def __init__(self, pad_value: float = 0.0):
super().__init__()
self.pad_value = pad_value
def apply(self, batch):
# Puts each data field into a tensor with outer dimension batch size
if isinstance(batch[0], collections.Mapping):
text = [d["text"] for d in batch]
text_input = [d["text_input"] for d in batch]
text_output = [d["text_output"] for d in batch]
text_length = [d["text_length"] for d in batch]
mel = [d["mel"] for d in batch]
mel_length = [d["mel"].shape[0] for d in batch]
pos_mel = [d["pos_mel"] for d in batch]
pos_text = [d["pos_text"] for d in batch]
text = [
i
for i, _ in sorted(
zip(text, mel_length), key=lambda x: x[1], reverse=True
)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [
i
for i, _ in sorted(
zip(mel, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_text = [
i
for i, _ in sorted(
zip(pos_text, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_mel = [
i
for i, _ in sorted(
zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True
)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel)
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
| mge.Tensor(mel) | megengine.Tensor |
import os
import numpy as np
import collections
import megengine.module as M
import megengine.functional as F
import megengine as mge
from megengine.data.dataset import Dataset
from megengine.data import DataLoader
import hparams as hp
from megengine.data import Collator
class AsrDataset(Dataset):
def __init__(self, data_set="train"):
"""
Args:
root_dir (string): Directory with all the spectrograms.
"""
self.metas = self.load_metas(hp.dataset_root, data_set)
def load_metas(self, root, data_set): # fix a bug
metas = []
with open(os.path.join(root, f"{data_set}.txt")) as f:
for line in f.readlines():
info = line.split("|")
metas.append(
{
"mel_path": os.path.join(root, info[0]),
"frames": info[1],
"token_ids_str": info[2],
"speaker": info[3],
}
)
return metas
def __len__(self):
return len(self.metas)
def __getitem__(self, idx):
meta = self.metas[idx]
token_ids = [int(i) for i in meta["token_ids_str"].split(" ")]
text = np.array(token_ids, dtype=np.int32)
mel = np.load(meta["mel_path"])
text_input = text[:-1]
text_output = text[1:]
text_length = text_input.shape[0]
pos_text = np.arange(1, text_length + 1)
pos_mel = np.arange(1, mel.shape[0] + 1)
return {
"text": text,
"text_input": text_input,
"text_output": text_output,
"text_length": text_length,
"mel": mel,
"pos_mel": pos_mel,
"pos_text": pos_text,
}
class AsrCollator(Collator):
def __init__(self, pad_value: float = 0.0):
super().__init__()
self.pad_value = pad_value
def apply(self, batch):
# Puts each data field into a tensor with outer dimension batch size
if isinstance(batch[0], collections.Mapping):
text = [d["text"] for d in batch]
text_input = [d["text_input"] for d in batch]
text_output = [d["text_output"] for d in batch]
text_length = [d["text_length"] for d in batch]
mel = [d["mel"] for d in batch]
mel_length = [d["mel"].shape[0] for d in batch]
pos_mel = [d["pos_mel"] for d in batch]
pos_text = [d["pos_text"] for d in batch]
text = [
i
for i, _ in sorted(
zip(text, mel_length), key=lambda x: x[1], reverse=True
)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [
i
for i, _ in sorted(
zip(mel, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_text = [
i
for i, _ in sorted(
zip(pos_text, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_mel = [
i
for i, _ in sorted(
zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True
)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel)
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
mge.Tensor(mel),
| mge.Tensor(pos_text) | megengine.Tensor |
import os
import numpy as np
import collections
import megengine.module as M
import megengine.functional as F
import megengine as mge
from megengine.data.dataset import Dataset
from megengine.data import DataLoader
import hparams as hp
from megengine.data import Collator
class AsrDataset(Dataset):
def __init__(self, data_set="train"):
"""
Args:
root_dir (string): Directory with all the spectrograms.
"""
self.metas = self.load_metas(hp.dataset_root, data_set)
def load_metas(self, root, data_set): # fix a bug
metas = []
with open(os.path.join(root, f"{data_set}.txt")) as f:
for line in f.readlines():
info = line.split("|")
metas.append(
{
"mel_path": os.path.join(root, info[0]),
"frames": info[1],
"token_ids_str": info[2],
"speaker": info[3],
}
)
return metas
def __len__(self):
return len(self.metas)
def __getitem__(self, idx):
meta = self.metas[idx]
token_ids = [int(i) for i in meta["token_ids_str"].split(" ")]
text = np.array(token_ids, dtype=np.int32)
mel = np.load(meta["mel_path"])
text_input = text[:-1]
text_output = text[1:]
text_length = text_input.shape[0]
pos_text = np.arange(1, text_length + 1)
pos_mel = np.arange(1, mel.shape[0] + 1)
return {
"text": text,
"text_input": text_input,
"text_output": text_output,
"text_length": text_length,
"mel": mel,
"pos_mel": pos_mel,
"pos_text": pos_text,
}
class AsrCollator(Collator):
def __init__(self, pad_value: float = 0.0):
super().__init__()
self.pad_value = pad_value
def apply(self, batch):
# Puts each data field into a tensor with outer dimension batch size
if isinstance(batch[0], collections.Mapping):
text = [d["text"] for d in batch]
text_input = [d["text_input"] for d in batch]
text_output = [d["text_output"] for d in batch]
text_length = [d["text_length"] for d in batch]
mel = [d["mel"] for d in batch]
mel_length = [d["mel"].shape[0] for d in batch]
pos_mel = [d["pos_mel"] for d in batch]
pos_text = [d["pos_text"] for d in batch]
text = [
i
for i, _ in sorted(
zip(text, mel_length), key=lambda x: x[1], reverse=True
)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [
i
for i, _ in sorted(
zip(mel, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_text = [
i
for i, _ in sorted(
zip(pos_text, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_mel = [
i
for i, _ in sorted(
zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True
)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel)
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
mge.Tensor(mel),
mge.Tensor(pos_text),
| mge.Tensor(pos_mel) | megengine.Tensor |
import os
import numpy as np
import collections
import megengine.module as M
import megengine.functional as F
import megengine as mge
from megengine.data.dataset import Dataset
from megengine.data import DataLoader
import hparams as hp
from megengine.data import Collator
class AsrDataset(Dataset):
def __init__(self, data_set="train"):
"""
Args:
root_dir (string): Directory with all the spectrograms.
"""
self.metas = self.load_metas(hp.dataset_root, data_set)
def load_metas(self, root, data_set): # fix a bug
metas = []
with open(os.path.join(root, f"{data_set}.txt")) as f:
for line in f.readlines():
info = line.split("|")
metas.append(
{
"mel_path": os.path.join(root, info[0]),
"frames": info[1],
"token_ids_str": info[2],
"speaker": info[3],
}
)
return metas
def __len__(self):
return len(self.metas)
def __getitem__(self, idx):
meta = self.metas[idx]
token_ids = [int(i) for i in meta["token_ids_str"].split(" ")]
text = np.array(token_ids, dtype=np.int32)
mel = np.load(meta["mel_path"])
text_input = text[:-1]
text_output = text[1:]
text_length = text_input.shape[0]
pos_text = np.arange(1, text_length + 1)
pos_mel = np.arange(1, mel.shape[0] + 1)
return {
"text": text,
"text_input": text_input,
"text_output": text_output,
"text_length": text_length,
"mel": mel,
"pos_mel": pos_mel,
"pos_text": pos_text,
}
class AsrCollator(Collator):
def __init__(self, pad_value: float = 0.0):
super().__init__()
self.pad_value = pad_value
def apply(self, batch):
# Puts each data field into a tensor with outer dimension batch size
if isinstance(batch[0], collections.Mapping):
text = [d["text"] for d in batch]
text_input = [d["text_input"] for d in batch]
text_output = [d["text_output"] for d in batch]
text_length = [d["text_length"] for d in batch]
mel = [d["mel"] for d in batch]
mel_length = [d["mel"].shape[0] for d in batch]
pos_mel = [d["pos_mel"] for d in batch]
pos_text = [d["pos_text"] for d in batch]
text = [
i
for i, _ in sorted(
zip(text, mel_length), key=lambda x: x[1], reverse=True
)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [
i
for i, _ in sorted(
zip(mel, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_text = [
i
for i, _ in sorted(
zip(pos_text, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_mel = [
i
for i, _ in sorted(
zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True
)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel)
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
mge.Tensor(mel),
mge.Tensor(pos_text),
mge.Tensor(pos_mel),
| mge.Tensor(text_length) | megengine.Tensor |
import os
import numpy as np
import collections
import megengine.module as M
import megengine.functional as F
import megengine as mge
from megengine.data.dataset import Dataset
from megengine.data import DataLoader
import hparams as hp
from megengine.data import Collator
class AsrDataset(Dataset):
def __init__(self, data_set="train"):
"""
Args:
root_dir (string): Directory with all the spectrograms.
"""
self.metas = self.load_metas(hp.dataset_root, data_set)
def load_metas(self, root, data_set): # fix a bug
metas = []
with open(os.path.join(root, f"{data_set}.txt")) as f:
for line in f.readlines():
info = line.split("|")
metas.append(
{
"mel_path": os.path.join(root, info[0]),
"frames": info[1],
"token_ids_str": info[2],
"speaker": info[3],
}
)
return metas
def __len__(self):
return len(self.metas)
def __getitem__(self, idx):
meta = self.metas[idx]
token_ids = [int(i) for i in meta["token_ids_str"].split(" ")]
text = np.array(token_ids, dtype=np.int32)
mel = np.load(meta["mel_path"])
text_input = text[:-1]
text_output = text[1:]
text_length = text_input.shape[0]
pos_text = np.arange(1, text_length + 1)
pos_mel = np.arange(1, mel.shape[0] + 1)
return {
"text": text,
"text_input": text_input,
"text_output": text_output,
"text_length": text_length,
"mel": mel,
"pos_mel": pos_mel,
"pos_text": pos_text,
}
class AsrCollator(Collator):
def __init__(self, pad_value: float = 0.0):
super().__init__()
self.pad_value = pad_value
def apply(self, batch):
# Puts each data field into a tensor with outer dimension batch size
if isinstance(batch[0], collections.Mapping):
text = [d["text"] for d in batch]
text_input = [d["text_input"] for d in batch]
text_output = [d["text_output"] for d in batch]
text_length = [d["text_length"] for d in batch]
mel = [d["mel"] for d in batch]
mel_length = [d["mel"].shape[0] for d in batch]
pos_mel = [d["pos_mel"] for d in batch]
pos_text = [d["pos_text"] for d in batch]
text = [
i
for i, _ in sorted(
zip(text, mel_length), key=lambda x: x[1], reverse=True
)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [
i
for i, _ in sorted(
zip(mel, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_text = [
i
for i, _ in sorted(
zip(pos_text, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_mel = [
i
for i, _ in sorted(
zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True
)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel)
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
mge.Tensor(mel),
mge.Tensor(pos_text),
mge.Tensor(pos_mel),
mge.Tensor(text_length),
| mge.Tensor(mel_length) | megengine.Tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = | megengine.logger.get_logger() | megengine.logger.get_logger |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
# multi-step learning rate scheduler with warmup
def adjust_learning_rate(step):
lr = args.lr * 0.1 ** bisect.bisect_right(
[30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
)
if step < 5 * steps_per_epoch: # warmup
lr = args.lr * (step / (5 * steps_per_epoch))
for param_group in opt.param_groups:
param_group["lr"] = lr
return lr
# start training
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
for step in range(0, args.epochs * steps_per_epoch):
lr = adjust_learning_rate(step)
t = time.time()
image, label = next(train_queue)
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss, acc1, acc5 = train_step(image, label)
objs.update(loss.item())
top1.update(100 * acc1.item())
top5.update(100 * acc5.item())
clck.update(time.time() - t)
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info(
"Epoch %d Step %d, LR %.4f, %s %s %s %s",
step // steps_per_epoch,
step,
lr,
objs,
top1,
top5,
clck,
)
objs.reset()
top1.reset()
top5.reset()
clck.reset()
if (step + 1) % steps_per_epoch == 0:
model.eval()
_, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
model.train()
logging.info(
"Epoch %d Test Acc@1 %.3f, Acc@5 %.3f",
(step + 1) // steps_per_epoch,
valid_acc1,
valid_acc5,
)
megengine.save(
{
"epoch": (step + 1) // steps_per_epoch,
"state_dict": model.state_dict(),
},
os.path.join(args.save, args.arch, "checkpoint.pkl"),
)
def valid(func, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
n = image.shape[0]
loss, acc1, acc5 = func(image, label)
objs.update(loss.item(), n)
top1.update(100 * acc1.item(), n)
top5.update(100 * acc5.item(), n)
clck.update(time.time() - t, n)
t = time.time()
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info("Test step %d, %s %s %s %s", step, objs, top1, top5, clck)
return objs.avg, top1.avg, top5.avg
def build_dataset(args):
train_dataset = | data.dataset.ImageNet(args.data, train=True) | megengine.data.dataset.ImageNet |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
# multi-step learning rate scheduler with warmup
def adjust_learning_rate(step):
lr = args.lr * 0.1 ** bisect.bisect_right(
[30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
)
if step < 5 * steps_per_epoch: # warmup
lr = args.lr * (step / (5 * steps_per_epoch))
for param_group in opt.param_groups:
param_group["lr"] = lr
return lr
# start training
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
for step in range(0, args.epochs * steps_per_epoch):
lr = adjust_learning_rate(step)
t = time.time()
image, label = next(train_queue)
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss, acc1, acc5 = train_step(image, label)
objs.update(loss.item())
top1.update(100 * acc1.item())
top5.update(100 * acc5.item())
clck.update(time.time() - t)
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info(
"Epoch %d Step %d, LR %.4f, %s %s %s %s",
step // steps_per_epoch,
step,
lr,
objs,
top1,
top5,
clck,
)
objs.reset()
top1.reset()
top5.reset()
clck.reset()
if (step + 1) % steps_per_epoch == 0:
model.eval()
_, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
model.train()
logging.info(
"Epoch %d Test Acc@1 %.3f, Acc@5 %.3f",
(step + 1) // steps_per_epoch,
valid_acc1,
valid_acc5,
)
megengine.save(
{
"epoch": (step + 1) // steps_per_epoch,
"state_dict": model.state_dict(),
},
os.path.join(args.save, args.arch, "checkpoint.pkl"),
)
def valid(func, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
n = image.shape[0]
loss, acc1, acc5 = func(image, label)
objs.update(loss.item(), n)
top1.update(100 * acc1.item(), n)
top5.update(100 * acc5.item(), n)
clck.update(time.time() - t, n)
t = time.time()
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info("Test step %d, %s %s %s %s", step, objs, top1, top5, clck)
return objs.avg, top1.avg, top5.avg
def build_dataset(args):
train_dataset = data.dataset.ImageNet(args.data, train=True)
train_sampler = data.Infinite(
data.RandomSampler(train_dataset, batch_size=args.batch_size, drop_last=True)
)
train_dataloader = data.DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
[ # Baseline Augmentation for small models
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
T.ToMode("CHW"),
]
)
if args.arch in ("resnet18", "resnet34")
else T.Compose(
[ # Facebook Augmentation for large models
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
T.ToMode("CHW"),
]
),
num_workers=args.workers,
)
valid_dataset = | data.dataset.ImageNet(args.data, train=False) | megengine.data.dataset.ImageNet |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = | dist.Server(port=args.dist_port) | megengine.distributed.Server |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = | DTR(memory_budget=5*1024**3) | megengine.utils.dtr.DTR |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = | F.nn.cross_entropy(logits, label) | megengine.functional.nn.cross_entropy |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = | F.topk_accuracy(logits, label, topk=(1, 5)) | megengine.functional.topk_accuracy |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
# multi-step learning rate scheduler with warmup
def adjust_learning_rate(step):
lr = args.lr * 0.1 ** bisect.bisect_right(
[30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
)
if step < 5 * steps_per_epoch: # warmup
lr = args.lr * (step / (5 * steps_per_epoch))
for param_group in opt.param_groups:
param_group["lr"] = lr
return lr
# start training
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
for step in range(0, args.epochs * steps_per_epoch):
lr = adjust_learning_rate(step)
t = time.time()
image, label = next(train_queue)
image = | megengine.tensor(image, dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
# multi-step learning rate scheduler with warmup
def adjust_learning_rate(step):
lr = args.lr * 0.1 ** bisect.bisect_right(
[30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
)
if step < 5 * steps_per_epoch: # warmup
lr = args.lr * (step / (5 * steps_per_epoch))
for param_group in opt.param_groups:
param_group["lr"] = lr
return lr
# start training
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
for step in range(0, args.epochs * steps_per_epoch):
lr = adjust_learning_rate(step)
t = time.time()
image, label = next(train_queue)
image = megengine.tensor(image, dtype="float32")
label = | megengine.tensor(label, dtype="int32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
# multi-step learning rate scheduler with warmup
def adjust_learning_rate(step):
lr = args.lr * 0.1 ** bisect.bisect_right(
[30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
)
if step < 5 * steps_per_epoch: # warmup
lr = args.lr * (step / (5 * steps_per_epoch))
for param_group in opt.param_groups:
param_group["lr"] = lr
return lr
# start training
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
for step in range(0, args.epochs * steps_per_epoch):
lr = adjust_learning_rate(step)
t = time.time()
image, label = next(train_queue)
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss, acc1, acc5 = train_step(image, label)
objs.update(loss.item())
top1.update(100 * acc1.item())
top5.update(100 * acc5.item())
clck.update(time.time() - t)
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info(
"Epoch %d Step %d, LR %.4f, %s %s %s %s",
step // steps_per_epoch,
step,
lr,
objs,
top1,
top5,
clck,
)
objs.reset()
top1.reset()
top5.reset()
clck.reset()
if (step + 1) % steps_per_epoch == 0:
model.eval()
_, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
model.train()
logging.info(
"Epoch %d Test Acc@1 %.3f, Acc@5 %.3f",
(step + 1) // steps_per_epoch,
valid_acc1,
valid_acc5,
)
megengine.save(
{
"epoch": (step + 1) // steps_per_epoch,
"state_dict": model.state_dict(),
},
os.path.join(args.save, args.arch, "checkpoint.pkl"),
)
def valid(func, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
image = | megengine.tensor(image, dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
# multi-step learning rate scheduler with warmup
def adjust_learning_rate(step):
lr = args.lr * 0.1 ** bisect.bisect_right(
[30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
)
if step < 5 * steps_per_epoch: # warmup
lr = args.lr * (step / (5 * steps_per_epoch))
for param_group in opt.param_groups:
param_group["lr"] = lr
return lr
# start training
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
for step in range(0, args.epochs * steps_per_epoch):
lr = adjust_learning_rate(step)
t = time.time()
image, label = next(train_queue)
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss, acc1, acc5 = train_step(image, label)
objs.update(loss.item())
top1.update(100 * acc1.item())
top5.update(100 * acc5.item())
clck.update(time.time() - t)
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info(
"Epoch %d Step %d, LR %.4f, %s %s %s %s",
step // steps_per_epoch,
step,
lr,
objs,
top1,
top5,
clck,
)
objs.reset()
top1.reset()
top5.reset()
clck.reset()
if (step + 1) % steps_per_epoch == 0:
model.eval()
_, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
model.train()
logging.info(
"Epoch %d Test Acc@1 %.3f, Acc@5 %.3f",
(step + 1) // steps_per_epoch,
valid_acc1,
valid_acc5,
)
megengine.save(
{
"epoch": (step + 1) // steps_per_epoch,
"state_dict": model.state_dict(),
},
os.path.join(args.save, args.arch, "checkpoint.pkl"),
)
def valid(func, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
image = megengine.tensor(image, dtype="float32")
label = | megengine.tensor(label, dtype="int32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
# multi-step learning rate scheduler with warmup
def adjust_learning_rate(step):
lr = args.lr * 0.1 ** bisect.bisect_right(
[30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
)
if step < 5 * steps_per_epoch: # warmup
lr = args.lr * (step / (5 * steps_per_epoch))
for param_group in opt.param_groups:
param_group["lr"] = lr
return lr
# start training
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
for step in range(0, args.epochs * steps_per_epoch):
lr = adjust_learning_rate(step)
t = time.time()
image, label = next(train_queue)
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss, acc1, acc5 = train_step(image, label)
objs.update(loss.item())
top1.update(100 * acc1.item())
top5.update(100 * acc5.item())
clck.update(time.time() - t)
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info(
"Epoch %d Step %d, LR %.4f, %s %s %s %s",
step // steps_per_epoch,
step,
lr,
objs,
top1,
top5,
clck,
)
objs.reset()
top1.reset()
top5.reset()
clck.reset()
if (step + 1) % steps_per_epoch == 0:
model.eval()
_, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
model.train()
logging.info(
"Epoch %d Test Acc@1 %.3f, Acc@5 %.3f",
(step + 1) // steps_per_epoch,
valid_acc1,
valid_acc5,
)
megengine.save(
{
"epoch": (step + 1) // steps_per_epoch,
"state_dict": model.state_dict(),
},
os.path.join(args.save, args.arch, "checkpoint.pkl"),
)
def valid(func, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
n = image.shape[0]
loss, acc1, acc5 = func(image, label)
objs.update(loss.item(), n)
top1.update(100 * acc1.item(), n)
top5.update(100 * acc5.item(), n)
clck.update(time.time() - t, n)
t = time.time()
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info("Test step %d, %s %s %s %s", step, objs, top1, top5, clck)
return objs.avg, top1.avg, top5.avg
def build_dataset(args):
train_dataset = data.dataset.ImageNet(args.data, train=True)
train_sampler = data.Infinite(
| data.RandomSampler(train_dataset, batch_size=args.batch_size, drop_last=True) | megengine.data.RandomSampler |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), | dist.get_world_size() | megengine.distributed.get_world_size |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = | autodiff.GradManager() | megengine.autodiff.GradManager |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = | F.nn.cross_entropy(logits, label) | megengine.functional.nn.cross_entropy |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = | F.topk_accuracy(logits, label, topk=(1, 5)) | megengine.functional.topk_accuracy |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks= | dist.make_allreduce_cb("SUM") | megengine.distributed.make_allreduce_cb |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = | F.distributed.all_reduce_sum(loss) | megengine.functional.distributed.all_reduce_sum |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = | F.distributed.all_reduce_sum(acc1) | megengine.functional.distributed.all_reduce_sum |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = | F.distributed.all_reduce_sum(acc5) | megengine.functional.distributed.all_reduce_sum |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
# multi-step learning rate scheduler with warmup
def adjust_learning_rate(step):
lr = args.lr * 0.1 ** bisect.bisect_right(
[30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
)
if step < 5 * steps_per_epoch: # warmup
lr = args.lr * (step / (5 * steps_per_epoch))
for param_group in opt.param_groups:
param_group["lr"] = lr
return lr
# start training
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
for step in range(0, args.epochs * steps_per_epoch):
lr = adjust_learning_rate(step)
t = time.time()
image, label = next(train_queue)
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss, acc1, acc5 = train_step(image, label)
objs.update(loss.item())
top1.update(100 * acc1.item())
top5.update(100 * acc5.item())
clck.update(time.time() - t)
if step % args.print_freq == 0 and | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
# multi-step learning rate scheduler with warmup
def adjust_learning_rate(step):
lr = args.lr * 0.1 ** bisect.bisect_right(
[30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
)
if step < 5 * steps_per_epoch: # warmup
lr = args.lr * (step / (5 * steps_per_epoch))
for param_group in opt.param_groups:
param_group["lr"] = lr
return lr
# start training
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
for step in range(0, args.epochs * steps_per_epoch):
lr = adjust_learning_rate(step)
t = time.time()
image, label = next(train_queue)
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss, acc1, acc5 = train_step(image, label)
objs.update(loss.item())
top1.update(100 * acc1.item())
top5.update(100 * acc5.item())
clck.update(time.time() - t)
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info(
"Epoch %d Step %d, LR %.4f, %s %s %s %s",
step // steps_per_epoch,
step,
lr,
objs,
top1,
top5,
clck,
)
objs.reset()
top1.reset()
top5.reset()
clck.reset()
if (step + 1) % steps_per_epoch == 0:
model.eval()
_, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
model.train()
logging.info(
"Epoch %d Test Acc@1 %.3f, Acc@5 %.3f",
(step + 1) // steps_per_epoch,
valid_acc1,
valid_acc5,
)
megengine.save(
{
"epoch": (step + 1) // steps_per_epoch,
"state_dict": model.state_dict(),
},
os.path.join(args.save, args.arch, "checkpoint.pkl"),
)
def valid(func, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
n = image.shape[0]
loss, acc1, acc5 = func(image, label)
objs.update(loss.item(), n)
top1.update(100 * acc1.item(), n)
top5.update(100 * acc5.item(), n)
clck.update(time.time() - t, n)
t = time.time()
if step % args.print_freq == 0 and | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
# multi-step learning rate scheduler with warmup
def adjust_learning_rate(step):
lr = args.lr * 0.1 ** bisect.bisect_right(
[30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
)
if step < 5 * steps_per_epoch: # warmup
lr = args.lr * (step / (5 * steps_per_epoch))
for param_group in opt.param_groups:
param_group["lr"] = lr
return lr
# start training
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
for step in range(0, args.epochs * steps_per_epoch):
lr = adjust_learning_rate(step)
t = time.time()
image, label = next(train_queue)
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss, acc1, acc5 = train_step(image, label)
objs.update(loss.item())
top1.update(100 * acc1.item())
top5.update(100 * acc5.item())
clck.update(time.time() - t)
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info(
"Epoch %d Step %d, LR %.4f, %s %s %s %s",
step // steps_per_epoch,
step,
lr,
objs,
top1,
top5,
clck,
)
objs.reset()
top1.reset()
top5.reset()
clck.reset()
if (step + 1) % steps_per_epoch == 0:
model.eval()
_, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
model.train()
logging.info(
"Epoch %d Test Acc@1 %.3f, Acc@5 %.3f",
(step + 1) // steps_per_epoch,
valid_acc1,
valid_acc5,
)
megengine.save(
{
"epoch": (step + 1) // steps_per_epoch,
"state_dict": model.state_dict(),
},
os.path.join(args.save, args.arch, "checkpoint.pkl"),
)
def valid(func, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
n = image.shape[0]
loss, acc1, acc5 = func(image, label)
objs.update(loss.item(), n)
top1.update(100 * acc1.item(), n)
top5.update(100 * acc5.item(), n)
clck.update(time.time() - t, n)
t = time.time()
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info("Test step %d, %s %s %s %s", step, objs, top1, top5, clck)
return objs.avg, top1.avg, top5.avg
def build_dataset(args):
train_dataset = data.dataset.ImageNet(args.data, train=True)
train_sampler = data.Infinite(
data.RandomSampler(train_dataset, batch_size=args.batch_size, drop_last=True)
)
train_dataloader = data.DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
[ # Baseline Augmentation for small models
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
T.ToMode("CHW"),
]
)
if args.arch in ("resnet18", "resnet34")
else T.Compose(
[ # Facebook Augmentation for large models
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
T.ToMode("CHW"),
]
),
num_workers=args.workers,
)
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_dataloader = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[
| T.Resize(256) | megengine.data.transform.Resize |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
# multi-step learning rate scheduler with warmup
def adjust_learning_rate(step):
lr = args.lr * 0.1 ** bisect.bisect_right(
[30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
)
if step < 5 * steps_per_epoch: # warmup
lr = args.lr * (step / (5 * steps_per_epoch))
for param_group in opt.param_groups:
param_group["lr"] = lr
return lr
# start training
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
for step in range(0, args.epochs * steps_per_epoch):
lr = adjust_learning_rate(step)
t = time.time()
image, label = next(train_queue)
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss, acc1, acc5 = train_step(image, label)
objs.update(loss.item())
top1.update(100 * acc1.item())
top5.update(100 * acc5.item())
clck.update(time.time() - t)
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info(
"Epoch %d Step %d, LR %.4f, %s %s %s %s",
step // steps_per_epoch,
step,
lr,
objs,
top1,
top5,
clck,
)
objs.reset()
top1.reset()
top5.reset()
clck.reset()
if (step + 1) % steps_per_epoch == 0:
model.eval()
_, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
model.train()
logging.info(
"Epoch %d Test Acc@1 %.3f, Acc@5 %.3f",
(step + 1) // steps_per_epoch,
valid_acc1,
valid_acc5,
)
megengine.save(
{
"epoch": (step + 1) // steps_per_epoch,
"state_dict": model.state_dict(),
},
os.path.join(args.save, args.arch, "checkpoint.pkl"),
)
def valid(func, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
n = image.shape[0]
loss, acc1, acc5 = func(image, label)
objs.update(loss.item(), n)
top1.update(100 * acc1.item(), n)
top5.update(100 * acc5.item(), n)
clck.update(time.time() - t, n)
t = time.time()
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info("Test step %d, %s %s %s %s", step, objs, top1, top5, clck)
return objs.avg, top1.avg, top5.avg
def build_dataset(args):
train_dataset = data.dataset.ImageNet(args.data, train=True)
train_sampler = data.Infinite(
data.RandomSampler(train_dataset, batch_size=args.batch_size, drop_last=True)
)
train_dataloader = data.DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
[ # Baseline Augmentation for small models
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
T.ToMode("CHW"),
]
)
if args.arch in ("resnet18", "resnet34")
else T.Compose(
[ # Facebook Augmentation for large models
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
T.ToMode("CHW"),
]
),
num_workers=args.workers,
)
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_dataloader = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[
T.Resize(256),
| T.CenterCrop(224) | megengine.data.transform.CenterCrop |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
# multi-step learning rate scheduler with warmup
def adjust_learning_rate(step):
lr = args.lr * 0.1 ** bisect.bisect_right(
[30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
)
if step < 5 * steps_per_epoch: # warmup
lr = args.lr * (step / (5 * steps_per_epoch))
for param_group in opt.param_groups:
param_group["lr"] = lr
return lr
# start training
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
for step in range(0, args.epochs * steps_per_epoch):
lr = adjust_learning_rate(step)
t = time.time()
image, label = next(train_queue)
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss, acc1, acc5 = train_step(image, label)
objs.update(loss.item())
top1.update(100 * acc1.item())
top5.update(100 * acc5.item())
clck.update(time.time() - t)
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info(
"Epoch %d Step %d, LR %.4f, %s %s %s %s",
step // steps_per_epoch,
step,
lr,
objs,
top1,
top5,
clck,
)
objs.reset()
top1.reset()
top5.reset()
clck.reset()
if (step + 1) % steps_per_epoch == 0:
model.eval()
_, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
model.train()
logging.info(
"Epoch %d Test Acc@1 %.3f, Acc@5 %.3f",
(step + 1) // steps_per_epoch,
valid_acc1,
valid_acc5,
)
megengine.save(
{
"epoch": (step + 1) // steps_per_epoch,
"state_dict": model.state_dict(),
},
os.path.join(args.save, args.arch, "checkpoint.pkl"),
)
def valid(func, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
n = image.shape[0]
loss, acc1, acc5 = func(image, label)
objs.update(loss.item(), n)
top1.update(100 * acc1.item(), n)
top5.update(100 * acc5.item(), n)
clck.update(time.time() - t, n)
t = time.time()
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info("Test step %d, %s %s %s %s", step, objs, top1, top5, clck)
return objs.avg, top1.avg, top5.avg
def build_dataset(args):
train_dataset = data.dataset.ImageNet(args.data, train=True)
train_sampler = data.Infinite(
data.RandomSampler(train_dataset, batch_size=args.batch_size, drop_last=True)
)
train_dataloader = data.DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
[ # Baseline Augmentation for small models
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
T.ToMode("CHW"),
]
)
if args.arch in ("resnet18", "resnet34")
else T.Compose(
[ # Facebook Augmentation for large models
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
T.ToMode("CHW"),
]
),
num_workers=args.workers,
)
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_dataloader = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[
T.Resize(256),
T.CenterCrop(224),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
| T.ToMode("CHW") | megengine.data.transform.ToMode |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
# multi-step learning rate scheduler with warmup
def adjust_learning_rate(step):
lr = args.lr * 0.1 ** bisect.bisect_right(
[30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
)
if step < 5 * steps_per_epoch: # warmup
lr = args.lr * (step / (5 * steps_per_epoch))
for param_group in opt.param_groups:
param_group["lr"] = lr
return lr
# start training
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
for step in range(0, args.epochs * steps_per_epoch):
lr = adjust_learning_rate(step)
t = time.time()
image, label = next(train_queue)
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss, acc1, acc5 = train_step(image, label)
objs.update(loss.item())
top1.update(100 * acc1.item())
top5.update(100 * acc5.item())
clck.update(time.time() - t)
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info(
"Epoch %d Step %d, LR %.4f, %s %s %s %s",
step // steps_per_epoch,
step,
lr,
objs,
top1,
top5,
clck,
)
objs.reset()
top1.reset()
top5.reset()
clck.reset()
if (step + 1) % steps_per_epoch == 0:
model.eval()
_, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
model.train()
logging.info(
"Epoch %d Test Acc@1 %.3f, Acc@5 %.3f",
(step + 1) // steps_per_epoch,
valid_acc1,
valid_acc5,
)
megengine.save(
{
"epoch": (step + 1) // steps_per_epoch,
"state_dict": model.state_dict(),
},
os.path.join(args.save, args.arch, "checkpoint.pkl"),
)
def valid(func, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
n = image.shape[0]
loss, acc1, acc5 = func(image, label)
objs.update(loss.item(), n)
top1.update(100 * acc1.item(), n)
top5.update(100 * acc5.item(), n)
clck.update(time.time() - t, n)
t = time.time()
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info("Test step %d, %s %s %s %s", step, objs, top1, top5, clck)
return objs.avg, top1.avg, top5.avg
def build_dataset(args):
train_dataset = data.dataset.ImageNet(args.data, train=True)
train_sampler = data.Infinite(
data.RandomSampler(train_dataset, batch_size=args.batch_size, drop_last=True)
)
train_dataloader = data.DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
[ # Baseline Augmentation for small models
| T.RandomResizedCrop(224) | megengine.data.transform.RandomResizedCrop |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
# multi-step learning rate scheduler with warmup
def adjust_learning_rate(step):
lr = args.lr * 0.1 ** bisect.bisect_right(
[30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
)
if step < 5 * steps_per_epoch: # warmup
lr = args.lr * (step / (5 * steps_per_epoch))
for param_group in opt.param_groups:
param_group["lr"] = lr
return lr
# start training
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
for step in range(0, args.epochs * steps_per_epoch):
lr = adjust_learning_rate(step)
t = time.time()
image, label = next(train_queue)
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss, acc1, acc5 = train_step(image, label)
objs.update(loss.item())
top1.update(100 * acc1.item())
top5.update(100 * acc5.item())
clck.update(time.time() - t)
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info(
"Epoch %d Step %d, LR %.4f, %s %s %s %s",
step // steps_per_epoch,
step,
lr,
objs,
top1,
top5,
clck,
)
objs.reset()
top1.reset()
top5.reset()
clck.reset()
if (step + 1) % steps_per_epoch == 0:
model.eval()
_, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
model.train()
logging.info(
"Epoch %d Test Acc@1 %.3f, Acc@5 %.3f",
(step + 1) // steps_per_epoch,
valid_acc1,
valid_acc5,
)
megengine.save(
{
"epoch": (step + 1) // steps_per_epoch,
"state_dict": model.state_dict(),
},
os.path.join(args.save, args.arch, "checkpoint.pkl"),
)
def valid(func, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
n = image.shape[0]
loss, acc1, acc5 = func(image, label)
objs.update(loss.item(), n)
top1.update(100 * acc1.item(), n)
top5.update(100 * acc5.item(), n)
clck.update(time.time() - t, n)
t = time.time()
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info("Test step %d, %s %s %s %s", step, objs, top1, top5, clck)
return objs.avg, top1.avg, top5.avg
def build_dataset(args):
train_dataset = data.dataset.ImageNet(args.data, train=True)
train_sampler = data.Infinite(
data.RandomSampler(train_dataset, batch_size=args.batch_size, drop_last=True)
)
train_dataloader = data.DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
[ # Baseline Augmentation for small models
T.RandomResizedCrop(224),
| T.RandomHorizontalFlip() | megengine.data.transform.RandomHorizontalFlip |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
# multi-step learning rate scheduler with warmup
def adjust_learning_rate(step):
lr = args.lr * 0.1 ** bisect.bisect_right(
[30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
)
if step < 5 * steps_per_epoch: # warmup
lr = args.lr * (step / (5 * steps_per_epoch))
for param_group in opt.param_groups:
param_group["lr"] = lr
return lr
# start training
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
for step in range(0, args.epochs * steps_per_epoch):
lr = adjust_learning_rate(step)
t = time.time()
image, label = next(train_queue)
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss, acc1, acc5 = train_step(image, label)
objs.update(loss.item())
top1.update(100 * acc1.item())
top5.update(100 * acc5.item())
clck.update(time.time() - t)
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info(
"Epoch %d Step %d, LR %.4f, %s %s %s %s",
step // steps_per_epoch,
step,
lr,
objs,
top1,
top5,
clck,
)
objs.reset()
top1.reset()
top5.reset()
clck.reset()
if (step + 1) % steps_per_epoch == 0:
model.eval()
_, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
model.train()
logging.info(
"Epoch %d Test Acc@1 %.3f, Acc@5 %.3f",
(step + 1) // steps_per_epoch,
valid_acc1,
valid_acc5,
)
megengine.save(
{
"epoch": (step + 1) // steps_per_epoch,
"state_dict": model.state_dict(),
},
os.path.join(args.save, args.arch, "checkpoint.pkl"),
)
def valid(func, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
n = image.shape[0]
loss, acc1, acc5 = func(image, label)
objs.update(loss.item(), n)
top1.update(100 * acc1.item(), n)
top5.update(100 * acc5.item(), n)
clck.update(time.time() - t, n)
t = time.time()
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info("Test step %d, %s %s %s %s", step, objs, top1, top5, clck)
return objs.avg, top1.avg, top5.avg
def build_dataset(args):
train_dataset = data.dataset.ImageNet(args.data, train=True)
train_sampler = data.Infinite(
data.RandomSampler(train_dataset, batch_size=args.batch_size, drop_last=True)
)
train_dataloader = data.DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
[ # Baseline Augmentation for small models
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
| T.ToMode("CHW") | megengine.data.transform.ToMode |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
# multi-step learning rate scheduler with warmup
def adjust_learning_rate(step):
lr = args.lr * 0.1 ** bisect.bisect_right(
[30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
)
if step < 5 * steps_per_epoch: # warmup
lr = args.lr * (step / (5 * steps_per_epoch))
for param_group in opt.param_groups:
param_group["lr"] = lr
return lr
# start training
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
for step in range(0, args.epochs * steps_per_epoch):
lr = adjust_learning_rate(step)
t = time.time()
image, label = next(train_queue)
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss, acc1, acc5 = train_step(image, label)
objs.update(loss.item())
top1.update(100 * acc1.item())
top5.update(100 * acc5.item())
clck.update(time.time() - t)
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info(
"Epoch %d Step %d, LR %.4f, %s %s %s %s",
step // steps_per_epoch,
step,
lr,
objs,
top1,
top5,
clck,
)
objs.reset()
top1.reset()
top5.reset()
clck.reset()
if (step + 1) % steps_per_epoch == 0:
model.eval()
_, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
model.train()
logging.info(
"Epoch %d Test Acc@1 %.3f, Acc@5 %.3f",
(step + 1) // steps_per_epoch,
valid_acc1,
valid_acc5,
)
megengine.save(
{
"epoch": (step + 1) // steps_per_epoch,
"state_dict": model.state_dict(),
},
os.path.join(args.save, args.arch, "checkpoint.pkl"),
)
def valid(func, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
n = image.shape[0]
loss, acc1, acc5 = func(image, label)
objs.update(loss.item(), n)
top1.update(100 * acc1.item(), n)
top5.update(100 * acc5.item(), n)
clck.update(time.time() - t, n)
t = time.time()
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info("Test step %d, %s %s %s %s", step, objs, top1, top5, clck)
return objs.avg, top1.avg, top5.avg
def build_dataset(args):
train_dataset = data.dataset.ImageNet(args.data, train=True)
train_sampler = data.Infinite(
data.RandomSampler(train_dataset, batch_size=args.batch_size, drop_last=True)
)
train_dataloader = data.DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
[ # Baseline Augmentation for small models
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
T.ToMode("CHW"),
]
)
if args.arch in ("resnet18", "resnet34")
else T.Compose(
[ # Facebook Augmentation for large models
| T.RandomResizedCrop(224) | megengine.data.transform.RandomResizedCrop |