import math

import megengine as mge
import megengine.functional as F
import megengine.module as nn

from common import se3, quaternion
from model.module import Decoder, Encoder, Fusion, Regression


class OMNet(nn.Module):
    def __init__(self, params):
        super(OMNet, self).__init__()
        self.num_iter = params.titer
        self.encoder = [Encoder() for _ in range(self.num_iter)]
        self.fusion = [Fusion() for _ in range(self.num_iter)]
        self.decoder = [Decoder() for _ in range(self.num_iter)]
        self.regression = [Regression() for _ in range(self.num_iter)]
        self.overlap_dist = params.overlap_dist

        for m in self.modules():
            if isinstance(m, (nn.Conv1d, nn.Linear)):
                nn.init.msra_normal_(m.weight, a=math.sqrt(5))
                if m.bias is not None:
                    # bias init follows the Kaiming convention:
                    # uniform in [-1/sqrt(fan_in), 1/sqrt(fan_in)]
                    fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
                    bound = 1 / math.sqrt(fan_in)
                    nn.init.uniform_(m.bias, -bound, bound)
            # elif isinstance(m, nn.BatchNorm1d):
            #     nn.init.ones_(m.weight)
            #     nn.init.zeros_(m.bias)

    def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
        # Push masked-out points far away (to different constants for src and ref)
        # so they can never fall within overlap_dist of a real point.
        points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
        points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
        points_src = se3.mge_transform(transform_gt, points_src)
        points_src = F.expand_dims(points_src, axis=2)
        points_ref = F.expand_dims(points_ref, axis=1)
        dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1))  # (B, N, N)
        dist_s2r = F.min(dist_matrix, axis=2)
        dist_r2s = F.min(dist_matrix, axis=1)
        overlap_src_mask = dist_s2r < self.overlap_dist  # (B, N)
        overlap_ref_mask = dist_r2s < self.overlap_dist  # (B, N)

        return overlap_src_mask, overlap_ref_mask

    def forward(self, data_batch):
        endpoints = {}

        xyz_src = data_batch["points_src"]
        xyz_ref = data_batch["points_ref"]
        transform_gt = data_batch["transform_gt"]
        pose_gt = data_batch["pose_gt"]

        # init endpoints
        all_src_cls_pair = []
        all_ref_cls_pair = []
        all_transform_pair = []
        all_pose_pair = []
        all_xyz_src_t = [xyz_src]

        # init params
        B, src_N, _ = xyz_src.shape
        _, ref_N, _ = xyz_ref.shape
        init_quat = F.tile(mge.tensor([1, 0, 0, 0], dtype="float32"), (B, 1))  # (B, 4)
        init_translate = F.tile(mge.tensor([0, 0, 0], dtype="float32"), (B, 1))  # (B, 3)
        pose_pred = F.concat((init_quat, init_translate), axis=1)  # (B, 7)

        # rename xyz_src
        xyz_src_iter = F.copy(xyz_src, device=xyz_src.device)

        for i in range(self.num_iter):
            # delay mask: use all-ones masks for the first two iterations
            if i < 2:
                src_pred_mask = F.ones((B, src_N), dtype=xyz_src.dtype)
                ref_pred_mask = F.ones((B, ref_N), dtype=xyz_ref.dtype)

            # encoder
            src_encoder_feats, src_glob_feat = self.encoder[i](
                xyz_src_iter.transpose(0, 2, 1).detach(), F.expand_dims(src_pred_mask, axis=1))
            ref_encoder_feats, ref_glob_feat = self.encoder[i](
                xyz_ref.transpose(0, 2, 1).detach(), F.expand_dims(ref_pred_mask, axis=1))

            # fusion
            src_concat_feat = F.concat(
                (src_encoder_feats[0], F.repeat(src_glob_feat, src_N, axis=2),
                 F.repeat(ref_glob_feat, src_N, axis=2)), axis=1)
            ref_concat_feat = F.concat(
                (ref_encoder_feats[0], F.repeat(ref_glob_feat, ref_N, axis=2),
                 F.repeat(src_glob_feat, ref_N, axis=2)), axis=1)
            _, src_fused_feat = self.fusion[i](src_concat_feat, F.expand_dims(src_pred_mask, axis=1))
            _, ref_fused_feat = self.fusion[i](ref_concat_feat, F.expand_dims(ref_pred_mask, axis=1))

            # decoder
            src_decoder_feats, src_cls_pred = self.decoder[i](src_fused_feat)
            ref_decoder_feats, ref_cls_pred = self.decoder[i](ref_fused_feat)

            # regression
            src_feat = F.concat(src_decoder_feats, axis=1) * F.expand_dims(src_pred_mask, axis=1)
            ref_feat = F.concat(ref_decoder_feats, axis=1) * F.expand_dims(ref_pred_mask, axis=1)
            concat_feat = F.concat((src_fused_feat, src_feat, ref_fused_feat, ref_feat), axis=1)
            concat_feat = F.max(concat_feat, axis=-1)
            pose_pred_iter = self.regression[i](concat_feat)  # (B, 7)
            xyz_src_iter = quaternion.mge_quat_transform(pose_pred_iter, xyz_src_iter.detach())
            pose_pred = quaternion.mge_transform_pose(pose_pred.detach(), pose_pred_iter)
            transform_pred = quaternion.mge_quat2mat(pose_pred)

            # compute overlap and cls gt
            overlap_src_mask, overlap_ref_mask = self.generate_overlap_mask(
                F.copy(xyz_src, device=xyz_src.device), F.copy(xyz_ref, device=xyz_ref.device),
                src_pred_mask, ref_pred_mask, transform_gt)
            # overlap_src_mask, overlap_ref_mask = self.generate_overlap_mask(xyz_src, xyz_ref, src_pred_mask, ref_pred_mask, transform_gt)
            src_cls_gt = F.ones((B, src_N)) * overlap_src_mask
            ref_cls_gt = F.ones((B, ref_N)) * overlap_ref_mask
            # ... (the rest of the loop body is truncated in the source)
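
The overlap labels produced by generate_overlap_mask drive the per-point classification targets above. The following is a minimal, self-contained sketch of the same computation (broadcasted pairwise distances, then a nearest-neighbour threshold); it assumes an identity ground-truth transform so the se3.mge_transform step can be skipped, and the 0.5 threshold is a placeholder for params.overlap_dist.

import numpy as np
import megengine as mge
import megengine.functional as F

# Toy clouds of shape (B, N, 3). The second src point sits far from every ref point.
points_src = mge.tensor(np.array([[[0.0, 0.0, 0.0], [5.0, 5.0, 5.0]]], dtype="float32"))
points_ref = mge.tensor(np.array([[[0.1, 0.0, 0.0], [0.0, 0.2, 0.0]]], dtype="float32"))
overlap_dist = 0.5  # placeholder threshold

# (B, N, 1, 3) - (B, 1, N, 3) broadcasts to a (B, N, N) distance matrix.
diff = F.expand_dims(points_src, axis=2) - F.expand_dims(points_ref, axis=1)
dist_matrix = F.sqrt(F.sum(F.square(diff), axis=-1))

overlap_src_mask = F.min(dist_matrix, axis=2) < overlap_dist  # (B, N): each src point vs. nearest ref
overlap_ref_mask = F.min(dist_matrix, axis=1) < overlap_dist  # (B, N): each ref point vs. nearest src
print(overlap_src_mask.numpy())  # [[ True False]]
print(overlap_ref_mask.numpy())  # [[ True  True]]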
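
For orientation, a usage sketch for the class above. It assumes the repository's model.module and common packages are importable and that forward runs to completion (its tail is truncated in this dump); titer=4, overlap_dist=0.1, and the tensor shapes are illustrative placeholders (points are (B, N, 3) as forward's shape unpacking implies; the ground-truth transform and pose follow the usual (B, 3, 4) rigid-matrix and (B, 7) quaternion+translation conventions).

from types import SimpleNamespace

import numpy as np
import megengine as mge

# Placeholder hyper-parameters; real values come from the training config.
params = SimpleNamespace(titer=4, overlap_dist=0.1)
model = OMNet(params)

B, N = 2, 717
data_batch = {
    "points_src": mge.tensor(np.random.rand(B, N, 3).astype("float32")),
    "points_ref": mge.tensor(np.random.rand(B, N, 3).astype("float32")),
    # assumed shapes: (B, 3, 4) identity transform and (B, 7) identity pose
    "transform_gt": mge.tensor(np.tile(np.eye(3, 4, dtype="float32"), (B, 1, 1))),
    "pose_gt": mge.tensor(np.tile(np.array([1, 0, 0, 0, 0, 0, 0], dtype="float32"), (B, 1))),
}
endpoints = model(data_batch)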
#!/usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import logging
import re
from collections import namedtuple

import numpy as np
from tqdm import tqdm

import megengine as mge
from megengine.core.tensor.dtype import is_quantize
from megengine.logger import _imperative_rt_logger, get_logger, set_mgb_log_level
from megengine.utils.module_stats import (
    enable_receptive_field,
    get_activation_stats,
    get_op_stats,
    get_param_stats,
    print_activations_stats,
    print_op_stats,
    print_param_stats,
    print_summary,
    sizeof_fmt,
    sum_activations_stats,
    sum_op_stats,
    sum_param_stats,
)
from megengine.utils.network import Network

logger = get_logger(__name__)


def visualize(
    model_path: str,
    log_path: str,
    input: np.ndarray = None,
    inp_dict: dict = None,
    cal_params: bool = True,
    cal_flops: bool = True,
    cal_activations: bool = True,
    logging_to_stdout: bool = True,
    bar_length_max: int = 20,
):
    r"""Load a dumped MegEngine model and visualize its graph structure with TensorBoard log files.
    Can also record and print the model's statistics like :func:`~.module_stats`.

    Args:
        model_path: path of the dumped MegEngine model.
        log_path: dir path for the TensorBoard graph log.
        input: user-defined input data for running the model and calculating stats;
            alternative to inp_dict, used when the model has only one input.
        inp_dict: input dict for running the model and calculating stats; alternative
            to input, used when the model has more than one input. When both input
            and inp_dict are None, a random input will be used.
        cal_params: whether to calculate and record param sizes.
        cal_flops: whether to calculate and record op FLOPs.
        cal_activations: whether to calculate and record op activations.
        logging_to_stdout: whether to print all calculated statistic details.
        bar_length_max: size of the bar indicating max FLOPs or parameter size in net stats.
    """
    if log_path:
        try:
            from tensorboard.compat.proto.attr_value_pb2 import AttrValue
            from tensorboard.compat.proto.config_pb2 import RunMetadata
            from tensorboard.compat.proto.graph_pb2 import GraphDef
            from tensorboard.compat.proto.node_def_pb2 import NodeDef
            from tensorboard.compat.proto.step_stats_pb2 import (
                AllocatorMemoryUsed,
                DeviceStepStats,
                NodeExecStats,
                StepStats,
            )
            from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
            from tensorboard.compat.proto.versions_pb2 import VersionDef
            from tensorboardX import SummaryWriter
        except ImportError:
            logger.error(
                "TensorBoard and TensorboardX are required for visualize.",
                exc_info=True,
            )
            return

    enable_receptive_field()

    graph = Network.load(model_path)
    graph.reset_batch_size(1)

    has_input = False
    if input is not None or inp_dict is not None:
        has_input = True
        repl_dict = {}
        inp_vars = graph.input_vars
        if inp_dict is not None:
            assert len(inp_dict) == len(
                inp_vars
            ), "Inputs are not sufficient for calculation."
            for v in inp_vars:
                new_input = graph.make_const(inp_dict[v.name], name=v.name)
                repl_dict[v] = new_input
        else:
            assert (
                len(inp_vars) == 1
            ), "The graph has more than one input; please provide inp_dict instead."
            inp_var = inp_vars[0]
            repl_dict[inp_var] = graph.make_const(input, name=inp_var.name)
        graph.replace_vars(repl_dict=repl_dict)
        graph._compile()

    def process_name(name):
        # nodes whose names start with a point or contain a float const lead to a display bug
        if not re.match(r"^[+-]?\d*\.\d*", name):
            name = name.replace(".", "/")
        return name.encode(encoding="utf-8")

    summary = [["item", "value"]]
    node_list = []
    flops_list = []
    params_list = []
    activations_list = []
    total_stats = namedtuple(
        "total_stats", ["param_size", "param_dims", "flops", "act_size", "act_dims"]
    )
    stats_details = namedtuple("module_stats", ["params", "flops", "activations"])

    for node in tqdm(graph.all_oprs):
        if hasattr(node, "output_idx"):
            node_oup = node.outputs[node.output_idx]
        else:
            if len(node.outputs) != 1:
                logger.warning(
                    "OpNode {} has more than one output and no 'output_idx' attr.".format(
                        node
                    )
                )
            node_oup = node.outputs[0]

        inp_list = [process_name(var.owner.name) for var in node.inputs]
        if log_path:
            # detail format: see tensorboard/compat/proto/attr_value.proto
            attr = {
                "_output_shapes": AttrValue(
                    list=AttrValue.ListValue(
                        shape=[
                            TensorShapeProto(
                                dim=[
                                    TensorShapeProto.Dim(size=d) for d in node_oup.shape
                                ]
                            )
                        ]
                    )
                ),
                "params": AttrValue(s=str(node.params).encode(encoding="utf-8")),
                "dtype": AttrValue(s=str(node_oup.dtype).encode(encoding="utf-8")),
            }

        if cal_flops:
            flops_stats = get_op_stats(node, node.inputs, node.outputs)
            if flops_stats is not None:
                # attach the op's FLOPs as a node attribute when available
                # (flops_stats is a dict, so test key membership, not hasattr)
                if log_path and "flops" in flops_stats:
                    attr["flops"] = AttrValue(
                        s=sizeof_fmt(flops_stats["flops"]).encode(encoding="utf-8")
                    )
                flops_stats["name"] = node.name
                flops_stats["class_name"] = node.type
                flops_list.append(flops_stats)

        if cal_activations:
            acts = get_activation_stats(node_oup, has_input=has_input)
            acts["name"] = node.name
            acts["class_name"] = node.type
            activations_list.append(acts)

        if cal_params:
            if node.type == "ImmutableTensor":
                param_stats = get_param_stats(node_oup)
                # add tensor size attr
                if log_path:
                    attr["size"] = AttrValue(
                        s=sizeof_fmt(param_stats["size"]).encode(encoding="utf-8")
                    )
                param_stats["name"] = node.name
                params_list.append(param_stats)

        if log_path:
            node_list.append(
                NodeDef(
                    name=process_name(node.name),
                    op=node.type,
                    input=inp_list,
                    attr=attr,
                )
            )

    # summary
    extra_info = {
        "#ops": len(graph.all_oprs),
        "#params": len(params_list),
    }
    (
        total_flops,
        total_param_dims,
        total_param_size,
        total_act_dims,
        total_act_size,
    ) = (0, 0, 0, 0, 0)

    if cal_params:
        total_param_dims, total_param_size, params_list = sum_param_stats(
            params_list, bar_length_max
        )
        extra_info["total_param_dims"] = sizeof_fmt(total_param_dims, suffix="")
        extra_info["total_param_size"] = sizeof_fmt(total_param_size)
        if logging_to_stdout:
            print_param_stats(params_list)

    if cal_flops:
        total_flops, flops_list = sum_op_stats(flops_list, bar_length_max)
        extra_info["total_flops"] = sizeof_fmt(total_flops, suffix="OPs")
        if logging_to_stdout:
            print_op_stats(flops_list)

    if cal_activations:
        total_act_dims, total_act_size, activations_list = sum_activations_stats(
            activations_list, bar_length_max
        )
        extra_info["total_act_dims"] = sizeof_fmt(total_act_dims, suffix="")
        extra_info["total_act_size"] = sizeof_fmt(total_act_size)
        if logging_to_stdout:
            print_activations_stats(activations_list, has_input=has_input)

    if cal_flops and cal_params:
        extra_info["flops/param_size"] = "{:3.3f}".format(
            total_flops / total_param_size
        )

    if log_path:
        graph_def = GraphDef(node=node_list, versions=VersionDef(producer=22))
        device = "/device:CPU:0"
        stepstats = RunMetadata(
            step_stats=StepStats(dev_stats=[DeviceStepStats(device=device)])
        )
        writer = SummaryWriter(log_path)
        writer._get_file_writer().add_graph((graph_def, stepstats))

    print_summary(**extra_info)
    # ... (the remainder of the file is truncated in the source)
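
A hedged usage sketch for the function above: the model file name and log directory are placeholders, and the input array shape must match whatever model was actually dumped.

import numpy as np

# "resnet18.mge" stands in for any model file produced by MegEngine's dump
# workflow; "./tb_logs" is where the TensorBoard event files will be written.
visualize(
    model_path="resnet18.mge",
    log_path="./tb_logs",
    input=np.random.randn(1, 3, 224, 224).astype("float32"),
    cal_params=True,
    cal_flops=True,
    cal_activations=True,
)
# Then inspect the graph with: tensorboard --logdir ./tb_logs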
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import logging
import re
from collections import namedtuple
import numpy as np
from tqdm import tqdm
import megengine as mge
from megengine.core.tensor.dtype import is_quantize
from megengine.logger import _imperative_rt_logger, get_logger, set_mgb_log_level
from megengine.utils.module_stats import (
enable_receptive_field,
get_activation_stats,
get_op_stats,
get_param_stats,
print_activations_stats,
print_op_stats,
print_param_stats,
print_summary,
sizeof_fmt,
sum_activations_stats,
sum_op_stats,
sum_param_stats,
)
from megengine.utils.network import Network
logger = get_logger(__name__)
def visualize(
model_path: str,
log_path: str,
input: np.ndarray = None,
inp_dict: dict = None,
cal_params: bool = True,
cal_flops: bool = True,
cal_activations: bool = True,
logging_to_stdout: bool = True,
bar_length_max: int = 20,
):
r"""Load megengine dumped model and visualize graph structure with tensorboard log files.
Can also record and print model's statistics like :func:`~.module_stats`
Args:
model_path: dir path for megengine dumped model.
log_path: dir path for tensorboard graph log.
input: user defined input data for running model and calculating stats, alternative with inp_dict, used when the model has only one input.
inp_dict: input dict for running model and calculating stats, alternative with input, used when the model has more than one input. When both input and inp_dict are None, a random input will be used.
cal_params: whether calculate and record params size.
cal_flops: whether calculate and record op flops.
cal_activations: whether calculate and record op activations.
logging_to_stdout: whether print all calculated statistic details.
bar_length_max: size of bar indicating max flops or parameter size in net stats.
model_path: str:
log_path: str:
input: np.ndarray:
inp_dict: dict:
cal_params: bool:
cal_flops: bool:
cal_activations: bool:
logging_to_stdout: bool:
bar_length_max: int:
"""
if log_path:
try:
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.config_pb2 import RunMetadata
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.step_stats_pb2 import (
AllocatorMemoryUsed,
DeviceStepStats,
NodeExecStats,
StepStats,
)
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboardX import SummaryWriter
except ImportError:
logger.error(
"TensorBoard and TensorboardX are required for visualize.",
exc_info=True,
)
return
enable_receptive_field()
graph = Network.load(model_path)
graph.reset_batch_size(1)
has_input = False
if input is not None or inp_dict is not None:
has_input = True
repl_dict = {}
inp_vars = graph.input_vars
if inp_dict is not None:
assert len(inp_dict) == len(
inp_vars
), "Inputs are not sufficient for calculation."
for v in inp_vars:
new_input = graph.make_const(inp_dict[v.name], name=v.name)
repl_dict[v] = new_input
else:
assert len(inp_vars) == 1, "The graph needs more than one input."
inp_var = inp_vars[0]
repl_dict[inp_var] = graph.make_const(input, name=inp_var.name)
graph.replace_vars(repl_dict=repl_dict)
graph._compile()
def process_name(name):
# nodes that start with point or contain float const will lead to display bug
if not re.match(r"^[+-]?\d*\.\d*", name):
name = name.replace(".", "/")
return name.encode(encoding="utf-8")
summary = [["item", "value"]]
node_list = []
flops_list = []
params_list = []
activations_list = []
total_stats = namedtuple(
"total_stats", ["param_size", "param_dims", "flops", "act_size", "act_dims"]
)
stats_details = namedtuple("module_stats", ["params", "flops", "activations"])
for node in tqdm(graph.all_oprs):
if hasattr(node, "output_idx"):
node_oup = node.outputs[node.output_idx]
else:
if len(node.outputs) != 1:
logger.warning(
"OpNode {} has more than one output and not has 'output_idx' attr.".format(
node
)
)
node_oup = node.outputs[0]
inp_list = [process_name(var.owner.name) for var in node.inputs]
if log_path:
# detail format see tensorboard/compat/proto/attr_value.proto
attr = {
"_output_shapes": AttrValue(
list=AttrValue.ListValue(
shape=[
TensorShapeProto(
dim=[
TensorShapeProto.Dim(size=d) for d in node_oup.shape
]
)
]
)
),
"params": AttrValue(s=str(node.params).encode(encoding="utf-8")),
"dtype": AttrValue(s=str(node_oup.dtype).encode(encoding="utf-8")),
}
if cal_flops:
flops_stats = get_op_stats(node, node.inputs, node.outputs)
if flops_stats is not None:
# add op flops attr
if log_path and hasattr(flops_stats, "flops_num"):
attr["flops"] = AttrValue(
s=sizeof_fmt(flops_stats["flops"]).encode(encoding="utf-8")
)
flops_stats["name"] = node.name
flops_stats["class_name"] = node.type
flops_list.append(flops_stats)
if cal_activations:
acts = get_activation_stats(node_oup, has_input=has_input)
acts["name"] = node.name
acts["class_name"] = node.type
activations_list.append(acts)
if cal_params:
if node.type == "ImmutableTensor":
param_stats = get_param_stats(node_oup)
# add tensor size attr
if log_path:
attr["size"] = AttrValue(
s=sizeof_fmt(param_stats["size"]).encode(encoding="utf-8")
)
param_stats["name"] = node.name
params_list.append(param_stats)
if log_path:
node_list.append(
NodeDef(
name=process_name(node.name),
op=node.type,
input=inp_list,
attr=attr,
)
)
# summary
extra_info = {
"#ops": len(graph.all_oprs),
"#params": len(params_list),
}
(
total_flops,
total_param_dims,
total_param_size,
total_act_dims,
total_act_size,
) = (0, 0, 0, 0, 0)
if cal_params:
total_param_dims, total_param_size, params_list = sum_param_stats(
params_list, bar_length_max
)
extra_info["total_param_dims"] = | sizeof_fmt(total_param_dims, suffix="") | megengine.utils.module_stats.sizeof_fmt |
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import logging
import re
from collections import namedtuple
import numpy as np
from tqdm import tqdm
import megengine as mge
from megengine.core.tensor.dtype import is_quantize
from megengine.logger import _imperative_rt_logger, get_logger, set_mgb_log_level
from megengine.utils.module_stats import (
enable_receptive_field,
get_activation_stats,
get_op_stats,
get_param_stats,
print_activations_stats,
print_op_stats,
print_param_stats,
print_summary,
sizeof_fmt,
sum_activations_stats,
sum_op_stats,
sum_param_stats,
)
from megengine.utils.network import Network
logger = get_logger(__name__)
def visualize(
model_path: str,
log_path: str,
input: np.ndarray = None,
inp_dict: dict = None,
cal_params: bool = True,
cal_flops: bool = True,
cal_activations: bool = True,
logging_to_stdout: bool = True,
bar_length_max: int = 20,
):
r"""Load megengine dumped model and visualize graph structure with tensorboard log files.
Can also record and print model's statistics like :func:`~.module_stats`
Args:
model_path: dir path for megengine dumped model.
log_path: dir path for tensorboard graph log.
input: user defined input data for running model and calculating stats, alternative with inp_dict, used when the model has only one input.
inp_dict: input dict for running model and calculating stats, alternative with input, used when the model has more than one input. When both input and inp_dict are None, a random input will be used.
cal_params: whether calculate and record params size.
cal_flops: whether calculate and record op flops.
cal_activations: whether calculate and record op activations.
logging_to_stdout: whether print all calculated statistic details.
bar_length_max: size of bar indicating max flops or parameter size in net stats.
model_path: str:
log_path: str:
input: np.ndarray:
inp_dict: dict:
cal_params: bool:
cal_flops: bool:
cal_activations: bool:
logging_to_stdout: bool:
bar_length_max: int:
"""
if log_path:
try:
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.config_pb2 import RunMetadata
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.step_stats_pb2 import (
AllocatorMemoryUsed,
DeviceStepStats,
NodeExecStats,
StepStats,
)
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboardX import SummaryWriter
except ImportError:
logger.error(
"TensorBoard and TensorboardX are required for visualize.",
exc_info=True,
)
return
enable_receptive_field()
graph = Network.load(model_path)
graph.reset_batch_size(1)
has_input = False
if input is not None or inp_dict is not None:
has_input = True
repl_dict = {}
inp_vars = graph.input_vars
if inp_dict is not None:
assert len(inp_dict) == len(
inp_vars
), "Inputs are not sufficient for calculation."
for v in inp_vars:
new_input = graph.make_const(inp_dict[v.name], name=v.name)
repl_dict[v] = new_input
else:
assert len(inp_vars) == 1, "The graph needs more than one input."
inp_var = inp_vars[0]
repl_dict[inp_var] = graph.make_const(input, name=inp_var.name)
graph.replace_vars(repl_dict=repl_dict)
graph._compile()
def process_name(name):
# nodes that start with point or contain float const will lead to display bug
if not re.match(r"^[+-]?\d*\.\d*", name):
name = name.replace(".", "/")
return name.encode(encoding="utf-8")
summary = [["item", "value"]]
node_list = []
flops_list = []
params_list = []
activations_list = []
total_stats = namedtuple(
"total_stats", ["param_size", "param_dims", "flops", "act_size", "act_dims"]
)
stats_details = namedtuple("module_stats", ["params", "flops", "activations"])
for node in tqdm(graph.all_oprs):
if hasattr(node, "output_idx"):
node_oup = node.outputs[node.output_idx]
else:
if len(node.outputs) != 1:
logger.warning(
"OpNode {} has more than one output and not has 'output_idx' attr.".format(
node
)
)
node_oup = node.outputs[0]
inp_list = [process_name(var.owner.name) for var in node.inputs]
if log_path:
# detail format see tensorboard/compat/proto/attr_value.proto
attr = {
"_output_shapes": AttrValue(
list=AttrValue.ListValue(
shape=[
TensorShapeProto(
dim=[
TensorShapeProto.Dim(size=d) for d in node_oup.shape
]
)
]
)
),
"params": AttrValue(s=str(node.params).encode(encoding="utf-8")),
"dtype": AttrValue(s=str(node_oup.dtype).encode(encoding="utf-8")),
}
if cal_flops:
flops_stats = get_op_stats(node, node.inputs, node.outputs)
if flops_stats is not None:
# add op flops attr
if log_path and hasattr(flops_stats, "flops_num"):
attr["flops"] = AttrValue(
s=sizeof_fmt(flops_stats["flops"]).encode(encoding="utf-8")
)
flops_stats["name"] = node.name
flops_stats["class_name"] = node.type
flops_list.append(flops_stats)
if cal_activations:
acts = get_activation_stats(node_oup, has_input=has_input)
acts["name"] = node.name
acts["class_name"] = node.type
activations_list.append(acts)
if cal_params:
if node.type == "ImmutableTensor":
param_stats = get_param_stats(node_oup)
# add tensor size attr
if log_path:
attr["size"] = AttrValue(
s=sizeof_fmt(param_stats["size"]).encode(encoding="utf-8")
)
param_stats["name"] = node.name
params_list.append(param_stats)
if log_path:
node_list.append(
NodeDef(
name=process_name(node.name),
op=node.type,
input=inp_list,
attr=attr,
)
)
# summary
extra_info = {
"#ops": len(graph.all_oprs),
"#params": len(params_list),
}
(
total_flops,
total_param_dims,
total_param_size,
total_act_dims,
total_act_size,
) = (0, 0, 0, 0, 0)
if cal_params:
total_param_dims, total_param_size, params_list = sum_param_stats(
params_list, bar_length_max
)
extra_info["total_param_dims"] = sizeof_fmt(total_param_dims, suffix="")
extra_info["total_param_size"] = | sizeof_fmt(total_param_size) | megengine.utils.module_stats.sizeof_fmt |
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import logging
import re
from collections import namedtuple
import numpy as np
from tqdm import tqdm
import megengine as mge
from megengine.core.tensor.dtype import is_quantize
from megengine.logger import _imperative_rt_logger, get_logger, set_mgb_log_level
from megengine.utils.module_stats import (
enable_receptive_field,
get_activation_stats,
get_op_stats,
get_param_stats,
print_activations_stats,
print_op_stats,
print_param_stats,
print_summary,
sizeof_fmt,
sum_activations_stats,
sum_op_stats,
sum_param_stats,
)
from megengine.utils.network import Network
logger = get_logger(__name__)
def visualize(
model_path: str,
log_path: str,
input: np.ndarray = None,
inp_dict: dict = None,
cal_params: bool = True,
cal_flops: bool = True,
cal_activations: bool = True,
logging_to_stdout: bool = True,
bar_length_max: int = 20,
):
r"""Load megengine dumped model and visualize graph structure with tensorboard log files.
Can also record and print model's statistics like :func:`~.module_stats`
Args:
model_path: dir path for megengine dumped model.
log_path: dir path for tensorboard graph log.
input: user defined input data for running model and calculating stats, alternative with inp_dict, used when the model has only one input.
inp_dict: input dict for running model and calculating stats, alternative with input, used when the model has more than one input. When both input and inp_dict are None, a random input will be used.
cal_params: whether calculate and record params size.
cal_flops: whether calculate and record op flops.
cal_activations: whether calculate and record op activations.
logging_to_stdout: whether print all calculated statistic details.
bar_length_max: size of bar indicating max flops or parameter size in net stats.
model_path: str:
log_path: str:
input: np.ndarray:
inp_dict: dict:
cal_params: bool:
cal_flops: bool:
cal_activations: bool:
logging_to_stdout: bool:
bar_length_max: int:
"""
if log_path:
try:
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.config_pb2 import RunMetadata
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.step_stats_pb2 import (
AllocatorMemoryUsed,
DeviceStepStats,
NodeExecStats,
StepStats,
)
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboardX import SummaryWriter
except ImportError:
logger.error(
"TensorBoard and TensorboardX are required for visualize.",
exc_info=True,
)
return
enable_receptive_field()
graph = Network.load(model_path)
graph.reset_batch_size(1)
has_input = False
if input is not None or inp_dict is not None:
has_input = True
repl_dict = {}
inp_vars = graph.input_vars
if inp_dict is not None:
assert len(inp_dict) == len(
inp_vars
), "Inputs are not sufficient for calculation."
for v in inp_vars:
new_input = graph.make_const(inp_dict[v.name], name=v.name)
repl_dict[v] = new_input
else:
assert len(inp_vars) == 1, "The graph needs more than one input."
inp_var = inp_vars[0]
repl_dict[inp_var] = graph.make_const(input, name=inp_var.name)
graph.replace_vars(repl_dict=repl_dict)
graph._compile()
def process_name(name):
# nodes that start with point or contain float const will lead to display bug
if not re.match(r"^[+-]?\d*\.\d*", name):
name = name.replace(".", "/")
return name.encode(encoding="utf-8")
summary = [["item", "value"]]
node_list = []
flops_list = []
params_list = []
activations_list = []
total_stats = namedtuple(
"total_stats", ["param_size", "param_dims", "flops", "act_size", "act_dims"]
)
stats_details = namedtuple("module_stats", ["params", "flops", "activations"])
for node in tqdm(graph.all_oprs):
if hasattr(node, "output_idx"):
node_oup = node.outputs[node.output_idx]
else:
if len(node.outputs) != 1:
logger.warning(
"OpNode {} has more than one output and not has 'output_idx' attr.".format(
node
)
)
node_oup = node.outputs[0]
inp_list = [process_name(var.owner.name) for var in node.inputs]
if log_path:
# detail format see tensorboard/compat/proto/attr_value.proto
attr = {
"_output_shapes": AttrValue(
list=AttrValue.ListValue(
shape=[
TensorShapeProto(
dim=[
TensorShapeProto.Dim(size=d) for d in node_oup.shape
]
)
]
)
),
"params": AttrValue(s=str(node.params).encode(encoding="utf-8")),
"dtype": AttrValue(s=str(node_oup.dtype).encode(encoding="utf-8")),
}
if cal_flops:
flops_stats = get_op_stats(node, node.inputs, node.outputs)
if flops_stats is not None:
# add op flops attr
if log_path and hasattr(flops_stats, "flops_num"):
attr["flops"] = AttrValue(
s=sizeof_fmt(flops_stats["flops"]).encode(encoding="utf-8")
)
flops_stats["name"] = node.name
flops_stats["class_name"] = node.type
flops_list.append(flops_stats)
if cal_activations:
acts = get_activation_stats(node_oup, has_input=has_input)
acts["name"] = node.name
acts["class_name"] = node.type
activations_list.append(acts)
if cal_params:
if node.type == "ImmutableTensor":
param_stats = get_param_stats(node_oup)
# add tensor size attr
if log_path:
attr["size"] = AttrValue(
s=sizeof_fmt(param_stats["size"]).encode(encoding="utf-8")
)
param_stats["name"] = node.name
params_list.append(param_stats)
if log_path:
node_list.append(
NodeDef(
name=process_name(node.name),
op=node.type,
input=inp_list,
attr=attr,
)
)
# summary
extra_info = {
"#ops": len(graph.all_oprs),
"#params": len(params_list),
}
(
total_flops,
total_param_dims,
total_param_size,
total_act_dims,
total_act_size,
) = (0, 0, 0, 0, 0)
if cal_params:
total_param_dims, total_param_size, params_list = sum_param_stats(
params_list, bar_length_max
)
extra_info["total_param_dims"] = sizeof_fmt(total_param_dims, suffix="")
extra_info["total_param_size"] = sizeof_fmt(total_param_size)
if logging_to_stdout:
print_param_stats(params_list)
if cal_flops:
total_flops, flops_list = | sum_op_stats(flops_list, bar_length_max) | megengine.utils.module_stats.sum_op_stats |
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import logging
import re
from collections import namedtuple
import numpy as np
from tqdm import tqdm
import megengine as mge
from megengine.core.tensor.dtype import is_quantize
from megengine.logger import _imperative_rt_logger, get_logger, set_mgb_log_level
from megengine.utils.module_stats import (
enable_receptive_field,
get_activation_stats,
get_op_stats,
get_param_stats,
print_activations_stats,
print_op_stats,
print_param_stats,
print_summary,
sizeof_fmt,
sum_activations_stats,
sum_op_stats,
sum_param_stats,
)
from megengine.utils.network import Network
logger = get_logger(__name__)
def visualize(
model_path: str,
log_path: str,
input: np.ndarray = None,
inp_dict: dict = None,
cal_params: bool = True,
cal_flops: bool = True,
cal_activations: bool = True,
logging_to_stdout: bool = True,
bar_length_max: int = 20,
):
    r"""Load a MegEngine dumped model and visualize its graph structure with TensorBoard log files.
    Can also record and print the model's statistics like :func:`~.module_stats`.
Args:
model_path: dir path for megengine dumped model.
log_path: dir path for tensorboard graph log.
        input: user-defined input data for running the model and calculating stats; an alternative to inp_dict, used when the model has only one input.
        inp_dict: input dict for running the model and calculating stats; an alternative to input, used when the model has more than one input. When both input and inp_dict are None, a random input will be used.
        cal_params: whether to calculate and record params size.
        cal_flops: whether to calculate and record op flops.
        cal_activations: whether to calculate and record op activations.
        logging_to_stdout: whether to print all calculated statistic details.
        bar_length_max: size of the bar indicating max flops or parameter size in net stats.
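
    A minimal usage sketch (hypothetical paths; assumes this module is importable
    as ``megengine.tools.network_visualize``)::

        import numpy as np

        from megengine.tools.network_visualize import visualize

        # random NCHW input; any array matching the model's input shape works
        visualize(
            "./model.mge",
            log_path="./log",
            input=np.random.rand(1, 3, 224, 224).astype("float32"),
        )

    Then inspect the graph with ``tensorboard --logdir ./log``.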
"""
if log_path:
try:
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.config_pb2 import RunMetadata
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.step_stats_pb2 import (
AllocatorMemoryUsed,
DeviceStepStats,
NodeExecStats,
StepStats,
)
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboardX import SummaryWriter
except ImportError:
logger.error(
"TensorBoard and TensorboardX are required for visualize.",
exc_info=True,
)
return
enable_receptive_field()
graph = Network.load(model_path)
graph.reset_batch_size(1)
has_input = False
if input is not None or inp_dict is not None:
has_input = True
repl_dict = {}
inp_vars = graph.input_vars
if inp_dict is not None:
assert len(inp_dict) == len(
inp_vars
), "Inputs are not sufficient for calculation."
for v in inp_vars:
new_input = graph.make_const(inp_dict[v.name], name=v.name)
repl_dict[v] = new_input
else:
            assert len(inp_vars) == 1, "The graph needs exactly one input."
inp_var = inp_vars[0]
repl_dict[inp_var] = graph.make_const(input, name=inp_var.name)
graph.replace_vars(repl_dict=repl_dict)
graph._compile()
def process_name(name):
        # node names that start with a dot or contain a float constant break the TensorBoard display
if not re.match(r"^[+-]?\d*\.\d*", name):
name = name.replace(".", "/")
return name.encode(encoding="utf-8")
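    # e.g. process_name("stage1.conv.weight") -> b"stage1/conv/weight", while a
    # float-like name such as "1.5" keeps its dot because the regex above matches it.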
summary = [["item", "value"]]
node_list = []
flops_list = []
params_list = []
activations_list = []
total_stats = namedtuple(
"total_stats", ["param_size", "param_dims", "flops", "act_size", "act_dims"]
)
stats_details = namedtuple("module_stats", ["params", "flops", "activations"])
for node in tqdm(graph.all_oprs):
if hasattr(node, "output_idx"):
node_oup = node.outputs[node.output_idx]
else:
if len(node.outputs) != 1:
                logger.warning(
                    "OpNode {} has more than one output and no 'output_idx' attribute.".format(
node
)
)
node_oup = node.outputs[0]
inp_list = [process_name(var.owner.name) for var in node.inputs]
if log_path:
            # for the detailed attr format, see tensorboard/compat/proto/attr_value.proto
attr = {
"_output_shapes": AttrValue(
list=AttrValue.ListValue(
shape=[
TensorShapeProto(
dim=[
TensorShapeProto.Dim(size=d) for d in node_oup.shape
]
)
]
)
),
"params": AttrValue(s=str(node.params).encode(encoding="utf-8")),
"dtype": AttrValue(s=str(node_oup.dtype).encode(encoding="utf-8")),
}
if cal_flops:
flops_stats = get_op_stats(node, node.inputs, node.outputs)
if flops_stats is not None:
# add op flops attr
                if log_path and "flops" in flops_stats:
attr["flops"] = AttrValue(
s=sizeof_fmt(flops_stats["flops"]).encode(encoding="utf-8")
)
flops_stats["name"] = node.name
flops_stats["class_name"] = node.type
flops_list.append(flops_stats)
if cal_activations:
acts = get_activation_stats(node_oup, has_input=has_input)
acts["name"] = node.name
acts["class_name"] = node.type
activations_list.append(acts)
if cal_params:
if node.type == "ImmutableTensor":
param_stats = get_param_stats(node_oup)
# add tensor size attr
if log_path:
attr["size"] = AttrValue(
s=sizeof_fmt(param_stats["size"]).encode(encoding="utf-8")
)
param_stats["name"] = node.name
params_list.append(param_stats)
if log_path:
node_list.append(
NodeDef(
name=process_name(node.name),
op=node.type,
input=inp_list,
attr=attr,
)
)
# summary
extra_info = {
"#ops": len(graph.all_oprs),
"#params": len(params_list),
}
(
total_flops,
total_param_dims,
total_param_size,
total_act_dims,
total_act_size,
) = (0, 0, 0, 0, 0)
if cal_params:
total_param_dims, total_param_size, params_list = sum_param_stats(
params_list, bar_length_max
)
extra_info["total_param_dims"] = sizeof_fmt(total_param_dims, suffix="")
extra_info["total_param_size"] = sizeof_fmt(total_param_size)
if logging_to_stdout:
print_param_stats(params_list)
if cal_flops:
total_flops, flops_list = sum_op_stats(flops_list, bar_length_max)
extra_info["total_flops"] = | sizeof_fmt(total_flops, suffix="OPs") | megengine.utils.module_stats.sizeof_fmt |
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import logging
import re
from collections import namedtuple
import numpy as np
from tqdm import tqdm
import megengine as mge
from megengine.core.tensor.dtype import is_quantize
from megengine.logger import _imperative_rt_logger, get_logger, set_mgb_log_level
from megengine.utils.module_stats import (
enable_receptive_field,
get_activation_stats,
get_op_stats,
get_param_stats,
print_activations_stats,
print_op_stats,
print_param_stats,
print_summary,
sizeof_fmt,
sum_activations_stats,
sum_op_stats,
sum_param_stats,
)
from megengine.utils.network import Network
logger = get_logger(__name__)
def visualize(
model_path: str,
log_path: str,
input: np.ndarray = None,
inp_dict: dict = None,
cal_params: bool = True,
cal_flops: bool = True,
cal_activations: bool = True,
logging_to_stdout: bool = True,
bar_length_max: int = 20,
):
    r"""Load a MegEngine dumped model and visualize its graph structure with TensorBoard log files.
    Can also record and print the model's statistics like :func:`~.module_stats`.
Args:
model_path: dir path for megengine dumped model.
log_path: dir path for tensorboard graph log.
        input: user-defined input data for running the model and calculating stats; an alternative to inp_dict, used when the model has only one input.
        inp_dict: input dict for running the model and calculating stats; an alternative to input, used when the model has more than one input. When both input and inp_dict are None, a random input will be used.
        cal_params: whether to calculate and record params size.
        cal_flops: whether to calculate and record op flops.
        cal_activations: whether to calculate and record op activations.
        logging_to_stdout: whether to print all calculated statistic details.
        bar_length_max: size of the bar indicating max flops or parameter size in net stats.
"""
if log_path:
try:
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.config_pb2 import RunMetadata
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.step_stats_pb2 import (
AllocatorMemoryUsed,
DeviceStepStats,
NodeExecStats,
StepStats,
)
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboardX import SummaryWriter
except ImportError:
logger.error(
"TensorBoard and TensorboardX are required for visualize.",
exc_info=True,
)
return
enable_receptive_field()
graph = Network.load(model_path)
graph.reset_batch_size(1)
has_input = False
if input is not None or inp_dict is not None:
has_input = True
repl_dict = {}
inp_vars = graph.input_vars
if inp_dict is not None:
assert len(inp_dict) == len(
inp_vars
), "Inputs are not sufficient for calculation."
for v in inp_vars:
new_input = graph.make_const(inp_dict[v.name], name=v.name)
repl_dict[v] = new_input
else:
            assert len(inp_vars) == 1, "The graph needs exactly one input."
inp_var = inp_vars[0]
repl_dict[inp_var] = graph.make_const(input, name=inp_var.name)
graph.replace_vars(repl_dict=repl_dict)
graph._compile()
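        # With every input frozen as a constant, the graph can be recompiled and
        # evaluated, which is what makes the activation statistics below measurable.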
def process_name(name):
        # node names that start with a dot or contain a float constant break the TensorBoard display
if not re.match(r"^[+-]?\d*\.\d*", name):
name = name.replace(".", "/")
return name.encode(encoding="utf-8")
summary = [["item", "value"]]
node_list = []
flops_list = []
params_list = []
activations_list = []
total_stats = namedtuple(
"total_stats", ["param_size", "param_dims", "flops", "act_size", "act_dims"]
)
stats_details = namedtuple("module_stats", ["params", "flops", "activations"])
for node in tqdm(graph.all_oprs):
if hasattr(node, "output_idx"):
node_oup = node.outputs[node.output_idx]
else:
if len(node.outputs) != 1:
                logger.warning(
                    "OpNode {} has more than one output and no 'output_idx' attribute.".format(
node
)
)
node_oup = node.outputs[0]
inp_list = [process_name(var.owner.name) for var in node.inputs]
if log_path:
            # for the detailed attr format, see tensorboard/compat/proto/attr_value.proto
attr = {
"_output_shapes": AttrValue(
list=AttrValue.ListValue(
shape=[
TensorShapeProto(
dim=[
TensorShapeProto.Dim(size=d) for d in node_oup.shape
]
)
]
)
),
"params": AttrValue(s=str(node.params).encode(encoding="utf-8")),
"dtype": AttrValue(s=str(node_oup.dtype).encode(encoding="utf-8")),
}
if cal_flops:
flops_stats = get_op_stats(node, node.inputs, node.outputs)
if flops_stats is not None:
# add op flops attr
                if log_path and "flops" in flops_stats:
attr["flops"] = AttrValue(
s=sizeof_fmt(flops_stats["flops"]).encode(encoding="utf-8")
)
flops_stats["name"] = node.name
flops_stats["class_name"] = node.type
flops_list.append(flops_stats)
if cal_activations:
acts = get_activation_stats(node_oup, has_input=has_input)
acts["name"] = node.name
acts["class_name"] = node.type
activations_list.append(acts)
if cal_params:
if node.type == "ImmutableTensor":
param_stats = get_param_stats(node_oup)
# add tensor size attr
if log_path:
attr["size"] = AttrValue(
s=sizeof_fmt(param_stats["size"]).encode(encoding="utf-8")
)
param_stats["name"] = node.name
params_list.append(param_stats)
if log_path:
node_list.append(
NodeDef(
name=process_name(node.name),
op=node.type,
input=inp_list,
attr=attr,
)
)
# summary
extra_info = {
"#ops": len(graph.all_oprs),
"#params": len(params_list),
}
(
total_flops,
total_param_dims,
total_param_size,
total_act_dims,
total_act_size,
) = (0, 0, 0, 0, 0)
if cal_params:
total_param_dims, total_param_size, params_list = sum_param_stats(
params_list, bar_length_max
)
extra_info["total_param_dims"] = sizeof_fmt(total_param_dims, suffix="")
extra_info["total_param_size"] = sizeof_fmt(total_param_size)
if logging_to_stdout:
print_param_stats(params_list)
if cal_flops:
total_flops, flops_list = sum_op_stats(flops_list, bar_length_max)
extra_info["total_flops"] = sizeof_fmt(total_flops, suffix="OPs")
if logging_to_stdout:
print_op_stats(flops_list)
if cal_activations:
total_act_dims, total_act_size, activations_list = sum_activations_stats(
activations_list, bar_length_max
)
extra_info["total_act_dims"] = | sizeof_fmt(total_act_dims, suffix="") | megengine.utils.module_stats.sizeof_fmt |
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import logging
import re
from collections import namedtuple
import numpy as np
from tqdm import tqdm
import megengine as mge
from megengine.core.tensor.dtype import is_quantize
from megengine.logger import _imperative_rt_logger, get_logger, set_mgb_log_level
from megengine.utils.module_stats import (
enable_receptive_field,
get_activation_stats,
get_op_stats,
get_param_stats,
print_activations_stats,
print_op_stats,
print_param_stats,
print_summary,
sizeof_fmt,
sum_activations_stats,
sum_op_stats,
sum_param_stats,
)
from megengine.utils.network import Network
logger = get_logger(__name__)
def visualize(
model_path: str,
log_path: str,
input: np.ndarray = None,
inp_dict: dict = None,
cal_params: bool = True,
cal_flops: bool = True,
cal_activations: bool = True,
logging_to_stdout: bool = True,
bar_length_max: int = 20,
):
    r"""Load a MegEngine dumped model and visualize its graph structure with TensorBoard log files.
    Can also record and print the model's statistics like :func:`~.module_stats`.
Args:
model_path: dir path for megengine dumped model.
log_path: dir path for tensorboard graph log.
        input: user-defined input data for running the model and calculating stats; an alternative to inp_dict, used when the model has only one input.
        inp_dict: input dict for running the model and calculating stats; an alternative to input, used when the model has more than one input. When both input and inp_dict are None, a random input will be used.
        cal_params: whether to calculate and record params size.
        cal_flops: whether to calculate and record op flops.
        cal_activations: whether to calculate and record op activations.
        logging_to_stdout: whether to print all calculated statistic details.
        bar_length_max: size of the bar indicating max flops or parameter size in net stats.
"""
if log_path:
try:
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.config_pb2 import RunMetadata
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.step_stats_pb2 import (
AllocatorMemoryUsed,
DeviceStepStats,
NodeExecStats,
StepStats,
)
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboardX import SummaryWriter
except ImportError:
logger.error(
"TensorBoard and TensorboardX are required for visualize.",
exc_info=True,
)
return
enable_receptive_field()
graph = Network.load(model_path)
graph.reset_batch_size(1)
has_input = False
if input is not None or inp_dict is not None:
has_input = True
repl_dict = {}
inp_vars = graph.input_vars
if inp_dict is not None:
assert len(inp_dict) == len(
inp_vars
), "Inputs are not sufficient for calculation."
for v in inp_vars:
new_input = graph.make_const(inp_dict[v.name], name=v.name)
repl_dict[v] = new_input
else:
            assert len(inp_vars) == 1, "The graph needs exactly one input."
inp_var = inp_vars[0]
repl_dict[inp_var] = graph.make_const(input, name=inp_var.name)
graph.replace_vars(repl_dict=repl_dict)
graph._compile()
def process_name(name):
        # node names that start with a dot or contain a float constant break the TensorBoard display
if not re.match(r"^[+-]?\d*\.\d*", name):
name = name.replace(".", "/")
return name.encode(encoding="utf-8")
summary = [["item", "value"]]
node_list = []
flops_list = []
params_list = []
activations_list = []
total_stats = namedtuple(
"total_stats", ["param_size", "param_dims", "flops", "act_size", "act_dims"]
)
stats_details = namedtuple("module_stats", ["params", "flops", "activations"])
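    # total_stats aggregates the summary numbers; stats_details keeps the per-node
    # lists. Instances of both are returned together at the end of this function.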
for node in tqdm(graph.all_oprs):
if hasattr(node, "output_idx"):
node_oup = node.outputs[node.output_idx]
else:
if len(node.outputs) != 1:
                logger.warning(
                    "OpNode {} has more than one output and no 'output_idx' attribute.".format(
node
)
)
node_oup = node.outputs[0]
inp_list = [process_name(var.owner.name) for var in node.inputs]
if log_path:
            # for the detailed attr format, see tensorboard/compat/proto/attr_value.proto
attr = {
"_output_shapes": AttrValue(
list=AttrValue.ListValue(
shape=[
TensorShapeProto(
dim=[
TensorShapeProto.Dim(size=d) for d in node_oup.shape
]
)
]
)
),
"params": AttrValue(s=str(node.params).encode(encoding="utf-8")),
"dtype": AttrValue(s=str(node_oup.dtype).encode(encoding="utf-8")),
}
if cal_flops:
flops_stats = get_op_stats(node, node.inputs, node.outputs)
if flops_stats is not None:
# add op flops attr
                if log_path and "flops" in flops_stats:
attr["flops"] = AttrValue(
s=sizeof_fmt(flops_stats["flops"]).encode(encoding="utf-8")
)
flops_stats["name"] = node.name
flops_stats["class_name"] = node.type
flops_list.append(flops_stats)
if cal_activations:
acts = get_activation_stats(node_oup, has_input=has_input)
acts["name"] = node.name
acts["class_name"] = node.type
activations_list.append(acts)
if cal_params:
if node.type == "ImmutableTensor":
param_stats = get_param_stats(node_oup)
# add tensor size attr
if log_path:
attr["size"] = AttrValue(
s=sizeof_fmt(param_stats["size"]).encode(encoding="utf-8")
)
param_stats["name"] = node.name
params_list.append(param_stats)
if log_path:
node_list.append(
NodeDef(
name=process_name(node.name),
op=node.type,
input=inp_list,
attr=attr,
)
)
# summary
extra_info = {
"#ops": len(graph.all_oprs),
"#params": len(params_list),
}
(
total_flops,
total_param_dims,
total_param_size,
total_act_dims,
total_act_size,
) = (0, 0, 0, 0, 0)
if cal_params:
total_param_dims, total_param_size, params_list = sum_param_stats(
params_list, bar_length_max
)
extra_info["total_param_dims"] = sizeof_fmt(total_param_dims, suffix="")
extra_info["total_param_size"] = sizeof_fmt(total_param_size)
if logging_to_stdout:
print_param_stats(params_list)
if cal_flops:
total_flops, flops_list = sum_op_stats(flops_list, bar_length_max)
extra_info["total_flops"] = sizeof_fmt(total_flops, suffix="OPs")
if logging_to_stdout:
print_op_stats(flops_list)
if cal_activations:
total_act_dims, total_act_size, activations_list = sum_activations_stats(
activations_list, bar_length_max
)
extra_info["total_act_dims"] = sizeof_fmt(total_act_dims, suffix="")
extra_info["total_act_size"] = | sizeof_fmt(total_act_size) | megengine.utils.module_stats.sizeof_fmt |
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import logging
import re
from collections import namedtuple
import numpy as np
from tqdm import tqdm
import megengine as mge
from megengine.core.tensor.dtype import is_quantize
from megengine.logger import _imperative_rt_logger, get_logger, set_mgb_log_level
from megengine.utils.module_stats import (
enable_receptive_field,
get_activation_stats,
get_op_stats,
get_param_stats,
print_activations_stats,
print_op_stats,
print_param_stats,
print_summary,
sizeof_fmt,
sum_activations_stats,
sum_op_stats,
sum_param_stats,
)
from megengine.utils.network import Network
logger = get_logger(__name__)
def visualize(
model_path: str,
log_path: str,
input: np.ndarray = None,
inp_dict: dict = None,
cal_params: bool = True,
cal_flops: bool = True,
cal_activations: bool = True,
logging_to_stdout: bool = True,
bar_length_max: int = 20,
):
    r"""Load a MegEngine dumped model and visualize its graph structure with TensorBoard log files.
    Can also record and print the model's statistics like :func:`~.module_stats`.
Args:
model_path: dir path for megengine dumped model.
log_path: dir path for tensorboard graph log.
        input: user-defined input data for running the model and calculating stats; an alternative to inp_dict, used when the model has only one input.
        inp_dict: input dict for running the model and calculating stats; an alternative to input, used when the model has more than one input. When both input and inp_dict are None, a random input will be used.
        cal_params: whether to calculate and record params size.
        cal_flops: whether to calculate and record op flops.
        cal_activations: whether to calculate and record op activations.
        logging_to_stdout: whether to print all calculated statistic details.
        bar_length_max: size of the bar indicating max flops or parameter size in net stats.
"""
if log_path:
try:
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.config_pb2 import RunMetadata
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.step_stats_pb2 import (
AllocatorMemoryUsed,
DeviceStepStats,
NodeExecStats,
StepStats,
)
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboardX import SummaryWriter
except ImportError:
logger.error(
"TensorBoard and TensorboardX are required for visualize.",
exc_info=True,
)
return
enable_receptive_field()
graph = Network.load(model_path)
graph.reset_batch_size(1)
has_input = False
if input is not None or inp_dict is not None:
has_input = True
repl_dict = {}
inp_vars = graph.input_vars
if inp_dict is not None:
assert len(inp_dict) == len(
inp_vars
), "Inputs are not sufficient for calculation."
for v in inp_vars:
new_input = graph.make_const(inp_dict[v.name], name=v.name)
repl_dict[v] = new_input
else:
            assert len(inp_vars) == 1, "The graph needs exactly one input."
inp_var = inp_vars[0]
repl_dict[inp_var] = graph.make_const(input, name=inp_var.name)
graph.replace_vars(repl_dict=repl_dict)
graph._compile()
def process_name(name):
        # node names that start with a dot or contain a float constant break the TensorBoard display
if not re.match(r"^[+-]?\d*\.\d*", name):
name = name.replace(".", "/")
return name.encode(encoding="utf-8")
summary = [["item", "value"]]
node_list = []
flops_list = []
params_list = []
activations_list = []
total_stats = namedtuple(
"total_stats", ["param_size", "param_dims", "flops", "act_size", "act_dims"]
)
stats_details = namedtuple("module_stats", ["params", "flops", "activations"])
for node in tqdm(graph.all_oprs):
if hasattr(node, "output_idx"):
node_oup = node.outputs[node.output_idx]
else:
if len(node.outputs) != 1:
                logger.warning(
                    "OpNode {} has more than one output and no 'output_idx' attribute.".format(
node
)
)
node_oup = node.outputs[0]
inp_list = [process_name(var.owner.name) for var in node.inputs]
if log_path:
            # for the detailed attr format, see tensorboard/compat/proto/attr_value.proto
attr = {
"_output_shapes": AttrValue(
list=AttrValue.ListValue(
shape=[
TensorShapeProto(
dim=[
TensorShapeProto.Dim(size=d) for d in node_oup.shape
]
)
]
)
),
"params": AttrValue(s=str(node.params).encode(encoding="utf-8")),
"dtype": AttrValue(s=str(node_oup.dtype).encode(encoding="utf-8")),
}
if cal_flops:
flops_stats = get_op_stats(node, node.inputs, node.outputs)
if flops_stats is not None:
# add op flops attr
                if log_path and "flops" in flops_stats:
attr["flops"] = AttrValue(
s=sizeof_fmt(flops_stats["flops"]).encode(encoding="utf-8")
)
flops_stats["name"] = node.name
flops_stats["class_name"] = node.type
flops_list.append(flops_stats)
if cal_activations:
acts = get_activation_stats(node_oup, has_input=has_input)
acts["name"] = node.name
acts["class_name"] = node.type
activations_list.append(acts)
if cal_params:
if node.type == "ImmutableTensor":
param_stats = get_param_stats(node_oup)
# add tensor size attr
if log_path:
attr["size"] = AttrValue(
s=sizeof_fmt(param_stats["size"]).encode(encoding="utf-8")
)
param_stats["name"] = node.name
params_list.append(param_stats)
if log_path:
node_list.append(
NodeDef(
name=process_name(node.name),
op=node.type,
input=inp_list,
attr=attr,
)
)
# summary
extra_info = {
"#ops": len(graph.all_oprs),
"#params": len(params_list),
}
(
total_flops,
total_param_dims,
total_param_size,
total_act_dims,
total_act_size,
) = (0, 0, 0, 0, 0)
if cal_params:
total_param_dims, total_param_size, params_list = sum_param_stats(
params_list, bar_length_max
)
extra_info["total_param_dims"] = sizeof_fmt(total_param_dims, suffix="")
extra_info["total_param_size"] = sizeof_fmt(total_param_size)
if logging_to_stdout:
print_param_stats(params_list)
if cal_flops:
total_flops, flops_list = sum_op_stats(flops_list, bar_length_max)
extra_info["total_flops"] = sizeof_fmt(total_flops, suffix="OPs")
if logging_to_stdout:
print_op_stats(flops_list)
if cal_activations:
total_act_dims, total_act_size, activations_list = sum_activations_stats(
activations_list, bar_length_max
)
extra_info["total_act_dims"] = sizeof_fmt(total_act_dims, suffix="")
extra_info["total_act_size"] = sizeof_fmt(total_act_size)
if logging_to_stdout:
print_activations_stats(activations_list, has_input=has_input)
if cal_flops and cal_params:
extra_info["flops/param_size"] = "{:3.3f}".format(
total_flops / total_param_size
)
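        # a rough compute-to-memory intensity indicator: OPs per byte of parameters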
if log_path:
graph_def = GraphDef(node=node_list, versions=VersionDef(producer=22))
device = "/device:CPU:0"
stepstats = RunMetadata(
step_stats=StepStats(dev_stats=[DeviceStepStats(device=device)])
)
writer = SummaryWriter(log_path)
writer._get_file_writer().add_graph((graph_def, stepstats))
print_summary(**extra_info)
return (
total_stats(
param_size=total_param_size,
param_dims=total_param_dims,
flops=total_flops,
act_size=total_act_size,
act_dims=total_act_dims,
),
stats_details(
params=params_list, flops=flops_list, activations=activations_list
),
)
def main():
parser = argparse.ArgumentParser(
        description="load a MegEngine dumped model and export a log file for TensorBoard visualization.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("model_path", help="dumped model path.")
parser.add_argument("--log_path", help="tensorboard log path.")
parser.add_argument(
"--load_input_data",
        help="load input data from a pickle file; it should be a numpy array or a dict of numpy arrays",
)
parser.add_argument(
"--bar_length_max",
type=int,
default=20,
help="size of bar indicating max flops or parameter size in net stats.",
)
parser.add_argument(
"--cal_params",
action="store_true",
        help="whether to calculate and record params size.",
)
parser.add_argument(
"--cal_flops",
action="store_true",
        help="whether to calculate and record op flops.",
)
parser.add_argument(
"--cal_activations",
action="store_true",
        help="whether to calculate and record op activations.",
)
parser.add_argument(
"--logging_to_stdout",
action="store_true",
        help="whether to print all calculated statistic details.",
)
parser.add_argument(
"--all",
action="store_true",
        help="whether to print all stats. TensorBoard logs will be placed in './log' if --log_path is not specified.",
)
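    # Example invocation (hypothetical paths):
    #   python network_visualize.py ./model.mge --log_path ./log \
    #       --cal_flops --cal_params --cal_activations --logging_to_stdout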
args = parser.parse_args()
if args.load_input_data:
logger.info("load data from {}".format(args.load_input_data))
data = | mge.load(args.load_input_data) | megengine.load |
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import logging
import re
from collections import namedtuple
import numpy as np
from tqdm import tqdm
import megengine as mge
from megengine.core.tensor.dtype import is_quantize
from megengine.logger import _imperative_rt_logger, get_logger, set_mgb_log_level
from megengine.utils.module_stats import (
enable_receptive_field,
get_activation_stats,
get_op_stats,
get_param_stats,
print_activations_stats,
print_op_stats,
print_param_stats,
print_summary,
sizeof_fmt,
sum_activations_stats,
sum_op_stats,
sum_param_stats,
)
from megengine.utils.network import Network
logger = get_logger(__name__)
def visualize(
model_path: str,
log_path: str,
input: np.ndarray = None,
inp_dict: dict = None,
cal_params: bool = True,
cal_flops: bool = True,
cal_activations: bool = True,
logging_to_stdout: bool = True,
bar_length_max: int = 20,
):
    r"""Load a MegEngine dumped model and visualize its graph structure with TensorBoard log files.
    Can also record and print the model's statistics like :func:`~.module_stats`.
Args:
model_path: dir path for megengine dumped model.
log_path: dir path for tensorboard graph log.
        input: user-defined input data for running the model and calculating stats; an alternative to inp_dict, used when the model has only one input.
        inp_dict: input dict for running the model and calculating stats; an alternative to input, used when the model has more than one input. When both input and inp_dict are None, a random input will be used.
        cal_params: whether to calculate and record params size.
        cal_flops: whether to calculate and record op flops.
        cal_activations: whether to calculate and record op activations.
        logging_to_stdout: whether to print all calculated statistic details.
        bar_length_max: size of the bar indicating max flops or parameter size in net stats.
"""
if log_path:
try:
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.config_pb2 import RunMetadata
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.step_stats_pb2 import (
AllocatorMemoryUsed,
DeviceStepStats,
NodeExecStats,
StepStats,
)
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboardX import SummaryWriter
except ImportError:
logger.error(
"TensorBoard and TensorboardX are required for visualize.",
exc_info=True,
)
return
enable_receptive_field()
graph = Network.load(model_path)
graph.reset_batch_size(1)
has_input = False
if input is not None or inp_dict is not None:
has_input = True
repl_dict = {}
inp_vars = graph.input_vars
if inp_dict is not None:
assert len(inp_dict) == len(
inp_vars
), "Inputs are not sufficient for calculation."
for v in inp_vars:
new_input = graph.make_const(inp_dict[v.name], name=v.name)
repl_dict[v] = new_input
else:
            assert len(inp_vars) == 1, "The graph needs exactly one input."
inp_var = inp_vars[0]
repl_dict[inp_var] = graph.make_const(input, name=inp_var.name)
graph.replace_vars(repl_dict=repl_dict)
graph._compile()
def process_name(name):
        # node names that start with a dot or contain a float constant break the TensorBoard display
if not re.match(r"^[+-]?\d*\.\d*", name):
name = name.replace(".", "/")
return name.encode(encoding="utf-8")
summary = [["item", "value"]]
node_list = []
flops_list = []
params_list = []
activations_list = []
total_stats = namedtuple(
"total_stats", ["param_size", "param_dims", "flops", "act_size", "act_dims"]
)
stats_details = namedtuple("module_stats", ["params", "flops", "activations"])
for node in tqdm(graph.all_oprs):
if hasattr(node, "output_idx"):
node_oup = node.outputs[node.output_idx]
else:
if len(node.outputs) != 1:
                logger.warning(
                    "OpNode {} has more than one output and no 'output_idx' attribute.".format(
node
)
)
node_oup = node.outputs[0]
inp_list = [process_name(var.owner.name) for var in node.inputs]
if log_path:
            # for the detailed attr format, see tensorboard/compat/proto/attr_value.proto
attr = {
"_output_shapes": AttrValue(
list=AttrValue.ListValue(
shape=[
TensorShapeProto(
dim=[
TensorShapeProto.Dim(size=d) for d in node_oup.shape
]
)
]
)
),
"params": AttrValue(s=str(node.params).encode(encoding="utf-8")),
"dtype": AttrValue(s=str(node_oup.dtype).encode(encoding="utf-8")),
}
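            # TensorBoard surfaces these attrs (output shapes, params, dtype) in
            # the node details panel of its graph view.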
if cal_flops:
flops_stats = | get_op_stats(node, node.inputs, node.outputs) | megengine.utils.module_stats.get_op_stats |
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import logging
import re
from collections import namedtuple
import numpy as np
from tqdm import tqdm
import megengine as mge
from megengine.core.tensor.dtype import is_quantize
from megengine.logger import _imperative_rt_logger, get_logger, set_mgb_log_level
from megengine.utils.module_stats import (
enable_receptive_field,
get_activation_stats,
get_op_stats,
get_param_stats,
print_activations_stats,
print_op_stats,
print_param_stats,
print_summary,
sizeof_fmt,
sum_activations_stats,
sum_op_stats,
sum_param_stats,
)
from megengine.utils.network import Network
logger = get_logger(__name__)
def visualize(
model_path: str,
log_path: str,
input: np.ndarray = None,
inp_dict: dict = None,
cal_params: bool = True,
cal_flops: bool = True,
cal_activations: bool = True,
logging_to_stdout: bool = True,
bar_length_max: int = 20,
):
    r"""Load a MegEngine dumped model and visualize its graph structure with TensorBoard log files.
    Can also record and print the model's statistics like :func:`~.module_stats`.
Args:
model_path: dir path for megengine dumped model.
log_path: dir path for tensorboard graph log.
        input: user-defined input data for running the model and calculating stats; an alternative to inp_dict, used when the model has only one input.
        inp_dict: input dict for running the model and calculating stats; an alternative to input, used when the model has more than one input. When both input and inp_dict are None, a random input will be used.
        cal_params: whether to calculate and record params size.
        cal_flops: whether to calculate and record op flops.
        cal_activations: whether to calculate and record op activations.
        logging_to_stdout: whether to print all calculated statistic details.
        bar_length_max: size of the bar indicating max flops or parameter size in net stats.
"""
if log_path:
try:
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.config_pb2 import RunMetadata
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.step_stats_pb2 import (
AllocatorMemoryUsed,
DeviceStepStats,
NodeExecStats,
StepStats,
)
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboardX import SummaryWriter
except ImportError:
logger.error(
"TensorBoard and TensorboardX are required for visualize.",
exc_info=True,
)
return
enable_receptive_field()
graph = Network.load(model_path)
graph.reset_batch_size(1)
has_input = False
if input is not None or inp_dict is not None:
has_input = True
repl_dict = {}
inp_vars = graph.input_vars
if inp_dict is not None:
assert len(inp_dict) == len(
inp_vars
), "Inputs are not sufficient for calculation."
for v in inp_vars:
new_input = graph.make_const(inp_dict[v.name], name=v.name)
repl_dict[v] = new_input
else:
            assert len(inp_vars) == 1, "The graph needs exactly one input."
inp_var = inp_vars[0]
repl_dict[inp_var] = graph.make_const(input, name=inp_var.name)
graph.replace_vars(repl_dict=repl_dict)
graph._compile()
def process_name(name):
        # node names that start with a dot or contain a float constant break the TensorBoard display
if not re.match(r"^[+-]?\d*\.\d*", name):
name = name.replace(".", "/")
return name.encode(encoding="utf-8")
summary = [["item", "value"]]
node_list = []
flops_list = []
params_list = []
activations_list = []
total_stats = namedtuple(
"total_stats", ["param_size", "param_dims", "flops", "act_size", "act_dims"]
)
stats_details = namedtuple("module_stats", ["params", "flops", "activations"])
for node in tqdm(graph.all_oprs):
if hasattr(node, "output_idx"):
node_oup = node.outputs[node.output_idx]
else:
if len(node.outputs) != 1:
                logger.warning(
                    "OpNode {} has more than one output and no 'output_idx' attribute.".format(
node
)
)
node_oup = node.outputs[0]
inp_list = [process_name(var.owner.name) for var in node.inputs]
if log_path:
            # for the detailed attr format, see tensorboard/compat/proto/attr_value.proto
attr = {
"_output_shapes": AttrValue(
list=AttrValue.ListValue(
shape=[
TensorShapeProto(
dim=[
TensorShapeProto.Dim(size=d) for d in node_oup.shape
]
)
]
)
),
"params": AttrValue(s=str(node.params).encode(encoding="utf-8")),
"dtype": AttrValue(s=str(node_oup.dtype).encode(encoding="utf-8")),
}
if cal_flops:
flops_stats = get_op_stats(node, node.inputs, node.outputs)
if flops_stats is not None:
# add op flops attr
                if log_path and "flops" in flops_stats:
attr["flops"] = AttrValue(
s=sizeof_fmt(flops_stats["flops"]).encode(encoding="utf-8")
)
flops_stats["name"] = node.name
flops_stats["class_name"] = node.type
flops_list.append(flops_stats)
if cal_activations:
acts = | get_activation_stats(node_oup, has_input=has_input) | megengine.utils.module_stats.get_activation_stats |
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import logging
import re
from collections import namedtuple
import numpy as np
from tqdm import tqdm
import megengine as mge
from megengine.core.tensor.dtype import is_quantize
from megengine.logger import _imperative_rt_logger, get_logger, set_mgb_log_level
from megengine.utils.module_stats import (
enable_receptive_field,
get_activation_stats,
get_op_stats,
get_param_stats,
print_activations_stats,
print_op_stats,
print_param_stats,
print_summary,
sizeof_fmt,
sum_activations_stats,
sum_op_stats,
sum_param_stats,
)
from megengine.utils.network import Network
logger = get_logger(__name__)
def visualize(
model_path: str,
log_path: str,
input: np.ndarray = None,
inp_dict: dict = None,
cal_params: bool = True,
cal_flops: bool = True,
cal_activations: bool = True,
logging_to_stdout: bool = True,
bar_length_max: int = 20,
):
    r"""Load a MegEngine dumped model and visualize its graph structure with TensorBoard log files.
    Can also record and print the model's statistics like :func:`~.module_stats`.
Args:
model_path: dir path for megengine dumped model.
log_path: dir path for tensorboard graph log.
        input: user-defined input data for running the model and calculating stats; an alternative to inp_dict, used when the model has only one input.
        inp_dict: input dict for running the model and calculating stats; an alternative to input, used when the model has more than one input. When both input and inp_dict are None, a random input will be used.
        cal_params: whether to calculate and record params size.
        cal_flops: whether to calculate and record op flops.
        cal_activations: whether to calculate and record op activations.
        logging_to_stdout: whether to print all calculated statistic details.
        bar_length_max: size of the bar indicating max flops or parameter size in net stats.
"""
if log_path:
try:
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.config_pb2 import RunMetadata
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.step_stats_pb2 import (
AllocatorMemoryUsed,
DeviceStepStats,
NodeExecStats,
StepStats,
)
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboardX import SummaryWriter
except ImportError:
logger.error(
"TensorBoard and TensorboardX are required for visualize.",
exc_info=True,
)
return
enable_receptive_field()
graph = Network.load(model_path)
graph.reset_batch_size(1)
has_input = False
if input is not None or inp_dict is not None:
has_input = True
repl_dict = {}
inp_vars = graph.input_vars
if inp_dict is not None:
assert len(inp_dict) == len(
inp_vars
), "Inputs are not sufficient for calculation."
for v in inp_vars:
new_input = graph.make_const(inp_dict[v.name], name=v.name)
repl_dict[v] = new_input
else:
            assert len(inp_vars) == 1, "The graph needs exactly one input."
inp_var = inp_vars[0]
repl_dict[inp_var] = graph.make_const(input, name=inp_var.name)
graph.replace_vars(repl_dict=repl_dict)
graph._compile()
def process_name(name):
        # node names that start with a dot or contain a float constant break the TensorBoard display
if not re.match(r"^[+-]?\d*\.\d*", name):
name = name.replace(".", "/")
return name.encode(encoding="utf-8")
summary = [["item", "value"]]
node_list = []
flops_list = []
params_list = []
activations_list = []
total_stats = namedtuple(
"total_stats", ["param_size", "param_dims", "flops", "act_size", "act_dims"]
)
stats_details = namedtuple("module_stats", ["params", "flops", "activations"])
for node in tqdm(graph.all_oprs):
if hasattr(node, "output_idx"):
node_oup = node.outputs[node.output_idx]
else:
if len(node.outputs) != 1:
                logger.warning(
                    "OpNode {} has more than one output and no 'output_idx' attribute.".format(
node
)
)
node_oup = node.outputs[0]
inp_list = [process_name(var.owner.name) for var in node.inputs]
if log_path:
            # for the detailed attr format, see tensorboard/compat/proto/attr_value.proto
attr = {
"_output_shapes": AttrValue(
list=AttrValue.ListValue(
shape=[
TensorShapeProto(
dim=[
TensorShapeProto.Dim(size=d) for d in node_oup.shape
]
)
]
)
),
"params": AttrValue(s=str(node.params).encode(encoding="utf-8")),
"dtype": AttrValue(s=str(node_oup.dtype).encode(encoding="utf-8")),
}
if cal_flops:
flops_stats = get_op_stats(node, node.inputs, node.outputs)
if flops_stats is not None:
# add op flops attr
                if log_path and "flops" in flops_stats:
attr["flops"] = AttrValue(
s=sizeof_fmt(flops_stats["flops"]).encode(encoding="utf-8")
)
flops_stats["name"] = node.name
flops_stats["class_name"] = node.type
flops_list.append(flops_stats)
if cal_activations:
acts = get_activation_stats(node_oup, has_input=has_input)
acts["name"] = node.name
acts["class_name"] = node.type
activations_list.append(acts)
if cal_params:
if node.type == "ImmutableTensor":
param_stats = get_param_stats(node_oup)
# add tensor size attr
if log_path:
attr["size"] = AttrValue(
s=sizeof_fmt(param_stats["size"]).encode(encoding="utf-8")
)
param_stats["name"] = node.name
params_list.append(param_stats)
if log_path:
node_list.append(
NodeDef(
name=process_name(node.name),
op=node.type,
input=inp_list,
attr=attr,
)
)
# summary
extra_info = {
"#ops": len(graph.all_oprs),
"#params": len(params_list),
}
(
total_flops,
total_param_dims,
total_param_size,
total_act_dims,
total_act_size,
) = (0, 0, 0, 0, 0)
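    # These totals keep their zero defaults when the corresponding cal_* flag is off.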
if cal_params:
total_param_dims, total_param_size, params_list = sum_param_stats(
params_list, bar_length_max
)
extra_info["total_param_dims"] = sizeof_fmt(total_param_dims, suffix="")
extra_info["total_param_size"] = sizeof_fmt(total_param_size)
if logging_to_stdout:
| print_param_stats(params_list) | megengine.utils.module_stats.print_param_stats |
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import logging
import re
from collections import namedtuple
import numpy as np
from tqdm import tqdm
import megengine as mge
from megengine.core.tensor.dtype import is_quantize
from megengine.logger import _imperative_rt_logger, get_logger, set_mgb_log_level
from megengine.utils.module_stats import (
enable_receptive_field,
get_activation_stats,
get_op_stats,
get_param_stats,
print_activations_stats,
print_op_stats,
print_param_stats,
print_summary,
sizeof_fmt,
sum_activations_stats,
sum_op_stats,
sum_param_stats,
)
from megengine.utils.network import Network
logger = get_logger(__name__)
def visualize(
model_path: str,
log_path: str,
input: np.ndarray = None,
inp_dict: dict = None,
cal_params: bool = True,
cal_flops: bool = True,
cal_activations: bool = True,
logging_to_stdout: bool = True,
bar_length_max: int = 20,
):
    r"""Load a MegEngine dumped model and visualize its graph structure with TensorBoard log files.
    Can also record and print the model's statistics like :func:`~.module_stats`.
Args:
model_path: dir path for megengine dumped model.
log_path: dir path for tensorboard graph log.
        input: user-defined input data for running the model and calculating stats; an alternative to inp_dict, used when the model has only one input.
        inp_dict: input dict for running the model and calculating stats; an alternative to input, used when the model has more than one input. When both input and inp_dict are None, a random input will be used.
        cal_params: whether to calculate and record params size.
        cal_flops: whether to calculate and record op flops.
        cal_activations: whether to calculate and record op activations.
        logging_to_stdout: whether to print all calculated statistic details.
        bar_length_max: size of the bar indicating max flops or parameter size in net stats.
"""
if log_path:
try:
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.config_pb2 import RunMetadata
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.step_stats_pb2 import (
AllocatorMemoryUsed,
DeviceStepStats,
NodeExecStats,
StepStats,
)
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboardX import SummaryWriter
except ImportError:
logger.error(
"TensorBoard and TensorboardX are required for visualize.",
exc_info=True,
)
return
enable_receptive_field()
graph = Network.load(model_path)
graph.reset_batch_size(1)
has_input = False
if input is not None or inp_dict is not None:
has_input = True
repl_dict = {}
inp_vars = graph.input_vars
if inp_dict is not None:
assert len(inp_dict) == len(
inp_vars
), "Inputs are not sufficient for calculation."
for v in inp_vars:
new_input = graph.make_const(inp_dict[v.name], name=v.name)
repl_dict[v] = new_input
else:
            assert len(inp_vars) == 1, "The graph needs exactly one input."
inp_var = inp_vars[0]
repl_dict[inp_var] = graph.make_const(input, name=inp_var.name)
graph.replace_vars(repl_dict=repl_dict)
graph._compile()
def process_name(name):
        # node names that start with a dot or contain a float constant break the TensorBoard display
if not re.match(r"^[+-]?\d*\.\d*", name):
name = name.replace(".", "/")
return name.encode(encoding="utf-8")
summary = [["item", "value"]]
node_list = []
flops_list = []
params_list = []
activations_list = []
total_stats = namedtuple(
"total_stats", ["param_size", "param_dims", "flops", "act_size", "act_dims"]
)
stats_details = namedtuple("module_stats", ["params", "flops", "activations"])
for node in tqdm(graph.all_oprs):
if hasattr(node, "output_idx"):
node_oup = node.outputs[node.output_idx]
else:
if len(node.outputs) != 1:
                logger.warning(
                    "OpNode {} has more than one output and no 'output_idx' attribute.".format(
node
)
)
node_oup = node.outputs[0]
inp_list = [process_name(var.owner.name) for var in node.inputs]
if log_path:
            # for the detailed attr format, see tensorboard/compat/proto/attr_value.proto
attr = {
"_output_shapes": AttrValue(
list=AttrValue.ListValue(
shape=[
TensorShapeProto(
dim=[
TensorShapeProto.Dim(size=d) for d in node_oup.shape
]
)
]
)
),
"params": AttrValue(s=str(node.params).encode(encoding="utf-8")),
"dtype": AttrValue(s=str(node_oup.dtype).encode(encoding="utf-8")),
}
if cal_flops:
flops_stats = get_op_stats(node, node.inputs, node.outputs)
if flops_stats is not None:
# add op flops attr
                if log_path and "flops" in flops_stats:
attr["flops"] = AttrValue(
s=sizeof_fmt(flops_stats["flops"]).encode(encoding="utf-8")
)
flops_stats["name"] = node.name
flops_stats["class_name"] = node.type
flops_list.append(flops_stats)
if cal_activations:
acts = get_activation_stats(node_oup, has_input=has_input)
acts["name"] = node.name
acts["class_name"] = node.type
activations_list.append(acts)
if cal_params:
if node.type == "ImmutableTensor":
param_stats = get_param_stats(node_oup)
# add tensor size attr
if log_path:
attr["size"] = AttrValue(
s=sizeof_fmt(param_stats["size"]).encode(encoding="utf-8")
)
param_stats["name"] = node.name
params_list.append(param_stats)
if log_path:
node_list.append(
NodeDef(
name=process_name(node.name),
op=node.type,
input=inp_list,
attr=attr,
)
)
# summary
extra_info = {
"#ops": len(graph.all_oprs),
"#params": len(params_list),
}
(
total_flops,
total_param_dims,
total_param_size,
total_act_dims,
total_act_size,
) = (0, 0, 0, 0, 0)
if cal_params:
total_param_dims, total_param_size, params_list = sum_param_stats(
params_list, bar_length_max
)
extra_info["total_param_dims"] = sizeof_fmt(total_param_dims, suffix="")
extra_info["total_param_size"] = sizeof_fmt(total_param_size)
if logging_to_stdout:
print_param_stats(params_list)
if cal_flops:
total_flops, flops_list = sum_op_stats(flops_list, bar_length_max)
extra_info["total_flops"] = sizeof_fmt(total_flops, suffix="OPs")
if logging_to_stdout:
| print_op_stats(flops_list) | megengine.utils.module_stats.print_op_stats |
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import logging
import re
from collections import namedtuple
import numpy as np
from tqdm import tqdm
import megengine as mge
from megengine.core.tensor.dtype import is_quantize
from megengine.logger import _imperative_rt_logger, get_logger, set_mgb_log_level
from megengine.utils.module_stats import (
enable_receptive_field,
get_activation_stats,
get_op_stats,
get_param_stats,
print_activations_stats,
print_op_stats,
print_param_stats,
print_summary,
sizeof_fmt,
sum_activations_stats,
sum_op_stats,
sum_param_stats,
)
from megengine.utils.network import Network
logger = get_logger(__name__)
def visualize(
model_path: str,
log_path: str,
input: np.ndarray = None,
inp_dict: dict = None,
cal_params: bool = True,
cal_flops: bool = True,
cal_activations: bool = True,
logging_to_stdout: bool = True,
bar_length_max: int = 20,
):
    r"""Load a MegEngine dumped model and visualize its graph structure with TensorBoard log files.
    Can also record and print the model's statistics like :func:`~.module_stats`.
Args:
model_path: dir path for megengine dumped model.
log_path: dir path for tensorboard graph log.
        input: user-defined input data for running the model and calculating stats; an alternative to inp_dict, used when the model has only one input.
        inp_dict: input dict for running the model and calculating stats; an alternative to input, used when the model has more than one input. When both input and inp_dict are None, a random input will be used.
        cal_params: whether to calculate and record params size.
        cal_flops: whether to calculate and record op flops.
        cal_activations: whether to calculate and record op activations.
        logging_to_stdout: whether to print all calculated statistic details.
        bar_length_max: size of the bar indicating max flops or parameter size in net stats.
"""
if log_path:
try:
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.config_pb2 import RunMetadata
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.step_stats_pb2 import (
AllocatorMemoryUsed,
DeviceStepStats,
NodeExecStats,
StepStats,
)
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboardX import SummaryWriter
except ImportError:
logger.error(
"TensorBoard and TensorboardX are required for visualize.",
exc_info=True,
)
return
enable_receptive_field()
graph = Network.load(model_path)
graph.reset_batch_size(1)
has_input = False
if input is not None or inp_dict is not None:
has_input = True
repl_dict = {}
inp_vars = graph.input_vars
if inp_dict is not None:
assert len(inp_dict) == len(
inp_vars
), "Inputs are not sufficient for calculation."
for v in inp_vars:
new_input = graph.make_const(inp_dict[v.name], name=v.name)
repl_dict[v] = new_input
else:
            assert len(inp_vars) == 1, "The graph needs exactly one input."
inp_var = inp_vars[0]
repl_dict[inp_var] = graph.make_const(input, name=inp_var.name)
graph.replace_vars(repl_dict=repl_dict)
graph._compile()
def process_name(name):
        # node names that start with a dot or contain a float constant break the TensorBoard display
if not re.match(r"^[+-]?\d*\.\d*", name):
name = name.replace(".", "/")
return name.encode(encoding="utf-8")
summary = [["item", "value"]]
node_list = []
flops_list = []
params_list = []
activations_list = []
total_stats = namedtuple(
"total_stats", ["param_size", "param_dims", "flops", "act_size", "act_dims"]
)
stats_details = namedtuple("module_stats", ["params", "flops", "activations"])
for node in tqdm(graph.all_oprs):
if hasattr(node, "output_idx"):
node_oup = node.outputs[node.output_idx]
else:
if len(node.outputs) != 1:
                logger.warning(
                    "OpNode {} has more than one output and no 'output_idx' attribute.".format(
node
)
)
node_oup = node.outputs[0]
inp_list = [process_name(var.owner.name) for var in node.inputs]
if log_path:
            # for the detailed attr format, see tensorboard/compat/proto/attr_value.proto
attr = {
"_output_shapes": AttrValue(
list=AttrValue.ListValue(
shape=[
TensorShapeProto(
dim=[
TensorShapeProto.Dim(size=d) for d in node_oup.shape
]
)
]
)
),
"params": AttrValue(s=str(node.params).encode(encoding="utf-8")),
"dtype": AttrValue(s=str(node_oup.dtype).encode(encoding="utf-8")),
}
if cal_flops:
flops_stats = get_op_stats(node, node.inputs, node.outputs)
if flops_stats is not None:
# add op flops attr
                if log_path and "flops" in flops_stats:
attr["flops"] = AttrValue(
s=sizeof_fmt(flops_stats["flops"]).encode(encoding="utf-8")
)
flops_stats["name"] = node.name
flops_stats["class_name"] = node.type
flops_list.append(flops_stats)
if cal_activations:
acts = get_activation_stats(node_oup, has_input=has_input)
acts["name"] = node.name
acts["class_name"] = node.type
activations_list.append(acts)
if cal_params:
if node.type == "ImmutableTensor":
param_stats = get_param_stats(node_oup)
# add tensor size attr
if log_path:
attr["size"] = AttrValue(
s=sizeof_fmt(param_stats["size"]).encode(encoding="utf-8")
)
param_stats["name"] = node.name
params_list.append(param_stats)
if log_path:
node_list.append(
NodeDef(
name=process_name(node.name),
op=node.type,
input=inp_list,
attr=attr,
)
)
# summary
extra_info = {
"#ops": len(graph.all_oprs),
"#params": len(params_list),
}
(
total_flops,
total_param_dims,
total_param_size,
total_act_dims,
total_act_size,
) = (0, 0, 0, 0, 0)
if cal_params:
total_param_dims, total_param_size, params_list = sum_param_stats(
params_list, bar_length_max
)
extra_info["total_param_dims"] = sizeof_fmt(total_param_dims, suffix="")
extra_info["total_param_size"] = sizeof_fmt(total_param_size)
if logging_to_stdout:
print_param_stats(params_list)
if cal_flops:
total_flops, flops_list = sum_op_stats(flops_list, bar_length_max)
extra_info["total_flops"] = sizeof_fmt(total_flops, suffix="OPs")
if logging_to_stdout:
print_op_stats(flops_list)
if cal_activations:
total_act_dims, total_act_size, activations_list = sum_activations_stats(
activations_list, bar_length_max
)
extra_info["total_act_dims"] = sizeof_fmt(total_act_dims, suffix="")
extra_info["total_act_size"] = sizeof_fmt(total_act_size)
if logging_to_stdout:
| print_activations_stats(activations_list, has_input=has_input) | megengine.utils.module_stats.print_activations_stats |
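For orientation, a minimal invocation sketch of the `visualize` helper this fragment belongs to (its full signature appears in the next row). The model and log paths are placeholders, not files shipped with MegEngine:

import numpy as np

# Hypothetical call; "model.mge" and "./logs" are stand-ins.
visualize(
    model_path="model.mge",   # a previously dumped MegEngine graph
    log_path="./logs",        # TensorBoard log dir; a falsy value skips graph logging
    input=np.random.rand(1, 3, 224, 224).astype("float32"),  # only for single-input graphs
)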
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import logging
import re
from collections import namedtuple
import numpy as np
from tqdm import tqdm
import megengine as mge
from megengine.core.tensor.dtype import is_quantize
from megengine.logger import _imperative_rt_logger, get_logger, set_mgb_log_level
from megengine.utils.module_stats import (
enable_receptive_field,
get_activation_stats,
get_op_stats,
get_param_stats,
print_activations_stats,
print_op_stats,
print_param_stats,
print_summary,
sizeof_fmt,
sum_activations_stats,
sum_op_stats,
sum_param_stats,
)
from megengine.utils.network import Network
logger = get_logger(__name__)
def visualize(
model_path: str,
log_path: str,
input: np.ndarray = None,
inp_dict: dict = None,
cal_params: bool = True,
cal_flops: bool = True,
cal_activations: bool = True,
logging_to_stdout: bool = True,
bar_length_max: int = 20,
):
r"""Load megengine dumped model and visualize graph structure with tensorboard log files.
Can also record and print model's statistics like :func:`~.module_stats`
Args:
model_path: dir path for megengine dumped model.
log_path: dir path for tensorboard graph log.
input: user defined input data for running model and calculating stats, alternative with inp_dict, used when the model has only one input.
inp_dict: input dict for running model and calculating stats, alternative with input, used when the model has more than one input. When both input and inp_dict are None, a random input will be used.
cal_params: whether calculate and record params size.
cal_flops: whether calculate and record op flops.
cal_activations: whether calculate and record op activations.
logging_to_stdout: whether print all calculated statistic details.
bar_length_max: size of bar indicating max flops or parameter size in net stats.
"""
if log_path:
try:
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.config_pb2 import RunMetadata
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.step_stats_pb2 import (
AllocatorMemoryUsed,
DeviceStepStats,
NodeExecStats,
StepStats,
)
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboardX import SummaryWriter
except ImportError:
logger.error(
"TensorBoard and TensorboardX are required for visualize.",
exc_info=True,
)
return
enable_receptive_field()
graph = Network.load(model_path)
graph.reset_batch_size(1)
has_input = False
if input is not None or inp_dict is not None:
has_input = True
repl_dict = {}
inp_vars = graph.input_vars
if inp_dict is not None:
assert len(inp_dict) == len(
inp_vars
), "Inputs are not sufficient for calculation."
for v in inp_vars:
new_input = graph.make_const(inp_dict[v.name], name=v.name)
repl_dict[v] = new_input
else:
            assert len(inp_vars) == 1, "Graph has more than one input; please provide inp_dict."
inp_var = inp_vars[0]
repl_dict[inp_var] = graph.make_const(input, name=inp_var.name)
graph.replace_vars(repl_dict=repl_dict)
graph._compile()
def process_name(name):
        # node names that begin with a dot or look like float constants break TensorBoard display
if not re.match(r"^[+-]?\d*\.\d*", name):
name = name.replace(".", "/")
return name.encode(encoding="utf-8")
summary = [["item", "value"]]
node_list = []
flops_list = []
params_list = []
activations_list = []
total_stats = namedtuple(
"total_stats", ["param_size", "param_dims", "flops", "act_size", "act_dims"]
)
stats_details = namedtuple("module_stats", ["params", "flops", "activations"])
for node in tqdm(graph.all_oprs):
if hasattr(node, "output_idx"):
node_oup = node.outputs[node.output_idx]
else:
if len(node.outputs) != 1:
logger.warning(
"OpNode {} has more than one output and not has 'output_idx' attr.".format(
node
)
)
node_oup = node.outputs[0]
inp_list = [process_name(var.owner.name) for var in node.inputs]
if log_path:
# detail format see tensorboard/compat/proto/attr_value.proto
attr = {
"_output_shapes": AttrValue(
list=AttrValue.ListValue(
shape=[
TensorShapeProto(
dim=[
TensorShapeProto.Dim(size=d) for d in node_oup.shape
]
)
]
)
),
"params": AttrValue(s=str(node.params).encode(encoding="utf-8")),
"dtype": AttrValue(s=str(node_oup.dtype).encode(encoding="utf-8")),
}
if cal_flops:
flops_stats = get_op_stats(node, node.inputs, node.outputs)
if flops_stats is not None:
# add op flops attr
                if log_path and "flops" in flops_stats:
attr["flops"] = AttrValue(
s=sizeof_fmt(flops_stats["flops"]).encode(encoding="utf-8")
)
flops_stats["name"] = node.name
flops_stats["class_name"] = node.type
flops_list.append(flops_stats)
if cal_activations:
acts = get_activation_stats(node_oup, has_input=has_input)
acts["name"] = node.name
acts["class_name"] = node.type
activations_list.append(acts)
if cal_params:
if node.type == "ImmutableTensor":
param_stats = | get_param_stats(node_oup) | megengine.utils.module_stats.get_param_stats |
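The stats dicts collected per node (the loop reads the "size" key and attaches "name") are later reduced by `sum_param_stats`. A plain-Python sketch of that kind of reduction; only the "size" key is taken from the code above, and the "dims" key is a hypothetical stand-in:

def sum_param_stats_sketch(params_list):
    # mirrors the (total_dims, total_size, params_list) triple unpacked later in this file
    total_size = sum(p["size"] for p in params_list)
    total_dims = sum(p.get("dims", 0) for p in params_list)  # "dims" key is assumed
    return total_dims, total_size, params_list

demo = [{"name": "w0", "size": 4096, "dims": 1024},
        {"name": "b0", "size": 16, "dims": 4}]
print(sum_param_stats_sketch(demo))  # (1028, 4112, [...])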
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import logging
import re
from collections import namedtuple
import numpy as np
from tqdm import tqdm
import megengine as mge
from megengine.core.tensor.dtype import is_quantize
from megengine.logger import _imperative_rt_logger, get_logger, set_mgb_log_level
from megengine.utils.module_stats import (
enable_receptive_field,
get_activation_stats,
get_op_stats,
get_param_stats,
print_activations_stats,
print_op_stats,
print_param_stats,
print_summary,
sizeof_fmt,
sum_activations_stats,
sum_op_stats,
sum_param_stats,
)
from megengine.utils.network import Network
logger = get_logger(__name__)
def visualize(
model_path: str,
log_path: str,
input: np.ndarray = None,
inp_dict: dict = None,
cal_params: bool = True,
cal_flops: bool = True,
cal_activations: bool = True,
logging_to_stdout: bool = True,
bar_length_max: int = 20,
):
r"""Load megengine dumped model and visualize graph structure with tensorboard log files.
Can also record and print model's statistics like :func:`~.module_stats`
Args:
model_path: dir path for megengine dumped model.
log_path: dir path for tensorboard graph log.
input: user defined input data for running model and calculating stats, alternative with inp_dict, used when the model has only one input.
inp_dict: input dict for running model and calculating stats, alternative with input, used when the model has more than one input. When both input and inp_dict are None, a random input will be used.
cal_params: whether calculate and record params size.
cal_flops: whether calculate and record op flops.
cal_activations: whether calculate and record op activations.
logging_to_stdout: whether print all calculated statistic details.
bar_length_max: size of bar indicating max flops or parameter size in net stats.
"""
if log_path:
try:
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.config_pb2 import RunMetadata
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.step_stats_pb2 import (
AllocatorMemoryUsed,
DeviceStepStats,
NodeExecStats,
StepStats,
)
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboardX import SummaryWriter
except ImportError:
logger.error(
"TensorBoard and TensorboardX are required for visualize.",
exc_info=True,
)
return
enable_receptive_field()
graph = Network.load(model_path)
graph.reset_batch_size(1)
has_input = False
if input is not None or inp_dict is not None:
has_input = True
repl_dict = {}
inp_vars = graph.input_vars
if inp_dict is not None:
assert len(inp_dict) == len(
inp_vars
), "Inputs are not sufficient for calculation."
for v in inp_vars:
new_input = graph.make_const(inp_dict[v.name], name=v.name)
repl_dict[v] = new_input
else:
            assert len(inp_vars) == 1, "Graph has more than one input; please provide inp_dict."
inp_var = inp_vars[0]
repl_dict[inp_var] = graph.make_const(input, name=inp_var.name)
graph.replace_vars(repl_dict=repl_dict)
graph._compile()
def process_name(name):
        # node names that begin with a dot or look like float constants break TensorBoard display
if not re.match(r"^[+-]?\d*\.\d*", name):
name = name.replace(".", "/")
return name.encode(encoding="utf-8")
summary = [["item", "value"]]
node_list = []
flops_list = []
params_list = []
activations_list = []
total_stats = namedtuple(
"total_stats", ["param_size", "param_dims", "flops", "act_size", "act_dims"]
)
stats_details = namedtuple("module_stats", ["params", "flops", "activations"])
for node in tqdm(graph.all_oprs):
if hasattr(node, "output_idx"):
node_oup = node.outputs[node.output_idx]
else:
if len(node.outputs) != 1:
logger.warning(
"OpNode {} has more than one output and not has 'output_idx' attr.".format(
node
)
)
node_oup = node.outputs[0]
inp_list = [process_name(var.owner.name) for var in node.inputs]
if log_path:
# detail format see tensorboard/compat/proto/attr_value.proto
attr = {
"_output_shapes": AttrValue(
list=AttrValue.ListValue(
shape=[
TensorShapeProto(
dim=[
TensorShapeProto.Dim(size=d) for d in node_oup.shape
]
)
]
)
),
"params": AttrValue(s=str(node.params).encode(encoding="utf-8")),
"dtype": AttrValue(s=str(node_oup.dtype).encode(encoding="utf-8")),
}
if cal_flops:
flops_stats = get_op_stats(node, node.inputs, node.outputs)
if flops_stats is not None:
# add op flops attr
                if log_path and "flops" in flops_stats:
attr["flops"] = AttrValue(
s= | sizeof_fmt(flops_stats["flops"]) | megengine.utils.module_stats.sizeof_fmt |
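`sizeof_fmt` is used throughout this file to render byte and OP counts human-readable (see the `suffix="OPs"` and `suffix=""` calls). An illustrative re-implementation of that classic formatter; MegEngine's exact thresholds and output string are not guaranteed to match:

def sizeof_fmt_sketch(num, suffix="B"):
    # divide down by 1024 until the magnitude fits, then attach a unit prefix
    for unit in ["", "K", "M", "G", "T", "P"]:
        if abs(num) < 1024.0:
            return "{:.2f} {}{}".format(num, unit, suffix)
        num /= 1024.0
    return "{:.2f} E{}".format(num, suffix)

print(sizeof_fmt_sketch(123456789, suffix="OPs"))  # "117.74 MOPs" under these assumptions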
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import logging
import re
from collections import namedtuple
import numpy as np
from tqdm import tqdm
import megengine as mge
from megengine.core.tensor.dtype import is_quantize
from megengine.logger import _imperative_rt_logger, get_logger, set_mgb_log_level
from megengine.utils.module_stats import (
enable_receptive_field,
get_activation_stats,
get_op_stats,
get_param_stats,
print_activations_stats,
print_op_stats,
print_param_stats,
print_summary,
sizeof_fmt,
sum_activations_stats,
sum_op_stats,
sum_param_stats,
)
from megengine.utils.network import Network
logger = get_logger(__name__)
def visualize(
model_path: str,
log_path: str,
input: np.ndarray = None,
inp_dict: dict = None,
cal_params: bool = True,
cal_flops: bool = True,
cal_activations: bool = True,
logging_to_stdout: bool = True,
bar_length_max: int = 20,
):
r"""Load megengine dumped model and visualize graph structure with tensorboard log files.
Can also record and print model's statistics like :func:`~.module_stats`
Args:
model_path: dir path for megengine dumped model.
log_path: dir path for tensorboard graph log.
input: user defined input data for running model and calculating stats, alternative with inp_dict, used when the model has only one input.
inp_dict: input dict for running model and calculating stats, alternative with input, used when the model has more than one input. When both input and inp_dict are None, a random input will be used.
cal_params: whether calculate and record params size.
cal_flops: whether calculate and record op flops.
cal_activations: whether calculate and record op activations.
logging_to_stdout: whether print all calculated statistic details.
bar_length_max: size of bar indicating max flops or parameter size in net stats.
"""
if log_path:
try:
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.config_pb2 import RunMetadata
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.step_stats_pb2 import (
AllocatorMemoryUsed,
DeviceStepStats,
NodeExecStats,
StepStats,
)
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboardX import SummaryWriter
except ImportError:
logger.error(
"TensorBoard and TensorboardX are required for visualize.",
exc_info=True,
)
return
enable_receptive_field()
graph = Network.load(model_path)
graph.reset_batch_size(1)
has_input = False
if input is not None or inp_dict is not None:
has_input = True
repl_dict = {}
inp_vars = graph.input_vars
if inp_dict is not None:
assert len(inp_dict) == len(
inp_vars
), "Inputs are not sufficient for calculation."
for v in inp_vars:
new_input = graph.make_const(inp_dict[v.name], name=v.name)
repl_dict[v] = new_input
else:
            assert len(inp_vars) == 1, "Graph has more than one input; please provide inp_dict."
inp_var = inp_vars[0]
repl_dict[inp_var] = graph.make_const(input, name=inp_var.name)
graph.replace_vars(repl_dict=repl_dict)
graph._compile()
def process_name(name):
        # node names that begin with a dot or look like float constants break TensorBoard display
if not re.match(r"^[+-]?\d*\.\d*", name):
name = name.replace(".", "/")
return name.encode(encoding="utf-8")
summary = [["item", "value"]]
node_list = []
flops_list = []
params_list = []
activations_list = []
total_stats = namedtuple(
"total_stats", ["param_size", "param_dims", "flops", "act_size", "act_dims"]
)
stats_details = namedtuple("module_stats", ["params", "flops", "activations"])
for node in tqdm(graph.all_oprs):
if hasattr(node, "output_idx"):
node_oup = node.outputs[node.output_idx]
else:
if len(node.outputs) != 1:
logger.warning(
"OpNode {} has more than one output and not has 'output_idx' attr.".format(
node
)
)
node_oup = node.outputs[0]
inp_list = [process_name(var.owner.name) for var in node.inputs]
if log_path:
# detail format see tensorboard/compat/proto/attr_value.proto
attr = {
"_output_shapes": AttrValue(
list=AttrValue.ListValue(
shape=[
TensorShapeProto(
dim=[
TensorShapeProto.Dim(size=d) for d in node_oup.shape
]
)
]
)
),
"params": AttrValue(s=str(node.params).encode(encoding="utf-8")),
"dtype": AttrValue(s=str(node_oup.dtype).encode(encoding="utf-8")),
}
if cal_flops:
flops_stats = get_op_stats(node, node.inputs, node.outputs)
if flops_stats is not None:
# add op flops attr
                if log_path and "flops" in flops_stats:
attr["flops"] = AttrValue(
s=sizeof_fmt(flops_stats["flops"]).encode(encoding="utf-8")
)
flops_stats["name"] = node.name
flops_stats["class_name"] = node.type
flops_list.append(flops_stats)
if cal_activations:
acts = get_activation_stats(node_oup, has_input=has_input)
acts["name"] = node.name
acts["class_name"] = node.type
activations_list.append(acts)
if cal_params:
if node.type == "ImmutableTensor":
param_stats = get_param_stats(node_oup)
# add tensor size attr
if log_path:
attr["size"] = AttrValue(
s= | sizeof_fmt(param_stats["size"]) | megengine.utils.module_stats.sizeof_fmt |
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
from megengine.core.tensor_factory import zeros
def ns_loss_gen(output_fake):
r"""
Non-saturating loss for generator.
Args:
output_fake (Tensor): Discriminator output logits for fake images.
Returns:
Tensor: A scalar tensor loss output.
"""
output_fake = | F.sigmoid(output_fake) | megengine.functional.sigmoid |
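The completion above applies `F.sigmoid` inside `ns_loss_gen`; the full definition (visible in the following rows) is `-log(sigmoid(x) + 1e-8)` averaged over the batch. A NumPy sanity check of that formula, independent of MegEngine:

import numpy as np

def ns_loss_gen_np(logits, eps=1e-8):
    # -log(sigmoid(x) + eps), averaged, mirroring the MegEngine version below
    return float(-np.log(1.0 / (1.0 + np.exp(-logits)) + eps).mean())

print(ns_loss_gen_np(np.array([-2.0, 0.0, 3.0])))  # ~0.956; confident "real" logits push it toward 0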
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
from megengine.core.tensor_factory import zeros
def ns_loss_gen(output_fake):
r"""
Non-saturating loss for generator.
Args:
output_fake (Tensor): Discriminator output logits for fake images.
Returns:
Tensor: A scalar tensor loss output.
"""
output_fake = F.sigmoid(output_fake)
return -F.log(output_fake + 1e-8).mean()
# def ns_loss_gen(output_fake):
# """numerical stable version"""
# return F.log(1 + F.exp(-output_fake)).mean()
def _bce_loss_with_logits(output, labels, **kwargs):
r"""
Sigmoid cross entropy with logits, see tensorflow
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
"""
loss = F.maximum(output, 0) - output * labels + F.log(1 + F.exp(-F.abs(output)))
return loss.mean()
def minimax_loss_dis(output_fake,
output_real,
real_label_val=1.0,
fake_label_val=0.0,
**kwargs):
r"""
Standard minimax loss for GANs through the BCE Loss with logits fn.
Args:
output_fake (Tensor): Discriminator output logits for fake images.
output_real (Tensor): Discriminator output logits for real images.
real_label_val (int): Label for real images.
fake_label_val (int): Label for fake images.
        **kwargs: Unused; absorbed for interface compatibility (the torch-specific device argument from the original mimicry code was dropped).
Returns:
Tensor: A scalar tensor loss output.
"""
# Produce real and fake labels.
fake_labels = | zeros((output_fake.shape[0], 1)) | megengine.core.tensor_factory.zeros |
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
from megengine.core.tensor_factory import zeros
def ns_loss_gen(output_fake):
r"""
Non-saturating loss for generator.
Args:
output_fake (Tensor): Discriminator output logits for fake images.
Returns:
Tensor: A scalar tensor loss output.
"""
output_fake = F.sigmoid(output_fake)
return -F.log(output_fake + 1e-8).mean()
# def ns_loss_gen(output_fake):
# """numerical stable version"""
# return F.log(1 + F.exp(-output_fake)).mean()
def _bce_loss_with_logits(output, labels, **kwargs):
r"""
Sigmoid cross entropy with logits, see tensorflow
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
"""
loss = F.maximum(output, 0) - output * labels + F.log(1 + F.exp(-F.abs(output)))
return loss.mean()
def minimax_loss_dis(output_fake,
output_real,
real_label_val=1.0,
fake_label_val=0.0,
**kwargs):
r"""
Standard minimax loss for GANs through the BCE Loss with logits fn.
Args:
output_fake (Tensor): Discriminator output logits for fake images.
output_real (Tensor): Discriminator output logits for real images.
real_label_val (int): Label for real images.
fake_label_val (int): Label for fake images.
        **kwargs: Unused; absorbed for interface compatibility (the torch-specific device argument from the original mimicry code was dropped).
Returns:
Tensor: A scalar tensor loss output.
"""
# Produce real and fake labels.
fake_labels = zeros((output_fake.shape[0], 1)) + fake_label_val
real_labels = | zeros((output_real.shape[0], 1)) | megengine.core.tensor_factory.zeros |
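`minimax_loss_dis` builds all-zero labels for fake logits and all-one labels for real logits, then, in the standard formulation this file ports from mimicry, presumably sums the two BCE-with-logits terms. A NumPy sketch under that assumption:

import numpy as np

def bce_with_logits_np(x, z):
    # same numerically stable form as _bce_loss_with_logits above
    return (np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))).mean()

fake_logits = np.array([[-1.5], [0.2]])
real_logits = np.array([[2.0], [0.7]])
loss_d = bce_with_logits_np(fake_logits, 0.0) + bce_with_logits_np(real_logits, 1.0)
print(loss_d)  # how the two terms are combined is assumed, not shown in the truncated row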
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
from megengine.core.tensor_factory import zeros
def ns_loss_gen(output_fake):
r"""
Non-saturating loss for generator.
Args:
output_fake (Tensor): Discriminator output logits for fake images.
Returns:
Tensor: A scalar tensor loss output.
"""
output_fake = F.sigmoid(output_fake)
return -F.log(output_fake + 1e-8).mean()
# def ns_loss_gen(output_fake):
# """numerical stable version"""
# return F.log(1 + F.exp(-output_fake)).mean()
def _bce_loss_with_logits(output, labels, **kwargs):
r"""
Sigmoid cross entropy with logits, see tensorflow
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
"""
loss = | F.maximum(output, 0) | megengine.functional.maximum |
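The expression completed above is the numerically stable BCE-with-logits identity: max(x, 0) - x*z + log(1 + exp(-|x|)) equals -[z*log(sigmoid(x)) + (1-z)*log(1-sigmoid(x))] while avoiding overflow at large |x|. A quick NumPy verification:

import numpy as np

x = np.array([-30.0, -1.0, 0.0, 2.0, 30.0])
z = np.array([0.0, 1.0, 1.0, 0.0, 1.0])
stable = np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))
p = 1.0 / (1.0 + np.exp(-x))
naive = -(z * np.log(p + 1e-12) + (1 - z) * np.log(1 - p + 1e-12))
print(np.allclose(stable, naive, atol=1e-4))  # True; the naive form needs an eps crutch at |x|=30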
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
from megengine.core.tensor_factory import zeros
def ns_loss_gen(output_fake):
r"""
Non-saturating loss for generator.
Args:
output_fake (Tensor): Discriminator output logits for fake images.
Returns:
Tensor: A scalar tensor loss output.
"""
output_fake = F.sigmoid(output_fake)
return - | F.log(output_fake + 1e-8) | megengine.functional.log |
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
from megengine.core.tensor_factory import zeros
def ns_loss_gen(output_fake):
r"""
Non-saturating loss for generator.
Args:
output_fake (Tensor): Discriminator output logits for fake images.
Returns:
Tensor: A scalar tensor loss output.
"""
output_fake = F.sigmoid(output_fake)
return -F.log(output_fake + 1e-8).mean()
# def ns_loss_gen(output_fake):
# """numerical stable version"""
# return F.log(1 + F.exp(-output_fake)).mean()
def _bce_loss_with_logits(output, labels, **kwargs):
r"""
Sigmoid cross entropy with logits, see tensorflow
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
"""
loss = F.maximum(output, 0) - output * labels + F.log(1 + F.exp(- | F.abs(output) | megengine.functional.abs |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import MLP
import megengine._internal as mgb
import megengine.functional as F
from megengine.core import Graph
from megengine.module import Linear, Module
from megengine.optimizer import SGD
from megengine.test import assertTensorClose
def test_compile_multi_times_eager():
return # XXX: rewrite or remove this test
data = Input("data", shape=(2, 28))
label = Input("label", shape=(2,), dtype=np.int32)
mlp = MLP()
opt = SGD(mlp.parameters(requires_grad=True), lr=0.01)
pred0 = mlp(data)
pred = | F.softmax(pred0) | megengine.functional.softmax |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import MLP
import megengine._internal as mgb
import megengine.functional as F
from megengine.core import Graph
from megengine.module import Linear, Module
from megengine.optimizer import SGD
from megengine.test import assertTensorClose
def test_compile_multi_times_eager():
return # XXX: rewrite or remove this test
data = Input("data", shape=(2, 28))
label = Input("label", shape=(2,), dtype=np.int32)
mlp = MLP()
opt = SGD(mlp.parameters(requires_grad=True), lr=0.01)
pred0 = mlp(data)
pred = F.softmax(pred0)
loss = F.square_loss(pred, label.reshape(2, 1))
opt.zero_grad()
grads = opt.backward(loss)
opt.step()
f0 = compile(pred, None)
f1 = compile([pred, loss], grads, copy=False)
for _ in range(3):
data = np.random.random((2, 28)).astype(np.float32)
label = np.random.randint(0, 10, (2,)).astype(np.float32)
out0 = f0(data=data)
out1 = f1(data=data, label=label)
| assertTensorClose(out0[0], out1[0]) | megengine.test.assertTensorClose |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import MLP
import megengine._internal as mgb
import megengine.functional as F
from megengine.core import Graph
from megengine.module import Linear, Module
from megengine.optimizer import SGD
from megengine.test import assertTensorClose
def test_compile_multi_times_eager():
return # XXX: rewrite or remove this test
data = Input("data", shape=(2, 28))
label = Input("label", shape=(2,), dtype=np.int32)
mlp = MLP()
opt = SGD(mlp.parameters(requires_grad=True), lr=0.01)
pred0 = mlp(data)
pred = F.softmax(pred0)
loss = F.square_loss(pred, label.reshape(2, 1))
opt.zero_grad()
grads = opt.backward(loss)
opt.step()
f0 = compile(pred, None)
f1 = compile([pred, loss], grads, copy=False)
for _ in range(3):
data = np.random.random((2, 28)).astype(np.float32)
label = np.random.randint(0, 10, (2,)).astype(np.float32)
out0 = f0(data=data)
out1 = f1(data=data, label=label)
assertTensorClose(out0[0], out1[0])
def test_compile_multi_times_static():
return # XXX: rewrite or remove this test
with | Graph() | megengine.core.Graph |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import MLP
import megengine._internal as mgb
import megengine.functional as F
from megengine.core import Graph
from megengine.module import Linear, Module
from megengine.optimizer import SGD
from megengine.test import assertTensorClose
def test_compile_multi_times_eager():
return # XXX: rewrite or remove this test
data = Input("data", shape=(2, 28))
label = Input("label", shape=(2,), dtype=np.int32)
mlp = MLP()
opt = SGD(mlp.parameters(requires_grad=True), lr=0.01)
pred0 = mlp(data)
pred = F.softmax(pred0)
loss = F.square_loss(pred, label.reshape(2, 1))
opt.zero_grad()
grads = opt.backward(loss)
opt.step()
f0 = compile(pred, None)
f1 = compile([pred, loss], grads, copy=False)
for _ in range(3):
data = np.random.random((2, 28)).astype(np.float32)
label = np.random.randint(0, 10, (2,)).astype(np.float32)
out0 = f0(data=data)
out1 = f1(data=data, label=label)
assertTensorClose(out0[0], out1[0])
def test_compile_multi_times_static():
return # XXX: rewrite or remove this test
with Graph() as cg:
cg.set_option("eager_evaluation", False)
data = Input("data", shape=(2, 28))
label = Input("label", shape=(2,), dtype=np.int32)
mlp = MLP()
opt = SGD(mlp.parameters(requires_grad=True), lr=0.01)
pred0 = mlp(data)
pred = | F.softmax(pred0) | megengine.functional.softmax |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import MLP
import megengine._internal as mgb
import megengine.functional as F
from megengine.core import Graph
from megengine.module import Linear, Module
from megengine.optimizer import SGD
from megengine.test import assertTensorClose
def test_compile_multi_times_eager():
return # XXX: rewrite or remove this test
data = Input("data", shape=(2, 28))
label = Input("label", shape=(2,), dtype=np.int32)
mlp = MLP()
opt = SGD(mlp.parameters(requires_grad=True), lr=0.01)
pred0 = mlp(data)
pred = F.softmax(pred0)
loss = F.square_loss(pred, label.reshape(2, 1))
opt.zero_grad()
grads = opt.backward(loss)
opt.step()
f0 = compile(pred, None)
f1 = compile([pred, loss], grads, copy=False)
for _ in range(3):
data = np.random.random((2, 28)).astype(np.float32)
label = np.random.randint(0, 10, (2,)).astype(np.float32)
out0 = f0(data=data)
out1 = f1(data=data, label=label)
assertTensorClose(out0[0], out1[0])
def test_compile_multi_times_static():
return # XXX: rewrite or remove this test
with Graph() as cg:
cg.set_option("eager_evaluation", False)
data = Input("data", shape=(2, 28))
label = Input("label", shape=(2,), dtype=np.int32)
mlp = MLP()
opt = SGD(mlp.parameters(requires_grad=True), lr=0.01)
pred0 = mlp(data)
pred = F.softmax(pred0)
loss = F.square_loss(pred, label.reshape(2, 1))
opt.zero_grad()
grads = opt.backward(loss)
opt.step()
f0 = compile(pred, None)
f1 = compile([pred, loss], grads, copy=True)
data = np.random.random((2, 28)).astype(np.float32)
label = np.random.randint(0, 10, (2,)).astype(np.float32)
out0 = f0(data=data)
out1 = f1(data=data, label=label)
| assertTensorClose(out0[0], out1[0]) | megengine.test.assertTensorClose |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import time
import numpy as np
import pytest
from megengine.data.collator import Collator
from megengine.data.dataloader import DataLoader
from megengine.data.dataset import ArrayDataset
from megengine.data.sampler import RandomSampler, SequentialSampler
from megengine.data.transform import PseudoTransform, Transform
def init_dataset():
sample_num = 100
rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)
label = np.random.randint(0, 10, size=(sample_num,), dtype=int)
dataset = | ArrayDataset(rand_data, label) | megengine.data.dataset.ArrayDataset |
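`init_dataset` wraps two parallel arrays in an `ArrayDataset`. A minimal check of the map-style behavior the tests below rely on (length and per-sample indexing); that indexing returns a plain (data, label) tuple is an assumption here:

import numpy as np
from megengine.data.dataset import ArrayDataset

data = np.zeros((10, 1, 32, 32), dtype=np.uint8)
labels = np.arange(10, dtype=int)
ds = ArrayDataset(data, labels)
print(len(ds))              # 10
sample, label = ds[3]       # one (image, label) pair
print(sample.shape, label)  # (1, 32, 32) 3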
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import time
import numpy as np
import pytest
from megengine.data.collator import Collator
from megengine.data.dataloader import DataLoader
from megengine.data.dataset import ArrayDataset
from megengine.data.sampler import RandomSampler, SequentialSampler
from megengine.data.transform import PseudoTransform, Transform
def init_dataset():
sample_num = 100
rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)
label = np.random.randint(0, 10, size=(sample_num,), dtype=int)
dataset = ArrayDataset(rand_data, label)
return dataset
def test_dataloader_init():
dataset = init_dataset()
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=2, divide=True)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, timeout=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=0, divide=True)
dataloader = | DataLoader(dataset) | megengine.data.dataloader.DataLoader |
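The asserts that follow this completion pin down DataLoader's defaults: omitting sampler, transform and collator selects SequentialSampler, PseudoTransform and Collator respectively. A compact restatement of that contract:

from megengine.data.collator import Collator
from megengine.data.dataloader import DataLoader
from megengine.data.sampler import SequentialSampler
from megengine.data.transform import PseudoTransform

loader = DataLoader(init_dataset())  # init_dataset is the helper defined in this test file
assert isinstance(loader.sampler, SequentialSampler)
assert isinstance(loader.transform, PseudoTransform)
assert isinstance(loader.collator, Collator)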
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import time
import numpy as np
import pytest
from megengine.data.collator import Collator
from megengine.data.dataloader import DataLoader
from megengine.data.dataset import ArrayDataset
from megengine.data.sampler import RandomSampler, SequentialSampler
from megengine.data.transform import PseudoTransform, Transform
def init_dataset():
sample_num = 100
rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)
label = np.random.randint(0, 10, size=(sample_num,), dtype=int)
dataset = ArrayDataset(rand_data, label)
return dataset
def test_dataloader_init():
dataset = init_dataset()
with pytest.raises(ValueError):
dataloader = | DataLoader(dataset, num_workers=2, divide=True) | megengine.data.dataloader.DataLoader |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import time
import numpy as np
import pytest
from megengine.data.collator import Collator
from megengine.data.dataloader import DataLoader
from megengine.data.dataset import ArrayDataset
from megengine.data.sampler import RandomSampler, SequentialSampler
from megengine.data.transform import PseudoTransform, Transform
def init_dataset():
sample_num = 100
rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)
label = np.random.randint(0, 10, size=(sample_num,), dtype=int)
dataset = ArrayDataset(rand_data, label)
return dataset
def test_dataloader_init():
dataset = init_dataset()
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=2, divide=True)
with pytest.raises(ValueError):
dataloader = | DataLoader(dataset, num_workers=-1) | megengine.data.dataloader.DataLoader |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import time
import numpy as np
import pytest
from megengine.data.collator import Collator
from megengine.data.dataloader import DataLoader
from megengine.data.dataset import ArrayDataset
from megengine.data.sampler import RandomSampler, SequentialSampler
from megengine.data.transform import PseudoTransform, Transform
def init_dataset():
sample_num = 100
rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)
label = np.random.randint(0, 10, size=(sample_num,), dtype=int)
dataset = ArrayDataset(rand_data, label)
return dataset
def test_dataloader_init():
dataset = init_dataset()
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=2, divide=True)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=-1)
with pytest.raises(ValueError):
dataloader = | DataLoader(dataset, timeout=-1) | megengine.data.dataloader.DataLoader |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import time
import numpy as np
import pytest
from megengine.data.collator import Collator
from megengine.data.dataloader import DataLoader
from megengine.data.dataset import ArrayDataset
from megengine.data.sampler import RandomSampler, SequentialSampler
from megengine.data.transform import PseudoTransform, Transform
def init_dataset():
sample_num = 100
rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)
label = np.random.randint(0, 10, size=(sample_num,), dtype=int)
dataset = ArrayDataset(rand_data, label)
return dataset
def test_dataloader_init():
dataset = init_dataset()
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=2, divide=True)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, timeout=-1)
with pytest.raises(ValueError):
dataloader = | DataLoader(dataset, num_workers=0, divide=True) | megengine.data.dataloader.DataLoader |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import time
import numpy as np
import pytest
from megengine.data.collator import Collator
from megengine.data.dataloader import DataLoader
from megengine.data.dataset import ArrayDataset
from megengine.data.sampler import RandomSampler, SequentialSampler
from megengine.data.transform import PseudoTransform, Transform
def init_dataset():
sample_num = 100
rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)
label = np.random.randint(0, 10, size=(sample_num,), dtype=int)
dataset = ArrayDataset(rand_data, label)
return dataset
def test_dataloader_init():
dataset = init_dataset()
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=2, divide=True)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, timeout=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=0, divide=True)
dataloader = DataLoader(dataset)
assert isinstance(dataloader.sampler, SequentialSampler)
assert isinstance(dataloader.transform, PseudoTransform)
assert isinstance(dataloader.collator, Collator)
dataloader = DataLoader(
dataset, sampler= | RandomSampler(dataset, batch_size=6, drop_last=False) | megengine.data.sampler.RandomSampler |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import time
import numpy as np
import pytest
from megengine.data.collator import Collator
from megengine.data.dataloader import DataLoader
from megengine.data.dataset import ArrayDataset
from megengine.data.sampler import RandomSampler, SequentialSampler
from megengine.data.transform import PseudoTransform, Transform
def init_dataset():
sample_num = 100
rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)
label = np.random.randint(0, 10, size=(sample_num,), dtype=int)
dataset = ArrayDataset(rand_data, label)
return dataset
def test_dataloader_init():
dataset = init_dataset()
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=2, divide=True)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, timeout=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=0, divide=True)
dataloader = DataLoader(dataset)
assert isinstance(dataloader.sampler, SequentialSampler)
assert isinstance(dataloader.transform, PseudoTransform)
assert isinstance(dataloader.collator, Collator)
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False)
)
assert len(dataloader) == 17
dataloader = DataLoader(
dataset, sampler= | RandomSampler(dataset, batch_size=6, drop_last=True) | megengine.data.sampler.RandomSampler |
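The 17-vs-16 asserts in this file are plain batch arithmetic over the 100-sample dataset with batch_size=6: keeping the ragged final batch (drop_last=False) gives ceil(100/6), dropping it gives floor(100/6):

import math

num_samples, batch_size = 100, 6
print(math.ceil(num_samples / batch_size))  # 17 -> len(dataloader) with drop_last=False
print(num_samples // batch_size)            # 16 -> len(dataloader) with drop_last=True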
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import time
import numpy as np
import pytest
from megengine.data.collator import Collator
from megengine.data.dataloader import DataLoader
from megengine.data.dataset import ArrayDataset
from megengine.data.sampler import RandomSampler, SequentialSampler
from megengine.data.transform import PseudoTransform, Transform
def init_dataset():
sample_num = 100
rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)
label = np.random.randint(0, 10, size=(sample_num,), dtype=int)
dataset = ArrayDataset(rand_data, label)
return dataset
def test_dataloader_init():
dataset = init_dataset()
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=2, divide=True)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, timeout=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=0, divide=True)
dataloader = DataLoader(dataset)
assert isinstance(dataloader.sampler, SequentialSampler)
assert isinstance(dataloader.transform, PseudoTransform)
assert isinstance(dataloader.collator, Collator)
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False)
)
assert len(dataloader) == 17
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=True)
)
assert len(dataloader) == 16
def test_dataloader_serial():
dataset = init_dataset()
dataloader = DataLoader(
dataset, sampler= | RandomSampler(dataset, batch_size=4, drop_last=False) | megengine.data.sampler.RandomSampler |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import time
import numpy as np
import pytest
from megengine.data.collator import Collator
from megengine.data.dataloader import DataLoader
from megengine.data.dataset import ArrayDataset
from megengine.data.sampler import RandomSampler, SequentialSampler
from megengine.data.transform import PseudoTransform, Transform
def init_dataset():
sample_num = 100
rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)
label = np.random.randint(0, 10, size=(sample_num,), dtype=int)
dataset = ArrayDataset(rand_data, label)
return dataset
def test_dataloader_init():
dataset = init_dataset()
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=2, divide=True)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, timeout=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=0, divide=True)
dataloader = DataLoader(dataset)
assert isinstance(dataloader.sampler, SequentialSampler)
assert isinstance(dataloader.transform, PseudoTransform)
assert isinstance(dataloader.collator, Collator)
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False)
)
assert len(dataloader) == 17
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=True)
)
assert len(dataloader) == 16
def test_dataloader_serial():
dataset = init_dataset()
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=4, drop_last=False)
)
for (data, label) in dataloader:
assert data.shape == (4, 1, 32, 32)
assert label.shape == (4,)
def test_dataloader_parallel():
# set max shared memory to 100M
os.environ["MGE_PLASMA_MEMORY"] = "100000000"
dataset = init_dataset()
dataloader = DataLoader(
dataset,
sampler= | RandomSampler(dataset, batch_size=4, drop_last=False) | megengine.data.sampler.RandomSampler |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import time
import numpy as np
import pytest
from megengine.data.collator import Collator
from megengine.data.dataloader import DataLoader
from megengine.data.dataset import ArrayDataset
from megengine.data.sampler import RandomSampler, SequentialSampler
from megengine.data.transform import PseudoTransform, Transform
def init_dataset():
sample_num = 100
rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)
label = np.random.randint(0, 10, size=(sample_num,), dtype=int)
dataset = ArrayDataset(rand_data, label)
return dataset
def test_dataloader_init():
dataset = init_dataset()
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=2, divide=True)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, timeout=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=0, divide=True)
dataloader = DataLoader(dataset)
assert isinstance(dataloader.sampler, SequentialSampler)
assert isinstance(dataloader.transform, PseudoTransform)
assert isinstance(dataloader.collator, Collator)
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False)
)
assert len(dataloader) == 17
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=True)
)
assert len(dataloader) == 16
def test_dataloader_serial():
dataset = init_dataset()
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=4, drop_last=False)
)
for (data, label) in dataloader:
assert data.shape == (4, 1, 32, 32)
assert label.shape == (4,)
def test_dataloader_parallel():
# set max shared memory to 100M
os.environ["MGE_PLASMA_MEMORY"] = "100000000"
dataset = init_dataset()
dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
num_workers=2,
divide=False,
)
for (data, label) in dataloader:
assert data.shape == (4, 1, 32, 32)
assert label.shape == (4,)
dataloader = DataLoader(
dataset,
sampler= | RandomSampler(dataset, batch_size=4, drop_last=False) | megengine.data.sampler.RandomSampler |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import time
import numpy as np
import pytest
from megengine.data.collator import Collator
from megengine.data.dataloader import DataLoader
from megengine.data.dataset import ArrayDataset
from megengine.data.sampler import RandomSampler, SequentialSampler
from megengine.data.transform import PseudoTransform, Transform
def init_dataset():
sample_num = 100
rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)
label = np.random.randint(0, 10, size=(sample_num,), dtype=int)
dataset = ArrayDataset(rand_data, label)
return dataset
def test_dataloader_init():
dataset = init_dataset()
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=2, divide=True)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, timeout=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=0, divide=True)
dataloader = DataLoader(dataset)
assert isinstance(dataloader.sampler, SequentialSampler)
assert isinstance(dataloader.transform, PseudoTransform)
assert isinstance(dataloader.collator, Collator)
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False)
)
assert len(dataloader) == 17
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=True)
)
assert len(dataloader) == 16
def test_dataloader_serial():
dataset = init_dataset()
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=4, drop_last=False)
)
for (data, label) in dataloader:
assert data.shape == (4, 1, 32, 32)
assert label.shape == (4,)
def test_dataloader_parallel():
# set max shared memory to 100M
os.environ["MGE_PLASMA_MEMORY"] = "100000000"
dataset = init_dataset()
dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
num_workers=2,
divide=False,
)
for (data, label) in dataloader:
assert data.shape == (4, 1, 32, 32)
assert label.shape == (4,)
dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
num_workers=2,
divide=True,
)
for (data, label) in dataloader:
assert data.shape == (4, 1, 32, 32)
assert label.shape == (4,)
def test_dataloader_parallel_timeout():
dataset = init_dataset()
class TimeoutTransform(Transform):
def __init__(self):
pass
def apply(self, input):
time.sleep(10)
return input
dataloader = DataLoader(
dataset,
sampler= | RandomSampler(dataset, batch_size=4, drop_last=False) | megengine.data.sampler.RandomSampler |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import time
import numpy as np
import pytest
from megengine.data.collator import Collator
from megengine.data.dataloader import DataLoader
from megengine.data.dataset import ArrayDataset
from megengine.data.sampler import RandomSampler, SequentialSampler
from megengine.data.transform import PseudoTransform, Transform
def init_dataset():
sample_num = 100
rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)
label = np.random.randint(0, 10, size=(sample_num,), dtype=int)
dataset = ArrayDataset(rand_data, label)
return dataset
def test_dataloader_init():
dataset = init_dataset()
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=2, divide=True)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, timeout=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=0, divide=True)
dataloader = DataLoader(dataset)
assert isinstance(dataloader.sampler, SequentialSampler)
assert isinstance(dataloader.transform, PseudoTransform)
assert isinstance(dataloader.collator, Collator)
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False)
)
assert len(dataloader) == 17
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=True)
)
assert len(dataloader) == 16
def test_dataloader_serial():
dataset = init_dataset()
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=4, drop_last=False)
)
for (data, label) in dataloader:
assert data.shape == (4, 1, 32, 32)
assert label.shape == (4,)
def test_dataloader_parallel():
# set max shared memory to 100M
os.environ["MGE_PLASMA_MEMORY"] = "100000000"
dataset = init_dataset()
dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
num_workers=2,
divide=False,
)
for (data, label) in dataloader:
assert data.shape == (4, 1, 32, 32)
assert label.shape == (4,)
dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
num_workers=2,
divide=True,
)
for (data, label) in dataloader:
assert data.shape == (4, 1, 32, 32)
assert label.shape == (4,)
def test_dataloader_parallel_timeout():
dataset = init_dataset()
class TimeoutTransform(Transform):
def __init__(self):
pass
def apply(self, input):
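# the 10s sleep comfortably exceeds the 2-second timeout configured below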
time.sleep(10)
return input
dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
transform=TimeoutTransform(),
num_workers=2,
timeout=2,
)
with pytest.raises(RuntimeError, match=r".*timeout.*"):
data_iter = iter(dataloader)
batch_data = next(data_iter)
def test_dataloader_parallel_worker_exception():
dataset = init_dataset()
class FakeErrorTransform(Transform):
def __init__(self):
pass
def apply(self, input):
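# "x" is deliberately undefined: the NameError simulates a crash inside a worker process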
y = x + 1
return input
dataloader = DataLoader(
dataset,
sampler= | RandomSampler(dataset, batch_size=4, drop_last=False) | megengine.data.sampler.RandomSampler |
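The prompt above is truncated at the sampler argument, so the rest of test_dataloader_parallel_worker_exception is not shown. A plausible continuation, sketched only as an illustration (the num_workers value and the exact exception type are assumptions, not taken from the source):

dataloader = DataLoader(
    dataset,
    sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
    transform=FakeErrorTransform(),
    num_workers=2,  # assumed: run the failing transform in worker processes
)
# the NameError raised inside a worker is assumed to surface in the main
# process as a RuntimeError once iteration begins
with pytest.raises(RuntimeError):
    data_iter = iter(dataloader)
    batch_data = next(data_iter)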
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = | get_device_count("gpu") | megengine.device.get_device_count |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": | trace_option.use_symbolic_shape() | megengine.core._trace_option.use_symbolic_shape |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": | get_option("async_level") | megengine.core.get_option |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": | get_option("enable_drop") | megengine.core.get_option |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": | get_option("max_recompute_time") | megengine.core.get_option |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": | get_option("catch_worker_execption") | megengine.core.get_option |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": | get_option("enable_host_compute") | megengine.core.get_option |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": | get_option("disable_memory_forwarding") | megengine.core.get_option |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": | get_option("enable_dtr_auto_drop") | megengine.core.get_option |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": | get_option("enable_dtr_sqrt_sampling") | megengine.core.get_option |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": | get_option("dtr_eviction_threshold") | megengine.core.get_option |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": | get_option("dtr_evictee_minimum_size") | megengine.core.get_option |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": | _get_convert_inputs() | megengine.core._imperative_rt.core2._get_convert_inputs |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": | _get_amp_dtype_autocast() | megengine.core._imperative_rt.core2._get_amp_dtype_autocast |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": _get_amp_dtype_autocast(),
"amp_high_prec_dtype": | _get_amp_high_prec_dtype() | megengine.core._imperative_rt.core2._get_amp_high_prec_dtype |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": _get_amp_dtype_autocast(),
"amp_high_prec_dtype": _get_amp_high_prec_dtype(),
"amp_low_prec_dtype": | _get_amp_low_prec_dtype() | megengine.core._imperative_rt.core2._get_amp_low_prec_dtype |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": _get_amp_dtype_autocast(),
"amp_high_prec_dtype": _get_amp_high_prec_dtype(),
"amp_low_prec_dtype": _get_amp_low_prec_dtype(),
}
yield
env_vars2 = {
"symbolic_shape": | trace_option.use_symbolic_shape() | megengine.core._trace_option.use_symbolic_shape |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": _get_amp_dtype_autocast(),
"amp_high_prec_dtype": _get_amp_high_prec_dtype(),
"amp_low_prec_dtype": _get_amp_low_prec_dtype(),
}
yield
env_vars2 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": | get_option("async_level") | megengine.core.get_option |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": _get_amp_dtype_autocast(),
"amp_high_prec_dtype": _get_amp_high_prec_dtype(),
"amp_low_prec_dtype": _get_amp_low_prec_dtype(),
}
yield
env_vars2 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": | get_option("enable_drop") | megengine.core.get_option |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": _get_amp_dtype_autocast(),
"amp_high_prec_dtype": _get_amp_high_prec_dtype(),
"amp_low_prec_dtype": _get_amp_low_prec_dtype(),
}
yield
env_vars2 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": | get_option("max_recompute_time") | megengine.core.get_option |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": _get_amp_dtype_autocast(),
"amp_high_prec_dtype": _get_amp_high_prec_dtype(),
"amp_low_prec_dtype": _get_amp_low_prec_dtype(),
}
yield
env_vars2 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": | get_option("catch_worker_execption") | megengine.core.get_option |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": _get_amp_dtype_autocast(),
"amp_high_prec_dtype": _get_amp_high_prec_dtype(),
"amp_low_prec_dtype": _get_amp_low_prec_dtype(),
}
yield
env_vars2 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": | get_option("enable_host_compute") | megengine.core.get_option |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": _get_amp_dtype_autocast(),
"amp_high_prec_dtype": _get_amp_high_prec_dtype(),
"amp_low_prec_dtype": _get_amp_low_prec_dtype(),
}
yield
env_vars2 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": | get_option("disable_memory_forwarding") | megengine.core.get_option |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": _get_amp_dtype_autocast(),
"amp_high_prec_dtype": _get_amp_high_prec_dtype(),
"amp_low_prec_dtype": _get_amp_low_prec_dtype(),
}
yield
env_vars2 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": | get_option("enable_dtr_auto_drop") | megengine.core.get_option |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": _get_amp_dtype_autocast(),
"amp_high_prec_dtype": _get_amp_high_prec_dtype(),
"amp_low_prec_dtype": _get_amp_low_prec_dtype(),
}
yield
env_vars2 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": | get_option("enable_dtr_sqrt_sampling") | megengine.core.get_option |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": _get_amp_dtype_autocast(),
"amp_high_prec_dtype": _get_amp_high_prec_dtype(),
"amp_low_prec_dtype": _get_amp_low_prec_dtype(),
}
yield
env_vars2 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": | get_option("dtr_eviction_threshold") | megengine.core.get_option |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": _get_amp_dtype_autocast(),
"amp_high_prec_dtype": _get_amp_high_prec_dtype(),
"amp_low_prec_dtype": _get_amp_low_prec_dtype(),
}
yield
env_vars2 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": | get_option("dtr_evictee_minimum_size") | megengine.core.get_option |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": _get_amp_dtype_autocast(),
"amp_high_prec_dtype": _get_amp_high_prec_dtype(),
"amp_low_prec_dtype": _get_amp_low_prec_dtype(),
}
yield
env_vars2 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": | _get_convert_inputs() | megengine.core._imperative_rt.core2._get_convert_inputs |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": _get_amp_dtype_autocast(),
"amp_high_prec_dtype": _get_amp_high_prec_dtype(),
"amp_low_prec_dtype": _get_amp_low_prec_dtype(),
}
yield
env_vars2 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": | _get_amp_dtype_autocast() | megengine.core._imperative_rt.core2._get_amp_dtype_autocast |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": _get_amp_dtype_autocast(),
"amp_high_prec_dtype": _get_amp_high_prec_dtype(),
"amp_low_prec_dtype": _get_amp_low_prec_dtype(),
}
yield
env_vars2 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": _get_amp_dtype_autocast(),
"amp_high_prec_dtype": | _get_amp_high_prec_dtype() | megengine.core._imperative_rt.core2._get_amp_high_prec_dtype |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": _get_amp_dtype_autocast(),
"amp_high_prec_dtype": _get_amp_high_prec_dtype(),
"amp_low_prec_dtype": _get_amp_low_prec_dtype(),
}
yield
env_vars2 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": _get_amp_dtype_autocast(),
"amp_high_prec_dtype": _get_amp_high_prec_dtype(),
"amp_low_prec_dtype": | _get_amp_low_prec_dtype() | megengine.core._imperative_rt.core2._get_amp_low_prec_dtype |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if | mge.is_cuda_available() | megengine.is_cuda_available |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
self.pool1 = AvgPool2d(2)
self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
self.fc1 = Linear(500, 10, bias=True)
self.bn0 = None
self.bn1 = None
if has_bn:
self.bn0 = BatchNorm2d(20)
self.bn1 = BatchNorm2d(20)
def forward(self, x):
x = self.conv0(x)
if self.bn0:
x = self.bn0(x)
x = F.relu(x)
x = self.pool0(x)
x = self.conv1(x)
if self.bn1:
x = self.bn1(x)
x = F.relu(x)
x = self.pool1(x)
x = F.flatten(x, 1)
x = self.fc0(x)
x = F.relu(x)
x = self.fc1(x)
return x
def train(data, label, net, opt, gm):
opt.clear_grad()
with gm:
pred = net(data)
loss = F.nn.cross_entropy(pred, label)
gm.backward(loss)
opt.step()
return loss
def update_model(model_path):
"""
Update the dumped model with test cases for new reference values.
The model with pre-trained weights is trained for one iter with the test data attached.
    The loss and the updated net state dict are dumped.
.. code-block:: python
from test_dp_correctness import update_model
update_model('mnist_model_with_test.mge') # for gpu
update_model('mnist_model_with_test_cpu.mge') # for cpu
"""
net = MnistNet(has_bn=True)
checkpoint = | mge.load(model_path) | megengine.load |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
self.pool1 = AvgPool2d(2)
self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
self.fc1 = Linear(500, 10, bias=True)
self.bn0 = None
self.bn1 = None
if has_bn:
self.bn0 = BatchNorm2d(20)
self.bn1 = BatchNorm2d(20)
def forward(self, x):
x = self.conv0(x)
if self.bn0:
x = self.bn0(x)
x = F.relu(x)
x = self.pool0(x)
x = self.conv1(x)
if self.bn1:
x = self.bn1(x)
x = F.relu(x)
x = self.pool1(x)
x = F.flatten(x, 1)
x = self.fc0(x)
x = F.relu(x)
x = self.fc1(x)
return x
def train(data, label, net, opt, gm):
opt.clear_grad()
with gm:
pred = net(data)
loss = F.nn.cross_entropy(pred, label)
gm.backward(loss)
opt.step()
return loss
def update_model(model_path):
"""
Update the dumped model with test cases for new reference values.
The model with pre-trained weights is trained for one iter with the test data attached.
    The loss and the updated net state dict are dumped.
.. code-block:: python
from test_dp_correctness import update_model
update_model('mnist_model_with_test.mge') # for gpu
update_model('mnist_model_with_test_cpu.mge') # for cpu
"""
net = MnistNet(has_bn=True)
checkpoint = mge.load(model_path)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
gm = ad.GradManager().attach(
net.parameters(), callbacks=[dist.make_allreduce_cb("MEAN", dist.WORLD)]
)
data = | Tensor(checkpoint["data"], dtype=np.float32) | megengine.tensor.Tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
self.pool1 = AvgPool2d(2)
self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
self.fc1 = Linear(500, 10, bias=True)
self.bn0 = None
self.bn1 = None
if has_bn:
self.bn0 = BatchNorm2d(20)
self.bn1 = BatchNorm2d(20)
def forward(self, x):
x = self.conv0(x)
if self.bn0:
x = self.bn0(x)
x = F.relu(x)
x = self.pool0(x)
x = self.conv1(x)
if self.bn1:
x = self.bn1(x)
x = F.relu(x)
x = self.pool1(x)
x = F.flatten(x, 1)
x = self.fc0(x)
x = F.relu(x)
x = self.fc1(x)
return x
def train(data, label, net, opt, gm):
opt.clear_grad()
with gm:
pred = net(data)
loss = F.nn.cross_entropy(pred, label)
gm.backward(loss)
opt.step()
return loss
def update_model(model_path):
"""
Update the dumped model with test cases for new reference values.
The model with pre-trained weights is trained for one iter with the test data attached.
    The loss and the updated net state dict are dumped.
.. code-block:: python
from test_dp_correctness import update_model
update_model('mnist_model_with_test.mge') # for gpu
update_model('mnist_model_with_test_cpu.mge') # for cpu
"""
net = MnistNet(has_bn=True)
checkpoint = mge.load(model_path)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
gm = ad.GradManager().attach(
net.parameters(), callbacks=[dist.make_allreduce_cb("MEAN", dist.WORLD)]
)
data = Tensor(checkpoint["data"], dtype=np.float32)
label = | Tensor(checkpoint["label"], dtype=np.int32) | megengine.tensor.Tensor |
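# Note on the GradManager wiring above: dist.make_allreduce_cb("MEAN",
# dist.WORLD) all-reduces and averages each parameter's gradient across the
# ranks during backward, so the data-parallel update matches a single-process
# update whenever every rank sees the same batch.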
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
self.pool1 = AvgPool2d(2)
self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
self.fc1 = Linear(500, 10, bias=True)
self.bn0 = None
self.bn1 = None
if has_bn:
self.bn0 = BatchNorm2d(20)
self.bn1 = BatchNorm2d(20)
def forward(self, x):
x = self.conv0(x)
if self.bn0:
x = self.bn0(x)
x = F.relu(x)
x = self.pool0(x)
x = self.conv1(x)
if self.bn1:
x = self.bn1(x)
x = F.relu(x)
x = self.pool1(x)
x = F.flatten(x, 1)
x = self.fc0(x)
x = F.relu(x)
x = self.fc1(x)
return x
def train(data, label, net, opt, gm):
opt.clear_grad()
with gm:
pred = net(data)
loss = F.nn.cross_entropy(pred, label)
gm.backward(loss)
opt.step()
return loss
def update_model(model_path):
"""
Update the dumped model with test cases for new reference values.
The model with pre-trained weights is trained for one iter with the test data attached.
    The loss and the updated net state dict are dumped.
.. code-block:: python
from test_dp_correctness import update_model
update_model('mnist_model_with_test.mge') # for gpu
update_model('mnist_model_with_test_cpu.mge') # for cpu
"""
net = MnistNet(has_bn=True)
checkpoint = mge.load(model_path)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
gm = ad.GradManager().attach(
net.parameters(), callbacks=[dist.make_allreduce_cb("MEAN", dist.WORLD)]
)
data = Tensor(checkpoint["data"], dtype=np.float32)
label = Tensor(checkpoint["label"], dtype=np.int32)
    loss = train(data, label, net=net, opt=opt, gm=gm)
xpu_name = get_xpu_name()
checkpoint.update(
{"net_updated": net.state_dict(), "loss": loss.numpy(), "xpu": xpu_name}
)
| mge.serialization.save(checkpoint, model_path) | megengine.serialization.save |
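# Checkpoint layout, as read and written by update_model above:
#     "net_init"      -- initial MnistNet state dict
#     "sgd_lr"        -- learning rate for the SGD optimizer
#     "data", "label" -- the attached test batch
#     "net_updated"   -- reference state dict after one training iter
#     "loss"          -- reference loss value
#     "xpu"           -- device name the reference values were produced on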
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
self.pool1 = AvgPool2d(2)
self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
self.fc1 = Linear(500, 10, bias=True)
self.bn0 = None
self.bn1 = None
if has_bn:
self.bn0 = BatchNorm2d(20)
self.bn1 = BatchNorm2d(20)
def forward(self, x):
x = self.conv0(x)
if self.bn0:
x = self.bn0(x)
x = F.relu(x)
x = self.pool0(x)
x = self.conv1(x)
if self.bn1:
x = self.bn1(x)
x = F.relu(x)
x = self.pool1(x)
x = F.flatten(x, 1)
x = self.fc0(x)
x = F.relu(x)
x = self.fc1(x)
return x
def train(data, label, net, opt, gm):
opt.clear_grad()
with gm:
pred = net(data)
loss = F.nn.cross_entropy(pred, label)
gm.backward(loss)
opt.step()
return loss
def update_model(model_path):
"""
Update the dumped model with test cases for new reference values.
The model with pre-trained weights is trained for one iter with the test data attached.
    The loss and the updated net state dict are dumped.
.. code-block:: python
from test_dp_correctness import update_model
update_model('mnist_model_with_test.mge') # for gpu
update_model('mnist_model_with_test_cpu.mge') # for cpu
"""
net = MnistNet(has_bn=True)
checkpoint = mge.load(model_path)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
gm = ad.GradManager().attach(
net.parameters(), callbacks=[dist.make_allreduce_cb("MEAN", dist.WORLD)]
)
data = Tensor(checkpoint["data"], dtype=np.float32)
label = Tensor(checkpoint["label"], dtype=np.int32)
    loss = train(data, label, net=net, opt=opt, gm=gm)
xpu_name = get_xpu_name()
checkpoint.update(
{"net_updated": net.state_dict(), "loss": loss.numpy(), "xpu": xpu_name}
)
mge.serialization.save(checkpoint, model_path)
def run_test(
model_path, use_jit, use_symbolic, sublinear_memory_config=None, max_err=None,
):
"""
Load the model with test cases and run the training for one iter.
    The loss and updated weights are compared with the reference values to verify correctness.
    Dump a new file with updated results by calling update_model
if you think the test fails due to numerical rounding errors instead of bugs.
Please think twice before you do so.
"""
checkpoint = | mge.load(model_path) | megengine.load |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
self.pool1 = AvgPool2d(2)
self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
self.fc1 = Linear(500, 10, bias=True)
self.bn0 = None
self.bn1 = None
if has_bn:
self.bn0 = BatchNorm2d(20)
self.bn1 = BatchNorm2d(20)
def forward(self, x):
x = self.conv0(x)
if self.bn0:
x = self.bn0(x)
x = F.relu(x)
x = self.pool0(x)
x = self.conv1(x)
if self.bn1:
x = self.bn1(x)
x = F.relu(x)
x = self.pool1(x)
x = F.flatten(x, 1)
x = self.fc0(x)
x = F.relu(x)
x = self.fc1(x)
return x
def train(data, label, net, opt, gm):
opt.clear_grad()
with gm:
pred = net(data)
loss = F.nn.cross_entropy(pred, label)
gm.backward(loss)
opt.step()
return loss
def update_model(model_path):
"""
Update the dumped model with test cases for new reference values.
The model with pre-trained weights is trained for one iter with the test data attached.
    The loss and the updated net state dict are dumped.
.. code-block:: python
from test_dp_correctness import update_model
update_model('mnist_model_with_test.mge') # for gpu
update_model('mnist_model_with_test_cpu.mge') # for cpu
"""
net = MnistNet(has_bn=True)
checkpoint = mge.load(model_path)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
gm = ad.GradManager().attach(
net.parameters(), callbacks=[dist.make_allreduce_cb("MEAN", dist.WORLD)]
)
data = Tensor(checkpoint["data"], dtype=np.float32)
label = Tensor(checkpoint["label"], dtype=np.int32)
    loss = train(data, label, net=net, opt=opt, gm=gm)
xpu_name = get_xpu_name()
checkpoint.update(
{"net_updated": net.state_dict(), "loss": loss.numpy(), "xpu": xpu_name}
)
mge.serialization.save(checkpoint, model_path)
def run_test(
model_path, use_jit, use_symbolic, sublinear_memory_config=None, max_err=None,
):
"""
Load the model with test cases and run the training for one iter.
    The loss and updated weights are compared with the reference values to verify correctness.
    Dump a new file with updated results by calling update_model
if you think the test fails due to numerical rounding errors instead of bugs.
Please think twice before you do so.
"""
checkpoint = mge.load(model_path)
data = checkpoint["data"]
label = checkpoint["label"]
@dist.launcher
def worker(max_err):
net = MnistNet(has_bn=True)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
gm = ad.GradManager().attach(
net.parameters(), callbacks=[dist.make_allreduce_cb("MEAN", dist.WORLD)]
)
        # use the same data and label on every rank,
        # so that the result does not depend on the number of GPUs
data_train = Tensor(data)
label_train = Tensor(label)
loss = train(data_train, label_train, net, opt, gm)
np.testing.assert_allclose(loss.numpy(), checkpoint["loss"], atol=max_err)
if dist.get_rank():
return
for param, param_ref in zip(
net.state_dict().items(), checkpoint["net_updated"].items()
):
assert param[0] == param_ref[0]
if "bn" in param[0]:
ref = param_ref[1].reshape(param[1].shape)
np.testing.assert_allclose(param[1], ref, atol=max_err)
else:
np.testing.assert_allclose(param[1], param_ref[1], atol=max_err)
worker(max_err)
@pytest.mark.require_ngpu(2)
@pytest.mark.isolated_distributed
def test_dp_correctness():
model_name = "mnist_model_with_test.mge"
model_path = os.path.join(os.path.dirname(__file__), model_name)
| set_conv_execution_strategy("HEURISTIC_REPRODUCIBLE") | megengine.functional.debug_param.set_conv_execution_strategy |
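# Usage sketch for the runner defined above (argument values hypothetical):
# a reproducible conv strategy keeps the comparison against the stored
# reference values stable across runs.
#
#     run_test(model_path, use_jit=False, use_symbolic=False, max_err=1e-5)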
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = | Conv2d(1, 20, kernel_size=5, bias=True) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = | AvgPool2d(2) | megengine.module.AvgPool2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = | Conv2d(20, 20, kernel_size=5, bias=True) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
self.pool1 = | AvgPool2d(2) | megengine.module.AvgPool2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
self.pool1 = AvgPool2d(2)
self.fc0 = | Linear(20 * 4 * 4, 500, bias=True) | megengine.module.Linear |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
self.pool1 = AvgPool2d(2)
self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
        self.fc1 = Linear(500, 10, bias=True)
self.bn0 = None
self.bn1 = None
if has_bn:
self.bn0 = BatchNorm2d(20)
self.bn1 = BatchNorm2d(20)
def forward(self, x):
x = self.conv0(x)
if self.bn0:
x = self.bn0(x)
        x = F.relu(x)
x = self.pool0(x)
x = self.conv1(x)
if self.bn1:
x = self.bn1(x)
        x = F.relu(x)
x = self.pool1(x)
        x = F.flatten(x, 1)
x = self.fc0(x)
        x = F.relu(x)
x = self.fc1(x)
return x
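# One optimization step: forward pass, cross-entropy loss, backprop through the
# GradManager, then a parameter update by the optimizer.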
def train(data, label, net, opt, gm):
opt.clear_grad()
with gm:
pred = net(data)
        loss = F.nn.cross_entropy(pred, label)
        # Reconstructed ending (the source was truncated here; this is the
        # standard MegEngine pattern): backprop inside the GradManager scope,
        # then step the optimizer outside it.
        gm.backward(loss)
    opt.step()
    return loss
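# Minimal usage sketch (hypothetical, not part of the original test file): run one
# training step on random data, assuming the standard SGD/GradManager APIs imported above.
if __name__ == "__main__":
    net = MnistNet(has_bn=True)
    opt = SGD(net.parameters(), lr=0.01, momentum=0.9)
    gm = ad.GradManager().attach(net.parameters())
    data = Tensor(np.random.randn(8, 1, 28, 28).astype("float32"))
    label = Tensor(np.random.randint(0, 10, size=(8,)).astype("int32"))
    print("loss:", train(data, label, net, opt, gm))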