# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Tuple

import torch
from torch import nn
from torch.nn import functional as F
from torchvision.ops import batched_nms, masks_to_boxes

from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.data.datasets import register_coco_instances
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.postprocessing import sem_seg_postprocess
from detectron2.structures import Boxes, ImageList, Instances, BitMasks
from detectron2.utils.memory import retry_if_cuda_oom
from detectron2.utils.visualizer import Visualizer

from mask2former.modeling.criterion import SetCriterion
from mask2former.modeling.matcher import HungarianMatcher

import modeling_pretrain as vmae_transformers
import matplotlib.pyplot as plt
import os

root = os.path.expanduser(os.getenv("DETECTRON2_DATASETS", "datasets"))
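# Register a class-agnostic COCO val split for evaluation. The json/image paths
# below assume the standard detectron2 dataset layout under $DETECTRON2_DATASETS
# (see `root` above).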
register_coco_instances(
    "cls_agnostic_coco",
    {},
    os.path.join(root, "coco/annotations/coco_cls_agnostic_instances_val2017.json"),
    os.path.join(root, "coco/val2017"),
)


class CWMSegmentPredictorV2(nn.Module):
    """
    Main class for mask classification semantic segmentation architectures.
    """
    @configurable
    def __init__(
        self,
        *,
        criterion: nn.Module,
        num_queries: int,
        object_mask_threshold: float,
        overlap_threshold: float,
        metadata,
        size_divisibility: int,
        sem_seg_postprocess_before_inference: bool,
        pixel_mean: Tuple[float],
        pixel_std: Tuple[float],
        # inference
        semantic_on: bool,
        panoptic_on: bool,
        instance_on: bool,
        test_topk_per_image: int,
        output_dir: str,
    ):
""" | |
Args: | |
backbone: a backbone module, must follow detectron2's backbone interface | |
sem_seg_head: a module that predicts semantic segmentation from backbone features | |
criterion: a module that defines the loss | |
num_queries: int, number of queries | |
object_mask_threshold: float, threshold to filter query based on classification score | |
for panoptic segmentation inference | |
overlap_threshold: overlap threshold used in general inference for panoptic segmentation | |
metadata: dataset meta, get `thing` and `stuff` category names for panoptic | |
segmentation inference | |
size_divisibility: Some backbones require the input height and width to be divisible by a | |
specific integer. We can use this to override such requirement. | |
sem_seg_postprocess_before_inference: whether to resize the prediction back | |
to original input size before semantic segmentation inference or after. | |
For high-resolution dataset like Mapillary, resizing predictions before | |
inference will cause OOM error. | |
pixel_mean, pixel_std: list or tuple with #channels element, representing | |
the per-channel mean and std to be used to normalize the input image | |
semantic_on: bool, whether to output semantic segmentation prediction | |
instance_on: bool, whether to output instance segmentation prediction | |
panoptic_on: bool, whether to output panoptic segmentation prediction | |
test_topk_per_image: int, instance segmentation parameter, keep topk instances per image | |
""" | |
        super().__init__()
        self.criterion = criterion
        self.num_queries = num_queries
        self.overlap_threshold = overlap_threshold
        self.object_mask_threshold = object_mask_threshold
        self.metadata = metadata
        # NOTE: unlike Mask2Former, this model has no `self.backbone`, so a negative
        # SIZE_DIVISIBILITY cannot fall back to a backbone's value; require it in the config.
        assert size_divisibility >= 0, "SIZE_DIVISIBILITY must be set explicitly for this model"
        self.size_divisibility = size_divisibility
        self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference
        self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
        self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)

        # additional args
        self.semantic_on = semantic_on
        self.instance_on = instance_on
        self.panoptic_on = panoptic_on
        self.test_topk_per_image = test_topk_per_image

        if not self.semantic_on:
            assert self.sem_seg_postprocess_before_inference
        # Load CWM predictor
        self.output_dir = output_dir
        if 'cwm' in output_dir:
            model_func = vmae_transformers.base_8x8patch_2frames_1tube_flash
            predictor = model_func().cuda()
            load_path = '/ccn2/u/feigelis/model_checkpoints/kevin_checkpoints/' + \
                        'fulltrain_kinetics_8x8patch_rotated_table_distributed_with_ddp' + \
                        '_copied_from_oldnode/checkpoint-3199.pth'
            did_load = predictor.load_state_dict(
                torch.load(load_path, map_location=torch.device("cpu"))['model']
            )
            print('Load CWM pretrained predictor:', did_load)
            # Freeze the pretrained predictor; only the mask decoder head below is trained.
            self.predictor = predictor.eval().requires_grad_(False)
            self.num_patches = self.predictor.encoder.num_patches
            self.patch_size = self.predictor.encoder.patch_size[-1]
            self.mask_ratio = 0.99
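            # NOTE: self.mask_ratio is only used by the commented-out random-masking
            # variant in forward(); with the default mask, the entire second frame is masked.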
            num_hidden_layers = 4
            hidden_dim = 1024
            input_dim = self.predictor.decoder.embed_dim
            decoder_layers = [torch.nn.Linear(input_dim, hidden_dim), torch.nn.ReLU()]
            for i in range(num_hidden_layers):
                decoder_layers.append(torch.nn.Linear(hidden_dim, hidden_dim))
                decoder_layers.append(torch.nn.ReLU())
            decoder_layers.append(torch.nn.Linear(hidden_dim, num_queries))
            self.decoder = torch.nn.Sequential(*decoder_layers).cuda()
    @classmethod
    def from_config(cls, cfg):
        # Loss parameters:
        no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT

        # loss weights
        class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT
        dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT
        mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT

        # building criterion
        matcher = HungarianMatcher(
            cost_class=class_weight,
            cost_mask=mask_weight,
            cost_dice=dice_weight,
            num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,
        )

        weight_dict = {"loss_mask": mask_weight, "loss_dice": dice_weight}
        losses = ["masks"]

        criterion = SetCriterion(
            num_classes=80,
            matcher=matcher,
            weight_dict=weight_dict,
            eos_coef=no_object_weight,
            losses=losses,
            num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,
            oversample_ratio=cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO,
            importance_sample_ratio=cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO,
        )

        return {
            "criterion": criterion,
            "num_queries": cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES,
            "object_mask_threshold": cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD,
            "overlap_threshold": cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD,
            "metadata": MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
            "size_divisibility": cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY,
            "sem_seg_postprocess_before_inference": (
                cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE
                or cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON
                or cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON
            ),
            "pixel_mean": cfg.MODEL.PIXEL_MEAN,
            "pixel_std": cfg.MODEL.PIXEL_STD,
            # inference
            "semantic_on": cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON,
            "instance_on": cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON,
            "panoptic_on": cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON,
            "test_topk_per_image": cfg.TEST.DETECTIONS_PER_IMAGE,
            "output_dir": cfg.OUTPUT_DIR,
        }
    @property
    def device(self):
        return self.pixel_mean.device
    def forward(self, batched_inputs):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:
                   * "image": Tensor, image in (C, H, W) format.
                   * "instances": per-region ground truth
                   * Other information that's included in the original dicts, such as:
                     "height", "width" (int): the output resolution of the model (may be different
                     from input resolution), used in inference.
        Returns:
            list[dict]:
                each dict has the results for one image. The dict contains the following keys:

                * "sem_seg":
                    A Tensor that represents the
                    per-pixel segmentation predicted by the head.
                    The prediction has shape KxHxW that represents the logits of
                    each class for each pixel.
                * "panoptic_seg":
                    A tuple that represents the panoptic output
                    panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment.
                    segments_info (list[dict]): Describe each segment in `panoptic_seg`.
                        Each dict contains keys "id", "category_id", "isthing".
        """
images = [x["image"].to(self.device) for x in batched_inputs] | |
images = [(x - self.pixel_mean) / self.pixel_std for x in images] | |
images = ImageList.from_tensors(images, self.size_divisibility) | |
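        # ImageList pads every image in the batch to a shared size whose height and
        # width are divisible by self.size_divisibility.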
        ###
        # image_size = images.image_sizes[0]
        # processed_results = []
        # input_per_image = batched_inputs[0]
        # height = input_per_image.get("height", image_size[0])
        # width = input_per_image.get("width", image_size[1])
        #
        # gt_instances = [x["instances"] for x in batched_inputs]
        # targets = []
        # for targets_per_image in gt_instances:
        #     # pad gt
        #     try:
        #         gt_masks = targets_per_image.gt_masks
        #     except AttributeError:
        #         print('NO GT MASKS')
        #         gt_masks = torch.zeros(1, height, width)
        #
        #     targets.append(
        #         {
        #             "labels": targets_per_image.gt_classes,
        #             "masks": gt_masks,
        #         }
        #     )
        #
        # mask_cls_results = torch.ones(1, self.num_queries, 81)  # .to(self.device)
        # mask_pred_result = targets[0]['masks']  # .to(self.device)
        #
        # processed_results.append({})
        # if self.instance_on:
        #     instance_r = retry_if_cuda_oom(self.instance_inference)(mask_cls_results[0], mask_pred_result)
        #     processed_results[-1]["instances"] = instance_r
        # return processed_results
        ###
        with torch.cuda.amp.autocast(enabled=True):
            with torch.no_grad():
                if not self.training:
                    # resize to the predictor's input resolution (a multiple of the patch size)
                    x = F.interpolate(images.tensor, size=(224, 224), mode="bilinear", align_corners=False)
                    x = x.to(torch.float16).unsqueeze(2).expand(-1, -1, 2, -1, -1)
                else:
                    x = images.tensor.to(torch.float16).unsqueeze(2).expand(-1, -1, 2, -1, -1)

                # Duplicate the image into a static two-frame clip and mask out the
                # entire second frame; the predictor must fill it in from the first frame.
                mask = torch.zeros([x.shape[0], self.num_patches], device=x.device).bool()
                mask[:, int(self.num_patches // 2):] = 1
                # num_visibles = int((1 - self.mask_ratio) * int(self.num_patches // 2)) + 1
                # rand_idx = torch.randint(low=int(self.num_patches // 2), high=self.num_patches,
                #                          size=(x.shape[0], int(num_visibles)))
                # for i in range(x.shape[0]):
                #     mask[i, rand_idx[i]] = 0
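                # The frozen CWM encoder embeds only the visible (first-frame) patches;
                # encoder_to_decoder projects those embeddings into the decoder width, and
                # the trainable MLP head below maps each patch feature to per-query logits.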
                feature = self.predictor.encoder(x, mask=mask)
                feature = self.predictor.encoder_to_decoder(feature)
                # out = self.predictor(x, mask)

        # run the trainable head outside no_grad so its weights receive gradients
        logits = self.decoder(feature).float()
        B, N, _ = logits.shape
        # reshape per-patch logits into a square grid of per-query masks: [B, num_queries, H, W]
        pred_masks = logits.view(B, int(N ** 0.5), int(N ** 0.5), self.num_queries).permute(0, 3, 1, 2)
        outputs = {"pred_masks": pred_masks}
        if self.training:
            # mask classification target
            if "instances" in batched_inputs[0]:
                gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
                targets = self.prepare_targets(gt_instances, images)
            else:
                targets = None

            # bipartite matching-based loss
            losses = self.criterion(outputs, targets)

            for k in list(losses.keys()):
                if k in self.criterion.weight_dict:
                    losses[k] *= self.criterion.weight_dict[k]
                else:
                    # remove this loss if not specified in `weight_dict`
                    losses.pop(k)
            return losses
        else:
            # mask_cls_results = outputs["pred_logits"]
            # no classification head: use uniform dummy scores over 80 COCO classes + no-object
            mask_cls_results = torch.ones(x.shape[0], self.num_queries, 81).to(self.device)
            mask_pred_results = outputs["pred_masks"]
            # upsample masks
            mask_pred_results = F.interpolate(
                mask_pred_results,
                size=(images.tensor.shape[-2], images.tensor.shape[-1]),
                mode="bilinear",
                align_corners=False,
            )

            # if "instances" in batched_inputs[0]:
            #     gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
            #     targets = self.prepare_targets(gt_instances, images)
            # else:
            #     targets = None

            del outputs
            processed_results = []
            for mask_cls_result, mask_pred_result, input_per_image, image_size in zip(
                mask_cls_results, mask_pred_results, batched_inputs, images.image_sizes
            ):
                height = input_per_image.get("height", image_size[0])
                width = input_per_image.get("width", image_size[1])
                processed_results.append({})

                if self.sem_seg_postprocess_before_inference:
                    mask_pred_result = retry_if_cuda_oom(sem_seg_postprocess)(
                        mask_pred_result, image_size, height, width
                    )
                    mask_cls_result = mask_cls_result.to(mask_pred_result)

                # semantic segmentation inference
                if self.semantic_on:
                    r = retry_if_cuda_oom(self.semantic_inference)(mask_cls_result, mask_pred_result)
                    if not self.sem_seg_postprocess_before_inference:
                        r = retry_if_cuda_oom(sem_seg_postprocess)(r, image_size, height, width)
                    processed_results[-1]["sem_seg"] = r

                # panoptic segmentation inference
                if self.panoptic_on:
                    panoptic_r = retry_if_cuda_oom(self.panoptic_inference)(mask_cls_result, mask_pred_result)
                    processed_results[-1]["panoptic_seg"] = panoptic_r

                # instance segmentation inference
                if self.instance_on:
                    instance_r, nms_idx = retry_if_cuda_oom(self.instance_inference)(mask_cls_result, mask_pred_result)
                    processed_results[-1]["instances"] = instance_r
                # Visualization (debugging only; output paths are machine-specific)
                '''
                rgb_image = F.interpolate(images.tensor.float(), size=(height, width), mode='bilinear')
                visualizer = Visualizer(rgb_image.cpu().detach()[0].permute(1, 2, 0))
                visualizer = visualizer.draw_instance_predictions(instance_r)

                recon = torch.zeros(1, self.num_patches, self.patch_size ** 2 * 3)
                recon[mask] = out.float().cpu().detach()
                recon = self.unpatchify(recon[:, int(self.num_patches // 2):])
                recon = recon[0].permute(1, 2, 0).float().clamp(0, 1)

                # fig, axs = plt.subplots(1, 7, figsize=(20, 3))
                #
                # axs[0].imshow(images.tensor.float()[0].permute(1, 2, 0).cpu().detach())
                # axs[1].imshow(images.tensor.float()[0].permute(1, 2, 0).cpu().detach())
                # # axs[1].imshow(batched_inputs[0]['instances'].gt_masks.argmax(0))
                # axs[2].imshow(recon)
                # axs[3].imshow(feature[0].view(28, 28, -1)[..., 0:3].cpu().detach().float())
                # axs[4].imshow(feature[0].view(28, 28, -1)[..., 100:103].cpu().detach().float())
                # axs[5].imshow(feature[0].view(28, 28, -1)[..., 200:203].cpu().detach().float())
                # axs[6].imshow(visualizer.get_image())

                file_name = batched_inputs[0]['file_name'].split('/')[-1].split('.jpg')[0]
                # for a in axs:
                #     a.set_axis_off()

                fig, axs = plt.subplots(1, 2, figsize=(16, 6))
                axs[0].imshow(images.tensor.float()[0].permute(1, 2, 0).cpu().detach())
                axs[1].imshow(visualizer.get_image())
                plt.savefig(f"/ccn2/u/honglinc/temp/{file_name}.png", bbox_inches='tight')

                fig, axs = plt.subplots(10, 10, figsize=(10, 10))
                for a in axs:
                    for _a in a:
                        _a.set_axis_off()
                for i in range(mask_pred_result.shape[0]):
                    # print(mask_pred_result.shape, height, width)
                    mask_area_ratio = mask_pred_result[i].sigmoid().float().flatten().sum() / (height * width)
                    axs[i // 10, i % 10].imshow(mask_pred_result[i].cpu().detach() > 0)
                    nms = 1 if i in nms_idx else -1
                    axs[i // 10, i % 10].set_title(f'{mask_area_ratio.item():.2f}, {nms}', fontsize=11)
                plt.savefig(f"/ccn2/u/honglinc/temp/{file_name}_mask.png", bbox_inches='tight')
                '''

            return processed_results
    def prepare_targets(self, targets, images):
        h_pad, w_pad = images.tensor.shape[-2:]
        new_targets = []
        for targets_per_image in targets:
            # pad gt masks to the padded image size
            gt_masks = targets_per_image.gt_masks
            padded_masks = torch.zeros((gt_masks.shape[0], h_pad, w_pad), dtype=gt_masks.dtype, device=gt_masks.device)
            padded_masks[:, : gt_masks.shape[1], : gt_masks.shape[2]] = gt_masks
            new_targets.append(
                {
                    "labels": targets_per_image.gt_classes,
                    "masks": padded_masks,
                }
            )
        return new_targets
    def semantic_inference(self, mask_cls, mask_pred):
        mask_cls = F.softmax(mask_cls, dim=-1)[..., :-1]
        mask_pred = mask_pred.sigmoid()
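        # Marginalize over queries: each pixel's class score is the sum over queries of
        # P(class | query) * P(pixel in query's mask), yielding a [C, H, W] score map.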
        semseg = torch.einsum("qc,qhw->chw", mask_cls, mask_pred)
        return semseg
    def panoptic_inference(self, mask_cls, mask_pred):
        scores, labels = F.softmax(mask_cls, dim=-1).max(-1)
        mask_pred = mask_pred.sigmoid()

        # the last class index is the no-object class (this model has no sem_seg_head,
        # so take the number of real classes from the criterion)
        keep = labels.ne(self.criterion.num_classes) & (scores > self.object_mask_threshold)
        cur_scores = scores[keep]
        cur_classes = labels[keep]
        cur_masks = mask_pred[keep]
        cur_mask_cls = mask_cls[keep]
        cur_mask_cls = cur_mask_cls[:, :-1]

        cur_prob_masks = cur_scores.view(-1, 1, 1) * cur_masks
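        # Weight each mask by its classification confidence; the per-pixel argmax below
        # then assigns every pixel to exactly one query.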
        h, w = cur_masks.shape[-2:]
        panoptic_seg = torch.zeros((h, w), dtype=torch.int32, device=cur_masks.device)
        segments_info = []

        current_segment_id = 0

        if cur_masks.shape[0] == 0:
            # We didn't detect any mask :(
            return panoptic_seg, segments_info
        else:
            # take argmax
            cur_mask_ids = cur_prob_masks.argmax(0)
            stuff_memory_list = {}
            for k in range(cur_classes.shape[0]):
                pred_class = cur_classes[k].item()
                isthing = pred_class in self.metadata.thing_dataset_id_to_contiguous_id.values()
                mask_area = (cur_mask_ids == k).sum().item()
                original_area = (cur_masks[k] >= 0.5).sum().item()
                mask = (cur_mask_ids == k) & (cur_masks[k] >= 0.5)

                if mask_area > 0 and original_area > 0 and mask.sum().item() > 0:
                    if mask_area / original_area < self.overlap_threshold:
                        continue

                    # merge stuff regions
                    if not isthing:
                        if int(pred_class) in stuff_memory_list.keys():
                            panoptic_seg[mask] = stuff_memory_list[int(pred_class)]
                            continue
                        else:
                            stuff_memory_list[int(pred_class)] = current_segment_id + 1

                    current_segment_id += 1
                    panoptic_seg[mask] = current_segment_id

                    segments_info.append(
                        {
                            "id": current_segment_id,
                            "isthing": bool(isthing),
                            "category_id": int(pred_class),
                        }
                    )

            return panoptic_seg, segments_info
    def instance_inference(self, mask_cls, mask_pred):
        # mask_pred is already processed to have the same shape as the original input
        image_size = mask_pred.shape[-2:]
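        # Filter out degenerate predictions: masks covering less than 1% of the image
        # (noise) or more than 90% (background-like) are dropped before NMS.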
        mask_area_ratio = (mask_pred > 0).float().flatten(1, 2).sum(1) / (image_size[0] * image_size[1])
        mask_area_filter = (mask_area_ratio > 0.01) & (mask_area_ratio < 0.9)
        mask_pred = mask_pred[mask_area_filter]
        original_idx = torch.arange(mask_area_filter.shape[0], device=mask_pred.device)[mask_area_filter]

        try:
            box = masks_to_boxes(mask_pred > 0)
            # score each mask by its mean foreground probability
            scores = (mask_pred.sigmoid().flatten(1) * (mask_pred > 0).flatten(1)).sum(1) / (
                (mask_pred > 0).flatten(1).sum(1) + 1e-6)
            # all masks share one category, so this reduces to plain NMS at IoU 0.3
            nms_idx = batched_nms(box, scores, torch.zeros(box.shape[0], dtype=torch.long, device=box.device), 0.3)
            mask_pred = mask_pred[nms_idx]
            box = box[nms_idx]
        except Exception as e:
            print(e, mask_pred.shape, mask_area_filter.sum())
            box = torch.zeros(mask_pred.shape[0], 4).to(mask_pred)
            nms_idx = torch.arange(mask_pred.shape[0], device=mask_pred.device)

        nms_idx = original_idx[nms_idx]
        mask_pred = mask_pred.cpu()
        result = Instances(image_size)
        # mask (before sigmoid)
        result.pred_masks = (mask_pred > 0).float()
        result.pred_boxes = Boxes(box.cpu())
        # Uncomment the following to get boxes from masks (this is slow)
        # result.pred_boxes = BitMasks(mask_pred > 0).get_bounding_boxes()

        # calculate average mask prob
        mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * result.pred_masks.flatten(1)).sum(1) / (
            result.pred_masks.flatten(1).sum(1) + 1e-6)
        # class-agnostic output: constant confidence scaled by mask quality, all labels 0
        scores_per_image = torch.ones(mask_pred.size(0)).to(mask_pred.device)
        labels_per_image = torch.zeros(mask_pred.size(0)).to(mask_pred.device)

        result.scores = scores_per_image * mask_scores_per_image
        result.pred_classes = labels_per_image
        return result, nms_idx
    def unpatchify(self, x):
        """
        x: (N, L, patch_size**2 * 3)
        imgs: (N, 3, H, W)
        """
        p = self.patch_size
        h = w = int(x.shape[1] ** .5)
        assert h * w == x.shape[1]
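        # fold the (h, w) grid of p x p RGB patches back into a full image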
        x = x.reshape(shape=(x.shape[0], h, w, p, p, 3))
        x = torch.einsum('nhwpqc->nchpwq', x)
        imgs = x.reshape(shape=(x.shape[0], 3, h * p, w * p))
        return imgs
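

# Minimal usage sketch (hypothetical; assumes a Mask2Former-style config `cfg`
# providing the MODEL.MASK_FORMER keys read in from_config above):
#
#   model = CWMSegmentPredictorV2(**CWMSegmentPredictorV2.from_config(cfg)).eval()
#   with torch.no_grad():
#       outputs = model([{"image": image_chw, "height": h, "width": w}])
#   instances = outputs[0]["instances"]  # pred_masks, pred_boxes, scores, pred_classes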