# NSAQA/microprograms/errors/splash_micro_program.py
import sys, os
# distutils.core is only needed by the commented-out detectron2 setup below
# import distutils.core
# os.system('python -m pip install pyyaml==5.3.1')
# dist = distutils.core.run_setup("./detectron2/setup.py")
# temp = ' '.join([f"'{x}'" for x in dist.install_requires])
# cmd = "python -m pip install {0}".format(temp)
# os.system(cmd)
sys.path.insert(0, os.path.abspath('./detectron2'))
import detectron2
import numpy as np
import cv2
# from detectron2.utils.logger import setup_logger
# setup_logger()
# # from detectron2.modeling import build_model
# from detectron2 import model_zoo
# from detectron2.engine import DefaultPredictor
# from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
# from detectron2.data import MetadataCatalog, DatasetCatalog
# from detectron2.checkpoint import DetectionCheckpointer
# from detectron2.data.datasets import register_coco_instances
# cfg = get_cfg()
# cfg.OUTPUT_DIR = "./output/splash/"
# # model = build_model(cfg) # returns a torch.nn.Module
# cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
# cfg.DATASETS.TRAIN = ("splash_trains",)
# cfg.DATASETS.TEST = ()
# cfg.DATALOADER.NUM_WORKERS = 2
# cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml") # Let training initialize from model zoo
# cfg.SOLVER.IMS_PER_BATCH = 2 # This is the real "batch size" commonly known to deep learning people
# cfg.SOLVER.BASE_LR = 0.00025 # pick a good LR
# cfg.SOLVER.MAX_ITER = 300 # 300 iterations seems good enough for this toy dataset; you will need to train longer for a practical dataset
# cfg.SOLVER.STEPS = [] # do not decay learning rate
# cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 # The "RoIHead batch size". 128 is faster, and good enough for this toy dataset (default: 512)
# cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
# cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth") # path to the model we just trained
# cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set a custom testing threshold
# predictor = DefaultPredictor(cfg)
# register_coco_instances("splash_trains", {}, "./coco_annotations/splash/train.json", "./data/Splashes")
# register_coco_instances("splash_vals", {}, "./coco_annotations/splash/val.json", "./data/Splashes")
# from detectron2.utils.visualizer import ColorMode
# splash_metadata = MetadataCatalog.get('splash_vals')
# dataset_dicts = DatasetCatalog.get("splash_vals")
# outputs_array = []
# for d in dataset_dicts:
# im = cv2.imread(d["file_name"])
# outputs = predictor(im)
# outputs_array.append(outputs) # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format
# v = Visualizer(im[:, :, ::-1],
# metadata=splash_metadata,
# scale=0.5,
# instance_mode=ColorMode.IMAGE_BW # remove the colors of unsegmented pixels. This option is only available for segmentation models
# )
# out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
# img = out.get_image()[:, :, ::-1]
# filename = os.path.join("./output", d["file_name"][2:])
# if not cv2.imwrite(filename, img):
# print('no image written')
import torch
# SPLASH MICRO PROGRAM
# Find the percentage of "True" pixels in pred_masks (taken from the predictor outputs);
# the lower the percentage, the smaller the splash and the better the entry.
# May need to be calibrated to scoring points.
def get_splash_pred_mask(output):
    """Return the boolean mask of the highest-confidence splash instance, or None."""
    pred_classes = output['instances'].pred_classes.cpu().numpy()
    # class 0 is the splash class
    splashes = np.where(pred_classes == 0)[0]
    scores = output['instances'].scores[splashes]
    if len(scores) == 0:
        return None
    pred_masks = output['instances'].pred_masks[splashes]
    # keep the splash instance with the highest confidence
    max_instance = torch.argmax(scores)
    pred_mask = np.array(pred_masks[max_instance].cpu())
    return pred_mask
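# Example (hypothetical usage; assumes `predictor` is a detectron2 DefaultPredictor
# whose class 0 is "splash", as in the commented-out setup above):
#     im = cv2.imread("./data/Splashes/example.jpg")   # hypothetical path
#     outputs = predictor(im)
#     mask = get_splash_pred_mask(outputs)  # boolean HxW array, or None if no splash found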
# Given the predictor output and the splash mask picked above, return the
# fraction of the frame covered by splash (None if there is no mask).
def splash_area_percentage(output, pred_mask=None):
    if pred_mask is None:
        return None
    # loop over rows to get the total number of splash pixels
    totalSum = 0
    for j in range(len(pred_mask)):
        totalSum += pred_mask[j].sum()
    # return the fraction of the image that is splash
    return totalSum / (len(pred_mask) * len(pred_mask[0]))
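# A minimal vectorized sketch of the same computation (this helper and its name are
# an addition, not part of the original pipeline): pred_mask is a boolean HxW array,
# so the splash fraction is just the count of True pixels over the total pixel count.
def splash_area_percentage_vectorized(pred_mask=None):
    if pred_mask is None:
        return None
    pred_mask = np.asarray(pred_mask)
    # fraction of pixels predicted as splash
    return pred_mask.sum() / pred_mask.size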
# loops over each image
# i = 0
# for output in outputs_array:
# print(dataset_dicts[i]["file_name"])
# # print(output)
# print(splash_area_percentage(output))
# i+=1
# TODO: run the splash micro program on one diving clip,
# plot the splash area percentage over time, and save the figure
# (a hedged sketch, plot_splash_area_for_clip, appears later in this file)
import matplotlib.pyplot as plt
from models.detectron2.splash_detector_setup import get_splash_detector
def get_splash_from_one_frame(filepath, im=None, predictor=None, visualize=False, dive_folder_num=""):
    if predictor is None:
        predictor = get_splash_detector()
    if im is None:
        im = cv2.imread(filepath)
    outputs = predictor(im)
    pred_mask = get_splash_pred_mask(outputs)
    area = splash_area_percentage(outputs, pred_mask=pred_mask)
    if area is None:
        # print("no splash detected in", filepath)
        return None, None
    if visualize:
        pred_boxes = outputs['instances'].pred_boxes
        print("pred_boxes", pred_boxes)
        # draw the first predicted box, save the visualization, then stop
        for box in pred_boxes:
            image = cv2.rectangle(im, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), color=(0, 0, 255), thickness=2)
            out_folder = "./output/data/splash/{}".format(dive_folder_num)
            if not os.path.exists(out_folder):
                os.makedirs(out_folder)
            filename = os.path.join(out_folder, filepath.split('/')[-1])
            if not cv2.imwrite(filename, image):
                print('no image written to', filename)
            break
    return area.tolist(), pred_mask
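# Example (hypothetical paths; assumes get_splash_detector() can load its weights):
#     area, mask = get_splash_from_one_frame("./MTL-AQA/img_01_06_001.jpg",
#                                            visualize=True, dive_folder_num="01_06")
#     if area is not None:
#         print("splash covers {:.2%} of the frame".format(area))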
# outputs_array2 = []
# directory = "./MTL-AQA/"
# file_names = os.listdir(directory)
# for file_name in file_names:
# if file_name[:10] == "img_01_06_":
# path = os.path.join(directory, file_name)
# # im = caffe.io.load_image(path)
# print("PATH IM_01_06:", path)
# im = cv2.imread(path)
# outputs = predictor(im)
# outputs_array2.append(outputs) # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format
# v = Visualizer(im[:, :, ::-1],
# metadata=splash_metadata,
# scale=0.5,
# instance_mode=ColorMode.IMAGE_BW # remove the colors of unsegmented pixels. This option is only available for segmentation models
# )
# out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
# img = out.get_image()[:, :, ::-1]
# filename = os.path.join("./output/data/img_01_06/", file_name)
# if not cv2.imwrite(filename, img):
# print('no image written')
# i = 0
# splash_area = []
# for output in outputs_array2:
# # print(output)
# area = splash_area_percentage(output)
# if area is None:
# splash_area.append(0)
# else:
# print(area.cpu().data.item())
# splash_area.append(area.cpu().data.item())
# # print(splash_area_percentage(output))
# i+=1
# print(range(i))
# print(splash_area)
# plt.plot(range(i), splash_area)
# plt.savefig('./output/data/img_01_06/img_01_06_splash_graph.png')
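# Hedged sketch for the TODO above: wrap the same per-frame loop as the commented-out
# img_01_06 experiment into a helper that runs the splash micro program over one clip
# and saves the splash-area curve. The function name, directory layout, and file-name
# prefix are assumptions, not part of the original pipeline.
def plot_splash_area_for_clip(frame_dir, prefix, out_path, predictor=None):
    if predictor is None:
        predictor = get_splash_detector()
    frame_names = sorted(f for f in os.listdir(frame_dir) if f.startswith(prefix))
    areas = []
    for name in frame_names:
        area, _ = get_splash_from_one_frame(os.path.join(frame_dir, name), predictor=predictor)
        # treat frames with no detected splash as zero splash area
        areas.append(0 if area is None else area)
    plt.plot(range(len(areas)), areas)
    plt.xlabel("frame index")
    plt.ylabel("splash area fraction")
    plt.savefig(out_path)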
# from detectron2.evaluation import COCOEvaluator, inference_on_dataset
# from detectron2.data import build_detection_test_loader
# evaluator = COCOEvaluator("splash_vals", output_dir="./output")
# val_loader = build_detection_test_loader(cfg, "splash_vals")
# print(inference_on_dataset(predictor.model, val_loader, evaluator))