import os
import torch

import hotr.util.misc as utils
import hotr.util.logger as loggers
from hotr.data.evaluators.coco_eval import CocoEvaluator
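
# coco_evaluate runs a single pass over the validation loader: it logs the
# (GPU-reduced) criterion losses, converts the model outputs into COCO-format
# detections with the 'bbox' postprocessor, and accumulates them in a
# CocoEvaluator before printing the standard COCO AP/AR summary.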
@torch.no_grad()  # evaluation only: gradient tracking is not needed
def coco_evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir):
    model.eval()
    criterion.eval()

    metric_logger = loggers.MetricLogger(delimiter=" ")
    metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Evaluation'
    iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys())
    coco_evaluator = CocoEvaluator(base_ds, iou_types)
    print_freq = len(data_loader)
    # coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75]

    print("\n>>> [MS-COCO Evaluation] <<<")
    for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
        samples = samples.to(device)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        # forward pass; losses are computed for logging only, never backpropagated
        outputs = model(samples)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_scaled = {k: v * weight_dict[k]
                                    for k, v in loss_dict_reduced.items() if k in weight_dict}
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v
                                      for k, v in loss_dict_reduced.items()}
        metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()),
                             **loss_dict_reduced_scaled,
                             **loss_dict_reduced_unscaled)
        metric_logger.update(class_error=loss_dict_reduced['class_error'])

        # convert raw outputs to COCO-format detections at the original image resolution
        orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
        results = postprocessors['bbox'](outputs, orig_target_sizes)
        res = {target['image_id'].item(): output for target, output in zip(targets, results)}
        if coco_evaluator is not None:
            coco_evaluator.update(res)
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("\n>>> [Averaged stats] <<<\n", metric_logger)
    if coco_evaluator is not None:
        coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    if coco_evaluator is not None:
        coco_evaluator.accumulate()
        coco_evaluator.summarize()
    stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
    if coco_evaluator is not None:
        if 'bbox' in postprocessors.keys():
            stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist()

    return stats, coco_evaluator
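
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): `build_model` and `build_dataset` below are
# assumed project helpers, not functions defined in this file. The function
# itself only needs a model/criterion pair, a 'bbox' postprocessor, a data
# loader, and the pycocotools COCO ground-truth object of the evaluated split.
#
#   model, criterion, postprocessors = build_model(args)      # hypothetical helper
#   dataset_val = build_dataset('val', args)                  # hypothetical helper
#   data_loader = torch.utils.data.DataLoader(dataset_val, batch_size=2,
#                                             collate_fn=utils.collate_fn)
#   base_ds = dataset_val.coco                                # pycocotools COCO object
#   stats, evaluator = coco_evaluate(model, criterion, postprocessors,
#                                    data_loader, base_ds, device, args.output_dir)
#   print(stats['coco_eval_bbox'])  # 12-element COCO AP/AR summary from COCOeval
# ---------------------------------------------------------------------------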