---
# Error-analysis / visualization configuration for object detection & segmentation.
error_analysis:
  # detection_classes: ["person", "bicycle"]
  labels_dict: {"person": 1, "bicycle": 2}  # GT's index for the classes; should be zero-index but COCO wants to be special.
  inference_labels_dict: {"person": 0, "bicycle": 1}  # model's index for the classes. Should be zero-index, but sometimes not
  conf_thresholds: [0.2, 0.35, 0.5, 0.65, 0.8]  # some call it score threshold
  iou_thresholds: [0.2, 0.35, 0.5, 0.65, 0.8]  # back in my day we call it NMS threshold *shakes fist*
  # nms_thresholds: [0.2, 0.5, 0.8]
  bbox_format: "pascal_voc"  # yolo / coco / pascal_voc (WIP feature)
  peekingduck: true  # false if using your own model for inference without peekingduck wrapper, else true
  ground_truth_format: "coco"  # yolo / coco / pascal_voc (WIP feature)
  idx_base: 1  # whether the class index is zero- or one-based; applies to both GT and pred class
  task: "seg"  # either "det" or "seg"

# PeekingDuck model selection (used only when error_analysis.peekingduck is true).
pkd:
  model: "yolact_edge"  # either "yolo" or "yolact_edge"
  yolo_ver: "v4tiny"
  yolact_ver: "r50-fpn"

# Dataset locations and class list.
dataset:
  classes: ["person", "bicycle"]  # same as ['error_analysis']['detection_classes'] field above
  img_folder_path: "data/annotations_trainval2017/coco_small/"  # relative path from root for saving the coco dataset images
  annotations_folder_path: "data/annotations_trainval2017/annotations/"  # relative path from root to the annotations file
  annotations_fname: "instances_val2017.json"  # name of your annotations json file

# Drawing settings for the visual inspection tool.
visual_tool:
  bbox_thickness: 2  # how thick you want the bbox to be
  font_scale: 1  # how big you want the fonts to be
  font_thickness: 2  # how thick you want the fonts to be
  pred_colour: [255, 0, 0]  # prediction colour, [B, G, R]
  gt_colour: [0, 255, 0]  # ground truth colour, [B, G, R]
  conf_threshold: 0.2  # confidence threshold for use in the visual tool, in [0, 1]
  iou_threshold: 0.2  # IOU/NMS threshold for use in the visual tool, in [0, 1]