# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
file_client_args = dict(backend='disk')
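# Note (version-dependent assumption): newer MMDetection releases pass
# storage options to the loading transforms via `backend_args` rather than
# `file_client_args`; keep whichever key your installed version expects.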
# photometric augmentation space sampled by RandAugment
color_space = [
    [dict(type='ColorTransform')],
    [dict(type='AutoContrast')],
    [dict(type='Equalize')],
    [dict(type='Sharpness')],
    [dict(type='Posterize')],
    [dict(type='Solarize')],
    [dict(type='Color')],
    [dict(type='Contrast')],
    [dict(type='Brightness')],
]
# geometric augmentation space sampled by RandAugment
geometric = [
    [dict(type='Rotate')],
    [dict(type='ShearX')],
    [dict(type='ShearY')],
    [dict(type='TranslateX')],
    [dict(type='TranslateY')],
]
# RandomResize samples a target scale between these two extremes
scale = [(1333, 400), (1333, 1200)]
# branch names consumed by the MultiBranch wrappers below
branch_field = ['sup', 'unsup_teacher', 'unsup_student']
# pipeline used to augment labeled data,
# which is sent to the student model for supervised training
sup_pipeline = [
    dict(type='LoadImageFromFile', file_client_args=file_client_args),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='RandomResize', scale=scale, keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='RandAugment', aug_space=color_space, aug_num=1),
    dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
    dict(
        type='MultiBranch',
        branch_field=branch_field,
        sup=dict(type='PackDetInputs'))
]
# pipeline used to weakly augment unlabeled data,
# which is sent to the teacher model to predict pseudo instances
weak_pipeline = [
    dict(type='RandomResize', scale=scale, keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor', 'flip', 'flip_direction',
                   'homography_matrix')),
]
# pipeline used to strongly augment unlabeled data,
# which is sent to the student model for unsupervised training
strong_pipeline = [
    dict(type='RandomResize', scale=scale, keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(
        type='RandomOrder',
        transforms=[
            dict(type='RandAugment', aug_space=color_space, aug_num=1),
            dict(type='RandAugment', aug_space=geometric, aug_num=1),
        ]),
    dict(type='RandomErasing', n_patches=(1, 5), ratio=(0, 0.2)),
    dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor', 'flip', 'flip_direction',
                   'homography_matrix')),
]
# pipeline used to augment unlabeled data into different views:
# a weak view for the teacher and a strong view for the student
unsup_pipeline = [
    dict(type='LoadImageFromFile', file_client_args=file_client_args),
    dict(type='LoadEmptyAnnotations'),
    dict(
        type='MultiBranch',
        branch_field=branch_field,
        unsup_teacher=weak_pipeline,
        unsup_student=strong_pipeline,
    )
]
test_pipeline = [
    dict(type='LoadImageFromFile', file_client_args=file_client_args),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
batch_size = 5
num_workers = 5
# There are two common semi-supervised learning settings on the COCO dataset:
# (1) Split train2017 into labeled and unlabeled subsets by a fixed
#     percentage, such as 1%, 2%, 5% or 10%. The labeled and unlabeled
#     annotation files are named
#     instances_train2017.{fold}@{percent}.json and
#     instances_train2017.{fold}@{percent}-unlabeled.json, where `fold` is
#     used for cross-validation and `percent` is the proportion of labeled
#     data in train2017. A commented-out example of this setting follows
#     the dataset definitions below.
# (2) Use train2017 as the labeled dataset and unlabeled2017 as the
#     unlabeled dataset. The labeled and unlabeled annotation files are
#     instances_train2017.json and image_info_unlabeled2017.json.
#     This is the setting used by default.
labeled_dataset = dict(
    type=dataset_type,
    data_root=data_root,
    ann_file='annotations/instances_train2017.json',
    data_prefix=dict(img='train2017/'),
    filter_cfg=dict(filter_empty_gt=True, min_size=32),
    pipeline=sup_pipeline)
unlabeled_dataset = dict(
    type=dataset_type,
    data_root=data_root,
    ann_file='annotations/instances_unlabeled2017.json',
    data_prefix=dict(img='unlabeled2017/'),
    filter_cfg=dict(filter_empty_gt=False),
    pipeline=unsup_pipeline)
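# A minimal sketch of setting (1) described above, kept commented out. The
# instances_train2017.{fold}@{percent}[-unlabeled].json names come from that
# comment; the `semi_anns/` directory and the fold=1 / percent=10 choice are
# assumptions, so generate your own split files and adjust the paths.
# labeled_dataset = dict(
#     type=dataset_type,
#     data_root=data_root,
#     ann_file='semi_anns/instances_train2017.1@10.json',
#     data_prefix=dict(img='train2017/'),
#     filter_cfg=dict(filter_empty_gt=True, min_size=32),
#     pipeline=sup_pipeline)
# unlabeled_dataset = dict(
#     type=dataset_type,
#     data_root=data_root,
#     ann_file='semi_anns/instances_train2017.1@10-unlabeled.json',
#     data_prefix=dict(img='train2017/'),
#     filter_cfg=dict(filter_empty_gt=False),
#     pipeline=unsup_pipeline)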
# GroupMultiSourceSampler mixes the two datasets within each batch:
# with batch_size=5 and source_ratio=[1, 4], every batch contains
# 1 labeled and 4 unlabeled images.
train_dataloader = dict(
    batch_size=batch_size,
    num_workers=num_workers,
    persistent_workers=True,
    sampler=dict(
        type='GroupMultiSourceSampler',
        batch_size=batch_size,
        source_ratio=[1, 4]),
    dataset=dict(
        type='ConcatDataset', datasets=[labeled_dataset, unlabeled_dataset]))
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_val2017.json',
        data_prefix=dict(img='val2017/'),
        test_mode=True,
        pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'annotations/instances_val2017.json',
    metric='bbox',
    format_only=False)
test_evaluator = val_evaluator
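# Usage sketch: a downstream semi-supervised config would typically inherit
# this dataset file via `_base_`. The relative path below is an assumption;
# point it at wherever this file lives in your config tree.
# _base_ = [
#     '../_base_/datasets/semi_coco_detection.py',
#     # model, schedule and default_runtime configs are added alongside it
# ]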