JialeCao001 / D2Det
Run from github.com/Randl/D2Det (forked from github.com/JialeCao001/D2Det)
CODE is the result reproduced by this benchmark run; PAPER is the result reported in the paper ('--' where no paper figure was provided for comparison).

| MODEL | BOX AP (CODE) | BOX AP (PAPER) | AP50 (CODE) | AP50 (PAPER) | AP75 (CODE) | AP75 (PAPER) | APS (CODE) | APS (PAPER) | APM (CODE) | APM (PAPER) | APL (CODE) | APL (PAPER) | SPEED | GLOBAL RANK |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| D2Det (ResNet101) | 0.449 | 0.449 | 0.633 | -- | 0.491 | -- | 0.268 | -- | 0.491 | -- | 0.595 | -- | 7.5 | #10 |
| D2Det (ResNet101-DCN) | 0.469 | 0.469 | 0.654 | 0.659 | 0.513 | 0.517 | 0.287 | 0.272 | 0.508 | 0.504 | 0.620 | 0.613 | 7.1 | #7 |
| D2Det (ResNet50) | 0.437 | 0.437 | 0.624 | -- | 0.474 | -- | 0.254 | -- | 0.472 | -- | 0.583 | -- | 8.2 | #11 |
[Results on sotabench](https://sotabench.com/user/EvgeniiZh/repos/Randl/D2Det)
How the Repository is Evaluated

The full `sotabench.py` file:
import copy
import json
import os
import os.path as osp
import shutil
import tempfile
import urllib.request
import mmcv
import numpy as np
import torch
from sotabencheval.object_detection import COCOEvaluator
torch.manual_seed(0)
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, load_checkpoint, init_dist
from mmdet.core import wrap_fp16_model
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
# Extract val2017 zip
from torchbench.utils import extract_archive
from torchbench.datasets.utils import download_file_from_google_drive
image_dir_zip = osp.join('./.data/vision/coco', 'val2017.zip')
extract_archive(from_path=image_dir_zip, to_path='./.data/vision/coco')
from pathlib import Path
# Make sure the torch cache directory (where checkpoints are downloaded to) exists.
os.makedirs(Path.home() / '.cache/torch', exist_ok=True)
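# Convert an [x1, y1, x2, y2] box to the COCO [x, y, width, height] format
# (the +1 follows the inclusive-pixel width/height convention).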
def xyxy2xywh(bbox):
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0] + 1,
_bbox[3] - _bbox[1] + 1,
]
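# Turn class-agnostic proposal boxes into COCO-style JSON records (category_id is fixed to 1).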
def proposal2json(dataset, results):
json_results = []
for idx in range(len(dataset)):
img_id = dataset.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
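# Turn per-class detection arrays into COCO-style JSON records, mapping each label index
# to the dataset's category id.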
def det2json(dataset, results):
json_results = []
for idx in range(len(dataset)):
img_id = dataset.img_ids[idx]
try:
result = results[idx]
except IndexError:
break
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = dataset.cat_ids[label]
json_results.append(data)
return json_results
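# Turn (bbox, mask) result pairs into two COCO-style JSON lists: one for boxes and one for
# segmentations (mask scores fall back to the box scores when none are given).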
def segm2json(dataset, results):
bbox_json_results = []
segm_json_results = []
for idx in range(len(dataset)):
img_id = dataset.img_ids[idx]
try:
det, seg = results[idx]
except IndexError:
break
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = dataset.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different score for det and segm
if len(seg) == 2:
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['score'] = float(mask_score[i])
data['category_id'] = dataset.cat_ids[label]
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
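# Dump results to COCO-style JSON files on disk; the output format (bbox / segm / proposal)
# is chosen from the type of the per-image results.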
def cached_results2json(dataset, results, out_file):
result_files = dict()
if isinstance(results[0], list):
json_results = det2json(dataset, results)
result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = segm2json(dataset, results)
result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
result_files['segm'] = '{}.{}.json'.format(out_file, 'segm')
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = proposal2json(dataset, results)
result_files['proposal'] = '{}.{}.json'.format(out_file, 'proposal')
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
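# Single-GPU inference loop. After the first batch the partial detections are hashed by the
# sotabench evaluator; if a cached run with the same hash exists, inference stops early.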
def single_gpu_test(model, data_loader, show=False, evaluator=None):
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=not show, **data)
results.append(result)
if i == 0:
temp_result_files = cached_results2json(copy.deepcopy(dataset), copy.deepcopy(results), 'temp_results.pkl')
anns = json.load(open(temp_result_files['bbox']))
evaluator.add(anns)
print(evaluator.batch_hash)
print(evaluator.cache_exists)
if evaluator.cache_exists:
return results, True
if show:
model.module.show_result(data, result, dataset.img_norm_cfg)
batch_size = data['img'][0].size(0)
for _ in range(batch_size):
prog_bar.update()
return results, False
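# Multi-GPU inference loop: each rank runs its shard of the dataset and the per-rank
# results are merged afterwards with collect_results().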
def multi_gpu_test(model, data_loader, tmpdir=None):
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
results.append(result)
if rank == 0:
batch_size = data['img'][0].size(0)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
results = collect_results(results, len(dataset), tmpdir)
return results
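# Gather the per-rank result lists through a shared temporary directory, restore the original
# sample order and drop any samples the dataloader padded in.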
def collect_results(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN,),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
tmpdir = tempfile.mkdtemp()
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
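# Build the detector and dataloader from an mmdetection config, download the released
# checkpoint from Google Drive, run inference on COCO val2017 and push the detections
# to the sotabench COCOEvaluator.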
def evaluate_model(model_name, paper_arxiv_id, file_id, weights_name, paper_results, config):
print('---')
print('Now Evaluating %s' % model_name)
evaluator = COCOEvaluator(
root='./.data/vision/coco',
model_name=model_name,
paper_arxiv_id=paper_arxiv_id,
paper_results=paper_results)
out = 'results.pkl'
launcher = 'none'
if out is not None and not out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = mmcv.Config.fromfile(config)
cfg.data.test['ann_file'] = './.data/vision/coco/annotations/instances_val2017.json'
cfg.data.test['img_prefix'] = './.data/vision/coco/val2017/'
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
# init distributed env first, since logger depends on the dist info.
if launcher == 'none':
distributed = False
else:
distributed = True
init_dist(launcher, **cfg.dist_params)
# build the dataloader
# TODO: support multiple images per gpu (only minor changes are needed)
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
imgs_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
destination = '%s/.cache/torch/' % (str(Path.home()))
download_file_from_google_drive(file_id, destination, filename=weights_name)
local_checkpoint = os.path.join(destination, weights_name)
print(local_checkpoint)
# '/home/ubuntu/GCNet/mask_rcnn_r50_fpn_1x_20181010-069fa190.pth'
checkpoint = load_checkpoint(model, local_checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints; this workaround is
    # for backward compatibility
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
evaluator.reset_time()
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs, cache_exists = single_gpu_test(model, data_loader, False, evaluator)
    else:
        model = MMDistributedDataParallel(model.cuda())
        # there is no per-batch cache check on the multi-GPU path, so assume no cached run
        outputs = multi_gpu_test(model, data_loader, tmpdir=None)
        cache_exists = False
    if cache_exists:
print('Cache exists: %s' % (evaluator.batch_hash))
evaluator.save()
else:
rank, _ = get_dist_info()
if out and rank == 0:
print('\nwriting results to {}'.format(out))
mmcv.dump(outputs, out)
eval_types = ['bbox']
if eval_types:
print('Starting evaluate {}'.format(' and '.join(eval_types)))
if eval_types == ['proposal_fast']:
result_file = out
else:
if not isinstance(outputs[0], dict):
result_files = dataset.results2json(outputs, out)
else:
for name in outputs[0]:
print('\nEvaluating {}'.format(name))
outputs_ = [out[name] for out in outputs]
result_file = out + '.{}'.format(name)
result_files = dataset.results2json(outputs_, result_file)
anns = json.load(open(result_files['bbox']))
evaluator.detections = []
evaluator.add(anns)
evaluator.save()
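# One entry per released D2Det checkpoint: Google Drive file id, weights filename,
# mmdetection config path and (where available) the box AP numbers reported in the paper.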
model_configs = []
## D2Det
model_configs.append(
{'model_name': 'D2Det (ResNet50)',
'paper_arxiv_id': None,
'file_id': '1es6y8Uu-fByOmTq_Y_M5uMuO42_ARI7k',
'weights_name': 'D2Det-detection-res50.pth',
'config': './configs/D2Det/D2Det_detection_r50_fpn_2x.py',
'paper_results': {'box AP': 0.437}}
)
model_configs.append(
{'model_name': 'D2Det (ResNet101)',
'paper_arxiv_id': None,
'file_id': '14Cw9Y3vSdirkR3xLcb6F6H1hHr3qzLNj',
'weights_name': 'D2Det-detection-res101.pth',
'config': './configs/D2Det/D2Det_detection_r101_fpn_2x.py',
'paper_results': {'box AP': 0.449}}
)
model_configs.append(
{'model_name': 'D2Det (ResNet101-DCN)',
'paper_arxiv_id': None,
'file_id': '1jDeAj_rMKLMf64BGwqiysis9IyZzTQ6w',
'weights_name': 'D2Det-detection-res101-dcn.pth',
'config': './configs/D2Det/D2Det_detection_r101_fpn_dcn_2x.py',
'paper_results': {'box AP': 0.469, 'AP50': 0.659, 'AP75': 0.517, 'APS': 0.272, 'APM': 0.504, 'APL': 0.613}}
)
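# Initialise a single-process 'gloo' process group so that torch.distributed is set up for
# any code paths that expect it, even though this run is not distributed.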
import torch.distributed as dist
dist.init_process_group('gloo', init_method='file:///tmp/somefile', rank=0, world_size=1)
for model_config in model_configs:
if 'paper_results' not in model_config:
model_config['paper_results'] = None
evaluate_model(model_name=model_config['model_name'],
paper_arxiv_id=model_config['paper_arxiv_id'],
file_id=model_config['file_id'],
weights_name=model_config['weights_name'],
paper_results=model_config['paper_results'],
config=model_config['config'])