rwightman / efficientdet-pytorch
| MODEL | BOX AP (CODE) | BOX AP (PAPER) | AP50 (CODE) | AP50 (PAPER) | AP75 (CODE) | AP75 (PAPER) | APS (CODE) | APS (PAPER) | APM (CODE) | APM (PAPER) | APL (CODE) | APL (PAPER) | SPEED (PAPER) | GLOBAL RANK |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| EfficientDet-D0 | 0.336 | -- | 0.522 | -- | 0.357 | -- | 0.125 | -- | 0.387 | -- | 0.522 | -- | 34.6 | #16 |
| EfficientDet-D1 | 0.393 | -- | 0.582 | -- | 0.419 | -- | 0.187 | -- | 0.448 | -- | 0.572 | -- | 32.2 | #15 |
| EfficientDet-D2 | 0.426 | -- | 0.618 | -- | 0.452 | -- | 0.237 | -- | 0.481 | -- | 0.590 | -- | -- | #13 |
| EfficientDet-D3 | 0.460 | -- | 0.651 | -- | 0.493 | -- | 0.283 | -- | 0.503 | -- | 0.618 | -- | -- | #9 |
| EfficientDet-D4 | 0.491 | -- | 0.685 | -- | 0.531 | -- | 0.334 | -- | 0.539 | -- | 0.641 | -- | -- | #6 |
| EfficientDet-D5 | 0.504 | -- | 0.700 | -- | 0.543 | -- | 0.337 | -- | 0.549 | -- | 0.645 | -- | -- | #5 |
| EfficientDet-D6 | 0.512 | -- | 0.706 | -- | 0.551 | -- | 0.348 | -- | 0.555 | -- | 0.653 | -- | 8.5 | #3 |
| EfficientDet-D7 | 0.531 | -- | 0.725 | -- | 0.572 | -- | 0.369 | -- | 0.574 | -- | 0.668 | -- | 5.2 | #2 |
[Benchmark results on sotabench](https://sotabench.com/user/rwightman/repos/rwightman/efficientdet-pytorch)
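The CODE columns above are standard COCO bounding-box metrics (AP, AP50, AP75, APS, APM, APL) measured on val2017 by the evaluation script shown below; the PAPER columns are left blank on this page. As a rough, hypothetical sketch (the annotation and detection file names are illustrative, not part of this repository), the same six numbers can be recomputed from a COCO-format detections JSON with pycocotools:

```python
# Hypothetical sketch: recompute COCO box AP / AP50 / AP75 / APS / APM / APL
# from a detections file in the same dict format that sotabench.py emits below.
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

coco_gt = COCO('annotations/instances_val2017.json')  # val2017 ground-truth annotations
coco_dt = coco_gt.loadRes('detections_d0.json')       # illustrative detections JSON
coco_eval = COCOeval(coco_gt, coco_dt, iouType='bbox')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()  # stats[0:6] correspond to the six AP columns in the table
```

The detection dicts passed to `evaluator.add()` in the script below (`image_id`, `bbox`, `score`, `category_id`) are the same format that `loadRes` accepts.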
How the Repository is Evaluated
The full `sotabench.py` file (source):
```python
import os
import tqdm
import torch
try:
    from apex import amp
    has_amp = True
except ImportError:
    has_amp = False

from sotabencheval.object_detection import COCOEvaluator
from sotabencheval.utils import is_server, extract_archive
from effdet import create_model
from data import CocoDetection, create_loader

NUM_GPU = 1
BATCH_SIZE = (128 if has_amp else 64) * NUM_GPU
ANNO_SET = 'val2017'

if is_server():
    DATA_ROOT = './.data/vision/coco'
    image_dir_zip = os.path.join('./.data/vision/coco', f'{ANNO_SET}.zip')
    extract_archive(from_path=image_dir_zip, to_path='./.data/vision/coco')
else:
    # local settings
    DATA_ROOT = ''


def _bs(b=64):
    b *= NUM_GPU
    if has_amp:
        b *= 2
    return b


def _entry(model_name, paper_model_name, paper_arxiv_id, batch_size=BATCH_SIZE, model_desc=None):
    return dict(
        model_name=model_name,
        model_description=model_desc,
        paper_model_name=paper_model_name,
        paper_arxiv_id=paper_arxiv_id,
        batch_size=batch_size)


# NOTE For any original PyTorch models, I'll remove from this list when you add to sotabench to
# avoid overlap and confusion. Please contact me.
model_list = [
    ## Weights ported by myself from other frameworks
    _entry('tf_efficientdet_d0', 'EfficientDet-D0', '1911.09070', batch_size=_bs(112),
           model_desc='Ported from official Google AI Tensorflow weights'),
    _entry('tf_efficientdet_d1', 'EfficientDet-D1', '1911.09070', batch_size=_bs(72),
           model_desc='Ported from official Google AI Tensorflow weights'),
    _entry('tf_efficientdet_d2', 'EfficientDet-D2', '1911.09070', batch_size=_bs(48),
           model_desc='Ported from official Google AI Tensorflow weights'),
    _entry('tf_efficientdet_d3', 'EfficientDet-D3', '1911.09070', batch_size=_bs(32),
           model_desc='Ported from official Google AI Tensorflow weights'),
    _entry('tf_efficientdet_d4', 'EfficientDet-D4', '1911.09070', batch_size=_bs(16),
           model_desc='Ported from official Google AI Tensorflow weights'),
    _entry('tf_efficientdet_d5', 'EfficientDet-D5', '1911.09070', batch_size=_bs(12),
           model_desc='Ported from official Google AI Tensorflow weights'),
    _entry('tf_efficientdet_d6', 'EfficientDet-D6', '1911.09070', batch_size=_bs(8),
           model_desc='Ported from official Google AI Tensorflow weights'),
    _entry('tf_efficientdet_d7', 'EfficientDet-D7', '1911.09070', batch_size=_bs(4),
           model_desc='Ported from official Google AI Tensorflow weights'),
    ## Weights trained by myself in PyTorch
    _entry('efficientdet_d0', 'EfficientDet-D0', '1911.09070', batch_size=_bs(112),
           model_desc='Trained in PyTorch with https://github.com/rwightman/efficientdet-pytorch'),
]


def eval_model(model_name, paper_model_name, paper_arxiv_id, batch_size=64, model_description=''):
    # create model
    bench = create_model(
        model_name,
        bench_task='predict',
        pretrained=True,
    )
    bench.eval()
    input_size = bench.config.image_size
    param_count = sum([m.numel() for m in bench.parameters()])
    print('Model %s created, param count: %d' % (model_name, param_count))

    bench = bench.cuda()
    if has_amp:
        print('Using AMP mixed precision.')
        bench = amp.initialize(bench, opt_level='O1')
    else:
        print('AMP not installed, running network in FP32.')

    annotation_path = os.path.join(DATA_ROOT, 'annotations', f'instances_{ANNO_SET}.json')
    evaluator = COCOEvaluator(
        root=DATA_ROOT,
        model_name=paper_model_name,
        model_description=model_description,
        paper_arxiv_id=paper_arxiv_id)

    dataset = CocoDetection(os.path.join(DATA_ROOT, ANNO_SET), annotation_path)
    loader = create_loader(
        dataset,
        input_size=input_size,
        batch_size=batch_size,
        use_prefetcher=True,
        fill_color='mean',
        num_workers=4,
        pin_mem=True)

    iterator = tqdm.tqdm(loader, desc="Evaluation", mininterval=5)
    evaluator.reset_time()
    with torch.no_grad():
        for i, (input, target) in enumerate(iterator):
            output = bench(input, target['img_scale'], target['img_size'])
            output = output.cpu()
            sample_ids = target['img_id'].cpu()
            results = []
            for index, sample in enumerate(output):
                image_id = int(sample_ids[index])
                for det in sample:
                    score = float(det[4])
                    if score < .001:  # stop when below this threshold, scores in descending order
                        break
                    coco_det = dict(
                        image_id=image_id,
                        bbox=det[0:4].tolist(),
                        score=score,
                        category_id=int(det[5]))
                    results.append(coco_det)
            evaluator.add(results)
            if evaluator.cache_exists:
                break

    evaluator.save()


for m in model_list:
    eval_model(**m)
    torch.cuda.empty_cache()
```
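For local experimentation outside the benchmark harness, here is a minimal sketch of running one pretrained bench on a dummy batch, assuming the same `effdet.create_model` predict-bench interface used in the script above (the model choice, dummy input, and scale/size arguments are illustrative assumptions, not an official example):

```python
import torch
from effdet import create_model

# Assumption: same create_model / predict-bench API as sotabench.py above.
bench = create_model('tf_efficientdet_d0', bench_task='predict', pretrained=True)
bench.eval()

size = bench.config.image_size                # int in some effdet versions, (h, w) in others
img_size = tuple(size) if isinstance(size, (tuple, list)) else (size, size)
dummy = torch.randn(1, 3, *img_size)          # stand-in for a preprocessed image batch

with torch.no_grad():
    # mirrors the bench(input, img_scale, img_size) call in the evaluation loop above
    dets = bench(dummy, torch.ones(1), torch.tensor([img_size], dtype=torch.float))

# Per the indexing in sotabench.py, each detection row is [x, y, w, h, score, class],
# sorted by descending score.
for det in dets[0]:
    if float(det[4]) < 0.001:
        break
    print(det[0:4].tolist(), float(det[4]), int(det[5]))
```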
| BUILD | COMMIT MESSAGE | AUTHOR | COMMIT | DATE |
|---|---|---|---|---|
| #25 | Fix #169, remove recompute_scale_factor arg from interpolate. | rwightman | 36a232b | Feb 18 2021 |
| #24 | Try another recompute_scale_factor approach. Leaving it None res… | rwightman | 4ef9c15, 92bb66f | Feb 18 2021 |
| #22 | Merge pull request #186 from rwightman/bbox_aug_fix: Fix for aug… | rwightman | 2ac9f92, b3dcb20 | Feb 25 2021 |
| #20 | Port all updated TF original weights (D0-D2, D4-D6). Add soft-nm… | rwightman | 1394d5b | Sep 03 2020 |
| #19 | Update feature_info usage to support upcoming timm changes with … | rwightman | 0b36cc1 | Aug 01 2020 |
| #18 | Alternate fix for Windows distributed issues that won't break Li… | rwightman | d763618 | Aug 01 2020 |
| #16 | Merge pull request #66 from authman/master: Small correction on … | rwightman | 84d3c41, 75c10c8 | Jul 27 2020 |
| #13 | Port and updated D7 weights from official TF repo (53.1 mAP in P… | rwightman | 8fc03d4 | Jun 14 2020 |
| #9 | Add pretrained weights for non tf efficientdet_d1 and tf_efficie… | rwightman | 53002b4 | Jun 14 2020 |
| #8 | Merge pull request #34 from rwightman/model_configs: Model confi… | rwightman | 5332cfa (+6 commits) | Jun 12 2020 |
| #6 | Add D0 weights trained from scratch with this code, match/slighl… | rwightman | 7756783 | Jun 04 2020 |
| #5 | Update README, fix small mistake in non tf model configs, fix #21 | rwightman | f5df7ed | May 27 2020 |
| #4 | Remove drop_rate config entries that have no impact, bump up D0/… | rwightman | 6ff9140 | May 24 2020 |
| #3 | sotabench almost there, last batch sizes a bit too optimistic? | rwightman | 9b9d1d9 | May 24 2020 |