Skip to content

Commit b19fffa

Browse files
committed
switch to new config
1 parent e781fcf commit b19fffa

File tree

3 files changed

+57
-34
lines changed

3 files changed

+57
-34
lines changed

configs/body_2d_keypoint/edpose/coco/edpose_coco.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -58,3 +58,5 @@ Results on COCO val2017.
5858
| [edpose_res50_coco](/configs/body_2d_keypoint/edpose/coco/edpose_res50_8xb2-50e_coco-800x1333.py) | ResNet-50 | 0.716 | 0.898 | 0.783 | 0.793 | 0.944 | [ckpt](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/edpose/coco/edpose_res50_coco_3rdparty.pth) | [log](https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/edpose/coco/edpose_res50_coco_3rdparty.json) |
5959

6060
The checkpoint is converted from the official repo. The training of EDPose is not supported yet. It will be supported in future updates.
61+
62+
The above config follows [Pure Python style](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta). Please install `mmengine>=0.8.2` to use this config.

configs/body_2d_keypoint/edpose/coco/edpose_res50_8xb2-50e_coco-800x1333.py

Lines changed: 53 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -1,21 +1,40 @@
1-
_base_ = ['../../../_base_/default_runtime.py']
1+
# Copyright (c) OpenMMLab. All rights reserved.
2+
from mmengine.config import read_base
3+
4+
with read_base():
5+
from mmpose.configs._base_.default_runtime import * # noqa
6+
7+
from mmcv.transforms import RandomChoice, RandomChoiceResize
8+
from mmengine.dataset import DefaultSampler
9+
from mmengine.model import PretrainedInit
10+
from mmengine.optim import LinearLR, MultiStepLR
11+
from torch.nn import GroupNorm
12+
from torch.optim import Adam
13+
14+
from mmpose.codecs import EDPoseLabel
15+
from mmpose.datasets import (BottomupRandomChoiceResize, BottomupRandomCrop,
16+
CocoDataset, LoadImage, PackPoseInputs,
17+
RandomFlip)
18+
from mmpose.evaluation import CocoMetric
19+
from mmpose.models import (BottomupPoseEstimator, ChannelMapper, EDPoseHead,
20+
PoseDataPreprocessor, ResNet)
21+
from mmpose.models.utils import FrozenBatchNorm2d
222

323
# runtime
4-
train_cfg = dict(max_epochs=50, val_interval=10)
24+
train_cfg.update(max_epochs=50, val_interval=10) # noqa
525

626
# optimizer
727
optim_wrapper = dict(optimizer=dict(
8-
type='Adam',
28+
type=Adam,
929
lr=1e-3,
1030
))
1131

1232
# learning policy
1333
param_scheduler = [
34+
dict(type=LinearLR, begin=0, end=500, start_factor=0.001,
35+
by_epoch=False), # warm-up
1436
dict(
15-
type='LinearLR', begin=0, end=500, start_factor=0.001,
16-
by_epoch=False), # warm-up
17-
dict(
18-
type='MultiStepLR',
37+
type=MultiStepLR,
1938
begin=0,
2039
end=140,
2140
milestones=[33, 45],
@@ -27,40 +46,42 @@
2746
auto_scale_lr = dict(base_batch_size=80)
2847

2948
# hooks
30-
default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater'))
49+
default_hooks.update( # noqa
50+
checkpoint=dict(save_best='coco/AP', rule='greater'))
3151

3252
# codec settings
33-
codec = dict(type='EDPoseLabel', num_select=50, num_keypoints=17)
53+
codec = dict(type=EDPoseLabel, num_select=50, num_keypoints=17)
3454

3555
# model settings
3656
model = dict(
37-
type='BottomupPoseEstimator',
57+
type=BottomupPoseEstimator,
3858
data_preprocessor=dict(
39-
type='PoseDataPreprocessor',
59+
type=PoseDataPreprocessor,
4060
mean=[123.675, 116.28, 103.53],
4161
std=[58.395, 57.12, 57.375],
4262
bgr_to_rgb=True,
4363
pad_size_divisor=1),
4464
backbone=dict(
45-
type='ResNet',
65+
type=ResNet,
4666
depth=50,
4767
num_stages=4,
4868
out_indices=(1, 2, 3),
4969
frozen_stages=1,
50-
norm_cfg=dict(type='FrozenBatchNorm2d', requires_grad=False),
70+
norm_cfg=dict(type=FrozenBatchNorm2d, requires_grad=False),
5171
norm_eval=True,
5272
style='pytorch',
53-
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
73+
init_cfg=dict(
74+
type=PretrainedInit, checkpoint='torchvision://resnet50')),
5475
neck=dict(
55-
type='ChannelMapper',
76+
type=ChannelMapper,
5677
in_channels=[512, 1024, 2048],
5778
kernel_size=1,
5879
out_channels=256,
5980
act_cfg=None,
60-
norm_cfg=dict(type='GN', num_groups=32),
81+
norm_cfg=dict(type=GroupNorm, num_groups=32),
6182
num_outs=4),
6283
head=dict(
63-
type='EDPoseHead',
84+
type=EDPoseHead,
6485
num_queries=900,
6586
num_feature_levels=4,
6687
num_keypoints=17,
@@ -117,57 +138,57 @@
117138
find_unused_parameters = True
118139

119140
# base dataset settings
120-
dataset_type = 'CocoDataset'
141+
dataset_type = CocoDataset
121142
data_mode = 'bottomup'
122143
data_root = 'data/coco/'
123144

124145
# pipelines
125146
train_pipeline = [
126-
dict(type='LoadImage'),
127-
dict(type='RandomFlip', direction='horizontal'),
147+
dict(type=LoadImage),
148+
dict(type=RandomFlip, direction='horizontal'),
128149
dict(
129-
type='RandomChoice',
150+
type=RandomChoice,
130151
transforms=[
131152
[
132153
dict(
133-
type='RandomChoiceResize',
154+
type=RandomChoiceResize,
134155
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
135156
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
136157
(736, 1333), (768, 1333), (800, 1333)],
137158
keep_ratio=True)
138159
],
139160
[
140161
dict(
141-
type='BottomupRandomChoiceResize',
162+
type=BottomupRandomChoiceResize,
142163
# The ratio of all images in the train dataset is < 7
143164
# follows the original implementation
144165
scales=[(400, 4200), (500, 4200), (600, 4200)],
145166
keep_ratio=True),
146167
dict(
147-
type='BottomupRandomCrop',
168+
type=BottomupRandomCrop,
148169
crop_type='absolute_range',
149170
crop_size=(384, 600),
150171
allow_negative_crop=True),
151172
dict(
152-
type='BottomupRandomChoiceResize',
173+
type=BottomupRandomChoiceResize,
153174
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
154175
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
155176
(736, 1333), (768, 1333), (800, 1333)],
156177
keep_ratio=True)
157178
]
158179
]),
159-
dict(type='PackPoseInputs'),
180+
dict(type=PackPoseInputs),
160181
]
161182

162183
val_pipeline = [
163-
dict(type='LoadImage'),
184+
dict(type=LoadImage),
164185
dict(
165-
type='BottomupRandomChoiceResize',
186+
type=BottomupRandomChoiceResize,
166187
scales=[(800, 1333)],
167188
keep_ratio=True,
168189
backend='pillow'),
169190
dict(
170-
type='PackPoseInputs',
191+
type=PackPoseInputs,
171192
meta_keys=('id', 'img_id', 'img_path', 'crowd_index', 'ori_shape',
172193
'img_shape', 'input_size', 'input_center', 'input_scale',
173194
'flip', 'flip_direction', 'flip_indices', 'raw_ann_info',
@@ -179,7 +200,7 @@
179200
batch_size=1,
180201
num_workers=1,
181202
persistent_workers=True,
182-
sampler=dict(type='DefaultSampler', shuffle=False),
203+
sampler=dict(type=DefaultSampler, shuffle=False),
183204
dataset=dict(
184205
type=dataset_type,
185206
data_root=data_root,
@@ -194,7 +215,7 @@
194215
num_workers=8,
195216
persistent_workers=True,
196217
drop_last=False,
197-
sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
218+
sampler=dict(type=DefaultSampler, shuffle=False, round_up=False),
198219
dataset=dict(
199220
type=dataset_type,
200221
data_root=data_root,
@@ -208,8 +229,7 @@
208229

209230
# evaluators
210231
val_evaluator = dict(
211-
type='CocoMetric',
212-
ann_file=data_root + 'annotations/person_keypoints_val2017.json',
232+
type=CocoMetric,
213233
nms_mode='none',
214234
score_mode='keypoint',
215235
)

mmpose/apis/inference.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,8 @@ def dataset_meta_from_config(config: Config,
5353
import mmpose.datasets.datasets # noqa: F401, F403
5454
from mmpose.registry import DATASETS
5555

56-
dataset_class = DATASETS.get(dataset_cfg.type)
56+
dataset_class = dataset_cfg.type if isinstance(
57+
dataset_cfg.type, type) else DATASETS.get(dataset_cfg.type)
5758
metainfo = dataset_class.METAINFO
5859

5960
metainfo = parse_pose_metainfo(metainfo)

0 commit comments

Comments
 (0)