Skip to content

Commit

Permalink
exp : hpe model configs #17
Browse files Browse the repository at this point in the history
  • Loading branch information
Lee Seung Hoon committed Dec 14, 2022
1 parent d08b083 commit 5705dab
Show file tree
Hide file tree
Showing 16 changed files with 2,017 additions and 3 deletions.
7 changes: 6 additions & 1 deletion AI/pose-estimation/mmpose/configs/_base_/default_runtime.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,13 +4,18 @@
interval=50,
hooks=[
dict(type='TextLoggerHook'),
dict(type='WandbLoggerHook',
init_kwargs={
'project': 'HPE modesl',
'entity' : 'somang',
'name' : 'mobile_net3_bottom_up'},
)
# dict(type='TensorboardLoggerHook')
# dict(type='PaviLoggerHook') # for internal services
])

# Logger verbosity for the runner.
log_level = 'INFO'
# Checkpoint to initialise model weights from (None = start from scratch).
load_from = None
# Checkpoint to resume training from — presumably restores optimizer/epoch
# state as well as weights; verify against the runner's resume semantics.
resume_from = None
# Distributed training backend; NCCL implies GPU training — TODO confirm.
dist_params = dict(backend='nccl')
# Single-stage workflow: run the 'train' phase for 1 epoch per cycle.
workflow = [('train', 1)]

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
# Runtime defaults for the CID + MobileNet experiment.

# Save a checkpoint every 10 epochs.
checkpoint_config = dict(interval=10)

# Emit logs every 50 iterations: plain text plus a Weights & Biases run.
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(
            type='WandbLoggerHook',
            init_kwargs={
                # NOTE(review): 'HPE modesl' looks like a typo for
                # 'HPE models', but every sibling config uses the same
                # spelling — kept as-is so all runs land in one W&B project.
                'project': 'HPE modesl',
                'entity': 'somang',
                'name': 'CID_mobile',
            }),
        # dict(type='TensorboardLoggerHook')
        # dict(type='PaviLoggerHook')  # for internal services
    ])

log_level = 'INFO'
load_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]

# disable opencv multithreading to avoid system being overloaded
opencv_num_threads = 0
# set multi-process start method as `fork` to speed up the training
mp_start_method = 'fork'
25 changes: 25 additions & 0 deletions AI/pose-estimation/mmpose/configs/_base_/default_runtime_hrnet.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
# Runtime defaults for the pruned-HRNet CID experiment.

# Save a checkpoint every 10 epochs.
checkpoint_config = dict(interval=10)

# Emit logs every 50 iterations: plain text plus a Weights & Biases run.
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(
            type='WandbLoggerHook',
            init_kwargs={
                # NOTE(review): 'HPE modesl' looks like a typo for
                # 'HPE models', but every sibling config uses the same
                # spelling — kept as-is so all runs land in one W&B project.
                'project': 'HPE modesl',
                'entity': 'somang',
                'name': 'CID_hrnet_prun',
            }),
        # dict(type='TensorboardLoggerHook')
        # dict(type='PaviLoggerHook')  # for internal services
    ])

log_level = 'INFO'
load_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]

# disable opencv multithreading to avoid system being overloaded
opencv_num_threads = 0
# set multi-process start method as `fork` to speed up the training
mp_start_method = 'fork'
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
# Runtime defaults for the HRNet-lite CID experiment.

# Save a checkpoint every epoch.
checkpoint_config = dict(interval=1)

# Emit logs every 50 iterations: plain text plus a Weights & Biases run.
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(
            type='WandbLoggerHook',
            init_kwargs={
                # NOTE(review): 'HPE modesl' looks like a typo for
                # 'HPE models', but every sibling config uses the same
                # spelling — kept as-is so all runs land in one W&B project.
                'project': 'HPE modesl',
                'entity': 'somang',
                'name': 'CID_hrnet_lite1',
            }),
        # dict(type='TensorboardLoggerHook')
        # dict(type='PaviLoggerHook')  # for internal services
    ])

log_level = 'INFO'
load_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]

# disable opencv multithreading to avoid system being overloaded
opencv_num_threads = 0
# set multi-process start method as `fork` to speed up the training
mp_start_method = 'fork'
148 changes: 148 additions & 0 deletions AI/pose-estimation/mmpose/configs/myconfigs/cid_mobile_coco_512x512.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,148 @@
# CID (Contextual Instance Decoupling) bottom-up pose estimation on COCO,
# 512x512 input, MobileNetV2 backbone. Inherits runtime + dataset meta from
# the _base_ configs below (mmcv config inheritance).
_base_ = [
    '../_base_/default_runtime_cid_mobile.py',
    '../_base_/datasets/coco.py'
]
# Overrides the base runtime (interval=10): save a checkpoint every 20 epochs.
checkpoint_config = dict(interval=20)
# Evaluate COCO mAP every 20 epochs and keep the best-AP checkpoint.
evaluation = dict(interval=20, metric='mAP', save_best='AP')

optimizer = dict(
    type='Adam',
    lr=0.001,
)
optimizer_config = dict(grad_clip=None)
# learning policy: step-decay the LR at epochs 90 and 120
lr_config = dict(policy='step', step=[90, 120])
total_epochs = 140

# COCO person keypoints: 17 joints, all of them used for both training
# (dataset_channel) and inference (inference_channel).
channel_cfg = dict(
    dataset_joints=17,
    dataset_channel=[
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
    ],
    inference_channel=[
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
    ])

data_cfg = dict(
    image_size=512,
    base_size=256,
    base_sigma=2,
    heatmap_size=[128],  # 512 input / stride 4
    num_joints=channel_cfg['dataset_joints'],
    dataset_channel=channel_cfg['dataset_channel'],
    inference_channel=channel_cfg['inference_channel'],
    num_scales=1,
    scale_aware_sigma=False,
    with_bbox=True,
    use_nms=True,
    soft_nms=False,
    oks_thr=0.8,
)

# model settings
model = dict(
    type='CID',
    # NOTE(review): this URL points to HRNet-W32 pretrain weights, but the
    # backbone below is MobileNetV2 — the state dict cannot match. Use a
    # MobileNetV2 ImageNet checkpoint (or pretrained=None); confirm intent.
    pretrained='https://download.openmmlab.com/mmpose/'
    'pretrain_models/hrnet_w32-36af842e.pth',
    # BUGFIX: the original wrote backbone=dict(backbone=dict(type=...)) — a
    # doubly-nested dict whose outer level has no 'type' key, so the builder
    # cannot construct the module. The backbone spec belongs directly here.
    backbone=dict(type='MobileNetV2', widen_factor=1., out_indices=(7, )),
    keypoint_head=dict(
        type='CIDHead',
        # NOTE(review): in_channels must equal the backbone's output width;
        # verify that 480 matches MobileNetV2 at out_indices=(7, ).
        in_channels=480,
        gfd_channels=32,
        num_joints=17,
        multi_hm_loss_factor=1.0,
        single_hm_loss_factor=4.0,
        contrastive_loss_factor=1.0,
        max_train_instances=200,
        prior_prob=0.01),
    train_cfg=dict(),
    test_cfg=dict(
        num_joints=channel_cfg['dataset_joints'],
        flip_test=True,
        max_num_people=30,
        detection_threshold=0.01,
        center_pool_kernel=3))

# Training-time augmentation and target generation.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='BottomUpRandomAffine',
        rot_factor=30,
        scale_factor=[0.75, 1.5],
        scale_type='short',
        trans_factor=40),
    dict(type='BottomUpRandomFlip', flip_prob=0.5),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],  # ImageNet statistics
        std=[0.229, 0.224, 0.225]),
    dict(
        type='CIDGenerateTarget',
        max_num_people=30,
    ),
    dict(
        type='Collect',
        keys=[
            'img', 'multi_heatmap', 'multi_mask', 'instance_coord',
            'instance_heatmap', 'instance_mask', 'instance_valid'
        ],
        meta_keys=[]),
]

# Validation: single-scale resize-align, no augmentation.
val_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='BottomUpGetImgSize', test_scale_factor=[1]),
    dict(
        type='BottomUpResizeAlign',
        transforms=[
            dict(type='ToTensor'),
            dict(
                type='NormalizeTensor',
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225]),
        ]),
    dict(
        type='Collect',
        keys=['img'],
        meta_keys=[
            'image_file', 'aug_data', 'test_scale_factor', 'base_size',
            'center', 'scale', 'flip_index'
        ]),
]

test_pipeline = val_pipeline

data_root = '/shared/home/navermind/coco'

data = dict(
    workers_per_gpu=2,
    train_dataloader=dict(samples_per_gpu=20),
    val_dataloader=dict(samples_per_gpu=1),
    test_dataloader=dict(samples_per_gpu=1),
    train=dict(
        type='BottomUpCocoDataset',
        ann_file=f'{data_root}/annotations_trainval2017/annotations/person_keypoints_train2017.json',
        img_prefix=f'{data_root}/train2017/train2017',
        data_cfg=data_cfg,
        pipeline=train_pipeline,
        # {{_base_.var}} is mmcv config-inheritance substitution, resolved at
        # config-load time (not valid plain-Python evaluation).
        dataset_info={{_base_.dataset_info}}
    ),
    val=dict(
        type='BottomUpCocoDataset',
        ann_file=f'{data_root}/annotations_trainval2017/annotations/person_keypoints_val2017.json',
        img_prefix=f'{data_root}/val2017/val2017',
        data_cfg=data_cfg,
        pipeline=val_pipeline,
        dataset_info={{_base_.dataset_info}}
    ),
    test=dict(
        type='BottomUpCocoDataset',
        ann_file=f'{data_root}/annotations_trainval2017/annotations/person_keypoints_val2017.json',
        img_prefix=f'{data_root}/val2017/val2017',
        data_cfg=data_cfg,
        pipeline=test_pipeline,
        dataset_info={{_base_.dataset_info}}
    )
)
Loading

0 comments on commit 5705dab

Please sign in to comment.