# dpt_vit-b16_kitti.py
_base_ = [
    '../_base_/models/dpt.py', '../_base_/datasets/kitti.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_24x.py'
]
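# Override the inherited DPT model: the ViT-B/16 backbone is initialised from
# the jx_vit_base_p16_224 checkpoint (the JAX-ported weights used by timm),
# and predicted depth is clamped to the 1e-3 .. 80 m range used for KITTI.
# SigLoss is the toolbox's scale-invariant log-depth loss, computed only on
# pixels with valid LiDAR ground truth (valid_mask=True).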
model = dict(
    pretrained='nfs/checkpoints/jx_vit_base_p16_224-80ecf9dd.pth',
    decode_head=dict(
        min_depth=1e-3,
        max_depth=80,
        loss_decode=dict(
            type='SigLoss', valid_mask=True, loss_weight=1.0, warm_up=True),
    )
)
# AdamW optimizer: no weight decay for the position embedding, class token or
# layer norms in the backbone, and a 10x smaller learning rate for the backbone.
max_lr = 1e-4
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=max_lr,
    betas=(0.9, 0.999),
    weight_decay=0.01,
    paramwise_cfg=dict(
        custom_keys={
            'pos_embed': dict(decay_mult=0.),
            'cls_token': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.),
            'backbone': dict(lr_mult=0.1),
        }))
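# With mmcv's paramwise_cfg, custom_keys are matched as substrings of parameter
# names: decay_mult=0. disables weight decay for the matched parameters, and
# lr_mult=0.1 trains the backbone at max_lr * 0.1 = 1e-5 while the decode head
# uses the full max_lr. _delete_=True replaces the optimizer inherited from the
# base schedule instead of merging into it.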
lr_config = dict(
    policy='OneCycle',
    max_lr=max_lr,
    div_factor=25,
    final_div_factor=100,
    by_epoch=False,
)
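# The OneCycle policy ramps the learning rate from max_lr / div_factor (4e-6)
# up to max_lr and back down to max_lr / div_factor / final_div_factor (4e-8)
# over the run, stepping per iteration (by_epoch=False); the matching
# momentum_config below cycles momentum inversely with the learning rate.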
momentum_config = dict(
    policy='OneCycle'
)
evaluation = dict(interval=1)
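# A mean/std of 127.5 rescales images from [0, 255] to roughly [-1, 1],
# matching the 0.5/0.5 normalisation the jx_vit ImageNet checkpoints were
# trained with (an assumption based on the checkpoint name above).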
img_norm_cfg = dict(
    mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
crop_size = (352, 704)
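# Training pipeline: KBCrop applies the fixed KITTI benchmark crop
# (352 x 1216, as in BTS) to image and depth map before the random rotation,
# flip, 352 x 704 crop and mild colour jitter; normalisation and tensor
# formatting come last.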
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='DepthLoadAnnotations'),
    dict(type='LoadKITTICamIntrinsic'),
    dict(type='KBCrop', depth=True),
    dict(type='RandomRotate', prob=0.5, degree=2.5),
    dict(type='RandomFlip', prob=0.5),
    dict(type='RandomCrop', crop_size=crop_size),
    dict(type='ColorAug', prob=0.5, gamma_range=[0.9, 1.1],
         brightness_range=[0.9, 1.1], color_range=[0.9, 1.1]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect',
         keys=['img', 'depth_gt'],
         meta_keys=('filename', 'ori_filename', 'ori_shape',
                    'img_shape', 'pad_shape', 'scale_factor',
                    'flip', 'flip_direction', 'img_norm_cfg',
                    'cam_intrinsic')),
]
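# Test pipeline: no depth is loaded; MultiScaleFlipAug with flip=True feeds the
# KB-cropped 1216 x 352 image both as-is and horizontally flipped, so the model
# can average predictions over the two views (the usual flip test-time
# augmentation).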
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadKITTICamIntrinsic'),
    dict(type='KBCrop', depth=False),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1216, 352),
        flip=True,
        flip_direction='horizontal',
        transforms=[
            dict(type='RandomFlip', direction='horizontal'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect',
                 keys=['img'],
                 meta_keys=('filename', 'ori_filename', 'ori_shape',
                            'img_shape', 'pad_shape', 'scale_factor',
                            'flip', 'flip_direction', 'img_norm_cfg',
                            'cam_intrinsic')),
        ])
]
# By default, models are trained on 8 GPUs with 2 images per GPU
# (effective batch size 16).
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        pipeline=train_pipeline),
    val=dict(
        pipeline=test_pipeline),
    test=dict(
        pipeline=test_pipeline))
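# A typical distributed launch for this config (assuming the toolbox's
# mmseg-style tools/ scripts; the config path is illustrative):
#   bash ./tools/dist_train.sh configs/dpt/dpt_vit-b16_kitti.py 8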