-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmain.py
159 lines (130 loc) · 5.94 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
import argparse
import copy
import os
import warnings
import mmcv
import torch
from mmcv import Config
from mmcv.runner import init_dist
from mmdet.apis import set_random_seed, train_detector
from mmdet.utils import collect_env, get_device, get_root_logger, setup_multi_processes
from nets.nn import build_detector
from utils.dataset import build_dataset
warnings.filterwarnings("ignore")
def train(args):
    """Train a detector from an mmcv config file.

    Builds the config, working directory, logger, model and dataset(s),
    then hands off to ``mmdet``'s ``train_detector``.

    Args:
        args: parsed CLI namespace; reads ``args.config`` (config path),
            ``args.distributed`` (bool) and ``args.world_size`` (int).
    """
    cfg = Config.fromfile(args.config)
    # set multiprocess settings
    setup_multi_processes(cfg)
    # experiment name = config filename without extension; also used for
    # the work dir and the log file name below
    exp_name = os.path.splitext(os.path.basename(args.config))[0]
    cfg.work_dir = os.path.join('./weights', exp_name)
    cfg.gpu_ids = [0]
    # init distributed env first, since logger depends on the dist info.
    if args.distributed:
        init_dist('pytorch', **cfg.dist_params)
        cfg.gpu_ids = range(args.world_size)
    # create work_dir
    mmcv.mkdir_or_exist(os.path.abspath(cfg.work_dir))
    # dump config (a copy of the config is stored next to the weights)
    cfg.dump(os.path.join(cfg.work_dir, os.path.basename(args.config)))
    # init the logger before other steps
    log_file = os.path.join(cfg.work_dir, f'{exp_name}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info = '\n'.join([f'{k}: {v}' for k, v in collect_env().items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' + dash_line)
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text
    # log some basic info
    logger.info(f'Distributed training: {args.distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')
    cfg.device = get_device()
    # seed is fixed to 0 here; the global RNG seeding happens in main()
    cfg.seed = 0
    meta['seed'] = 0
    meta['exp_name'] = os.path.basename(args.config)
    model = build_detector(cfg.model, cfg.get('train_cfg'), cfg.get('test_cfg'))
    model.init_weights()
    datasets = [build_dataset(cfg.data.train)]
    # a 2-stage workflow presumably means [('train', ...), ('val', ...)];
    # the val split reuses the train pipeline — TODO(review): confirm intended
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmdet_version=collect_env()['MMDetection'],
                                          CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    # NOTE(review): the positional True looks like train_detector's
    # `validate` flag — confirm against the mmdet version in use
    train_detector(model, datasets, cfg, args.distributed, True, exp_name, meta)
def test(args):
    """Evaluate a trained detector on the config's test split.

    Loads ``./weights/<config_name>/<config_name>.pth``, runs single- or
    multi-GPU inference, prints evaluation metrics and dumps formatted
    results next to the checkpoint.

    Args:
        args: parsed CLI namespace; reads ``args.config`` and
            ``args.distributed``.
    """
    def validate(val_config, val_outputs, val_dataset):
        # Build evaluate() kwargs from cfg.evaluation, stripping options
        # that belong to the training-time EvalHook, not to evaluate().
        kwargs = val_config.get('evaluation', {}).copy()
        # hard-code way to remove EvalHook args
        for k in ['interval', 'tmpdir', 'start', 'gpu_collect',
                  'save_best', 'rule', 'dynamic_intervals']:
            kwargs.pop(k, None)
        print(val_dataset.evaluate(val_outputs, **kwargs))
    # test-only imports kept local so training runs don't pay for them
    from mmdet.apis import multi_gpu_test, single_gpu_test
    from mmdet.datasets import build_dataloader, replace_ImageToTensor
    from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
    from mmcv.runner import get_dist_info, load_checkpoint, wrap_fp16_model
    cfg = Config.fromfile(args.config)
    # set multiprocess settings
    setup_multi_processes(cfg)
    # in case the test dataset is concatenated
    samples_per_gpu = cfg.data.samples_per_gpu
    if isinstance(cfg.data.test, dict):
        cfg.data.test.test_mode = True
        if samples_per_gpu > 1:
            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
            cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    if args.distributed:
        # init distributed env first, since logger depends on the dist info.
        init_dist('pytorch', **cfg.dist_params)
    # build the dataloader (shuffle=False so results align with the dataset)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   samples_per_gpu=samples_per_gpu,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=args.distributed, shuffle=False)
    # build the model and load checkpoint; train_cfg is dropped for inference
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    if cfg.get('fp16', None) is not None:
        wrap_fp16_model(model)
    config_name = os.path.basename(args.config)
    config_name = os.path.splitext(config_name)[0]
    # checkpoint path is derived from the config name (see train())
    checkpoint = load_checkpoint(model, f'./weights/{config_name}/{config_name}.pth', 'cpu')
    # class names were stored in the checkpoint meta by train()
    model.CLASSES = checkpoint['meta']['CLASSES']
    if args.distributed:
        model = MMDistributedDataParallel(model.cuda(),
                                          device_ids=[torch.cuda.current_device()],
                                          broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, gpu_collect=True)
        # only rank 0 evaluates and writes result files
        if get_dist_info()[0] == 0:
            validate(cfg, outputs, dataset)
            dataset.format_results(outputs, jsonfile_prefix=f"./weights/{config_name}/{config_name}")
    else:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader)
        validate(cfg, outputs, dataset)
        dataset.format_results(outputs, jsonfile_prefix=f"./weights/{config_name}/{config_name}")
def main():
    """CLI entry point: parse arguments, seed RNGs, dispatch to train/test.

    Flags:
        config        positional path to the mmcv config file.
        --train       run training.
        --test        run evaluation (may be combined with --train).
        --local_rank  accepted for compatibility with torch.distributed.launch;
                      not read directly here.

    The distributed launcher communicates the process count via the
    WORLD_SIZE environment variable.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--train', action='store_true')
    parser.add_argument('--test', action='store_true')
    parser.add_argument('--local_rank', default=0, type=int)
    args = parser.parse_args()
    args.world_size = int(os.getenv('WORLD_SIZE', 1))
    # Derive the flag from the value we just parsed instead of re-reading the
    # environment — keeps world_size and distributed consistent by construction.
    args.distributed = args.world_size > 1
    # Fixed seed for reproducibility across runs.
    set_random_seed(0, deterministic=True)
    if args.train:
        train(args)
    if args.test:
        test(args)


if __name__ == '__main__':
    main()