train.py
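# Usage sketch (the model script path below is illustrative, not a real file in
# this snippet): train a model on a single GPU with
#
#     python train.py models/my_model.py --gpus=0 --workers=4 --exp-name=first-run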
import argparse
import importlib.util
import os

import torch

from isegm.utils.exp import init_experiment


def main():
    args = parse_args()

    # Load the model script either from the temporary copy (used internally)
    # or from the user-provided path.
    if args.temp_model_path:
        model_script = load_module(args.temp_model_path)
    else:
        model_script = load_module(args.model_path)

    model_base_name = getattr(model_script, 'MODEL_NAME', None)

    # Distributed training is assumed whenever the launcher exports WORLD_SIZE.
    args.distributed = 'WORLD_SIZE' in os.environ
    cfg = init_experiment(args, model_base_name)

    torch.backends.cudnn.benchmark = True
    torch.multiprocessing.set_sharing_strategy('file_system')

    model_script.main(cfg)


def parse_args():
    parser = argparse.ArgumentParser()

    parser.add_argument('model_path', type=str, help='Path to the model script.')

    parser.add_argument(
        '--exp-name',
        type=str,
        default='',
        help='Here you can specify the name of the experiment. '
             'It will be added as a suffix to the experiment folder.')

    parser.add_argument('--workers', type=int, default=4, metavar='N', help='Dataloader threads.')

    parser.add_argument(
        '--batch-size',
        type=int,
        default=-1,
        help='You can override the model batch size by specifying a positive number.')

    parser.add_argument(
        '--ngpus',
        type=int,
        default=1,
        help='Number of GPUs. '
             'If you only specify the "--gpus" argument, the ngpus value will be calculated automatically. '
             'You should use either this argument or "--gpus".')

    parser.add_argument(
        '--gpus',
        type=str,
        default='',
        required=False,
        help='IDs of the GPUs to use. You should use either this argument or "--ngpus".')

    parser.add_argument(
        '--resume-exp',
        type=str,
        default=None,
        help='The prefix of the name of the experiment to be continued. '
             'If you use this field, you must specify the "--resume-prefix" argument.')

    parser.add_argument(
        '--resume-prefix',
        type=str,
        default='latest',
        help='The prefix of the name of the checkpoint to be loaded.')

    parser.add_argument(
        '--start-epoch',
        type=int,
        default=0,
        help='The number of the starting epoch from which training will continue '
             '(important for correct logging and learning rate).')

    parser.add_argument(
        '--weights',
        type=str,
        default=None,
        help='Model weights will be loaded from the specified path if you use this argument.')

    parser.add_argument(
        '--temp-model-path',
        type=str,
        default='',
        help='Do not use this argument (for internal purposes).')

    parser.add_argument('--local_rank', type=int, default=0)

    # Parameters for experimenting
    parser.add_argument(
        '--layerwise-decay',
        action='store_true',
        help='Layer-wise decay for transformer blocks.')

    parser.add_argument('--upsample', type=str, default='x1', help='Upsample the output.')

    parser.add_argument(
        '--random-split',
        action='store_true',
        help='Randomly split the patches instead of using window split.')

    return parser.parse_args()


def load_module(script_path):
    # Import the model script at the given path as a standalone module.
    spec = importlib.util.spec_from_file_location('model_script', script_path)
    model_script = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(model_script)

    return model_script


if __name__ == '__main__':
    main()
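# A minimal multi-GPU sketch, assuming the script is started via the PyTorch
# distributed launcher, which exports WORLD_SIZE and passes --local_rank to
# match the checks above (the model script path is again illustrative):
#
#     python -m torch.distributed.launch --nproc_per_node=2 train.py \
#         models/my_model.py --ngpus=2 --batch-size=32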