train_e2v.py (forked from lsying009/V2E2V)
import os
import GPUtil
# Get a list of available GPUs
gpus = GPUtil.getGPUs()
# Select the GPU with the lowest memory utilization
chosen_gpu = None
for gpu in gpus:
    if chosen_gpu is None or gpu.memoryUtil < chosen_gpu.memoryUtil:
        chosen_gpu = gpu
# Restrict CUDA to the selected GPU; this must be set before torch initializes CUDA
os.environ["CUDA_VISIBLE_DEVICES"] = str(chosen_gpu.id)
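
# A shorter alternative (a sketch only, not used here): GPUtil.getAvailable can sort GPUs
# by memory usage directly. Whether its default filters suit this machine is an assumption,
# so the explicit loop above remains the actual selection logic.
#   available = GPUtil.getAvailable(order='memory', limit=1)
#   if available:
#       os.environ["CUDA_VISIBLE_DEVICES"] = str(available[0])
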
import torch
import torch.utils.data as data
from torch import optim, nn
from tensorboardX import SummaryWriter
import numpy as np
import argparse
from utils.configs import set_configs
from data_readers.train_data_loaders import TrainFixNEventData
from e2v.e2v_model import CistaLSTCNet, CistaTCNet
from utils.evaluate import PerceptualLoss
from pytorch_msssim import SSIM

class Train:
    def __init__(self, cfgs, device):
        # self.image_dim = cfgs.image_dim
        self.device = device
        self.model_name = '{}_{}_b{}_d{}_c{}'.format(cfgs.model_name, cfgs.model_mode,
            cfgs.num_bins, cfgs.depth, cfgs.base_channels)
        self.path_to_model = os.path.join(cfgs.path_to_model, self.model_name)
        if not os.path.exists(self.path_to_model):
            os.makedirs(self.path_to_model)

        if cfgs.model_mode == 'cista-lstc':
            self.model = CistaLSTCNet(image_dim=cfgs.image_dim, base_channels=cfgs.base_channels, depth=cfgs.depth, num_bins=cfgs.num_bins)
        elif cfgs.model_mode == 'cista-tc':
            self.model = CistaTCNet(image_dim=cfgs.image_dim, base_channels=cfgs.base_channels, depth=cfgs.depth, num_bins=cfgs.num_bins)
        else:
            raise ValueError("model_mode should be 'cista-lstc' or 'cista-tc'!")
        self.model = self.model.to(device)
        print(self.model)

        # Optionally resume from a saved checkpoint
        if cfgs.load_epoch_for_train:
            checkpoint = torch.load(os.path.join(self.path_to_model, '{}_{}.pth.tar'
                .format(self.model_name, cfgs.load_epoch_for_train)), map_location=self.device)
            self.model.load_state_dict(checkpoint['state_dict'], strict=True)

        # Load training data
        path_to_train_data = cfgs.path_to_train_data
        train_data = TrainFixNEventData(os.path.join(path_to_train_data, 'train_e2v.txt'), cfgs)
        self.train_loader = data.DataLoader(train_data, batch_size=cfgs.batch_size, shuffle=cfgs.shuffle, num_workers=4)

        lr = cfgs.lr * (0.9 ** np.floor(cfgs.load_epoch_for_train / 10.))
        self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
        self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=10, gamma=0.9)
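        # Note: the initial lr above mirrors the StepLR schedule (decay by 0.9 every 10
        # epochs), so resuming from load_epoch_for_train continues the same effective
        # learning-rate curve rather than restarting it from cfgs.lr.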

        # Loss
        self.lpips_loss_fn = PerceptualLoss(net='vgg', device=device)
        self.L1_loss_fn = nn.L1Loss()
        self.ssim_loss_fn = SSIM(data_range=1, size_average=True, channel=1, nonnegative_ssim=False).to(device)

        # Save training results
        self.is_SummaryWriter = cfgs.is_SummaryWriter
        if self.is_SummaryWriter:
            self.writer = SummaryWriter('./summary/{}'.format(cfgs.model_name))

    def run_train(self, cfgs):
        for epoch in range(cfgs.load_epoch_for_train, cfgs.epochs):
            lr = self.scheduler.get_last_lr()[0]
            print('lr:', lr)
            self.train_many_to_one(epoch)
            self.scheduler.step()
            # Save a checkpoint after every epoch
            torch.save({'epoch': epoch, 'state_dict': self.model.state_dict()},
                os.path.join(self.path_to_model, '{}_{}.pth.tar'.format(self.model_name, epoch + 1)))

    def train_many_to_one(self, epoch):
        torch.cuda.empty_cache()
        self.model.train()
        batch_num = len(self.train_loader)

        for batch_idx, train_data in enumerate(self.train_loader):
            seq_event_patch, img_patch, gt_img_patch = train_data
            img_patch = img_patch.to(self.device)
            gt_img_patch = gt_img_patch.to(self.device)

            loss = 0
            state = None
            output = None
            prev_img = None
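            # Many-to-one training: run the reconstruction network recurrently over the
            # event sequence, feeding back the previous output and internal state, and
            # supervise only the final reconstruction against the ground-truth frame.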
            for s in range(len(seq_event_patch)):
                event_patch = seq_event_patch[s].to(self.device)
                if s == 0:
                    prev_img = torch.zeros_like(img_patch)
                    state = None
                output, state = self.model(event_patch, prev_img, state)
                prev_img = output.clone()

            # Combined reconstruction loss: LPIPS + L1 + (1 - SSIM)
            loss_lpips = self.lpips_loss_fn(output, gt_img_patch, normalize=True)
            loss_l1 = self.L1_loss_fn(output, gt_img_patch)
            loss_ssim = 1 - self.ssim_loss_fn(output, gt_img_patch)
            loss = loss_lpips + loss_l1 + loss_ssim

            if self.is_SummaryWriter:
                step = batch_num * epoch + batch_idx
                self.writer.add_scalar('LPIPS', loss_lpips, step)
                self.writer.add_scalar('L1', loss_l1, step)
                self.writer.add_scalar('SSIM', loss_ssim, step)
                self.writer.add_scalar('loss', loss, step)

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            if batch_idx % 50 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tloss: {:.6f}'.format(
                    epoch + 1, batch_idx * self.train_loader.batch_size, len(self.train_loader.dataset),
                    100. * batch_idx / len(self.train_loader), loss.item()))


if __name__ == '__main__':
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print('device: ', device)

    ## config parameters
    parser = argparse.ArgumentParser(description='Training options')
    set_configs(parser)
    cfgs = parser.parse_args()
    cfgs.shuffle = True

    model_train = Train(cfgs, device)
    model_train.run_train(cfgs)
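
# Example invocation (a sketch only: the actual command-line options are defined in
# utils.configs.set_configs, which is not part of this file, so the flag names below
# are assumptions for illustration):
#   python train_e2v.py --model_name e2v --model_mode cista-lstc \
#       --path_to_train_data ./data/train --path_to_model ./checkpoints \
#       --batch_size 8 --epochs 60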