-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathloss.py
115 lines (86 loc) · 3.5 KB
/
loss.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
#coding: utf-8
import chainer.functions as F
from chainer import Variable, grad, report
from chainer.backends import cuda
def compute_grad(d_out, x):
    """Zero-centered gradient penalty term (R1/R2, Mescheder et al. 2018).

    Computes E_batch[ sum_i (d ``d_out``/d ``x``)_i^2 ], i.e. the batch mean
    of the squared L2 norm of the gradient of the discriminator output with
    respect to its input.

    Args:
        d_out: discriminator output, assumed shape (batch, C, H, W)
            — TODO confirm against callers.
        x: the input Variable the gradient is taken w.r.t. (real or fake
            images); must be part of the graph that produced ``d_out``.

    Returns:
        A scalar chainer Variable (safe to add to a scalar adversarial loss).
    """
    d_out_sum = F.mean(d_out, axis=(1, 2, 3))
    # enable_double_backprop so the penalty itself can be backpropagated.
    gradient = grad([d_out_sum], [x], enable_double_backprop=True)[0]
    # BUGFIX: the original summed the gradient first and squared the sum,
    # (sum g)^2 instead of sum(g^2) — sign cancellation made the penalty
    # nearly vanish — and returned a per-sample vector, which broke the
    # scalar loss (and float() conversion in l2f) for batch sizes > 1.
    per_sample = F.sum(gradient * gradient, axis=(1, 2, 3))
    return F.mean(per_sample)
def l2f(loss):
    """Convert a loss value (chainer Variable or plain scalar) to a float."""
    value = loss.array if isinstance(loss, Variable) else loss
    return float(value)
def dis_loss(opt, real_d, fake_d, real_g, fake_g, observer=None, tag=str()):
    """Discriminator loss: adversarial term plus zero-centered gradient penalty.

    Args:
        opt: options object; reads ``zero_gp_mode`` ('real'/'fake'/'real_fake'),
            ``gp_coef`` and ``adv_loss_mode`` ('wgan'/'bce'/'mse'/'hinge').
        real_d, fake_d: discriminator outputs for real / generated samples.
        real_g, fake_g: the inputs those outputs were computed from (used as
            gradient-penalty targets).
        observer: optional link to report scalar metrics to.
        tag: optional prefix for the reported metric names.

    Returns:
        Scalar loss Variable (adversarial loss + gradient penalty).
    """
    mode = opt.adv_loss_mode
    # Gradient penalty on real and/or fake inputs, per opt.zero_gp_mode.
    real_gp = 0
    fake_gp = 0
    if opt.zero_gp_mode in ('real', 'real_fake'):
        real_gp = opt.gp_coef * compute_grad(real_d, real_g)
    if opt.zero_gp_mode in ('fake', 'real_fake'):
        fake_gp = opt.gp_coef * compute_grad(fake_d, fake_g)
    # Adversarial term. WGAN uses the raw critic difference; the other modes
    # average a real-side and a fake-side loss (and halve the penalty too).
    real_loss = 0
    fake_loss = 0
    if mode == 'wgan':
        adv_loss = -F.mean(real_d - fake_d)
        gp = real_gp + fake_gp
    else:
        if mode == 'bce':
            # softplus(-x) == -log(sigmoid(x)): non-saturating BCE form.
            real_loss = F.mean(F.softplus(-real_d))
            fake_loss = F.mean(F.softplus(fake_d))
        elif mode == 'mse':
            # LSGAN: regress real logits to 1, fake logits to 0.
            xp = cuda.get_array_module(real_d.array)
            real_loss = F.mean_squared_error(real_d, xp.ones_like(real_d.array))
            fake_loss = F.mean_squared_error(fake_d, xp.zeros_like(fake_d.array))
        elif mode == 'hinge':
            real_loss = F.mean(F.relu(1.0 - real_d))
            fake_loss = F.mean(F.relu(1.0 + fake_d))
        adv_loss = (real_loss + fake_loss) * 0.5
        gp = (real_gp + fake_gp) * 0.5
    loss = adv_loss + gp
    if observer is not None:
        prefix = tag + '_' if tag else tag
        report({prefix + 'loss': l2f(loss),
                prefix + 'adv_loss': l2f(adv_loss),
                prefix + 'real_loss': l2f(real_loss),
                prefix + 'fake_loss': l2f(fake_loss),
                prefix + 'gp': l2f(gp),
                prefix + 'adv_loss_with_gp': l2f(adv_loss + gp)}, observer=observer)
    return loss
def gen_loss(opt, fake_d, real_g, fake_g, real_d_fm, fake_d_fm, perceptual_func=None, observer=None, tag=str()):
    """Generator loss: adversarial + feature matching + optional perceptual term.

    Args:
        opt: options object; reads ``adv_loss_mode`` ('bce'/'mse'/'hinge'/'wgan')
            and ``fm_coef`` (feature-matching weight, 0 disables).
        fake_d: discriminator output for generated samples.
        real_g, fake_g: real and generated images (fed to ``perceptual_func``).
        real_d_fm, fake_d_fm: lists of intermediate discriminator feature maps
            for real / fake inputs, matched layer by layer.
        perceptual_func: optional callable ``(real, fake) -> Variable``.
        observer: optional link to report scalar metrics to.
        tag: optional prefix for the reported metric names.

    Returns:
        Scalar loss Variable.
    """
    mode = opt.adv_loss_mode
    # Adversarial term: make the discriminator score fakes as real.
    fake_loss = 0
    if mode == 'bce':
        fake_loss = F.mean(F.softplus(-fake_d))
    elif mode == 'mse':
        xp = cuda.get_array_module(fake_d.array)
        fake_loss = F.mean_squared_error(fake_d, xp.ones_like(fake_d.array))
    elif mode in ('hinge', 'wgan'):
        # Hinge and WGAN share the same generator-side objective.
        fake_loss = -F.mean(fake_d)
    adv_loss = fake_loss
    # Feature matching: per-layer L1 between discriminator features; the
    # real-side features are detached (.array) so no gradient flows to them.
    fm_loss = 0
    if opt.fm_coef != 0:
        n_layers = len(fake_d_fm)
        for real_feat, fake_feat in zip(real_d_fm, fake_d_fm):
            fm_loss += opt.fm_coef * F.mean_absolute_error(real_feat.array, fake_feat) / n_layers
    perceptual_loss = 0
    if perceptual_func is not None:
        perceptual_loss = perceptual_func(real_g, fake_g)
    loss = adv_loss + fm_loss + perceptual_loss
    if observer is not None:
        prefix = tag + '_' if tag else tag
        report({prefix + 'loss': l2f(loss),
                prefix + 'adv_loss': l2f(adv_loss),
                prefix + 'fake_loss': l2f(fake_loss),
                prefix + 'fm_loss': l2f(fm_loss),
                prefix + 'perceptual_loss': l2f(perceptual_loss)}, observer=observer)
    return loss