diff --git a/trojanvision/attacks/backdoor/normal/imc.py b/trojanvision/attacks/backdoor/normal/imc.py
index 7bade751..beb3d395 100644
--- a/trojanvision/attacks/backdoor/normal/imc.py
+++ b/trojanvision/attacks/backdoor/normal/imc.py
@@ -66,7 +66,7 @@ def optimize_mark(self, loss_fn: Callable[..., torch.Tensor] = None, **kwargs):
         r"""Optimize watermark at the beginning of each training epoch."""
         loss_fn = loss_fn or self.model.loss
-        atanh_mark = torch.randn_like(self.mark.mark[:-1], requires_grad=True)
+        atanh_mark = self.mark.mark[:-1].mul(2).sub(1).mul(0.999).atanh().detach().requires_grad_()
         optimizer = optim.Adam([atanh_mark], lr=self.attack_remask_lr)
         optimizer.zero_grad()
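
The changed line initializes the optimization variable from the current mark (values in [0, 1]) mapped into atanh space, instead of starting from random noise, so that a later tanh remapping stays consistent with the existing mark. Below is a minimal standalone sketch of that reparameterization; the tensor shape and the round-trip check are illustrative assumptions, not part of the patch.

import torch

# Hypothetical mark tensor with values in [0, 1] (shape chosen for illustration only).
mark = torch.rand(3, 32, 32)

# Map [0, 1] -> [-1, 1], shrink by 0.999 to avoid atanh(+-1) = +-inf,
# then move into unconstrained atanh space and make it a leaf variable.
atanh_mark = mark.mul(2).sub(1).mul(0.999).atanh().detach().requires_grad_()

# During optimization, tanh maps the variable back into the valid pixel range.
recovered = atanh_mark.tanh().add(1).div(2)
assert torch.allclose(recovered, mark, atol=1e-3)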