Add loss logging to trainable models (#804)
ashwinvaidya17 authored Dec 20, 2022
1 parent 8535ada commit f5893b7
Showing 7 changed files with 8 additions and 0 deletions.
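
Every changed model receives essentially the same one-line addition in its training_step: the computed loss is passed to Lightning's self.log, which aggregates it over the epoch (on_epoch=True), shows it in the progress bar (prog_bar=True), and forwards it to the attached logger (logger=True). The following is a minimal, self-contained sketch of that pattern outside anomalib; the module, data, and optimizer are illustrative assumptions, not code from this commit.

    import torch
    from torch import nn
    from torch.utils.data import DataLoader, TensorDataset
    import pytorch_lightning as pl


    class ToyModule(pl.LightningModule):
        """Illustrative module; only the self.log call mirrors the additions below."""

        def __init__(self):
            super().__init__()
            self.layer = nn.Linear(8, 1)

        def training_step(self, batch, _):
            inputs, targets = batch
            loss = nn.functional.mse_loss(self.layer(inputs), targets)
            # Same pattern as the diffs below: epoch-level aggregation, progress-bar
            # display, and forwarding to whichever logger the Trainer is using.
            self.log("train_loss", loss.item(), on_epoch=True, prog_bar=True, logger=True)
            return {"loss": loss}

        def configure_optimizers(self):
            return torch.optim.SGD(self.parameters(), lr=0.01)


    if __name__ == "__main__":
        data = TensorDataset(torch.randn(64, 8), torch.randn(64, 1))
        trainer = pl.Trainer(max_epochs=1, enable_checkpointing=False)
        trainer.fit(ToyModule(), DataLoader(data, batch_size=16))
        # The logged values can be read back from the trainer after fitting.
        print({k: float(v) for k, v in trainer.callback_metrics.items()})
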
CHANGELOG.md (1 addition, 0 deletions)
@@ -8,6 +8,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).

### Added

- Log loss for existing trainable models (<https://github.com/openvinotoolkit/anomalib/pull/804>)
- Add section for community project (<https://github.com/openvinotoolkit/anomalib/pull/768>)
- ✨ Add torchfx feature extractor (<https://github.com/openvinotoolkit/anomalib/pull/675>)
- Add tiling notebook (<https://github.com/openvinotoolkit/anomalib/pull/712>)
anomalib/models/cflow/lightning_model.py (1 addition, 0 deletions)
@@ -150,6 +150,7 @@ def training_step(self, batch, _): # pylint: disable=arguments-differ
opt.step()
avg_loss += loss.sum()

self.log("train_loss", avg_loss.item(), on_epoch=True, prog_bar=True, logger=True)
return {"loss": avg_loss}

def validation_step(self, batch, _): # pylint: disable=arguments-differ
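Of the seven files, the CFlow change above is the only one where the logged value is an accumulated loss: that training_step drives the optimizer itself, summing the loss across its inner optimization loop before logging avg_loss once per batch. A hedged, self-contained sketch of that shape using Lightning's manual optimization (illustrative only, not the CFlow implementation):

    import torch
    from torch import nn
    import pytorch_lightning as pl


    class ManualOptToy(pl.LightningModule):
        def __init__(self):
            super().__init__()
            self.automatic_optimization = False  # step the optimizer by hand
            self.layer = nn.Linear(8, 1)

        def training_step(self, batch, _):
            opt = self.optimizers()
            inputs, targets = batch
            avg_loss = torch.zeros(1, device=self.device)
            # Several optimizer steps per batch, mirroring the accumulated-loss shape.
            for chunk_in, chunk_tgt in zip(inputs.split(4), targets.split(4)):
                opt.zero_grad()
                loss = nn.functional.mse_loss(self.layer(chunk_in), chunk_tgt)
                self.manual_backward(loss)
                opt.step()
                avg_loss += loss.detach()
            # Log the accumulated value once, as in the CFlow diff above.
            self.log("train_loss", avg_loss.item(), on_epoch=True, prog_bar=True, logger=True)
            return {"loss": avg_loss}

        def configure_optimizers(self):
            return torch.optim.SGD(self.parameters(), lr=0.01)

    # Usage mirrors the earlier sketch: pl.Trainer(max_epochs=1).fit(ManualOptToy(), dataloader)
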
anomalib/models/draem/lightning_model.py (2 additions, 0 deletions)
@@ -90,6 +90,8 @@ def training_step(self, batch, _): # pylint: disable=arguments-differ
loss += self.sspcab_lambda * self.sspcab_loss(
self.sspcab_activations["input"], self.sspcab_activations["output"]
)

self.log("train_loss", loss.item(), on_epoch=True, prog_bar=True, logger=True)
return {"loss": loss}

def validation_step(self, batch, _):
anomalib/models/fastflow/lightning_model.py (1 addition, 0 deletions)
@@ -62,6 +62,7 @@ def training_step(self, batch, _): # pylint: disable=arguments-differ
"""
hidden_variables, jacobians = self.model(batch["image"])
loss = self.loss(hidden_variables, jacobians)
self.log("train_loss", loss.item(), on_epoch=True, prog_bar=True, logger=True)
return {"loss": loss}

def validation_step(self, batch, _): # pylint: disable=arguments-differ
anomalib/models/ganomaly/lightning_model.py (1 addition, 0 deletions)
@@ -130,6 +130,7 @@ def training_step(self, batch, _, optimizer_idx): # pylint: disable=arguments-differ
pred_fake, _ = self.model.discriminator(fake)
loss = self.generator_loss(latent_i, latent_o, padded, fake, pred_real, pred_fake)

self.log("train_loss", loss.item(), on_epoch=True, prog_bar=True, logger=True)
return {"loss": loss}

def on_validation_start(self) -> None:
anomalib/models/reverse_distillation/lightning_model.py (1 addition, 0 deletions)
@@ -89,6 +89,7 @@ def training_step(self, batch, _) -> Dict[str, Tensor]: # type: ignore
Feature Map
"""
loss = self.loss(*self.model(batch["image"]))
self.log("train_loss", loss.item(), on_epoch=True, prog_bar=True, logger=True)
return {"loss": loss}

def validation_step(self, batch, _): # pylint: disable=arguments-differ
anomalib/models/stfpm/lightning_model.py (1 addition, 0 deletions)
@@ -61,6 +61,7 @@ def training_step(self, batch, _): # pylint: disable=arguments-differ
self.model.teacher_model.eval()
teacher_features, student_features = self.model.forward(batch["image"])
loss = self.loss(teacher_features, student_features)
self.log("train_loss", loss.item(), on_epoch=True, prog_bar=True, logger=True)
return {"loss": loss}

def validation_step(self, batch, _): # pylint: disable=arguments-differ
