diff --git a/piglot/objective.py b/piglot/objective.py index 21cf0fb..183c25d 100644 --- a/piglot/objective.py +++ b/piglot/objective.py @@ -218,11 +218,11 @@ def scalarise(self, composition: Composition = None) -> float: """ if composition is None: # Sanitise scalarisation method - if self.scalarisation not in ['mean', 'stch']: + if self.scalarisation not in ('mean', 'stch', 'linear'): raise ValueError( - f"Invalid scalarisation '{self.scalarisation}'. Use 'mean' or 'stch'." + f"Invalid scalarisation '{self.scalarisation}'. Use 'mean', 'stch' or 'linear'." ) - if self.scalarisation == "stch": + if self.scalarisation in ('stch', 'linear'): # Sanitise the weights weights = np.array(self.weights) if np.sum(weights) != 1: @@ -232,16 +232,19 @@ def scalarise(self, composition: Composition = None) -> float: # Set the bounds and types bounds = np.array(self.bounds) types = np.array(self.types) - # Calculate the costs and ideal point + # Calculate the costs costs = np.where(types, -1, 1) - ideal_point = np.where(types, 1, 0) - # Smoothing parameter for STCH - u = 0.01 # Calculate the normalised objective values norm_funcs = self.normalise_objective(values, bounds) - # Calculate the Tchebycheff function value - tch_values = (np.abs((norm_funcs - ideal_point) * costs) / u) * weights - return np.log(np.sum(np.exp(tch_values))) * u + if self.scalarisation == 'stch': + # Calculate the ideal point + ideal_point = np.where(types, 1, 0) + # Smoothing parameter for STCH + u = 0.01 + # Calculate the Tchebycheff function value + tch_values = (np.abs((norm_funcs - ideal_point) * costs) / u) * weights + return np.log(np.sum(np.exp(tch_values))) * u + return np.sum(norm_funcs * weights) return np.mean(self.values) return composition.composition(self.values, self.params).item() diff --git a/piglot/objectives/design.py b/piglot/objectives/design.py index ff9f352..84b8bb2 100644 --- a/piglot/objectives/design.py +++ b/piglot/objectives/design.py @@ -189,8 +189,10 @@ def __composition( 
"All targets must have a number of points specified for the composition." ) # Sanitise scalarisation method - if scalarisation not in ['mean', 'stch']: - raise ValueError(f"Invalid scalarisation '{scalarisation}'. Use 'mean' or 'stch'.") + if scalarisation not in ('mean', 'stch', 'linear'): + raise ValueError( + f"Invalid scalarisation '{scalarisation}'. Use 'mean', 'stch' or 'linear'." + ) return ResponseComposition( scalarise=scalarise, stochastic=stochastic, @@ -242,8 +244,10 @@ def _objective(self, values: np.ndarray, concurrent: bool = False) -> ObjectiveR Objective result. """ # Sanitise scalarisation method - if self.scalarisation not in ['mean', 'stch']: - raise ValueError(f"Invalid scalarisation '{self.scalarisation}'. Use 'mean' or 'stch'.") + if self.scalarisation not in ('mean', 'stch', 'linear'): + raise ValueError( + f"Invalid scalarisation '{self.scalarisation}'. Use 'mean', 'stch' or 'linear'." + ) raw_responses = self.solver.solve(values, concurrent) # Transform responses diff --git a/piglot/utils/composition/responses.py b/piglot/utils/composition/responses.py index 5739944..94e87c8 100644 --- a/piglot/utils/composition/responses.py +++ b/piglot/utils/composition/responses.py @@ -407,8 +407,10 @@ def composition_torch(self, inner: torch.Tensor, params: torch.Tensor) -> torch. Composition result. """ # Sanitise the scalarisation method - if self.scalarise and self.scalarisation not in ['mean', 'stch']: - raise ValueError(f"Invalid scalarisation '{self.scalarisation}'. Use 'mean' or 'stch'.") + if self.scalarise and self.scalarisation not in ('mean', 'stch', 'linear'): + raise ValueError( + f"Invalid scalarisation '{self.scalarisation}'. Use 'mean', 'stch' or 'linear'." + ) # Split the inner responses responses = self.concat.split_torch(inner) # Unflatten each response @@ -423,10 +425,11 @@ def composition_torch(self, inner: torch.Tensor, params: torch.Tensor) -> torch. 
], dim=-1) # Mean scalarisation if requested if self.scalarise: - # Smooth TCHebycheff scalarisation (STCH) if requested - if self.scalarisation == 'stch': + # Build the weights tensor on the same device as the inputs + weights = torch.tensor(self.weights).to(inner.device) + # Smooth TCHebycheff scalarisation (STCH) or linear if requested + if self.scalarisation in ('stch', 'linear'): # Sanitise the weights - weights = torch.tensor(self.weights).to(inner.device) if torch.sum(weights) != 1.0: raise ValueError(f'Weights must sum to 1.0, got {torch.sum(weights)}.') # Set all the objectives to be positive @@ -434,18 +437,21 @@ def composition_torch(self, inner: torch.Tensor, params: torch.Tensor) -> torch. # Set the bounds and types bounds = torch.tensor(self.bounds).to(inner.device) types = torch.tensor(self.types).to(inner.device) - # Calculate the costs and ideal point + # Calculate the costs costs = torch.where(types, torch.tensor(-1.0), torch.tensor(1.0)) - ideal_point = torch.where(types, torch.tensor(1.0), torch.tensor(0.0)) - # Smoothing parameter for STCH - u = 0.01 # Calculate the normalised objective values norm_objective = self.normalise_objective(objective, bounds) - # Calculate the Tchebycheff function value - tch_values = (torch.abs((norm_objective - ideal_point) * costs) / u) * weights - return torch.log(torch.sum(torch.exp(tch_values), dim=-1)) * u + if self.scalarisation == 'stch': + # Calculate the ideal point + ideal_point = torch.where(types, torch.tensor(1.0), torch.tensor(0.0)) + # Smoothing parameter for STCH + u = 0.01 + # Calculate the Tchebycheff function value + tch_values = (torch.abs((norm_objective - ideal_point) * costs) / u) * weights + return torch.log(torch.sum(torch.exp(tch_values), dim=-1)) * u + return torch.sum(norm_objective * weights, dim=-1) # Mean scalarisation otherwise # Apply the weights - objective = objective * torch.tensor(self.weights).to(inner.device) + objective = objective * weights return torch.mean(objective, dim=-1) return objective * 
torch.tensor(self.weights).to(inner.device)