Add support for stochastic MOBO and refactor acquisitions
ruicoelhopedro committed Mar 14, 2024
1 parent 1dd9c6b commit be6e2f9
Showing 3 changed files with 359 additions and 68 deletions.
63 changes: 50 additions & 13 deletions piglot/bin/piglot_plot.py
@@ -337,28 +337,65 @@ def plot_pareto(args):
         raise ValueError("Can only plot the Pareto front for a two-objective optimisation problem.")
     data = objective.get_history()
     fig, ax = plt.subplots()
-    # Read the Pareto front and build the Pareto hull
+    # Read all the points and variances
+    total_points = np.array([entry['values'] for entry in data.values()]).T
+    variances = np.array([entry['variances'] for entry in data.values() if 'variances' in entry]).T
+    has_variance = variances.size > 0
+    # Separate the dominated points
+    dominated = []
+    nondominated = []
     pareto = pd.read_table(os.path.join(config["output"], 'pareto_front')).to_numpy()
+    for i, point in enumerate(total_points):
+        if np.isclose(point, pareto).all(axis=1).any():
+            nondominated.append((point, variances[i, :] if has_variance else None))
+        else:
+            dominated.append((point, variances[i, :] if has_variance else None))
+    # Build the Pareto hull
     hull = ConvexHull(pareto)
     for simplex in hull.simplices:
         # Hacky: filter the line between the two endpoints (assuming the list is sorted by x)
         if abs(simplex[0] - simplex[1]) < pareto.shape[0] - 1:
             ax.plot(pareto[simplex, 0], pareto[simplex, 1], 'r', ls='--')
-    ax.scatter(pareto[:, 0], pareto[:, 1], c='r', label='Pareto front')
-    if args.all:
-        # Read the dominated points
-        total_points = np.array([entry['values'] for entry in data.values()]).T
-        # Remove the Pareto front from the dominated points
-        dominated = []
-        for point in total_points:
-            if np.isclose(point, pareto).all(axis=1).any():
-                continue
-            dominated.append(point)
-        dominated = np.array(dominated)
-        ax.scatter(dominated[:, 0], dominated[:, 1], c='k', label='Dominated points')
+    # Plot the points
+    if has_variance:
+        ax.errorbar(
+            [point[0][0] for point in nondominated],
+            [point[0][1] for point in nondominated],
+            xerr=np.sqrt([point[1][0] for point in nondominated]),
+            yerr=np.sqrt([point[1][1] for point in nondominated]),
+            c='r',
+            fmt='o',
+            label='Pareto front',
+        )
+        if args.all:
+            ax.errorbar(
+                [point[0][0] for point in dominated],
+                [point[0][1] for point in dominated],
+                xerr=np.sqrt([point[1][0] for point in dominated]),
+                yerr=np.sqrt([point[1][1] for point in dominated]),
+                c='k',
+                fmt='o',
+                label='Dominated points',
+            )
+    else:
+        ax.scatter(
+            [point[0][0] for point in nondominated],
+            [point[0][1] for point in nondominated],
+            c='r',
+            label='Pareto front',
+        )
+        if args.all:
+            ax.scatter(
+                [point[0][0] for point in dominated],
+                [point[0][1] for point in dominated],
+                c='k',
+                label='Dominated points',
+            )
     if args.log:
         ax.set_xscale('log')
         ax.set_yscale('log')
     ax.set_xlabel('Objective 1')
     ax.set_ylabel('Objective 2')
     ax.legend()
     ax.grid()
     fig.tight_layout()
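
A note on the membership test above: a point is treated as non-dominated when it matches a row of the saved pareto_front table to within np.isclose tolerance. A minimal standalone sketch of that split, using synthetic data in place of piglot's history (all names here are illustrative):

import numpy as np

# Synthetic two-objective points (one row per evaluation) and a known Pareto front
total_points = np.array([[1.0, 4.0], [2.0, 2.0], [3.0, 3.0], [4.0, 1.0]])
pareto = np.array([[1.0, 4.0], [2.0, 2.0], [4.0, 1.0]])

# A point is non-dominated when it coincides with some row of the Pareto front
on_front = np.array([np.isclose(p, pareto).all(axis=1).any() for p in total_points])
nondominated = total_points[on_front]
dominated = total_points[~on_front]
print(len(nondominated), len(dominated))  # 3 1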
82 changes: 68 additions & 14 deletions piglot/objective.py
@@ -161,8 +161,37 @@ class ObjectiveResult:
     values: List[np.ndarray]
     variances: Optional[List[np.ndarray]] = None
 
+    @staticmethod
+    def __mc_variance(
+        values: np.ndarray,
+        variances: np.ndarray,
+        composition: Composition,
+        num_samples: int = 1000,
+    ) -> float:
+        """Compute the objective variance using Monte Carlo (using fixed base samples).
+
+        Parameters
+        ----------
+        values : np.ndarray
+            Objective values.
+        variances : np.ndarray
+            Objective variances.
+        composition : Composition
+            Composition functional to use.
+        num_samples : int, optional
+            Number of Monte Carlo samples, by default 1000.
+
+        Returns
+        -------
+        float
+            Estimated variance of the objective.
+        """
+        biased = [norm.rvs(loc=0, scale=1) for _ in range(num_samples)]
+        mc_objectives = [composition(values + bias * np.sqrt(variances)) for bias in biased]
+        return np.var(mc_objectives)

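One property of this estimator worth noting: each Monte Carlo sample perturbs every component of values with the same scalar draw, so the perturbations are fully correlated across components. For a linear composition w·x this converges to (w @ sqrt(variances))**2 rather than the independent-noise value sum(w**2 * variances). A standalone sketch (a re-implementation for illustration only; mc_variance below is a stand-in for the private method above):

import numpy as np
from scipy.stats import norm

def mc_variance(values, variances, composition, num_samples=100_000):
    # One shared standard-normal draw per sample, as in __mc_variance above
    biased = [norm.rvs(loc=0, scale=1) for _ in range(num_samples)]
    return np.var([composition(values + b * np.sqrt(variances)) for b in biased])

w = np.array([0.5, 2.0])
est = mc_variance(np.array([1.0, 3.0]), np.array([0.04, 0.09]), lambda x: w @ x)
print(est, (w @ np.sqrt([0.04, 0.09])) ** 2)  # both close to 0.49
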
     def scalarise(self, composition: Composition = None) -> float:
-        """Scalarise the result.
+        """Scalarise the result under noise-free single-objective optimisation.
 
         Parameters
         ----------
@@ -178,8 +207,8 @@ def scalarise(self, composition: Composition = None) -> float:
             return composition(np.concatenate(self.values))
         return np.mean(self.values)
 
-    def scalarise_mo(self, composition: Composition = None) -> float:
-        """Scalarise the result.
+    def scalarise_mo(self, composition: Composition = None) -> List[float]:
+        """Pseudo-scalarise the result under noise-free multi-objective optimisation.
 
         Parameters
         ----------
@@ -188,15 +217,15 @@ def scalarise_mo(self, composition: Composition = None) -> float:
         Returns
         -------
-        float
-            Scalarised result.
+        List[float]
+            Pseudo-scalarised result.
         """
         if composition is not None:
             return [composition(vals) for vals in self.values]
         return [np.mean(vals) for vals in self.values]
 
     def scalarise_stochastic(self, composition: Composition = None) -> Tuple[float, float]:
-        """Scalarise the result.
+        """Scalarise the result under stochastic single-objective optimisation.
 
         Parameters
         ----------
@@ -211,12 +240,29 @@ def scalarise_stochastic(self, composition: Composition = None) -> Tuple[float, float]:
         if composition is not None:
             values = np.concatenate(self.values)
             variances = np.concatenate(self.variances)
-            # Compute the objective variance using Monte Carlo (using fixed base samples)
-            biased = [norm.rvs(loc=0, scale=1) for _ in range(1000)]
-            mc_objectives = [composition(values + bias * np.sqrt(variances)) for bias in biased]
-            return composition(values), np.var(mc_objectives)
+            return composition(values), self.__mc_variance(values, variances, composition)
         return np.mean(self.values), np.sum(self.variances)
 
+    def scalarise_mo_stochastic(self, composition: Composition = None) -> List[Tuple[float, float]]:
+        """Pseudo-scalarise the result under stochastic multi-objective optimisation.
+
+        Parameters
+        ----------
+        composition : Composition, optional
+            Composition functional to use, by default None.
+
+        Returns
+        -------
+        List[Tuple[float, float]]
+            Pseudo-scalarised mean and variance.
+        """
+        if composition is not None:
+            return [
+                (composition(vals), self.__mc_variance(vals, vars, composition))
+                for vals, vars in zip(self.values, self.variances)
+            ]
+        return [(np.mean(vals), np.sum(vars)) for vals, vars in zip(self.values, self.variances)]

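Without a composition, the stochastic multi-objective path reduces to a per-objective (mean, summed variance) pair. A small shape check of that branch, detached from piglot's classes (illustrative data):

import numpy as np

values = [np.array([1.0, 2.0]), np.array([3.0])]     # two objectives
variances = [np.array([0.1, 0.2]), np.array([0.3])]

# Mirrors the no-composition branch of scalarise_mo_stochastic
result = [(np.mean(vals), np.sum(vrs)) for vals, vrs in zip(values, variances)]
print(result)  # approximately [(1.5, 0.3), (3.0, 0.3)]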

 class GenericObjective(Objective):
     """Class for generic objectives."""
@@ -251,6 +297,8 @@ def prepare(self) -> None:
         if self.multi_objective:
             for i in range(self.num_objectives):
                 file.write(f'\t{"Objective_" + str(i + 1):>15}')
+                if self.stochastic:
+                    file.write(f'\t{"Variance_" + str(i + 1):>15}')
         else:
             file.write(f'\t{"Objective":>15}')
             if self.stochastic:
@@ -303,9 +351,13 @@ def __call__(self, values: np.ndarray, concurrent: bool = False) -> ObjectiveResult:
             file.write(f'{begin_time - self.begin_time:>15.8e}\t')
             file.write(f'{end_time - begin_time:>15.8e}\t')
             if self.multi_objective:
-                vals = objective_result.scalarise_mo(self.composition)
-                for i in range(self.num_objectives):
-                    file.write(f'{vals[i]:>15.8e}\t')
+                if self.stochastic:
+                    vals_vars = objective_result.scalarise_mo_stochastic(self.composition)
+                    for value, var in vals_vars:
+                        file.write(f'{value:>15.8e}\t{var:>15.8e}\t')
+                else:
+                    for val in objective_result.scalarise_mo(self.composition):
+                        file.write(f'{val:>15.8e}\t')
             else:
                 if self.stochastic:
                     value, var = objective_result.scalarise_stochastic(self.composition)
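
For a stochastic two-objective run, the value and variance columns are therefore interleaved per objective in the history file, matching the header written in prepare(). An illustrative fragment of one row (leading columns and exact widths elided):

...     Objective_1      Variance_1     Objective_2      Variance_2
...  1.23456789e+00  4.56000000e-02  2.34567890e+00  7.80000000e-03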
@@ -363,6 +415,8 @@ def get_history(self) -> Dict[str, Dict[str, Any]]:
         # Multi-objective case
         if self.multi_objective:
             values = [df[f"Objective_{i + 1}"] for i in range(self.num_objectives)]
+            if self.stochastic:
+                variances = [df[f"Variance_{i + 1}"] for i in range(self.num_objectives)]
             return_dict = {}
             for i in range(self.num_objectives):
                 result = {
@@ -372,7 +426,7 @@ def get_history(self) -> Dict[str, Dict[str, Any]]:
"hashes": param_hash,
}
if self.stochastic:
result["variances"] = df["Variance"].to_numpy()
result["variances"] = variances[i]
return_dict[f"Objective_{i + 1}"] = result
return return_dict
# Single objective case
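
In the stochastic multi-objective case, each per-objective entry returned by get_history() now carries its own variances column, which is exactly what plot_pareto consumes. A sketch of the expected shape, with a hand-built stand-in for the returned dictionary (only keys visible in this diff are shown):

import numpy as np

history = {
    'Objective_1': {'values': np.array([1.0, 2.0]), 'variances': np.array([0.1, 0.2])},
    'Objective_2': {'values': np.array([3.0, 1.5]), 'variances': np.array([0.3, 0.1])},
}
total_points = np.array([entry['values'] for entry in history.values()]).T
variances = np.array([entry['variances'] for entry in history.values()]).T
print(total_points.shape, variances.shape)  # (2, 2) (2, 2): one row per evaluated point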