import numpy as np  # engine for numerical computing

# abstract base class of all Estimation of Distribution Algorithms (EDAs)
from pypop7.optimizers.eda.eda import EDA


class AEMNA(EDA):
"""Adaptive Estimation of Multivariate Normal Algorithm (AEMNA).
.. note:: `AEMNA` learns the *full* covariance matrix of the Gaussian sampling
distribution, resulting in a *cubic* time complexity w.r.t. each sampling.
Therefore, now it is **rarely** used for large-scale black-box optimization
(LBO). It is **highly recommended** to first attempt other more advanced
optimization methods for LBO.
Parameters
----------
problem : dict
problem arguments with the following common settings (`keys`):
* 'fitness_function' - objective function to be **minimized** (`func`),
* 'ndim_problem' - number of dimensionality (`int`),
* 'upper_boundary' - upper boundary of search range (`array_like`),
* 'lower_boundary' - lower boundary of search range (`array_like`).
options : dict
optimizer options with the following common settings (`keys`):
* 'max_function_evaluations' - maximum of function evaluations (`int`,
default: `np.inf`),
* 'max_runtime' - maximal runtime (`float`,
default: `np.inf`),
* 'seed_rng' - seed for random number generation needed
to be *explicitly* set (`int`);
and with the following particular settings (`keys`):
* 'n_individuals' - number of offspring, aka offspring population size
(`int`, default: `200`),
* 'n_parents' - number of parents, aka parental population size
(`int`, default: `int(options['n_individuals']/2)`).
Examples
--------
Use the optimizer to minimize the well-known test function
`Rosenbrock <http://en.wikipedia.org/wiki/Rosenbrock_function>`_:
.. code-block:: python
:linenos:
>>> import numpy # engine for numerical computing
>>> from pypop7.benchmarks.base_functions import rosenbrock # function to be minimized
>>> from pypop7.optimizers.eda.aemna import AEMNA
>>> problem = {'fitness_function': rosenbrock, # define problem arguments
... 'ndim_problem': 2,
... 'lower_boundary': -5*numpy.ones((2,)),
... 'upper_boundary': 5*numpy.ones((2,))}
>>> options = {'max_function_evaluations': 5000, # set optimizer options
... 'seed_rng': 2022}
>>> aemna = AEMNA(problem, options) # initialize the optimizer class
>>> results = aemna.optimize() # run the optimization process
>>> # return the number of function evaluations and best-so-far fitness
>>> print(f"AEMNA: {results['n_function_evaluations']}, {results['best_so_far_y']}")
AEMNA: 5000, 0.0023607608362747035
For its correctness checking of coding, refer to `this code-based repeatability report
<hhttps://tinyurl.com/5ec2uest>`_ for more details.
Attributes
----------
n_individuals : `int`
number of offspring, aka offspring population size.
n_parents : `int`
number of parents, aka parental population size.
References
----------
Larrañaga, P. and Lozano, J.A. eds., 2002.
`Estimation of distribution algorithms: A new tool for evolutionary computation.
<https://link.springer.com/book/10.1007/978-1-4615-1539-5>`_
Springer Science & Business Media.
"""
    def __init__(self, problem, options):
        EDA.__init__(self, problem, options)

    def initialize(self, args=None):
x = self.rng_optimization.uniform(self.initial_lower_boundary, self.initial_upper_boundary,
size=(self.n_individuals, self.ndim_problem)) # population
y = np.empty((self.n_individuals,)) # fitness
for i in range(self.n_individuals):
if self._check_terminations():
break
y[i] = self._evaluate_fitness(x[i], args)
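        # select the n_parents best individuals to fit the initial Gaussian model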
order = np.argsort(y)[:self.n_parents]
mean, cov = np.mean(x[order], axis=0), np.cov(np.transpose(x[order]))
        return x, y, mean, cov

    def iterate(self, x=None, y=None, mean=None, cov=None, args=None):
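        # sample a single offspring from the current full-covariance Gaussian model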
xx = self.rng_optimization.multivariate_normal(mean, cov)
yy = self._evaluate_fitness(xx, args)
        order = np.argsort(y)[:self.n_parents]  # indices of the n_parents best individuals
        worst = order[-1]  # index of the worst among these parents
        if yy < y[worst]:  # accept the offspring only if it improves on the worst parent
            mean_bak = np.copy(mean)  # back up the old mean for the covariance update
            mean += (xx - x[worst])/self.n_parents  # incrementally update the mean
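            # update each entry of the full covariance matrix incrementally:
            # subtract the replaced parent's cross-term contributions (w.r.t. the
            # old mean, mean_bak) and add those of the accepted offspring (w.r.t.
            # the updated mean), rather than re-estimating the covariance from scratch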
            n_parents2 = np.power(self.n_parents, 2)  # squared parental population size
            for i in range(self.ndim_problem):
                for j in range(self.ndim_problem):
                    cov[i, j] = (cov[i, j] - ((xx[i] - x[worst, i])*np.sum(x[order, j] - mean_bak[j]))/n_parents2 -
                                 ((xx[j] - x[worst, j])*np.sum(x[order, i] - mean_bak[i]))/n_parents2 +
                                 ((xx[i] - x[worst, i])*(xx[j] - x[worst, j]))/n_parents2 -
                                 ((x[worst, i] - mean[i])*(x[worst, j] - mean[j]))/self.n_parents +
                                 ((xx[i] - mean[i])*(xx[j] - mean[j]))/self.n_parents)
            x[worst], y[worst] = xx, yy  # replace the worst parent with the accepted offspring
self._n_generations += 1
        return x, y, mean, cov

    def optimize(self, fitness_function=None, args=None):
fitness = EDA.optimize(self, fitness_function)
x, y, mean, cov = self.initialize(args)
while not self._check_terminations(): # similar to steady-state genetic algorithm
self._print_verbose_info(fitness, y)
x, y, mean, cov = self.iterate(x, y, mean, cov, args)
return self._collect(fitness, y)
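

# A minimal standalone sketch (an illustrative addition, assuming only NumPy is
# available), not part of the optimizer itself: it illustrates the note in the
# class docstring that sampling from a *full*-covariance Gaussian requires a
# matrix factorization whose cost grows roughly cubically with the dimension,
# so doubling the dimension costs roughly 8x per factorization.
if __name__ == '__main__':
    import time

    rng = np.random.default_rng(2022)
    for d in (200, 400, 800):
        mean_d, cov_d = np.zeros((d,)), np.eye(d)  # toy mean and covariance
        start = time.process_time()
        rng.multivariate_normal(mean_d, cov_d, method='cholesky')  # one sampling step
        print(f"ndim = {d}: {time.process_time() - start:.6f}s")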