Merge pull request qiskit-community/qiskit-aqua#817 from woodsp-ibm/moredocs

Updating component documentation
woodsp-ibm authored Feb 10, 2020
2 parents 9ff9b24 + e644e2b commit 7b5cb78
Showing 24 changed files with 432 additions and 218 deletions.
16 changes: 13 additions & 3 deletions qiskit/aqua/components/optimizers/__init__.py
@@ -15,7 +15,17 @@
"""
Optimizers (:mod:`qiskit.aqua.components.optimizers`)
=====================================================
Optimizers, local and global
Aqua contains a variety of classical optimizers for use by quantum variational algorithms,
such as :class:`~qiskit.aqua.algorithms.VQE`.
Logically, these optimizers can be divided into two categories:
`Local Optimizers`_
Given an optimization problem, a **local optimizer** is a function
that attempts to find an optimal value within the neighboring set of a candidate solution.
`Global Optimizers`_
Given an optimization problem, a **global optimizer** is a function
that attempts to find an optimal value among all possible solutions.
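For illustration, either kind of optimizer is constructed in the same way and handed to a
variational algorithm through its ``optimizer`` argument. The snippet below is a minimal
sketch only; the particular parameter values, and the ``max_evals`` argument of the
NLopt-based :class:`CRS`, are illustrative assumptions rather than required settings::

    from qiskit.aqua.components.optimizers import COBYLA, CRS

    local_optimizer = COBYLA(maxiter=200)    # local: refines a candidate solution
    global_optimizer = CRS(max_evals=1000)   # global: searches the whole bounded domain
                                             # (requires the optional 'nlopt' package)

    # Either object can then be passed to a variational algorithm, e.g.
    # VQE(operator, var_form, optimizer=local_optimizer)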
.. currentmodule:: qiskit.aqua.components.optimizers
@@ -36,6 +46,7 @@
:nosignatures:
ADAM
AQGD
CG
COBYLA
L_BFGS_B
@@ -45,7 +56,6 @@
SLSQP
SPSA
TNC
AQGD
Global Optimizers
=================
@@ -91,6 +101,7 @@

__all__ = ['Optimizer',
'ADAM',
'AQGD',
'CG',
'COBYLA',
'L_BFGS_B',
@@ -100,5 +111,4 @@
'SLSQP',
'SPSA',
'TNC',
'AQGD',
'CRS', 'DIRECT_L', 'DIRECT_L_RAND', 'ESCH', 'ISRES']
14 changes: 11 additions & 3 deletions qiskit/aqua/components/optimizers/adam_amsgrad.py
@@ -38,18 +38,26 @@


class ADAM(Optimizer):

"""
Adam and AMSGRAD Optimizer
Adam and AMSGRAD optimizer.
| **Adam**
| *Kingma, Diederik & Ba, Jimmy. (2014).*
| Adam: A Method for Stochastic Optimization. \
International Conference on Learning Representations.
Adam is a gradient-based optimization algorithm that relies on adaptive estimates of
lower-order moments. The algorithm requires little memory and is invariant to diagonal
rescaling of the gradients. Furthermore, it is able to cope with non-stationary objective
functions and noisy and/or sparse gradients.
|
| **AMSGRAD**
| *Sashank J. Reddi and Satyen Kale and Sanjiv Kumar. (2018).*
| On the Convergence of Adam and Beyond. International Conference on Learning Representations.
AMSGRAD (a variant of ADAM) uses a 'long-term memory' of past gradients and, thereby,
improves convergence properties.
"""

_OPTIONS = ['maxiter', 'tol', 'lr', 'beta_1', 'beta_2',
@@ -109,7 +117,7 @@ def __init__(self,
writer.writeheader()

def get_support_level(self):
""" return support level dictionary """
""" Return support level dictionary """
return {
'gradient': Optimizer.SupportLevel.supported,
'bounds': Optimizer.SupportLevel.ignored,
2 changes: 1 addition & 1 deletion qiskit/aqua/components/optimizers/aqgd.py
@@ -76,7 +76,7 @@ def __init__(self,
self._previous_loss = None

def get_support_level(self):
""" return support level dictionary """
""" Return support level dictionary """
return {
'gradient': Optimizer.SupportLevel.ignored,
'bounds': Optimizer.SupportLevel.ignored,
22 changes: 12 additions & 10 deletions qiskit/aqua/components/optimizers/cg.py
@@ -12,7 +12,7 @@
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

"""Conjugate Gradient algorithm."""
"""Conjugate Gradient optimizer."""

from typing import Optional
import logging
@@ -25,10 +25,17 @@


class CG(Optimizer):
"""Conjugate Gradient algorithm.
"""Conjugate Gradient optimizer.
Uses scipy.optimize.minimize CG
See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html
CG is an algorithm for the numerical solution of systems of linear equations whose matrices are
symmetric and positive-definite. It is an *iterative algorithm* in that it uses an initial
guess to generate a sequence of improving approximate solutions for a problem,
in which each approximation is derived from the previous ones. It is often used to solve
unconstrained optimization problems, such as energy minimization.
Uses scipy.optimize.minimize CG.
For further detail, please refer to
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html
"""

_OPTIONS = ['maxiter', 'disp', 'gtol', 'eps']
@@ -41,11 +48,6 @@ def __init__(self,
tol: Optional[float] = None,
eps: float = 1.4901161193847656e-08) -> None:
"""
Constructor.
For details, please refer to
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html.
Args:
maxiter: Maximum number of iterations to perform.
disp: Set to True to print convergence messages.
@@ -60,7 +62,7 @@
self._tol = tol

def get_support_level(self):
""" return support level dictionary """
""" Return support level dictionary """
return {
'gradient': Optimizer.SupportLevel.supported,
'bounds': Optimizer.SupportLevel.ignored,
22 changes: 11 additions & 11 deletions qiskit/aqua/components/optimizers/cobyla.py
@@ -12,7 +12,7 @@
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

"""Constrained Optimization By Linear Approximation algorithm."""
"""Constrained Optimization By Linear Approximation optimizer."""

from typing import Optional
import logging
@@ -24,10 +24,15 @@


class COBYLA(Optimizer):
"""Constrained Optimization By Linear Approximation algorithm.
"""
Constrained Optimization By Linear Approximation optimizer.
COBYLA is a numerical optimization method for constrained problems
where the derivative of the objective function is not known.
Uses scipy.optimize.minimize COBYLA
See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html
Uses scipy.optimize.minimize COBYLA.
For further detail, please refer to
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html
"""

_OPTIONS = ['maxiter', 'disp', 'rhobeg']
@@ -39,17 +44,12 @@ def __init__(self,
rhobeg: float = 1.0,
tol: Optional[float] = None) -> None:
"""
Constructor.
For details, please refer to
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html.
Args:
maxiter: Maximum number of function evaluations.
disp: Set to True to print convergence messages.
rhobeg: Reasonable initial changes to the variables.
tol: Final accuracy in the optimization (not precisely guaranteed).
This is a lower bound on the size of the trust region.
This is a lower bound on the size of the trust region.
"""
super().__init__()
for k, v in locals().items():
@@ -58,7 +58,7 @@
self._tol = tol

def get_support_level(self):
""" return support level dictionary """
""" Return support level dictionary """
return {
'gradient': Optimizer.SupportLevel.ignored,
'bounds': Optimizer.SupportLevel.ignored,
53 changes: 34 additions & 19 deletions qiskit/aqua/components/optimizers/l_bfgs_b.py
@@ -12,7 +12,7 @@
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

"""Limited-memory BFGS algorithm."""
"""Limited-memory BFGS Bound optimizer."""

import logging

@@ -26,7 +26,26 @@

class L_BFGS_B(Optimizer):
"""
Limited-memory BFGS algorithm.
Limited-memory BFGS Bound optimizer.
The goal of Limited-memory Broyden-Fletcher-Goldfarb-Shanno Bound (L-BFGS-B)
is to minimize the value of a differentiable scalar function :math:`f`.
This optimizer is a quasi-Newton method, meaning that, in contrast to Newton's method,
it does not require :math:`f`'s Hessian (the matrix of :math:`f`'s second derivatives)
when attempting to compute :math:`f`'s minimum value.
Like BFGS, L-BFGS is an iterative method for solving unconstrained, non-linear optimization
problems, but approximates BFGS using a limited amount of computer memory.
L-BFGS starts with an initial estimate of the optimal value, and proceeds iteratively
to refine that estimate with a sequence of better estimates.
The derivatives of :math:`f` are used to identify the direction of steepest descent,
and also to form an estimate of the Hessian matrix (second derivative) of :math:`f`.
L-BFGS-B extends L-BFGS to handle simple, per-variable bound constraints.
Uses scipy.optimize.fmin_l_bfgs_b.
For further detail, please refer to
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_l_bfgs_b.html
"""

_OPTIONS = ['maxfun', 'maxiter', 'factr', 'iprint', 'epsilon']
@@ -39,36 +58,32 @@ def __init__(self,
iprint: int = -1,
epsilon: float = 1e-08) -> None:
r"""
Uses scipy.optimize.fmin_l_bfgs_b
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_l_bfgs_b.html
Args:
maxfun: Maximum number of function evaluations.
maxiter: Maximum number of iterations.
factr: The iteration stops when (f\^k - f\^{k+1})/max{\|f\^k\|,
\|f\^{k+1}\|,1} <= factr * eps, where eps is the machine precision,
which is automatically generated by the code. Typical values for
factr are: 1e12 for low accuracy; 1e7 for moderate accuracy;
10.0 for extremely high accuracy. See Notes for relationship to ftol,
which is exposed (instead of factr) by the scipy.optimize.minimize
interface to L-BFGS-B.
\|f\^{k+1}\|,1} <= factr * eps, where eps is the machine precision,
which is automatically generated by the code. Typical values for
factr are: 1e12 for low accuracy; 1e7 for moderate accuracy;
10.0 for extremely high accuracy. See Notes for relationship to ftol,
which is exposed (instead of factr) by the scipy.optimize.minimize
interface to L-BFGS-B.
iprint: Controls the frequency of output. iprint < 0 means no output;
iprint = 0 print only one line at the last iteration; 0 < iprint < 99
print also f and \|proj g\| every iprint iterations; iprint = 99 print
details of every iteration except n-vectors; iprint = 100 print also the
changes of active set and final x; iprint > 100 print details of
every iteration including x and g.
iprint = 0 print only one line at the last iteration; 0 < iprint < 99
print also f and \|proj g\| every iprint iterations; iprint = 99 print
details of every iteration except n-vectors; iprint = 100 print also the
changes of active set and final x; iprint > 100 print details of
every iteration including x and g.
epsilon: Step size used when approx_grad is True, for numerically
calculating the gradient
calculating the gradient
"""
super().__init__()
for k, v in locals().items():
if k in self._OPTIONS:
self._options[k] = v

def get_support_level(self):
""" return support level dictionary """
""" Return support level dictionary """
return {
'gradient': Optimizer.SupportLevel.supported,
'bounds': Optimizer.SupportLevel.supported,
36 changes: 22 additions & 14 deletions qiskit/aqua/components/optimizers/nelder_mead.py
@@ -12,7 +12,7 @@
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

"""Nelder-Mead algorithm."""
"""Nelder-Mead optimizer."""

from typing import Optional
import logging
@@ -26,9 +26,23 @@


class NELDER_MEAD(Optimizer):
"""Nelder-Mead algorithm.
Uses scipy.optimize.minimize Nelder-Mead
"""
Nelder-Mead optimizer.
The Nelder-Mead algorithm performs unconstrained optimization; it ignores bounds
or constraints. It is used to find the minimum or maximum of an objective function
in a multidimensional space. It is based on the Simplex algorithm. Nelder-Mead
is robust in many applications, especially when the first and second derivatives of the
objective function are not known.
However, if the numerical computation of the derivatives can be trusted to be accurate,
other algorithms that use first and/or second derivative information may be preferred to
Nelder-Mead for their better performance in the general case, especially since the
Nelder-Mead technique is a heuristic search method that can converge to
non-stationary points.
Uses scipy.optimize.minimize Nelder-Mead.
For further detail, please refer to
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html
"""

@@ -43,19 +57,13 @@ def __init__(self,
tol: Optional[float] = None,
adaptive: bool = False) -> None:
"""
Constructor.
For details, please refer to
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html.
Args:
maxiter: Maximum allowed number of iterations. If both maxiter and maxfev are set,
minimization will stop at the first reached.
minimization will stop at the first reached.
maxfev: Maximum allowed number of function evaluations. If both maxiter and
maxfev are set, minimization will stop at the first reached.
maxfev are set, minimization will stop at the first reached.
disp: Set to True to print convergence messages.
xatol: Absolute error in xopt between iterations
that is acceptable for convergence.
xatol: Absolute error in xopt between iterations that is acceptable for convergence.
tol: Tolerance for termination.
adaptive: Adapt algorithm parameters to dimensionality of problem.
"""
@@ -66,7 +74,7 @@
self._tol = tol

def get_support_level(self):
""" return support level dictionary """
""" Return support level dictionary """
return {
'gradient': Optimizer.SupportLevel.ignored,
'bounds': Optimizer.SupportLevel.ignored,
19 changes: 12 additions & 7 deletions qiskit/aqua/components/optimizers/nlopts/crs.py
@@ -12,21 +12,26 @@
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

"""Controlled Random Search (CRS) with local mutation."""
"""Controlled Random Search (CRS) with local mutation optimizer."""

from .nloptimizer import NLoptOptimizer, NLoptOptimizerType


class CRS(NLoptOptimizer):
# pylint: disable=line-too-long
"""
Controlled Random Search (CRS) with local mutation.
Controlled Random Search (CRS) with local mutation optimizer.
NLopt global optimizer, derivative-free. See `NLOpt CRS documentation
<https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/#controlled-random-search-crs-with-local-mutation>`_
for more information.
Controlled Random Search (CRS) with local mutation is part of the family of CRS optimizers.
The CRS optimizers start with a random population of points, and randomly evolve these points
by heuristic rules. In the case of CRS with local mutation, the evolution is a randomized
version of the :class:`NELDER_MEAD` local optimizer.
NLopt global optimizer, derivative-free.
For further detail, please refer to
https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/#controlled-random-search-crs-with-local-mutation
"""

def get_nlopt_optimizer(self) -> NLoptOptimizerType:
""" return NLopt optimizer type """
""" Return NLopt optimizer type """
return NLoptOptimizerType.GN_CRS2_LM