Deprecation of algorithm utils (algorithm_globals and validation) (#10905)

* Add deprecation warnings

* Warnings, alg_globals, QDrift

* Move rng to init

* Add warnings alg. globals, fix tests

* Add reno

* Fix opflow tests

* Fix final test

* Update reno, deprecation messages

* Fix typo
ElePT authored Oct 12, 2023
1 parent 2156375 commit 65e8c1a
Showing 53 changed files with 557 additions and 176 deletions.
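
Note: every call site changed in this commit follows the same pattern: the deprecated utility (qiskit.utils.algorithm_globals or qiskit.utils.validation) is still used, but its DeprecationWarning is suppressed locally so that internal use does not warn end users. A minimal sketch of the pattern, assuming a pre-1.0 Qiskit where qiskit.utils.algorithm_globals is still importable (the sample_parameter helper is hypothetical, not part of this commit):

import warnings

from qiskit.utils import algorithm_globals


def sample_parameter() -> float:
    # The filter is scoped to this block, so direct user calls to the
    # deprecated utilities still emit their DeprecationWarning.
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        return algorithm_globals.random.uniform(0.0, 1.0)
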
4 changes: 3 additions & 1 deletion qiskit/algorithms/amplitude_amplifiers/grover.py
@@ -296,7 +296,9 @@ def amplify(self, amplification_problem: AmplificationProblem) -> "GroverResult"

         # sample from [0, power) if specified
         if self._sample_from_iterations:
-            power = algorithm_globals.random.integers(power)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", category=DeprecationWarning)
+                power = algorithm_globals.random.integers(power)
         # Run a grover experiment for a given power of the Grover operator.
         if self._sampler is not None:
             qc = self.construct_circuit(amplification_problem, power, measurement=True)
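
Note: algorithm_globals.random is a NumPy Generator, so the wrapped call draws the Grover power uniformly from [0, power). A standalone sketch with a locally seeded generator (not Grover itself):

import numpy as np

rng = np.random.default_rng(seed=42)  # local stand-in for algorithm_globals.random
power = 5
sampled_power = rng.integers(power)  # uniform integer draw from [0, power)
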
6 changes: 5 additions & 1 deletion qiskit/algorithms/eigensolvers/numpy_eigensolver.py
@@ -14,6 +14,7 @@

 from __future__ import annotations

+import warnings
 from typing import Callable, Union, List, Optional
 import logging
 import numpy as np
@@ -61,7 +62,10 @@ def __init__(
                 elements that satisfies the criterion is smaller than ``k``, then the returned list will
                 have fewer elements and can even be empty.
         """
-        validate_min("k", k, 1)
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", category=DeprecationWarning)
+            validate_min("k", k, 1)

         super().__init__()

         self._in_k = k
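
Note: validate_min only guards the constructor argument. A rough sketch of its behavior (an approximation for illustration, not the library source):

def validate_min(name: str, value: float, minimum: float) -> None:
    # Raise if the argument is below the allowed minimum.
    if value < minimum:
        raise ValueError(f"{name} must be at least {minimum}, was {value}")


validate_min("k", 1, 1)    # passes
# validate_min("k", 0, 1)  # would raise ValueError
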
7 changes: 5 additions & 2 deletions qiskit/algorithms/minimum_eigensolvers/adapt_vqe.py
@@ -18,6 +18,7 @@

 import re
 import logging
+import warnings
 from typing import Any

 import numpy as np
@@ -129,8 +130,10 @@ def __init__(
             threshold: once all gradients have an absolute value smaller than this threshold, the
                 algorithm has converged and terminates. Defaults to ``1e-5``.
         """
-        validate_min("gradient_threshold", gradient_threshold, 1e-15)
-        validate_min("eigenvalue_threshold", eigenvalue_threshold, 1e-15)
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", category=DeprecationWarning)
+            validate_min("gradient_threshold", gradient_threshold, 1e-15)
+            validate_min("eigenvalue_threshold", eigenvalue_threshold, 1e-15)

         self.solver = solver
         self.gradient_threshold = gradient_threshold
5 changes: 4 additions & 1 deletion qiskit/algorithms/minimum_eigensolvers/qaoa.py
@@ -14,6 +14,7 @@

 from __future__ import annotations

+import warnings
 from typing import Callable, Any
 import numpy as np

@@ -118,7 +119,9 @@ def __init__(
                 These data are: the evaluation count, the optimizer parameters for the ansatz, the
                 evaluated value, the metadata dictionary.
         """
-        validate_min("reps", reps, 1)
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", category=DeprecationWarning)
+            validate_min("reps", reps, 1)

         self.reps = reps
         self.mixer = mixer
5 changes: 4 additions & 1 deletion qiskit/algorithms/optimizers/aqgd.py
@@ -16,6 +16,7 @@
 import logging
 from collections.abc import Callable
 from typing import Any
+import warnings

 import numpy as np
 from qiskit.utils.validation import validate_range_exclusive_max
@@ -89,7 +90,9 @@ def __init__(
                 "`eta`, and `momentum` must have the same length."
             )
         for m in momentum:
-            validate_range_exclusive_max("momentum", m, 0, 1)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", category=DeprecationWarning)
+                validate_range_exclusive_max("momentum", m, 0, 1)

         self._eta = eta
         self._maxiter = maxiter
6 changes: 5 additions & 1 deletion qiskit/algorithms/optimizers/gsls.py
@@ -14,6 +14,8 @@

 from __future__ import annotations

+import warnings
+
 from collections.abc import Callable
 from typing import Any, SupportsFloat
 import numpy as np
@@ -262,7 +264,9 @@ def sample_points(
         Returns:
             A tuple containing the sampling points and the directions.
         """
-        normal_samples = algorithm_globals.random.normal(size=(num_points, n))
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", category=DeprecationWarning)
+            normal_samples = algorithm_globals.random.normal(size=(num_points, n))
         row_norms = np.linalg.norm(normal_samples, axis=1, keepdims=True)
         directions = normal_samples / row_norms
         points = x + self._options["sampling_radius"] * directions
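
Note: sample_points builds random search directions by normalizing Gaussian samples, which gives directions uniformly distributed on the unit sphere, then offsets them from x by the sampling radius. A standalone sketch with a local RNG (hypothetical helper, not the GSLS implementation):

import numpy as np


def sample_points(x: np.ndarray, num_points: int, radius: float) -> np.ndarray:
    rng = np.random.default_rng()
    normal_samples = rng.normal(size=(num_points, x.size))
    # Dividing by the row norms yields unit vectors uniform over directions.
    directions = normal_samples / np.linalg.norm(normal_samples, axis=1, keepdims=True)
    return x + radius * directions


points = sample_points(np.zeros(3), num_points=4, radius=0.1)
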
9 changes: 7 additions & 2 deletions qiskit/algorithms/optimizers/p_bfgs.py
@@ -16,6 +16,7 @@
 import logging
 import multiprocessing
 import platform
+import warnings
 from collections.abc import Callable
 from typing import SupportsFloat

@@ -73,7 +74,9 @@ def __init__(
             kwargs: additional kwargs for scipy.optimize.minimize.
         """
         if max_processes:
-            validate_min("max_processes", max_processes, 1)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", category=DeprecationWarning)
+                validate_min("max_processes", max_processes, 1)

         if options is None:
             options = {}
@@ -138,7 +141,9 @@ def optimize_runner(_queue, _i_pt):  # Multi-process sampling
         # Start off as many other processes running the optimize (can be 0)
         processes = []
         for _ in range(num_procs):
-            i_pt = algorithm_globals.random.uniform(low, high)  # Another random point in bounds
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", category=DeprecationWarning)
+                i_pt = algorithm_globals.random.uniform(low, high)  # Another random point in bounds
             proc = multiprocessing.Process(target=optimize_runner, args=(queue, i_pt))
             processes.append(proc)
             proc.start()
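
Note: each extra process started by P_BFGS gets its own random starting point, drawn uniformly between the per-parameter bounds. A standalone illustration with a local RNG and made-up bounds (not P_BFGS itself):

import numpy as np

rng = np.random.default_rng()
low = np.array([-1.0, -1.0])
high = np.array([1.0, 1.0])
i_pt = rng.uniform(low, high)  # element-wise uniform draw within the bounds
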
7 changes: 6 additions & 1 deletion qiskit/algorithms/optimizers/scipy_optimizer.py
@@ -13,6 +13,7 @@
 """Wrapper class of scipy.optimize.minimize."""
 from __future__ import annotations

+import warnings
 from collections.abc import Callable
 from typing import Any

@@ -73,7 +74,11 @@ def __init__(
         self._initial_point_support_level = OptimizerSupportLevel.required

         self._options = options if options is not None else {}
-        validate_min("max_evals_grouped", max_evals_grouped, 1)
+
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", category=DeprecationWarning)
+            validate_min("max_evals_grouped", max_evals_grouped, 1)
+
         self._max_evals_grouped = max_evals_grouped
         self._kwargs = kwargs

16 changes: 10 additions & 6 deletions qiskit/algorithms/optimizers/spsa.py
@@ -687,12 +687,16 @@ def optimize(
 def bernoulli_perturbation(dim, perturbation_dims=None):
     """Get a Bernoulli random perturbation."""
     if perturbation_dims is None:
-        return 1 - 2 * algorithm_globals.random.binomial(1, 0.5, size=dim)
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", category=DeprecationWarning)
+            return 1 - 2 * algorithm_globals.random.binomial(1, 0.5, size=dim)

-    pert = 1 - 2 * algorithm_globals.random.binomial(1, 0.5, size=perturbation_dims)
-    indices = algorithm_globals.random.choice(
-        list(range(dim)), size=perturbation_dims, replace=False
-    )
+    with warnings.catch_warnings():
+        warnings.filterwarnings("ignore", category=DeprecationWarning)
+        pert = 1 - 2 * algorithm_globals.random.binomial(1, 0.5, size=perturbation_dims)
+        indices = algorithm_globals.random.choice(
+            list(range(dim)), size=perturbation_dims, replace=False
+        )
     result = np.zeros(dim)
     result[indices] = pert

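
Note: the Bernoulli perturbation maps a draw b in {0, 1} to 1 - 2*b in {+1, -1}, so every entry of the SPSA perturbation vector is +1 or -1 with equal probability. A standalone sketch with a local RNG:

import numpy as np

rng = np.random.default_rng()
dim = 4
delta = 1 - 2 * rng.binomial(1, 0.5, size=dim)  # entries are +1 or -1
print(delta)  # e.g. [ 1 -1 -1  1]
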
21 changes: 13 additions & 8 deletions qiskit/algorithms/optimizers/umda.py
@@ -14,6 +14,8 @@

 from __future__ import annotations

+import warnings
+
 from collections.abc import Callable
 from typing import Any
 import numpy as np
@@ -173,10 +175,11 @@ def _new_generation(self):
         """Build a new generation sampled from the vector of probabilities.
         Updates the generation pandas dataframe
         """
-
-        gen = algorithm_globals.random.normal(
-            self._vector[0, :], self._vector[1, :], [self._size_gen, self._n_variables]
-        )
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", category=DeprecationWarning)
+            gen = algorithm_globals.random.normal(
+                self._vector[0, :], self._vector[1, :], [self._size_gen, self._n_variables]
+            )

         self._generation = self._generation[: int(self.ELITE_FACTOR * len(self._generation))]
         self._generation = np.vstack((self._generation, gen))
@@ -228,10 +231,12 @@ def minimize(

         self._vector = self._initialization()

-        # initialization of generation
-        self._generation = algorithm_globals.random.normal(
-            self._vector[0, :], self._vector[1, :], [self._size_gen, self._n_variables]
-        )
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", category=DeprecationWarning)
+            # initialization of generation
+            self._generation = algorithm_globals.random.normal(
+                self._vector[0, :], self._vector[1, :], [self._size_gen, self._n_variables]
+            )

         for _ in range(self._maxiter):
             self._check_generation(fun)
5 changes: 4 additions & 1 deletion qiskit/algorithms/time_evolvers/pvqd/pvqd.py
@@ -14,6 +14,7 @@
 from __future__ import annotations

 import logging
+import warnings
 from collections.abc import Callable

 import numpy as np
@@ -197,7 +198,9 @@ def step(
         loss, gradient = self.get_loss(hamiltonian, ansatz, dt, theta)

         if initial_guess is None:
-            initial_guess = algorithm_globals.random.random(self.initial_parameters.size) * 0.01
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", category=DeprecationWarning)
+                initial_guess = algorithm_globals.random.random(self.initial_parameters.size) * 0.01

         if isinstance(self.optimizer, Optimizer):
             optimizer_result = self.optimizer.minimize(loss, initial_guess, gradient)
5 changes: 4 additions & 1 deletion qiskit/algorithms/utils/validate_initial_point.py
@@ -14,6 +14,7 @@

 from __future__ import annotations

+import warnings
 from collections.abc import Sequence

 import numpy as np
@@ -58,7 +59,9 @@ def validate_initial_point(
             upper_bounds.append(upper if upper is not None else 2 * np.pi)

         # sample from within bounds
-        point = algorithm_globals.random.uniform(lower_bounds, upper_bounds)
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", category=DeprecationWarning)
+            point = algorithm_globals.random.uniform(lower_bounds, upper_bounds)

     elif len(point) != expected_size:
         raise ValueError(
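
Note: when no initial point is supplied, one is drawn uniformly within the ansatz parameter bounds, with a missing upper bound defaulting to 2*pi as shown above. A standalone sketch with a local RNG (the symmetric -2*pi default for a missing lower bound is an assumption here, not shown in this hunk):

import numpy as np

rng = np.random.default_rng()
bounds = [(-1.0, 1.0), (None, None)]
lower = [lo if lo is not None else -2 * np.pi for lo, _ in bounds]
upper = [up if up is not None else 2 * np.pi for _, up in bounds]
point = rng.uniform(lower, upper)  # one uniform draw per parameter
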
9 changes: 6 additions & 3 deletions qiskit/opflow/evolutions/trotterizations/qdrift.py
@@ -15,6 +15,7 @@
 """

+import warnings
 from typing import List, Union, cast

 import numpy as np
@@ -77,8 +78,10 @@ def convert(self, operator: OperatorBase) -> OperatorBase:
         # The protocol calls for the removal of the individual coefficients,
         # and multiplication by a constant factor.
         scaled_ops = [(op * (factor / op.coeff)).exp_i() for op in operator_iter]
-        sampled_ops = algorithm_globals.random.choice(
-            scaled_ops, size=(int(N * self.reps),), p=weights / lambd
-        )
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", category=DeprecationWarning)
+            sampled_ops = algorithm_globals.random.choice(
+                scaled_ops, size=(int(N * self.reps),), p=weights / lambd
+            )

         return ComposedOp(sampled_ops).reduce()
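
Note: QDrift draws N * reps exponentiated terms, each with probability proportional to the magnitude of its coefficient (weights / lambd). A standalone sketch of the sampling step with a local RNG and plain string stand-ins for the opflow operators:

import numpy as np

rng = np.random.default_rng()
weights = np.array([0.5, 1.5, 1.0])  # |coefficients| of the individual terms
lambd = weights.sum()
N, reps = 3, 2
sampled = rng.choice(["X", "Y", "Z"], size=(int(N * reps),), p=weights / lambd)
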
31 changes: 17 additions & 14 deletions qiskit/opflow/operator_base.py
@@ -13,6 +13,7 @@
 """OperatorBase Class"""

 import itertools
+import warnings
 from abc import ABC, abstractmethod
 from copy import deepcopy
 from typing import Dict, List, Optional, Set, Tuple, Union, cast
@@ -490,20 +491,22 @@ def _check_massive(method: str, matrix: bool, num_qubits: int, massive: bool) ->
         Raises:
             ValueError: Massive is False and number of qubits is greater than 16
         """
-        if num_qubits > 16 and not massive and not algorithm_globals.massive:
-            dim = 2**num_qubits
-            if matrix:
-                obj_type = "matrix"
-                dimensions = f"{dim}x{dim}"
-            else:
-                obj_type = "vector"
-                dimensions = f"{dim}"
-            raise ValueError(
-                f"'{method}' will return an exponentially large {obj_type}, "
-                f"in this case '{dimensions}' elements. "
-                "Set algorithm_globals.massive=True or the method argument massive=True "
-                "if you want to proceed."
-            )
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", category=DeprecationWarning)
+            if num_qubits > 16 and not massive and not algorithm_globals.massive:
+                dim = 2**num_qubits
+                if matrix:
+                    obj_type = "matrix"
+                    dimensions = f"{dim}x{dim}"
+                else:
+                    obj_type = "vector"
+                    dimensions = f"{dim}"
+                raise ValueError(
+                    f"'{method}' will return an exponentially large {obj_type}, "
+                    f"in this case '{dimensions}' elements. "
+                    "Set algorithm_globals.massive=True or the method argument massive=True "
+                    "if you want to proceed."
+                )

     # Printing

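
Note: the guard exists because dense representations grow exponentially with qubit count; past 16 qubits the object is refused unless massive=True is set. Illustrative arithmetic only:

num_qubits = 17
dim = 2**num_qubits          # 131072
vector_elements = dim        # 131072 amplitudes for a dense statevector
matrix_elements = dim * dim  # 17179869184 entries for a dense matrix
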
15 changes: 9 additions & 6 deletions qiskit/opflow/state_fns/dict_state_fn.py
@@ -13,6 +13,7 @@
 """DictStateFn Class"""

 import itertools
+import warnings
 from typing import Dict, List, Optional, Set, Union, cast

 import numpy as np
@@ -328,12 +329,14 @@ def sample(
         self, shots: int = 1024, massive: bool = False, reverse_endianness: bool = False
     ) -> Dict[str, float]:
         probs = np.square(np.abs(np.array(list(self.primitive.values()))))
-        unique, counts = np.unique(
-            algorithm_globals.random.choice(
-                list(self.primitive.keys()), size=shots, p=(probs / sum(probs))
-            ),
-            return_counts=True,
-        )
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", category=DeprecationWarning)
+            unique, counts = np.unique(
+                algorithm_globals.random.choice(
+                    list(self.primitive.keys()), size=shots, p=(probs / sum(probs))
+                ),
+                return_counts=True,
+            )
         counts = dict(zip(unique, counts))
         if reverse_endianness:
             scaled_dict = {bstr[::-1]: (prob / shots) for (bstr, prob) in counts.items()}
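
Note: sample draws shot outcomes according to the squared amplitude magnitudes, tallies them with np.unique, and normalizes by the shot count. A standalone sketch with a local RNG and hypothetical amplitudes:

import numpy as np

rng = np.random.default_rng()
primitive = {"00": 0.6, "11": 0.8}  # hypothetical bitstring amplitudes
probs = np.square(np.abs(np.array(list(primitive.values()))))
draws = rng.choice(list(primitive.keys()), size=1024, p=probs / probs.sum())
unique, counts = np.unique(draws, return_counts=True)
quasi_probs = {bstr: c / 1024 for bstr, c in zip(unique, counts)}
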
16 changes: 9 additions & 7 deletions qiskit/opflow/state_fns/vector_state_fn.py
@@ -12,7 +12,7 @@

 """VectorStateFn Class"""

-
+import warnings
 from typing import Dict, List, Optional, Set, Union, cast

 import numpy as np
@@ -243,12 +243,14 @@ def sample(
         deterministic_counts = self.primitive.probabilities_dict()
         # Don't need to square because probabilities_dict already does.
         probs = np.array(list(deterministic_counts.values()))
-        unique, counts = np.unique(
-            algorithm_globals.random.choice(
-                list(deterministic_counts.keys()), size=shots, p=(probs / sum(probs))
-            ),
-            return_counts=True,
-        )
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", category=DeprecationWarning)
+            unique, counts = np.unique(
+                algorithm_globals.random.choice(
+                    list(deterministic_counts.keys()), size=shots, p=(probs / sum(probs))
+                ),
+                return_counts=True,
+            )
         counts = dict(zip(unique, counts))
         if reverse_endianness:
             scaled_dict = {bstr[::-1]: (prob / shots) for (bstr, prob) in counts.items()}