diff --git a/adaptive/learner/average_learner.py b/adaptive/learner/average_learner.py index c3d4892b4..494309a05 100644 --- a/adaptive/learner/average_learner.py +++ b/adaptive/learner/average_learner.py @@ -1,22 +1,24 @@ from __future__ import annotations from math import sqrt -from typing import Callable +from typing import TYPE_CHECKING, Callable import cloudpickle import numpy as np from adaptive.learner.base_learner import BaseLearner from adaptive.notebook_integration import ensure_holoviews -from adaptive.types import Float, Int, Real from adaptive.utils import ( assign_defaults, cache_latest, partial_function_from_dataframe, ) +if TYPE_CHECKING: + from adaptive.types import Float, Int, Real + try: - import pandas + import pandas as pd with_pandas = True @@ -47,6 +49,7 @@ class AverageLearner(BaseLearner): Points that still have to be evaluated. npoints : int Number of evaluated points. + """ def __init__( @@ -57,7 +60,8 @@ def __init__( min_npoints: int = 2, ) -> None: if atol is None and rtol is None: - raise Exception("At least one of `atol` and `rtol` should be set.") + msg = "At least one of `atol` and `rtol` should be set." + raise Exception(msg) if atol is None: atol = np.inf if rtol is None: @@ -92,7 +96,7 @@ def to_dataframe( # type: ignore[override] function_prefix: str = "function.", seed_name: str = "seed", y_name: str = "y", - ) -> pandas.DataFrame: + ) -> pd.DataFrame: """Return the data as a `pandas.DataFrame`. Parameters @@ -116,10 +120,12 @@ def to_dataframe( # type: ignore[override] ------ ImportError If `pandas` is not installed. + """ if not with_pandas: - raise ImportError("pandas is not installed.") - df = pandas.DataFrame(sorted(self.data.items()), columns=[seed_name, y_name]) + msg = "pandas is not installed." + raise ImportError(msg) + df = pd.DataFrame(sorted(self.data.items()), columns=[seed_name, y_name]) df.attrs["inputs"] = [seed_name] df.attrs["output"] = y_name if with_default_function_args: @@ -128,12 +134,12 @@ def to_dataframe( # type: ignore[override] def load_dataframe( # type: ignore[override] self, - df: pandas.DataFrame, + df: pd.DataFrame, with_default_function_args: bool = True, function_prefix: str = "function.", seed_name: str = "seed", y_name: str = "y", - ): + ) -> None: """Load data from a `pandas.DataFrame`. If ``with_default_function_args`` is True, then ``learner.function``'s @@ -153,11 +159,14 @@ def load_dataframe( # type: ignore[override] The ``seed_name`` used in ``to_dataframe``, by default "seed" y_name : str, optional The ``y_name`` used in ``to_dataframe``, by default "y" + """ self.tell_many(df[seed_name].values, df[y_name].values) if with_default_function_args: self.function = partial_function_from_dataframe( - self.function, df, function_prefix + self.function, + df, + function_prefix, ) def ask(self, n: int, tell_pending: bool = True) -> tuple[list[int], list[Float]]: @@ -168,7 +177,7 @@ def ask(self, n: int, tell_pending: bool = True) -> tuple[list[int], list[Float] points = list( set(range(self.n_requested + n)) - set(self.data) - - set(self.pending_points) + - set(self.pending_points), )[:n] loss_improvements = [self._loss_improvement(n) / n] * n @@ -199,7 +208,8 @@ def mean(self) -> Float: @property def std(self) -> Float: """The corrected sample standard deviation of the values - in `data`.""" + in `data`. 
+ """ n = self.npoints if n < self.min_npoints: return np.inf @@ -211,10 +221,7 @@ def std(self) -> Float: @cache_latest def loss(self, real: bool = True, *, n=None) -> Float: - if n is None: - n = self.npoints if real else self.n_requested - else: - n = n + n = (self.npoints if real else self.n_requested) if n is None else n if n < self.min_npoints: return np.inf standard_error = self.std / sqrt(n) @@ -232,7 +239,7 @@ def _loss_improvement(self, n: int) -> Float: else: return np.inf - def remove_unfinished(self): + def remove_unfinished(self) -> None: """Remove uncomputed data from the learner.""" self.pending_points = set() @@ -242,7 +249,9 @@ def plot(self): Returns ------- holoviews.element.Histogram - A histogram of the evaluated data.""" + A histogram of the evaluated data. + + """ hv = ensure_holoviews() vals = [v for v in self.data.values() if v is not None] if not vals: diff --git a/adaptive/learner/average_learner1D.py b/adaptive/learner/average_learner1D.py index 9678b4f64..aa28de700 100644 --- a/adaptive/learner/average_learner1D.py +++ b/adaptive/learner/average_learner1D.py @@ -3,10 +3,9 @@ import math import sys from collections import defaultdict -from collections.abc import Iterable, Sequence from copy import deepcopy from math import hypot -from typing import Callable +from typing import TYPE_CHECKING, Callable import numpy as np import scipy.stats @@ -18,8 +17,11 @@ from adaptive.types import Int, Real from adaptive.utils import assign_defaults, partial_function_from_dataframe +if TYPE_CHECKING: + from collections.abc import Iterable, Sequence + try: - import pandas + import pandas as pd with_pandas = True @@ -73,6 +75,7 @@ class AverageLearner1D(Learner1D): If self.error[x] < min_error, then x will not be resampled anymore, i.e., the smallest confidence interval at x is [self.data[x] - min_error, self.data[x] + min_error]. + """ def __init__( @@ -87,17 +90,22 @@ def __init__( min_samples: int = 50, max_samples: int = sys.maxsize, min_error: float = 0, - ): + ) -> None: if not (0 < delta <= 1): - raise ValueError("Learner requires 0 < delta <= 1.") + msg = "Learner requires 0 < delta <= 1." + raise ValueError(msg) if not (0 < alpha <= 1): - raise ValueError("Learner requires 0 < alpha <= 1.") + msg = "Learner requires 0 < alpha <= 1." + raise ValueError(msg) if not (0 < neighbor_sampling <= 1): - raise ValueError("Learner requires 0 < neighbor_sampling <= 1.") + msg = "Learner requires 0 < neighbor_sampling <= 1." + raise ValueError(msg) if min_samples < 0: - raise ValueError("min_samples should be positive.") + msg = "min_samples should be positive." + raise ValueError(msg) if min_samples > max_samples: - raise ValueError("max_samples should be larger than min_samples.") + msg = "max_samples should be larger than min_samples." 
+ raise ValueError(msg) super().__init__(function, bounds, loss_per_interval) # type: ignore[arg-type] @@ -142,7 +150,7 @@ def new(self) -> AverageLearner1D: @property def nsamples(self) -> int: - """Returns the total number of samples""" + """Returns the total number of samples.""" return sum(self._number_samples.values()) @property @@ -160,7 +168,7 @@ def to_numpy(self, mean: bool = False) -> np.ndarray: (seed, x, *np.atleast_1d(y)) for x, seed_y in self._data_samples.items() for seed, y in seed_y.items() - ] + ], ) def to_dataframe( # type: ignore[override] @@ -171,7 +179,7 @@ def to_dataframe( # type: ignore[override] seed_name: str = "seed", x_name: str = "x", y_name: str = "y", - ) -> pandas.DataFrame: + ) -> pd.DataFrame: """Return the data as a `pandas.DataFrame`. Parameters @@ -197,9 +205,11 @@ def to_dataframe( # type: ignore[override] ------ ImportError If `pandas` is not installed. + """ if not with_pandas: - raise ImportError("pandas is not installed.") + msg = "pandas is not installed." + raise ImportError(msg) if mean: data: list[tuple[Real, Real]] = sorted(self.data.items()) columns = [x_name, y_name] @@ -210,7 +220,7 @@ def to_dataframe( # type: ignore[override] for seed, y in sorted(seed_y.items()) ] columns = [seed_name, x_name, y_name] - df = pandas.DataFrame(data, columns=columns) + df = pd.DataFrame(data, columns=columns) df.attrs["inputs"] = [seed_name, x_name] df.attrs["output"] = y_name if with_default_function_args: @@ -219,13 +229,13 @@ def to_dataframe( # type: ignore[override] def load_dataframe( # type: ignore[override] self, - df: pandas.DataFrame, + df: pd.DataFrame, with_default_function_args: bool = True, function_prefix: str = "function.", seed_name: str = "seed", x_name: str = "x", y_name: str = "y", - ): + ) -> None: """Load data from a `pandas.DataFrame`. If ``with_default_function_args`` is True, then ``learner.function``'s @@ -247,6 +257,7 @@ def load_dataframe( # type: ignore[override] The ``x_name`` used in ``to_dataframe``, by default "x" y_name : str, optional The ``y_name`` used in ``to_dataframe``, by default "y" + """ # Were using zip instead of df[[seed_name, x_name]].values because that will # make the seeds into floats @@ -254,7 +265,9 @@ def load_dataframe( # type: ignore[override] self.tell_many(seed_x, df[y_name].values) if with_default_function_args: self.function = partial_function_from_dataframe( - self.function, df, function_prefix + self.function, + df, + function_prefix, ) def ask(self, n: int, tell_pending: bool = True) -> tuple[Points, list[float]]: # type: ignore[override] @@ -268,17 +281,16 @@ def ask(self, n: int, tell_pending: bool = True) -> tuple[Points, list[float]]: # TODO: if `n` is very large, we should suggest a few different points. points, loss_improvements = self._ask_for_new_point(n) #  Else, check the resampling condition - else: - if len(self.rescaled_error): - # This is in case rescaled_error is empty (e.g. when sigma=0) - x, resc_error = self.rescaled_error.peekitem(0) - # Resampling condition - if resc_error > self.delta: - points, loss_improvements = self._ask_for_more_samples(x, n) - else: - points, loss_improvements = self._ask_for_new_point(n) + elif len(self.rescaled_error): + # This is in case rescaled_error is empty (e.g. 
when sigma=0) + x, resc_error = self.rescaled_error.peekitem(0) + # Resampling condition + if resc_error > self.delta: + points, loss_improvements = self._ask_for_more_samples(x, n) else: points, loss_improvements = self._ask_for_new_point(n) + else: + points, loss_improvements = self._ask_for_new_point(n) if tell_pending: for p in points: @@ -289,7 +301,8 @@ def ask(self, n: int, tell_pending: bool = True) -> tuple[Points, list[float]]: def _ask_for_more_samples(self, x: Real, n: int) -> tuple[Points, list[float]]: """When asking for n points, the learner returns n times an existing point to be resampled, since in general n << min_samples and this point will - need to be resampled many more times""" + need to be resampled many more times. + """ n_existing = self._number_samples.get(x, 0) points = [(seed + n_existing, x) for seed in range(n)] xl, xr = self.neighbors_combined[x] @@ -300,7 +313,7 @@ def _ask_for_more_samples(self, x: Real, n: int) -> tuple[Points, list[float]]: loss_improvement = float("inf") else: loss_improvement = loss - loss * np.sqrt(n_existing) / np.sqrt( - n_existing + n + n_existing + n, ) loss_improvements = [loss_improvement / n] * n return points, loss_improvements @@ -308,7 +321,8 @@ def _ask_for_more_samples(self, x: Real, n: int) -> tuple[Points, list[float]]: def _ask_for_new_point(self, n: int) -> tuple[Points, list[float]]: """When asking for n new points, the learner returns n times a single new point, since in general n << min_samples and this point will need - to be resampled many more times""" + to be resampled many more times. + """ points, (loss_improvement,) = self._ask_points_without_adding(1) seed_points = list(zip(range(n), n * points)) loss_improvements = [loss_improvement / n] * n @@ -324,10 +338,13 @@ def tell_pending(self, seed_x: Point) -> None: # type: ignore[override] def tell(self, seed_x: Point, y: Real) -> None: # type: ignore[override] seed, x = seed_x if y is None: - raise TypeError( + msg = ( "Y-value may not be None, use learner.tell_pending(x)" "to indicate that this value is currently being calculated" ) + raise TypeError( + msg, + ) if x not in self.data: self._update_data(x, y, "new") @@ -344,6 +361,7 @@ def _update_rescaled_error_in_mean(self, x: Real, point_type: str) -> None: ---------- point_type : str Must be either "new" or "resampled". + """ #  Update neighbors x_left, x_right = self.neighbors[x] @@ -493,27 +511,32 @@ def _calc_error_in_mean(self, ys: Iterable[Real], y_avg: Real, n: int) -> float: return t_student * (variance_in_mean / n) ** 0.5 def tell_many( # type: ignore[override] - self, xs: Points | np.ndarray, ys: Sequence[Real] | np.ndarray + self, + xs: Points | np.ndarray, + ys: Sequence[Real] | np.ndarray, ) -> None: # Check that all x are within the bounds # TODO: remove this requirement, all other learners add the data # but ignore it going forward. 
if not np.prod([x >= self.bounds[0] and x <= self.bounds[1] for _, x in xs]): - raise ValueError( + msg = ( "x value out of bounds, " "remove x or enlarge the bounds of the learner" ) + raise ValueError( + msg, + ) # Create a mapping of points to a list of samples mapping: defaultdict[Real, defaultdict[Int, Real]] = defaultdict( - lambda: defaultdict(dict) + lambda: defaultdict(dict), ) for (seed, x), y in zip(xs, ys): mapping[x][seed] = y for x, seed_y_mapping in mapping.items(): if len(seed_y_mapping) == 1: - seed, y = list(seed_y_mapping.items())[0] + seed, y = next(iter(seed_y_mapping.items())) self.tell((seed, x), y) elif len(seed_y_mapping) > 1: # If we stored more than 1 y-value for the previous x, @@ -530,13 +553,17 @@ def tell_many_at_point(self, x: Real, seed_y_mapping: dict[int, Real]) -> None: Value from the function domain. seed_y_mapping : Dict[int, Real] Dictionary of ``seed`` -> ``y`` at ``x``. + """ # Check x is within the bounds if not np.prod(x >= self.bounds[0] and x <= self.bounds[1]): - raise ValueError( + msg = ( "x value out of bounds, " "remove x or enlarge the bounds of the learner" ) + raise ValueError( + msg, + ) # If x is a new point: if x not in self.data: @@ -563,7 +590,9 @@ def tell_many_at_point(self, x: Real, seed_y_mapping: dict[int, Real]) -> None: if n > self.min_samples: self._undersampled_points.discard(x) self.error[x] = self._calc_error_in_mean( - self._data_samples[x].values(), self.data[x], n + self._data_samples[x].values(), + self.data[x], + n, ) self._update_distances(x) self._update_rescaled_error_in_mean(x, "resampled") @@ -595,6 +624,7 @@ def plot(self): plot : `holoviews.element.Scatter * holoviews.element.ErrorBars * holoviews.element.Path` Plot of the evaluated data. + """ hv = ensure_holoviews() if not self.data: @@ -606,7 +636,8 @@ def plot(self): line = hv.Path((xs, ys)) p = scatter * error * line else: - raise Exception("plot() not implemented for vector functions.") + msg = "plot() not implemented for vector functions." + raise Exception(msg) # Plot with 5% empty margins such that the boundary points are visible margin = 0.05 * (self.bounds[1] - self.bounds[0]) @@ -616,7 +647,7 @@ def plot(self): def decreasing_dict() -> ItemSortedDict: - """This initialization orders the dictionary from large to small values""" + """This initialization orders the dictionary from large to small values.""" def sorting_rule(key, value): return -value diff --git a/adaptive/learner/balancing_learner.py b/adaptive/learner/balancing_learner.py index e9a4a661e..bc18584c6 100644 --- a/adaptive/learner/balancing_learner.py +++ b/adaptive/learner/balancing_learner.py @@ -24,7 +24,7 @@ from typing import Literal try: - import pandas + import pandas as pd with_pandas = True except ModuleNotFoundError: @@ -94,6 +94,7 @@ class BalancingLearner(BaseLearner): learner) it may be that the loss cannot be compared *even between learners of the same type*. In this case the `~adaptive.BalancingLearner` will behave in an undefined way. Change the `strategy` in that case. + """ def __init__( @@ -116,8 +117,9 @@ def __init__( self._cdims_default = cdims if len({learner.__class__ for learner in self.learners}) > 1: + msg = "A BalancingLearner can handle only one type of learner." raise TypeError( - "A BalacingLearner can handle only one type" " of learners."
+ msg, ) self.strategy: STRATEGY_TYPE = strategy @@ -153,8 +155,9 @@ def nsamples(self): if hasattr(self.learners[0], "nsamples"): return sum(lrn.nsamples for lrn in self.learners) else: + msg = f"{type(self.learners[0])} has no attribute called `nsamples`." raise AttributeError( - f"{type(self.learners[0])} as no attribute called `nsamples`." + msg, ) @property @@ -165,7 +168,8 @@ def strategy(self) -> STRATEGY_TYPE: the child learners, the number of points per learner, using 'npoints', or by going through all learners one by one using 'cycle'. One can dynamically change the strategy while the simulation is - running by changing the ``learner.strategy`` attribute.""" + running by changing the ``learner.strategy`` attribute. + """ return self._strategy @strategy.setter @@ -181,13 +185,17 @@ def strategy(self, strategy: STRATEGY_TYPE) -> None: self._ask_and_tell = self._ask_and_tell_based_on_cycle self._cycle = itertools.cycle(range(len(self.learners))) else: - raise ValueError( + msg = ( 'Only strategy="loss_improvements", strategy="loss",' ' strategy="npoints", or strategy="cycle" is implemented.' ) + raise ValueError( + msg, + ) def _ask_and_tell_based_on_loss_improvements( - self, n: int + self, + n: int, ) -> tuple[list[tuple[int, Any]], list[float]]: selected = [] # tuples ((learner_index, point), loss_improvement) total_points = [lrn.npoints + len(lrn.pending_points) for lrn in self.learners] @@ -199,7 +207,7 @@ def _ask_and_tell_based_on_loss_improvements( self._ask_cache[index] = learner.ask(n=1, tell_pending=False) points, loss_improvements = self._ask_cache[index] to_select.append( - ((index, points[0]), (loss_improvements[0], -total_points[index])) + ((index, points[0]), (loss_improvements[0], -total_points[index])), ) # Choose the optimal improvement. @@ -212,14 +220,16 @@ def _ask_and_tell_based_on_loss_improvements( return points, loss_improvements def _ask_and_tell_based_on_loss( - self, n: int + self, + n: int, ) -> tuple[list[tuple[int, Any]], list[float]]: selected = [] # tuples ((learner_index, point), loss_improvement) total_points = [lrn.npoints + len(lrn.pending_points) for lrn in self.learners] for _ in range(n): losses = self._losses(real=False) index, _ = max( - enumerate(zip(losses, (-n for n in total_points))), key=itemgetter(1) + enumerate(zip(losses, (-n for n in total_points))), + key=itemgetter(1), ) total_points[index] += 1 @@ -235,7 +245,8 @@ def _ask_and_tell_based_on_loss( return points, loss_improvements def _ask_and_tell_based_on_npoints( - self, n: Int + self, + n: Int, ) -> tuple[list[tuple[Int, Any]], list[float]]: selected = [] # tuples ((learner_index, point), loss_improvement) total_points = [lrn.npoints + len(lrn.pending_points) for lrn in self.learners] @@ -253,7 +264,8 @@ def _ask_and_tell_based_on_npoints( return points, loss_improvements def _ask_and_tell_based_on_cycle( - self, n: int + self, + n: int, ) -> tuple[list[tuple[Int, Any]], list[float]]: points, loss_improvements = [], [] for _ in range(n): @@ -266,7 +278,9 @@ def _ask_and_tell_based_on_cycle( return points, loss_improvements def ask( - self, n: int, tell_pending: bool = True + self, + n: int, + tell_pending: bool = True, ) -> tuple[list[tuple[Int, Any]], list[float]]: """Chose points for learners.""" if n == 0: @@ -348,6 +362,7 @@ def plot( dm : `holoviews.core.DynamicMap` (default) or `holoviews.core.HoloMap` A `DynamicMap` ``(dynamic=True)`` or `HoloMap` ``(dynamic=False)`` with sliders that are defined by `cdims`.
+ """ hv = ensure_holoviews() cdims = cdims or self._cdims_default @@ -438,6 +453,7 @@ def from_product( ----- The order of the child learners inside `learner.learners` is the same as ``adaptive.utils.named_product(**combos)``. + """ learners = [] arguments = named_product(**combos) @@ -465,22 +481,26 @@ def to_dataframe(self, index_name: str = "learner_index", **kwargs): # type: ig ------ ImportError If `pandas` is not installed. + """ if not with_pandas: - raise ImportError("pandas is not installed.") + msg = "pandas is not installed." + raise ImportError(msg) dfs = [] for i, learner in enumerate(self.learners): df = learner.to_dataframe(**kwargs) cols = list(df.columns) df[index_name] = i - df = df[[index_name] + cols] + df = df[[index_name, *cols]] dfs.append(df) - df = pandas.concat(dfs, axis=0, ignore_index=True) - return df + return pd.concat(dfs, axis=0, ignore_index=True) def load_dataframe( # type: ignore[override] - self, df: pandas.DataFrame, index_name: str = "learner_index", **kwargs - ): + self, + df: pd.DataFrame, + index_name: str = "learner_index", + **kwargs, + ) -> None: """Load the data from a `pandas.DataFrame` into the child learners. Parameters @@ -491,6 +511,7 @@ def load_dataframe( # type: ignore[override] The ``index_name`` used in `to_dataframe`, by default "learner_index". **kwargs : dict Keyword arguments passed to each ``child_learner.load_dataframe(**kwargs)``. + """ for i, gr in df.groupby(index_name): self.learners[i].load_dataframe(gr, **kwargs) @@ -529,6 +550,7 @@ def save( >>> runner = adaptive.Runner(learner) >>> # Then save >>> learner.save(combo_fname) # use 'load' in the same way + """ if isinstance(fname, Iterable): for lrn, _fname in zip(self.learners, fname): @@ -557,6 +579,7 @@ def load( Example ------- See the example in the `BalancingLearner.save` doc-string. + """ if isinstance(fname, Iterable): for lrn, _fname in zip(self.learners, fname): @@ -568,7 +591,7 @@ def load( def _get_data(self) -> list[Any]: return [lrn._get_data() for lrn in self.learners] - def _set_data(self, data: list[Any]): + def _set_data(self, data: list[Any]) -> None: for lrn, _data in zip(self.learners, data): lrn._set_data(_data) diff --git a/adaptive/learner/base_learner.py b/adaptive/learner/base_learner.py index ff2d1e483..bf06b5389 100644 --- a/adaptive/learner/base_learner.py +++ b/adaptive/learner/base_learner.py @@ -9,7 +9,7 @@ from adaptive.utils import load, save if TYPE_CHECKING: - import pandas + import pandas as pd def uses_nth_neighbors(n: int): @@ -96,7 +96,7 @@ class BaseLearner(abc.ABC): pending_points: set function: Callable[..., Any] - def tell(self, x, y): + def tell(self, x, y) -> None: """Tell the learner about a single value. Parameters @@ -107,7 +107,7 @@ def tell(self, x, y): """ self.tell_many([x], [y]) - def tell_many(self, xs, ys): + def tell_many(self, xs, ys) -> None: """Tell the learner about some values. Parameters @@ -169,7 +169,7 @@ def _set_data(self, data: Any) -> None: def new(self): """Return a new learner with the same function and parameters.""" - def copy_from(self, other): + def copy_from(self, other) -> None: """Copy over the data from another learner. Parameters @@ -180,7 +180,7 @@ def copy_from(self, other): """ self._set_data(other._get_data()) - def save(self, fname, compress=True): + def save(self, fname, compress=True) -> None: """Save the data of the learner into a pickle file. 
Parameters @@ -195,7 +195,7 @@ def save(self, fname, compress=True) -> None: data = self._get_data() save(fname, data, compress) - def load(self, fname, compress=True): + def load(self, fname, compress=True) -> None: """Load the data of a learner from a pickle file. Parameters @@ -217,7 +217,7 @@ def to_dataframe( with_default_function_args: bool = True, function_prefix: str = "function.", **kwargs: Any, - ) -> pandas.DataFrame: + ) -> pd.DataFrame: """Return the data as a `pandas.DataFrame`. Parameters @@ -242,7 +242,7 @@ def to_dataframe( @abc.abstractmethod def load_dataframe( self, - df: pandas.DataFrame, + df: pd.DataFrame, with_default_function_args: bool = True, function_prefix: str = "function.", **kwargs: Any, diff --git a/adaptive/learner/data_saver.py b/adaptive/learner/data_saver.py index a69807389..4ae3ca7a4 100644 --- a/adaptive/learner/data_saver.py +++ b/adaptive/learner/data_saver.py @@ -2,14 +2,16 @@ import functools from collections import OrderedDict -from typing import Any, Callable +from typing import TYPE_CHECKING, Any, Callable from adaptive.learner.base_learner import BaseLearner, LearnerType from adaptive.utils import copy_docstring_from -try: - import pandas +if TYPE_CHECKING: + import pandas as pd +try: + import pandas # noqa: F401 -- runtime availability check; without this import the except branch is unreachable and with_pandas is always True with_pandas = True except ModuleNotFoundError: @@ -38,6 +39,7 @@ class DataSaver(BaseLearner): >>> from operator import itemgetter >>> _learner = Learner1D(f, bounds=(-1.0, 1.0)) >>> learner = DataSaver(_learner, arg_picker=itemgetter('y')) + """ def __init__(self, learner: LearnerType, arg_picker: Callable) -> None: @@ -81,7 +83,7 @@ def to_dataframe( # type: ignore[override] function_prefix: str = "function.", extra_data_name: str = "extra_data", **kwargs: Any, - ) -> pandas.DataFrame: + ) -> pd.DataFrame: """Return the data as a concatenated `pandas.DataFrame` from child learners. Parameters @@ -99,9 +101,11 @@ def to_dataframe( # type: ignore[override] ------ ImportError If `pandas` is not installed. + """ if not with_pandas: - raise ImportError("pandas is not installed.") + msg = "pandas is not installed." + raise ImportError(msg) df = self.learner.to_dataframe( with_default_function_args=with_default_function_args, function_prefix=function_prefix, @@ -115,7 +119,7 @@ def to_dataframe( # type: ignore[override] def load_dataframe( # type: ignore[override] self, - df: pandas.DataFrame, + df: pd.DataFrame, with_default_function_args: bool = True, function_prefix: str = "function.", extra_data_name: str = "extra_data", @@ -138,6 +142,7 @@ def load_dataframe( # type: ignore[override] be ``input_names=('x', 'y')``. **kwargs : dict Keyword arguments passed to each ``child_learner.load_dataframe(**kwargs)``. + """ self.learner.load_dataframe( df, @@ -146,7 +151,7 @@ def load_dataframe( # type: ignore[override] **kwargs, ) keys = df.attrs.get("inputs", list(input_names)) - for _, x in df[keys + [extra_data_name]].iterrows(): + for _, x in df[[*keys, extra_data_name]].iterrows(): key = _to_key(x[:-1]) self.extra_data[key] = x[-1] @@ -216,5 +221,6 @@ def make_datasaver(learner_type, arg_picker): ... arg_picker=itemgetter('y')) >>> learner = adaptive.BalancingLearner.from_product( ...
jacobi, learner_type, dict(bounds=(0, 1)), combos) + """ return functools.partial(_ds, learner_type, arg_picker) diff --git a/adaptive/learner/integrator_coeffs.py b/adaptive/learner/integrator_coeffs.py index 862f76eb0..5b7ce3d55 100644 --- a/adaptive/learner/integrator_coeffs.py +++ b/adaptive/learner/integrator_coeffs.py @@ -74,7 +74,8 @@ def newton(n: int) -> np.ndarray: c = (n + 1) * [0] for (d, a), m in terms.items(): if m and a != 0: - raise ValueError("Newton polynomial cannot be represented exactly.") + msg = "Newton polynomial cannot be represented exactly." + raise ValueError(msg) c[n - d] += m # The check could be removed and the above line replaced by # the following, but then the result would be no longer exact. @@ -191,4 +192,5 @@ def __getattr__(name): try: return _coefficients()[name] except KeyError: - raise AttributeError(f"module {__name__} has no attribute {name}") from None + msg = f"module {__name__} has no attribute {name}" + raise AttributeError(msg) from None diff --git a/adaptive/learner/integrator_learner.py b/adaptive/learner/integrator_learner.py index 20631a0c5..5f63e0ce9 100644 --- a/adaptive/learner/integrator_learner.py +++ b/adaptive/learner/integrator_learner.py @@ -18,7 +18,7 @@ from adaptive.utils import assign_defaults, cache_latest, restore try: - import pandas + import pandas as pd with_pandas = True @@ -403,7 +403,8 @@ def approximating_intervals(self) -> set[_Interval]: def tell(self, point: float, value: float) -> None: if point not in self.x_mapping: - raise ValueError(f"Point {point} doesn't belong to any interval") + msg = f"Point {point} doesn't belong to any interval" + raise ValueError(msg) self.data[point] = value self.pending_points.discard(point) @@ -434,11 +435,11 @@ def tell(self, point: float, value: float) -> None: assert ival in self.ivals self.priority_split.append(ival) - def tell_pending(self): + def tell_pending(self) -> None: pass def propagate_removed(self, ival: _Interval) -> None: - def _propagate_removed_down(ival): + def _propagate_removed_down(ival) -> None: ival.removed = True self.ivals.discard(ival) @@ -474,7 +475,8 @@ def _ask_and_tell_pending(self, n: int) -> tuple[list[float], list[float]]: try: self._fill_stack() except ValueError: - raise RuntimeError("No way to improve the integral estimate.") from None + msg = "No way to improve the integral estimate." + raise RuntimeError(msg) from None new_points, new_loss_improvements = self.pop_from_stack(n_left) points += new_points loss_improvements += new_loss_improvements @@ -490,7 +492,7 @@ def pop_from_stack(self, n: int) -> tuple[list[float], list[float]]: ] return points, loss_improvements - def remove_unfinished(self): + def remove_unfinished(self) -> None: pass def _fill_stack(self) -> list[float]: @@ -580,7 +582,7 @@ def to_dataframe( # type: ignore[override] function_prefix: str = "function.", x_name: str = "x", y_name: str = "y", - ) -> pandas.DataFrame: + ) -> pd.DataFrame: """Return the data as a `pandas.DataFrame`. Parameters @@ -607,8 +609,9 @@ def to_dataframe( # type: ignore[override] """ if not with_pandas: - raise ImportError("pandas is not installed.") - df = pandas.DataFrame(sorted(self.data.items()), columns=[x_name, y_name]) + msg = "pandas is not installed." 
+ raise ImportError(msg) + df = pd.DataFrame(sorted(self.data.items()), columns=[x_name, y_name]) df.attrs["inputs"] = [x_name] df.attrs["output"] = y_name if with_default_function_args: @@ -617,7 +620,7 @@ def to_dataframe( # type: ignore[override] def load_dataframe( # type: ignore[override] self, - df: pandas.DataFrame, + df: pd.DataFrame, with_default_function_args: bool = True, function_prefix: str = "function.", x_name: str = "x", @@ -659,7 +662,7 @@ def _get_data(self): self.first_ival, ) - def _set_data(self, data): + def _set_data(self, data) -> None: ( self.priority_split, self.data, diff --git a/adaptive/learner/learner1D.py b/adaptive/learner/learner1D.py index bf04743bd..5e35a170a 100644 --- a/adaptive/learner/learner1D.py +++ b/adaptive/learner/learner1D.py @@ -4,7 +4,6 @@ import itertools import math import sys -from collections.abc import Sequence from copy import copy, deepcopy from typing import TYPE_CHECKING, Any, Callable, Optional, Union @@ -17,7 +16,6 @@ from adaptive.learner.learnerND import volume from adaptive.learner.triangulation import simplex_volume_in_embedding from adaptive.notebook_integration import ensure_holoviews -from adaptive.types import Float, Int, Real from adaptive.utils import ( assign_defaults, cache_latest, @@ -31,7 +29,7 @@ try: - import pandas + import pandas as pd with_pandas = True @@ -42,6 +40,10 @@ # -- types -- # Commonly used types + from collections.abc import Sequence + + from adaptive.types import Float, Int, Real + Interval: TypeAlias = Union[tuple[float, float], tuple[float, float, int]] NeighborsType: TypeAlias = SortedDict[float, list[Optional[float]]] @@ -49,7 +51,10 @@ XsType0: TypeAlias = tuple[float, float] YsType0: TypeAlias = Union[tuple[float, float], tuple[np.ndarray, np.ndarray]] XsType1: TypeAlias = tuple[ - Optional[float], Optional[float], Optional[float], Optional[float] + Optional[float], + Optional[float], + Optional[float], + Optional[float], ] YsType1: TypeAlias = Union[ tuple[Optional[float], Optional[float], Optional[float], Optional[float]], @@ -62,7 +67,8 @@ ] XsTypeN: TypeAlias = tuple[Optional[float], ...] YsTypeN: TypeAlias = Union[ - tuple[Optional[float], ...], tuple[Optional[np.ndarray], ...] + tuple[Optional[float], ...], + tuple[Optional[np.ndarray], ...], ] @@ -92,9 +98,9 @@ def uniform_loss(xs: XsType0, ys: YsType0) -> Float: ... bounds=(-1, 1), ... loss_per_interval=uniform_sampling_1d) >>> + """ - dx = xs[1] - xs[0] - return dx + return xs[1] - xs[0] @uses_nth_neighbors(0) @@ -141,7 +147,8 @@ def triangle_loss(xs: XsType1, ys: YsType1) -> Float: def resolution_loss_function( - min_length: Real = 0, max_length: Real = 1 + min_length: Real = 0, + max_length: Real = 1, ) -> Callable[[XsType0, YsType0], Float]: """Loss function that is similar to the `default_loss` function, but you can set the maximum and minimum size of an interval. 
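[Reviewer aside -- illustration only, not part of the patch. The hunks around this point touch `resolution_loss_function` in learner1D.py; the sketch below shows how that API is consumed. The function `f` and the threshold values are invented for the example.]

    import numpy as np
    from adaptive import Learner1D
    from adaptive.learner.learner1D import resolution_loss_function

    def f(x):
        return np.tanh(30 * x)  # steep feature near x = 0 (illustrative)

    # Intervals shorter than 1% of the scaled domain get loss 0 and are not
    # refined further; intervals longer than 20% get infinite loss and are
    # always split first.
    loss = resolution_loss_function(min_length=0.01, max_length=0.2)
    learner = Learner1D(f, bounds=(-1, 1), loss_per_interval=loss)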
@@ -162,6 +169,7 @@ def resolution_loss_function( >>> >>> loss = resolution_loss_function(min_length=0.01, max_length=1) >>> learner = adaptive.Learner1D(f, bounds=(-1, -1), loss_per_interval=loss) + """ @uses_nth_neighbors(0) @@ -173,14 +181,15 @@ def resolution_loss(xs: XsType0, ys: YsType0) -> Float: if loss > max_length: # Return infinite such that this interval will be picked return np.inf - loss = default_loss(xs, ys) - return loss + return default_loss(xs, ys) return resolution_loss def curvature_loss_function( - area_factor: Real = 1, euclid_factor: Real = 0.02, horizontal_factor: Real = 0.02 + area_factor: Real = 1, + euclid_factor: Real = 0.02, + horizontal_factor: Real = 0.02, ) -> Callable[[XsType1, YsType1], Float]: # XXX: add a doc-string @uses_nth_neighbors(1) @@ -203,7 +212,8 @@ def curvature_loss(xs: XsType1, ys: YsType1) -> Float: def linspace(x_left: Real, x_right: Real, n: Int) -> list[Float]: """This is equivalent to 'np.linspace(x_left, x_right, n, endpoint=False)[1:]', - but it is 15-30 times faster for small 'n'.""" + but it is 15-30 times faster for small 'n'. + """ if n == 1: # This is just an optimization return [] @@ -223,7 +233,9 @@ def _get_neighbors_from_array(xs: np.ndarray) -> NeighborsType: def _get_intervals( - x: float, neighbors: NeighborsType, nth_neighbors: int + x: float, + neighbors: NeighborsType, + nth_neighbors: int, ) -> list[tuple[float, float]]: nn = nth_neighbors i = neighbors.index(x) @@ -275,6 +287,7 @@ class Learner1D(BaseLearner): If `loss_per_interval` doesn't have such an attribute, it's assumed that is uses **no** neighboring intervals. Also see the `uses_nth_neighbors` decorator for more information. + """ def __init__( @@ -282,11 +295,12 @@ def __init__( function: Callable[[Real], Float | np.ndarray], bounds: tuple[Real, Real], loss_per_interval: Callable[[XsTypeN, YsTypeN], Float] | None = None, - ): + ) -> None: self.function = function # type: ignore if loss_per_interval is not None and hasattr( - loss_per_interval, "nth_neighbors" + loss_per_interval, + "nth_neighbors", ): self.nth_neighbors = loss_per_interval.nth_neighbors else: @@ -362,7 +376,7 @@ def to_dataframe( # type: ignore[override] function_prefix: str = "function.", x_name: str = "x", y_name: str = "y", - ) -> pandas.DataFrame: + ) -> pd.DataFrame: """Return the data as a `pandas.DataFrame`. Parameters @@ -386,11 +400,13 @@ def to_dataframe( # type: ignore[override] ------ ImportError If `pandas` is not installed. + """ if not with_pandas: - raise ImportError("pandas is not installed.") + msg = "pandas is not installed." 
+ raise ImportError(msg) xs, ys = zip(*sorted(self.data.items())) if self.data else ([], []) - df = pandas.DataFrame(xs, columns=[x_name]) + df = pd.DataFrame(xs, columns=[x_name]) df[y_name] = ys df.attrs["inputs"] = [x_name] df.attrs["output"] = y_name @@ -400,7 +416,7 @@ def to_dataframe( # type: ignore[override] def load_dataframe( # type: ignore[override] self, - df: pandas.DataFrame, + df: pd.DataFrame, with_default_function_args: bool = True, function_prefix: str = "function.", x_name: str = "x", @@ -425,11 +441,14 @@ def load_dataframe( # type: ignore[override] The ``x_name`` used in ``to_dataframe``, by default "x" y_name : str, optional The ``y_name`` used in ``to_dataframe``, by default "y" + """ self.tell_many(df[x_name].values, df[y_name].values) if with_default_function_args: self.function = partial_function_from_dataframe( - self.function, df, function_prefix + self.function, + df, + function_prefix, ) @property @@ -464,7 +483,8 @@ def _get_point_by_index(self, ind: int) -> float | None: return self.neighbors.keys()[ind] def _get_loss_in_interval(self, x_left: float, x_right: float) -> float: - assert x_left is not None and x_right is not None + assert x_left is not None + assert x_right is not None if x_right - x_left < self._dx_eps: return 0 @@ -484,7 +504,9 @@ def _get_loss_in_interval(self, x_left: float, x_right: float) -> float: return self.loss_per_interval(xs_scaled, ys_scaled) def _update_interpolated_loss_in_interval( - self, x_left: float, x_right: float + self, + x_left: float, + x_right: float, ) -> None: if x_left is None or x_right is None: return @@ -502,7 +524,7 @@ def _update_interpolated_loss_in_interval( a = b def _update_losses(self, x: float, real: bool = True) -> None: - """Update all losses that depend on x""" + """Update all losses that depend on x.""" # When we add a new point x, we should update the losses # (x_left, x_right) are the "real" neighbors of 'x'. x_left, x_right = self._find_neighbors(x, self.neighbors) @@ -592,10 +614,13 @@ def tell(self, x: float, y: Float | Sequence[Float] | np.ndarray) -> None: # The point is already evaluated before return if y is None: - raise TypeError( + msg = ( "Y-value may not be None, use learner.tell_pending(x)" "to indicate that this value is currently being calculated" ) + raise TypeError( + msg, + ) # either it is a float/int, if not, try casting to a np.array if not isinstance(y, (float, int)): @@ -730,7 +755,8 @@ def _missing_bounds(self) -> list[Real]: def _ask_points_without_adding(self, n: int) -> tuple[list[float], list[float]]: """Return 'n' points that are expected to maximally reduce the loss. - Without altering the state of the learner""" + Without altering the state of the learner. 
+ """ # Find out how to divide the n points over the intervals # by finding positive integer n_i that minimize max(L_i / n_i) subject # to a constraint that sum(n_i) = n + N, with N the total number of @@ -789,13 +815,13 @@ def _ask_points_without_adding(self, n: int) -> tuple[list[float], list[float]]: points = list( itertools.chain.from_iterable( linspace(x_l, x_r, n) for (x_l, x_r, n) in quals - ) + ), ) loss_improvements = list( itertools.chain.from_iterable( itertools.repeat(quals[x0, x1, n], n - 1) for (x0, x1, n) in quals - ) + ), ) # add the missing bounds @@ -805,7 +831,9 @@ def _ask_points_without_adding(self, n: int) -> tuple[list[float], list[float]]: return points, loss_improvements def _loss( - self, mapping: dict[Interval, float], ival: Interval + self, + mapping: dict[Interval, float], + ival: Interval, ) -> tuple[float, Interval]: loss = mapping[ival] return finite_loss(ival, loss, self._scale[0]) @@ -822,9 +850,11 @@ def plot(self, *, scatter_or_line: str = "scatter"): ------- plot : `holoviews.Overlay` Plot of the evaluated data. + """ if scatter_or_line not in ("scatter", "line"): - raise ValueError("scatter_or_line must be 'scatter' or 'line'") + msg = "scatter_or_line must be 'scatter' or 'line'" + raise ValueError(msg) hv = ensure_holoviews() xs, ys = zip(*sorted(self.data.items())) if self.data else ([], []) @@ -882,13 +912,13 @@ def sort_key(ival, loss): loss, ival = finite_loss(ival, loss, x_scale) return -loss, ival - sorted_dict = ItemSortedDict(sort_key) - return sorted_dict + return ItemSortedDict(sort_key) def finite_loss(ival: Interval, loss: float, x_scale: float) -> tuple[float, Interval]: """Get the so-called finite_loss of an interval in order to be able to - sort intervals that have infinite loss.""" + sort intervals that have infinite loss. + """ # If the loss is infinite we return the # distance between the two points. if math.isinf(loss) or math.isnan(loss): diff --git a/adaptive/learner/learner2D.py b/adaptive/learner/learner2D.py index 1ea381794..ba75ae8cd 100644 --- a/adaptive/learner/learner2D.py +++ b/adaptive/learner/learner2D.py @@ -3,28 +3,32 @@ import itertools import warnings from collections import OrderedDict -from collections.abc import Iterable from copy import copy from math import sqrt -from typing import Callable +from typing import TYPE_CHECKING, Callable import cloudpickle import numpy as np from scipy import interpolate -from scipy.interpolate.interpnd import LinearNDInterpolator from adaptive.learner.base_learner import BaseLearner from adaptive.learner.triangulation import simplex_volume_in_embedding from adaptive.notebook_integration import ensure_holoviews -from adaptive.types import Bool, Float, Real from adaptive.utils import ( assign_defaults, cache_latest, partial_function_from_dataframe, ) +if TYPE_CHECKING: + from collections.abc import Iterable + + from scipy.interpolate.interpnd import LinearNDInterpolator + + from adaptive.types import Bool, Float, Real + try: - import pandas + import pandas as pd with_pandas = True @@ -47,10 +51,13 @@ def deviations(ip: LinearNDInterpolator) -> list[np.ndarray]: ------- deviations : list The deviation per triangle. 
+ """ values = ip.values / (ip.values.ptp(axis=0).max() or 1) gradients = interpolate.interpnd.estimate_gradients_2d_global( - ip.tri, values, tol=1e-6 + ip.tri, + values, + tol=1e-6, ) simplices = ip.tri.simplices @@ -68,8 +75,7 @@ def deviation(p, v, g): return dev n_levels = vs.shape[2] - devs = [deviation(p, vs[:, :, i], gs[:, :, i]) for i in range(n_levels)] - return devs + return [deviation(p, vs[:, :, i], gs[:, :, i]) for i in range(n_levels)] def areas(ip: LinearNDInterpolator) -> np.ndarray: @@ -86,11 +92,11 @@ def areas(ip: LinearNDInterpolator) -> np.ndarray: ------- areas : numpy.ndarray The area per triangle in ``ip.tri``. + """ p = ip.tri.points[ip.tri.simplices] q = p[:, :-1, :] - p[:, -1, None, :] - areas = abs(q[:, 0, 0] * q[:, 1, 1] - q[:, 0, 1] * q[:, 1, 0]) / 2 - return areas + return abs(q[:, 0, 0] * q[:, 1, 1] - q[:, 0, 1] * q[:, 1, 0]) / 2 def uniform_loss(ip: LinearNDInterpolator) -> np.ndarray: @@ -120,12 +126,14 @@ def uniform_loss(ip: LinearNDInterpolator) -> np.ndarray: ... loss_per_triangle=uniform_loss, ... ) >>> + """ return np.sqrt(areas(ip)) def resolution_loss_function( - min_distance: float = 0, max_distance: float = 1 + min_distance: float = 0, + max_distance: float = 1, ) -> Callable[[LinearNDInterpolator], np.ndarray]: """Loss function that is similar to the `default_loss` function, but you can set the maximimum and minimum size of a triangle. @@ -147,6 +155,7 @@ def resolution_loss_function( >>> >>> loss = resolution_loss_function(min_distance=0.01, max_distance=1) >>> learner = adaptive.Learner2D(f, bounds=[(-1, -1), (1, 1)], loss_per_triangle=loss) + """ def resolution_loss(ip): @@ -154,11 +163,11 @@ def resolution_loss(ip): A = areas(ip) # Setting areas with a small area to zero such that they won't be chosen again - loss[A < min_distance**2] = 0 + loss[min_distance**2 > A] = 0 # Setting triangles that have a size larger than max_distance to infinite loss # such that these triangles will be picked - loss[A > max_distance**2] = np.inf + loss[max_distance**2 < A] = np.inf return loss @@ -191,6 +200,7 @@ def minimize_triangle_surface_loss(ip: LinearNDInterpolator) -> np.ndarray: >>> learner = adaptive.Learner2D(f, bounds=[(-1, -1), (1, 1)], ... loss_per_triangle=minimize_triangle_surface_loss) >>> + """ tri = ip.tri points = tri.points[tri.simplices] @@ -224,11 +234,11 @@ def default_loss(ip: LinearNDInterpolator) -> np.ndarray: ------- losses : numpy.ndarray Loss per triangle in ``ip.tri``. + """ dev = np.sum(deviations(ip), axis=0) A = areas(ip) - losses = dev * np.sqrt(A) + 0.3 * A - return losses + return dev * np.sqrt(A) + 0.3 * A def thresholded_loss_function( @@ -236,8 +246,7 @@ def thresholded_loss_function( upper_threshold: float | None = None, priority_factor: float = 0.1, ) -> Callable[[LinearNDInterpolator], np.ndarray]: - """ - Factory function to create a custom loss function that deprioritizes + """Factory function to create a custom loss function that deprioritizes values above an upper threshold and below a lower threshold. Parameters @@ -256,6 +265,7 @@ def thresholded_loss_function( ------- custom_loss : Callable[[LinearNDInterpolator], np.ndarray] A custom loss function that can be used with Learner2D. + """ def custom_loss(ip: LinearNDInterpolator) -> np.ndarray: @@ -269,6 +279,7 @@ def custom_loss(ip: LinearNDInterpolator) -> np.ndarray: ------- losses : numpy.ndarray Loss per triangle in ``ip.tri``. 
+ """ losses = default_loss(ip) @@ -311,6 +322,7 @@ def choose_point_in_triangle(triangle: np.ndarray, max_badness: int) -> np.ndarr ------- point : numpy.ndarray The x and y coordinate of the suggested new point. + """ a, b, c = triangle area = 0.5 * np.cross(b - a, c - a) @@ -345,6 +357,7 @@ def triangle_loss(ip): This loss function is *extremely* slow. It is here because it gives the same result as the `adaptive.LearnerND`\s `~adaptive.learner.learnerND.triangle_loss`. + """ tri = ip.tri @@ -355,7 +368,8 @@ def get_neighbors(i, ip): return np.concatenate((tri.points[c], ip.values[c]), axis=-1) simplices = np.concatenate( - [tri.points[tri.simplices], ip.values[tri.simplices]], axis=-1 + [tri.points[tri.simplices], ip.values[tri.simplices]], + axis=-1, ) neighbors = [get_neighbors(i, ip) for i in range(len(tri.simplices))] @@ -427,6 +441,7 @@ class Learner2D(BaseLearner): `~adaptive.learner.learner2D.deviations` to calculate the areas and deviations from a linear interpolation over each triangle. + """ def __init__( @@ -473,7 +488,7 @@ def to_numpy(self): and ``(npoints, 2+vdim)`` if ``learner.function`` returns a vector of length ``vdim``. """ return np.array( - [(x, y, *np.atleast_1d(z)) for (x, y), z in sorted(self.data.items())] + [(x, y, *np.atleast_1d(z)) for (x, y), z in sorted(self.data.items())], ) def to_dataframe( # type: ignore[override] @@ -483,7 +498,7 @@ def to_dataframe( # type: ignore[override] x_name: str = "x", y_name: str = "y", z_name: str = "z", - ) -> pandas.DataFrame: + ) -> pd.DataFrame: """Return the data as a `pandas.DataFrame`. Parameters @@ -511,11 +526,13 @@ def to_dataframe( # type: ignore[override] ------ ImportError If `pandas` is not installed. + """ if not with_pandas: - raise ImportError("pandas is not installed.") + msg = "pandas is not installed." + raise ImportError(msg) data = sorted((x, y, z) for (x, y), z in self.data.items()) - df = pandas.DataFrame(data, columns=[x_name, y_name, z_name]) + df = pd.DataFrame(data, columns=[x_name, y_name, z_name]) df.attrs["inputs"] = [x_name, y_name] df.attrs["output"] = z_name if with_default_function_args: @@ -524,13 +541,13 @@ def to_dataframe( # type: ignore[override] def load_dataframe( # type: ignore[override] self, - df: pandas.DataFrame, + df: pd.DataFrame, with_default_function_args: bool = True, function_prefix: str = "function.", x_name: str = "x", y_name: str = "y", z_name: str = "z", - ): + ) -> None: """Load data from a `pandas.DataFrame`. If ``with_default_function_args`` is True, then ``learner.function``'s @@ -552,12 +569,15 @@ def load_dataframe( # type: ignore[override] The ``y_name`` used in ``to_dataframe``, by default "y" z_name : str, optional The ``z_name`` used in ``to_dataframe``, by default "z" + """ data = df.set_index([x_name, y_name])[z_name].to_dict() self._set_data(data) if with_default_function_args: self.function = partial_function_from_dataframe( - self.function, df, function_prefix + self.function, + df, + function_prefix, ) def _scale(self, points: list[tuple[float, float]] | np.ndarray) -> np.ndarray: @@ -596,7 +616,8 @@ def bounds_are_done(self) -> bool: ) def interpolated_on_grid( - self, n: int | None = None + self, + n: int | None = None, ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """Get the interpolated data on a grid. 
@@ -611,6 +632,7 @@ def interpolated_on_grid( xs : 1D numpy.ndarray ys : 1D numpy.ndarray interpolated_on_grid : 2D numpy.ndarray + """ ip = self.interpolator(scaled=True) if n is None: @@ -661,7 +683,7 @@ def _data_combined(self) -> tuple[np.ndarray, np.ndarray]: return points_combined, values_combined def ip(self) -> LinearNDInterpolator: - """Deprecated, use `self.interpolator(scaled=True)`""" + """Deprecated, use `self.interpolator(scaled=True)`.""" warnings.warn( "`learner.ip()` is deprecated, use `learner.interpolator(scaled=True)`." " This will be removed in v1.0.", @@ -690,6 +712,7 @@ def interpolator(self, *, scaled: bool = False) -> LinearNDInterpolator: >>> xs, ys = [np.linspace(*b, num=100) for b in learner.bounds] >>> ip = learner.interpolator() >>> zs = ip(xs[:, None], ys[None, :]) + """ if scaled: if self._ip is None: @@ -704,7 +727,8 @@ def interpolator(self, *, scaled: bool = False) -> LinearNDInterpolator: def _interpolator_combined(self) -> LinearNDInterpolator: """A `scipy.interpolate.LinearNDInterpolator` instance containing the learner's data *and* interpolated data of - the `pending_points`.""" + the `pending_points`. + """ if self._ip_combined is None: points, values = self._data_combined() points = self._scale(points) @@ -734,10 +758,12 @@ def tell_pending(self, point: tuple[float, float]) -> None: self._stack.pop(point, None) def _fill_stack( - self, stack_till: int = 1 + self, + stack_till: int = 1, ) -> tuple[list[tuple[float, float]], list[float]]: if len(self.data) + len(self.pending_points) < self.ndim + 1: - raise ValueError("too few points...") + msg = "too few points..." + raise ValueError(msg) # Interpolate ip = self._interpolator_combined() @@ -775,7 +801,9 @@ def _fill_stack( return points_new, losses_new def ask( - self, n: int, tell_pending: bool = True + self, + n: int, + tell_pending: bool = True, ) -> tuple[list[tuple[float, float] | np.ndarray], list[float]]: # Even if tell_pending is False we add the point such that _fill_stack # will return new points, later we remove these points if needed. @@ -790,7 +818,7 @@ def ask( # than the number of triangles between the points. Therefore # it could fill up till a length smaller than `stack_till`. new_points, new_loss_improvements = self._fill_stack( - stack_till=max(n_left, self.stack_size) + stack_till=max(n_left, self.stack_size), ) for p in new_points[:n_left]: self.tell_pending(p) @@ -849,6 +877,7 @@ def plot(self, n=None, tri_alpha=0): ----- The plot object that is returned if ``learner.function`` returns a vector *cannot* be used with the live_plotting functionality. 
+ """ hv = ensure_holoviews() x, y = self.bounds @@ -882,7 +911,9 @@ def plot(self, n=None, tri_alpha=0): im = hv.Image([], bounds=lbrt) tris = hv.EdgePaths([]) return im.opts(cmap="viridis") * tris.opts( - line_width=0.5, alpha=tri_alpha, tools=[] + line_width=0.5, + alpha=tri_alpha, + tools=[], ) def _get_data(self) -> dict[tuple[float, float], Float | np.ndarray]: diff --git a/adaptive/learner/learnerND.py b/adaptive/learner/learnerND.py index 96792f863..850c1f90d 100644 --- a/adaptive/learner/learnerND.py +++ b/adaptive/learner/learnerND.py @@ -29,7 +29,7 @@ ) try: - import pandas + import pandas as pd with_pandas = True @@ -50,8 +50,7 @@ def volume(simplex, ys=None): # See https://www.jstor.org/stable/2315353 dim = len(simplex) - 1 - vol = np.abs(fast_det(matrix)) / np.math.factorial(dim) - return vol + return np.abs(fast_det(matrix)) / np.math.factorial(dim) def orientation(simplex): @@ -311,17 +310,20 @@ class LearnerND(BaseLearner): """ - def __init__(self, func, bounds, loss_per_simplex=None): + def __init__(self, func, bounds, loss_per_simplex=None) -> None: self._vdim = None self.loss_per_simplex = loss_per_simplex or default_loss if hasattr(self.loss_per_simplex, "nth_neighbors"): if self.loss_per_simplex.nth_neighbors > 1: - raise NotImplementedError( + msg = ( "The provided loss function wants " "next-nearest neighboring simplices for the loss computation, " "this feature is not yet implemented, either use " - "nth_neightbors = 0 or 1", + "nth_neightbors = 0 or 1" + ) + raise NotImplementedError( + msg, ) self.nth_neighbors = self.loss_per_simplex.nth_neighbors else: @@ -418,7 +420,7 @@ def to_dataframe( # type: ignore[override] function_prefix: str = "function.", point_names: tuple[str, ...] = ("x", "y", "z"), value_name: str = "value", - ) -> pandas.DataFrame: + ) -> pd.DataFrame: """Return the data as a `pandas.DataFrame`. Parameters @@ -446,14 +448,18 @@ def to_dataframe( # type: ignore[override] """ if not with_pandas: - raise ImportError("pandas is not installed.") + msg = "pandas is not installed." + raise ImportError(msg) if len(point_names) != self.ndim: - raise ValueError( + msg = ( f"point_names ({point_names}) should have the" - f" same length as learner.ndims ({self.ndim})", + f" same length as learner.ndims ({self.ndim})" + ) + raise ValueError( + msg, ) data = [(*x, y) for x, y in self.data.items()] - df = pandas.DataFrame(data, columns=[*point_names, value_name]) + df = pd.DataFrame(data, columns=[*point_names, value_name]) df.attrs["inputs"] = list(point_names) df.attrs["output"] = value_name if with_default_function_args: @@ -462,12 +468,12 @@ def to_dataframe( # type: ignore[override] def load_dataframe( # type: ignore[override] self, - df: pandas.DataFrame, + df: pd.DataFrame, with_default_function_args: bool = True, function_prefix: str = "function.", point_names: tuple[str, ...] = ("x", "y", "z"), value_name: str = "value", - ): + ) -> None: """Load data from a `pandas.DataFrame`. 
If ``with_default_function_args`` is True, then ``learner.function``'s @@ -560,6 +566,8 @@ def tell(self, point, value): simplex = None to_delete, to_add = tri.add_point(point, simplex, transform=self._transform) self._update_losses(to_delete, to_add) + return None + + return None def _simplex_exists(self, simplex): simplex = tuple(sorted(simplex)) @@ -575,7 +583,7 @@ def inside_bounds(self, point): (mn - eps) <= p <= (mx + eps) for p, (mn, mx) in zip(point, self._bbox) ) - def tell_pending(self, point, *, simplex=None): + def tell_pending(self, point, *, simplex=None) -> None: point = tuple(point) if not self.inside_bounds(point): return @@ -614,7 +622,7 @@ def _try_adding_pending_point_to_simplex(self, point, simplex): self._pending_to_simplex[point] = simplex return self._subtriangulations[simplex].add_point(point) - def _update_subsimplex_losses(self, simplex, new_subsimplices): + def _update_subsimplex_losses(self, simplex, new_subsimplices) -> None: loss = self._losses[simplex] loss_density = loss / self.tri.volume(simplex) @@ -681,9 +689,12 @@ def _pop_highest_existing_simplex(self): # Could not find a simplex, this code should never be reached assert self.tri is not None - raise AssertionError( + msg = ( "Could not find a simplex to subdivide. Yet there should always" - " be a simplex available if LearnerND.tri() is not None.", + " be a simplex available if LearnerND.tri() is not None." + ) + raise AssertionError( + msg, ) def _ask_best_point(self): @@ -764,7 +775,7 @@ def _compute_loss(self, simplex): ), ) - def _update_losses(self, to_delete: set, to_add: set): + def _update_losses(self, to_delete: set, to_add: set) -> None: # XXX: add the points outside the triangulation to this as well pending_points_unbound = set() @@ -812,7 +823,7 @@ def _update_losses(self, to_delete: set, to_add: set): self._subtriangulations[simplex].simplices, ) - def _recompute_all_losses(self): + def _recompute_all_losses(self) -> None: """Recompute all losses and pending losses.""" # amortized O(N) complexity if self.tri is None: @@ -841,7 +852,7 @@ def _scale(self): # get the output scale return self._max_value - self._min_value - def _update_range(self, new_output): + def _update_range(self, new_output) -> bool: if self._min_value is None or self._max_value is None: # this is the first point, nothing to do, just set the range self._min_value = np.min(new_output) @@ -882,7 +893,7 @@ def loss(self, real: bool = True): losses = self._losses if self.tri is not None else {} return max(losses.values()) if losses else float("inf") - def remove_unfinished(self): + def remove_unfinished(self) -> None: # XXX: implement this method self.pending_points = set() self._subtriangulations = {} @@ -905,14 +916,17 @@ def plot(self, n=None, tri_alpha=0): """ hv = ensure_holoviews() if self.vdim > 1: + msg = "holoviews currently does not support 3D surface plots in bokeh." raise NotImplementedError( - "holoviews currently does not support", + msg, - "3D surface plots in bokeh.", ) if self.ndim != 2: - raise NotImplementedError( + msg = ( "Only 2D plots are implemented: You can " - "plot a 2D slice with 'plot_slice'.", + "plot a 2D slice with 'plot_slice'."
+ ) + raise NotImplementedError( + msg, ) x, y = self._bbox lbrt = x[0], y[0], x[1], y[1] @@ -973,8 +988,9 @@ def plot_slice(self, cut_mapping, n=None): if not self.data: return hv.Scatter([]) * hv.Path([]) elif self.vdim > 1: + msg = "multidimensional output not yet supported by `plot_slice`" raise NotImplementedError( - "multidimensional output not yet supported by `plot_slice`", + msg, ) n = n or 201 values = [ @@ -993,8 +1009,9 @@ def plot_slice(self, cut_mapping, n=None): elif plot_dim == 2: if self.vdim > 1: + msg = "holoviews currently does not support 3D surface plots in bokeh." raise NotImplementedError( - "holoviews currently does not support 3D surface plots in bokeh.", + msg, ) if n is None: # Calculate how many grid points are needed. @@ -1025,7 +1042,8 @@ def plot_slice(self, cut_mapping, n=None): return im.opts(cmap="viridis") else: - raise ValueError("Only 1 or 2-dimensional plots can be generated.") + msg = "Only 1 or 2-dimensional plots can be generated." + raise ValueError(msg) def plot_3D(self, with_triangulation=False, return_fig=False): """Plot the learner's data in 3D using plotly. @@ -1116,16 +1134,20 @@ def plot_3D(self, with_triangulation=False, return_fig=False): def _get_iso(self, level=0.0, which="surface"): if which == "surface": if self.ndim != 3 or self.vdim != 1: - raise Exception( + msg = ( "Isosurface plotting is only supported" - " for a 3D input and 1D output", + " for a 3D input and 1D output" + ) + raise Exception( + msg, ) get_surface = True get_line = False elif which == "line": if self.ndim != 2 or self.vdim != 1: + msg = "Isoline plotting is only supported for a 2D input and 1D output" raise Exception( - "Isoline plotting is only supported for a 2D input and 1D output", + msg, ) get_surface = False get_line = True @@ -1176,10 +1198,13 @@ def _get_vertex_index(a, b): r_min = min(self.data[v] for v in self.tri.vertices) r_max = max(self.data[v] for v in self.tri.vertices) - raise ValueError( + msg = ( f"Could not draw isosurface for level={level}, as" " this value is not inside the function range. Please choose" - f" a level strictly inside interval ({r_min}, {r_max})", + f" a level strictly inside interval ({r_min}, {r_max})" + ) + raise ValueError( + msg, ) return vertices, faces_or_lines @@ -1207,10 +1232,7 @@ def plot_isoline(self, level=0.0, n=None, tri_alpha=0): """ hv = ensure_holoviews() - if n == -1: - plot = hv.Path([]) - else: - plot = self.plot(n=n, tri_alpha=tri_alpha) + plot = hv.Path([]) if n == -1 else self.plot(n=n, tri_alpha=tri_alpha) if isinstance(level, Iterable): for lvl in level: @@ -1320,6 +1342,6 @@ def _get_plane_color(simplex): def _get_data(self): return deepcopy(self.__dict__) - def _set_data(self, state): + def _set_data(self, state) -> None: for k, v in state.items(): setattr(self, k, v) diff --git a/adaptive/learner/sequence_learner.py b/adaptive/learner/sequence_learner.py index c307744fd..3fa2edd73 100644 --- a/adaptive/learner/sequence_learner.py +++ b/adaptive/learner/sequence_learner.py @@ -19,9 +19,10 @@ from collections.abc import Sequence from typing import Callable -try: - import pandas + import pandas as pd +try: + import pandas # noqa: F401 -- runtime availability check; without this import the except branch is unreachable and with_pandas is always True with_pandas = True except ModuleNotFoundError: @@ -45,7 +45,7 @@ class _IgnoreFirstArgument: pickable.
""" - def __init__(self, function): + def __init__(self, function) -> None: self.function = function def __call__(self, index_point: PointType, *args, **kwargs): @@ -84,13 +84,14 @@ class SequenceLearner(BaseLearner): From primitive tests, the `~adaptive.SequenceLearner` appears to have a similar performance to `ipyparallel`\s ``load_balanced_view().map``. With the added benefit of having results in the local kernel already. + """ def __init__( self, function: Callable[[Any], Any], sequence: Sequence[Any], - ): + ) -> None: self._original_function = function self.function = _IgnoreFirstArgument(function) # prefer range(len(...)) over enumerate to avoid slowdowns @@ -107,7 +108,9 @@ def new(self) -> SequenceLearner: return SequenceLearner(self._original_function, self.sequence) def ask( - self, n: int, tell_pending: bool = True + self, + n: int, + tell_pending: bool = True, ) -> tuple[list[PointType], list[float]]: indices = [] points: list[PointType] = [] @@ -156,7 +159,8 @@ def done(self) -> bool: def result(self) -> list[Any]: """Get the function values in the same order as ``sequence``.""" if not self.done(): - raise Exception("Learner is not yet complete.") + msg = "Learner is not yet complete." + raise Exception(msg) return list(self.data.values()) @property @@ -172,7 +176,7 @@ def to_dataframe( # type: ignore[override] y_name: str = "y", *, full_sequence: bool = False, - ) -> pandas.DataFrame: + ) -> pd.DataFrame: """Return the data as a `pandas.DataFrame`. Parameters @@ -201,9 +205,11 @@ def to_dataframe( # type: ignore[override] ------ ImportError If `pandas` is not installed. + """ if not with_pandas: - raise ImportError("pandas is not installed.") + msg = "pandas is not installed." + raise ImportError(msg) import pandas as pd if full_sequence: @@ -214,7 +220,7 @@ def to_dataframe( # type: ignore[override] indices, ys = zip(*self.data.items()) if self.data else ([], []) # type: ignore[assignment] sequence = [self.sequence[i] for i in indices] - df = pandas.DataFrame(indices, columns=[index_name]) + df = pd.DataFrame(indices, columns=[index_name]) df[x_name] = sequence df[y_name] = ys df.attrs["inputs"] = [index_name] @@ -225,7 +231,7 @@ def to_dataframe( # type: ignore[override] def load_dataframe( # type: ignore[override] self, - df: pandas.DataFrame, + df: pd.DataFrame, with_default_function_args: bool = True, function_prefix: str = "function.", index_name: str = "i", @@ -233,7 +239,7 @@ def load_dataframe( # type: ignore[override] y_name: str = "y", *, full_sequence: bool = False, - ): + ) -> None: """Load data from a `pandas.DataFrame`. If ``with_default_function_args`` is True, then ``learner.function``'s @@ -257,9 +263,11 @@ def load_dataframe( # type: ignore[override] The ``y_name`` used in ``to_dataframe``, by default "y" full_sequence : bool, optional The ``full_sequence`` used in ``to_dataframe``, by default False + """ if not with_pandas: - raise ImportError("pandas is not installed.") + msg = "pandas is not installed." 
diff --git a/adaptive/learner/skopt_learner.py b/adaptive/learner/skopt_learner.py
index 173557c31..b9259a941 100644
--- a/adaptive/learner/skopt_learner.py
+++ b/adaptive/learner/skopt_learner.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import collections
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, NoReturn
 
 import numpy as np
 from skopt import Optimizer
@@ -11,7 +11,7 @@
 from adaptive.utils import cache_latest
 
 if TYPE_CHECKING:
-    import pandas
+    import pandas as pd
 
 
 class SKOptLearner(Optimizer, BaseLearner):
@@ -30,7 +30,7 @@ class SKOptLearner(Optimizer, BaseLearner):
 
     """
 
-    def __init__(self, function, **kwargs):
+    def __init__(self, function, **kwargs) -> None:
         self.function = function
         self.pending_points = set()
         self.data = collections.OrderedDict()
@@ -41,7 +41,7 @@ def new(self) -> SKOptLearner:
         """Return a new `~adaptive.SKOptLearner` without the data."""
         return SKOptLearner(self.function, **self._kwargs)
 
-    def tell(self, x, y, fit=True):
+    def tell(self, x, y, fit=True) -> None:
         if isinstance(x, collections.abc.Iterable):
             self.pending_points.discard(tuple(x))
             self.data[tuple(x)] = y
@@ -51,12 +51,12 @@ def tell(self, x, y, fit=True):
             self.data[x] = y
             super().tell([x], y, fit)
 
-    def tell_pending(self, x):
+    def tell_pending(self, x) -> None:
         # 'skopt.Optimizer' takes care of points we
         # have not got results for.
         self.pending_points.add(tuple(x))
 
-    def remove_unfinished(self):
+    def remove_unfinished(self) -> None:
         pass
 
     @cache_latest
@@ -72,9 +72,12 @@ def loss(self, real: bool = True):
 
     def ask(self, n, tell_pending=True):
         if not tell_pending:
-            raise NotImplementedError(
+            msg = (
                 "Asking points is an irreversible "
-                "action, so use `ask(n, tell_pending=True`.",
+                "action, so use `ask(n, tell_pending=True)`."
+            )
+            raise NotImplementedError(
+                msg,
             )
         points = super().ask(n)
         # TODO: Choose a better estimate for the loss improvement.
@@ -91,7 +94,8 @@ def npoints(self):
     def plot(self, nsamples=200):
         hv = ensure_holoviews()
         if self.space.n_dims > 1:
-            raise ValueError("Can only plot 1D functions")
+            msg = "Can only plot 1D functions"
+            raise ValueError(msg)
         bounds = self.space.bounds[0]
         if not self.Xi:
             p = hv.Scatter([]) * hv.Curve([]) * hv.Area([])
@@ -124,7 +128,7 @@ def plot(self, nsamples=200):
     def _get_data(self):
         return [x[0] for x in self.Xi], self.yi
 
-    def _set_data(self, data):
+    def _set_data(self, data) -> None:
         xs, ys = data
         self.tell_many(xs, ys)
 
@@ -134,7 +138,7 @@ def to_dataframe(  # type: ignore[override]
         function_prefix: str = "function.",
         seed_name: str = "seed",
         y_name: str = "y",
-    ) -> pandas.DataFrame:
+    ) -> pd.DataFrame:
         """Return the data as a `pandas.DataFrame`.
 
         Parameters
@@ -161,12 +165,12 @@ def to_dataframe(  # type: ignore[override]
 
     def load_dataframe(  # type: ignore[override]
         self,
-        df: pandas.DataFrame,
+        df: pd.DataFrame,
         with_default_function_args: bool = True,
         function_prefix: str = "function.",
         seed_name: str = "seed",
         y_name: str = "y",
-    ):
+    ) -> NoReturn:
         """Load data from a `pandas.DataFrame`.
If ``with_default_function_args`` is True, then ``learner.function``'s diff --git a/adaptive/learner/triangulation.py b/adaptive/learner/triangulation.py index 03455e3b7..00c7185eb 100644 --- a/adaptive/learner/triangulation.py +++ b/adaptive/learner/triangulation.py @@ -2,6 +2,7 @@ from collections.abc import Iterable, Sized from itertools import chain, combinations from math import factorial, sqrt +from typing import NoReturn import scipy.spatial from numpy import abs as np_abs @@ -67,7 +68,7 @@ def point_in_simplex(point, simplex, eps=1e-8): def fast_2d_circumcircle(points): - """Compute the center and radius of the circumscribed circle of a triangle + """Compute the center and radius of the circumscribed circle of a triangle. Parameters ---------- @@ -78,6 +79,7 @@ def fast_2d_circumcircle(points): ------- tuple (center point : tuple(float), radius: float) + """ points = array(points) # transform to relative coordinates @@ -114,6 +116,7 @@ def fast_3d_circumcircle(points): ------- tuple (center point : tuple(float), radius: float) + """ points = array(points) pts = points[1:] - points[0] @@ -171,8 +174,8 @@ def circumsphere(pts): Will fail for matrices which are not (N-dim + 1, N-dim) in size due to non-square determinants: will raise numpy.linalg.LinAlgError. May fail for points that are integers (due to 32bit integer overflow). - """ + """ dim = len(pts) - 1 if dim == 2: return fast_2d_circumcircle(pts) @@ -219,6 +222,7 @@ def orientation(face, origin): If two points lie on the same side of the face, the orientation will be equal, if they lie on the other side of the face, it will be negated. + """ vectors = array(face) sign, logdet = slogdet(vectors - origin) @@ -252,6 +256,7 @@ def simplex_volume_in_embedding(vertices) -> float: ValueError if the vertices do not form a simplex (for example, because they are coplanar, colinear or coincident). 
+ """ # Implements http://mathworld.wolfram.com/Cayley-MengerDeterminant.html # Modified from https://codereview.stackexchange.com/questions/77593/calculating-the-volume-of-a-tetrahedron @@ -280,7 +285,8 @@ def simplex_volume_in_embedding(vertices) -> float: if vol_square < 0: if vol_square > -1e-15: return 0 - raise ValueError("Provided vertices do not form a simplex") + msg = "Provided vertices do not form a simplex" + raise ValueError(msg) return sqrt(vol_square) @@ -310,35 +316,45 @@ class Triangulation: ValueError if the list of coordinates is incorrect or the points do not form one or more simplices in the + """ - def __init__(self, coords): + def __init__(self, coords) -> None: if not is_iterable_and_sized(coords): - raise TypeError("Please provide a 2-dimensional list of points") + msg = "Please provide a 2-dimensional list of points" + raise TypeError(msg) coords = list(coords) if not all(is_iterable_and_sized(coord) for coord in coords): - raise TypeError("Please provide a 2-dimensional list of points") + msg = "Please provide a 2-dimensional list of points" + raise TypeError(msg) if len(coords) == 0: - raise ValueError("Please provide at least one simplex") + msg = "Please provide at least one simplex" + raise ValueError(msg) # raise now because otherwise the next line will raise a less dim = len(coords[0]) if any(len(coord) != dim for coord in coords): - raise ValueError("Coordinates dimension mismatch") + msg = "Coordinates dimension mismatch" + raise ValueError(msg) if dim == 1: - raise ValueError("Triangulation class only supports dim >= 2") + msg = "Triangulation class only supports dim >= 2" + raise ValueError(msg) if len(coords) < dim + 1: - raise ValueError("Please provide at least one simplex") + msg = "Please provide at least one simplex" + raise ValueError(msg) coords = list(map(tuple, coords)) vectors = subtract(coords[1:], coords[0]) if matrix_rank(vectors) < dim: - raise ValueError( + msg = ( "Initial simplex has zero volumes " "(the points are linearly dependent)" ) + raise ValueError( + msg, + ) self.vertices = list(coords) self.simplices = set() @@ -351,13 +367,13 @@ def __init__(self, coords): for simplex in initial_tri.simplices: self.add_simplex(simplex) - def delete_simplex(self, simplex): + def delete_simplex(self, simplex) -> None: simplex = tuple(sorted(simplex)) self.simplices.remove(simplex) for vertex in simplex: self.vertex_to_simplices[vertex].remove(simplex) - def add_simplex(self, simplex): + def add_simplex(self, simplex) -> None: simplex = tuple(sorted(simplex)) self.simplices.add(simplex) for vertex in simplex: @@ -379,6 +395,7 @@ def get_reduced_simplex(self, point, simplex, eps=1e-8) -> list: vertices : list of ints Indices of vertices of the simplex to which the vertex belongs. An empty list indicates that the vertex is outside the simplex. + """ # XXX: in the end we want to lose this method if len(simplex) != self.dim + 1: @@ -421,7 +438,8 @@ def faces(self, dim=None, simplices=None, vertices=None): dim = self.dim if simplices is not None and vertices is not None: - raise ValueError("Only one of simplices and vertices is allowed.") + msg = "Only one of simplices and vertices is allowed." 
+ raise ValueError(msg) if vertices is not None: vertices = set(vertices) simplices = chain(*(self.vertex_to_simplices[i] for i in vertices)) @@ -476,7 +494,8 @@ def _extend_hull(self, new_vertex, eps=1e-8): self.simplices.remove(tri) del self.vertex_to_simplices[pt_index] del self.vertices[pt_index] - raise ValueError("Candidate vertex is inside the hull.") + msg = "Candidate vertex is inside the hull." + raise ValueError(msg) return new_simplices @@ -492,6 +511,7 @@ def circumscribed_circle(self, simplex, transform): ------- tuple (center point, radius) The center and radius of the circumscribed circle + """ pts = dot(self.get_vertices(simplex), transform) return circumsphere(pts) @@ -526,6 +546,7 @@ def bowyer_watson(self, pt_index, containing_simplex=None, transform=None): Simplices that have been deleted new_simplices : set of tuples Simplices that have been added + """ queue = set() done_simplices = set() @@ -577,7 +598,8 @@ def _relative_volume(self, simplex): distance of its vertices. The advantage of this is that the relative volume is only dependent on the shape of the simplex and not on the absolute size. Due to the weird scaling, the only use of this method - is to check that a simplex is almost flat.""" + is to check that a simplex is almost flat. + """ vertices = array(self.get_vertices(simplex)) vectors = vertices[1:] - vertices[0] average_edge_length = mean(np_abs(vectors)) @@ -597,6 +619,7 @@ def add_point(self, point, simplex=None, transform=None): Simplex containing the point. Empty tuple indicates points outside the hull. If not provided, the algorithm costs O(N), so this should be used whenever possible. + """ point = tuple(point) if simplex is None: @@ -610,27 +633,30 @@ def add_point(self, point, simplex=None, transform=None): pt_index = len(self.vertices) - 1 deleted_simplices, added_simplices = self.bowyer_watson( - pt_index, transform=transform + pt_index, + transform=transform, ) deleted = deleted_simplices - temporary_simplices added = added_simplices | (temporary_simplices - deleted_simplices) return deleted, added - else: - reduced_simplex = self.get_reduced_simplex(point, simplex) - if not reduced_simplex: - self.vertex_to_simplices.pop() # revert adding vertex - raise ValueError("Point lies outside of the specified simplex.") - else: - simplex = reduced_simplex + + reduced_simplex = self.get_reduced_simplex(point, simplex) + if not reduced_simplex: + self.vertex_to_simplices.pop() # revert adding vertex + msg = "Point lies outside of the specified simplex." + raise ValueError(msg) + + simplex = reduced_simplex if len(simplex) == 1: self.vertex_to_simplices.pop() # revert adding vertex - raise ValueError("Point already in triangulation.") - else: - pt_index = len(self.vertices) - self.vertices.append(point) - return self.bowyer_watson(pt_index, actual_simplex, transform) + msg = "Point already in triangulation." 
+ raise ValueError(msg) + + pt_index = len(self.vertices) + self.vertices.append(point) + return self.bowyer_watson(pt_index, actual_simplex, transform) def volume(self, simplex): prefactor = factorial(self.dim) @@ -641,7 +667,7 @@ def volume(self, simplex): def volumes(self): return [self.volume(sim) for sim in self.simplices] - def reference_invariant(self): + def reference_invariant(self) -> bool: """vertex_to_simplices and simplices are compatible.""" for vertex in range(len(self.vertices)): if any(vertex not in tri for tri in self.vertex_to_simplices[vertex]): @@ -651,7 +677,7 @@ def reference_invariant(self): return False return True - def vertex_invariant(self, vertex): + def vertex_invariant(self, vertex) -> NoReturn: """Simplices originating from a vertex don't overlap.""" raise NotImplementedError @@ -671,7 +697,8 @@ def get_simplices_attached_to_points(self, indices): def get_opposing_vertices(self, simplex): if simplex not in self.simplices: - raise ValueError("Provided simplex is not part of the triangulation") + msg = "Provided simplex is not part of the triangulation" + raise ValueError(msg) neighbors = self.get_simplices_attached_to_points(simplex) def find_opposing_vertex(vertex): @@ -683,8 +710,7 @@ def find_opposing_vertex(vertex): assert len(opposing) == 1 return opposing.pop() - result = tuple(find_opposing_vertex(v) for v in simplex) - return result + return tuple(find_opposing_vertex(v) for v in simplex) @property def hull(self): @@ -700,17 +726,20 @@ def hull(self): ------- hull : set of int Vertices in the hull. + """ counts = Counter(self.faces()) if any(i > 2 for i in counts.values()): - raise RuntimeError( + msg = ( "Broken triangulation, a (N-1)-dimensional" " appears in more than 2 simplices." ) + raise RuntimeError( + msg, + ) - hull = {point for face, count in counts.items() if count == 1 for point in face} - return hull + return {point for face, count in counts.items() if count == 1 for point in face} - def convex_invariant(self, vertex): + def convex_invariant(self, vertex) -> NoReturn: """Hull is convex.""" raise NotImplementedError diff --git a/adaptive/notebook_integration.py b/adaptive/notebook_integration.py index 165a84d82..2ba64746d 100644 --- a/adaptive/notebook_integration.py +++ b/adaptive/notebook_integration.py @@ -13,13 +13,16 @@ _plotly_enabled = False -def notebook_extension(*, _inline_js=True): +def notebook_extension(*, _inline_js=True) -> None: """Enable ipywidgets, holoviews, and asyncio notebook integration.""" if not in_ipynb(): - raise RuntimeError( + msg = ( '"adaptive.notebook_extension()" may only be run ' "from a Jupyter notebook." ) + raise RuntimeError( + msg, + ) global _async_enabled, _holoviews_enabled, _ipywidgets_enabled @@ -61,8 +64,9 @@ def ensure_holoviews(): try: return importlib.import_module("holoviews") except ModuleNotFoundError: + msg = "holoviews is not installed; plotting is disabled." raise RuntimeError( - "holoviews is not installed; plotting is disabled." + msg, ) from None @@ -81,7 +85,8 @@ def ensure_plotly(): _plotly_enabled = True return plotly except ModuleNotFoundError as e: - raise RuntimeError("plotly is not installed; plotting is disabled.") from e + msg = "plotly is not installed; plotting is disabled." + raise RuntimeError(msg) from e def in_ipynb() -> bool: @@ -119,12 +124,16 @@ def live_plot(runner, *, plotter=None, update_interval=2, name=None, normalize=T ------- dm : `holoviews.core.DynamicMap` The plot that automatically updates every `update_interval`. 
+ """ if not _holoviews_enabled: - raise RuntimeError( + msg = ( "Live plotting is not enabled; did you run " "'adaptive.notebook_extension()'?" ) + raise RuntimeError( + msg, + ) import holoviews as hv import ipywidgets @@ -150,15 +159,16 @@ def plot_generator(): dm = dm.map(lambda obj: obj.opts(framewise=True), hv.Element) cancel_button = ipywidgets.Button( - description="cancel live-plot", layout=ipywidgets.Layout(width="150px") + description="cancel live-plot", + layout=ipywidgets.Layout(width="150px"), ) # Could have used dm.periodic in the following, but this would either spin # off a thread (and learner is not threadsafe) or block the kernel. - async def updater(): + async def updater() -> None: event = lambda: hv.streams.Stream.trigger( # noqa: E731 - dm.streams + dm.streams, ) # XXX: used to be dm.event() # see https://github.com/pyviz/holoviews/issues/3564 try: @@ -171,7 +181,7 @@ async def updater(): active_plotting_tasks.pop(name, None) cancel_button.layout.display = "none" # remove cancel button - def cancel(_): + def cancel(_) -> None: with suppress(KeyError): active_plotting_tasks[name].cancel() @@ -200,17 +210,20 @@ def should_update(status): return True -def live_info(runner, *, update_interval=0.5): +def live_info(runner, *, update_interval=0.5) -> None: """Display live information about the runner. Returns an interactive ipywidget that can be visualized in a Jupyter notebook. """ if not _holoviews_enabled: - raise RuntimeError( + msg = ( "Live plotting is not enabled; did you run " "'adaptive.notebook_extension()'?" ) + raise RuntimeError( + msg, + ) import ipywidgets from IPython.display import display @@ -218,11 +231,12 @@ def live_info(runner, *, update_interval=0.5): status = ipywidgets.HTML(value=_info_html(runner)) cancel = ipywidgets.Button( - description="cancel runner", layout=ipywidgets.Layout(width="100px") + description="cancel runner", + layout=ipywidgets.Layout(width="100px"), ) cancel.on_click(lambda _: runner.cancel()) - async def update(): + async def update() -> None: while not runner.task.done(): await asyncio.sleep(update_interval) @@ -239,7 +253,7 @@ async def update(): display(ipywidgets.VBox((status, cancel))) -def _table_row(i, key, value): +def _table_row(i, key, value) -> str: """Style the rows of a table. 
Based on the default Jupyterlab table style.""" style = "text-align: right; padding: 0.5em 0.5em; line-height: 1.0;" if i % 2 == 1: @@ -247,7 +261,7 @@ def _table_row(i, key, value): return f'{key}{value}' -def _info_html(runner): +def _info_html(runner) -> str: status = runner.status() color = { diff --git a/adaptive/runner.py b/adaptive/runner.py index 4f877096f..a34a7238a 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -25,7 +25,6 @@ IntegratorLearner, SequenceLearner, ) -from adaptive.learner.base_learner import LearnerType from adaptive.notebook_integration import in_ipynb, live_info, live_plot from adaptive.utils import SequentialExecutor @@ -38,7 +37,9 @@ FutureTypes: TypeAlias = Union[concurrent.Future, asyncio.Future, asyncio.Task] if TYPE_CHECKING: - import holoviews + import holoviews as hv + + from adaptive.learner.base_learner import LearnerType if sys.version_info >= (3, 10): @@ -60,7 +61,9 @@ ExecutorTypes = Optional[ Union[ - ExecutorTypes, distributed.Client, distributed.cfexecutor.ClientExecutor + ExecutorTypes, + distributed.Client, + distributed.cfexecutor.ClientExecutor, ] ] @@ -75,7 +78,9 @@ ExecutorTypes = Optional[ Union[ - ExecutorTypes, ipyparallel.Client, ipyparallel.client.view.ViewExecutor + ExecutorTypes, + ipyparallel.Client, + ipyparallel.client.view.ViewExecutor, ] ] FutureTypes = Optional[Union[FutureTypes, AsyncResult]] @@ -189,7 +194,7 @@ def __init__( retries: int = 0, raise_if_retries_exceeded: bool = True, allow_running_forever: bool = False, - ): + ) -> None: self.executor = _ensure_executor(executor) self.goal = _goal( learner, @@ -225,7 +230,8 @@ def __init__( self._id_to_point: dict[int, Any] = {} self._next_id: Callable[[], int] = functools.partial( # type: ignore[assignment] - next, itertools.count() + next, + itertools.count(), ) # some unique id to be associated with each point def _get_max_tasks(self) -> int: @@ -234,10 +240,13 @@ def _get_max_tasks(self) -> int: def _do_raise(self, e: Exception, pid: int) -> None: tb = self._tracebacks[pid] x = self._id_to_point[pid] - raise RuntimeError( + msg = ( "An error occured while evaluating " f'"learner.function({x})". ' f"See the traceback for details.:\n\n{tb}" + ) + raise RuntimeError( + msg, ) from e @property @@ -247,7 +256,7 @@ def do_log(self) -> bool: def _ask(self, n: int) -> tuple[list[int], list[float]]: pending_ids = self._pending_tasks.values() # using generator here because we only need until `n` - pids_gen = (pid for pid in self._to_retry.keys() if pid not in pending_ids) + pids_gen = (pid for pid in self._to_retry if pid not in pending_ids) pids = list(itertools.islice(pids_gen, n)) loss_improvements = len(pids) * [float("inf")] @@ -275,6 +284,7 @@ def overhead(self) -> float: Adaptive whenever executing the function takes longer than 100 ms. This of course depends on the type of executor and the type of learner but is a rough rule of thumb. 
+ """ t_function = self._elapsed_function_time if t_function == 0: @@ -330,8 +340,7 @@ def _get_futures( self._pending_tasks[fut] = pid # Collect and results and add them to the learner - futures = list(self._pending_tasks.keys()) - return futures + return list(self._pending_tasks.keys()) def _remove_unfinished(self) -> list[FutureTypes]: # remove points with 'None' values from the learner @@ -478,7 +487,8 @@ def __init__( raise_if_retries_exceeded: bool = True, ) -> None: if inspect.iscoroutinefunction(learner.function): - raise ValueError("Coroutine functions can only be used with 'AsyncRunner'.") + msg = "Coroutine functions can only be used with 'AsyncRunner'." + raise ValueError(msg) super().__init__( learner, goal=goal, @@ -503,7 +513,8 @@ def _run(self) -> None: first_completed = concurrent.FIRST_COMPLETED if self._get_max_tasks() < 1: - raise RuntimeError("Executor has no workers") + msg = "Executor has no workers" + raise RuntimeError(msg) try: while not self.goal(self.learner): @@ -522,7 +533,8 @@ def _run(self) -> None: def elapsed_time(self) -> float: """Return the total time elapsed since the runner - was started.""" + was started. + """ if self.end_time is None: # This shouldn't happen if the BlockingRunner # correctly finished. @@ -618,6 +630,7 @@ class AsyncRunner(BaseRunner): This runner can be used when an async function (defined with ``async def``) has to be learned. In this case the function will be run directly on the event loop (and not in the executor). + """ def __init__( @@ -645,12 +658,15 @@ def __init__( try: pickle.dumps(learner.function) except pickle.PicklingError as e: - raise ValueError( + msg = ( "`learner.function` cannot be pickled (is it a lamdba function?)" " and therefore does not work with the default executor." " Either make sure the function is pickleble or use an executor" " that might work with 'hard to pickle'-functions" " , e.g. `ipyparallel` with `dill`." + ) + raise ValueError( + msg, ) from e super().__init__( @@ -676,8 +692,9 @@ def __init__( # the user can have more fine-grained control over the parallelism. if inspect.iscoroutinefunction(learner.function): if executor: # user-provided argument + msg = "Cannot use an executor when learning an async function." raise RuntimeError( - "Cannot use an executor when learning an async function." + msg, ) self.executor.shutdown() # Make sure we don't shoot ourselves later @@ -724,20 +741,23 @@ def cancel(self) -> None: def block_until_done(self) -> None: if in_ipynb(): - raise RuntimeError( + msg = ( "Cannot block the event loop when running in a Jupyter notebook." " Use `await runner.task` instead." ) + raise RuntimeError( + msg, + ) self.ioloop.run_until_complete(self.task) def live_plot( self, *, - plotter: Callable[[LearnerType], holoviews.Element] | None = None, + plotter: Callable[[LearnerType], hv.Element] | None = None, update_interval: float = 2.0, name: str | None = None, normalize: bool = True, - ) -> holoviews.DynamicMap: + ) -> hv.DynamicMap: """Live plotting of the learner's data. Parameters @@ -759,6 +779,7 @@ def live_plot( ------- dm : `holoviews.core.DynamicMap` The plot that automatically updates every `update_interval`. 
+ """ return live_plot( self, @@ -777,10 +798,12 @@ def live_info(self, *, update_interval: float = 0.1) -> None: return live_info(self, update_interval=update_interval) def live_info_terminal( - self, *, update_interval: float = 0.5, overwrite_previous: bool = True + self, + *, + update_interval: float = 0.5, + overwrite_previous: bool = True, ) -> asyncio.Task: - """ - Display live information about the runner in the terminal. + """Display live information about the runner in the terminal. This function provides a live update of the runner's status in the terminal. The update can either overwrite the previous status or be printed on a new line. @@ -810,6 +833,7 @@ def live_info_terminal( ----- This function uses ANSI escape sequences to control the terminal's cursor position. It might not work as expected on all terminal emulators. + """ async def _update(runner: AsyncRunner) -> None: @@ -830,7 +854,8 @@ async def _run(self) -> None: first_completed = asyncio.FIRST_COMPLETED if self._get_max_tasks() < 1: - raise RuntimeError("Executor has no workers") + msg = "Executor has no workers" + raise RuntimeError(msg) try: while not self.goal(self.learner): @@ -846,7 +871,8 @@ async def _run(self) -> None: def elapsed_time(self) -> float: """Return the total time elapsed since the runner - was started.""" + was started. + """ if self.task.done(): end_time = self.end_time if end_time is None: @@ -884,17 +910,19 @@ def start_periodic_saving( >>> runner.start_periodic_saving( ... save_kwargs=dict(fname='data/test.pickle'), ... interval=600) + """ - def default_save(learner): + def default_save(learner) -> None: learner.save(**save_kwargs) if method is None: method = default_save if save_kwargs is None: - raise ValueError("Must provide `save_kwargs` if method=None.") + msg = "Must provide `save_kwargs` if method=None." + raise ValueError(msg) - async def _saver(): + async def _saver() -> None: while self.status() == "running": method(self.learner) # No asyncio.shield needed, as 'wait' does not cancel any tasks. @@ -954,7 +982,7 @@ def simple( npoints_goal: int | None = None, end_time_goal: datetime | None = None, duration_goal: timedelta | int | float | None = None, -): +) -> None: """Run the learner until the goal is reached. Requests a single point from the learner, evaluates @@ -989,6 +1017,7 @@ def simple( calculation. Stop when the current time is larger or equal than ``start_time + duration_goal``. ``duration_goal`` can be a number indicating the number of seconds. + """ goal = _goal( learner, @@ -1021,6 +1050,7 @@ def replay_log( New learner where the log will be applied. log : list contains tuples: ``(method_name, *args)``. + """ for method, *args in log: getattr(learner, method)(*args) @@ -1043,11 +1073,14 @@ def _ensure_executor(executor: ExecutorTypes | None) -> concurrent.Executor: elif with_distributed and isinstance(executor, distributed.Client): return executor.get_executor() else: - raise TypeError( - # TODO: check if this is correct. Isn't MPI,loky supported? + msg = ( "Only a concurrent.futures.Executor, distributed.Client," " or ipyparallel.Client can be used." ) + raise TypeError( + # TODO: check if this is correct. Isn't MPI,loky supported? 
+            msg,
+        )
 
 
 def _get_ncores(
@@ -1063,11 +1096,14 @@ def _get_ncores(
     if with_ipyparallel and isinstance(ex, ipyparallel.client.view.ViewExecutor):
         return len(ex.view)
     elif isinstance(
-        ex, (concurrent.ProcessPoolExecutor, concurrent.ThreadPoolExecutor)
+        ex,
+        (
+            concurrent.ProcessPoolExecutor,
+            concurrent.ThreadPoolExecutor,
+            loky.reusable_executor._ReusablePoolExecutor,
+        ),
     ):
         return ex._max_workers  # type: ignore[union-attr]
-    elif isinstance(ex, loky.reusable_executor._ReusablePoolExecutor):
-        return ex._max_workers  # type: ignore[union-attr]
     elif isinstance(ex, SequentialExecutor):
         return 1
     elif with_distributed and isinstance(ex, distributed.cfexecutor.ClientExecutor):
@@ -1076,7 +1112,8 @@ def _get_ncores(
         ex.bootup()  # wait until all workers are up and running
         return ex._pool.size  # not public API!
     else:
-        raise TypeError(f"Cannot get number of cores for {ex.__class__}")
+        msg = f"Cannot get number of cores for {ex.__class__}"
+        raise TypeError(msg)
 
 
 # --- Useful runner goals
@@ -1112,13 +1149,14 @@ def stop_after(*, seconds=0, minutes=0, hours=0) -> Callable[[LearnerType], bool]:
     The duration specified is only a *lower bound* on the time that the
     runner will run for, because the runner only checks its goal when it
     adds points to its learner
+
     """
     stop_time = time.time() + seconds + 60 * minutes + 3600 * hours
     return lambda _: time.time() > stop_time
 
 
 class _TimeGoal:
-    def __init__(self, dt: timedelta | datetime | int | float):
+    def __init__(self, dt: timedelta | datetime | int | float) -> None:
         self.dt = dt if isinstance(dt, (timedelta, datetime)) else timedelta(seconds=dt)
         self.start_time = None
 
@@ -1129,7 +1167,8 @@ def __call__(self, _):
             return datetime.now() - self.start_time > self.dt
         if isinstance(self.dt, datetime):
             return datetime.now() > self.dt
-        raise TypeError(f"`dt={self.dt}` is not a datetime, timedelta, or number.")
+        msg = f"`dt={self.dt}` is not a datetime, timedelta, or number."
+        raise TypeError(msg)
 
 
 def auto_goal(
@@ -1165,11 +1204,13 @@ def auto_goal(
     Returns
     -------
     Callable[[adaptive.BaseLearner], bool]
+
     """
     opts = (loss, npoints, end_time, duration)  # all are mutually exclusive
     if sum(v is not None for v in opts) > 1:
+        msg = "Only one of loss, npoints, end_time, duration can be specified."
         raise ValueError(
-            "Only one of loss, npoints, end_time, duration can be specified."
+            msg,
        )
 
     if loss is not None:
@@ -1215,15 +1256,20 @@ def auto_goal(
         if isinstance(learner, IntegratorLearner):
             return IntegratorLearner.done  # type: ignore[return-value]
         if not allow_running_forever:
-            raise ValueError(
+            msg = (
                 "Goal is None which means the learners"
                 " continue forever and this is not allowed."
             )
+            raise ValueError(
+                msg,
+            )
         warnings.warn(
-            "Goal is None which means the learners continue forever!", stacklevel=2
+            "Goal is None which means the learners continue forever!",
+            stacklevel=2,
        )
         return lambda _: False
-    raise ValueError("Cannot determine goal from {goal}.")
+    msg = "Cannot determine goal from the given arguments."
+    raise ValueError(msg)
 
 
 def _goal(
@@ -1244,10 +1290,13 @@ def _goal(
         or end_time_goal is not None
         or duration_goal is not None
     ):
-        raise ValueError(
+        msg = (
             "Either goal, loss_goal, npoints_goal, end_time_goal or"
             " duration_goal can be specified, not multiple."
        )
+        raise ValueError(
+            msg,
+        )
     return auto_goal(
         learner=learner,
         loss=loss_goal,
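To make the goal rules above concrete, a hedged sketch (not part of the patch) of how the mutually exclusive goal arguments and `stop_after` are meant to be used:

```python
from adaptive import Learner1D
from adaptive.runner import simple, stop_after


def f(x):
    return x**2


learner = Learner1D(f, bounds=(-1, 1))

# Exactly one of goal/loss_goal/npoints_goal/end_time_goal/duration_goal
# may be given; passing several triggers the ValueError in `_goal` above.
simple(learner, npoints_goal=50)

# `stop_after` builds a time-based goal; the duration is only a lower
# bound because the goal is checked only when points are added.
goal = stop_after(seconds=1)
print(goal(learner))  # False until roughly one second has passed
```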
diff --git a/adaptive/tests/algorithm_4.py b/adaptive/tests/algorithm_4.py
index 27832298e..938b24067 100644
--- a/adaptive/tests/algorithm_4.py
+++ b/adaptive/tests/algorithm_4.py
@@ -4,13 +4,14 @@
 
 from collections import defaultdict
 from fractions import Fraction
-from typing import Callable
+from typing import TYPE_CHECKING, Callable
 
 import numpy as np
 from numpy.testing import assert_allclose
 from scipy.linalg import inv, norm
 
-from adaptive.types import Real
+if TYPE_CHECKING:
+    from adaptive.types import Real
 
 eps = np.spacing(1)
 
@@ -80,7 +81,8 @@ def newton(n: int) -> np.ndarray:
     c = (n + 1) * [0]
     for (d, a), m in terms.items():
         if m and a != 0:
-            raise ValueError("Newton polynomial cannot be represented exactly.")
+            msg = "Newton polynomial cannot be represented exactly."
+            raise ValueError(msg)
         c[n - d] += m
         # The check could be removed and the above line replaced by
         # the following, but then the result would be no longer exact.
@@ -258,7 +260,11 @@ def points(self) -> np.ndarray:
 
     @classmethod
     def make_first(
-        cls, f: Callable, a: int, b: int, depth: int = 2
+        cls,
+        f: Callable,
+        a: int,
+        b: int,
+        depth: int = 2,
     ) -> tuple[_Interval, int]:
         ival = _Interval(a, b, depth, 1)
         fx = f(ival.points())
@@ -280,7 +286,8 @@ def calc_igral_and_err(self, c_old: np.ndarray) -> float:
         return c_diff
 
     def split(
-        self, f: Callable
+        self,
+        f: Callable,
     ) -> tuple[tuple[float, float, float], int] | tuple[list[_Interval], int]:
         m = (self.a + self.b) / 2
         f_center = self.fx[(len(self.fx) - 1) // 2]
@@ -323,7 +330,7 @@ def algorithm_4(
     a: int,
     b: int,
     tol: float,
-    N_loops: int = int(1e9),  # noqa: B008
+    N_loops: int = int(1e9),
 ) -> tuple[float, float, int, list[_Interval]]:
     """ALGORITHM_4 evaluates an integral using adaptive quadrature. The
     algorithm uses Clenshaw-Curtis quadrature rules of increasing
@@ -352,7 +359,6 @@
        Using Explicit Interpolants", P. Gonnet, ACM Transactions on
        Mathematical Software, 37 (3), art. no. 26, 2008.
     """
-
     ival, nr_points = _Interval.make_first(f, a, b)
 
     ivals = [ival]
@@ -385,9 +391,12 @@ def algorithm_4(
         result, nr_points_inc = ivals[i_max].split(f)
         nr_points += nr_points_inc
         if isinstance(result, tuple):
-            raise DivergentIntegralError(
+            msg = (
                 "Possibly divergent integral in the interval"
-                " [{}, {}]! (h={})".format(*result),
+                " [{}, {}]!
(h={})".format(*result) + ) + raise DivergentIntegralError( + msg, ivals[i_max].igral * np.inf, None, nr_points, @@ -469,7 +478,7 @@ def f_one_with_nan(x): return result -def test_legendre(): +def test_legendre() -> None: legs = legendre(11) comparisons = [ (legs[0], [1], 1), @@ -481,7 +490,7 @@ def test_legendre(): assert c * div == d -def test_scalar_product(n=33): +def test_scalar_product(n=33) -> None: legs = legendre(n) selection = [0, 5, 7, n - 1] for i in selection: @@ -502,11 +511,11 @@ def simple_newton(n): ] -def test_newton(): +def test_newton() -> None: assert_allclose(newton(9), simple_newton(9), atol=1e-15) -def test_b_def(depth=1): +def test_b_def(depth=1) -> None: legs = [np.array(leg, float) for leg in legendre(n[depth] + 1)] result = np.zeros(len(legs[-1])) for factor, leg in zip(b_def[depth], legs): @@ -515,7 +524,7 @@ def test_b_def(depth=1): assert_allclose(result, newton(n[depth]), rtol=1e-15) -def test_downdate(depth=3): +def test_downdate(depth=3) -> None: fx = np.abs(xi[depth]) fx[1::2] = np.nan c_downdated = _calc_coeffs(fx, depth) @@ -527,7 +536,7 @@ def test_downdate(depth=3): assert_allclose(c_downdated[: len(c)], c, rtol=0, atol=1e-9) -def test_integration(): +def test_integration() -> None: old_settings = np.seterr(all="ignore") igral, err, nr_points = algorithm_4(f0, 0, 3, 1e-5) @@ -565,7 +574,7 @@ def test_integration(): np.seterr(**old_settings) -def test_analytic(n=200): +def test_analytic(n=200) -> None: def f(x): return f63(x, alpha, beta) diff --git a/adaptive/tests/test_average_learner.py b/adaptive/tests/test_average_learner.py index d94933397..0f2ca5659 100644 --- a/adaptive/tests/test_average_learner.py +++ b/adaptive/tests/test_average_learner.py @@ -1,5 +1,5 @@ import random -from typing import TYPE_CHECKING +from typing import NoReturn import flaky import numpy as np @@ -7,15 +7,13 @@ from adaptive.learner import AverageLearner from adaptive.runner import simple -if TYPE_CHECKING: - pass +def f_unused(seed) -> NoReturn: + msg = "This function shouldn't be used." 
+ raise NotImplementedError(msg) -def f_unused(seed): - raise NotImplementedError("This function shouldn't be used.") - -def test_only_returns_new_points(): +def test_only_returns_new_points() -> None: learner = AverageLearner(f_unused, atol=None, rtol=0.01) # Only tell it n = 5...10 @@ -32,7 +30,7 @@ def test_only_returns_new_points(): @flaky.flaky(max_runs=5) -def test_avg_std_and_npoints(): +def test_avg_std_and_npoints() -> None: learner = AverageLearner(f_unused, atol=None, rtol=0.01) for i in range(300): @@ -57,19 +55,22 @@ def test_avg_std_and_npoints(): assert abs(learner.std - std) < 1e-12 -def test_min_npoints(): - def constant_function(seed): +def test_min_npoints() -> None: + def constant_function(seed) -> float: return 0.1 for min_npoints in [1, 2, 3]: learner = AverageLearner( - constant_function, atol=0.01, rtol=0.01, min_npoints=min_npoints + constant_function, + atol=0.01, + rtol=0.01, + min_npoints=min_npoints, ) simple(learner, loss_goal=1.0) assert learner.npoints >= max(2, min_npoints) -def test_zero_mean(): +def test_zero_mean() -> None: # see https://github.com/python-adaptive/adaptive/issues/275 learner = AverageLearner(f_unused, rtol=0.01) learner.tell(0, -1) diff --git a/adaptive/tests/test_average_learner1d.py b/adaptive/tests/test_average_learner1d.py index c0148c5e9..ebbd3229c 100644 --- a/adaptive/tests/test_average_learner1d.py +++ b/adaptive/tests/test_average_learner1d.py @@ -1,5 +1,4 @@ from itertools import chain -from typing import TYPE_CHECKING import numpy as np @@ -10,11 +9,8 @@ simple_run, ) -if TYPE_CHECKING: - pass - -def almost_equal_dicts(a, b): +def almost_equal_dicts(a, b) -> None: assert a.keys() == b.keys() for k, v1 in a.items(): v2 = b[k] @@ -29,10 +25,11 @@ def almost_equal_dicts(a, b): try: np.testing.assert_almost_equal(v1, v2) except TypeError as e: - raise AssertionError(f"{v1} != {v2}") from e + msg = f"{v1} != {v2}" + raise AssertionError(msg) from e -def test_tell_many_at_point(): +def test_tell_many_at_point() -> None: f = generate_random_parametrization(noisy_peak) learner = AverageLearner1D(f, bounds=(-2, 2)) control = learner.new() diff --git a/adaptive/tests/test_balancing_learner.py b/adaptive/tests/test_balancing_learner.py index 905a55e0c..d24a8cc51 100644 --- a/adaptive/tests/test_balancing_learner.py +++ b/adaptive/tests/test_balancing_learner.py @@ -8,7 +8,7 @@ strategies = ["loss", "loss_improvements", "npoints", "cycle"] -def test_balancing_learner_loss_cache(): +def test_balancing_learner_loss_cache() -> None: learner = Learner1D(lambda x: x, bounds=(-1, 1)) learner.tell(-1, -1) learner.tell(1, 1) @@ -29,7 +29,7 @@ def test_balancing_learner_loss_cache(): @pytest.mark.parametrize("strategy", strategies) -def test_distribute_first_points_over_learners(strategy): +def test_distribute_first_points_over_learners(strategy) -> None: for initial_points in [0, 3]: learners = [Learner1D(lambda x: x, bounds=(-1, 1)) for i in range(10)] learner = BalancingLearner(learners, strategy=strategy) @@ -44,7 +44,7 @@ def test_distribute_first_points_over_learners(strategy): @pytest.mark.parametrize("strategy", strategies) -def test_ask_0(strategy): +def test_ask_0(strategy) -> None: learners = [Learner1D(lambda x: x, bounds=(-1, 1)) for i in range(10)] learner = BalancingLearner(learners, strategy=strategy) points, _ = learner.ask(0) @@ -52,7 +52,7 @@ def test_ask_0(strategy): @pytest.mark.parametrize( - "strategy, goal_type, goal", + ("strategy", "goal_type", "goal"), [ ("loss", "loss_goal", 0.1), ("loss_improvements", 
"loss_goal", 0.1), @@ -60,7 +60,7 @@ def test_ask_0(strategy): ("cycle", "loss_goal", 0.1), ], ) -def test_strategies(strategy, goal_type, goal): +def test_strategies(strategy, goal_type, goal) -> None: learners = [Learner1D(lambda x: x, bounds=(-1, 1)) for i in range(10)] learner = BalancingLearner(learners, strategy=strategy) simple(learner, **{goal_type: goal}) diff --git a/adaptive/tests/test_cquad.py b/adaptive/tests/test_cquad.py index 15e5f1e27..24fb0fd6e 100644 --- a/adaptive/tests/test_cquad.py +++ b/adaptive/tests/test_cquad.py @@ -63,36 +63,42 @@ def same_ivals(f, a, b, tol): # This will only show up if the test fails, anyway print( - "igral difference", learner.igral - igral, "err difference", learner.err - err + "igral difference", + learner.igral - igral, + "err difference", + learner.err - err, ) return equal_ivals(learner.ivals, ivals, verbose=True) # XXX: This *should* pass (https://github.com/python-adaptive/adaptive/issues/55) -@pytest.mark.xfail -def test_that_gives_same_intervals_as_reference_implementation(): +@pytest.mark.xfail() +def test_that_gives_same_intervals_as_reference_implementation() -> None: for i, args in enumerate( - [[f0, 0, 3, 1e-5], [f7, 0, 1, 1e-6], [f21, 0, 1, 1e-3], [f24, 0, 3, 1e-3]] + [[f0, 0, 3, 1e-5], [f7, 0, 1, 1e-6], [f21, 0, 1, 1e-3], [f24, 0, 3, 1e-3]], ): assert same_ivals(*args), f"Function {i}" -@pytest.mark.xfail -def test_machine_precision(): +@pytest.mark.xfail() +def test_machine_precision() -> None: f, a, b, tol = [partial(f63, alpha=0.987654321, beta=0.45), 0, 1, 1e-10] igral, err, n, ivals = algorithm_4(f, a, b, tol) learner = run_integrator_learner(f, a, b, tol, n) print( - "igral difference", learner.igral - igral, "err difference", learner.err - err + "igral difference", + learner.igral - igral, + "err difference", + learner.err - err, ) assert equal_ivals(learner.ivals, ivals, verbose=True) -def test_machine_precision2(): +def test_machine_precision2() -> None: f, a, b, tol = [partial(f63, alpha=0.987654321, beta=0.45), 0, 1, 1e-10] igral, err, n, ivals = algorithm_4(f, a, b, tol) @@ -102,7 +108,7 @@ def test_machine_precision2(): np.testing.assert_almost_equal(err, learner.err) -def test_divergence(): +def test_divergence() -> None: """This function should raise a DivergentIntegralError.""" f, a, b, tol = fdiv, 0, 1, 1e-6 with pytest.raises(A4DivergentIntegralError) as e: @@ -114,7 +120,7 @@ def test_divergence(): run_integrator_learner(f, a, b, tol, n) -def test_choosing_and_adding_points_one_by_one(): +def test_choosing_and_adding_points_one_by_one() -> None: learner = IntegratorLearner(f24, bounds=(0, 3), tol=1e-10) for _ in range(1000): xs, _ = learner.ask(1) @@ -122,14 +128,14 @@ def test_choosing_and_adding_points_one_by_one(): learner.tell(x, learner.function(x)) -def test_choosing_and_adding_multiple_points_at_once(): +def test_choosing_and_adding_multiple_points_at_once() -> None: learner = IntegratorLearner(f24, bounds=(0, 3), tol=1e-10) xs, _ = learner.ask(100) for x in xs: learner.tell(x, learner.function(x)) -def test_adding_points_and_skip_one_point(): +def test_adding_points_and_skip_one_point() -> None: learner = IntegratorLearner(f24, bounds=(0, 3), tol=1e-10) xs, _ = learner.ask(17) skip_x = xs[1] @@ -159,8 +165,8 @@ def test_adding_points_and_skip_one_point(): # XXX: This *should* pass (https://github.com/python-adaptive/adaptive/issues/55) -@pytest.mark.xfail -def test_tell_in_random_order(first_add_33=False): +@pytest.mark.xfail() +def test_tell_in_random_order(first_add_33=False) -> None: 
import random from operator import attrgetter @@ -218,12 +224,12 @@ def test_tell_in_random_order(first_add_33=False): # XXX: This *should* pass (https://github.com/python-adaptive/adaptive/issues/55) -@pytest.mark.xfail -def test_tell_in_random_order_first_add_33(): +@pytest.mark.xfail() +def test_tell_in_random_order_first_add_33() -> None: test_tell_in_random_order(first_add_33=True) -def test_approximating_intervals(): +def test_approximating_intervals() -> None: import random learner = IntegratorLearner(f24, bounds=(0, 3), tol=1e-10) @@ -239,8 +245,8 @@ def test_approximating_intervals(): # XXX: This *should* pass (https://github.com/python-adaptive/adaptive/issues/96) -@pytest.mark.xfail -def test_removed_choose_mutiple_points_at_once(): +@pytest.mark.xfail() +def test_removed_choose_mutiple_points_at_once() -> None: """Given that a high-precision interval that was split into 2 low-precision ones, we should use the high-precision interval. """ @@ -249,10 +255,10 @@ def test_removed_choose_mutiple_points_at_once(): xs, _ = learner.ask(n) for x in xs: learner.tell(x, learner.function(x)) - assert list(learner.approximating_intervals)[0] == learner.first_ival + assert next(iter(learner.approximating_intervals)) == learner.first_ival -def test_removed_ask_one_by_one(): +def test_removed_ask_one_by_one() -> None: with pytest.raises(RuntimeError): # This test should raise because integrating np.exp should be done # after the 33th point diff --git a/adaptive/tests/test_learner1d.py b/adaptive/tests/test_learner1d.py index 7dafbd3ab..20cd1886a 100644 --- a/adaptive/tests/test_learner1d.py +++ b/adaptive/tests/test_learner1d.py @@ -2,7 +2,6 @@ import random import time -from typing import TYPE_CHECKING import flaky import numpy as np @@ -11,9 +10,6 @@ from adaptive.learner.learner1D import curvature_loss_function from adaptive.runner import BlockingRunner, simple -if TYPE_CHECKING: - pass - def flat_middle(x): x *= 1e7 @@ -24,7 +20,7 @@ def flat_middle(x): return np.interp(x, xs, ys) -def test_pending_loss_intervals(): +def test_pending_loss_intervals() -> None: # https://github.com/python-adaptive/adaptive/issues/40 learner = Learner1D(lambda x: x, (0, 4)) @@ -38,7 +34,7 @@ def test_pending_loss_intervals(): assert set(learner.losses_combined.keys()) == {(0, 1), (1, 2), (2, 3.5), (3.5, 4.0)} -def test_loss_interpolation_for_unasked_point(): +def test_loss_interpolation_for_unasked_point() -> None: # https://github.com/python-adaptive/adaptive/issues/40 learner = Learner1D(lambda x: x, (0, 4)) @@ -70,7 +66,7 @@ def test_loss_interpolation_for_unasked_point(): } -def test_first_iteration(): +def test_first_iteration() -> None: """Edge cases where we ask for a few points at the start.""" learner = Learner1D(lambda x: None, (-1, 1)) points, loss_improvements = learner.ask(2) @@ -82,7 +78,8 @@ def test_first_iteration(): learner = Learner1D(lambda x: None, (-1, 1)) points, loss_improvements = learner.ask(1) - assert len(points) == 1 and points[0] in learner.bounds + assert len(points) == 1 + assert points[0] in learner.bounds rest = {-1, 0, 1} - set(points) points, loss_improvements = learner.ask(2) assert set(points) == set(rest) @@ -104,7 +101,7 @@ def test_first_iteration(): assert points == [1] -def test_loss_interpolation(): +def test_loss_interpolation() -> None: learner = Learner1D(lambda _: 0, bounds=(-1, 1)) learner.tell(-1, 0) @@ -122,7 +119,7 @@ def test_loss_interpolation(): def _run_on_discontinuity(x_0, bounds): - def f(x): + def f(x) -> int: return -1 if 
x < x_0 else +1 learner = Learner1D(f, bounds) @@ -133,21 +130,21 @@ def f(x): return learner -def test_termination_on_discontinuities(): +def test_termination_on_discontinuities() -> None: learner = _run_on_discontinuity(0, (-1, 1)) - smallest_interval = min(abs(a - b) for a, b in learner.losses.keys()) + smallest_interval = min(abs(a - b) for a, b in learner.losses) assert smallest_interval >= np.finfo(float).eps learner = _run_on_discontinuity(1, (-2, 2)) - smallest_interval = min(abs(a - b) for a, b in learner.losses.keys()) + smallest_interval = min(abs(a - b) for a, b in learner.losses) assert smallest_interval >= np.finfo(float).eps learner = _run_on_discontinuity(0.5e3, (-1e3, 1e3)) - smallest_interval = min(abs(a - b) for a, b in learner.losses.keys()) + smallest_interval = min(abs(a - b) for a, b in learner.losses) assert smallest_interval >= 0.5e3 * np.finfo(float).eps -def test_order_adding_points(): +def test_order_adding_points() -> None: # and https://github.com/python-adaptive/adaptive/issues/41 learner = Learner1D(lambda x: x, (0, 1)) learner.tell_many([1, 0, 0.5], [0, 0, 0]) @@ -156,7 +153,7 @@ def test_order_adding_points(): learner.ask(1) -def test_adding_existing_point_passes_silently(): +def test_adding_existing_point_passes_silently() -> None: # See https://github.com/python-adaptive/adaptive/issues/42 learner = Learner1D(lambda x: x, (0, 4)) learner.tell(0, 0) @@ -165,11 +162,12 @@ def test_adding_existing_point_passes_silently(): learner.tell(1, 100) -def test_loss_at_machine_precision_interval_is_zero(): +def test_loss_at_machine_precision_interval_is_zero() -> None: """The loss of an interval smaller than _dx_eps - should be set to zero.""" + should be set to zero. + """ - def f(x): + def f(x) -> int: return 1 if x == 0 else 0 def goal(learner): @@ -186,11 +184,11 @@ def small_deviations(x): return 0 if x <= 1 else 1 + 10 ** (-random.randint(12, 14)) -def test_small_deviations(): +def test_small_deviations() -> None: """This tests whether the Learner1D can handle small deviations. See https://gitlab.kwant-project.org/qt/adaptive/merge_requests/73 and - https://github.com/python-adaptive/adaptive/issues/78.""" - + https://github.com/python-adaptive/adaptive/issues/78. 
+ """ eps = 5e-14 learner = Learner1D(small_deviations, bounds=(1 - eps, 1 + eps)) @@ -221,8 +219,8 @@ def test_small_deviations(): break -def test_uniform_sampling1D_v2(): - def check(known, expect): +def test_uniform_sampling1D_v2() -> None: + def check(known, expect) -> None: def f(x): return x @@ -243,7 +241,7 @@ def f(x): check([-1, -0.5], {-0.75, 0.25, 1}) -def test_add_data_unordered(): +def test_add_data_unordered() -> None: # see https://github.com/python-adaptive/adaptive/issues/44 learner = Learner1D(lambda x: x, bounds=(-1, 1)) xs = [-1, 1, 0] @@ -255,7 +253,7 @@ def test_add_data_unordered(): learner.ask(3) -def test_ask_does_not_return_known_points_when_returning_bounds(): +def test_ask_does_not_return_known_points_when_returning_bounds() -> None: learner = Learner1D(lambda x: None, (-1, 1)) learner.tell(0, 0) points, _ = learner.ask(3) @@ -263,7 +261,7 @@ def test_ask_does_not_return_known_points_when_returning_bounds(): @flaky.flaky(max_runs=3) -def test_tell_many(): +def test_tell_many() -> None: def f(x, offset=0.123214): a = 0.01 return ( @@ -279,7 +277,7 @@ def f_vec(x, offset=0.123214): y = x + a**2 / (a**2 + (x - offset) ** 2) return [y, 0.5 * y, y**2] - def assert_equal_dicts(d1, d2): + def assert_equal_dicts(d1, d2) -> None: xs1, ys1 = zip(*sorted(d1.items())) xs2, ys2 = zip(*sorted(d2.items())) ys1 = np.array(ys1, dtype=np.float64) @@ -287,14 +285,15 @@ def assert_equal_dicts(d1, d2): np.testing.assert_almost_equal(xs1, xs2) np.testing.assert_almost_equal(ys1, ys2) - def test_equal(l1, l2): + def test_equal(l1, l2) -> None: assert_equal_dicts(l1.neighbors, l2.neighbors) assert_equal_dicts(l1.neighbors_combined, l2.neighbors_combined) assert_equal_dicts(l1.data, l2.data) assert_equal_dicts(l2.losses, l1.losses) assert_equal_dicts(l2.losses_combined, l1.losses_combined) np.testing.assert_almost_equal( - sorted(l1.pending_points), sorted(l2.pending_points) + sorted(l1.pending_points), + sorted(l2.pending_points), ) np.testing.assert_almost_equal(l1._bbox[1], l1._bbox[1]) assert l1._scale == l2._scale @@ -320,7 +319,7 @@ def test_equal(l1, l2): # Test non-determinism. We keep a list of points that will be # evaluated later to emulate parallel execution. 
- def _random_run(learner, learner2, scale_doubling=True): + def _random_run(learner, learner2, scale_doubling=True) -> None: if not scale_doubling: # Make the scale huge to no get a scale doubling x = 1e-6 @@ -372,7 +371,7 @@ def _random_run(learner, learner2, scale_doubling=True): test_equal(learner, learner2) -def test_curvature_loss(): +def test_curvature_loss() -> None: def f(x): return np.tanh(20 * x) @@ -383,7 +382,7 @@ def f(x): assert learner.npoints >= 100 -def test_curvature_loss_vectors(): +def test_curvature_loss_vectors() -> None: def f(x): return np.tanh(20 * x), np.tanh(20 * (x - 0.4)) @@ -394,19 +393,19 @@ def f(x): assert learner.npoints >= 100 -def test_NaN_loss(): +def test_NaN_loss() -> None: # see https://github.com/python-adaptive/adaptive/issues/145 def f(x): a = 0.01 if random.random() < 0.2: - return np.NaN + return np.nan return x + a**2 / (a**2 + x**2) learner = Learner1D(f, bounds=(-1, 1)) simple(learner, npoints_goal=100) -def test_inf_loss_with_missing_bounds(): +def test_inf_loss_with_missing_bounds() -> None: learner = Learner1D( flat_middle, bounds=(0, 1e-7), diff --git a/adaptive/tests/test_learnernd.py b/adaptive/tests/test_learnernd.py index 0884b7eeb..2e852db4a 100644 --- a/adaptive/tests/test_learnernd.py +++ b/adaptive/tests/test_learnernd.py @@ -7,7 +7,7 @@ from .test_learners import generate_random_parametrization, ring_of_fire -def test_faiure_case_LearnerND(): +def test_faiure_case_LearnerND() -> None: log = [ ("ask", 4), ("tell", (-1, -1, -1), 1.607873907219222e-101), @@ -26,7 +26,7 @@ def test_faiure_case_LearnerND(): replay_log(learner, log) -def test_interior_vs_bbox_gives_same_result(): +def test_interior_vs_bbox_gives_same_result() -> None: f = generate_random_parametrization(ring_of_fire) control = LearnerND(f, bounds=[(-1, 1), (-1, 1)]) @@ -39,7 +39,7 @@ def test_interior_vs_bbox_gives_same_result(): assert learner.data == control.data -def test_vector_return_with_a_flat_layer(): +def test_vector_return_with_a_flat_layer() -> None: f = generate_random_parametrization(ring_of_fire) g = generate_random_parametrization(ring_of_fire) h1 = lambda xy: np.array([f(xy), g(xy)]) # noqa: E731 diff --git a/adaptive/tests/test_learners.py b/adaptive/tests/test_learners.py index e32aa75ef..84bfce075 100644 --- a/adaptive/tests/test_learners.py +++ b/adaptive/tests/test_learners.py @@ -9,6 +9,7 @@ import shutil import tempfile import time +from typing import NoReturn import flaky import numpy as np @@ -76,11 +77,13 @@ def generate_random_parametrization(f): All parameters but the first must be annotated with a callable that, when called with no arguments, produces a value of the appropriate type for the parameter in question. + """ _, *params = inspect.signature(f).parameters.items() if any(not callable(v.annotation) for (p, v) in params): + msg = f"All parameters to {f.__name__} must be annotated with functions." raise TypeError( - f"All parameters to {f.__name__} must be annotated with functions." 
+ msg, ) realization = {p: v.annotation() for (p, v) in params} return ft.partial(f, **realization) @@ -90,7 +93,7 @@ def uniform(a, b): return lambda: random.uniform(a, b) -def simple_run(learner, n): +def simple_run(learner, n) -> None: def get_goal(learner): if hasattr(learner, "nsamples"): return lambda lrn: lrn.nsamples > n @@ -229,7 +232,7 @@ def ask_randomly(learner, rounds, points): @run_with(Learner1D) -def test_uniform_sampling1D(learner_type, f, learner_kwargs): +def test_uniform_sampling1D(learner_type, f, learner_kwargs) -> None: """Points are sampled uniformly if no data is provided. Non-uniform sampling implies that we think we know something about @@ -245,9 +248,9 @@ def test_uniform_sampling1D(learner_type, f, learner_kwargs): assert max(ivals) / min(ivals) < 2 + 1e-8 -@pytest.mark.xfail +@pytest.mark.xfail() @run_with(Learner2D, LearnerND) -def test_uniform_sampling2D(learner_type, f, learner_kwargs): +def test_uniform_sampling2D(learner_type, f, learner_kwargs) -> None: """Points are sampled uniformly if no data is provided. Non-uniform sampling implies that we think we know something about @@ -271,14 +274,14 @@ def test_uniform_sampling2D(learner_type, f, learner_kwargs): @pytest.mark.parametrize( - "learner_type, bounds", + ("learner_type", "bounds"), [ (Learner1D, (-1, 1)), (Learner2D, ((-1, 1), (-1, 1))), (LearnerND, ((-1, 1), (-1, 1), (-1, 1))), ], ) -def test_learner_accepts_lists(learner_type, bounds): +def test_learner_accepts_lists(learner_type, bounds) -> None: def f(x): return [0, 1] @@ -287,7 +290,7 @@ def f(x): @run_with(Learner1D, Learner2D, LearnerND, SequenceLearner, AverageLearner1D) -def test_adding_existing_data_is_idempotent(learner_type, f, learner_kwargs): +def test_adding_existing_data_is_idempotent(learner_type, f, learner_kwargs) -> None: """Adding already existing data is an idempotent operation. Either it is idempotent, or it is an error. @@ -341,7 +344,7 @@ def test_adding_existing_data_is_idempotent(learner_type, f, learner_kwargs): AverageLearner1D, SequenceLearner, ) -def test_adding_non_chosen_data(learner_type, f, learner_kwargs): +def test_adding_non_chosen_data(learner_type, f, learner_kwargs) -> None: """Adding data for a point that was not returned by 'ask'.""" # XXX: learner, control and bounds are not defined f = generate_random_parametrization(f) @@ -383,9 +386,13 @@ def test_adding_non_chosen_data(learner_type, f, learner_kwargs): @run_with( - Learner1D, xfail(Learner2D), xfail(LearnerND), AverageLearner, AverageLearner1D + Learner1D, + xfail(Learner2D), + xfail(LearnerND), + AverageLearner, + AverageLearner1D, ) -def test_point_adding_order_is_irrelevant(learner_type, f, learner_kwargs): +def test_point_adding_order_is_irrelevant(learner_type, f, learner_kwargs) -> None: """The order of calls to 'tell' between calls to 'ask' is arbitrary. 
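The docstring above states the invariant; as a standalone illustration (not one of the suite's tests), a minimal order-invariance check for `Learner1D` could look like this:

```python
from adaptive import Learner1D


def f(x):
    return x**3


a = Learner1D(f, bounds=(-1, 1))
b = Learner1D(f, bounds=(-1, 1))

pts = [-1.0, 1.0, 0.0, 0.5, -0.25]
for x in pts:
    a.tell(x, f(x))
for x in reversed(pts):
    b.tell(x, f(x))

# Same data in a different order must leave the learner in the same state.
assert a.data == b.data
assert abs(a.loss() - b.loss()) < 1e-12
```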
@@ -432,8 +439,10 @@ def test_point_adding_order_is_irrelevant(learner_type, f, learner_kwargs):
 # see https://github.com/python-adaptive/adaptive/issues/55
 @run_with(Learner1D, xfail(Learner2D), LearnerND, AverageLearner, AverageLearner1D)
 def test_expected_loss_improvement_is_less_than_total_loss(
-    learner_type, f, learner_kwargs
-):
+    learner_type,
+    f,
+    learner_kwargs,
+) -> None:
     """The estimated loss improvement can never be greater than the total loss."""
     f = generate_random_parametrization(f)
     learner = learner_type(f, **learner_kwargs)
@@ -450,7 +459,7 @@ def test_expected_loss_improvement_is_less_than_total_loss(
 
     if learner_type is Learner2D:
         assert sum(loss_improvements) < sum(
-            learner.loss_per_triangle(learner.interpolator(scaled=True))
+            learner.loss_per_triangle(learner.interpolator(scaled=True)),
         )
     elif learner_type in (Learner1D, AverageLearner1D):
         assert sum(loss_improvements) < sum(learner.losses.values())
@@ -462,8 +471,10 @@ def test_expected_loss_improvement_is_less_than_total_loss(
 # but we xfail it now, as Learner2D will be deprecated anyway
 @run_with(Learner1D, xfail(Learner2D), LearnerND, AverageLearner1D)
 def test_learner_performance_is_invariant_under_scaling(
-    learner_type, f, learner_kwargs
-):
+    learner_type,
+    f,
+    learner_kwargs,
+) -> None:
     """Learners behave identically under transformations that leave
     the loss invariant.
 
@@ -529,7 +540,7 @@ def scale_x(x):
     SequenceLearner,
     with_all_loss_functions=False,
 )
-def test_balancing_learner(learner_type, f, learner_kwargs):
+def test_balancing_learner(learner_type, f, learner_kwargs) -> None:
     """Test if the BalancingLearner works with the different types of learners."""
     learners = [
         learner_type(generate_random_parametrization(f), **learner_kwargs)
@@ -579,7 +590,7 @@ def test_balancing_learner(learner_type, f, learner_kwargs):
     SequenceLearner,
     with_all_loss_functions=False,
 )
-def test_saving(learner_type, f, learner_kwargs):
+def test_saving(learner_type, f, learner_kwargs) -> None:
     f = generate_random_parametrization(f)
     learner = learner_type(f, **learner_kwargs)
     control = learner.new()
@@ -612,7 +623,7 @@ def test_saving(learner_type, f, learner_kwargs):
     SequenceLearner,
     with_all_loss_functions=False,
 )
-def test_saving_of_balancing_learner(learner_type, f, learner_kwargs):
+def test_saving_of_balancing_learner(learner_type, f, learner_kwargs) -> None:
     f = generate_random_parametrization(f)
     learner = BalancingLearner([learner_type(f, **learner_kwargs)])
     control = learner.new()
@@ -650,7 +661,7 @@ def fname(learner):
     IntegratorLearner,
     with_all_loss_functions=False,
 )
-def test_saving_with_datasaver(learner_type, f, learner_kwargs):
+def test_saving_with_datasaver(learner_type, f, learner_kwargs) -> None:
     f = generate_random_parametrization(f)
     g = lambda x: {"y": f(x), "t": random.random()}  # noqa: E731
     arg_picker = operator.itemgetter("y")
@@ -678,25 +689,30 @@ def test_saving_with_datasaver(learner_type, f, learner_kwargs):
     os.remove(path)
 
 
-@pytest.mark.xfail
+@pytest.mark.xfail()
 @run_with(Learner1D, Learner2D, LearnerND)
-def test_convergence_for_arbitrary_ordering(learner_type, f, learner_kwargs):
+def test_convergence_for_arbitrary_ordering(
+    learner_type,
+    f,
+    learner_kwargs,
+) -> NoReturn:
     """Learners that are learning the same function should converge
     to the same result "eventually" if given the same data, regardless
     of the order in which that data is given.
     """
     # XXX: not sure how to implement this. Can we say anything at all about
     # the scaling of the loss with the number of points?
-    raise NotImplementedError()
+    raise NotImplementedError
 
 
-@pytest.mark.xfail
+@pytest.mark.xfail()
 @run_with(Learner1D, Learner2D, LearnerND)
-def test_learner_subdomain(learner_type, f, learner_kwargs):
+def test_learner_subdomain(learner_type, f, learner_kwargs) -> NoReturn:
     """Learners that never receive data outside of a subdomain should
-    perform 'similarly' to learners defined on that subdomain only."""
+    perform 'similarly' to learners defined on that subdomain only.
+    """
     # XXX: not sure how to implement this. How do we measure "performance"?
-    raise NotImplementedError()
+    raise NotImplementedError
 
 
 def add_time(f):
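The save/load round-trip exercised by `test_saving` above reduces to the following pattern (a hedged sketch; `new()` returns an empty learner with the same function and bounds, and `save`/`load` serialize the accumulated data with cloudpickle):

    import os
    import tempfile

    from adaptive import Learner1D
    from adaptive.runner import simple

    learner = Learner1D(lambda x: x**2, bounds=(-1, 1))
    simple(learner, npoints_goal=10)

    fd, path = tempfile.mkstemp()
    os.close(fd)
    learner.save(path)        # write learner.data to disk

    control = learner.new()   # empty copy with the same function and bounds
    control.load(path)
    assert control.npoints == learner.npoints
    os.remove(path)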
@@ -720,8 +736,8 @@ def wrapper(*args, **kwargs):
     IntegratorLearner,
     with_all_loss_functions=False,
 )
-def test_to_dataframe(learner_type, f, learner_kwargs):
-    import pandas
+def test_to_dataframe(learner_type, f, learner_kwargs) -> None:
+    import pandas as pd
 
     if learner_type is LearnerND:
         kw = {"point_names": tuple("xyz")[: len(learner_kwargs["bounds"])]}
@@ -739,7 +755,7 @@ def test_to_dataframe(learner_type, f, learner_kwargs):
     # Run the learner
     simple_run(learner, 100)
     df = learner.to_dataframe(**kw)
-    assert isinstance(df, pandas.DataFrame)
+    assert isinstance(df, pd.DataFrame)
     if learner_type is AverageLearner1D:
         assert len(df) == learner.nsamples
     else:
@@ -758,7 +774,7 @@ def test_to_dataframe(learner_type, f, learner_kwargs):
     bal_learner = BalancingLearner(learners)
     simple_run(bal_learner, 100)
     df_bal = bal_learner.to_dataframe(**kw)
-    assert isinstance(df_bal, pandas.DataFrame)
+    assert isinstance(df_bal, pd.DataFrame)
 
     if learner_type is not AverageLearner1D:
         assert len(df_bal) == bal_learner.npoints
@@ -779,7 +795,8 @@ def test_to_dataframe(learner_type, f, learner_kwargs):
 
     # Test with DataSaver
     learner = learner_type(
-        add_time(generate_random_parametrization(f)), **learner_kwargs
+        add_time(generate_random_parametrization(f)),
+        **learner_kwargs,
     )
     data_saver = DataSaver(learner, operator.itemgetter("result"))
     df = data_saver.to_dataframe(**kw)  # test if empty dataframe works
@@ -796,5 +813,5 @@ def test_to_dataframe(learner_type, f, learner_kwargs):
     assert data_saver2.extra_data.keys() == data_saver.extra_data.keys()
     assert all(
         data_saver2.extra_data[k] == data_saver.extra_data[k]
-        for k in data_saver.extra_data.keys()
+        for k in data_saver.extra_data
     )
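The DataFrame round-trip that `test_to_dataframe` exercises boils down to this pattern (a sketch assuming pandas is installed; column names are the learner defaults):

    from adaptive import Learner1D
    from adaptive.runner import simple

    learner = Learner1D(lambda x: x**2, bounds=(-1, 1))
    simple(learner, npoints_goal=20)

    df = learner.to_dataframe()   # one row per evaluated point
    fresh = learner.new()
    fresh.load_dataframe(df)      # re-tells every stored (x, y) pair
    assert fresh.npoints == learner.npoints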
+ """ # XXX: find a potential better solution in # https://github.com/ipython/ipykernel/issues/365 ctx = zmq.Context() diff --git a/adaptive/tests/test_pickling.py b/adaptive/tests/test_pickling.py index 9721fd273..428016819 100644 --- a/adaptive/tests/test_pickling.py +++ b/adaptive/tests/test_pickling.py @@ -50,7 +50,8 @@ def identity_function(x): def datasaver(f, learner_type, learner_kwargs): return DataSaver( - learner=learner_type(f, **learner_kwargs), arg_picker=identity_function + learner=learner_type(f, **learner_kwargs), + arg_picker=identity_function, ) @@ -88,10 +89,12 @@ def balancing_learner(f, learner_type, learner_kwargs): ] -@pytest.mark.parametrize("learner_type, learner_kwargs, serializer, f", learners) -def test_serialization_for(learner_type, learner_kwargs, serializer, f): +@pytest.mark.parametrize( + ("learner_type", "learner_kwargs", "serializer", "f"), + learners, +) +def test_serialization_for(learner_type, learner_kwargs, serializer, f) -> None: """Test serializing a learner using different serializers.""" - learner = learner_type(f, **learner_kwargs) simple(learner, goal=goal_1) diff --git a/adaptive/tests/test_runner.py b/adaptive/tests/test_runner.py index f6bb6031e..821b25a6d 100644 --- a/adaptive/tests/test_runner.py +++ b/adaptive/tests/test_runner.py @@ -27,11 +27,11 @@ OPERATING_SYSTEM = platform.system() -def blocking_runner(learner, **kw): +def blocking_runner(learner, **kw) -> None: BlockingRunner(learner, executor=SequentialExecutor(), **kw) -def async_runner(learner, **kw): +def async_runner(learner, **kw) -> None: runner = AsyncRunner(learner, executor=SequentialExecutor(), **kw) runner.block_until_done() @@ -40,7 +40,7 @@ def async_runner(learner, **kw): @pytest.mark.parametrize("runner", runners) -def test_simple(runner): +def test_simple(runner) -> None: """Test that the runners actually run.""" def f(x): @@ -52,7 +52,7 @@ def f(x): @pytest.mark.parametrize("runner", runners) -def test_nonconforming_output(runner): +def test_nonconforming_output(runner) -> None: """Test that using a runner works with a 2D learner, even when the learned function outputs a 1-vector. This tests against the regression flagged in https://github.com/python-adaptive/adaptive/issues/81. 
@@ -64,7 +64,7 @@ def f(x):
     runner(Learner2D(f, ((-1, 1), (-1, 1))), npoints_goal=10)
 
 
-def test_aync_def_function():
+def test_aync_def_function() -> None:
     async def f(x):
         return x
 
@@ -87,7 +87,7 @@ def linear(x):
     return x
 
 
-def test_concurrent_futures_executor():
+def test_concurrent_futures_executor() -> None:
     from concurrent.futures import ProcessPoolExecutor
 
     BlockingRunner(
@@ -97,7 +97,7 @@ def test_concurrent_futures_executor():
     )
 
 
-def test_stop_after_goal():
+def test_stop_after_goal() -> None:
     seconds_to_wait = 0.2  # don't make this too large or the test will take ages
     start_time = time.time()
     BlockingRunner(Learner1D(linear, (-1, 1)), goal=stop_after(seconds=seconds_to_wait))
@@ -111,7 +111,7 @@ def test_stop_after_goal():
     reason="Gets stuck in CI",
 )
 @pytest.mark.skipif(OPERATING_SYSTEM == "Darwin", reason="Cannot stop ipcluster")
-def test_ipyparallel_executor():
+def test_ipyparallel_executor() -> None:
     from ipyparallel import Client
 
     if OPERATING_SYSTEM == "Windows":
@@ -128,7 +128,8 @@ def test_ipyparallel_executor():
     assert learner.npoints > 0
 
     if not child.terminate(force=True):
-        raise RuntimeError("Could not stop ipcluster")
+        msg = "Could not stop ipcluster"
+        raise RuntimeError(msg)
 
 
 @pytest.mark.timeout(60)
@@ -136,7 +137,7 @@ def test_ipyparallel_executor():
 @pytest.mark.skipif(OPERATING_SYSTEM == "Windows", reason="XXX: seems to always fail")
 @pytest.mark.skipif(OPERATING_SYSTEM == "Darwin", reason="XXX: intermittently fails")
 @pytest.mark.skipif(OPERATING_SYSTEM == "Linux", reason="XXX: intermittently fails")
-def test_distributed_executor():
+def test_distributed_executor() -> None:
     from distributed import Client
 
     learner = Learner1D(linear, (-1, 1))
@@ -146,21 +147,24 @@ def test_distributed_executor():
     assert learner.npoints > 0
 
 
-def test_loky_executor(loky_executor):
+def test_loky_executor(loky_executor) -> None:
     learner = Learner1D(lambda x: x, (-1, 1))
     BlockingRunner(
-        learner, npoints_goal=10, executor=loky_executor, shutdown_executor=True
+        learner,
+        npoints_goal=10,
+        executor=loky_executor,
+        shutdown_executor=True,
     )
     assert learner.npoints > 0
 
 
-def test_default_executor():
+def test_default_executor() -> None:
     learner = Learner1D(linear, (-1, 1))
     runner = AsyncRunner(learner, npoints_goal=10)
     runner.block_until_done()
 
 
-def test_auto_goal():
+def test_auto_goal() -> None:
     learner = Learner1D(linear, (-1, 1))
     simple(learner, auto_goal(npoints=4))
     assert learner.npoints == 4
@@ -186,7 +190,8 @@ def test_auto_goal():
     learner2 = Learner1D(linear, (-2, 2))
     balancing_learner = BalancingLearner([learner1, learner2])
     simple(balancing_learner, auto_goal(npoints=4, learner=balancing_learner))
-    assert learner1.npoints == 4 and learner2.npoints == 4
+    assert learner1.npoints == 4
+    assert learner2.npoints == 4
 
     learner1 = Learner1D(linear, bounds=(0, 1))
     learner1 = DataSaver(learner1, lambda x: x)
@@ -194,7 +199,8 @@ def test_auto_goal():
     learner2 = DataSaver(learner2, lambda x: x)
     balancing_learner = BalancingLearner([learner1, learner2])
     simple(balancing_learner, auto_goal(npoints=10, learner=balancing_learner))
-    assert learner1.npoints == 10 and learner2.npoints == 10
+    assert learner1.npoints == 10
+    assert learner2.npoints == 10
 
     learner = Learner1D(linear, (-1, 1))
     t_start = time.time()
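For reference, the simplest `auto_goal` pattern from the hunk above looks like this in isolation (a minimal sketch using only the API shown in this diff):

    from adaptive import Learner1D
    from adaptive.runner import auto_goal, simple

    learner = Learner1D(lambda x: x, (-1, 1))
    simple(learner, auto_goal(npoints=4))  # builds a goal callable from a point budget
    assert learner.npoints == 4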
diff --git a/adaptive/tests/test_sequence_learner.py b/adaptive/tests/test_sequence_learner.py
index 9ef5a8f14..d007b7f1a 100644
--- a/adaptive/tests/test_sequence_learner.py
+++ b/adaptive/tests/test_sequence_learner.py
@@ -15,7 +15,7 @@ def peak(x, offset=offset, wait=True):
 
 
 class FailOnce:
-    def __init__(self):
+    def __init__(self) -> None:
         self.failed = False
 
     def __call__(self, value):
@@ -25,7 +25,7 @@ def __call__(self, value):
         raise RuntimeError
 
 
-def test_fail_with_sequence_of_unhashable():
+def test_fail_with_sequence_of_unhashable() -> None:
     # https://github.com/python-adaptive/adaptive/issues/265
     seq = [{1: 1}]  # unhashable
     learner = SequenceLearner(FailOnce(), sequence=seq)
@@ -35,7 +35,7 @@
 
 
 @pytest.mark.skipif(not with_pandas, reason="pandas is not installed")
-def test_save_load_dataframe():
+def test_save_load_dataframe() -> None:
     learner = SequenceLearner(peak, sequence=range(10, 30, 1))
     simple(learner, npoints_goal=10)
     df = learner.to_dataframe()
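The `SequenceLearner` usage in `test_save_load_dataframe` follows the standard pattern (a minimal sketch, assuming pandas is installed for `to_dataframe`; the lambda stands in for `peak`):

    from adaptive import SequenceLearner
    from adaptive.runner import simple

    learner = SequenceLearner(lambda x: x**2, sequence=range(10, 30))
    simple(learner, npoints_goal=10)
    df = learner.to_dataframe()  # one row per evaluated sequence element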
+ """ old_simplices = tri.simplices.copy() deleted_simplices, created_simplices = tri.add_point(point, simplex=simplex) new_simplices = tri.simplices.copy() @@ -68,7 +69,7 @@ def _add_point_with_check(tri, point, simplex=None): assert created_simplices == new_simplices - old_simplices -def test_triangulation_raises_exception_for_1d_list(): +def test_triangulation_raises_exception_for_1d_list() -> None: # We could support 1d, but we don't for now, because it is not relevant # so a user has to be aware pts = [0, 1] @@ -76,7 +77,7 @@ def test_triangulation_raises_exception_for_1d_list(): Triangulation(pts) -def test_triangulation_raises_exception_for_1d_points(): +def test_triangulation_raises_exception_for_1d_points() -> None: # We could support 1d, but we don't for now, because it is not relevant # so a user has to be aware pts = [(0,), (1,)] @@ -85,7 +86,7 @@ def test_triangulation_raises_exception_for_1d_points(): @with_dimension -def test_triangulation_of_standard_simplex(dim): +def test_triangulation_of_standard_simplex(dim) -> None: t = Triangulation(_make_standard_simplex(dim)) expected_simplex = tuple(range(dim + 1)) assert t.simplices == {expected_simplex} @@ -94,7 +95,7 @@ def test_triangulation_of_standard_simplex(dim): @with_dimension -def test_zero_volume_initial_simplex_raises_exception(dim): +def test_zero_volume_initial_simplex_raises_exception(dim) -> None: points = _make_standard_simplex(dim)[:-1] linearly_dependent_point = np.dot(np.random.random(dim), points) zero_volume_simplex = np.vstack((points, linearly_dependent_point)) @@ -106,7 +107,9 @@ def test_zero_volume_initial_simplex_raises_exception(dim): @with_dimension -def test_adding_point_outside_circumscribed_hypersphere_in_positive_orthant(dim): +def test_adding_point_outside_circumscribed_hypersphere_in_positive_orthant( + dim, +) -> None: t = Triangulation(_make_standard_simplex(dim)) point_outside_circumscribed_sphere = (1.1,) * dim @@ -133,7 +136,7 @@ def test_adding_point_outside_circumscribed_hypersphere_in_positive_orthant(dim) @with_dimension -def test_adding_point_outside_standard_simplex_in_negative_orthant(dim): +def test_adding_point_outside_standard_simplex_in_negative_orthant(dim) -> None: t = Triangulation(_make_standard_simplex(dim)) new_point = list(range(-dim, 0)) @@ -168,7 +171,7 @@ def test_adding_point_outside_standard_simplex_in_negative_orthant(dim): @with_dimension @pytest.mark.parametrize("provide_simplex", [True, False]) -def test_adding_point_inside_standard_simplex(dim, provide_simplex): +def test_adding_point_inside_standard_simplex(dim, provide_simplex) -> None: t = Triangulation(_make_standard_simplex(dim)) first_simplex = tuple(range(dim + 1)) inside_simplex = (0.1,) * dim @@ -192,7 +195,7 @@ def test_adding_point_inside_standard_simplex(dim, provide_simplex): @with_dimension -def test_adding_point_on_standard_simplex_face(dim): +def test_adding_point_on_standard_simplex_face(dim) -> None: pts = _make_standard_simplex(dim) t = Triangulation(pts) on_simplex = np.average(pts[1:], axis=0) @@ -213,7 +216,7 @@ def test_adding_point_on_standard_simplex_face(dim): @with_dimension -def test_adding_point_on_standard_simplex_edge(dim): +def test_adding_point_on_standard_simplex_edge(dim) -> None: pts = _make_standard_simplex(dim) t = Triangulation(pts) on_edge = np.average(pts[:2], axis=0) @@ -231,7 +234,7 @@ def test_adding_point_on_standard_simplex_edge(dim): @with_dimension -def test_adding_point_colinear_with_first_edge(dim): +def test_adding_point_colinear_with_first_edge(dim) -> None: pts 
     t = Triangulation(pts)
     edge_extension = np.multiply(pts[1], 2)
@@ -246,7 +249,7 @@
 
 
 @with_dimension
-def test_adding_point_coplanar_with_a_face(dim):
+def test_adding_point_coplanar_with_a_face(dim) -> None:
     pts = _make_standard_simplex(dim)
     t = Triangulation(pts)
     face_extension = np.sum(pts[:-1], axis=0) * 2
@@ -261,7 +264,7 @@
 
 
 @with_dimension
-def test_adding_point_inside_circumscribed_circle(dim):
+def test_adding_point_inside_circumscribed_circle(dim) -> None:
     pts = _make_standard_simplex(dim)
     t = Triangulation(pts)
     on_simplex = (0.6,) * dim
@@ -280,7 +283,7 @@
 
 
 @with_dimension
-def test_triangulation_volume_is_less_than_bounding_box(dim):
+def test_triangulation_volume_is_less_than_bounding_box(dim) -> None:
     eps = 1e-8
     points = np.random.random((10, dim))  # all within the unit hypercube
     t = _make_triangulation(points)
@@ -290,7 +293,7 @@
 
 
 @with_dimension
-def test_triangulation_is_deterministic(dim):
+def test_triangulation_is_deterministic(dim) -> None:
     points = np.random.random((10, dim))
     t1 = _make_triangulation(points)
     t2 = _make_triangulation(points)
@@ -298,7 +301,7 @@
 
 
 @with_dimension
-def test_initialisation_raises_when_not_enough_points(dim):
+def test_initialisation_raises_when_not_enough_points(dim) -> None:
     deficient_simplex = _make_standard_simplex(dim)[:-1]
 
     with pytest.raises(ValueError):
@@ -306,7 +309,7 @@
 
 
 @with_dimension
-def test_initialisation_raises_when_points_coplanar(dim):
+def test_initialisation_raises_when_points_coplanar(dim) -> None:
     zero_volume_simplex = _make_standard_simplex(dim)[:-1]
 
     new_point1 = np.average(zero_volume_simplex, axis=0)
@@ -318,7 +321,7 @@
 
 
 @with_dimension
-def test_initialisation_accepts_more_than_one_simplex(dim):
+def test_initialisation_accepts_more_than_one_simplex(dim) -> None:
     points = _make_standard_simplex(dim)
     new_point = [1.1] * dim  # Point opposing the origin but outside circumsphere
     points = np.vstack((points, new_point))
diff --git a/adaptive/tests/unit/test_learnernd.py b/adaptive/tests/unit/test_learnernd.py
index ecd00d6da..2d182d40e 100644
--- a/adaptive/tests/unit/test_learnernd.py
+++ b/adaptive/tests/unit/test_learnernd.py
@@ -15,28 +15,28 @@ def ring_of_fire(xy):
     return x + math.exp(-((x**2 + y**2 - d**2) ** 2) / a**4)
 
 
-def test_learnerND_inits_loss_depends_on_neighbors_correctly():
+def test_learnerND_inits_loss_depends_on_neighbors_correctly() -> None:
     learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)])
     assert learner.nth_neighbors == 0
 
 
-def test_learnerND_curvature_inits_loss_depends_on_neighbors_correctly():
+def test_learnerND_curvature_inits_loss_depends_on_neighbors_correctly() -> None:
     loss = curvature_loss_function()
     assert loss.nth_neighbors == 1
     learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)], loss_per_simplex=loss)
     assert learner.nth_neighbors == 1
 
 
-def test_learnerND_accepts_ConvexHull_as_input():
+def test_learnerND_accepts_ConvexHull_as_input() -> None:
     triangle = ConvexHull([(0, 1), (2, 0), (0, 0)])
     learner = LearnerND(ring_of_fire, bounds=triangle)
     assert learner.nth_neighbors == 0
     assert np.allclose(learner._bbox, [(0, 2), (0, 1)])
 
 
-def test_learnerND_raises_if_too_many_neigbors():
+def test_learnerND_raises_if_too_many_neigbors() -> None:
     @uses_nth_neighbors(2)
-    def loss(*args):
+    def loss(*args) -> int:
         return 0
 
     assert loss.nth_neighbors == 2
diff --git a/adaptive/tests/unit/test_learnernd_integration.py b/adaptive/tests/unit/test_learnernd_integration.py
index 939108377..9b69f5fb0 100644
--- a/adaptive/tests/unit/test_learnernd_integration.py
+++ b/adaptive/tests/unit/test_learnernd_integration.py
@@ -14,20 +14,20 @@ def ring_of_fire(xy, d=0.75):
     return x + math.exp(-((x**2 + y**2 - d**2) ** 2) / a**4)
 
 
-def test_learnerND_runs_to_10_points():
+def test_learnerND_runs_to_10_points() -> None:
     learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)])
     SimpleRunner(learner, npoints_goal=10)
     assert learner.npoints == 10
 
 
 @pytest.mark.parametrize("execution_number", range(5))
-def test_learnerND_runs_to_10_points_Blocking(execution_number):
+def test_learnerND_runs_to_10_points_Blocking(execution_number) -> None:
     learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)])
     BlockingRunner(learner, npoints_goal=10)
     assert learner.npoints >= 10
 
 
-def test_learnerND_curvature_runs_to_10_points():
+def test_learnerND_curvature_runs_to_10_points() -> None:
     loss = curvature_loss_function()
     learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)], loss_per_simplex=loss)
     SimpleRunner(learner, npoints_goal=10)
@@ -35,14 +35,14 @@
 
 
 @pytest.mark.parametrize("execution_number", range(5))
-def test_learnerND_curvature_runs_to_10_points_Blocking(execution_number):
+def test_learnerND_curvature_runs_to_10_points_Blocking(execution_number) -> None:
     loss = curvature_loss_function()
     learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)], loss_per_simplex=loss)
     BlockingRunner(learner, npoints_goal=10)
     assert learner.npoints >= 10
 
 
-def test_learnerND_log_works():
+def test_learnerND_log_works() -> None:
     loss = curvature_loss_function()
     learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)], loss_per_simplex=loss)
     learner.ask(4)
diff --git a/adaptive/tests/unit/test_triangulation.py b/adaptive/tests/unit/test_triangulation.py
index 4aa48a9f0..aa9ea9095 100644
--- a/adaptive/tests/unit/test_triangulation.py
+++ b/adaptive/tests/unit/test_triangulation.py
@@ -23,12 +23,12 @@
 points = np.array([(2, 2), (2, 4), (0, 3), (2, 0), (4, 2), (5, 5)])
 
 
-def test_triangulation_can_find_the_simplices():
+def test_triangulation_can_find_the_simplices() -> None:
     tri = Triangulation(points)
     assert tri.simplices == {(0, 1, 4), (0, 1, 2), (0, 2, 3), (0, 3, 4), (1, 4, 5)}
 
 
-def test_triangulation_can_find_neighbors():
+def test_triangulation_can_find_neighbors() -> None:
     tri = Triangulation(points)
     assert tri.get_simplices_attached_to_points((0, 1, 4)) == {
         (0, 1, 2),
@@ -39,7 +39,7 @@
     assert tri.get_simplices_attached_to_points((0, 3, 4)) == {(0, 1, 4), (0, 2, 3)}
 
 
-def test_triangulation_can_find_oposing_points():
+def test_triangulation_can_find_oposing_points() -> None:
     tri = Triangulation(points)
     assert tri.get_opposing_vertices((0, 1, 4)) == (5, 3, 2)
     assert tri.get_opposing_vertices((1, 4, 5)) == (None, None, 0)
@@ -48,12 +48,12 @@
     assert tri.get_opposing_vertices((0, 3, 4)) == (None, 1, 2)
 
 
-def test_triangulation_can_get_oposing_points_if_only_one_simplex_exists():
+def test_triangulation_can_get_oposing_points_if_only_one_simplex_exists() -> None:
     tri = Triangulation(points[:3])
     assert tri.get_opposing_vertices((0, 1, 2)) == (None, None, None)
 
 
-def test_triangulation_find_opposing_vertices_raises_if_simplex_is_invalid():
+def test_triangulation_find_opposing_vertices_raises_if_simplex_is_invalid() -> None:
     tri = Triangulation(points)
     with pytest.raises(ValueError):
         tri.get_opposing_vertices((0, 2, 1))
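The queries these unit tests make can be reproduced directly (a self-contained sketch using the same six points as the test module):

    import numpy as np

    from adaptive.learner.triangulation import Triangulation

    points = np.array([(2, 2), (2, 4), (0, 3), (2, 0), (4, 2), (5, 5)])
    tri = Triangulation(points)

    print(tri.simplices)  # five triangles over six vertices
    print(tri.get_simplices_attached_to_points((0, 3, 4)))  # simplices sharing a face
    print(tri.get_opposing_vertices((0, 1, 4)))  # (5, 3, 2); None marks a hull face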
@@ -62,15 +62,14 @@
         tri.get_opposing_vertices((2, 3, 5))
 
 
-def test_circumsphere():
+def test_circumsphere() -> None:
     from numpy import allclose
     from numpy.random import normal, uniform
 
     from adaptive.learner.triangulation import circumsphere, fast_norm
 
     def generate_random_sphere_points(dim, radius=0):
-        """https://math.stackexchange.com/a/1585996"""
-
+        """https://math.stackexchange.com/a/1585996."""
         vec = [None] * (dim + 1)
         center = uniform(-100, 100, dim)
         radius = uniform(1.0, 100.0) if radius == 0 else radius
diff --git a/adaptive/utils.py b/adaptive/utils.py
index ff80f62f1..0afe33a4c 100644
--- a/adaptive/utils.py
+++ b/adaptive/utils.py
@@ -7,7 +7,6 @@
 import os
 import pickle
 import warnings
-from collections.abc import Awaitable, Iterator, Sequence
 from contextlib import contextmanager
 from functools import wraps
 from itertools import product
@@ -16,6 +15,8 @@
 import cloudpickle
 
 if TYPE_CHECKING:
+    from collections.abc import Awaitable, Iterator, Sequence
+
     from dask.distributed import Client as AsyncDaskClient
 
 
@@ -37,7 +38,8 @@ def restore(*learners) -> Iterator[None]:
 
 def cache_latest(f: Callable) -> Callable:
     """Cache the latest return value of the function and add it
-    as 'self._cache[f.__name__]'."""
+    as 'self._cache[f.__name__]'.
+    """
 
     @functools.wraps(f)
     def wrapper(*args, **kwargs):
@@ -96,15 +98,14 @@ def decorator(method):
 
 def _default_parameters(function, function_prefix: str = "function."):
     sig = inspect.signature(function)
-    defaults = {
+    return {
         f"{function_prefix}{k}": v.default
         for i, (k, v) in enumerate(sig.parameters.items())
        if v.default != inspect._empty and i >= 1
     }
-    return defaults
 
 
-def assign_defaults(function, df, function_prefix: str = "function."):
+def assign_defaults(function, df, function_prefix: str = "function.") -> None:
     defaults = _default_parameters(function, function_prefix)
     for k, v in defaults.items():
         df[k] = len(df) * [v]
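What the refactored `_default_parameters` computes is easiest to see with a concrete function (a sketch that inlines the dict comprehension from the hunk above):

    import inspect

    def f(x, a=1, b=2):
        return x + a + b

    sig = inspect.signature(f)
    defaults = {
        f"function.{k}": v.default
        for i, (k, v) in enumerate(sig.parameters.items())
        if v.default != inspect._empty and i >= 1  # skip `x` and parameters without defaults
    }
    assert defaults == {"function.a": 1, "function.b": 2}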
@@ -112,10 +113,13 @@
 def partial_function_from_dataframe(function, df, function_prefix: str = "function."):
     if function_prefix == "":
-        raise ValueError(
+        msg = (
             "The function_prefix cannot be an empty string because"
             " it is used to distinguish between function and learner parameters."
         )
+        raise ValueError(
+            msg,
+        )
     kwargs = {}
     for col in df.columns:
         if col.startswith(function_prefix):
@@ -123,7 +127,8 @@ def partial_function_from_dataframe(function, df, function_prefix: str = "functi
             vs = df[col]
             v, *rest = vs.unique()
             if rest:
-                raise ValueError(f"The column '{col}' can only have one value.")
+                msg = f"The column '{col}' can only have one value."
+                raise ValueError(msg)
             kwargs[k] = v
     if not kwargs:
         return function
@@ -131,12 +136,15 @@ def partial_function_from_dataframe(function, df, function_prefix: str = "functi
     sig = inspect.signature(function)
     for k, v in kwargs.items():
         if k not in sig.parameters:
-            raise ValueError(
+            msg = (
                 f"The DataFrame contains a default parameter"
                 f" ({k}={v}) but the function does not have that parameter."
             )
+            raise ValueError(
+                msg,
+            )
         default = sig.parameters[k].default
-        if default != inspect._empty and kwargs[k] != default:
+        if default not in (inspect._empty, kwargs[k]):
             warnings.warn(
                 f"The DataFrame contains a default parameter"
                 f" ({k}={v}) but the function already has a default ({k}={default})."
@@ -163,7 +171,7 @@ def submit(self, fn: Callable, *args, **kwargs) -> concurrent.Future:  # type: i
     def map(self, fn, *iterable, timeout=None, chunksize=1):
         return map(fn, iterable)
 
-    def shutdown(self, wait=True):
+    def shutdown(self, wait=True) -> None:
         pass
 
 
@@ -177,7 +185,8 @@ def _cache_key(args: tuple[Any], kwargs: dict[str, Any]) -> str:
 
 
 def daskify(
-    client: AsyncDaskClient, cache: bool = False
+    client: AsyncDaskClient,
+    cache: bool = False,
 ) -> Callable[[Callable[..., T]], Callable[..., Awaitable[T]]]:
     from dask import delayed
 
@@ -199,8 +208,7 @@ async def wrapper(*args: Any, **kwargs: Any) -> T:
             else:
                 future = client.compute(delayed_func(*args, **kwargs))
 
-            result = await future
-            return result
+            return await future
 
         return wrapper
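A hedged sketch of how `daskify` is meant to be used, assuming a `dask.distributed` `Client` created in asynchronous mode inside a running event loop (`expensive` is a made-up stand-in, not an adaptive API):

    from dask.distributed import Client

    from adaptive.utils import daskify

    async def main() -> int:
        # asynchronous=True yields an awaitable Client suitable for use
        # inside an event loop (an assumption about the setup, not
        # something this diff prescribes)
        client = await Client(asynchronous=True)

        @daskify(client, cache=True)
        def expensive(x):  # hypothetical stand-in function
            return x**2

        # the wrapped function returns an awaitable computed on the cluster;
        # with cache=True, repeated calls with the same arguments reuse the result
        return await expensive(3)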
diff --git a/benchmarks/benchmarks/benchmarks.py b/benchmarks/benchmarks/benchmarks.py
index 6af7345f5..4ea0d17e0 100644
--- a/benchmarks/benchmarks/benchmarks.py
+++ b/benchmarks/benchmarks/benchmarks.py
@@ -19,30 +19,30 @@ def f_2d(xy):
 
 
 class TimeLearner1D:
-    def setup(self):
+    def setup(self) -> None:
         self.learner = adaptive.Learner1D(f_1d, bounds=(-1, 1))
 
-    def time_run(self):
+    def time_run(self) -> None:
         for _ in range(1000):
             points, _ = self.learner.ask(1)
             self.learner.tell_many(points, map(f_1d, points))
 
 
 class TimeLearner2D:
-    def setup(self):
+    def setup(self) -> None:
         self.learner = adaptive.Learner2D(f_2d, bounds=[(-1, 1), (-1, 1)])
         self.xs = np.random.rand(50**2, 2)
         self.ys = np.random.rand(50**2)
 
-    def time_run(self):
+    def time_run(self) -> None:
         for _ in range(50**2):
             points, _ = self.learner.ask(1)
             self.learner.tell_many(points, map(f_2d, points))
 
-    def time_ask(self):
+    def time_ask(self) -> None:
         for _ in range(50**2):
             self.learner.ask(1)
 
-    def time_tell(self):
+    def time_tell(self) -> None:
         for x, y in zip(self.xs, self.ys):
             self.learner.tell(x, y)
diff --git a/docs/logo.py b/docs/logo.py
index 384c71065..7ac574a95 100644
--- a/docs/logo.py
+++ b/docs/logo.py
@@ -1,7 +1,7 @@
 import os
 import sys
 
-import holoviews
+import holoviews as hv
 import matplotlib.pyplot as plt
 import matplotlib.tri as mtri
 import numpy as np
@@ -9,9 +9,9 @@
 
 sys.path.insert(0, os.path.abspath(".."))  # to get adaptive on the path
 
-import adaptive  # noqa: E402, isort:skip
+import adaptive  # , isort:skip
 
-holoviews.notebook_extension("matplotlib")
+hv.notebook_extension("matplotlib")
 
 
 def create_and_run_learner():
@@ -27,7 +27,7 @@ def ring(xy):
     return learner
 
 
-def plot_learner_and_save(learner, fname):
+def plot_learner_and_save(learner, fname) -> None:
     fig, ax = plt.subplots()
     tri = learner.interpolator(scaled=True).tri
     triang = mtri.Triangulation(*tri.points.T, triangles=tri.vertices)
@@ -54,7 +54,7 @@ def add_rounded_corners(fname, rad):
     return im
 
 
-def main(fname="source/_static/logo_docs.png"):
+def main(fname="source/_static/logo_docs.png") -> None:
     learner = create_and_run_learner()
     plot_learner_and_save(learner, fname)
     im = add_rounded_corners(fname, rad=200)
diff --git a/docs/source/conf.py b/docs/source/conf.py
index cbe37c5c8..fe63031dd 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -101,5 +101,5 @@ def replace_named_emojis(input_file: Path, output_file: Path) -> None:
     replace_named_emojis(input_file, output_file)
 
 
-def setup(app):
+def setup(app) -> None:
     app.add_css_file("custom.css")  # For the `live_info` widget
diff --git a/ipynb_filter.py b/ipynb_filter.py
index 6ae8a8c8a..270379cf4 100755
--- a/ipynb_filter.py
+++ b/ipynb_filter.py
@@ -15,7 +15,7 @@ class RemoveMetadata(Preprocessor):
     def preprocess(self, nb, resources):
         nb.metadata = {
-            "language_info": {"name": "python", "pygments_lexer": "ipython3"}
+            "language_info": {"name": "python", "pygments_lexer": "ipython3"},
         }
         return nb, resources
diff --git a/noxfile.py b/noxfile.py
index 1866a94a6..f0760247f 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -3,21 +3,21 @@
 
 @nox.session(python=["3.9", "3.10", "3.11"])
 @nox.parametrize("all_deps", [True, False])
-def pytest(session, all_deps):
+def pytest(session, all_deps) -> None:
     session.install(".[testing,other]" if all_deps else ".[testing]")
     session.run("coverage", "erase")
     session.run("pytest")
 
 
 @nox.session(python="3.11")
-def pytest_typeguard(session):
+def pytest_typeguard(session) -> None:
     session.install(".[testing,other]")
     session.run("coverage", "erase")
     session.run("pytest", "--typeguard-packages=adaptive")
 
 
 @nox.session(python="3.11")
-def coverage(session):
+def coverage(session) -> None:
     session.install("coverage")
     session.install(".[testing,other]")
     session.run("pytest")