diff --git a/adaptive/learner/average_learner.py b/adaptive/learner/average_learner.py
index c3d4892b4..494309a05 100644
--- a/adaptive/learner/average_learner.py
+++ b/adaptive/learner/average_learner.py
@@ -1,22 +1,24 @@
 from __future__ import annotations

 from math import sqrt
-from typing import Callable
+from typing import TYPE_CHECKING, Callable

 import cloudpickle
 import numpy as np

 from adaptive.learner.base_learner import BaseLearner
 from adaptive.notebook_integration import ensure_holoviews
-from adaptive.types import Float, Int, Real
 from adaptive.utils import (
     assign_defaults,
     cache_latest,
     partial_function_from_dataframe,
 )

+if TYPE_CHECKING:
+    from adaptive.types import Float, Int, Real
+
 try:
-    import pandas
+    import pandas as pd

     with_pandas = True

@@ -47,6 +49,7 @@ class AverageLearner(BaseLearner):
         Points that still have to be evaluated.
     npoints : int
         Number of evaluated points.
+
     """

     def __init__(
@@ -57,7 +60,8 @@ def __init__(
         min_npoints: int = 2,
     ) -> None:
         if atol is None and rtol is None:
-            raise Exception("At least one of `atol` and `rtol` should be set.")
+            msg = "At least one of `atol` and `rtol` should be set."
+            raise Exception(msg)
         if atol is None:
             atol = np.inf
         if rtol is None:
@@ -92,7 +96,7 @@ def to_dataframe(  # type: ignore[override]
         function_prefix: str = "function.",
         seed_name: str = "seed",
         y_name: str = "y",
-    ) -> pandas.DataFrame:
+    ) -> pd.DataFrame:
         """Return the data as a `pandas.DataFrame`.

         Parameters
@@ -116,10 +120,12 @@ def to_dataframe(  # type: ignore[override]
         ------
         ImportError
             If `pandas` is not installed.
+
         """
         if not with_pandas:
-            raise ImportError("pandas is not installed.")
-        df = pandas.DataFrame(sorted(self.data.items()), columns=[seed_name, y_name])
+            msg = "pandas is not installed."
+            raise ImportError(msg)
+        df = pd.DataFrame(sorted(self.data.items()), columns=[seed_name, y_name])
         df.attrs["inputs"] = [seed_name]
         df.attrs["output"] = y_name
         if with_default_function_args:
@@ -128,12 +134,12 @@ def to_dataframe(  # type: ignore[override]

     def load_dataframe(  # type: ignore[override]
         self,
-        df: pandas.DataFrame,
+        df: pd.DataFrame,
         with_default_function_args: bool = True,
         function_prefix: str = "function.",
         seed_name: str = "seed",
         y_name: str = "y",
-    ):
+    ) -> None:
         """Load data from a `pandas.DataFrame`.

         If ``with_default_function_args`` is True, then ``learner.function``'s
@@ -153,11 +159,14 @@ def load_dataframe(  # type: ignore[override]
             The ``seed_name`` used in ``to_dataframe``, by default "seed"
         y_name : str, optional
             The ``y_name`` used in ``to_dataframe``, by default "y"
+
         """
         self.tell_many(df[seed_name].values, df[y_name].values)
         if with_default_function_args:
             self.function = partial_function_from_dataframe(
-                self.function, df, function_prefix
+                self.function,
+                df,
+                function_prefix,
             )

     def ask(self, n: int, tell_pending: bool = True) -> tuple[list[int], list[Float]]:
@@ -168,7 +177,7 @@ def ask(self, n: int, tell_pending: bool = True) -> tuple[list[int], list[Float]
         points = list(
             set(range(self.n_requested + n))
             - set(self.data)
-            - set(self.pending_points)
+            - set(self.pending_points),
         )[:n]

         loss_improvements = [self._loss_improvement(n) / n] * n
@@ -199,7 +208,8 @@ def mean(self) -> Float:
     @property
     def std(self) -> Float:
         """The corrected sample standard deviation of the values
-        in `data`."""
+        in `data`.
+ """ n = self.npoints if n < self.min_npoints: return np.inf @@ -211,10 +221,7 @@ def std(self) -> Float: @cache_latest def loss(self, real: bool = True, *, n=None) -> Float: - if n is None: - n = self.npoints if real else self.n_requested - else: - n = n + n = (self.npoints if real else self.n_requested) if n is None else n if n < self.min_npoints: return np.inf standard_error = self.std / sqrt(n) @@ -232,7 +239,7 @@ def _loss_improvement(self, n: int) -> Float: else: return np.inf - def remove_unfinished(self): + def remove_unfinished(self) -> None: """Remove uncomputed data from the learner.""" self.pending_points = set() @@ -242,7 +249,9 @@ def plot(self): Returns ------- holoviews.element.Histogram - A histogram of the evaluated data.""" + A histogram of the evaluated data. + + """ hv = ensure_holoviews() vals = [v for v in self.data.values() if v is not None] if not vals: diff --git a/adaptive/learner/average_learner1D.py b/adaptive/learner/average_learner1D.py index 9678b4f64..aa28de700 100644 --- a/adaptive/learner/average_learner1D.py +++ b/adaptive/learner/average_learner1D.py @@ -3,10 +3,9 @@ import math import sys from collections import defaultdict -from collections.abc import Iterable, Sequence from copy import deepcopy from math import hypot -from typing import Callable +from typing import TYPE_CHECKING, Callable import numpy as np import scipy.stats @@ -18,8 +17,11 @@ from adaptive.types import Int, Real from adaptive.utils import assign_defaults, partial_function_from_dataframe +if TYPE_CHECKING: + from collections.abc import Iterable, Sequence + try: - import pandas + import pandas as pd with_pandas = True @@ -73,6 +75,7 @@ class AverageLearner1D(Learner1D): If self.error[x] < min_error, then x will not be resampled anymore, i.e., the smallest confidence interval at x is [self.data[x] - min_error, self.data[x] + min_error]. + """ def __init__( @@ -87,17 +90,22 @@ def __init__( min_samples: int = 50, max_samples: int = sys.maxsize, min_error: float = 0, - ): + ) -> None: if not (0 < delta <= 1): - raise ValueError("Learner requires 0 < delta <= 1.") + msg = "Learner requires 0 < delta <= 1." + raise ValueError(msg) if not (0 < alpha <= 1): - raise ValueError("Learner requires 0 < alpha <= 1.") + msg = "Learner requires 0 < alpha <= 1." + raise ValueError(msg) if not (0 < neighbor_sampling <= 1): - raise ValueError("Learner requires 0 < neighbor_sampling <= 1.") + msg = "Learner requires 0 < neighbor_sampling <= 1." + raise ValueError(msg) if min_samples < 0: - raise ValueError("min_samples should be positive.") + msg = "min_samples should be positive." + raise ValueError(msg) if min_samples > max_samples: - raise ValueError("max_samples should be larger than min_samples.") + msg = "max_samples should be larger than min_samples." 
+            raise ValueError(msg)

         super().__init__(function, bounds, loss_per_interval)  # type: ignore[arg-type]

@@ -142,7 +150,7 @@ def new(self) -> AverageLearner1D:

     @property
     def nsamples(self) -> int:
-        """Returns the total number of samples"""
+        """Returns the total number of samples."""
         return sum(self._number_samples.values())

     @property
@@ -160,7 +168,7 @@ def to_numpy(self, mean: bool = False) -> np.ndarray:
                 (seed, x, *np.atleast_1d(y))
                 for x, seed_y in self._data_samples.items()
                 for seed, y in seed_y.items()
-            ]
+            ],
         )

     def to_dataframe(  # type: ignore[override]
@@ -171,7 +179,7 @@ def to_dataframe(  # type: ignore[override]
         seed_name: str = "seed",
         x_name: str = "x",
         y_name: str = "y",
-    ) -> pandas.DataFrame:
+    ) -> pd.DataFrame:
         """Return the data as a `pandas.DataFrame`.

         Parameters
@@ -197,9 +205,11 @@ def to_dataframe(  # type: ignore[override]
         ------
         ImportError
             If `pandas` is not installed.
+
         """
         if not with_pandas:
-            raise ImportError("pandas is not installed.")
+            msg = "pandas is not installed."
+            raise ImportError(msg)
         if mean:
             data: list[tuple[Real, Real]] = sorted(self.data.items())
             columns = [x_name, y_name]
@@ -210,7 +220,7 @@ def to_dataframe(  # type: ignore[override]
                 for seed, y in sorted(seed_y.items())
             ]
             columns = [seed_name, x_name, y_name]
-        df = pandas.DataFrame(data, columns=columns)
+        df = pd.DataFrame(data, columns=columns)
         df.attrs["inputs"] = [seed_name, x_name]
         df.attrs["output"] = y_name
         if with_default_function_args:
@@ -219,13 +229,13 @@ def to_dataframe(  # type: ignore[override]

     def load_dataframe(  # type: ignore[override]
         self,
-        df: pandas.DataFrame,
+        df: pd.DataFrame,
         with_default_function_args: bool = True,
         function_prefix: str = "function.",
         seed_name: str = "seed",
         x_name: str = "x",
         y_name: str = "y",
-    ):
+    ) -> None:
         """Load data from a `pandas.DataFrame`.

         If ``with_default_function_args`` is True, then ``learner.function``'s
@@ -247,6 +257,7 @@ def load_dataframe(  # type: ignore[override]
             The ``x_name`` used in ``to_dataframe``, by default "x"
         y_name : str, optional
             The ``y_name`` used in ``to_dataframe``, by default "y"
+
         """
         # Were using zip instead of df[[seed_name, x_name]].values because that will
         # make the seeds into floats
@@ -254,7 +265,9 @@ def load_dataframe(  # type: ignore[override]
         self.tell_many(seed_x, df[y_name].values)
         if with_default_function_args:
             self.function = partial_function_from_dataframe(
-                self.function, df, function_prefix
+                self.function,
+                df,
+                function_prefix,
             )

     def ask(self, n: int, tell_pending: bool = True) -> tuple[Points, list[float]]:  # type: ignore[override]
@@ -268,17 +281,16 @@ def ask(self, n: int, tell_pending: bool = True) -> tuple[Points, list[float]]:
             # TODO: if `n` is very large, we should suggest a few different points.
             points, loss_improvements = self._ask_for_new_point(n)
         # Else, check the resampling condition
-        else:
-            if len(self.rescaled_error):
-                # This is in case rescaled_error is empty (e.g. when sigma=0)
-                x, resc_error = self.rescaled_error.peekitem(0)
-                # Resampling condition
-                if resc_error > self.delta:
-                    points, loss_improvements = self._ask_for_more_samples(x, n)
-                else:
-                    points, loss_improvements = self._ask_for_new_point(n)
+        elif len(self.rescaled_error):
+            # This is in case rescaled_error is empty (e.g. when sigma=0)
+            x, resc_error = self.rescaled_error.peekitem(0)
+            # Resampling condition
+            if resc_error > self.delta:
+                points, loss_improvements = self._ask_for_more_samples(x, n)
             else:
                 points, loss_improvements = self._ask_for_new_point(n)
+        else:
+            points, loss_improvements = self._ask_for_new_point(n)

         if tell_pending:
             for p in points:
@@ -289,7 +301,8 @@ def ask(self, n: int, tell_pending: bool = True) -> tuple[Points, list[float]]:
     def _ask_for_more_samples(self, x: Real, n: int) -> tuple[Points, list[float]]:
         """When asking for n points, the learner returns n times an existing point
         to be resampled, since in general n << min_samples and this point will
-        need to be resampled many more times"""
+        need to be resampled many more times.
+        """
         n_existing = self._number_samples.get(x, 0)
         points = [(seed + n_existing, x) for seed in range(n)]
         xl, xr = self.neighbors_combined[x]
@@ -300,7 +313,7 @@ def _ask_for_more_samples(self, x: Real, n: int) -> tuple[Points, list[float]]:
             loss_improvement = float("inf")
         else:
             loss_improvement = loss - loss * np.sqrt(n_existing) / np.sqrt(
-                n_existing + n
+                n_existing + n,
             )
         loss_improvements = [loss_improvement / n] * n
         return points, loss_improvements
@@ -308,7 +321,8 @@ def _ask_for_more_samples(self, x: Real, n: int) -> tuple[Points, list[float]]:
     def _ask_for_new_point(self, n: int) -> tuple[Points, list[float]]:
         """When asking for n new points, the learner returns n times a single
         new point, since in general n << min_samples and this point will need
-        to be resampled many more times"""
+        to be resampled many more times.
+        """
         points, (loss_improvement,) = self._ask_points_without_adding(1)
         seed_points = list(zip(range(n), n * points))
         loss_improvements = [loss_improvement / n] * n
@@ -324,10 +338,13 @@ def tell_pending(self, seed_x: Point) -> None:  # type: ignore[override]
     def tell(self, seed_x: Point, y: Real) -> None:  # type: ignore[override]
         seed, x = seed_x
         if y is None:
-            raise TypeError(
+            msg = (
                 "Y-value may not be None, use learner.tell_pending(x)"
                 "to indicate that this value is currently being calculated"
             )
+            raise TypeError(
+                msg,
+            )

         if x not in self.data:
             self._update_data(x, y, "new")
@@ -344,6 +361,7 @@ def _update_rescaled_error_in_mean(self, x: Real, point_type: str) -> None:
         ----------
         point_type : str
             Must be either "new" or "resampled".
+
         """
         # Update neighbors
         x_left, x_right = self.neighbors[x]
@@ -493,27 +511,32 @@ def _calc_error_in_mean(self, ys: Iterable[Real], y_avg: Real, n: int) -> float:
         return t_student * (variance_in_mean / n) ** 0.5

     def tell_many(  # type: ignore[override]
-        self, xs: Points | np.ndarray, ys: Sequence[Real] | np.ndarray
+        self,
+        xs: Points | np.ndarray,
+        ys: Sequence[Real] | np.ndarray,
     ) -> None:
         # Check that all x are within the bounds
         # TODO: remove this requirement, all other learners add the data
         # but ignore it going forward.
         if not np.prod([x >= self.bounds[0] and x <= self.bounds[1] for _, x in xs]):
-            raise ValueError(
+            msg = (
                 "x value out of bounds, "
                 "remove x or enlarge the bounds of the learner"
             )
+            raise ValueError(
+                msg,
+            )

         # Create a mapping of points to a list of samples
         mapping: defaultdict[Real, defaultdict[Int, Real]] = defaultdict(
-            lambda: defaultdict(dict)
+            lambda: defaultdict(dict),
         )
         for (seed, x), y in zip(xs, ys):
             mapping[x][seed] = y

         for x, seed_y_mapping in mapping.items():
             if len(seed_y_mapping) == 1:
-                seed, y = list(seed_y_mapping.items())[0]
+                seed, y = next(iter(seed_y_mapping.items()))
                 self.tell((seed, x), y)
             elif len(seed_y_mapping) > 1:
                 # If we stored more than 1 y-value for the previous x,
@@ -530,13 +553,17 @@ def tell_many_at_point(self, x: Real, seed_y_mapping: dict[int, Real]) -> None:
             Value from the function domain.
         seed_y_mapping : Dict[int, Real]
             Dictionary of ``seed`` -> ``y`` at ``x``.
+
         """
         # Check x is within the bounds
         if not np.prod(x >= self.bounds[0] and x <= self.bounds[1]):
-            raise ValueError(
+            msg = (
                 "x value out of bounds, "
                 "remove x or enlarge the bounds of the learner"
             )
+            raise ValueError(
+                msg,
+            )

         # If x is a new point:
         if x not in self.data:
@@ -563,7 +590,9 @@ def tell_many_at_point(self, x: Real, seed_y_mapping: dict[int, Real]) -> None:
             if n > self.min_samples:
                 self._undersampled_points.discard(x)
             self.error[x] = self._calc_error_in_mean(
-                self._data_samples[x].values(), self.data[x], n
+                self._data_samples[x].values(),
+                self.data[x],
+                n,
             )
             self._update_distances(x)
             self._update_rescaled_error_in_mean(x, "resampled")
@@ -595,6 +624,7 @@ def plot(self):
         plot : `holoviews.element.Scatter * holoviews.element.ErrorBars *
                 holoviews.element.Path`
             Plot of the evaluated data.
+
         """
         hv = ensure_holoviews()
         if not self.data:
@@ -606,7 +636,8 @@ def plot(self):
             line = hv.Path((xs, ys))
             p = scatter * error * line
         else:
-            raise Exception("plot() not implemented for vector functions.")
+            msg = "plot() not implemented for vector functions."
+            raise Exception(msg)

         # Plot with 5% empty margins such that the boundary points are visible
         margin = 0.05 * (self.bounds[1] - self.bounds[0])
@@ -616,7 +647,7 @@ def plot(self):


 def decreasing_dict() -> ItemSortedDict:
-    """This initialization orders the dictionary from large to small values"""
+    """This initialization orders the dictionary from large to small values."""

     def sorting_rule(key, value):
         return -value
diff --git a/adaptive/learner/balancing_learner.py b/adaptive/learner/balancing_learner.py
index e9a4a661e..bc18584c6 100644
--- a/adaptive/learner/balancing_learner.py
+++ b/adaptive/learner/balancing_learner.py
@@ -24,7 +24,7 @@
     from typing import Literal

 try:
-    import pandas
+    import pandas as pd

     with_pandas = True
 except ModuleNotFoundError:
@@ -94,6 +94,7 @@ class BalancingLearner(BaseLearner):
     learner) it may be that the loss cannot be compared *even between learners
     of the same type*. In this case the `~adaptive.BalancingLearner` will
     behave in an undefined way. Change the `strategy` in that case.
+
     """

     def __init__(
@@ -116,8 +117,9 @@ def __init__(
         self._cdims_default = cdims

         if len({learner.__class__ for learner in self.learners}) > 1:
+            msg = "A BalancingLearner can handle only one type of learner."
             raise TypeError(
-                "A BalacingLearner can handle only one type" " of learners."
+                msg,
             )

         self.strategy: STRATEGY_TYPE = strategy
@@ -153,8 +155,9 @@ def nsamples(self):
         if hasattr(self.learners[0], "nsamples"):
             return sum(lrn.nsamples for lrn in self.learners)
         else:
+            msg = f"{type(self.learners[0])} has no attribute called `nsamples`."
             raise AttributeError(
-                f"{type(self.learners[0])} as no attribute called `nsamples`."
+                msg,
             )

     @property
@@ -165,7 +168,8 @@ def strategy(self) -> STRATEGY_TYPE:
         the child learners, the number of points per learner, using 'npoints',
         or by going through all learners one by one using 'cycle'.
         One can dynamically change the strategy while the simulation is
-        running by changing the ``learner.strategy`` attribute."""
+        running by changing the ``learner.strategy`` attribute.
+        """
         return self._strategy

     @strategy.setter
@@ -181,13 +185,17 @@ def strategy(self, strategy: STRATEGY_TYPE) -> None:
             self._ask_and_tell = self._ask_and_tell_based_on_cycle
             self._cycle = itertools.cycle(range(len(self.learners)))
         else:
-            raise ValueError(
+            msg = (
                 'Only strategy="loss_improvements", strategy="loss",'
                 ' strategy="npoints", or strategy="cycle" is implemented.'
             )
+            raise ValueError(
+                msg,
+            )

     def _ask_and_tell_based_on_loss_improvements(
-        self, n: int
+        self,
+        n: int,
     ) -> tuple[list[tuple[int, Any]], list[float]]:
         selected = []  # tuples ((learner_index, point), loss_improvement)
         total_points = [lrn.npoints + len(lrn.pending_points) for lrn in self.learners]
@@ -199,7 +207,7 @@ def _ask_and_tell_based_on_loss_improvements(
                 self._ask_cache[index] = learner.ask(n=1, tell_pending=False)
             points, loss_improvements = self._ask_cache[index]
             to_select.append(
-                ((index, points[0]), (loss_improvements[0], -total_points[index]))
+                ((index, points[0]), (loss_improvements[0], -total_points[index])),
             )

         # Choose the optimal improvement.
@@ -212,14 +220,16 @@ def _ask_and_tell_based_on_loss_improvements(
         return points, loss_improvements

     def _ask_and_tell_based_on_loss(
-        self, n: int
+        self,
+        n: int,
     ) -> tuple[list[tuple[int, Any]], list[float]]:
         selected = []  # tuples ((learner_index, point), loss_improvement)
         total_points = [lrn.npoints + len(lrn.pending_points) for lrn in self.learners]
         for _ in range(n):
             losses = self._losses(real=False)
             index, _ = max(
-                enumerate(zip(losses, (-n for n in total_points))), key=itemgetter(1)
+                enumerate(zip(losses, (-n for n in total_points))),
+                key=itemgetter(1),
             )
             total_points[index] += 1

@@ -235,7 +245,8 @@ def _ask_and_tell_based_on_loss(
         return points, loss_improvements

     def _ask_and_tell_based_on_npoints(
-        self, n: Int
+        self,
+        n: Int,
     ) -> tuple[list[tuple[Int, Any]], list[float]]:
         selected = []  # tuples ((learner_index, point), loss_improvement)
         total_points = [lrn.npoints + len(lrn.pending_points) for lrn in self.learners]
@@ -253,7 +264,8 @@ def _ask_and_tell_based_on_npoints(
         return points, loss_improvements

     def _ask_and_tell_based_on_cycle(
-        self, n: int
+        self,
+        n: int,
     ) -> tuple[list[tuple[Int, Any]], list[float]]:
         points, loss_improvements = [], []
         for _ in range(n):
@@ -266,7 +278,9 @@ def _ask_and_tell_based_on_cycle(
         return points, loss_improvements

     def ask(
-        self, n: int, tell_pending: bool = True
+        self,
+        n: int,
+        tell_pending: bool = True,
     ) -> tuple[list[tuple[Int, Any]], list[float]]:
         """Chose points for learners."""
         if n == 0:
@@ -348,6 +362,7 @@ def plot(
         dm : `holoviews.core.DynamicMap` (default) or `holoviews.core.HoloMap`
             A `DynamicMap` ``(dynamic=True)`` or `HoloMap`
             ``(dynamic=False)`` with sliders that are defined by `cdims`.
+ """ hv = ensure_holoviews() cdims = cdims or self._cdims_default @@ -438,6 +453,7 @@ def from_product( ----- The order of the child learners inside `learner.learners` is the same as ``adaptive.utils.named_product(**combos)``. + """ learners = [] arguments = named_product(**combos) @@ -465,22 +481,26 @@ def to_dataframe(self, index_name: str = "learner_index", **kwargs): # type: ig ------ ImportError If `pandas` is not installed. + """ if not with_pandas: - raise ImportError("pandas is not installed.") + msg = "pandas is not installed." + raise ImportError(msg) dfs = [] for i, learner in enumerate(self.learners): df = learner.to_dataframe(**kwargs) cols = list(df.columns) df[index_name] = i - df = df[[index_name] + cols] + df = df[[index_name, *cols]] dfs.append(df) - df = pandas.concat(dfs, axis=0, ignore_index=True) - return df + return pd.concat(dfs, axis=0, ignore_index=True) def load_dataframe( # type: ignore[override] - self, df: pandas.DataFrame, index_name: str = "learner_index", **kwargs - ): + self, + df: pd.DataFrame, + index_name: str = "learner_index", + **kwargs, + ) -> None: """Load the data from a `pandas.DataFrame` into the child learners. Parameters @@ -491,6 +511,7 @@ def load_dataframe( # type: ignore[override] The ``index_name`` used in `to_dataframe`, by default "learner_index". **kwargs : dict Keyword arguments passed to each ``child_learner.load_dataframe(**kwargs)``. + """ for i, gr in df.groupby(index_name): self.learners[i].load_dataframe(gr, **kwargs) @@ -529,6 +550,7 @@ def save( >>> runner = adaptive.Runner(learner) >>> # Then save >>> learner.save(combo_fname) # use 'load' in the same way + """ if isinstance(fname, Iterable): for lrn, _fname in zip(self.learners, fname): @@ -557,6 +579,7 @@ def load( Example ------- See the example in the `BalancingLearner.save` doc-string. + """ if isinstance(fname, Iterable): for lrn, _fname in zip(self.learners, fname): @@ -568,7 +591,7 @@ def load( def _get_data(self) -> list[Any]: return [lrn._get_data() for lrn in self.learners] - def _set_data(self, data: list[Any]): + def _set_data(self, data: list[Any]) -> None: for lrn, _data in zip(self.learners, data): lrn._set_data(_data) diff --git a/adaptive/learner/base_learner.py b/adaptive/learner/base_learner.py index ff2d1e483..bf06b5389 100644 --- a/adaptive/learner/base_learner.py +++ b/adaptive/learner/base_learner.py @@ -9,7 +9,7 @@ from adaptive.utils import load, save if TYPE_CHECKING: - import pandas + import pandas as pd def uses_nth_neighbors(n: int): @@ -96,7 +96,7 @@ class BaseLearner(abc.ABC): pending_points: set function: Callable[..., Any] - def tell(self, x, y): + def tell(self, x, y) -> None: """Tell the learner about a single value. Parameters @@ -107,7 +107,7 @@ def tell(self, x, y): """ self.tell_many([x], [y]) - def tell_many(self, xs, ys): + def tell_many(self, xs, ys) -> None: """Tell the learner about some values. Parameters @@ -169,7 +169,7 @@ def _set_data(self, data: Any) -> None: def new(self): """Return a new learner with the same function and parameters.""" - def copy_from(self, other): + def copy_from(self, other) -> None: """Copy over the data from another learner. Parameters @@ -180,7 +180,7 @@ def copy_from(self, other): """ self._set_data(other._get_data()) - def save(self, fname, compress=True): + def save(self, fname, compress=True) -> None: """Save the data of the learner into a pickle file. 

         Parameters
@@ -195,7 +195,7 @@ def save(self, fname, compress=True):
         data = self._get_data()
         save(fname, data, compress)

-    def load(self, fname, compress=True):
+    def load(self, fname, compress=True) -> None:
         """Load the data of a learner from a pickle file.

         Parameters
@@ -217,7 +217,7 @@ def to_dataframe(
         with_default_function_args: bool = True,
         function_prefix: str = "function.",
         **kwargs: Any,
-    ) -> pandas.DataFrame:
+    ) -> pd.DataFrame:
         """Return the data as a `pandas.DataFrame`.

         Parameters
@@ -242,7 +242,7 @@ def to_dataframe(
     @abc.abstractmethod
     def load_dataframe(
         self,
-        df: pandas.DataFrame,
+        df: pd.DataFrame,
         with_default_function_args: bool = True,
         function_prefix: str = "function.",
         **kwargs: Any,
diff --git a/adaptive/learner/data_saver.py b/adaptive/learner/data_saver.py
index a69807389..4ae3ca7a4 100644
--- a/adaptive/learner/data_saver.py
+++ b/adaptive/learner/data_saver.py
@@ -2,14 +2,15 @@

 import functools
 from collections import OrderedDict
-from typing import Any, Callable
+from typing import TYPE_CHECKING, Any, Callable

 from adaptive.learner.base_learner import BaseLearner, LearnerType
 from adaptive.utils import copy_docstring_from

-try:
-    import pandas
+if TYPE_CHECKING:
+    import pandas as pd

+try:
     with_pandas = True

 except ModuleNotFoundError:
@@ -38,6 +39,7 @@ class DataSaver(BaseLearner):
     >>> from operator import itemgetter
     >>> _learner = Learner1D(f, bounds=(-1.0, 1.0))
     >>> learner = DataSaver(_learner, arg_picker=itemgetter('y'))
+
     """

     def __init__(self, learner: LearnerType, arg_picker: Callable) -> None:
@@ -81,7 +83,7 @@ def to_dataframe(  # type: ignore[override]
         function_prefix: str = "function.",
         extra_data_name: str = "extra_data",
         **kwargs: Any,
-    ) -> pandas.DataFrame:
+    ) -> pd.DataFrame:
         """Return the data as a concatenated `pandas.DataFrame` from child learners.

         Parameters
@@ -99,9 +101,11 @@ def to_dataframe(  # type: ignore[override]
         ------
         ImportError
             If `pandas` is not installed.
+
         """
         if not with_pandas:
-            raise ImportError("pandas is not installed.")
+            msg = "pandas is not installed."
+            raise ImportError(msg)
         df = self.learner.to_dataframe(
             with_default_function_args=with_default_function_args,
             function_prefix=function_prefix,
@@ -115,7 +119,7 @@ def to_dataframe(  # type: ignore[override]

     def load_dataframe(  # type: ignore[override]
         self,
-        df: pandas.DataFrame,
+        df: pd.DataFrame,
         with_default_function_args: bool = True,
         function_prefix: str = "function.",
         extra_data_name: str = "extra_data",
@@ -138,6 +142,7 @@ def load_dataframe(  # type: ignore[override]
             be ``input_names=('x', 'y')``.
         **kwargs : dict
             Keyword arguments passed to each ``child_learner.load_dataframe(**kwargs)``.
+
         """
         self.learner.load_dataframe(
             df,
@@ -146,7 +151,7 @@ def load_dataframe(  # type: ignore[override]
             **kwargs,
         )
         keys = df.attrs.get("inputs", list(input_names))
-        for _, x in df[keys + [extra_data_name]].iterrows():
+        for _, x in df[[*keys, extra_data_name]].iterrows():
             key = _to_key(x[:-1])
             self.extra_data[key] = x[-1]

@@ -216,5 +221,6 @@ def make_datasaver(learner_type, arg_picker):
     ...          arg_picker=itemgetter('y'))
     >>> learner = adaptive.BalancingLearner.from_product(
     ...          jacobi, learner_type, dict(bounds=(0, 1)), combos)
+
     """
     return functools.partial(_ds, learner_type, arg_picker)
diff --git a/adaptive/learner/integrator_coeffs.py b/adaptive/learner/integrator_coeffs.py
index 862f76eb0..5b7ce3d55 100644
--- a/adaptive/learner/integrator_coeffs.py
+++ b/adaptive/learner/integrator_coeffs.py
@@ -74,7 +74,8 @@ def newton(n: int) -> np.ndarray:
     c = (n + 1) * [0]
     for (d, a), m in terms.items():
         if m and a != 0:
-            raise ValueError("Newton polynomial cannot be represented exactly.")
+            msg = "Newton polynomial cannot be represented exactly."
+            raise ValueError(msg)
         c[n - d] += m
         # The check could be removed and the above line replaced by
         # the following, but then the result would be no longer exact.
@@ -191,4 +192,5 @@ def __getattr__(name):
     try:
         return _coefficients()[name]
     except KeyError:
-        raise AttributeError(f"module {__name__} has no attribute {name}") from None
+        msg = f"module {__name__} has no attribute {name}"
+        raise AttributeError(msg) from None
diff --git a/adaptive/learner/integrator_learner.py b/adaptive/learner/integrator_learner.py
index 20631a0c5..5f63e0ce9 100644
--- a/adaptive/learner/integrator_learner.py
+++ b/adaptive/learner/integrator_learner.py
@@ -18,7 +18,7 @@
 from adaptive.utils import assign_defaults, cache_latest, restore

 try:
-    import pandas
+    import pandas as pd

     with_pandas = True

@@ -403,7 +403,8 @@ def approximating_intervals(self) -> set[_Interval]:
     def tell(self, point: float, value: float) -> None:
         if point not in self.x_mapping:
-            raise ValueError(f"Point {point} doesn't belong to any interval")
+            msg = f"Point {point} doesn't belong to any interval"
+            raise ValueError(msg)
         self.data[point] = value
         self.pending_points.discard(point)

@@ -434,11 +435,11 @@ def tell(self, point: float, value: float) -> None:
             assert ival in self.ivals
             self.priority_split.append(ival)

-    def tell_pending(self):
+    def tell_pending(self) -> None:
         pass

     def propagate_removed(self, ival: _Interval) -> None:
-        def _propagate_removed_down(ival):
+        def _propagate_removed_down(ival) -> None:
             ival.removed = True
             self.ivals.discard(ival)

@@ -474,7 +475,8 @@ def _ask_and_tell_pending(self, n: int) -> tuple[list[float], list[float]]:
             try:
                 self._fill_stack()
             except ValueError:
-                raise RuntimeError("No way to improve the integral estimate.") from None
+                msg = "No way to improve the integral estimate."
+                raise RuntimeError(msg) from None
             new_points, new_loss_improvements = self.pop_from_stack(n_left)
             points += new_points
             loss_improvements += new_loss_improvements
@@ -490,7 +492,7 @@ def pop_from_stack(self, n: int) -> tuple[list[float], list[float]]:
         ]
         return points, loss_improvements

-    def remove_unfinished(self):
+    def remove_unfinished(self) -> None:
         pass

     def _fill_stack(self) -> list[float]:
@@ -580,7 +582,7 @@ def to_dataframe(  # type: ignore[override]
         function_prefix: str = "function.",
         x_name: str = "x",
         y_name: str = "y",
-    ) -> pandas.DataFrame:
+    ) -> pd.DataFrame:
         """Return the data as a `pandas.DataFrame`.

         Parameters
@@ -607,8 +609,9 @@ def to_dataframe(  # type: ignore[override]

         """
         if not with_pandas:
-            raise ImportError("pandas is not installed.")
-        df = pandas.DataFrame(sorted(self.data.items()), columns=[x_name, y_name])
+            msg = "pandas is not installed."
+            raise ImportError(msg)
+        df = pd.DataFrame(sorted(self.data.items()), columns=[x_name, y_name])
         df.attrs["inputs"] = [x_name]
         df.attrs["output"] = y_name
         if with_default_function_args:
@@ -617,7 +620,7 @@ def to_dataframe(  # type: ignore[override]

     def load_dataframe(  # type: ignore[override]
         self,
-        df: pandas.DataFrame,
+        df: pd.DataFrame,
         with_default_function_args: bool = True,
         function_prefix: str = "function.",
         x_name: str = "x",
@@ -659,7 +662,7 @@ def _get_data(self):
             self.first_ival,
         )

-    def _set_data(self, data):
+    def _set_data(self, data) -> None:
         (
             self.priority_split,
             self.data,
diff --git a/adaptive/learner/learner1D.py b/adaptive/learner/learner1D.py
index bf04743bd..5e35a170a 100644
--- a/adaptive/learner/learner1D.py
+++ b/adaptive/learner/learner1D.py
@@ -4,7 +4,6 @@
 import itertools
 import math
 import sys
-from collections.abc import Sequence
 from copy import copy, deepcopy
 from typing import TYPE_CHECKING, Any, Callable, Optional, Union

@@ -17,7 +16,6 @@
 from adaptive.learner.learnerND import volume
 from adaptive.learner.triangulation import simplex_volume_in_embedding
 from adaptive.notebook_integration import ensure_holoviews
-from adaptive.types import Float, Int, Real
 from adaptive.utils import (
     assign_defaults,
     cache_latest,
@@ -31,7 +29,7 @@


 try:
-    import pandas
+    import pandas as pd

     with_pandas = True

@@ -42,6 +40,10 @@
     # -- types --

     # Commonly used types
+    from collections.abc import Sequence
+
+    from adaptive.types import Float, Int, Real
+
     Interval: TypeAlias = Union[tuple[float, float], tuple[float, float, int]]
     NeighborsType: TypeAlias = SortedDict[float, list[Optional[float]]]

@@ -49,7 +51,10 @@
     XsType0: TypeAlias = tuple[float, float]
     YsType0: TypeAlias = Union[tuple[float, float], tuple[np.ndarray, np.ndarray]]
     XsType1: TypeAlias = tuple[
-        Optional[float], Optional[float], Optional[float], Optional[float]
+        Optional[float],
+        Optional[float],
+        Optional[float],
+        Optional[float],
     ]
     YsType1: TypeAlias = Union[
         tuple[Optional[float], Optional[float], Optional[float], Optional[float]],
@@ -62,7 +67,8 @@
     ]
     XsTypeN: TypeAlias = tuple[Optional[float], ...]
     YsTypeN: TypeAlias = Union[
-        tuple[Optional[float], ...], tuple[Optional[np.ndarray], ...]
+        tuple[Optional[float], ...],
+        tuple[Optional[np.ndarray], ...],
     ]


@@ -92,9 +98,9 @@ def uniform_loss(xs: XsType0, ys: YsType0) -> Float:
     ...                          bounds=(-1, 1),
     ...                          loss_per_interval=uniform_sampling_1d)
     >>>
+
     """
-    dx = xs[1] - xs[0]
-    return dx
+    return xs[1] - xs[0]


 @uses_nth_neighbors(0)
@@ -141,7 +147,8 @@ def triangle_loss(xs: XsType1, ys: YsType1) -> Float:


 def resolution_loss_function(
-    min_length: Real = 0, max_length: Real = 1
+    min_length: Real = 0,
+    max_length: Real = 1,
 ) -> Callable[[XsType0, YsType0], Float]:
     """Loss function that is similar to the `default_loss` function, but you
     can set the maximum and minimum size of an interval.
@@ -162,6 +169,7 @@ def resolution_loss_function(
     >>>
     >>> loss = resolution_loss_function(min_length=0.01, max_length=1)
     >>> learner = adaptive.Learner1D(f, bounds=(-1, -1), loss_per_interval=loss)
+
     """

     @uses_nth_neighbors(0)
@@ -173,14 +181,15 @@ def resolution_loss(xs: XsType0, ys: YsType0) -> Float:
         if loss > max_length:
             # Return infinite such that this interval will be picked
             return np.inf
-        loss = default_loss(xs, ys)
-        return loss
+        return default_loss(xs, ys)

     return resolution_loss


 def curvature_loss_function(
-    area_factor: Real = 1, euclid_factor: Real = 0.02, horizontal_factor: Real = 0.02
+    area_factor: Real = 1,
+    euclid_factor: Real = 0.02,
+    horizontal_factor: Real = 0.02,
 ) -> Callable[[XsType1, YsType1], Float]:
     # XXX: add a doc-string
     @uses_nth_neighbors(1)
@@ -203,7 +212,8 @@ def curvature_loss(xs: XsType1, ys: YsType1) -> Float:


 def linspace(x_left: Real, x_right: Real, n: Int) -> list[Float]:
     """This is equivalent to 'np.linspace(x_left, x_right, n, endpoint=False)[1:]',
-    but it is 15-30 times faster for small 'n'."""
+    but it is 15-30 times faster for small 'n'.
+    """
     if n == 1:
         # This is just an optimization
         return []
@@ -223,7 +233,9 @@ def _get_neighbors_from_array(xs: np.ndarray) -> NeighborsType:


 def _get_intervals(
-    x: float, neighbors: NeighborsType, nth_neighbors: int
+    x: float,
+    neighbors: NeighborsType,
+    nth_neighbors: int,
 ) -> list[tuple[float, float]]:
     nn = nth_neighbors
     i = neighbors.index(x)
@@ -275,6 +287,7 @@ class Learner1D(BaseLearner):
         If `loss_per_interval` doesn't have such an attribute, it's assumed
         that is uses **no** neighboring intervals. Also see the
         `uses_nth_neighbors` decorator for more information.
+
     """

     def __init__(
@@ -282,11 +295,12 @@ def __init__(
         function: Callable[[Real], Float | np.ndarray],
         bounds: tuple[Real, Real],
         loss_per_interval: Callable[[XsTypeN, YsTypeN], Float] | None = None,
-    ):
+    ) -> None:
         self.function = function  # type: ignore

         if loss_per_interval is not None and hasattr(
-            loss_per_interval, "nth_neighbors"
+            loss_per_interval,
+            "nth_neighbors",
         ):
             self.nth_neighbors = loss_per_interval.nth_neighbors
         else:
@@ -362,7 +376,7 @@ def to_dataframe(  # type: ignore[override]
         function_prefix: str = "function.",
         x_name: str = "x",
         y_name: str = "y",
-    ) -> pandas.DataFrame:
+    ) -> pd.DataFrame:
         """Return the data as a `pandas.DataFrame`.

         Parameters
@@ -386,11 +400,13 @@ def to_dataframe(  # type: ignore[override]
         ------
         ImportError
             If `pandas` is not installed.
+
         """
         if not with_pandas:
-            raise ImportError("pandas is not installed.")
+            msg = "pandas is not installed."
+            raise ImportError(msg)
         xs, ys = zip(*sorted(self.data.items())) if self.data else ([], [])
-        df = pandas.DataFrame(xs, columns=[x_name])
+        df = pd.DataFrame(xs, columns=[x_name])
         df[y_name] = ys
         df.attrs["inputs"] = [x_name]
         df.attrs["output"] = y_name
@@ -400,7 +416,7 @@ def to_dataframe(  # type: ignore[override]

     def load_dataframe(  # type: ignore[override]
         self,
-        df: pandas.DataFrame,
+        df: pd.DataFrame,
         with_default_function_args: bool = True,
         function_prefix: str = "function.",
         x_name: str = "x",
@@ -425,11 +441,14 @@ def load_dataframe(  # type: ignore[override]
             The ``x_name`` used in ``to_dataframe``, by default "x"
         y_name : str, optional
             The ``y_name`` used in ``to_dataframe``, by default "y"
+
         """
         self.tell_many(df[x_name].values, df[y_name].values)
         if with_default_function_args:
             self.function = partial_function_from_dataframe(
-                self.function, df, function_prefix
+                self.function,
+                df,
+                function_prefix,
             )

     @property
@@ -464,7 +483,8 @@ def _get_point_by_index(self, ind: int) -> float | None:
         return self.neighbors.keys()[ind]

     def _get_loss_in_interval(self, x_left: float, x_right: float) -> float:
-        assert x_left is not None and x_right is not None
+        assert x_left is not None
+        assert x_right is not None

         if x_right - x_left < self._dx_eps:
             return 0
@@ -484,7 +504,9 @@ def _get_loss_in_interval(self, x_left: float, x_right: float) -> float:
             return self.loss_per_interval(xs_scaled, ys_scaled)

     def _update_interpolated_loss_in_interval(
-        self, x_left: float, x_right: float
+        self,
+        x_left: float,
+        x_right: float,
     ) -> None:
         if x_left is None or x_right is None:
             return
@@ -502,7 +524,7 @@ def _update_interpolated_loss_in_interval(
             a = b

     def _update_losses(self, x: float, real: bool = True) -> None:
-        """Update all losses that depend on x"""
+        """Update all losses that depend on x."""
         # When we add a new point x, we should update the losses
         # (x_left, x_right) are the "real" neighbors of 'x'.
         x_left, x_right = self._find_neighbors(x, self.neighbors)
@@ -592,10 +614,13 @@ def tell(self, x: float, y: Float | Sequence[Float] | np.ndarray) -> None:
             # The point is already evaluated before
             return
         if y is None:
-            raise TypeError(
+            msg = (
                 "Y-value may not be None, use learner.tell_pending(x)"
                 "to indicate that this value is currently being calculated"
             )
+            raise TypeError(
+                msg,
+            )

         # either it is a float/int, if not, try casting to a np.array
         if not isinstance(y, (float, int)):
@@ -730,7 +755,8 @@ def _missing_bounds(self) -> list[Real]:

     def _ask_points_without_adding(self, n: int) -> tuple[list[float], list[float]]:
         """Return 'n' points that are expected to maximally reduce the loss.
-        Without altering the state of the learner"""
+        Without altering the state of the learner.
+ """ # Find out how to divide the n points over the intervals # by finding positive integer n_i that minimize max(L_i / n_i) subject # to a constraint that sum(n_i) = n + N, with N the total number of @@ -789,13 +815,13 @@ def _ask_points_without_adding(self, n: int) -> tuple[list[float], list[float]]: points = list( itertools.chain.from_iterable( linspace(x_l, x_r, n) for (x_l, x_r, n) in quals - ) + ), ) loss_improvements = list( itertools.chain.from_iterable( itertools.repeat(quals[x0, x1, n], n - 1) for (x0, x1, n) in quals - ) + ), ) # add the missing bounds @@ -805,7 +831,9 @@ def _ask_points_without_adding(self, n: int) -> tuple[list[float], list[float]]: return points, loss_improvements def _loss( - self, mapping: dict[Interval, float], ival: Interval + self, + mapping: dict[Interval, float], + ival: Interval, ) -> tuple[float, Interval]: loss = mapping[ival] return finite_loss(ival, loss, self._scale[0]) @@ -822,9 +850,11 @@ def plot(self, *, scatter_or_line: str = "scatter"): ------- plot : `holoviews.Overlay` Plot of the evaluated data. + """ if scatter_or_line not in ("scatter", "line"): - raise ValueError("scatter_or_line must be 'scatter' or 'line'") + msg = "scatter_or_line must be 'scatter' or 'line'" + raise ValueError(msg) hv = ensure_holoviews() xs, ys = zip(*sorted(self.data.items())) if self.data else ([], []) @@ -882,13 +912,13 @@ def sort_key(ival, loss): loss, ival = finite_loss(ival, loss, x_scale) return -loss, ival - sorted_dict = ItemSortedDict(sort_key) - return sorted_dict + return ItemSortedDict(sort_key) def finite_loss(ival: Interval, loss: float, x_scale: float) -> tuple[float, Interval]: """Get the so-called finite_loss of an interval in order to be able to - sort intervals that have infinite loss.""" + sort intervals that have infinite loss. + """ # If the loss is infinite we return the # distance between the two points. if math.isinf(loss) or math.isnan(loss): diff --git a/adaptive/learner/learner2D.py b/adaptive/learner/learner2D.py index 1ea381794..ba75ae8cd 100644 --- a/adaptive/learner/learner2D.py +++ b/adaptive/learner/learner2D.py @@ -3,28 +3,32 @@ import itertools import warnings from collections import OrderedDict -from collections.abc import Iterable from copy import copy from math import sqrt -from typing import Callable +from typing import TYPE_CHECKING, Callable import cloudpickle import numpy as np from scipy import interpolate -from scipy.interpolate.interpnd import LinearNDInterpolator from adaptive.learner.base_learner import BaseLearner from adaptive.learner.triangulation import simplex_volume_in_embedding from adaptive.notebook_integration import ensure_holoviews -from adaptive.types import Bool, Float, Real from adaptive.utils import ( assign_defaults, cache_latest, partial_function_from_dataframe, ) +if TYPE_CHECKING: + from collections.abc import Iterable + + from scipy.interpolate.interpnd import LinearNDInterpolator + + from adaptive.types import Bool, Float, Real + try: - import pandas + import pandas as pd with_pandas = True @@ -47,10 +51,13 @@ def deviations(ip: LinearNDInterpolator) -> list[np.ndarray]: ------- deviations : list The deviation per triangle. 
+ """ values = ip.values / (ip.values.ptp(axis=0).max() or 1) gradients = interpolate.interpnd.estimate_gradients_2d_global( - ip.tri, values, tol=1e-6 + ip.tri, + values, + tol=1e-6, ) simplices = ip.tri.simplices @@ -68,8 +75,7 @@ def deviation(p, v, g): return dev n_levels = vs.shape[2] - devs = [deviation(p, vs[:, :, i], gs[:, :, i]) for i in range(n_levels)] - return devs + return [deviation(p, vs[:, :, i], gs[:, :, i]) for i in range(n_levels)] def areas(ip: LinearNDInterpolator) -> np.ndarray: @@ -86,11 +92,11 @@ def areas(ip: LinearNDInterpolator) -> np.ndarray: ------- areas : numpy.ndarray The area per triangle in ``ip.tri``. + """ p = ip.tri.points[ip.tri.simplices] q = p[:, :-1, :] - p[:, -1, None, :] - areas = abs(q[:, 0, 0] * q[:, 1, 1] - q[:, 0, 1] * q[:, 1, 0]) / 2 - return areas + return abs(q[:, 0, 0] * q[:, 1, 1] - q[:, 0, 1] * q[:, 1, 0]) / 2 def uniform_loss(ip: LinearNDInterpolator) -> np.ndarray: @@ -120,12 +126,14 @@ def uniform_loss(ip: LinearNDInterpolator) -> np.ndarray: ... loss_per_triangle=uniform_loss, ... ) >>> + """ return np.sqrt(areas(ip)) def resolution_loss_function( - min_distance: float = 0, max_distance: float = 1 + min_distance: float = 0, + max_distance: float = 1, ) -> Callable[[LinearNDInterpolator], np.ndarray]: """Loss function that is similar to the `default_loss` function, but you can set the maximimum and minimum size of a triangle. @@ -147,6 +155,7 @@ def resolution_loss_function( >>> >>> loss = resolution_loss_function(min_distance=0.01, max_distance=1) >>> learner = adaptive.Learner2D(f, bounds=[(-1, -1), (1, 1)], loss_per_triangle=loss) + """ def resolution_loss(ip): @@ -154,11 +163,11 @@ def resolution_loss(ip): A = areas(ip) # Setting areas with a small area to zero such that they won't be chosen again - loss[A < min_distance**2] = 0 + loss[min_distance**2 > A] = 0 # Setting triangles that have a size larger than max_distance to infinite loss # such that these triangles will be picked - loss[A > max_distance**2] = np.inf + loss[max_distance**2 < A] = np.inf return loss @@ -191,6 +200,7 @@ def minimize_triangle_surface_loss(ip: LinearNDInterpolator) -> np.ndarray: >>> learner = adaptive.Learner2D(f, bounds=[(-1, -1), (1, 1)], ... loss_per_triangle=minimize_triangle_surface_loss) >>> + """ tri = ip.tri points = tri.points[tri.simplices] @@ -224,11 +234,11 @@ def default_loss(ip: LinearNDInterpolator) -> np.ndarray: ------- losses : numpy.ndarray Loss per triangle in ``ip.tri``. + """ dev = np.sum(deviations(ip), axis=0) A = areas(ip) - losses = dev * np.sqrt(A) + 0.3 * A - return losses + return dev * np.sqrt(A) + 0.3 * A def thresholded_loss_function( @@ -236,8 +246,7 @@ def thresholded_loss_function( upper_threshold: float | None = None, priority_factor: float = 0.1, ) -> Callable[[LinearNDInterpolator], np.ndarray]: - """ - Factory function to create a custom loss function that deprioritizes + """Factory function to create a custom loss function that deprioritizes values above an upper threshold and below a lower threshold. Parameters @@ -256,6 +265,7 @@ def thresholded_loss_function( ------- custom_loss : Callable[[LinearNDInterpolator], np.ndarray] A custom loss function that can be used with Learner2D. + """ def custom_loss(ip: LinearNDInterpolator) -> np.ndarray: @@ -269,6 +279,7 @@ def custom_loss(ip: LinearNDInterpolator) -> np.ndarray: ------- losses : numpy.ndarray Loss per triangle in ``ip.tri``. 
+ """ losses = default_loss(ip) @@ -311,6 +322,7 @@ def choose_point_in_triangle(triangle: np.ndarray, max_badness: int) -> np.ndarr ------- point : numpy.ndarray The x and y coordinate of the suggested new point. + """ a, b, c = triangle area = 0.5 * np.cross(b - a, c - a) @@ -345,6 +357,7 @@ def triangle_loss(ip): This loss function is *extremely* slow. It is here because it gives the same result as the `adaptive.LearnerND`\s `~adaptive.learner.learnerND.triangle_loss`. + """ tri = ip.tri @@ -355,7 +368,8 @@ def get_neighbors(i, ip): return np.concatenate((tri.points[c], ip.values[c]), axis=-1) simplices = np.concatenate( - [tri.points[tri.simplices], ip.values[tri.simplices]], axis=-1 + [tri.points[tri.simplices], ip.values[tri.simplices]], + axis=-1, ) neighbors = [get_neighbors(i, ip) for i in range(len(tri.simplices))] @@ -427,6 +441,7 @@ class Learner2D(BaseLearner): `~adaptive.learner.learner2D.deviations` to calculate the areas and deviations from a linear interpolation over each triangle. + """ def __init__( @@ -473,7 +488,7 @@ def to_numpy(self): and ``(npoints, 2+vdim)`` if ``learner.function`` returns a vector of length ``vdim``. """ return np.array( - [(x, y, *np.atleast_1d(z)) for (x, y), z in sorted(self.data.items())] + [(x, y, *np.atleast_1d(z)) for (x, y), z in sorted(self.data.items())], ) def to_dataframe( # type: ignore[override] @@ -483,7 +498,7 @@ def to_dataframe( # type: ignore[override] x_name: str = "x", y_name: str = "y", z_name: str = "z", - ) -> pandas.DataFrame: + ) -> pd.DataFrame: """Return the data as a `pandas.DataFrame`. Parameters @@ -511,11 +526,13 @@ def to_dataframe( # type: ignore[override] ------ ImportError If `pandas` is not installed. + """ if not with_pandas: - raise ImportError("pandas is not installed.") + msg = "pandas is not installed." + raise ImportError(msg) data = sorted((x, y, z) for (x, y), z in self.data.items()) - df = pandas.DataFrame(data, columns=[x_name, y_name, z_name]) + df = pd.DataFrame(data, columns=[x_name, y_name, z_name]) df.attrs["inputs"] = [x_name, y_name] df.attrs["output"] = z_name if with_default_function_args: @@ -524,13 +541,13 @@ def to_dataframe( # type: ignore[override] def load_dataframe( # type: ignore[override] self, - df: pandas.DataFrame, + df: pd.DataFrame, with_default_function_args: bool = True, function_prefix: str = "function.", x_name: str = "x", y_name: str = "y", z_name: str = "z", - ): + ) -> None: """Load data from a `pandas.DataFrame`. If ``with_default_function_args`` is True, then ``learner.function``'s @@ -552,12 +569,15 @@ def load_dataframe( # type: ignore[override] The ``y_name`` used in ``to_dataframe``, by default "y" z_name : str, optional The ``z_name`` used in ``to_dataframe``, by default "z" + """ data = df.set_index([x_name, y_name])[z_name].to_dict() self._set_data(data) if with_default_function_args: self.function = partial_function_from_dataframe( - self.function, df, function_prefix + self.function, + df, + function_prefix, ) def _scale(self, points: list[tuple[float, float]] | np.ndarray) -> np.ndarray: @@ -596,7 +616,8 @@ def bounds_are_done(self) -> bool: ) def interpolated_on_grid( - self, n: int | None = None + self, + n: int | None = None, ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """Get the interpolated data on a grid. 
@@ -611,6 +632,7 @@ def interpolated_on_grid(
         xs : 1D numpy.ndarray
         ys : 1D numpy.ndarray
         interpolated_on_grid : 2D numpy.ndarray
+
         """
         ip = self.interpolator(scaled=True)
         if n is None:
@@ -661,7 +683,7 @@ def _data_combined(self) -> tuple[np.ndarray, np.ndarray]:
         return points_combined, values_combined

     def ip(self) -> LinearNDInterpolator:
-        """Deprecated, use `self.interpolator(scaled=True)`"""
+        """Deprecated, use `self.interpolator(scaled=True)`."""
         warnings.warn(
             "`learner.ip()` is deprecated, use `learner.interpolator(scaled=True)`."
             " This will be removed in v1.0.",
@@ -690,6 +712,7 @@ def interpolator(self, *, scaled: bool = False) -> LinearNDInterpolator:
         >>> xs, ys = [np.linspace(*b, num=100) for b in learner.bounds]
         >>> ip = learner.interpolator()
         >>> zs = ip(xs[:, None], ys[None, :])
+
         """
         if scaled:
             if self._ip is None:
@@ -704,7 +727,8 @@ def interpolator(self, *, scaled: bool = False) -> LinearNDInterpolator:
     def _interpolator_combined(self) -> LinearNDInterpolator:
         """A `scipy.interpolate.LinearNDInterpolator` instance
         containing the learner's data *and* interpolated data of
-        the `pending_points`."""
+        the `pending_points`.
+        """
         if self._ip_combined is None:
             points, values = self._data_combined()
             points = self._scale(points)
@@ -734,10 +758,12 @@ def tell_pending(self, point: tuple[float, float]) -> None:
         self._stack.pop(point, None)

     def _fill_stack(
-        self, stack_till: int = 1
+        self,
+        stack_till: int = 1,
     ) -> tuple[list[tuple[float, float]], list[float]]:
         if len(self.data) + len(self.pending_points) < self.ndim + 1:
-            raise ValueError("too few points...")
+            msg = "too few points..."
+            raise ValueError(msg)

         # Interpolate
         ip = self._interpolator_combined()
@@ -775,7 +801,9 @@ def _fill_stack(
         return points_new, losses_new

     def ask(
-        self, n: int, tell_pending: bool = True
+        self,
+        n: int,
+        tell_pending: bool = True,
     ) -> tuple[list[tuple[float, float] | np.ndarray], list[float]]:
         # Even if tell_pending is False we add the point such that _fill_stack
         # will return new points, later we remove these points if needed.
@@ -790,7 +818,7 @@ def ask(
         # than the number of triangles between the points. Therefore
         # it could fill up till a length smaller than `stack_till`.
         new_points, new_loss_improvements = self._fill_stack(
-            stack_till=max(n_left, self.stack_size)
+            stack_till=max(n_left, self.stack_size),
         )
         for p in new_points[:n_left]:
             self.tell_pending(p)
@@ -849,6 +877,7 @@ def plot(self, n=None, tri_alpha=0):
         -----
         The plot object that is returned if ``learner.function`` returns a
         vector *cannot* be used with the live_plotting functionality.
+ """ hv = ensure_holoviews() x, y = self.bounds @@ -882,7 +911,9 @@ def plot(self, n=None, tri_alpha=0): im = hv.Image([], bounds=lbrt) tris = hv.EdgePaths([]) return im.opts(cmap="viridis") * tris.opts( - line_width=0.5, alpha=tri_alpha, tools=[] + line_width=0.5, + alpha=tri_alpha, + tools=[], ) def _get_data(self) -> dict[tuple[float, float], Float | np.ndarray]: diff --git a/adaptive/learner/learnerND.py b/adaptive/learner/learnerND.py index 96792f863..850c1f90d 100644 --- a/adaptive/learner/learnerND.py +++ b/adaptive/learner/learnerND.py @@ -29,7 +29,7 @@ ) try: - import pandas + import pandas as pd with_pandas = True @@ -50,8 +50,7 @@ def volume(simplex, ys=None): # See https://www.jstor.org/stable/2315353 dim = len(simplex) - 1 - vol = np.abs(fast_det(matrix)) / np.math.factorial(dim) - return vol + return np.abs(fast_det(matrix)) / np.math.factorial(dim) def orientation(simplex): @@ -311,17 +310,20 @@ class LearnerND(BaseLearner): """ - def __init__(self, func, bounds, loss_per_simplex=None): + def __init__(self, func, bounds, loss_per_simplex=None) -> None: self._vdim = None self.loss_per_simplex = loss_per_simplex or default_loss if hasattr(self.loss_per_simplex, "nth_neighbors"): if self.loss_per_simplex.nth_neighbors > 1: - raise NotImplementedError( + msg = ( "The provided loss function wants " "next-nearest neighboring simplices for the loss computation, " "this feature is not yet implemented, either use " - "nth_neightbors = 0 or 1", + "nth_neightbors = 0 or 1" + ) + raise NotImplementedError( + msg, ) self.nth_neighbors = self.loss_per_simplex.nth_neighbors else: @@ -418,7 +420,7 @@ def to_dataframe( # type: ignore[override] function_prefix: str = "function.", point_names: tuple[str, ...] = ("x", "y", "z"), value_name: str = "value", - ) -> pandas.DataFrame: + ) -> pd.DataFrame: """Return the data as a `pandas.DataFrame`. Parameters @@ -446,14 +448,18 @@ def to_dataframe( # type: ignore[override] """ if not with_pandas: - raise ImportError("pandas is not installed.") + msg = "pandas is not installed." + raise ImportError(msg) if len(point_names) != self.ndim: - raise ValueError( + msg = ( f"point_names ({point_names}) should have the" - f" same length as learner.ndims ({self.ndim})", + f" same length as learner.ndims ({self.ndim})" + ) + raise ValueError( + msg, ) data = [(*x, y) for x, y in self.data.items()] - df = pandas.DataFrame(data, columns=[*point_names, value_name]) + df = pd.DataFrame(data, columns=[*point_names, value_name]) df.attrs["inputs"] = list(point_names) df.attrs["output"] = value_name if with_default_function_args: @@ -462,12 +468,12 @@ def to_dataframe( # type: ignore[override] def load_dataframe( # type: ignore[override] self, - df: pandas.DataFrame, + df: pd.DataFrame, with_default_function_args: bool = True, function_prefix: str = "function.", point_names: tuple[str, ...] = ("x", "y", "z"), value_name: str = "value", - ): + ) -> None: """Load data from a `pandas.DataFrame`. 

         If ``with_default_function_args`` is True, then ``learner.function``'s
@@ -560,6 +566,8 @@ def tell(self, point, value):
                 simplex = None
             to_delete, to_add = tri.add_point(point, simplex, transform=self._transform)
             self._update_losses(to_delete, to_add)
+            return None
+        return None

     def _simplex_exists(self, simplex):
         simplex = tuple(sorted(simplex))
@@ -575,7 +583,7 @@ def inside_bounds(self, point):
             (mn - eps) <= p <= (mx + eps) for p, (mn, mx) in zip(point, self._bbox)
         )

-    def tell_pending(self, point, *, simplex=None):
+    def tell_pending(self, point, *, simplex=None) -> None:
         point = tuple(point)
         if not self.inside_bounds(point):
             return
@@ -614,7 +622,7 @@ def _try_adding_pending_point_to_simplex(self, point, simplex):
             self._pending_to_simplex[point] = simplex
         return self._subtriangulations[simplex].add_point(point)

-    def _update_subsimplex_losses(self, simplex, new_subsimplices):
+    def _update_subsimplex_losses(self, simplex, new_subsimplices) -> None:
         loss = self._losses[simplex]

         loss_density = loss / self.tri.volume(simplex)
@@ -681,9 +689,12 @@ def _pop_highest_existing_simplex(self):

         # Could not find a simplex, this code should never be reached
         assert self.tri is not None
-        raise AssertionError(
+        msg = (
             "Could not find a simplex to subdivide. Yet there should always"
-            " be a simplex available if LearnerND.tri() is not None.",
+            " be a simplex available if LearnerND.tri() is not None."
+        )
+        raise AssertionError(
+            msg,
         )

     def _ask_best_point(self):
@@ -764,7 +775,7 @@ def _compute_loss(self, simplex):
             ),
         )

-    def _update_losses(self, to_delete: set, to_add: set):
+    def _update_losses(self, to_delete: set, to_add: set) -> None:
         # XXX: add the points outside the triangulation to this as well
         pending_points_unbound = set()

@@ -812,7 +823,7 @@ def _update_losses(self, to_delete: set, to_add: set):
                 self._subtriangulations[simplex].simplices,
             )

-    def _recompute_all_losses(self):
+    def _recompute_all_losses(self) -> None:
         """Recompute all losses and pending losses."""
         # amortized O(N) complexity
         if self.tri is None:
@@ -841,7 +852,7 @@ def _scale(self):
         # get the output scale
         return self._max_value - self._min_value

-    def _update_range(self, new_output):
+    def _update_range(self, new_output) -> bool:
         if self._min_value is None or self._max_value is None:
             # this is the first point, nothing to do, just set the range
             self._min_value = np.min(new_output)
@@ -882,7 +893,7 @@ def loss(self, real: bool = True):
         losses = self._losses if self.tri is not None else {}
         return max(losses.values()) if losses else float("inf")

-    def remove_unfinished(self):
+    def remove_unfinished(self) -> None:
         # XXX: implement this method
         self.pending_points = set()
         self._subtriangulations = {}
@@ -905,14 +916,18 @@ def plot(self, n=None, tri_alpha=0):
         """
         hv = ensure_holoviews()
         if self.vdim > 1:
+            msg = "holoviews currently does not support"
             raise NotImplementedError(
-                "holoviews currently does not support",
+                msg,
                 "3D surface plots in bokeh.",
             )
         if self.ndim != 2:
-            raise NotImplementedError(
+            msg = (
                 "Only 2D plots are implemented: You can "
-                "plot a 2D slice with 'plot_slice'.",
+                "plot a 2D slice with 'plot_slice'."
+            )
+            raise NotImplementedError(
+                msg,
             )
         x, y = self._bbox
         lbrt = x[0], y[0], x[1], y[1]
@@ -973,8 +988,9 @@ def plot_slice(self, cut_mapping, n=None):
         if not self.data:
             return hv.Scatter([]) * hv.Path([])
         elif self.vdim > 1:
+            msg = "multidimensional output not yet supported by `plot_slice`"
             raise NotImplementedError(
-                "multidimensional output not yet supported by `plot_slice`",
+                msg,
             )
         n = n or 201
         values = [
@@ -993,8 +1009,9 @@ def plot_slice(self, cut_mapping, n=None):

         elif plot_dim == 2:
             if self.vdim > 1:
+                msg = "holoviews currently does not support 3D surface plots in bokeh."
                 raise NotImplementedError(
-                    "holoviews currently does not support 3D surface plots in bokeh.",
+                    msg,
                 )
             if n is None:
                 # Calculate how many grid points are needed.
@@ -1025,7 +1042,8 @@ def plot_slice(self, cut_mapping, n=None):
             return im.opts(cmap="viridis")

         else:
-            raise ValueError("Only 1 or 2-dimensional plots can be generated.")
+            msg = "Only 1 or 2-dimensional plots can be generated."
+            raise ValueError(msg)

     def plot_3D(self, with_triangulation=False, return_fig=False):
         """Plot the learner's data in 3D using plotly.
@@ -1116,16 +1134,20 @@ def plot_3D(self, with_triangulation=False, return_fig=False):
     def _get_iso(self, level=0.0, which="surface"):
         if which == "surface":
             if self.ndim != 3 or self.vdim != 1:
-                raise Exception(
+                msg = (
                     "Isosurface plotting is only supported"
-                    " for a 3D input and 1D output",
+                    " for a 3D input and 1D output"
+                )
+                raise Exception(
+                    msg,
                 )
             get_surface = True
             get_line = False
         elif which == "line":
             if self.ndim != 2 or self.vdim != 1:
+                msg = "Isoline plotting is only supported for a 2D input and 1D output"
                 raise Exception(
-                    "Isoline plotting is only supported for a 2D input and 1D output",
+                    msg,
                 )
             get_surface = False
             get_line = True
@@ -1176,10 +1198,13 @@ def _get_vertex_index(a, b):
             r_min = min(self.data[v] for v in self.tri.vertices)
             r_max = max(self.data[v] for v in self.tri.vertices)

-            raise ValueError(
+            msg = (
                 f"Could not draw isosurface for level={level}, as"
                 " this value is not inside the function range. Please choose"
-                f" a level strictly inside interval ({r_min}, {r_max})",
+                f" a level strictly inside interval ({r_min}, {r_max})"
+            )
+            raise ValueError(
+                msg,
             )

         return vertices, faces_or_lines
@@ -1207,10 +1232,7 @@ def plot_isoline(self, level=0.0, n=None, tri_alpha=0):
         """
         hv = ensure_holoviews()

-        if n == -1:
-            plot = hv.Path([])
-        else:
-            plot = self.plot(n=n, tri_alpha=tri_alpha)
+        plot = hv.Path([]) if n == -1 else self.plot(n=n, tri_alpha=tri_alpha)

         if isinstance(level, Iterable):
             for lvl in level:
@@ -1320,6 +1342,6 @@ def _get_plane_color(simplex):
     def _get_data(self):
         return deepcopy(self.__dict__)

-    def _set_data(self, state):
+    def _set_data(self, state) -> None:
         for k, v in state.items():
             setattr(self, k, v)
diff --git a/adaptive/learner/sequence_learner.py b/adaptive/learner/sequence_learner.py
index c307744fd..3fa2edd73 100644
--- a/adaptive/learner/sequence_learner.py
+++ b/adaptive/learner/sequence_learner.py
@@ -19,9 +19,9 @@
     from collections.abc import Sequence
     from typing import Callable

-try:
-    import pandas
+    import pandas as pd

+try:
     with_pandas = True

 except ModuleNotFoundError:
@@ -45,7 +45,7 @@ class _IgnoreFirstArgument:
     pickable.
""" - def __init__(self, function): + def __init__(self, function) -> None: self.function = function def __call__(self, index_point: PointType, *args, **kwargs): @@ -84,13 +84,14 @@ class SequenceLearner(BaseLearner): From primitive tests, the `~adaptive.SequenceLearner` appears to have a similar performance to `ipyparallel`\s ``load_balanced_view().map``. With the added benefit of having results in the local kernel already. + """ def __init__( self, function: Callable[[Any], Any], sequence: Sequence[Any], - ): + ) -> None: self._original_function = function self.function = _IgnoreFirstArgument(function) # prefer range(len(...)) over enumerate to avoid slowdowns @@ -107,7 +108,9 @@ def new(self) -> SequenceLearner: return SequenceLearner(self._original_function, self.sequence) def ask( - self, n: int, tell_pending: bool = True + self, + n: int, + tell_pending: bool = True, ) -> tuple[list[PointType], list[float]]: indices = [] points: list[PointType] = [] @@ -156,7 +159,8 @@ def done(self) -> bool: def result(self) -> list[Any]: """Get the function values in the same order as ``sequence``.""" if not self.done(): - raise Exception("Learner is not yet complete.") + msg = "Learner is not yet complete." + raise Exception(msg) return list(self.data.values()) @property @@ -172,7 +176,7 @@ def to_dataframe( # type: ignore[override] y_name: str = "y", *, full_sequence: bool = False, - ) -> pandas.DataFrame: + ) -> pd.DataFrame: """Return the data as a `pandas.DataFrame`. Parameters @@ -201,9 +205,11 @@ def to_dataframe( # type: ignore[override] ------ ImportError If `pandas` is not installed. + """ if not with_pandas: - raise ImportError("pandas is not installed.") + msg = "pandas is not installed." + raise ImportError(msg) import pandas as pd if full_sequence: @@ -214,7 +220,7 @@ def to_dataframe( # type: ignore[override] indices, ys = zip(*self.data.items()) if self.data else ([], []) # type: ignore[assignment] sequence = [self.sequence[i] for i in indices] - df = pandas.DataFrame(indices, columns=[index_name]) + df = pd.DataFrame(indices, columns=[index_name]) df[x_name] = sequence df[y_name] = ys df.attrs["inputs"] = [index_name] @@ -225,7 +231,7 @@ def to_dataframe( # type: ignore[override] def load_dataframe( # type: ignore[override] self, - df: pandas.DataFrame, + df: pd.DataFrame, with_default_function_args: bool = True, function_prefix: str = "function.", index_name: str = "i", @@ -233,7 +239,7 @@ def load_dataframe( # type: ignore[override] y_name: str = "y", *, full_sequence: bool = False, - ): + ) -> None: """Load data from a `pandas.DataFrame`. If ``with_default_function_args`` is True, then ``learner.function``'s @@ -257,9 +263,11 @@ def load_dataframe( # type: ignore[override] The ``y_name`` used in ``to_dataframe``, by default "y" full_sequence : bool, optional The ``full_sequence`` used in ``to_dataframe``, by default False + """ if not with_pandas: - raise ImportError("pandas is not installed.") + msg = "pandas is not installed." 
@@ -172,7 +176,7 @@ def to_dataframe(  # type: ignore[override]
         y_name: str = "y",
         *,
         full_sequence: bool = False,
-    ) -> pandas.DataFrame:
+    ) -> pd.DataFrame:
         """Return the data as a `pandas.DataFrame`.
 
         Parameters
@@ -201,9 +205,11 @@ def to_dataframe(  # type: ignore[override]
         ------
         ImportError
             If `pandas` is not installed.
+
         """
         if not with_pandas:
-            raise ImportError("pandas is not installed.")
+            msg = "pandas is not installed."
+            raise ImportError(msg)
         import pandas as pd
 
         if full_sequence:
@@ -214,7 +220,7 @@ def to_dataframe(  # type: ignore[override]
             indices, ys = zip(*self.data.items()) if self.data else ([], [])  # type: ignore[assignment]
             sequence = [self.sequence[i] for i in indices]
 
-        df = pandas.DataFrame(indices, columns=[index_name])
+        df = pd.DataFrame(indices, columns=[index_name])
         df[x_name] = sequence
         df[y_name] = ys
         df.attrs["inputs"] = [index_name]
@@ -225,7 +231,7 @@ def to_dataframe(  # type: ignore[override]
 
     def load_dataframe(  # type: ignore[override]
         self,
-        df: pandas.DataFrame,
+        df: pd.DataFrame,
         with_default_function_args: bool = True,
         function_prefix: str = "function.",
         index_name: str = "i",
@@ -233,7 +239,7 @@ def load_dataframe(  # type: ignore[override]
         y_name: str = "y",
         *,
         full_sequence: bool = False,
-    ):
+    ) -> None:
         """Load data from a `pandas.DataFrame`.
 
         If ``with_default_function_args`` is True, then ``learner.function``'s
@@ -257,9 +263,11 @@ def load_dataframe(  # type: ignore[override]
             The ``y_name`` used in ``to_dataframe``, by default "y"
         full_sequence : bool, optional
             The ``full_sequence`` used in ``to_dataframe``, by default False
+
         """
         if not with_pandas:
-            raise ImportError("pandas is not installed.")
+            msg = "pandas is not installed."
+            raise ImportError(msg)
         import pandas as pd
 
         indices = df[index_name].values
@@ -276,7 +284,9 @@ def load_dataframe(  # type: ignore[override]
 
         if with_default_function_args:
             self.function = partial_function_from_dataframe(
-                self._original_function, df, function_prefix
+                self._original_function,
+                df,
+                function_prefix,
             )
 
     def _get_data(self) -> dict[int, Any]:
diff --git a/adaptive/learner/skopt_learner.py b/adaptive/learner/skopt_learner.py
index 173557c31..b9259a941 100644
--- a/adaptive/learner/skopt_learner.py
+++ b/adaptive/learner/skopt_learner.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import collections
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, NoReturn
 
 import numpy as np
 from skopt import Optimizer
@@ -11,7 +11,7 @@
 from adaptive.utils import cache_latest
 
 if TYPE_CHECKING:
-    import pandas
+    import pandas as pd
 
 
 class SKOptLearner(Optimizer, BaseLearner):
@@ -30,7 +30,7 @@ class SKOptLearner(Optimizer, BaseLearner):
 
     """
 
-    def __init__(self, function, **kwargs):
+    def __init__(self, function, **kwargs) -> None:
         self.function = function
         self.pending_points = set()
         self.data = collections.OrderedDict()
@@ -41,7 +41,7 @@ def new(self) -> SKOptLearner:
         """Return a new `~adaptive.SKOptLearner` without the data."""
         return SKOptLearner(self.function, **self._kwargs)
 
-    def tell(self, x, y, fit=True):
+    def tell(self, x, y, fit=True) -> None:
         if isinstance(x, collections.abc.Iterable):
             self.pending_points.discard(tuple(x))
             self.data[tuple(x)] = y
@@ -51,12 +51,12 @@ def tell(self, x, y, fit=True):
             self.data[x] = y
             super().tell([x], y, fit)
 
-    def tell_pending(self, x):
+    def tell_pending(self, x) -> None:
         # 'skopt.Optimizer' takes care of points we
         # have not got results for.
         self.pending_points.add(tuple(x))
 
-    def remove_unfinished(self):
+    def remove_unfinished(self) -> None:
         pass
 
     @cache_latest
@@ -72,9 +72,12 @@ def loss(self, real: bool = True):
 
     def ask(self, n, tell_pending=True):
         if not tell_pending:
-            raise NotImplementedError(
+            msg = (
                 "Asking points is an irreversible "
-                "action, so use `ask(n, tell_pending=True`.",
+                "action, so use `ask(n, tell_pending=True)`."
+            )
+            raise NotImplementedError(
+                msg,
             )
         points = super().ask(n)
         # TODO: Choose a better estimate for the loss improvement.
@@ -91,7 +94,8 @@ def npoints(self):
     def plot(self, nsamples=200):
         hv = ensure_holoviews()
         if self.space.n_dims > 1:
-            raise ValueError("Can only plot 1D functions")
+            msg = "Can only plot 1D functions"
+            raise ValueError(msg)
         bounds = self.space.bounds[0]
         if not self.Xi:
             p = hv.Scatter([]) * hv.Curve([]) * hv.Area([])
@@ -124,7 +128,7 @@ def plot(self, nsamples=200):
     def _get_data(self):
         return [x[0] for x in self.Xi], self.yi
 
-    def _set_data(self, data):
+    def _set_data(self, data) -> None:
         xs, ys = data
         self.tell_many(xs, ys)
 
@@ -134,7 +138,7 @@ def to_dataframe(  # type: ignore[override]
         function_prefix: str = "function.",
         seed_name: str = "seed",
         y_name: str = "y",
-    ) -> pandas.DataFrame:
+    ) -> pd.DataFrame:
         """Return the data as a `pandas.DataFrame`.
 
         Parameters
@@ -161,12 +165,12 @@ def to_dataframe(  # type: ignore[override]
 
     def load_dataframe(  # type: ignore[override]
         self,
-        df: pandas.DataFrame,
+        df: pd.DataFrame,
         with_default_function_args: bool = True,
         function_prefix: str = "function.",
         seed_name: str = "seed",
         y_name: str = "y",
-    ):
+    ) -> NoReturn:
         """Load data from a `pandas.DataFrame`.
 
         If ``with_default_function_args`` is True, then ``learner.function``'s
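For context on the `**kwargs` forwarded to `skopt.Optimizer` in `__init__` above, a sketch adapted from the adaptive tutorial (requires scikit-optimize; the noisy function and the estimator settings are illustrative):

    import numpy as np

    import adaptive


    def g(x, noise_level=0.1):
        return np.sin(5 * x) * (1 - np.tanh(x**2)) + np.random.randn() * noise_level


    learner = adaptive.SKOptLearner(
        g,
        dimensions=[(-2.0, 2.0)],  # passed straight through to skopt.Optimizer
        base_estimator="GP",
        acq_func="gp_hedge",
    )
    adaptive.runner.simple(learner, goal=lambda lrn: lrn.npoints >= 40)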
diff --git a/adaptive/learner/triangulation.py b/adaptive/learner/triangulation.py
index 03455e3b7..00c7185eb 100644
--- a/adaptive/learner/triangulation.py
+++ b/adaptive/learner/triangulation.py
@@ -2,6 +2,7 @@
 from collections.abc import Iterable, Sized
 from itertools import chain, combinations
 from math import factorial, sqrt
+from typing import NoReturn
 
 import scipy.spatial
 from numpy import abs as np_abs
@@ -67,7 +68,7 @@ def point_in_simplex(point, simplex, eps=1e-8):
 
 
 def fast_2d_circumcircle(points):
-    """Compute the center and radius of the circumscribed circle of a triangle
+    """Compute the center and radius of the circumscribed circle of a triangle.
 
     Parameters
     ----------
@@ -78,6 +79,7 @@ def fast_2d_circumcircle(points):
     -------
     tuple
         (center point : tuple(float), radius: float)
+
     """
     points = array(points)
     # transform to relative coordinates
@@ -114,6 +116,7 @@ def fast_3d_circumcircle(points):
     -------
     tuple
         (center point : tuple(float), radius: float)
+
     """
     points = array(points)
     pts = points[1:] - points[0]
@@ -171,8 +174,8 @@ def circumsphere(pts):
     Will fail for matrices which are not (N-dim + 1, N-dim) in size due to
     non-square determinants: will raise numpy.linalg.LinAlgError.
     May fail for points that are integers (due to 32bit integer overflow).
-    """
 
+    """
     dim = len(pts) - 1
     if dim == 2:
         return fast_2d_circumcircle(pts)
@@ -219,6 +222,7 @@ def orientation(face, origin):
     If two points lie on the same side of the face, the orientation will
     be equal, if they lie on the other side of the face, it will be negated.
+
     """
     vectors = array(face)
     sign, logdet = slogdet(vectors - origin)
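The circumcircle helpers touched above are pure functions, so the docstring fixes are easy to sanity-check against elementary geometry (a sketch; the triangle is illustrative):

    from adaptive.learner.triangulation import circumsphere, fast_2d_circumcircle

    # For a right triangle the circumcenter is the midpoint of the hypotenuse.
    center, radius = fast_2d_circumcircle([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0)])
    print(center)  # (0.5, 0.5)
    print(radius)  # sqrt(0.5) ~= 0.7071

    # circumsphere dispatches to the fast 2D/3D implementations when it can.
    assert circumsphere([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0)]) == (center, radius)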
+ """ # Implements http://mathworld.wolfram.com/Cayley-MengerDeterminant.html # Modified from https://codereview.stackexchange.com/questions/77593/calculating-the-volume-of-a-tetrahedron @@ -280,7 +285,8 @@ def simplex_volume_in_embedding(vertices) -> float: if vol_square < 0: if vol_square > -1e-15: return 0 - raise ValueError("Provided vertices do not form a simplex") + msg = "Provided vertices do not form a simplex" + raise ValueError(msg) return sqrt(vol_square) @@ -310,35 +316,45 @@ class Triangulation: ValueError if the list of coordinates is incorrect or the points do not form one or more simplices in the + """ - def __init__(self, coords): + def __init__(self, coords) -> None: if not is_iterable_and_sized(coords): - raise TypeError("Please provide a 2-dimensional list of points") + msg = "Please provide a 2-dimensional list of points" + raise TypeError(msg) coords = list(coords) if not all(is_iterable_and_sized(coord) for coord in coords): - raise TypeError("Please provide a 2-dimensional list of points") + msg = "Please provide a 2-dimensional list of points" + raise TypeError(msg) if len(coords) == 0: - raise ValueError("Please provide at least one simplex") + msg = "Please provide at least one simplex" + raise ValueError(msg) # raise now because otherwise the next line will raise a less dim = len(coords[0]) if any(len(coord) != dim for coord in coords): - raise ValueError("Coordinates dimension mismatch") + msg = "Coordinates dimension mismatch" + raise ValueError(msg) if dim == 1: - raise ValueError("Triangulation class only supports dim >= 2") + msg = "Triangulation class only supports dim >= 2" + raise ValueError(msg) if len(coords) < dim + 1: - raise ValueError("Please provide at least one simplex") + msg = "Please provide at least one simplex" + raise ValueError(msg) coords = list(map(tuple, coords)) vectors = subtract(coords[1:], coords[0]) if matrix_rank(vectors) < dim: - raise ValueError( + msg = ( "Initial simplex has zero volumes " "(the points are linearly dependent)" ) + raise ValueError( + msg, + ) self.vertices = list(coords) self.simplices = set() @@ -351,13 +367,13 @@ def __init__(self, coords): for simplex in initial_tri.simplices: self.add_simplex(simplex) - def delete_simplex(self, simplex): + def delete_simplex(self, simplex) -> None: simplex = tuple(sorted(simplex)) self.simplices.remove(simplex) for vertex in simplex: self.vertex_to_simplices[vertex].remove(simplex) - def add_simplex(self, simplex): + def add_simplex(self, simplex) -> None: simplex = tuple(sorted(simplex)) self.simplices.add(simplex) for vertex in simplex: @@ -379,6 +395,7 @@ def get_reduced_simplex(self, point, simplex, eps=1e-8) -> list: vertices : list of ints Indices of vertices of the simplex to which the vertex belongs. An empty list indicates that the vertex is outside the simplex. + """ # XXX: in the end we want to lose this method if len(simplex) != self.dim + 1: @@ -421,7 +438,8 @@ def faces(self, dim=None, simplices=None, vertices=None): dim = self.dim if simplices is not None and vertices is not None: - raise ValueError("Only one of simplices and vertices is allowed.") + msg = "Only one of simplices and vertices is allowed." 
@@ -421,7 +438,8 @@ def faces(self, dim=None, simplices=None, vertices=None):
             dim = self.dim
 
         if simplices is not None and vertices is not None:
-            raise ValueError("Only one of simplices and vertices is allowed.")
+            msg = "Only one of simplices and vertices is allowed."
+            raise ValueError(msg)
         if vertices is not None:
             vertices = set(vertices)
             simplices = chain(*(self.vertex_to_simplices[i] for i in vertices))
@@ -476,7 +494,8 @@ def _extend_hull(self, new_vertex, eps=1e-8):
                 self.simplices.remove(tri)
             del self.vertex_to_simplices[pt_index]
             del self.vertices[pt_index]
-            raise ValueError("Candidate vertex is inside the hull.")
+            msg = "Candidate vertex is inside the hull."
+            raise ValueError(msg)
 
         return new_simplices
 
@@ -492,6 +511,7 @@ def circumscribed_circle(self, simplex, transform):
         -------
         tuple (center point, radius)
             The center and radius of the circumscribed circle
+
         """
         pts = dot(self.get_vertices(simplex), transform)
         return circumsphere(pts)
@@ -526,6 +546,7 @@ def bowyer_watson(self, pt_index, containing_simplex=None, transform=None):
             Simplices that have been deleted
         new_simplices : set of tuples
             Simplices that have been added
+
         """
         queue = set()
         done_simplices = set()
@@ -577,7 +598,8 @@ def _relative_volume(self, simplex):
         distance of its vertices. The advantage of this is that the relative
         volume is only dependent on the shape of the simplex and not on the
         absolute size. Due to the weird scaling, the only use of this method
-        is to check that a simplex is almost flat."""
+        is to check that a simplex is almost flat.
+        """
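`bowyer_watson` above is what `add_point` (next hunk) ultimately calls, and the sets it returns make the retriangulation visible. A sketch, with an interior point chosen for illustration:

    from adaptive.learner.triangulation import Triangulation

    tri = Triangulation([(0, 0), (1, 0), (0, 1)])
    # Inserting an interior point replaces the containing triangle with
    # three triangles that all share the new vertex.
    deleted, added = tri.add_point((0.2, 0.2))
    print(len(deleted), len(added))  # 1 3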
@@ -597,6 +619,7 @@ def add_point(self, point, simplex=None, transform=None):
             Simplex containing the point. Empty tuple indicates points outside
             the hull. If not provided, the algorithm costs O(N), so this should
             be used whenever possible.
+
         """
         point = tuple(point)
         if simplex is None:
@@ -610,27 +633,30 @@ def add_point(self, point, simplex=None, transform=None):
             pt_index = len(self.vertices) - 1
             deleted_simplices, added_simplices = self.bowyer_watson(
-                pt_index, transform=transform
+                pt_index,
+                transform=transform,
             )
 
             deleted = deleted_simplices - temporary_simplices
             added = added_simplices | (temporary_simplices - deleted_simplices)
             return deleted, added
-        else:
-            reduced_simplex = self.get_reduced_simplex(point, simplex)
-            if not reduced_simplex:
-                self.vertex_to_simplices.pop()  # revert adding vertex
-                raise ValueError("Point lies outside of the specified simplex.")
-            else:
-                simplex = reduced_simplex
+
+        reduced_simplex = self.get_reduced_simplex(point, simplex)
+        if not reduced_simplex:
+            self.vertex_to_simplices.pop()  # revert adding vertex
+            msg = "Point lies outside of the specified simplex."
+            raise ValueError(msg)
+
+        simplex = reduced_simplex
 
         if len(simplex) == 1:
             self.vertex_to_simplices.pop()  # revert adding vertex
-            raise ValueError("Point already in triangulation.")
-        else:
-            pt_index = len(self.vertices)
-            self.vertices.append(point)
-            return self.bowyer_watson(pt_index, actual_simplex, transform)
+            msg = "Point already in triangulation."
+            raise ValueError(msg)
+
+        pt_index = len(self.vertices)
+        self.vertices.append(point)
+        return self.bowyer_watson(pt_index, actual_simplex, transform)
 
     def volume(self, simplex):
         prefactor = factorial(self.dim)
@@ -641,7 +667,7 @@ def volume(self, simplex):
     def volumes(self):
         return [self.volume(sim) for sim in self.simplices]
 
-    def reference_invariant(self):
+    def reference_invariant(self) -> bool:
         """vertex_to_simplices and simplices are compatible."""
         for vertex in range(len(self.vertices)):
             if any(vertex not in tri for tri in self.vertex_to_simplices[vertex]):
@@ -651,7 +677,7 @@ def reference_invariant(self):
                 return False
         return True
 
-    def vertex_invariant(self, vertex):
+    def vertex_invariant(self, vertex) -> NoReturn:
         """Simplices originating from a vertex don't overlap."""
         raise NotImplementedError
 
@@ -671,7 +697,8 @@ def get_simplices_attached_to_points(self, indices):
 
     def get_opposing_vertices(self, simplex):
         if simplex not in self.simplices:
-            raise ValueError("Provided simplex is not part of the triangulation")
+            msg = "Provided simplex is not part of the triangulation"
+            raise ValueError(msg)
         neighbors = self.get_simplices_attached_to_points(simplex)
 
         def find_opposing_vertex(vertex):
@@ -683,8 +710,7 @@ def find_opposing_vertex(vertex):
             assert len(opposing) == 1
             return opposing.pop()
 
-        result = tuple(find_opposing_vertex(v) for v in simplex)
-        return result
+        return tuple(find_opposing_vertex(v) for v in simplex)
 
     @property
     def hull(self):
@@ -700,17 +726,20 @@ def hull(self):
         -------
         hull : set of int
             Vertices in the hull.
+
         """
         counts = Counter(self.faces())
         if any(i > 2 for i in counts.values()):
-            raise RuntimeError(
+            msg = (
                 "Broken triangulation, a (N-1)-dimensional"
-                " appears in more than 2 simplices."
+                " face appears in more than 2 simplices."
             )
+            raise RuntimeError(
+                msg,
+            )
 
-        hull = {point for face, count in counts.items() if count == 1 for point in face}
-        return hull
+        return {point for face, count in counts.items() if count == 1 for point in face}
 
-    def convex_invariant(self, vertex):
+    def convex_invariant(self, vertex) -> NoReturn:
         """Hull is convex."""
         raise NotImplementedError
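The `hull` property rewritten above counts (N-1)-dimensional faces; a boundary face belongs to exactly one simplex. A quick check on a small mesh (sketch):

    from adaptive.learner.triangulation import Triangulation

    tri = Triangulation([(0, 0), (1, 0), (0, 1), (1, 1)])
    # Every corner of the square lies on the boundary, so all four
    # vertices show up in the hull.
    print(sorted(tri.hull))  # [0, 1, 2, 3]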
+ """ if not _holoviews_enabled: - raise RuntimeError( + msg = ( "Live plotting is not enabled; did you run " "'adaptive.notebook_extension()'?" ) + raise RuntimeError( + msg, + ) import holoviews as hv import ipywidgets @@ -150,15 +159,16 @@ def plot_generator(): dm = dm.map(lambda obj: obj.opts(framewise=True), hv.Element) cancel_button = ipywidgets.Button( - description="cancel live-plot", layout=ipywidgets.Layout(width="150px") + description="cancel live-plot", + layout=ipywidgets.Layout(width="150px"), ) # Could have used dm.periodic in the following, but this would either spin # off a thread (and learner is not threadsafe) or block the kernel. - async def updater(): + async def updater() -> None: event = lambda: hv.streams.Stream.trigger( # noqa: E731 - dm.streams + dm.streams, ) # XXX: used to be dm.event() # see https://github.com/pyviz/holoviews/issues/3564 try: @@ -171,7 +181,7 @@ async def updater(): active_plotting_tasks.pop(name, None) cancel_button.layout.display = "none" # remove cancel button - def cancel(_): + def cancel(_) -> None: with suppress(KeyError): active_plotting_tasks[name].cancel() @@ -200,17 +210,20 @@ def should_update(status): return True -def live_info(runner, *, update_interval=0.5): +def live_info(runner, *, update_interval=0.5) -> None: """Display live information about the runner. Returns an interactive ipywidget that can be visualized in a Jupyter notebook. """ if not _holoviews_enabled: - raise RuntimeError( + msg = ( "Live plotting is not enabled; did you run " "'adaptive.notebook_extension()'?" ) + raise RuntimeError( + msg, + ) import ipywidgets from IPython.display import display @@ -218,11 +231,12 @@ def live_info(runner, *, update_interval=0.5): status = ipywidgets.HTML(value=_info_html(runner)) cancel = ipywidgets.Button( - description="cancel runner", layout=ipywidgets.Layout(width="100px") + description="cancel runner", + layout=ipywidgets.Layout(width="100px"), ) cancel.on_click(lambda _: runner.cancel()) - async def update(): + async def update() -> None: while not runner.task.done(): await asyncio.sleep(update_interval) @@ -239,7 +253,7 @@ async def update(): display(ipywidgets.VBox((status, cancel))) -def _table_row(i, key, value): +def _table_row(i, key, value) -> str: """Style the rows of a table. Based on the default Jupyterlab table style.""" style = "text-align: right; padding: 0.5em 0.5em; line-height: 1.0;" if i % 2 == 1: @@ -247,7 +261,7 @@ def _table_row(i, key, value): return f'