diff --git a/docs/code/sampling/theta_criterion/README.rst b/docs/code/sampling/theta_criterion/README.rst
new file mode 100644
index 000000000..13ac8db64
--- /dev/null
+++ b/docs/code/sampling/theta_criterion/README.rst
@@ -0,0 +1,2 @@
+Theta Criterion PCE Examples
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/docs/code/sampling/theta_criterion/pce_theta_criterion.py b/docs/code/sampling/theta_criterion/pce_theta_criterion.py
new file mode 100644
index 000000000..ef37babd4
--- /dev/null
+++ b/docs/code/sampling/theta_criterion/pce_theta_criterion.py
@@ -0,0 +1,246 @@
+"""
+Polynomial Chaos Expansion example: Active Learning for Multiple Surrogate Models
+======================================================================================
+
+In this example, we use active learning to construct an optimal experimental design with respect to exploration of
+the design domain and exploitation of given surrogate models in the form of Polynomial Chaos Expansions (PCE). Active learning is based on the :math:`\Theta` criterion recently proposed in
+
+L. Novák, M. Vořechovský, V. Sadílek, M. D. Shields, *Variance-based adaptive sequential sampling for polynomial chaos expansion*, Computer Methods in Applied Mechanics and Engineering 386 (2021) 114105. doi:10.1016/j.cma.2021.114105.
+"""

# %% md
# We start with the necessary imports.

# %%

import numpy as np
import matplotlib.pyplot as plt
from UQpy.sampling.ThetaCriterionPCE import ThetaCriterionPCE

from UQpy.distributions import Uniform, JointIndependent, Normal
from UQpy.surrogates import *

from UQpy.sampling import LatinHypercubeSampling
from UQpy.sampling.stratified_sampling.latin_hypercube_criteria import *


# %% md
# The example involves a :math:`2D` function with mirrored quarter-circle arc line singularities. The form of the function is given by:
#
# .. math:: f(\mathbf{X})= \frac{1}{ \lvert 0.3-X_1^2 - X_2^2\rvert + \delta}- \frac{1}{ \lvert 0.3-(1-X_1)^2 - (1-X_2)^2\rvert + \delta}, \quad \mathbf{X} \sim \mathcal{U}[0,1]^2
#
# where the strength of the singularities is controlled by the parameter :math:`\delta`, which we set as :math:`\delta=0.01`.
#
#

# %%


def Model2DComplete(X, delta=0.01):
    Y = Model2D1(X, delta) + Model2D2(X, delta)
    return Y


# %% md
# To demonstrate the possibilities of active learning for multiple surrogate models, we split the function into two parts as follows:
#
# .. math:: f_1(\mathbf{X})= \begin{cases} \frac{1}{ \lvert 0.3-X_1^2 - X_2^2\rvert + \delta}-\frac{1}{ \lvert 0.3-(1-X_1)^2 - (1-X_2)^2\rvert + \delta} \quad \text{for} \quad X_1 \geq X_2\\ 0 \quad \text{otherwise} \end{cases}
#
# .. math:: f_2(\mathbf{X})= \begin{cases} \frac{1}{ \lvert 0.3-X_1^2 - X_2^2\rvert + \delta}-\frac{1}{ \lvert 0.3-(1-X_1)^2 - (1-X_2)^2\rvert + \delta} \quad \text{for} \quad X_1 \leq X_2\\ 0 \quad \text{otherwise} \end{cases}

# %%


def Model2D1(X, delta=0.01):
    M = X[:, 0] < X[:, 1]
    Y = 1 / (np.abs(0.3 - X[:, 0] ** 2 - X[:, 1] ** 2) + delta) - 1 / (
            np.abs(0.3 - (1 - X[:, 0]) ** 2 - (1 - X[:, 1]) ** 2) + delta)
    Y[M] = 0
    return Y


def Model2D2(X, delta=0.01):
    M = X[:, 0] > X[:, 1]
    Y = 1 / (np.abs(0.3 - X[:, 0] ** 2 - X[:, 1] ** 2) + delta) - 1 / (
            np.abs(0.3 - (1 - X[:, 0]) ** 2 - (1 - X[:, 1]) ** 2) + delta)
    Y[M] = 0
    return Y

# %% md
# The mathematical models have independent random inputs, which are uniformly distributed in the interval :math:`[0, 1]`.

# %%


# input distributions
dist1 = Uniform(loc=0, scale=1)
dist2 = Uniform(loc=0, scale=1)

marg = [dist1, dist2]
joint = JointIndependent(marginals=marg)
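
# %% md
# Before constructing the surrogates, it can help to visualize the singular behaviour described above. The following
# optional cell is a small sketch that evaluates the complete model on a regular grid and plots its contours; it only
# relies on the :code:`Model2DComplete` function and :code:`matplotlib` imported above, and the grid resolution is an
# arbitrary choice.

# %%


# evaluate the complete model on a regular grid (visual check only)
n_grid = 200
x1_grid, x2_grid = np.meshgrid(np.linspace(0, 1, n_grid), np.linspace(0, 1, n_grid))
grid_points = np.column_stack((x1_grid.ravel(), x2_grid.ravel()))
y_grid = Model2DComplete(grid_points).reshape(n_grid, n_grid)

fig, ax = plt.subplots(figsize=(6, 6))
contours = ax.contourf(x1_grid, x2_grid, y_grid, levels=50)
fig.colorbar(contours)
ax.set_xlabel(r'$X_1$')
ax.set_ylabel(r'$X_2$')
ax.set_title(r'Complete model $f(X_1, X_2)$')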
# %% md
# We must now select a polynomial basis. Here we opt for a total-degree (TD) basis, such that the univariate polynomials have a maximum degree equal to :math:`P` and all multivariate polynomials have a total degree (sum of the degrees of the corresponding univariate polynomials) at most equal to :math:`P`. The size of the basis is then given by
#
# .. math:: \frac{(N+P)!}{N! P!}
#
# where :math:`N` is the number of random inputs (here, :math:`N=2`).
#

# %%


# maximum polynomial degree
P = 10
# construct total-degree polynomial basis and use OLS for estimation of coefficients
polynomial_basis = TotalDegreeBasis(joint, P)

# %% md
# We must now compute the PCE coefficients. For that we first need a training sample of input random variable realizations and the corresponding model outputs. These two data sets form what is also known as an "experimental design" (ED). In the case of adaptive construction of a PCE by best-model selection, the size of the ED is given a priori and the most suitable basis functions are selected adaptively. Here we have two surrogate models sharing an identical training sample of the input random vector, with two sets of corresponding model outputs. This small initial ED will be further extended by the active learning algorithm.

# %%


# number of initial training samples
sample_size = 15

# realizations of input random vector
xx_train = joint.rvs(sample_size)

# corresponding model outputs
yy_train1 = Model2D1(xx_train)
yy_train2 = Model2D2(xx_train)

# %% md
# We now fit the PCE coefficients by solving a regression problem. Here we opt for the :code:`np.linalg.lstsq` method, which is based on the :code:`dgelsd` solver of LAPACK. This original PCE, containing all basis functions generated by the TD algorithm, is then used to select the best basis functions: the number of basis functions is reduced by the least angle regression (LAR) algorithm. We create sparse PCE approximations for both mathematical models as follows.

# %%


least_squares = LeastSquareRegression()

# fit model 1
pce1 = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=least_squares)
pce1.fit(xx_train, yy_train1)
pceLAR1 = polynomial_chaos.regressions.LeastAngleRegression.model_selection(pce1)

# fit model 2
pce2 = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=least_squares)
pce2.fit(xx_train, yy_train2)
pceLAR2 = polynomial_chaos.regressions.LeastAngleRegression.model_selection(pce2)
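
# %% md
# As an optional sanity check, we can compare the number of basis functions predicted by the formula above,
# :math:`\binom{N+P}{P}`, with the size of the constructed basis, and see how many terms the LAR model selection
# retained. This assumes that the basis object exposes its list of polynomials as :code:`polynomials` and that a
# fitted PCE exposes its coefficient vector as :code:`coefficients`, as used elsewhere in :py:mod:`UQpy`.

# %%


from math import comb

# theoretical cardinality of the total-degree basis for N=2, P=10
print('Theoretical basis size:', comb(2 + P, P))
# size of the constructed basis and of the LAR-reduced expansions
print('Constructed basis size:', len(polynomial_basis.polynomials))
print('Basis functions retained by LAR (model 1):', len(pceLAR1.coefficients))
print('Basis functions retained by LAR (model 2):', len(pceLAR2.coefficients))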

# %% md
# The active learning algorithm based on the :math:`\Theta` criterion selects the best sample from a given large set of candidates covering the design domain uniformly. Here we set the number of candidates to :math:`n_{cand}=10^4` and use MaxiMin LHS to generate them, though any sampling technique can be employed.

# %%


# number of candidates for the active learning algorithm
n_cand = 10000

# MaxiMin LHS samples uniformly covering the whole input random space
lhs_maximin_cand = LatinHypercubeSampling(distributions=[dist1, dist2],
                                          criterion=MaxiMin(metric=DistanceMetric.CHEBYSHEV),
                                          nsamples=n_cand)

# candidates for active learning
Xaptive = lhs_maximin_cand._samples

# %% md
# We now start the active learning algorithm, iteratively selecting :math:`400` samples one by one. Note that the class :code:`ThetaCriterionPCE` takes a list of surrogate models as input; here we have two PCEs approximating the two mathematical models. In each run, the active learning algorithm selects the best candidate according to the variance-based :math:`\Theta` criterion.

# %%


# total number of added points by the active learning algorithm
naddedsims = 400

# loading of existing ED for both PCEs
Xadapted = xx_train
Yadapted1 = yy_train1
Yadapted2 = yy_train2

# adaptive algorithm and reconstruction of PCE

for i in range(naddedsims):
    # create ThetaCriterion class for active learning
    ThetaSampling = ThetaCriterionPCE([pceLAR1, pceLAR2])

    # find the best candidate according to the given criterion (variance and distance)
    pos = ThetaSampling.run(Xadapted, Xaptive)
    newpointX = np.array([Xaptive[pos, :]])

    newpointres1 = Model2D1(newpointX)
    newpointres2 = Model2D2(newpointX)

    # add the best candidate to the experimental design
    Xadapted = np.append(Xadapted, newpointX, axis=0)
    Yadapted1 = np.r_[Yadapted1, newpointres1]
    Yadapted2 = np.r_[Yadapted2, newpointres2]

    # reconstruct the PCE 1
    pce1.fit(Xadapted, Yadapted1)
    pceLAR1 = polynomial_chaos.regressions.LeastAngleRegression.model_selection(pce1)

    # reconstruct the PCE 2
    pce2.fit(Xadapted, Yadapted2)
    pceLAR2 = polynomial_chaos.regressions.LeastAngleRegression.model_selection(pce2)

    if i % 10 == 0:
        print('\nNumber of added simulations:', i)

# plot final ED
fig, ax_nstd = plt.subplots(figsize=(6, 6))
ax_nstd.plot(Xadapted[:, 0], Xadapted[:, 1], 'ro', label='Adapted ED')
ax_nstd.plot(xx_train[:, 0], xx_train[:, 1], 'bo', label='Original ED')
ax_nstd.set_ylabel(r'$X_2$')
ax_nstd.set_xlabel(r'$X_1$')
ax_nstd.legend(loc='upper left');

# %% md
# For comparison, we also construct a surrogate model of the full original mathematical model and run the same active learning procedure as for the previous reduced models. Note that the final ED for this complete mathematical model should be almost identical to the one obtained in the previous case with the two PCEs approximating the reduced models.
+ +# %% + + +yy_train3 = Model2DComplete(xx_train) +pce3 = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=least_squares) +pce3.fit(xx_train, yy_train3) +pceLAR3 = polynomial_chaos.regressions.LeastAngleRegression.model_selection(pce3) + + + +Xadapted3 = xx_train +Yadapted3 = yy_train3 + +# adaptive algorithm and reconstruction of PCE +for i in range(0, int(400)): + # create ThetaCriterion class for active learning + ThetaSampling_complete = ThetaCriterionPCE([pceLAR3]) + + # find the best candidate according to given criterium (variance and distance) + pos = ThetaSampling_complete.run(Xadapted3, Xaptive) + newpointX = np.array([Xaptive[pos, :]]) + newpointres = Model2DComplete(newpointX) + + # add the best candidate to experimental design + Xadapted3 = np.append(Xadapted3, newpointX, axis=0) + Yadapted3 = np.r_[Yadapted3, newpointres] + + pce3.fit(Xadapted3, Yadapted3) + pceLAR3 = polynomial_chaos.regressions.LeastAngleRegression.model_selection(pce3) + + if i % 10 == 0: + print('\nNumber of added simulations:', i) + +# plot final ED +fig, ax_nstd = plt.subplots(figsize=(6, 6)) +ax_nstd.plot(Xadapted3[:, 0], Xadapted3[:, 1], 'ro', label='Adapted ED') +ax_nstd.plot(xx_train[:, 0], xx_train[:, 1], 'bo', label='Original ED') +ax_nstd.set_ylabel(r'$X_2$') +ax_nstd.set_xlabel(r'$X_1$') +ax_nstd.legend(loc='upper left'); diff --git a/docs/source/conf.py b/docs/source/conf.py index c4230d73b..4d44183fb 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -84,6 +84,7 @@ "../code/sampling/mcmc", "../code/sampling/tempering", "../code/sampling/simplex", + "../code/sampling/theta_criterion", "../code/sampling/true_stratified_sampling", "../code/sampling/refined_stratified_sampling", "../code/inference/mle", @@ -125,6 +126,7 @@ "auto_examples/sampling/mcmc", "auto_examples/sampling/tempering", "auto_examples/sampling/simplex", + "auto_examples/sampling/theta_criterion", "auto_examples/sampling/true_stratified_sampling", "auto_examples/sampling/refined_stratified_sampling", "auto_examples/inference/mle", diff --git a/docs/source/sampling/index.rst b/docs/source/sampling/index.rst index 6dc5fa454..fac6a95b2 100644 --- a/docs/source/sampling/index.rst +++ b/docs/source/sampling/index.rst @@ -17,6 +17,8 @@ The module currently contains the following classes: - :class:`.AdaptiveKriging`: Class generating samples adaptively using a specified Kriging-based learning function in a general Adaptive Kriging-Monte Carlo Sampling (AKMCS) framework +- :class:`.ThetaCriterionPCE`: Active learning for polynomial chaos expansion using Theta criterion balancing between exploration and exploitation. 
+
 
 - :class:`.MCMC`: The goal of Markov Chain Monte Carlo is to draw samples from some probability distribution which is hard to compute
 
 - :class:`.ImportanceSampling`: Importance sampling (IS) is based on the idea of sampling from an alternate distribution and reweighing the samples to be representative of the target distribution
 
@@ -31,6 +33,7 @@ The module currently contains the following classes:
     Refined Stratified Sampling
     Simplex Sampling
     Adaptive Kriging
+    Theta Criterion
     Markov Chain Monte Carlo
     Importance Sampling
 
diff --git a/docs/source/sampling/theta_criterion.rst b/docs/source/sampling/theta_criterion.rst
new file mode 100644
index 000000000..34d243a0e
--- /dev/null
+++ b/docs/source/sampling/theta_criterion.rst
@@ -0,0 +1,32 @@
+Theta Criterion
+---------------
+The technique extends an experimental design one sample at a time, aiming for an optimal sample at each stage of the adaptive sequential
+construction of a surrogate model. The sequential sampling strategy based on the :math:`\Theta` criterion selects points from a pool of candidates
+so as to cover the design domain proportionally to the local variance contribution of each point. The criterion balances exploitation of the
+surrogate model, through a variance density derived analytically from the Polynomial Chaos Expansion, with exploration of the design domain.
+The active learning technique based on the :math:`\Theta` criterion can be combined with an arbitrary sampling technique employed for construction
+of the pool of candidate points. More details can be found in:
+
+L. Novák, M. Vořechovský, V. Sadílek, M. D. Shields, *Variance-based adaptive sequential sampling for polynomial chaos expansion*,
+Computer Methods in Applied Mechanics and Engineering 386 (2021) 114105. doi:10.1016/j.cma.2021.114105
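+
+In the notation of the implementation below, and for a single surrogate model with a scalar output, the criterion can be
+sketched as follows (see the reference above for the general weighted, multi-output form). For a candidate point
+:math:`\mathbf{x}_c` whose nearest existing design point in the standardized input space is :math:`\mathbf{x}_{c^\star}`,
+the selected candidate maximizes
+
+.. math:: \Theta(\mathbf{x}_c) = \sqrt{\sigma^2(\mathbf{x}_c)\,\sigma^2(\mathbf{x}_{c^\star})}\; \lVert \mathbf{x}_c - \mathbf{x}_{c^\star} \rVert^{N},
+
+where :math:`N` is the number of input random variables and the variance density is obtained from the PCE coefficients
+:math:`\beta_{\boldsymbol{\alpha}}`, basis functions :math:`\Psi_{\boldsymbol{\alpha}}` and standardized input PDF
+:math:`p_{\mathbf{X}}` as
+
+.. math:: \sigma^2(\mathbf{x}) = \Big( \sum_{\boldsymbol{\alpha} \neq \mathbf{0}} \beta_{\boldsymbol{\alpha}} \Psi_{\boldsymbol{\alpha}}(\mathbf{x}) \Big)^2 p_{\mathbf{X}}(\mathbf{x}).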
+
+
+ThetaCriterionPCE Class
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+The :class:`.ThetaCriterionPCE` class is imported using the following command:
+
+>>> from UQpy.sampling.ThetaCriterionPCE import ThetaCriterionPCE
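+
+A minimal usage sketch is given below. It assumes that a :class:`.PolynomialChaosExpansion` surrogate ``pce`` has already
+been fitted on an initial experimental design ``x_train`` and that ``x_candidates`` holds a pool of candidate points; the
+variable names are illustrative only. The selected point is then evaluated with the original model and the PCE is refitted:
+
+>>> theta_sampling = ThetaCriterionPCE([pce])
+>>> best = theta_sampling.run(x_train, x_candidates)  # index of the best candidate in the pool
+>>> x_new = x_candidates[[best], :]
+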
+
+
+Methods
+"""""""""""
+.. autoclass:: UQpy.sampling.ThetaCriterionPCE
+    :members: run
+
+
+Examples
+"""""""""""
+
+.. toctree::
+
+   Theta Criterion Examples <../auto_examples/sampling/theta_criterion/index>
diff --git a/src/UQpy/sampling/ThetaCriterionPCE.py b/src/UQpy/sampling/ThetaCriterionPCE.py
new file mode 100644
index 000000000..5a71fbd6f
--- /dev/null
+++ b/src/UQpy/sampling/ThetaCriterionPCE.py
@@ -0,0 +1,110 @@
+import numpy as np
+import UQpy
+from UQpy.surrogates import polynomial_chaos
+from scipy.spatial.distance import cdist
+from beartype import beartype
+
+
+class ThetaCriterionPCE:
+    @beartype
+    def __init__(self, surrogates: list[UQpy.surrogates.polynomial_chaos.PolynomialChaosExpansion]):
+        """
+        Active learning for polynomial chaos expansion using the Theta criterion, balancing exploration and
+        exploitation.
+
+        :param surrogates: list of objects of the :py:mod:`UQpy` :class:`PolynomialChaosExpansion` class
+        """
+
+        self.surrogates = surrogates
+
+    def run(self, existing_samples: np.ndarray, candidate_samples: np.ndarray, nsamples=1, samples_weights=None,
+            candidate_weights=None, pce_weights=None, enable_criterium: bool = False):
+
+        """
+        Execute the :class:`.ThetaCriterionPCE` active learning.
+
+        The :meth:`run` method is the function that performs iterations in the :class:`.ThetaCriterionPCE` class.
+        It can be invoked many times for sequential sampling.
+
+        :param existing_samples: Samples in the existing ED used for construction of the PCEs.
+        :param candidate_samples: Candidate samples from which the Theta criterion selects.
+        :param nsamples: Number of samples selected from the candidate set in a single run of this algorithm.
+        :param samples_weights: Weights associated with the existing samples (e.g. from Coherence Sampling).
+        :param candidate_weights: Weights associated with the candidate samples (e.g. from Coherence Sampling).
+        :param pce_weights: Weights associated with each PCE (e.g. eigenvalues from dimension-reduction techniques).
+        :param enable_criterium: If True, the values of the Theta criterion (variance density, average variance
+         density, geometrical part, total Theta criterion) for all candidates are returned instead of the positions
+         of the best candidates.
+        :return: Position of the best candidate in the candidate set (a list of positions if ``nsamples > 1``).
+         If ``enable_criterium = True``, values of the Theta criterion (variance density, average variance density,
+         geometrical part, total Theta criterion) for all candidates are returned instead of a position.
+        """
+
+        pces = self.surrogates
+
+        npce = len(pces)
+        nsimexisting, nvar = existing_samples.shape
+        nsimcandidate, nvar = candidate_samples.shape
+
+        if samples_weights is None:
+            samples_weights = np.ones(nsimexisting)
+
+        if candidate_weights is None:
+            candidate_weights = np.ones(nsimcandidate)
+
+        if pce_weights is None:
+            pce_weights = np.ones(npce)
+
+        pos = []
+
+        for _ in range(nsamples):
+            S = polynomial_chaos.Polynomials.standardize_sample(existing_samples, pces[0].polynomial_basis.distributions)
+            s_candidate = polynomial_chaos.Polynomials.standardize_sample(candidate_samples,
+                                                                          pces[0].polynomial_basis.distributions)
+
+            # distances between candidates and existing samples in the standardized space
+            lengths = cdist(s_candidate, S)
+            closest_s_position = np.argmin(lengths, axis=1)
+            closest_value_x = existing_samples[closest_s_position]
+            closest_distance = np.nanmin(lengths, axis=1)
+            variance_candidate = 0
+            variance_closest = 0
+
+            for i in range(npce):
+                pce = pces[i]
+                variance_candidatei = self._local_variance(candidate_samples, pce, candidate_weights)
+                variance_closesti = self._local_variance(closest_value_x, pce, samples_weights[closest_s_position])
+
+                variance_candidate = variance_candidate + variance_candidatei * pce_weights[i]
+                variance_closest = variance_closest + variance_closesti * pce_weights[i]
+
+            criterium_v = np.sqrt(variance_candidate * variance_closest)
+            criterium_l = closest_distance ** nvar
+            criterium = criterium_v * criterium_l
+
+            # select the best candidate and add only the newly selected point to the existing samples
+            new_pos = np.argmax(criterium)
+            pos.append(new_pos)
+            existing_samples = np.append(existing_samples, candidate_samples[[new_pos], :], axis=0)
+            samples_weights = np.append(samples_weights, candidate_weights[new_pos])
+
+        if not enable_criterium:
+            if nsamples == 1:
+                pos = pos[0]
+            return pos
+        else:
+            return variance_candidate, criterium_v, criterium_l, criterium
+
+    # calculate variance density of PCE for Theta Criterion
+    @staticmethod
+    def _local_variance(coordinates, pce, weight=1):
+        # work on a copy of the coefficients so the surrogate is not modified; exclude the mean term
+        beta = pce.coefficients.copy()
+        beta[0] = 0
+
+        product = pce.polynomial_basis.evaluate_basis(coordinates)
+
+        product = np.transpose(np.transpose(product) * weight)
+        product = product.dot(beta)
+
+        product = np.sum(product, axis=1)
+
+        product = product ** 2
+        product = product * polynomial_chaos.Polynomials.standardize_pdf(coordinates,
+                                                                         pce.polynomial_basis.distributions)
+
+        return product
diff --git a/src/UQpy/sampling/__init__.py b/src/UQpy/sampling/__init__.py
index d4b2ea7c9..9629c9b48 100644
--- a/src/UQpy/sampling/__init__.py
+++ b/src/UQpy/sampling/__init__.py
@@ -8,3 +8,4 @@
 from UQpy.sampling.MonteCarloSampling import MonteCarloSampling
 from UQpy.sampling.SimplexSampling import SimplexSampling
+from UQpy.sampling.ThetaCriterionPCE import ThetaCriterionPCE
diff --git a/src/UQpy/surrogates/polynomial_chaos/polynomials/baseclass/Polynomials.py b/src/UQpy/surrogates/polynomial_chaos/polynomials/baseclass/Polynomials.py
index 2a3154cb3..2f60144a2 100644
--- a/src/UQpy/surrogates/polynomial_chaos/polynomials/baseclass/Polynomials.py
+++ b/src/UQpy/surrogates/polynomial_chaos/polynomials/baseclass/Polynomials.py
@@ -4,7 +4,7 @@
 import numpy as np
 import scipy.integrate as integrate
 from beartype import beartype
-
+from scipy import stats as stats
 from UQpy.distributions.baseclass import Distribution
 import warnings
@@ -26,6 +26,61 @@ def __init__(self, distributions: Union[Distribution, list[Distribution]], degre
         self.distributions = distributions
         self.degree = degree + 1
 
+    @staticmethod
+    def standardize_sample(x, joint_distribution):
+        """
+        Static method: Standardize data based on the joint probability distribution.
+
+        :param x: Input data generated from a joint probability distribution.
+        :param joint_distribution: Joint probability distribution (a :py:mod:`UQpy` distribution object).
+        :return: Standardized data.
+        """
+
+        s = np.zeros(x.shape)
+        inputs_number = len(x[0, :])
+        if inputs_number == 1:
+            marginals = [joint_distribution]
+        else:
+            marginals = joint_distribution.marginals
+
+        for i in range(inputs_number):
+            if type(marginals[i]) == Normal:
+                s[:, i] = Polynomials.standardize_normal(x[:, i], mean=marginals[i].parameters['loc'],
+                                                         std=marginals[i].parameters['scale'])
+            elif type(marginals[i]) == Uniform:
+                s[:, i] = Polynomials.standardize_uniform(x[:, i], marginals[i])
+            else:
+                raise TypeError("standardize_sample is defined only for Uniform and Gaussian marginal distributions")
+        return s
+
+    @staticmethod
+    def standardize_pdf(x, joint_distribution):
+        """
+        Static method: PDF of the standardized distributions associated with Hermite or Legendre polynomials.
+
+        :param x: Input data generated from a joint probability distribution.
+        :param joint_distribution: Joint probability distribution (a :py:mod:`UQpy` distribution object).
+        :return: Value of the standardized PDF evaluated at x.
+        """
+
+        inputs_number = len(x[0, :])
+        pdf_val = 1
+        s = Polynomials.standardize_sample(x, joint_distribution)
+
+        if inputs_number == 1:
+            marginals = [joint_distribution]
+        else:
+            marginals = joint_distribution.marginals
+
+        for i in range(inputs_number):
+            if type(marginals[i]) == Normal:
+                pdf_val *= (stats.norm.pdf(s[:, i]))
+            elif type(marginals[i]) == Uniform:
+                pdf_val *= (stats.uniform.pdf(s[:, i], loc=-1, scale=2))
+            else:
+                raise TypeError("standardize_pdf is defined only for Uniform and Gaussian marginal distributions")
+        return pdf_val
+
     @staticmethod
     def standardize_normal(tensor: np.ndarray, mean: float, std: float):
         """
@@ -46,7 +101,7 @@ def standardize_uniform(x, uniform):
         return (2 * x - loc - upper) / (upper - loc)
 
     @staticmethod
-    def normalized(degree: int, samples: np.ndarray, a: float, b: float, pdf_st: Callable, p:list):
+    def normalized(degree: int, samples: np.ndarray, a: float, b: float, pdf_st: Callable, p: list):
         """
         Calculates design matrix and normalized polynomials.
@@ -111,4 +166,4 @@ def scale(self):
 
     def evaluate(self, x: np.ndarray):
         pass
 
-    distribution_to_polynomial = { }
\ No newline at end of file
+    distribution_to_polynomial = {}
diff --git a/tests/unit_tests/surrogates/test_pce.py b/tests/unit_tests/surrogates/test_pce.py
index 2a0b95199..c01d32496 100644
--- a/tests/unit_tests/surrogates/test_pce.py
+++ b/tests/unit_tests/surrogates/test_pce.py
@@ -1,10 +1,12 @@
 import pytest
 
+from UQpy import ThetaCriterionPCE
 from UQpy.distributions import JointIndependent, Normal
 from UQpy.sampling import MonteCarloSampling
 from UQpy.distributions import Uniform
 from UQpy.sensitivity.PceSensitivity import PceSensitivity
 from UQpy.surrogates import *
+from scipy import stats as stats
 import numpy as np
 
 from UQpy.surrogates.polynomial_chaos.polynomials.TotalDegreeBasis import TotalDegreeBasis
@@ -39,7 +41,7 @@ def test_2():
     Test tp basis
     """
     polynomial_basis = TensorProductBasis(distributions=dist,
-                                           max_degree=max_degree).polynomials
+                                          max_degree=max_degree).polynomials
     value = polynomial_basis[1].evaluate(x)[0]
     assert round(value, 4) == -0.2874
@@ -382,3 +384,57 @@ def test_21():
     assert all((np.argwhere(np.round(pce2_lar_sens.calculate_generalized_total_order_indices(), 3) > 0) ==
                 [[0], [2], [3]]))
+
+
+def test_22():
+    """
+    Test Active Learning based on Theta Criterion
+    """
+    polynomial_basis = TotalDegreeBasis(dist, max_degree)
+    least_squares = LeastSquareRegression()
+    pce = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=least_squares)
+    uniform_x = np.zeros((3, 1))
+    uniform_x[:, 0] = np.array([0, 5, 10])
+    pce.fit(uniform_x, uniform_x)
+
+    adapted_x = uniform_x
+    candidates_x = np.zeros((5, 1))
+    candidates_x[:, 0] = np.array([1.1, 4, 5.1, 6, 9])
+
+    ThetaSampling_complete = ThetaCriterionPCE([pce])
+    pos = ThetaSampling_complete.run(adapted_x, candidates_x, nsamples=2)
+    best_candidates = candidates_x[pos, :]
+    assert best_candidates[0, 0] == 1.1 and best_candidates[1, 0] == 9
+
+
+def test_23():
+    """
+    Test Standardization of sample and associated PDF
+    """
+    dist1 = Uniform(loc=0, scale=10)
+    dist2 = Normal(loc=6, scale=2)
+    marg = [dist1, dist2]
+    joint = JointIndependent(marginals=marg)
+
+    polynomial_basis = TotalDegreeBasis(joint, max_degree)
+    least_squares = LeastSquareRegression()
+    pce = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=least_squares)
+    X = np.zeros((3, 2))
+    X[:, 1] = np.array([0, 6, 10])
+    X[:, 0] = np.array([0, 5, 10])
+
+    pce.fit(X, X)
+
+    standardized_samples = polynomial_chaos.Polynomials.standardize_sample(X, pce.polynomial_basis.distributions)
+    standardized_pdf = polynomial_chaos.Polynomials.standardize_pdf(X, pce.polynomial_basis.distributions)
+
+    ref_sample1 = np.array([-1, 0, 1])
+    ref_pdf1 = np.array([0.5, 0.5, 0.5])
+    ref_sample2 = np.array([-3, 0, 2])
+    ref_pdf2 = stats.norm.pdf(ref_sample2)
+
+    ref_sample = np.zeros((3, 2))
+    ref_sample[:, 0] = ref_sample1
+    ref_sample[:, 1] = ref_sample2
+    ref_pdf = ref_pdf1 * ref_pdf2
+    assert (standardized_samples == ref_sample).all() and (standardized_pdf == ref_pdf).all()