diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md
index 583494898bb..d59a940269e 100644
--- a/RELEASE-NOTES.md
+++ b/RELEASE-NOTES.md
@@ -11,6 +11,7 @@
 - Added the `broadcast_distribution_samples` function that helps broadcasting arrays of drawn samples, taking into account the requested `size` and the inferred distribution shape. This sometimes is needed by distributions that call several `rvs` separately within their `random` method, such as the `ZeroInflatedPoisson` (Fix issue #3310).
 - The `Wald`, `Kumaraswamy`, `LogNormal`, `Pareto`, `Cauchy`, `HalfCauchy`, `Weibull` and `ExGaussian` distributions `random` method used a hidden `_random` function that was written with scalars in mind. This could potentially lead to artificial correlations between random draws. Added shape guards and broadcasting of the distribution samples to prevent this (Similar to issue #3310).
 - Added a fix to allow the imputation of single missing values of observed data, which previously would fail (Fix issue #3122).
+- Fix for #3346. The `draw_values` function was too permissive with what could be grabbed from inside `point`, which led to an error when sampling posterior predictives of variables that depended on shared variables that had changed their shape after `pm.sample()` had been called.
 
 ### Deprecations
 
diff --git a/pymc3/distributions/distribution.py b/pymc3/distributions/distribution.py
index 248720101d7..0d451f45ba7 100644
--- a/pymc3/distributions/distribution.py
+++ b/pymc3/distributions/distribution.py
@@ -309,7 +309,7 @@ def draw_values(params, point=None, size=None):
                 # param was drawn in related contexts
                 v = drawn[(p, size)]
                 evaluated[i] = v
-            elif name is not None and name in point:
+            elif name is not None and hasattr(p, 'model') and name in point:
                 # param.name is in point
                 v = point[name]
                 evaluated[i] = drawn[(p, size)] = v
@@ -487,7 +487,7 @@ def _draw_value(param, point=None, givens=None, size=None):
 
                 dist_tmp.shape = distshape
                 try:
-                    dist_tmp.random(point=point, size=size)
+                    return dist_tmp.random(point=point, size=size)
                 except (ValueError, TypeError):
                     # reset shape to account for shape changes
                     # with theano.shared inputs
diff --git a/pymc3/tests/test_sampling.py b/pymc3/tests/test_sampling.py
index cca560cad14..78bd9a0d076 100644
--- a/pymc3/tests/test_sampling.py
+++ b/pymc3/tests/test_sampling.py
@@ -302,6 +302,32 @@ def test_model_not_drawable_prior(self):
         samples = pm.sample_posterior_predictive(trace, 50)
         assert samples['foo'].shape == (50, 200)
 
+    def test_model_shared_variable(self):
+        x = np.random.randn(100)
+        y = x > 0
+        x_shared = theano.shared(x)
+        y_shared = theano.shared(y)
+        with pm.Model() as model:
+            coeff = pm.Normal('x', mu=0, sd=1)
+            logistic = pm.Deterministic('p', pm.math.sigmoid(coeff * x_shared))
+
+            obs = pm.Bernoulli('obs', p=logistic, observed=y_shared)
+            trace = pm.sample(100)
+
+        x_shared.set_value([-1, 0, 1.])
+        y_shared.set_value([0, 0, 0])
+
+        samples = 100
+        with model:
+            post_pred = pm.sample_posterior_predictive(trace,
+                                                       samples=samples,
+                                                       vars=[logistic, obs])
+
+        expected_p = np.array([logistic.eval({coeff: val})
+                               for val in trace['x'][:samples]])
+        assert post_pred['obs'].shape == (samples, 3)
+        assert np.allclose(post_pred['p'], expected_p)
+
 
 class TestSamplePPCW(SeededTest):
     def test_sample_posterior_predictive_w(self):
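
For context, here is a minimal, self-contained sketch of the user-facing workflow this patch targets, adapted from the `test_model_shared_variable` test added above. It assumes the PyMC3 3.x / Theano API used elsewhere in this diff (`theano.shared`, `pm.sample_posterior_predictive`); the variable names and data sizes are illustrative only, not part of the patch.

```python
import numpy as np
import theano
import pymc3 as pm

# Fit a simple logistic model whose data lives in theano shared variables.
x = np.random.randn(100)
y = (x > 0).astype(int)
x_shared = theano.shared(x)
y_shared = theano.shared(y)

with pm.Model() as model:
    coeff = pm.Normal('coeff', mu=0, sd=1)
    p = pm.Deterministic('p', pm.math.sigmoid(coeff * x_shared))
    obs = pm.Bernoulli('obs', p=p, observed=y_shared)
    trace = pm.sample(100)

# Swap in new data with a *different* shape before posterior prediction.
# Before this patch, draw_values could grab a stale value out of `point`
# here, and sample_posterior_predictive would raise an error.
x_shared.set_value(np.array([-1., 0., 1.]))
y_shared.set_value(np.array([0, 0, 0]))

with model:
    post_pred = pm.sample_posterior_predictive(trace, samples=100,
                                               vars=[p, obs])

print(post_pred['obs'].shape)  # expected (100, 3) once the fix is in place
```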