diff --git a/DESCRIPTION b/DESCRIPTION index cfaa868d6..75ff2f098 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,7 +1,7 @@ Type: Package Package: bayestestR Title: Understand and Describe Bayesian Models and Posterior Distributions -Version: 0.15.0.4 +Version: 0.15.1 Authors@R: c(person(given = "Dominique", family = "Makowski", @@ -67,7 +67,7 @@ Depends: R (>= 3.6) Imports: insight (>= 1.0.0), - datawizard (>= 0.13.0), + datawizard (>= 1.0.0), graphics, methods, stats, @@ -95,7 +95,7 @@ Suggests: lavaan, lme4, logspline (>= 2.1.21), - marginaleffects (>= 0.24.0.6), + marginaleffects (>= 0.24.0), MASS, mclust, mediation, @@ -128,4 +128,3 @@ Config/testthat/parallel: true Config/rcmdcheck/ignore-inconsequential-notes: true Config/Needs/website: easystats/easystatstemplate Config/Needs/check: stan-dev/cmdstanr -Remotes: easystats/datawizard, vincentarelbundock/marginaleffects diff --git a/NEWS.md b/NEWS.md index bd534d59e..a13d756d5 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,10 +1,13 @@ -# bayestestR 0.15.0.4 +# bayestestR 0.15.1 + +## Changes + +* Several minor changes to deal with recent changes in other packages. ## Bug fixes * Fix to `emmeans` / `marginaleffects` / `data.frame()` methods when using multiple credible levels (#688). - # bayestestR 0.15.0 ## Changes diff --git a/R/bayesfactor_parameters.R b/R/bayesfactor_parameters.R index 84127a40c..28f3d4597 100644 --- a/R/bayesfactor_parameters.R +++ b/R/bayesfactor_parameters.R @@ -323,7 +323,6 @@ bayesfactor_parameters.stanreg <- function(posterior, } - #' @rdname bayesfactor_parameters #' @export bayesfactor_parameters.brmsfit <- bayesfactor_parameters.stanreg diff --git a/R/bci.R b/R/bci.R index b5e61ccf4..48b774145 100644 --- a/R/bci.R +++ b/R/bci.R @@ -27,7 +27,6 @@ bci <- function(x, ...) { bcai <- bci - #' @rdname bci #' @export bci.numeric <- function(x, ci = 0.95, verbose = TRUE, ...) { @@ -40,7 +39,6 @@ bci.numeric <- function(x, ci = 0.95, verbose = TRUE, ...) { } - #' @rdname bci #' @inheritParams p_direction #' @export @@ -66,7 +64,6 @@ bci.data.frame <- function(x, ci = 0.95, rvar_col = NULL, verbose = TRUE, ...) { } - #' @export bci.draws <- function(x, ci = 0.95, verbose = TRUE, ...) { dat <- .compute_interval_dataframe(x = .posterior_draws_to_df(x), ci = ci, verbose = verbose, fun = "bci") @@ -89,7 +86,6 @@ bci.MCMCglmm <- function(x, ci = 0.95, verbose = TRUE, ...) { } - #' @export bci.mcmc <- function(x, ci = 0.95, verbose = TRUE, ...) { d <- as.data.frame(x) @@ -99,7 +95,6 @@ bci.mcmc <- function(x, ci = 0.95, verbose = TRUE, ...) { } - #' @export bci.bamlss <- function(x, ci = 0.95, @@ -114,7 +109,6 @@ bci.bamlss <- function(x, } - #' @export bci.bcplm <- function(x, ci = 0.95, verbose = TRUE, ...) { d <- insight::get_parameters(x) @@ -137,7 +131,6 @@ bci.mcmc.list <- bci.bcplm bci.BGGM <- bci.bcplm - #' @rdname bci #' @export bci.sim.merMod <- function(x, @@ -161,7 +154,6 @@ bci.sim.merMod <- function(x, } - #' @rdname bci #' @export bci.sim <- function(x, ci = 0.95, parameters = NULL, verbose = TRUE, ...) { @@ -178,7 +170,6 @@ bci.sim <- function(x, ci = 0.95, parameters = NULL, verbose = TRUE, ...) { } - #' @rdname bci #' @export bci.emmGrid <- function(x, ci = 0.95, verbose = TRUE, ...) 
{ @@ -249,7 +240,6 @@ bci.stanfit <- bci.stanreg bci.blavaan <- bci.stanreg - #' @rdname bci #' @export bci.brmsfit <- function(x, ci = 0.95, effects = c("fixed", "random", "all"), @@ -279,7 +269,6 @@ bci.brmsfit <- function(x, ci = 0.95, effects = c("fixed", "random", "all"), } - #' @rdname bci #' @export bci.BFBayesFactor <- function(x, ci = 0.95, verbose = TRUE, ...) { diff --git a/R/check_prior.R b/R/check_prior.R index 4b9cd5b5d..a1b114fc2 100644 --- a/R/check_prior.R +++ b/R/check_prior.R @@ -50,9 +50,6 @@ check_prior <- function(model, method = "gelman", simulate_priors = TRUE, ...) { } - - - #' @export check_prior.brmsfit <- function(model, method = "gelman", diff --git a/R/contr.equalprior.R b/R/contr.equalprior.R index e7f725f59..0e0f5fa90 100644 --- a/R/contr.equalprior.R +++ b/R/contr.equalprior.R @@ -184,7 +184,6 @@ contr.equalprior_deviations <- function(n, contrasts = TRUE, sparse = FALSE) { } - # OLD ------------------------------ #' @export diff --git a/R/convert_pd_to_p.R b/R/convert_pd_to_p.R index 51c05233e..3b66619b8 100644 --- a/R/convert_pd_to_p.R +++ b/R/convert_pd_to_p.R @@ -95,7 +95,6 @@ p_to_pd <- function(p, direction = "two-sided", ...) { } - #' @rdname pd_to_p #' @export convert_p_to_pd <- p_to_pd diff --git a/R/describe_posterior.R b/R/describe_posterior.R index 73b41aa83..0f7b64024 100644 --- a/R/describe_posterior.R +++ b/R/describe_posterior.R @@ -11,11 +11,11 @@ #' `"SPI"` (see [`spi()`]), or `"SI"` (see [`si()`]). #' @param test The indices of effect existence to compute. Character (vector) or #' list with one or more of these options: `"p_direction"` (or `"pd"`), -#' `"rope"`, `"p_map"`, `"equivalence_test"` (or `"equitest"`), -#' `"bayesfactor"` (or `"bf"`) or `"all"` to compute all tests. For each -#' "test", the corresponding \pkg{bayestestR} function is called (e.g. -#' [`rope()`] or [`p_direction()`]) and its results included in the summary -#' output. +#' `"rope"`, `"p_map"`, `"p_significance"` (or `"ps"`), `"p_rope"`, +#' `"equivalence_test"` (or `"equitest"`), `"bayesfactor"` (or `"bf"`) or +#' `"all"` to compute all tests. For each "test", the corresponding +#' \pkg{bayestestR} function is called (e.g. [`rope()`] or [`p_direction()`]) +#' and its results included in the summary output. #' @param rope_range ROPE's lower and higher bounds. Should be a vector of two #' values (e.g., `c(-0.1, 0.1)`), `"default"` or a list of numeric vectors of #' the same length as numbers of parameters. If `"default"`, the bounds are @@ -528,8 +528,6 @@ describe_posterior.default <- function(posterior, ...) 
{ } - - # Models based on simple data frame of posterior --------------------- @@ -728,7 +726,6 @@ describe_posterior.draws <- function(posterior, describe_posterior.rvar <- describe_posterior.draws - # easystats methods ------------------------ @@ -825,8 +822,6 @@ describe_posterior.get_predicted <- function(posterior, } - - # emmeans --------------------------- @@ -1221,8 +1216,6 @@ describe_posterior.brmsfit <- function(posterior, describe_posterior.blavaan <- describe_posterior.stanfit - - # other models -------------------------------- @@ -1345,8 +1338,6 @@ describe_posterior.bamlss <- function(posterior, } - - # BayesFactor -------------------- @@ -1431,9 +1422,6 @@ describe_posterior.BFBayesFactor <- function(posterior, } - - - # Helpers ----------------------------------------------------------------- diff --git a/R/describe_prior.R b/R/describe_prior.R index 54b140c83..e3f2018d0 100644 --- a/R/describe_prior.R +++ b/R/describe_prior.R @@ -53,7 +53,6 @@ describe_prior.brmsfit <- function(model, # Internal ---------------------------------------------------------------- - #' @keywords internal .describe_prior <- function(model, parameters = NULL, ...) { priors <- insight::get_priors(model, ...) diff --git a/R/distribution.R b/R/distribution.R index d89e05548..5687ee61d 100644 --- a/R/distribution.R +++ b/R/distribution.R @@ -67,10 +67,6 @@ distribution_custom <- function(n, type = "norm", ..., random = FALSE) { } - - - - #' @rdname distribution #' @inheritParams stats::rbeta #' @export @@ -99,7 +95,6 @@ distribution_binomial <- function(n, size = 1, prob = 0.5, random = FALSE, ...) distribution_binom <- distribution_binomial - #' @rdname distribution #' @inheritParams stats::rcauchy #' @export @@ -127,7 +122,6 @@ distribution_chisquared <- function(n, df, ncp = 0, random = FALSE, ...) { distribution_chisq <- distribution_chisquared - #' @rdname distribution #' @inheritParams stats::rgamma #' @param shape Shape parameter. @@ -267,7 +261,6 @@ distribution_uniform <- function(n, min = 0, max = 1, random = FALSE, ...) { } - #' @rdname distribution #' @inheritParams stats::rnorm #' @export diff --git a/R/equivalence_test.R b/R/equivalence_test.R index aafd098bf..d7f951050 100644 --- a/R/equivalence_test.R +++ b/R/equivalence_test.R @@ -147,7 +147,6 @@ equivalence_test.numeric <- function(x, range = "default", ci = 0.95, verbose = } - #' @rdname equivalence_test #' @inheritParams p_direction #' @export @@ -256,8 +255,6 @@ equivalence_test.BFBayesFactor <- function(x, range = "default", ci = 0.95, verb } - - #' @keywords internal .equivalence_test_models <- function(x, range = "default", @@ -354,7 +351,6 @@ equivalence_test.brmsfit <- function(x, } - #' @export equivalence_test.sim.merMod <- function(x, range = "default", @@ -433,7 +429,6 @@ equivalence_test.mcmc.list <- equivalence_test.bcplm equivalence_test.bayesQR <- equivalence_test.bcplm - #' @export equivalence_test.bamlss <- function(x, range = "default", diff --git a/R/estimate_density.R b/R/estimate_density.R index 9ee4c424f..ecee3906c 100644 --- a/R/estimate_density.R +++ b/R/estimate_density.R @@ -161,7 +161,6 @@ estimate_density.default <- function(x, ...) { } - # Methods ----------------------------------------------------------------- #' @export @@ -589,7 +588,6 @@ estimate_density.bamlss <- function(x, } - #' Coerce to a Data Frame #' #' @inheritParams base::as.data.frame @@ -644,8 +642,6 @@ as.data.frame.density <- function(x, ...) 
{ } - - #' Density Probability at a Given Value #' #' Compute the density value at a given point of a distribution (i.e., diff --git a/R/eti.R b/R/eti.R index 404a9f036..57f6f12ee 100644 --- a/R/eti.R +++ b/R/eti.R @@ -64,7 +64,6 @@ eti.numeric <- function(x, ci = 0.95, verbose = TRUE, ...) { } - #' @export #' @rdname eti #' @inheritParams p_direction @@ -91,7 +90,6 @@ eti.data.frame <- function(x, ci = 0.95, rvar_col = NULL, verbose = TRUE, ...) { } - #' @export eti.draws <- function(x, ci = 0.95, verbose = TRUE, ...) { dat <- .compute_interval_dataframe(x = .posterior_draws_to_df(x), ci = ci, verbose = verbose, fun = "eti") @@ -103,7 +101,6 @@ eti.draws <- function(x, ci = 0.95, verbose = TRUE, ...) { eti.rvar <- eti.draws - #' @export eti.MCMCglmm <- function(x, ci = 0.95, verbose = TRUE, ...) { nF <- x$Fixed$nfl @@ -114,7 +111,6 @@ eti.MCMCglmm <- function(x, ci = 0.95, verbose = TRUE, ...) { } - #' @export eti.mcmc <- function(x, ci = 0.95, verbose = TRUE, ...) { d <- as.data.frame(x) @@ -124,7 +120,6 @@ eti.mcmc <- function(x, ci = 0.95, verbose = TRUE, ...) { } - #' @export eti.bamlss <- function(x, ci = 0.95, component = c("all", "conditional", "location"), verbose = TRUE, ...) { component <- match.arg(component) @@ -135,7 +130,6 @@ eti.bamlss <- function(x, ci = 0.95, component = c("all", "conditional", "locati } - #' @export eti.bcplm <- function(x, ci = 0.95, verbose = TRUE, ...) { d <- insight::get_parameters(x) @@ -248,7 +242,6 @@ eti.stanfit <- eti.stanreg eti.blavaan <- eti.stanreg - #' @rdname eti #' @export eti.brmsfit <- function(x, ci = 0.95, effects = c("fixed", "random", "all"), diff --git a/R/format.R b/R/format.R index 3d81e4734..0cbfef169 100644 --- a/R/format.R +++ b/R/format.R @@ -80,7 +80,6 @@ format.equivalence_test <- format.describe_posterior format.rope <- format.describe_posterior - # special handling for bayes factors ------------------ @@ -154,7 +153,6 @@ format.bayesfactor_models <- function(x, } - #' @export format.bayesfactor_inclusion <- function(x, digits = 3, @@ -208,7 +206,6 @@ format.bayesfactor_inclusion <- function(x, } - #' @export format.bayesfactor_restricted <- function(x, digits = 3, @@ -254,7 +251,6 @@ format.bayesfactor_restricted <- function(x, } - #' @export format.bayesfactor_parameters <- function(x, cp = NULL, diff --git a/R/hdi.R b/R/hdi.R index fa31b23bd..003fd07ab 100644 --- a/R/hdi.R +++ b/R/hdi.R @@ -407,11 +407,9 @@ hdi.get_predicted <- function(x, ci = 0.95, use_iterations = FALSE, verbose = TR } - # Helper ------------------------------------------------------------------ - #' @keywords internal .hdi <- function(x, ci = 0.95, verbose = TRUE) { check_ci <- .check_ci_argument(x, ci, verbose) diff --git a/R/map_estimate.R b/R/map_estimate.R index aea57b96c..3ca623b71 100644 --- a/R/map_estimate.R +++ b/R/map_estimate.R @@ -41,7 +41,6 @@ map_estimate <- function(x, ...) 
{ } - # numeric ----------------------- #' @rdname map_estimate @@ -91,7 +90,6 @@ map_estimate.blrm <- map_estimate.bayesQR map_estimate.mcmc.list <- map_estimate.bayesQR - # stan / posterior models ----------------------- #' @keywords internal @@ -235,7 +233,6 @@ map_estimate.get_predicted <- function(x, } - # Methods ----------------------------------------------------------------- #' @rdname as.numeric.p_direction diff --git a/R/mcse.R b/R/mcse.R index f449515df..0eaba0b67 100644 --- a/R/mcse.R +++ b/R/mcse.R @@ -85,7 +85,6 @@ mcse.stanreg <- function(model, } - #' @export mcse.stanfit <- mcse.stanreg diff --git a/R/mediation.R b/R/mediation.R index c8b664265..c118045b2 100644 --- a/R/mediation.R +++ b/R/mediation.R @@ -156,9 +156,6 @@ mediation.stanmvreg <- function(model, treatment, mediator, response = NULL, cen } - - - # workhorse --------------------------------- @@ -316,7 +313,6 @@ as.data.frame.bayestestR_mediation <- function(x, ...) { } - # S3 --------------------------------- #' @export @@ -358,7 +354,6 @@ print.bayestestR_mediation <- function(x, digits = 3, ...) { } - #' @export plot.bayestestR_mediation <- function(x, ...) { insight::check_if_installed("see", "to plot results from mediation analysis") diff --git a/R/p_direction.R b/R/p_direction.R index c5b1f84e0..1706eeb6c 100644 --- a/R/p_direction.R +++ b/R/p_direction.R @@ -263,9 +263,6 @@ p_direction.data.frame <- function(x, } - - - #' @export p_direction.draws <- function(x, method = "direct", @@ -477,7 +474,6 @@ p_direction.sim <- function(x, } - #' @rdname p_direction #' @export p_direction.stanreg <- function(x, @@ -643,7 +639,6 @@ p_direction.parameters_model <- function(x, ...) { } - # Definition -------------------------------------------------------------- diff --git a/R/p_map.R b/R/p_map.R index a0267e867..08b0670b1 100644 --- a/R/p_map.R +++ b/R/p_map.R @@ -79,7 +79,6 @@ p_map <- function(x, ...) { p_pointnull <- p_map - #' @rdname p_map #' @export p_map.numeric <- function(x, null = 0, precision = 2^10, method = "kernel", ...) { @@ -160,7 +159,6 @@ p_map.data.frame <- function(x, null = 0, precision = 2^10, method = "kernel", r } - #' @export p_map.draws <- function(x, null = 0, precision = 2^10, method = "kernel", ...) { p_map(.posterior_draws_to_df(x), null = null, precision = precision, method = method, ...) @@ -170,7 +168,6 @@ p_map.draws <- function(x, null = 0, precision = 2^10, method = "kernel", ...) { p_map.rvar <- p_map.draws - #' @export p_map.emmGrid <- function(x, null = 0, precision = 2^10, method = "kernel", ...) { xdf <- insight::get_parameters(x) @@ -216,8 +213,6 @@ p_map.predictions <- p_map.slopes } - - #' @export p_map.mcmc <- function(x, null = 0, precision = 2^10, method = "kernel", parameters = NULL, ...) { out <- .p_map_models( @@ -249,7 +244,6 @@ p_map.mcmc.list <- p_map.mcmc p_map.BGGM <- p_map.mcmc - #' @export p_map.bamlss <- function(x, null = 0, precision = 2^10, method = "kernel", component = c("all", "conditional", "location"), parameters = NULL, ...) { @@ -271,7 +265,6 @@ p_map.bamlss <- function(x, null = 0, precision = 2^10, method = "kernel", } - #' @export p_map.sim.merMod <- function(x, null = 0, precision = 2^10, method = "kernel", effects = c("fixed", "random", "all"), parameters = NULL, ...) { @@ -293,8 +286,6 @@ p_map.sim.merMod <- function(x, null = 0, precision = 2^10, method = "kernel", } - - #' @export p_map.sim <- function(x, null = 0, precision = 2^10, method = "kernel", parameters = NULL, ...) 
{ @@ -314,8 +305,6 @@ p_map.sim <- function(x, null = 0, precision = 2^10, method = "kernel", } - - #' @rdname p_map #' @export p_map.stanreg <- function(x, null = 0, precision = 2^10, method = "kernel", @@ -356,7 +345,6 @@ p_map.stanfit <- p_map.stanreg p_map.blavaan <- p_map.stanreg - #' @rdname p_map #' @export p_map.brmsfit <- function(x, null = 0, precision = 2^10, method = "kernel", @@ -390,9 +378,6 @@ p_map.brmsfit <- function(x, null = 0, precision = 2^10, method = "kernel", } - - - #' @export p_map.BFBayesFactor <- function(x, null = 0, precision = 2^10, method = "kernel", ...) { out <- p_map(insight::get_parameters(x), null = null, precision = precision, method = method, ...) @@ -401,7 +386,6 @@ p_map.BFBayesFactor <- function(x, null = 0, precision = 2^10, method = "kernel" } - #' @export p_map.MCMCglmm <- function(x, null = 0, precision = 2^10, method = "kernel", ...) { nF <- x$Fixed$nfl @@ -411,7 +395,6 @@ p_map.MCMCglmm <- function(x, null = 0, precision = 2^10, method = "kernel", ... } - #' @export p_map.bayesQR <- function(x, null = 0, precision = 2^10, method = "kernel", ...) { out <- p_map(insight::get_parameters(x), null = null, precision = precision, method = method, ...) diff --git a/R/p_rope.R b/R/p_rope.R index 5a93dda7b..46ce67226 100644 --- a/R/p_rope.R +++ b/R/p_rope.R @@ -222,7 +222,6 @@ p_rope.blrm <- p_rope.mcmc p_rope.mcmc.list <- p_rope.mcmc - # Internal ---------------------------------------------------------------- diff --git a/R/p_significance.R b/R/p_significance.R index c38a3842a..fa3917440 100644 --- a/R/p_significance.R +++ b/R/p_significance.R @@ -389,7 +389,6 @@ as.numeric.p_significance <- function(x, ...) { as.double.p_significance <- as.numeric.p_significance - # helpers -------------------------- #' @keywords internal diff --git a/R/p_to_bf.R b/R/p_to_bf.R index 4e34ef029..74d62491b 100644 --- a/R/p_to_bf.R +++ b/R/p_to_bf.R @@ -121,7 +121,6 @@ p_to_bf.default <- function(x, log = FALSE, ...) { } - # methods --------------- #' @export diff --git a/R/print.R b/R/print.R index dfc62eeb7..d6ace60c4 100644 --- a/R/print.R +++ b/R/print.R @@ -246,7 +246,6 @@ print.bayesfactor_parameters <- function(x, digits = 3, log = FALSE, ...) { } - # util --------------------- .print_default <- function(x, @@ -297,7 +296,6 @@ print.bayesfactor_parameters <- function(x, digits = 3, log = FALSE, ...) { } - .print_bf_default <- function(x, digits = 3, log = FALSE, diff --git a/R/print_html.R b/R/print_html.R index fb487c405..bfb951f16 100644 --- a/R/print_html.R +++ b/R/print_html.R @@ -28,7 +28,6 @@ print_html.p_map <- function(x, digits = 2, caption = "MAP-based p-value", ...) } - #' @export print_html.p_rope <- function(x, digits = 2, ...) { # check if we have multiple ROPE values @@ -87,8 +86,6 @@ print_html.bayestestR_si <- function(x, digits = 2, caption = "Support Interval" } - - # special handling for bayes factors ------------------ @@ -157,9 +154,6 @@ print_html.bayesfactor_parameters <- function(x, digits = 3, log = FALSE, ...) { } - - - # util --------------- @@ -189,7 +183,6 @@ print_html.bayesfactor_parameters <- function(x, digits = 3, log = FALSE, ...) 
{ } - .print_bf_html_default <- function(x, digits = 3, log = FALSE, diff --git a/R/print_md.R b/R/print_md.R index 053ae0472..ed15c3edb 100644 --- a/R/print_md.R +++ b/R/print_md.R @@ -86,8 +86,6 @@ print_md.bayestestR_si <- function(x, digits = 2, caption = "Support Interval", } - - # special handling for bayes factors ------------------ @@ -156,9 +154,6 @@ print_md.bayesfactor_parameters <- function(x, digits = 3, log = FALSE, ...) { } - - - # util --------------- diff --git a/R/rope_range.R b/R/rope_range.R index 0b583890a..bc92577ab 100644 --- a/R/rope_range.R +++ b/R/rope_range.R @@ -121,7 +121,6 @@ rope_range.mlm <- function(x, verbose = TRUE, ...) { } - # helper ------------------ diff --git a/R/sensitivity_to_prior.R b/R/sensitivity_to_prior.R index ad710be48..beb32798e 100644 --- a/R/sensitivity_to_prior.R +++ b/R/sensitivity_to_prior.R @@ -87,8 +87,6 @@ sensitivity_to_prior.default <- function(model, ...) { } - - #' Set a new location for a prior #' @keywords internal .prior_new_location <- function(prior, sign, magnitude = 10) { @@ -97,9 +95,6 @@ sensitivity_to_prior.default <- function(model, ...) { } - - - #' Extract and Returns the priors formatted for rstanarm #' @keywords internal .extract_priors_rstanarm <- function(model, ...) { diff --git a/R/sexit.R b/R/sexit.R index eb7925114..945ecc24b 100644 --- a/R/sexit.R +++ b/R/sexit.R @@ -252,7 +252,6 @@ sexit <- function(x, significant = "default", large = "default", ci = 0.95, ...) } - #' @keywords internal .sexit_preprocess <- function(x, significant = "default", large = "default", ...) { thresholds <- sexit_thresholds(x) @@ -260,7 +259,6 @@ sexit <- function(x, significant = "default", large = "default", ci = 0.95, ...) if (large == "default") large <- thresholds[2] - suppressWarnings({ resp <- .safe(insight::get_response(x, type = "mf")) }) @@ -296,7 +294,6 @@ sexit <- function(x, significant = "default", large = "default", ci = 0.95, ...) } - #' @export print.sexit <- function(x, summary = FALSE, digits = 2, ...) { orig_x <- x diff --git a/R/sexit_thresholds.R b/R/sexit_thresholds.R index ee4efa465..9fea27695 100644 --- a/R/sexit_thresholds.R +++ b/R/sexit_thresholds.R @@ -139,8 +139,6 @@ sexit_thresholds.mlm <- function(x, verbose = TRUE, ...) { } - - # helper ------------------ diff --git a/R/si.R b/R/si.R index 69bb9bde3..664a956c2 100644 --- a/R/si.R +++ b/R/si.R @@ -284,7 +284,6 @@ si.draws <- function(posterior, prior = NULL, BF = 1, verbose = TRUE, ...) { si.rvar <- si.draws - # Helper ------------------------------------------------------------------ .si.data.frame <- function(posterior, prior, BF, verbose = TRUE, ...) { @@ -308,7 +307,6 @@ si.rvar <- si.draws } - #' @keywords internal .si <- function(posterior, prior, BF = 1, extend_scale = 0.05, precision = 2^8, verbose = TRUE, ...) { insight::check_if_installed("logspline") diff --git a/R/simulate_data.R b/R/simulate_data.R index 0c566fcd3..ac8b2c49b 100644 --- a/R/simulate_data.R +++ b/R/simulate_data.R @@ -114,7 +114,6 @@ simulate_correlation <- function(n = 100, } - #' @rdname simulate_correlation #' @export simulate_ttest <- function(n = 100, d = 0.5, names = NULL, ...) { diff --git a/R/simulate_priors.R b/R/simulate_priors.R index 00fb02ecc..0c33ba397 100644 --- a/R/simulate_priors.R +++ b/R/simulate_priors.R @@ -24,7 +24,6 @@ simulate_prior <- function(model, n = 1000, ...) 
{ } - #' @export simulate_prior.stanreg <- function(model, n = 1000, @@ -75,17 +74,12 @@ simulate_prior.brmsfit <- function(model, } - #' @export simulate_prior.bcplm <- function(model, n = 1000, verbose = TRUE, ...) { .simulate_prior(insight::get_priors(model, verbose = verbose), n = n, verbose = verbose) } - - - - #' @keywords internal .simulate_prior <- function(priors, n = 1000, verbose = TRUE) { simulated <- data.frame(.bamboozled = 1:n) diff --git a/R/spi.R b/R/spi.R index 53429f90f..7637ff04c 100644 --- a/R/spi.R +++ b/R/spi.R @@ -282,8 +282,6 @@ spi.get_predicted <- function(x, ci = 0.95, use_iterations = FALSE, verbose = TR } - - # Helper ------------------------------------------------------------------ # Code taken (and slightly simplified) from: diff --git a/R/unupdate.R b/R/unupdate.R index 6a30541b8..b180007ca 100644 --- a/R/unupdate.R +++ b/R/unupdate.R @@ -22,7 +22,6 @@ unupdate <- function(model, verbose = TRUE, ...) { } - #' @export #' @rdname unupdate unupdate.stanreg <- function(model, verbose = TRUE, ...) { @@ -52,7 +51,6 @@ unupdate.stanreg <- function(model, verbose = TRUE, ...) { } - #' @export #' @rdname unupdate unupdate.brmsfit <- function(model, verbose = TRUE, ...) { diff --git a/R/utils.R b/R/utils.R index 206a9c951..097aa3046 100644 --- a/R/utils.R +++ b/R/utils.R @@ -246,7 +246,6 @@ } - attr(results, "idvars") <- grid_names results } diff --git a/R/utils_bayesfactor.R b/R/utils_bayesfactor.R index 1992c63f8..14c51d6aa 100644 --- a/R/utils_bayesfactor.R +++ b/R/utils_bayesfactor.R @@ -420,7 +420,6 @@ as.double.bayesfactor_parameters <- as.numeric.bayesfactor_inclusion as.double.bayesfactor_restricted <- as.numeric.bayesfactor_inclusion - # logspline --------------------------------------------------------------- #' @keywords internal diff --git a/R/utils_hdi_ci.R b/R/utils_hdi_ci.R index 81fe83128..bce95cf3b 100644 --- a/R/utils_hdi_ci.R +++ b/R/utils_hdi_ci.R @@ -45,7 +45,6 @@ } - #' @keywords internal .compute_interval_dataframe <- function(x, ci, verbose, fun) { numeric_variables <- vapply(x, is.numeric, TRUE) @@ -75,7 +74,6 @@ } - #' @keywords internal .compute_interval_simMerMod <- function(x, ci, effects, parameters, verbose, fun) { fixed <- fixed.data <- NULL @@ -103,7 +101,6 @@ } - #' @keywords internal .compute_interval_sim <- function(x, ci, parameters, verbose, fun) { fixed.data <- insight::get_parameters(x, parameters = parameters) diff --git a/man/describe_posterior.Rd b/man/describe_posterior.Rd index 717288520..6aae380df 100644 --- a/man/describe_posterior.Rd +++ b/man/describe_posterior.Rd @@ -113,11 +113,11 @@ to be estimated. Default to \code{0.95} (\verb{95\%}).} \item{test}{The indices of effect existence to compute. Character (vector) or list with one or more of these options: \code{"p_direction"} (or \code{"pd"}), -\code{"rope"}, \code{"p_map"}, \code{"equivalence_test"} (or \code{"equitest"}), -\code{"bayesfactor"} (or \code{"bf"}) or \code{"all"} to compute all tests. For each -"test", the corresponding \pkg{bayestestR} function is called (e.g. -\code{\link[=rope]{rope()}} or \code{\link[=p_direction]{p_direction()}}) and its results included in the summary -output.} +\code{"rope"}, \code{"p_map"}, \code{"p_significance"} (or \code{"ps"}), \code{"p_rope"}, +\code{"equivalence_test"} (or \code{"equitest"}), \code{"bayesfactor"} (or \code{"bf"}) or +\code{"all"} to compute all tests. For each "test", the corresponding +\pkg{bayestestR} function is called (e.g. 
\code{\link[=rope]{rope()}} or \code{\link[=p_direction]{p_direction()}}) +and its results included in the summary output.} \item{rope_range}{ROPE's lower and higher bounds. Should be a vector of two values (e.g., \code{c(-0.1, 0.1)}), \code{"default"} or a list of numeric vectors of diff --git a/tests/testthat/test-data.frame-with-rvar.R b/tests/testthat/test-data.frame-with-rvar.R index 4c5c373e1..14a7d5a2a 100644 --- a/tests/testthat/test-data.frame-with-rvar.R +++ b/tests/testthat/test-data.frame-with-rvar.R @@ -81,7 +81,6 @@ test_that("data.frame w/ rvar_col bayesfactors", { dfx - ## SIs res <- si(dfx, rvar_col = "my_rvar", prior = "other_rvar", verbose = FALSE) res.ref <- si(dfx$my_rvar, prior = dfx$other_rvar, verbose = FALSE) diff --git a/tests/testthat/test-describe_posterior.R b/tests/testthat/test-describe_posterior.R index de9db998e..ff104140f 100644 --- a/tests/testthat/test-describe_posterior.R +++ b/tests/testthat/test-describe_posterior.R @@ -107,7 +107,6 @@ test_that("describe_posterior", { }) - test_that("describe_posterior", { skip_on_os(c("mac", "linux")) skip_if_offline() diff --git a/tests/testthat/test-emmGrid.R b/tests/testthat/test-emmGrid.R index a5186830c..1e60ed2ec 100644 --- a/tests/testthat/test-emmGrid.R +++ b/tests/testthat/test-emmGrid.R @@ -52,7 +52,6 @@ test_that("emmGrid point_estimate", { }) - # Basics ------------------------------------------------------------------ test_that("emmGrid ci", { diff --git a/tests/testthat/test-marginaleffects.R b/tests/testthat/test-marginaleffects.R index e7cecf4d9..7dfaa413c 100644 --- a/tests/testthat/test-marginaleffects.R +++ b/tests/testthat/test-marginaleffects.R @@ -1,7 +1,7 @@ skip_on_cran() skip_if_not_installed("withr") skip_if_not_installed("rstanarm") -skip_if_not_installed("marginaleffects", minimum_version = "0.24.0") +skip_if_not_installed("marginaleffects", minimum_version = "0.24.1") skip_if_not_installed("collapse") withr::with_environment( diff --git a/tests/testthat/test-p_map.R b/tests/testthat/test-p_map.R index 4af3e8717..e2370c32d 100644 --- a/tests/testthat/test-p_map.R +++ b/tests/testthat/test-p_map.R @@ -50,7 +50,6 @@ test_that("p_map", { }) - test_that("p_map | null", { x <- distribution_normal(4000, mean = 1) expect_equal(as.numeric(p_map(x)), 0.6194317, ignore_attr = TRUE, tolerance = 0.01) diff --git a/tests/testthat/test-rope.R b/tests/testthat/test-rope.R index 0c9059854..a4124dc6e 100644 --- a/tests/testthat/test-rope.R +++ b/tests/testthat/test-rope.R @@ -79,7 +79,6 @@ test_that("rope", { }) - test_that("rope", { skip_if_offline() skip_if_not_or_load_if_installed("rstanarm") diff --git a/tests/testthat/test-rope_range.R b/tests/testthat/test-rope_range.R index 4217f6abb..cf890ee48 100644 --- a/tests/testthat/test-rope_range.R +++ b/tests/testthat/test-rope_range.R @@ -28,7 +28,6 @@ test_that("rope_range logistic", { }) - test_that("rope_range", { skip_if_not_or_load_if_installed("brms") model <- suppressWarnings(brms::brm(mpg ~ wt + gear, data = mtcars, iter = 300)) diff --git a/vignettes/bayes_factors.Rmd b/vignettes/bayes_factors.Rmd index 1ff4b13dc..90ebd471a 100644 --- a/vignettes/bayes_factors.Rmd +++ b/vignettes/bayes_factors.Rmd @@ -1,6 +1,6 @@ --- title: "Bayes Factors" -output: +output: rmarkdown::html_vignette: toc: true toc_depth: 2 @@ -11,7 +11,7 @@ vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{Bayes Factors} %\VignetteEngine{knitr::rmarkdown} -editor_options: +editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl @@ -71,7 
+71,7 @@ Having said that, here's an introduction to Bayes factors :) # The Bayes Factor -**Bayes Factors (BFs) are indices of *relative* evidence of one "model" over another**. +**Bayes Factors (BFs) are indices of *relative* evidence of one "model" over another**. In their role as a hypothesis testing index, they are to Bayesian framework what a $p$-value is to the **classical/frequentist framework**. In significance-based @@ -92,8 +92,8 @@ $$ Using this equation, we can compare the probability-odds of two models: $$ -\underbrace{\frac{P(M_1|D)}{P(M_2|D)}}_{\text{Posterior Odds}} = -\underbrace{\frac{P(D|M_1)}{P(D|M_2)}}_{\text{Likelihood Ratio}} +\underbrace{\frac{P(M_1|D)}{P(M_2|D)}}_{\text{Posterior Odds}} = +\underbrace{\frac{P(D|M_1)}{P(D|M_2)}}_{\text{Likelihood Ratio}} \times \underbrace{\frac{P(M_1)}{P(M_2)}}_{\text{Prior Odds}} $$ @@ -116,9 +116,9 @@ $$ BF_{12}=\frac{Posterior~Odds_{12}}{Prior~Odds_{12}} $$ -Here we provide functions for computing Bayes factors in two different contexts: +Here we provide functions for computing Bayes factors in two different contexts: -- **testing single parameters (coefficients) within a model** +- **testing single parameters (coefficients) within a model** - **comparing statistical models themselves** # 1. Testing Models' Parameters with Bayes Factors {#bayesfactor_parameters} @@ -185,7 +185,7 @@ One way of operationlizing the null-hypothesis is by setting a null region, such that an effect that falls within this interval would be *practically* equivalent to the null [@kruschke2010believe]. In our case, that means defining a range of effects we would consider equal to the drug having no effect at all. We can then -compute the prior probability of the drug's effect falling *outside this null-region*, +compute the prior probability of the drug's effect falling *outside this null-region*, and the prior probability of the drug's effect falling *within the null-region* to get our *prior odds*. Say any effect smaller than an hour of extra sleep is practically equivalent to being no effect at all, we would define our prior odds @@ -311,7 +311,7 @@ null region shrinks to a point, the change from the prior probability to the posterior probability of the null can be estimated by comparing the density of the null value between the two distributions.^[Note that as the width of null interval shrinks to zero, the prior probability and posterior probability of the -alternative tends towards 1.00.] This ratio is called the **Savage-Dickey ratio**, +alternative tends towards 1.00.] This ratio is called the **Savage-Dickey ratio**, and has the added benefit of also being an approximation of a Bayes factor comparing the estimated model against a model in which the parameter of interest has been restricted to a point-null: @@ -375,7 +375,7 @@ Thanks to the flexibility of Bayesian framework, it is also possible to compute a Bayes factor for **dividing** hypotheses - that is, for a null and alternative that are *complementary*, opposing one-sided hypotheses [@morey2014simple]. -For example, above we compared an alternative of $H_A$: *the drug has a positive effects* to the null $H_0$: *the drug has no effect*. But we can also compare instead the same alternative to its *complementary* hypothesis: $H_{-A}$: *the drug has a negative effects*. +For example, above we compared an alternative of $H_A$: *the drug has a positive effects* to the null $H_0$: *the drug has no effect*. 
But we can also compare instead the same alternative to its *complementary* hypothesis: $H_{-A}$: *the drug has a negative effects*. ```{r inteval_div, eval=FALSE} test_group2_dividing <- bayesfactor_parameters(model, null = c(-Inf, 0)) @@ -404,10 +404,10 @@ trickery. For example: $$ \underbrace{BF_{0 1$ contains values who received more impressive support from the data. +- A $BF > 1$ contains values who received more impressive support from the data. - A $BF < 1$ contains values whose credibility has *not* been impressively decreased by observing the data. Testing against values outside this interval will produce a Bayes factor larger than $1/BF$ in support of the alternative. @@ -484,10 +484,10 @@ the length of an iris' sepal using the `iris` data set. ### For Bayesian models (`brms` and `rstanarm`) **Note: In order to compute Bayes factors for Bayesian models, non-default arguments must be added upon fitting:** - + - `brmsfit` models **must** have been fitted with `save_pars = save_pars(all = TRUE)` - `stanreg` models **must** have been fitted with a defined `diagnostic_file`. - + Let's first fit 5 Bayesian regressions with `brms` to predict `Sepal.Length`: ```{r brms_disp, eval = FALSE} @@ -606,7 +606,7 @@ as.matrix(comparison) **NOTE:** In order to correctly and precisely estimate Bayes Factors, you always need the 4 P's: **P**roper **P**riors ^[[Robert, 2016](https://doi.org/10.1016/j.jmp.2015.08.002); [Kass & Raftery, 1993](https://doi.org/10.1080/01621459.1995.10476572); [Fernández, Ley, & Steel, 2001](https://doi.org/10.1016/S0304-4076(00)00076-2)], and a **P**lentiful **P**osterior ^[[Gronau, Singmann, & Wagenmakers, 2017](https://arxiv.org/abs/1710.08162)]. -### For Frequentist models via the BIC approximation +### For Frequentist models via the BIC approximation It is also possible to compute Bayes factors for the comparison of frequentist models. This is done by comparing BIC measures, allowing a Bayesian comparison @@ -634,7 +634,7 @@ As stated above when discussing one-sided hypothesis tests, we can create new models by imposing order restrictions on a given model. For example, consider the following model, in which we predict the length of an iris' sepal from the length of its petal, as well as from its species, with priors: -- $b_{petal} \sim N(0,2)$ +- $b_{petal} \sim N(0,2)$ - $b_{versicolors}\ \&\ b_{virginica} \sim N(0,1.2)$ ```{r, eval=FALSE} @@ -667,7 +667,7 @@ depends on our *prior* knowledge or hypotheses. For example, even a novice botanist will assume that it is unlikely that petal length will be *negatively* associated with sepal length - an iris with longer petals is likely larger, and thus will also have a longer sepal. And an expert botanist will perhaps assume -that setosas have smaller sepals than both versicolors and virginica. +that setosas have smaller sepals than both versicolors and virginica. These priors can be formulated as **restricted** priors [@morey_2015_blog; @morey2011bayesinterval]: @@ -785,9 +785,9 @@ bayesfactor_inclusion(comparison, match_models = TRUE) ### Comparison with JASP `bayesfactor_inclusion()` is meant to provide Bayes Factors per predictor, -similar to JASP's *Effects* option. +similar to JASP's *Effects* option. -Let's compare the two. +Let's compare the two. Note that for this comparison we will use the `{BayesFactor}` package, which is what _JASP_ uses under the hood. (Note that this package used different model-parameterization and different default prior-specifications compared to _Stan_-based packages.) 
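[Illustration added for context, not part of the diff.] The vignette hunk above describes computing per-predictor inclusion Bayes factors with `bayesfactor_inclusion()`, mirroring JASP's *Effects* analysis via the `{BayesFactor}` package. A minimal sketch of that workflow, under assumed data and settings (the `ToothGrowth` example and `anovaBF()` options are not taken from the vignette), could look like this:

```r
library(BayesFactor)
library(bayestestR)

# Illustrative data with two crossed factors (assumption, not the vignette's example)
data(ToothGrowth)
ToothGrowth$dose <- factor(ToothGrowth$dose)

# Fit all models nested within supp * dose, as JASP does for its "Effects" table
BF_models <- anovaBF(len ~ supp * dose, data = ToothGrowth, progress = FALSE)

# Per-predictor inclusion Bayes factors, averaged across all models
bayesfactor_inclusion(BF_models)

# Restrict the comparison to "matched" models (closer to JASP's matched-models option)
bayesfactor_inclusion(BF_models, match_models = TRUE)
```

With `match_models = TRUE`, inclusion evidence for an interaction is averaged only over models that also contain its constituent main effects, which is the behaviour the vignette compares against JASP.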
@@ -913,7 +913,7 @@ several models, weighted by the models' marginal likelihood (done via the models but is missing from others, it is assumed to be fixed a 0 (which can also be seen as a method of applying shrinkage to our estimates). This results in a posterior distribution across several models, which we can now treat like any -posterior distribution, and estimate the HDI. +posterior distribution, and estimate the HDI. In `bayestestR`, we can do this with the `weighted_posteriors()` function: @@ -942,7 +942,7 @@ We can also see that across both models, that now **the HDI does contain 0**. Thus we have resolved the conflict between the Bayes factor and the HDI [@rouder2018bayesian]! -**Note**: Parameters might play different roles across different models. +**Note**: Parameters might play different roles across different models. For example, the parameter `A` plays a different role in the model `Y ~ A + B` (where it is a *main* effect) than it does in the model `Y ~ A + B + A:B` (where @@ -1003,7 +1003,7 @@ estimate_contrasts(model, test = "bf", bf_prior = model) ## Specifying correct priors for factors {#contr_bayes} -This section introduces the biased priors obtained when using the common *effects* factor coding (`contr.sum`) or dummy factor coding (`contr.treatment`), and the solution of using orthonormal factor coding (`contr.equalprior`) [as outlined in @rouder2012default, section 7.2]. +This section introduces the biased priors obtained when using the common *effects* factor coding (`contr.sum`) or dummy factor coding (`contr.treatment`), and the solution of using orthonormal factor coding (`contr.equalprior`) [as outlined in @rouder2012default, section 7.2]. **Special care should be taken when working with factors with 3 or more levels**. @@ -1067,7 +1067,7 @@ Notice that, though the prior estimate for all 3 pairwise contrasts is ~0, the scale or the HDI is much narrower for the prior of the `setosa - versicolor` contrast! -**What happened???** +**What happened???** This is caused by an inherent bias in the priors introduced by the *effects* coding (it's even worse with the default treatment coding, because the prior for the intercept is usually drastically different from the effect's parameters). **And since it affects the priors, this bias will also bias the Bayes factors over / understating evidence for some contrasts over others!** @@ -1187,10 +1187,10 @@ em_sum <- suppressWarnings(emmeans(fit_sum, ~Species, data = iris)) bayesfactor_restricted(em_sum, fit_sum, hypothesis = hyp) ``` -***What happened???*** +***What happened???*** -1. The comparison of 2 levels all have a prior of ~0.5, as expected. -2. The comparison of 3 levels has different priors, depending on the order restriction - i.e. **some orders are *a priori* more likely than others!!!** +1. The comparison of 2 levels all have a prior of ~0.5, as expected. +2. The comparison of 3 levels has different priors, depending on the order restriction - i.e. **some orders are *a priori* more likely than others!!!** Again, this is solved by using the *equal prior* factor coding (from above). @@ -1222,9 +1222,9 @@ bayesfactor_restricted(em_bayes, fit_bayes, hypothesis = hyp) ### Conclusion -When comparing the results from the two factor coding schemes, we find: -1. In both cases, the estimated (posterior) means are quite similar (if not identical). -2. The priors and Bayes factors differ between the two schemes. +When comparing the results from the two factor coding schemes, we find: +1. 
In both cases, the estimated (posterior) means are quite similar (if not identical). +2. The priors and Bayes factors differ between the two schemes. 3. Only with `contr.equalprior*`, the prior distribution of the difference or the order of 3 (or more) means is balanced. Read more about the equal prior contrasts in the `contr.equalprior` docs!
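[Illustration added for context, not part of the diff.] To make the equal-prior recommendation from this vignette section concrete, a minimal sketch of assigning `contr.equalprior` to the `Species` factor before fitting is shown below; the commented `{brms}` call and the `normal(0, 1)` prior are assumptions chosen for illustration, not the vignette's exact code.

```r
library(bayestestR)

# Use equal-prior contrasts instead of contr.treatment / contr.sum,
# so pairwise differences and order restrictions get balanced priors
df <- iris
contrasts(df$Species) <- contr.equalprior

# A model could then be fitted on `df`, e.g. with {brms} (assumed installed):
# fit_bayes <- brms::brm(
#   Sepal.Length ~ Species,
#   data = df,
#   prior = brms::set_prior("normal(0, 1)", class = "b"),
#   refresh = 0
# )
```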