diff --git a/.github/workflows/R-CMD-check.yaml b/.github/workflows/R-CMD-check.yaml
index 0294d03a..a8b4e3ac 100644
--- a/.github/workflows/R-CMD-check.yaml
+++ b/.github/workflows/R-CMD-check.yaml
@@ -52,6 +52,7 @@ jobs:
           knitr
           collapse
           rmarkdown
+          ggplot2
           rjags
           coda
           runjags
diff --git a/.github/workflows/test-coverage.yaml b/.github/workflows/test-coverage.yaml
index e9c73fff..9d5b7ea7 100644
--- a/.github/workflows/test-coverage.yaml
+++ b/.github/workflows/test-coverage.yaml
@@ -34,6 +34,7 @@ jobs:
           knitr
           collapse
           rmarkdown
+          ggplot2
           rjags
           coda
           runjags
diff --git a/DESCRIPTION b/DESCRIPTION
index 60321631..0c05fb15 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -46,6 +46,7 @@ Roxygen: list(markdown = TRUE)
 RoxygenNote: 7.2.3
 Suggests:
     cmdstanr (>= 0.4.0),
+    ggplot2 (>= 2.0.0),
     knitr,
     collapse,
     rmarkdown,
diff --git a/NAMESPACE b/NAMESPACE
index f8ca690e..c55f0197 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -128,6 +128,7 @@ importFrom(stats,logLik)
 importFrom(stats,make.link)
 importFrom(stats,median)
 importFrom(stats,model.frame)
+importFrom(stats,na.fail)
 importFrom(stats,na.pass)
 importFrom(stats,pacf)
 importFrom(stats,pbeta)
@@ -165,4 +166,5 @@ importFrom(stats,update.formula)
 importFrom(stats,var)
 importFrom(utils,getFromNamespace)
 importFrom(utils,head)
+importFrom(utils,lsf.str)
 importFrom(utils,tail)
diff --git a/R/as.data.frame.mvgam.R b/R/as.data.frame.mvgam.R
index 36433bda..cb6fe6a2 100644
--- a/R/as.data.frame.mvgam.R
+++ b/R/as.data.frame.mvgam.R
@@ -38,9 +38,12 @@
 #'head(beta_draws_df)
 #'str(beta_draws_df)
 #'
-#'beta_draws_mat <- as.data.frame(mod1, variable = 'betas')
+#'beta_draws_mat <- as.matrix(mod1, variable = 'betas')
 #'head(beta_draws_mat)
-#'str(beta_draws_mat)}
+#'str(beta_draws_mat)
+#'
+#'shape_pars <- as.matrix(mod1, variable = 'shape', regex = TRUE)
+#'head(shape_pars)}
 NULL
 
 #'@rdname mvgam_draws
@@ -228,7 +231,6 @@ as.data.frame.mvgam = function(x,
   }
 
   return(post)
-
 }
 
 #'@rdname mvgam_draws
@@ -244,6 +246,5 @@ as.matrix.mvgam = function(x,
   colnames(post) <- varnames
 
   return(post)
-
 }
diff --git a/R/dynamic.R b/R/dynamic.R
index 8ebdba14..cd681b75 100644
--- a/R/dynamic.R
+++ b/R/dynamic.R
@@ -27,6 +27,61 @@
 #' set automatically to ensure enough basis functions are used to approximate the expected
 #' wiggliness of the underlying dynamic function (\code{k} will increase as \code{rho} decreases)
 #' @rdname dynamic
+#'@examples
+#'\dontrun{
+#'# Simulate a time-varying coefficient
+#'# (as a Gaussian Process with length scale = 10)
+#'set.seed(1111)
+#'N <- 200
+#'beta <- mvgam:::sim_gp(rnorm(1),
+#'                       alpha_gp = 0.75,
+#'                       rho_gp = 10,
+#'                       h = N) + 0.5
+#'plot(beta, type = 'l', lwd = 3,
+#'     bty = 'l', xlab = 'Time',
+#'     ylab = 'Coefficient',
+#'     col = 'darkred')
+#'
+#'# Simulate the predictor as a standard normal
+#'predictor <- rnorm(N, sd = 1)
+#'
+#'# Simulate a Gaussian outcome variable
+#'out <- rnorm(N, mean = 4 + beta * predictor,
+#'             sd = 0.25)
+#'time <- seq_along(predictor)
+#'plot(out, type = 'l', lwd = 3,
+#'     bty = 'l', xlab = 'Time', ylab = 'Outcome',
+#'     col = 'darkred')
+#'
+#'# Gather into a data.frame and fit a dynamic coefficient model
+#'data <- data.frame(out, predictor, time)
+#'
+#'# Split into training and testing
+#'data_train <- data[1:190,]
+#'data_test <- data[191:200,]
+#'
+#'# Fit a model using the dynamic function
+#'mod <- mvgam(out ~
+#'              # mis-specify the length scale slightly as this
+#'              # won't be known in practice
+#'              dynamic(predictor, rho = 8, stationary = TRUE),
+#'             family = gaussian(),
+#'             data = data_train)
+#'
+#'# Inspect the summary
+#'summary(mod)
+#'
+#'# Plot the time-varying coefficient estimates
+#'plot(mod, type = 'smooths')
+#'
+#'# Extrapolate the coefficient forward in time
+#'plot_mvgam_smooth(mod, smooth = 1, newdata = data)
+#'abline(v = 190, lty = 'dashed', lwd = 2)
+#'
+#'# Overlay the true simulated time-varying coefficient
+#'lines(beta, lwd = 2.5, col = 'white')
+#'lines(beta, lwd = 2)
+#'}
 #' @author Nicholas J Clark
 #' @export
 dynamic = function(variable, rho = 5, stationary = TRUE){
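A quick aside on the `regex = TRUE` pattern introduced in the `as.data.frame.mvgam.R` examples above: the returned draw matrices are plain numeric matrices, so base R summaries apply directly. A minimal sketch, assuming a fitted model `mod1` as in those examples (the 'sigma' pattern is hypothetical; available parameter names depend on the model and family):

```r
# Extract all parameters whose names match 'sigma' (hypothetical pattern;
# parameter names vary with the fitted model and observation family)
sigma_pars <- as.matrix(mod1, variable = 'sigma', regex = TRUE)

# Posterior quantile summaries computed directly on the draw matrix
apply(sigma_pars, 2, quantile, probs = c(0.025, 0.5, 0.975))
```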
diff --git a/R/evaluate_mvgams.R b/R/evaluate_mvgams.R
index ef3f3af4..8b3a0246 100644
--- a/R/evaluate_mvgams.R
+++ b/R/evaluate_mvgams.R
@@ -4,6 +4,7 @@
 #'@importFrom stats quantile ecdf median predict
 #'@importFrom parallel clusterExport stopCluster setDefaultCluster clusterEvalQ
 #'@importFrom grDevices devAskNewPage
+#'@importFrom utils lsf.str
 #'@param object \code{list} object returned from \code{mvgam}
 #'@param n_samples \code{integer} specifying the number of samples to generate from the model's
 #'posterior distribution
@@ -56,7 +57,75 @@
 #'extrapolation of smooths and let the latent trends in the \code{mvgam} model capture any
 #'temporal dependencies in the data. These trends are time series models and so will provide much more
 #'stable forecasts
+#'@examples
+#'\dontrun{
+#'# Simulate from a Poisson-AR2 model with a seasonal smooth
+#'set.seed(100)
+#'dat <- sim_mvgam(T = 75,
+#'                 n_series = 1,
+#'                 prop_trend = 0.75,
+#'                 trend_model = 'AR2',
+#'                 family = poisson())
 #'
+#'# Plot the time series
+#'plot_mvgam_series(data = dat$data_train,
+#'                  newdata = dat$data_test,
+#'                  series = 1)
+#'
+#'# Fit an appropriate model
+#'mod_ar2 <- mvgam(y ~ s(season, bs = 'cc'),
+#'                 trend_model = 'AR2',
+#'                 family = poisson(),
+#'                 data = dat$data_train,
+#'                 newdata = dat$data_test)
+#'
+#'# Fit a less appropriate model
+#'mod_rw <- mvgam(y ~ s(season, bs = 'cc'),
+#'                trend_model = 'RW',
+#'                family = poisson(),
+#'                data = dat$data_train,
+#'                newdata = dat$data_test)
+#'
+#'# Compare Discrete Ranked Probability Scores for the testing period
+#'fc_ar2 <- forecast(mod_ar2)
+#'fc_rw <- forecast(mod_rw)
+#'score_ar2 <- score(fc_ar2, score = 'drps')
+#'score_rw <- score(fc_rw, score = 'drps')
+#'sum(score_ar2$series_1$score)
+#'sum(score_rw$series_1$score)
+#'
+#'# Use rolling evaluation for approximate comparisons of 3-step ahead
+#'# forecasts across the training period
+#'compare_mvgams(mod_ar2,
+#'               mod_rw,
+#'               fc_horizon = 3,
+#'               n_samples = 1000,
+#'               n_evaluations = 5)
+#'
+#'# Now use approximate leave-future-out CV to compare
+#'# rolling forecasts; start at time point 40 to reduce
+#'# computational time and to ensure enough data is available
+#'# for estimating model parameters
+#'lfo_ar2 <- lfo_cv(mod_ar2,
+#'                  min_t = 40,
+#'                  fc_horizon = 3)
+#'lfo_rw <- lfo_cv(mod_rw,
+#'                 min_t = 40,
+#'                 fc_horizon = 3)
+#'
+#'# Plot Pareto-K values and ELPD estimates
+#'plot(lfo_ar2)
+#'plot(lfo_rw)
+#'
+#'# Proportion of timepoints in which AR2 model gives
+#'# better forecasts
+#'length(which((lfo_ar2$elpds - lfo_rw$elpds) > 0)) /
+#'   length(lfo_ar2$elpds)
+#'
+#'# A higher total ELPD is preferred
+#'lfo_ar2$sum_ELPD
+#'lfo_rw$sum_ELPD
+#'}
 #' @name evaluate_mvgams
 NULL
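For readers unfamiliar with the scoring rule used in the examples above, here is a minimal standalone sketch of the discrete rank probability score (DRPS). This illustrates the definition only and is not the package's internal implementation:

```r
# DRPS for one observation: sum of squared differences between the forecast
# CDF (estimated from posterior predictive draws) and the observation's
# step CDF, evaluated over a grid of count thresholds
drps_score <- function(truth, fc_draws, upper = 1000) {
  thresholds <- 0:upper
  fc_cdf <- ecdf(fc_draws)(thresholds)        # forecast CDF
  obs_cdf <- as.numeric(thresholds >= truth)  # step CDF of the observation
  sum((fc_cdf - obs_cdf)^2)
}

# Toy usage: score 1000 Poisson draws against an observed count of 4
drps_score(truth = 4, fc_draws = rpois(1000, lambda = 5))
```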
diff --git a/R/lfo_cv.mvgam.R b/R/lfo_cv.mvgam.R
index ca7fd6c4..92acaa59 100644
--- a/R/lfo_cv.mvgam.R
+++ b/R/lfo_cv.mvgam.R
@@ -37,6 +37,67 @@
 #'the Pareto-k shape values and the specified `pareto_k_threshold`
 #'@references Paul-Christian Bürkner, Jonah Gabry & Aki Vehtari (2020). Approximate leave-future-out cross-validation for Bayesian time series models.
 #'Journal of Statistical Computation and Simulation. 90:14, 2499-2523.
+#'@examples
+#'\dontrun{
+#'# Simulate from a Poisson-AR2 model with a seasonal smooth
+#'set.seed(100)
+#'dat <- sim_mvgam(T = 75,
+#'                 n_series = 1,
+#'                 prop_trend = 0.75,
+#'                 trend_model = 'AR2',
+#'                 family = poisson())
+#'
+#'# Plot the time series
+#'plot_mvgam_series(data = dat$data_train,
+#'                  newdata = dat$data_test,
+#'                  series = 1)
+#'
+#'# Fit an appropriate model
+#'mod_ar2 <- mvgam(y ~ s(season, bs = 'cc'),
+#'                 trend_model = 'AR2',
+#'                 family = poisson(),
+#'                 data = dat$data_train,
+#'                 newdata = dat$data_test)
+#'
+#'# Fit a less appropriate model
+#'mod_rw <- mvgam(y ~ s(season, bs = 'cc'),
+#'                trend_model = 'RW',
+#'                family = poisson(),
+#'                data = dat$data_train,
+#'                newdata = dat$data_test)
+#'
+#'# Compare Discrete Ranked Probability Scores for the testing period
+#'fc_ar2 <- forecast(mod_ar2)
+#'fc_rw <- forecast(mod_rw)
+#'score_ar2 <- score(fc_ar2, score = 'drps')
+#'score_rw <- score(fc_rw, score = 'drps')
+#'sum(score_ar2$series_1$score)
+#'sum(score_rw$series_1$score)
+#'
+#'# Now use approximate leave-future-out CV to compare
+#'# rolling forecasts; start at time point 40 to reduce
+#'# computational time and to ensure enough data is available
+#'# for estimating model parameters
+#'lfo_ar2 <- lfo_cv(mod_ar2,
+#'                  min_t = 40,
+#'                  fc_horizon = 3)
+#'lfo_rw <- lfo_cv(mod_rw,
+#'                 min_t = 40,
+#'                 fc_horizon = 3)
+#'
+#'# Plot Pareto-K values and ELPD estimates
+#'plot(lfo_ar2)
+#'plot(lfo_rw)
+#'
+#'# Proportion of timepoints in which AR2 model gives
+#'# better forecasts
+#'length(which((lfo_ar2$elpds - lfo_rw$elpds) > 0)) /
+#'   length(lfo_ar2$elpds)
+#'
+#'# A higher total ELPD is preferred
+#'lfo_ar2$sum_ELPD
+#'lfo_rw$sum_ELPD
+#'}
 #'@author Nicholas J Clark
 #'@export
 lfo_cv <- function(object, ...){
diff --git a/R/mvgam.R b/R/mvgam.R
index ad7d0fec..ea46a876 100644
--- a/R/mvgam.R
+++ b/R/mvgam.R
@@ -1510,29 +1510,56 @@ mvgam = function(formula,
 
       # Condition the model using Cmdstan
       if(prior_simulation){
-        fit1 <- cmd_mod$sample(data = model_data,
-                               chains = chains,
-                               parallel_chains = min(c(chains, parallel::detectCores() - 1)),
-                               threads_per_chain = if(threads > 1){ threads } else { NULL },
-                               refresh = 100,
-                               init = inits,
-                               max_treedepth = 12,
-                               adapt_delta = 0.8,
-                               iter_sampling = samples,
-                               iter_warmup = 200,
-                               show_messages = FALSE,
-                               diagnostics = NULL)
+        if(parallel){
+          fit1 <- cmd_mod$sample(data = model_data,
+                                 chains = chains,
+                                 parallel_chains = min(c(chains, parallel::detectCores() - 1)),
+                                 threads_per_chain = if(threads > 1){ threads } else { NULL },
+                                 refresh = 100,
+                                 init = inits,
+                                 max_treedepth = 12,
+                                 adapt_delta = 0.8,
+                                 iter_sampling = samples,
+                                 iter_warmup = 200,
+                                 show_messages = FALSE,
+                                 diagnostics = NULL)
+        } else {
+          fit1 <- cmd_mod$sample(data = model_data,
+                                 chains = chains,
+                                 threads_per_chain = if(threads > 1){ threads } else { NULL },
+                                 refresh = 100,
+                                 init = inits,
+                                 max_treedepth = 12,
+                                 adapt_delta = 0.8,
+                                 iter_sampling = samples,
+                                 iter_warmup = 200,
+                                 show_messages = FALSE,
+                                 diagnostics = NULL)
+        }
+
       } else {
-        fit1 <- cmd_mod$sample(data = model_data,
-                               chains = chains,
-                               parallel_chains = min(c(chains, parallel::detectCores() - 1)),
-                               threads_per_chain = if(threads > 1){ threads } else { NULL },
-                               refresh = 100,
-                               init = inits,
-                               max_treedepth = max_treedepth,
-                               adapt_delta = adapt_delta,
-                               iter_sampling = samples,
-                               iter_warmup = burnin)
+        if(parallel){
+          fit1 <- cmd_mod$sample(data = model_data,
+                                 chains = chains,
+                                 parallel_chains = min(c(chains, parallel::detectCores() - 1)),
+                                 threads_per_chain = if(threads > 1){ threads } else { NULL },
+                                 refresh = 100,
+                                 init = inits,
+                                 max_treedepth = max_treedepth,
+                                 adapt_delta = adapt_delta,
+                                 iter_sampling = samples,
+                                 iter_warmup = burnin)
+        } else {
+          fit1 <- cmd_mod$sample(data = model_data,
+                                 chains = chains,
+                                 threads_per_chain = if(threads > 1){ threads } else { NULL },
+                                 refresh = 100,
+                                 init = inits,
+                                 max_treedepth = max_treedepth,
+                                 adapt_delta = adapt_delta,
+                                 iter_sampling = samples,
+                                 iter_warmup = burnin)
+        }
       }
 
       # Convert model files to stan_fit class for consistency
@@ -1570,18 +1597,33 @@ mvgam = function(formula,
 
       stan_control <- list(max_treedepth = max_treedepth,
                            adapt_delta = adapt_delta)
 
-      fit1 <- rstan::stan(model_code = vectorised$model_file,
-                          iter = samples,
-                          warmup = burnin,
-                          chains = chains,
-                          data = model_data,
-                          cores = min(c(chains, parallel::detectCores() - 1)),
-                          init = inits,
-                          verbose = FALSE,
-                          thin = thin,
-                          control = stan_control,
-                          pars = param,
-                          refresh = 100)
+      if(parallel){
+        fit1 <- rstan::stan(model_code = vectorised$model_file,
+                            iter = samples,
+                            warmup = burnin,
+                            chains = chains,
+                            data = model_data,
+                            cores = min(c(chains, parallel::detectCores() - 1)),
+                            init = inits,
+                            verbose = FALSE,
+                            thin = thin,
+                            control = stan_control,
+                            pars = param,
+                            refresh = 100)
+      } else {
+        fit1 <- rstan::stan(model_code = vectorised$model_file,
+                            iter = samples,
+                            warmup = burnin,
+                            chains = chains,
+                            data = model_data,
+                            cores = 1,
+                            init = inits,
+                            verbose = FALSE,
+                            thin = thin,
+                            control = stan_control,
+                            pars = param,
+                            refresh = 100)
+      }
 
       out_gam_mod <- fit1
     }
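The new `parallel` branches above only toggle between multi-core and single-core sampling; all other sampler settings are unchanged. A hedged usage sketch, mirroring the vignette chunks changed later in this diff (`dat` as in the examples above):

```r
# Run chains sequentially; useful when forked parallelism is unavailable
# (e.g. on some CI runners) or when debugging sampler output
mod <- mvgam(y ~ s(season, bs = 'cc'),
             trend_model = 'AR2',
             family = poisson(),
             data = dat$data_train,
             parallel = FALSE)
```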
diff --git a/R/mvgam_setup.R b/R/mvgam_setup.R
index 47318a97..da623931 100644
--- a/R/mvgam_setup.R
+++ b/R/mvgam_setup.R
@@ -1,4 +1,5 @@
 #' Generic GAM setup function
+#' @importFrom stats na.fail
 #' @noRd
 #'
 mvgam_setup <- function(formula,
diff --git a/R/plot.mvgam.R b/R/plot.mvgam.R
index 69cebb65..c889c1ab 100644
--- a/R/plot.mvgam.R
+++ b/R/plot.mvgam.R
@@ -5,7 +5,7 @@
 #'
 #'@param x \code{list} object returned from \code{mvgam}. See [mvgam()]
 #'@param type \code{character} specifying which type of plot to return. Options are:
-#''series,
+#'series,
 #'residuals,
 #'smooths,
 #'re (random effect smooths),
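Since the valid `type` options for `plot.mvgam` are enumerated above, a quick tour may help (assuming a fitted model `mod`; only the types visible in this hunk are shown):

```r
plot(mod, type = 'series')     # plot the observed series
plot(mod, type = 'residuals')  # residual diagnostics
plot(mod, type = 'smooths')    # estimated smooth functions
plot(mod, type = 're')         # random effect smooths
```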
diff --git a/man/dynamic.Rd b/man/dynamic.Rd
index 320d864e..9c92e3fd 100644
--- a/man/dynamic.Rd
+++ b/man/dynamic.Rd
@@ -38,6 +38,62 @@ Values of \code{k} are set automatically to ensure enough
 basis functions are used to approximate the expected
 wiggliness of the underlying dynamic function (\code{k} will increase as \code{rho} decreases)
 }
+\examples{
+\dontrun{
+# Simulate a time-varying coefficient
+# (as a Gaussian Process with length scale = 10)
+set.seed(1111)
+N <- 200
+beta <- mvgam:::sim_gp(rnorm(1),
+                       alpha_gp = 0.75,
+                       rho_gp = 10,
+                       h = N) + 0.5
+plot(beta, type = 'l', lwd = 3,
+     bty = 'l', xlab = 'Time',
+     ylab = 'Coefficient',
+     col = 'darkred')
+
+# Simulate the predictor as a standard normal
+predictor <- rnorm(N, sd = 1)
+
+# Simulate a Gaussian outcome variable
+out <- rnorm(N, mean = 4 + beta * predictor,
+             sd = 0.25)
+time <- seq_along(predictor)
+plot(out, type = 'l', lwd = 3,
+     bty = 'l', xlab = 'Time', ylab = 'Outcome',
+     col = 'darkred')
+
+# Gather into a data.frame and fit a dynamic coefficient model
+data <- data.frame(out, predictor, time)
+
+# Split into training and testing
+data_train <- data[1:190,]
+data_test <- data[191:200,]
+
+# Fit a model using the dynamic function
+mod <- mvgam(out ~
+              # mis-specify the length scale slightly as this
+              # won't be known in practice
+              dynamic(predictor, rho = 8, stationary = TRUE),
+             family = gaussian(),
+             data = data_train)
+
+# Inspect the summary
+summary(mod)
+
+# Plot the time-varying coefficient estimates
+plot(mod, type = 'smooths')
+
+# Extrapolate the coefficient forward in time
+plot_mvgam_smooth(mod, smooth = 1, newdata = data)
+abline(v = 190, lty = 'dashed', lwd = 2)
+
+# Overlay the true simulated time-varying coefficient
+lines(beta, lwd = 2.5, col = 'white')
+lines(beta, lwd = 2)
+}
+}
 \author{
 Nicholas J Clark
 }
diff --git a/man/evaluate_mvgams.Rd b/man/evaluate_mvgams.Rd
index d9345e02..602bc317 100644
--- a/man/evaluate_mvgams.Rd
+++ b/man/evaluate_mvgams.Rd
@@ -122,3 +122,73 @@ for whether or not the true value lies within the forecast's 90\% prediction int
 provides a series of summary plots to facilitate model selection. It is essentially
 a wrapper for \code{roll_eval_mvgam}
 }
+\examples{
+\dontrun{
+# Simulate from a Poisson-AR2 model with a seasonal smooth
+set.seed(100)
+dat <- sim_mvgam(T = 75,
+                 n_series = 1,
+                 prop_trend = 0.75,
+                 trend_model = 'AR2',
+                 family = poisson())
+
+# Plot the time series
+plot_mvgam_series(data = dat$data_train,
+                  newdata = dat$data_test,
+                  series = 1)
+
+# Fit an appropriate model
+mod_ar2 <- mvgam(y ~ s(season, bs = 'cc'),
+                 trend_model = 'AR2',
+                 family = poisson(),
+                 data = dat$data_train,
+                 newdata = dat$data_test)
+
+# Fit a less appropriate model
+mod_rw <- mvgam(y ~ s(season, bs = 'cc'),
+                trend_model = 'RW',
+                family = poisson(),
+                data = dat$data_train,
+                newdata = dat$data_test)
+
+# Compare Discrete Ranked Probability Scores for the testing period
+fc_ar2 <- forecast(mod_ar2)
+fc_rw <- forecast(mod_rw)
+score_ar2 <- score(fc_ar2, score = 'drps')
+score_rw <- score(fc_rw, score = 'drps')
+sum(score_ar2$series_1$score)
+sum(score_rw$series_1$score)
+
+# Use rolling evaluation for approximate comparisons of 3-step ahead
+# forecasts across the training period
+compare_mvgams(mod_ar2,
+               mod_rw,
+               fc_horizon = 3,
+               n_samples = 1000,
+               n_evaluations = 5)
+
+# Now use approximate leave-future-out CV to compare
+# rolling forecasts; start at time point 40 to reduce
+# computational time and to ensure enough data is available
+# for estimating model parameters
+lfo_ar2 <- lfo_cv(mod_ar2,
+                  min_t = 40,
+                  fc_horizon = 3)
+lfo_rw <- lfo_cv(mod_rw,
+                 min_t = 40,
+                 fc_horizon = 3)
+
+# Plot Pareto-K values and ELPD estimates
+plot(lfo_ar2)
+plot(lfo_rw)
+
+# Proportion of timepoints in which AR2 model gives
+# better forecasts
+length(which((lfo_ar2$elpds - lfo_rw$elpds) > 0)) /
+   length(lfo_ar2$elpds)
+
+# A higher total ELPD is preferred
+lfo_ar2$sum_ELPD
+lfo_rw$sum_ELPD
+}
+}
diff --git a/man/lfo_cv.mvgam.Rd b/man/lfo_cv.mvgam.Rd
index a154dcb2..0c16acc2 100644
--- a/man/lfo_cv.mvgam.Rd
+++ b/man/lfo_cv.mvgam.Rd
@@ -57,6 +57,68 @@ crossing a certain threshold \code{pareto_k_threshold}. Only then do we refit th
 all of the observations up to the time of the failure. We then restart the process and
 iterate forward until the next refit is triggered (Bürkner et al. 2020).
 }
+\examples{
+\dontrun{
+# Simulate from a Poisson-AR2 model with a seasonal smooth
+set.seed(100)
+dat <- sim_mvgam(T = 75,
+                 n_series = 1,
+                 prop_trend = 0.75,
+                 trend_model = 'AR2',
+                 family = poisson())
+
+# Plot the time series
+plot_mvgam_series(data = dat$data_train,
+                  newdata = dat$data_test,
+                  series = 1)
+
+# Fit an appropriate model
+mod_ar2 <- mvgam(y ~ s(season, bs = 'cc'),
+                 trend_model = 'AR2',
+                 family = poisson(),
+                 data = dat$data_train,
+                 newdata = dat$data_test)
+
+# Fit a less appropriate model
+mod_rw <- mvgam(y ~ s(season, bs = 'cc'),
+                trend_model = 'RW',
+                family = poisson(),
+                data = dat$data_train,
+                newdata = dat$data_test)
+
+# Compare Discrete Ranked Probability Scores for the testing period
+fc_ar2 <- forecast(mod_ar2)
+fc_rw <- forecast(mod_rw)
+score_ar2 <- score(fc_ar2, score = 'drps')
+score_rw <- score(fc_rw, score = 'drps')
+sum(score_ar2$series_1$score)
+sum(score_rw$series_1$score)
+
+# Now use approximate leave-future-out CV to compare
+# rolling forecasts; start at time point 40 to reduce
+# computational time and to ensure enough data is available
+# for estimating model parameters
+lfo_ar2 <- lfo_cv(mod_ar2,
+                  min_t = 40,
+                  fc_horizon = 3)
+lfo_rw <- lfo_cv(mod_rw,
+                 min_t = 40,
+                 fc_horizon = 3)
+
+# Plot Pareto-K values and ELPD estimates
+plot(lfo_ar2)
+plot(lfo_rw)
+
+# Proportion of timepoints in which AR2 model gives
+# better forecasts
+length(which((lfo_ar2$elpds - lfo_rw$elpds) > 0)) /
+   length(lfo_ar2$elpds)
+
+# A higher total ELPD is preferred
+lfo_ar2$sum_ELPD
+lfo_rw$sum_ELPD
+}
+}
 \references{
 Paul-Christian Bürkner, Jonah Gabry & Aki Vehtari (2020). Approximate leave-future-out cross-validation for Bayesian time series models.
 Journal of Statistical Computation and Simulation. 90:14, 2499-2523.
diff --git a/man/mvgam_draws.Rd b/man/mvgam_draws.Rd
index dd4a5159..92839e06 100644
--- a/man/mvgam_draws.Rd
+++ b/man/mvgam_draws.Rd
@@ -66,7 +66,10 @@ beta_draws_df <- as.data.frame(mod1, variable = 'betas')
 head(beta_draws_df)
 str(beta_draws_df)
 
-beta_draws_mat <- as.data.frame(mod1, variable = 'betas')
+beta_draws_mat <- as.matrix(mod1, variable = 'betas')
 head(beta_draws_mat)
-str(beta_draws_mat)}
+str(beta_draws_mat)
+
+shape_pars <- as.matrix(mod1, variable = 'shape', regex = TRUE)
+head(shape_pars)}
 }
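Because the draw matrices documented in `mvgam_draws.Rd` carry one named column per parameter, they also plug into generic posterior tooling. A sketch, assuming the `posterior` package is available (an assumption here, not a dependency added by this diff):

```r
library(posterior)

# Convert the beta draw matrix to a draws object and summarise it
beta_draws <- as.matrix(mod1, variable = 'betas')
summarise_draws(as_draws_matrix(beta_draws))
```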
diff --git a/vignettes/mvgam_overview.Rmd b/vignettes/mvgam_overview.Rmd
index 015c88c5..e3dd32ac 100644
--- a/vignettes/mvgam_overview.Rmd
+++ b/vignettes/mvgam_overview.Rmd
@@ -144,7 +144,8 @@ We are now ready for our first `mvgam` model. The syntax will be familiar to use
 ```{r model1, include=FALSE, results='hide'}
 model1 <- mvgam(count ~ s(year_fac, bs = 're') - 1,
                 family = poisson(),
-                data = model_data)
+                data = model_data,
+                parallel = FALSE)
 ```
 
 ```{r eval=FALSE}
@@ -224,7 +225,8 @@ model_data %>%
 model1b <- mvgam(count ~ s(year_fac, bs = 're') - 1,
                  family = poisson(),
                  data = data_train,
-                 newdata = data_test)
+                 newdata = data_test,
+                 parallel = FALSE)
 ```
 
 ```{r eval=FALSE}
@@ -262,7 +264,8 @@ model2 <- mvgam(count ~ s(year_fac, bs = 're') +
                   ndvi - 1,
                 family = poisson(),
                 data = data_train,
-                newdata = data_test)
+                newdata = data_test,
+                parallel = FALSE)
 ```
 
 ```{r eval=FALSE}
@@ -342,7 +345,8 @@ model3 <- mvgam(count ~ s(time, bs = 'bs', k = 15) +
                   ndvi,
                 family = poisson(),
                 data = data_train,
-                newdata = data_test)
+                newdata = data_test,
+                parallel = FALSE)
 ```
 
 ```{r eval=FALSE}
@@ -425,7 +429,8 @@ model4 <- mvgam(count ~ s(ndvi, k = 6),
                 family = poisson(),
                 data = data_train,
                 newdata = data_test,
-                trend_model = 'AR1')
+                trend_model = 'AR1',
+                parallel = FALSE)
 ```
 
 ```{r eval=FALSE}
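The vignette chunks above pin `parallel = FALSE` so the documented models build reliably on single-core machines. Dropping the argument restores the multi-core behaviour suggested by the `R/mvgam.R` changes (assuming the new argument defaults to `TRUE`, as these vignette edits imply); a sketch using the vignette's final model:

```r
# Same model as `model4`, but letting chains run on up to
# min(chains, parallel::detectCores() - 1) cores
model4_par <- mvgam(count ~ s(ndvi, k = 6),
                    family = poisson(),
                    data = data_train,
                    newdata = data_test,
                    trend_model = 'AR1')
```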