diff --git a/.Rbuildignore b/.Rbuildignore index 20163c0..099b1dc 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -5,12 +5,11 @@ note ^.*\.Rproj$ ^\.Rproj\.user$ ^\.travis\.yml$ +^_pkgdown\.yml$ ^cran-comments\.md$ ^LICENSE$ ^CRAN-RELEASE$ ^CRAN-SUBMISSION$ -^_pkgdown\.yml$ ^docs$ ^pkgdown$ ^\.github$ -^codecov\.yml$ diff --git a/DESCRIPTION b/DESCRIPTION index a1e6316..8a3374d 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,33 +1,32 @@ Package: bruceR Title: Broadly Useful Convenient and Efficient R Functions -Version: 0.8.11-1 -Date: 2023-05-05 +Version: 2023.8 +Date: 2023-08-08 Authors@R: c(person(given = "Han-Wu-Shuang", family = "Bao", role = c("aut", "cre"), email = "baohws@foxmail.com", comment = c(ORCID = "0000-0003-3043-710X"))) -Author: Han-Wu-Shuang Bao [aut, cre] Maintainer: Han-Wu-Shuang Bao Description: - Broadly useful convenient and efficient R functions - that bring users concise and elegant R data analyses. - This package includes easy-to-use functions for - (1) basic R programming - (e.g., set working directory to the path of currently opened file; - import/export data from/to files in any format; - print tables to Microsoft Word); - (2) multivariate computation - (e.g., compute scale sums/means/... with reverse scoring); - (3) reliability analyses and factor analyses; - (4) descriptive statistics and correlation analyses; - (5) t-test, multi-factor analysis of variance (ANOVA), - simple-effect analysis, and post-hoc multiple comparison; - (6) tidy report of statistical models - (to R Console and Microsoft Word); - (7) mediation and moderation analyses (PROCESS); - and (8) additional toolbox for statistics and graphics. + Broadly useful convenient and efficient R functions + that bring users concise and elegant R data analyses. 
+ This package includes easy-to-use functions for + (1) basic R programming + (e.g., set working directory to the path of currently opened file; + import/export data from/to files in any format; + print tables to Microsoft Word); + (2) multivariate computation + (e.g., compute scale sums/means/... with reverse scoring); + (3) reliability analyses and factor analyses; + (4) descriptive statistics and correlation analyses; + (5) t-test, multi-factor analysis of variance (ANOVA), + simple-effect analysis, and post-hoc multiple comparison; + (6) tidy report of statistical models + (to R Console and Microsoft Word); + (7) mediation and moderation analyses (PROCESS); + and (8) additional toolbox for statistics and graphics. License: GPL-3 Encoding: UTF-8 LazyData: true diff --git a/NAMESPACE b/NAMESPACE index 2fb1d80..75de4ab 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -40,6 +40,7 @@ export(added) export(cc) export(ccf_plot) export(cor_diff) +export(cor_multilevel) export(dtime) export(export) export(formatF) @@ -88,6 +89,7 @@ importFrom(dplyr,across) importFrom(dplyr,group_by) importFrom(dplyr,left_join) importFrom(dplyr,mutate) +importFrom(dplyr,rename) importFrom(dplyr,select) importFrom(dplyr,summarise) importFrom(dplyr,sym) diff --git a/NEWS.md b/NEWS.md index 1b47fc2..95eb766 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,6 +1,16 @@ **Please check the [latest news (change log)](https://psychbruce.github.io/bruceR/news/index.html) and keep this package updated.** -# bruceR 0.9.0 (ongoing...) +# bruceR 2023.8 + +## New Features + +- New function `cor_multilevel()`: Multilevel correlations (within-level and between-level). + +## Major Changes + +- Now use "YYYY.M" as package version number. +- Improved `Corr()`: Now it uses `ggplot2` to produce correlation plot. +- Deprecated the argument `nsmall` for all functions. Now always use `digits` instead. (Both were acceptable in former versions.) 
## Bug Fixes diff --git a/R/bruceR-stats_1_basic.R b/R/bruceR-stats_1_basic.R index 95009a4..b2e3abd 100644 --- a/R/bruceR-stats_1_basic.R +++ b/R/bruceR-stats_1_basic.R @@ -31,7 +31,7 @@ #' Passing to \code{\link[data.table:data.table]{data.table}}: #' \code{DT[ , , by]} #' @param drop Drop existing variables and return only new variables? -#' Default is \code{FALSE}, which returns all variables. +#' Defaults to \code{FALSE}, which returns all variables. #' #' @return #' \code{add()} returns a new @@ -241,8 +241,8 @@ RESCALE = function(var, from=range(var, na.rm=T), to) { #' and it is just equivalent to \code{RESCALE(var, to=0:1)}. #' #' @param v Variable (numeric vector). -#' @param min Minimum value (default is 0). -#' @param max Maximum value (default is 1). +#' @param min Minimum value (defaults to 0). +#' @param max Maximum value (defaults to 1). #' #' @return A vector of rescaled variable. #' @@ -263,7 +263,7 @@ scaler = function(v, min=0, max=1) { #' #' @param z,t,f,r,chi2 \emph{z}, \emph{t}, \emph{F}, \emph{r}, \eqn{\chi}^2 value. #' @param n,df,df1,df2 Sample size or degree of freedom. -#' @param digits,nsmall Number of decimal places of output. Default is \code{2}. +#' @param digits Number of decimal places of output. Defaults to \code{2}. #' #' @return \emph{p} value statistics. 
#' @@ -282,22 +282,22 @@ scaler = function(v, min=0, max=1) { #' #' @export p = function(z=NULL, t=NULL, f=NULL, r=NULL, chi2=NULL, - n=NULL, df=NULL, df1=NULL, df2=NULL, digits=2, nsmall=digits) { - if(!is.null(z)) {p = p.z(z); pstat = Glue("<> = {z:.{nsmall}}, <> {p.trans2(p)} {sig.trans(p)}")} - if(!is.null(t)) {p = p.t(t, df); pstat = Glue("<>({df}) = {t:.{nsmall}}, <> {p.trans2(p)} {sig.trans(p)}")} - if(!is.null(f)) {p = p.f(f, df1, df2); pstat = Glue("<>({df1}, {df2}) = {f:.{nsmall}}, <> {p.trans2(p)} {sig.trans(p)}")} - if(!is.null(r)) {p = p.r(r, n); pstat = Glue("<>({n-2}) = {r:.{nsmall}}, <> {p.trans2(p)} {sig.trans(p)}")} - if(!is.null(chi2)) {p = p.chi2(chi2, df); pstat = Glue("\u03c7\u00b2({df}{ifelse(is.null(n), '', ', <> = ' %^% n)}) = {chi2:.{nsmall}}, <> {p.trans2(p)} {sig.trans(p)}")} + n=NULL, df=NULL, df1=NULL, df2=NULL, digits=2) { + if(!is.null(z)) {p = p.z(z); pstat = Glue("<> = {z:.{digits}}, <> {p.trans2(p)} {sig.trans(p)}")} + if(!is.null(t)) {p = p.t(t, df); pstat = Glue("<>({df}) = {t:.{digits}}, <> {p.trans2(p)} {sig.trans(p)}")} + if(!is.null(f)) {p = p.f(f, df1, df2); pstat = Glue("<>({df1}, {df2}) = {f:.{digits}}, <> {p.trans2(p)} {sig.trans(p)}")} + if(!is.null(r)) {p = p.r(r, n); pstat = Glue("<>({n-2}) = {r:.{digits}}, <> {p.trans2(p)} {sig.trans(p)}")} + if(!is.null(chi2)) {p = p.chi2(chi2, df); pstat = Glue("\u03c7\u00b2({df}{ifelse(is.null(n), '', ', <> = ' %^% n)}) = {chi2:.{digits}}, <> {p.trans2(p)} {sig.trans(p)}")} return(pstat) } p.plain = function(z=NULL, t=NULL, f=NULL, r=NULL, chi2=NULL, - n=NULL, df=NULL, df1=NULL, df2=NULL, digits=2, nsmall=digits) { - if(!is.null(z)) {p = p.z(z); pstat = Glue("z = {z:.{nsmall}}, p {p.trans2(p)} {sig.trans(p)}")} - if(!is.null(t)) {p = p.t(t, df); pstat = Glue("t({df}) = {t:.{nsmall}}, p {p.trans2(p)} {sig.trans(p)}")} - if(!is.null(f)) {p = p.f(f, df1, df2); pstat = Glue("F({df1}, {df2}) = {f:.{nsmall}}, p {p.trans2(p)} {sig.trans(p)}")} - if(!is.null(r)) {p = p.r(r, n); pstat = 
Glue("r({n-2}) = {r:.{nsmall}}, p {p.trans2(p)} {sig.trans(p)}")} - if(!is.null(chi2)) {p = p.chi2(chi2, df); pstat = Glue("\u03c7\u00b2({df}{ifelse(is.null(n), '', ', N = ' %^% n)}) = {chi2:.{nsmall}}, p {p.trans2(p)} {sig.trans(p)}")} + n=NULL, df=NULL, df1=NULL, df2=NULL, digits=2) { + if(!is.null(z)) {p = p.z(z); pstat = Glue("z = {z:.{digits}}, p {p.trans2(p)} {sig.trans(p)}")} + if(!is.null(t)) {p = p.t(t, df); pstat = Glue("t({df}) = {t:.{digits}}, p {p.trans2(p)} {sig.trans(p)}")} + if(!is.null(f)) {p = p.f(f, df1, df2); pstat = Glue("F({df1}, {df2}) = {f:.{digits}}, p {p.trans2(p)} {sig.trans(p)}")} + if(!is.null(r)) {p = p.r(r, n); pstat = Glue("r({n-2}) = {r:.{digits}}, p {p.trans2(p)} {sig.trans(p)}")} + if(!is.null(chi2)) {p = p.chi2(chi2, df); pstat = Glue("\u03c7\u00b2({df}{ifelse(is.null(n), '', ', N = ' %^% n)}) = {chi2:.{digits}}, p {p.trans2(p)} {sig.trans(p)}")} return(pstat) } @@ -325,7 +325,7 @@ p.chi2 = function(chi2, df) ifelse(df==0, 1, pchisq(chi2, df, lower.tail=FALSE)) ## Transform \emph{p} value. ## ## @param p \emph{p} value. -## @param nsmall.p Number of decimal places of \emph{p} value. Default is \code{3}. +## @param nsmall.p Number of decimal places of \emph{p} value. Defaults to \code{3}. ## ## @return A character string of transformed \emph{p} value. 
## @@ -335,19 +335,19 @@ p.chi2 = function(chi2, df) ifelse(df==0, 1, pchisq(chi2, df, lower.tail=FALSE)) ## @seealso \code{\link{p.trans2}} ## ## @export -p.trans = function(p, nsmall.p=3) { - mapply(function(p, nsmall.p) { +p.trans = function(p, digits.p=3) { + mapply(function(p, digits.p) { ifelse(is.na(p) | p > 1 | p < 0, "", - ifelse(p < 10^-nsmall.p, gsub("0(?=\\.)", "", Glue("<{10^-nsmall.p:.{nsmall.p}}"), perl=T), - gsub("0(?=\\.)", " ", Glue("{p:.{nsmall.p}}"), perl=T))) - }, p, nsmall.p) + ifelse(p < 10^-digits.p, gsub("0(?=\\.)", "", Glue("<{10^-digits.p:.{digits.p}}"), perl=T), + gsub("0(?=\\.)", " ", Glue("{p:.{digits.p}}"), perl=T))) + }, p, digits.p) } ## Transform \emph{p} value. ## ## @inheritParams p.trans -## @param p.min Minimum of \emph{p}. Default is \code{1e-99}. +## @param p.min Minimum of \emph{p}. Defaults to \code{1e-99}. ## ## @return A character string of transformed \emph{p} value. ## @@ -357,11 +357,11 @@ p.trans = function(p, nsmall.p=3) { ## @seealso \code{\link{p.trans}} ## ## @export -p.trans2 = function(p, nsmall.p=3, p.min=1e-99) { +p.trans2 = function(p, digits.p=3, p.min=1e-99) { ifelse(is.na(p) | p > 1 | p < 0, "", ifelse(p < p.min, paste("<", p.min), - ifelse(p < 10^-nsmall.p, paste("=", format(p, digits=1, scientific=TRUE)), - paste("=", formatF(p, nsmall=nsmall.p))))) + ifelse(p < 10^-digits.p, paste("=", format(p, digits=1, scientific=TRUE)), + paste("=", formatF(p, digits=digits.p))))) } @@ -384,6 +384,15 @@ sig.trans = function(p) { } +sig.trans2 = function(p) { + ifelse(is.na(p) | p > 1 | p < 0, "", + ifelse(p < .001, "***", + ifelse(p < .01, "**", + ifelse(p < .05, "*", "")))) +} + + + #### Basic Statistics #### @@ -392,7 +401,7 @@ sig.trans = function(p) { #' @param data Data frame or numeric vector. #' @param all.as.numeric \code{TRUE} (default) or \code{FALSE}. #' Transform all variables into numeric (continuous). -#' @param digits,nsmall Number of decimal places of output. Default is \code{2}. 
+#' @param digits Number of decimal places of output. Defaults to \code{2}. #' @param file File name of MS Word (\code{.doc}). #' @param plot \code{TRUE} or \code{FALSE} (default). #' Visualize the descriptive statistics using \code{\link[GGally:ggpairs]{GGally::ggpairs()}}. @@ -401,14 +410,14 @@ sig.trans = function(p) { #' @param upper.smooth \code{"none"} (default), \code{"lm"}, or \code{"loess"}. #' Add fitting lines to scatter plots (if any). #' @param plot.file \code{NULL} (default, plot in RStudio) or a file name (\code{"xxx.png"}). -#' @param plot.width Width (in "inch") of the saved plot. Default is \code{8}. -#' @param plot.height Height (in "inch") of the saved plot. Default is \code{6}. -#' @param plot.dpi DPI (dots per inch) of the saved plot. Default is \code{500}. +#' @param plot.width Width (in "inch") of the saved plot. Defaults to \code{8}. +#' @param plot.height Height (in "inch") of the saved plot. Defaults to \code{6}. +#' @param plot.dpi DPI (dots per inch) of the saved plot. Defaults to \code{500}. #' #' @return -#' Invisibly return a list consisting of +#' Invisibly return a list with #' (1) a data frame of descriptive statistics and -#' (2) a \code{ggplot2} object if users set \code{plot=TRUE}. +#' (2) a \code{ggplot2} object if \code{plot=TRUE}. 
#' #' @examples #' \donttest{set.seed(1) @@ -436,13 +445,15 @@ sig.trans = function(p) { #' @seealso \code{\link{Corr}} #' #' @export -Describe = function(data, - all.as.numeric=TRUE, - digits=2, nsmall=digits, - file=NULL, - plot=FALSE, - upper.triangle=FALSE, upper.smooth="none", - plot.file=NULL, plot.width=8, plot.height=6, plot.dpi=500) { +Describe = function( + data, + all.as.numeric=TRUE, + digits=2, + file=NULL, + plot=FALSE, + upper.triangle=FALSE, upper.smooth="none", + plot.file=NULL, plot.width=8, plot.height=6, plot.dpi=500 +) { if(is.numeric(data)) data = data.frame(X=data) desc = as.data.frame(psych::describe(data, fast=FALSE)) desc$vars = desc$trimmed = desc$mad = desc$range = desc$se = NULL @@ -457,13 +468,13 @@ Describe = function(data, missing = nrow(data) - desc$N if(max(missing)==0) { desc$Missing = NULL - nsmalls = c(0, rep(nsmall, 8)) + nsmalls = c(0, rep(digits, 8)) } else { desc$Missing = ifelse(missing==0, NA, missing) names(desc)[2] = "(NA)" - nsmalls = c(0, 0, rep(nsmall, 8)) + nsmalls = c(0, 0, rep(digits, 8)) } - print_table(desc, nsmalls=nsmalls, file=file, + print_table(desc, digits=nsmalls, file=file, title="Descriptive Statistics:") data.new = as.data.frame(data) @@ -481,8 +492,6 @@ Describe = function(data, p = NULL if(plot) { - # if(!pacman::p_isinstalled("GGally")) - # stop("Package `GGally` needs to be installed for plotting.\nRun this code: install.packages(\"GGally\")", call.=FALSE) if(upper.triangle) { smooth = switch(upper.smooth, "none"="points", @@ -497,13 +506,13 @@ Describe = function(data, data.new, switch="both", axisLabels="none", upper=upper, lower=list(continuous=GGally::wrap( - "cor", digits=nsmall, + "cor", digits=digits, use="pairwise.complete.obs", size=4, color="black"))) + theme_bruce() + theme(strip.text=element_text(size=12, color="black")) if(is.null(plot.file)) { - print(p) + suppressWarnings(print(p)) } else { ggsave(plot=p, filename=plot.file, width=plot.width, height=plot.height, dpi=plot.dpi) @@ 
-524,7 +533,7 @@ Describe = function(data, #' @param labels [Optional] A vector re-defining the labels of values. #' @param sort \code{""} (default, sorted by the order of variable values/labels), #' \code{"-"} (decreasing by N), or \code{"+"} (increasing by N). -#' @param digits,nsmall Number of decimal places of output. Default is \code{1}. +#' @param digits Number of decimal places of output. Defaults to \code{1}. #' @param file File name of MS Word (\code{.doc}). #' #' @return A data frame of frequency statistics. @@ -543,8 +552,10 @@ Describe = function(data, #' Freq(data, "age") #' #' @export -Freq = function(x, varname, labels, sort="", - digits=1, nsmall=digits, file=NULL) { +Freq = function( + x, varname, labels, sort="", + digits=1, file=NULL +) { if(inherits(x, "data.frame")) { if(missing(varname)) stop("Please also specify `varname` (variable name). See help page: help(Freq)", call.=FALSE) @@ -560,10 +571,10 @@ Freq = function(x, varname, labels, sort="", if(missing(labels)) labels = names(table.var) output = cbind(matrix(table.var, dimnames=list(labels, "N")), - matrix(round(table.var/N*100, nsmall), + matrix(round(table.var/N*100, digits), dimnames=list(labels, "%"))) if(N.na) output = rbind(output, - matrix(c(N.na, round(N.na/N*100, nsmall)), + matrix(c(N.na, round(N.na/N*100, digits)), ncol=2, dimnames=list("(NA)"))) if(sort=="-") output = output[order(output[,"N"], decreasing=TRUE),] @@ -579,7 +590,7 @@ Freq = function(x, varname, labels, sort="", if(N.na>0) note = note %^% "

\n

" %^% "Valid N = " %^% formatN(N-N.na) } - print_table(output, nsmalls=c(0, nsmall), file=file, + print_table(output, digits=c(0, digits), file=file, title="Frequency Statistics:", note=note) invisible(output) @@ -594,21 +605,22 @@ Freq = function(x, varname, labels, sort="", #' @param p.adjust Adjustment of \emph{p} values for multiple tests: #' \code{"none"}, \code{"fdr"}, \code{"holm"}, \code{"bonferroni"}, ... #' For details, see \code{\link[stats:p.adjust]{stats::p.adjust()}}. -#' @param digits,nsmall Number of decimal places of output. Default is \code{2}. +#' @param digits Number of decimal places of output. Defaults to \code{2}. #' @param file File name of MS Word (\code{.doc}). #' @param plot \code{TRUE} (default) or \code{FALSE}. Plot the correlation matrix. -#' @param plot.range Range of correlation coefficients for plot. Default is \code{c(-1, 1)}. -#' @param plot.palette Color gradient for plot. Default is \code{c("#B52127", "white", "#2171B5")}. -#' You may also set it to, e.g., \code{c("red", "white", "blue")}. -#' @param plot.color.levels Default is \code{201}. +#' @param plot.r.size Font size of correlation text label. Defaults to \code{4}. +#' @param plot.colors Plot colors (character vector). Defaults to "RdBu" of the Color Brewer Palette. #' #' @return -#' Invisibly return the correlation results obtained from -#' \code{\link[psych:corr.test]{psych::corr.test()}}. +#' Invisibly return a list with +#' (1) correlation results from +#' \code{\link[psych:corr.test]{psych::corr.test()}} and +#' (2) a \code{ggplot2} object if \code{plot=TRUE}. 
#' #' @examples #' Corr(airquality) -#' Corr(airquality, p.adjust="bonferroni") +#' Corr(airquality, p.adjust="bonferroni", +#' plot.colors=c("#b2182b", "white", "#2166ac")) #' #' d = as.data.table(psych::bfi) #' added(d, { @@ -622,18 +634,24 @@ Freq = function(x, varname, labels, sort="", #' }) #' Corr(d[, .(age, gender, education, E, A, C, N, O)]) #' -#' @seealso \code{\link{Describe}} +#' @seealso +#' \code{\link{Describe}} +#' +#' \code{\link{cor_multilevel}} #' #' @export -Corr = function(data, - method="pearson", - p.adjust="none", - all.as.numeric=TRUE, - digits=2, nsmall=digits, - file=NULL, - plot=TRUE, plot.range=c(-1, 1), - plot.palette=NULL, plot.color.levels=201, - plot.file=NULL, plot.width=8, plot.height=6, plot.dpi=500) { +Corr = function( + data, + method="pearson", + p.adjust="none", + all.as.numeric=TRUE, + digits=2, + file=NULL, + plot=TRUE, + plot.r.size=4, + plot.colors=NULL, + plot.file=NULL, plot.width=8, plot.height=6, plot.dpi=500 +) { data.new = as.data.frame(data) vars.not.numeric = c() if(all.as.numeric) { @@ -650,17 +668,17 @@ Corr = function(data, minlength=20) COR = cor$ci["r"] if(p.adjust=="none") { - COR$`[95% CI]` = cc_ci(cor$ci$lower, cor$ci$upper, nsmall) + COR$`[95% CI]` = cc_ci(cor$ci$lower, cor$ci$upper, digits) COR$pval = cor$ci$p } else { - COR$`[95% CI]` = cc_ci(cor$ci.adj$lower, cor$ci.adj$upper, nsmall) + COR$`[95% CI]` = cc_ci(cor$ci.adj$lower, cor$ci.adj$upper, digits) COR$pval = p.adjust(cor$ci$p, method=p.adjust) } if(inherits(cor$n, "matrix")) Ns = cor$n[lower.tri(cor$n)] else Ns = cor$n - COR$r = formatF(COR$r, nsmall) + COR$r = formatF(COR$r, digits) COR$N = Ns if(length(vars.not.numeric)>0) { @@ -668,53 +686,15 @@ Corr = function(data, cat("\n") } - if(plot) { - if(is.null(plot.palette)) - plot.palette = c("#B52127", "white", "#2171B5") - if(!is.null(plot.file)) { - grDevices::png(filename=plot.file, width=plot.width, height=plot.height, units="in", res=plot.dpi) - } - try({ - plot.error = TRUE - 
cor_plot(r=cor$r, adjust="none", nsmall=nsmall, - numbers=TRUE, zlim=plot.range, - diag=FALSE, xlas=2, n=plot.color.levels, - pval=cor$p, stars=TRUE, - alpha=1, gr=grDevices::colorRampPalette(plot.palette), - main="Correlation Matrix") - plot.error = FALSE - }, silent=TRUE) - if(plot.error) { - warning = Glue(" - Plot is NOT successfully displayed in the RStudio `Plots` Pane. - Please check if the `Plots` Pane of your RStudio is too small. - You should enlarge the `Plots` Pane (and/or clear all plots).") - warning(warning, call.=TRUE) - cat("\n") - } else { - Print("Correlation matrix is displayed in the RStudio `Plots` Pane.") - if(p.adjust!="none") - Print("<> values ABOVE the diagonal are adjusted using the \"{p.adjust}\" method.>>") - cat("\n") - } - if(!is.null(plot.file)) { - grDevices::dev.off() - plot.file = str_split(plot.file, "/", simplify=TRUE) - plot.path = paste0(getwd(), '/', plot.file[length(plot.file)]) - Print("<> Plot saved to <>") - cat("\n") - } - } - if(is.null(file)) { Print("{capitalize(method)}'s <> and 95% confidence intervals:") if(p.adjust!="none") Print("<> values and 95% CIs are adjusted using the \"{p.adjust}\" method.>>") - print_table(COR, nsmalls=0) + print_table(COR, digits=0) cat("\n") } else { Print("Descriptive Statistics and Correlation Matrix:") - cor.mat = matrix(formatF(cor$r, nsmall), + cor.mat = matrix(formatF(cor$r, digits), nrow=nrow(cor$r), dimnames=list(rownames(cor$r), colnames(cor$r))) @@ -768,7 +748,7 @@ Corr = function(data, " p values and 95% CIs are adjusted using the \"" %^% p.adjust %^% "\" method.

") ) print_table( - des.cor, nsmalls=nsmall, row.names=FALSE, + des.cor, digits=digits, row.names=FALSE, title="Table 1. Descriptive Statistics and Correlation Matrix.", note=paste0( "Note. ", @@ -784,219 +764,56 @@ Corr = function(data, ) } - invisible(cor) -} - - -## modified `psych::cor.plot()` -## see comment lines -cor_plot <- function (r, numbers = TRUE, colors = TRUE, n = 51, main = NULL, - zlim = c(-1, 1), show.legend = TRUE, labels = NULL, n.legend = 10, - select = NULL, pval = NULL, cuts = c(0.001, 0.01), scale = TRUE, - cex, MAR, upper = TRUE, diag = TRUE, - symmetric = TRUE, stars = FALSE, adjust = "holm", xaxis = 1, - xlas = 0, ylas = 2, gr = NULL, alpha = 0.75, min.length = NULL, - nsmall=2, # added in bruceR - ...) -{ - oldpar <- graphics::par(no.readonly = TRUE) - on.exit(graphics::par(oldpar)) - if (missing(MAR)) - # MAR <- 5 - MAR <- 4 - if (!is.matrix(r) & (!is.data.frame(r))) { - if ((length(class(r)) > 1) & (inherits(r, "psych"))) { - switch(class(r)[2], omega = { - r <- r$schmid$sl - nff <- ncol(r) - r <- r[, 1:(nff - 3)] - if (is.null(main)) { - main <- "Omega plot" - } - }, cor.ci = { - pval <- 2 * (1 - r$ptci) - r <- r$rho - }, fa = { - r <- r$loadings - if (is.null(main)) { - main <- "Factor Loadings plot" - } - }, pc = { - r <- r$loadings - if (is.null(main)) { - main <- "PCA Loadings plot" - } - }, principal = { - r <- r$loadings - if (is.null(main)) { - main <- "PCA Loadings plot" - } - }) - } - } - else { - if (symmetric & !psych::isCorrelation(r) & (nrow(r) != ncol(r))) { - cp <- psych::corr.test(r, adjust = adjust) - r <- cp$r - pval <- cp$p - if (is.null(main)) { - main <- "Correlation plot" - } - } - } - R <- r <- as.matrix(r) - if (!is.null(select)) - r <- r[select, select] - if (min(dim(r)) < 2) { - stop("You need at least two dimensions to make a meaningful plot.", call.=TRUE) - } - if (is.null(n)) { - n <- dim(r)[2] - } - nf <- dim(r)[2] - nvar <- dim(r)[1] - if (!upper) - r[col(r) > row(r)] <- NA - if (!diag) - r[col(r) == 
row(r)] <- NA - if (nf == nvar) - r <- t(r) - if (missing(pval) | is.null(pval)) { - pval <- matrix(rep(1, nvar * nf), nvar) - } - else { - if (length(pval) != nvar * nf) { - pr = matrix(0, nvar, nf) - pr[row(pr) > col(pr)] <- pval - pr <- pr + t(pr) - diag(pr) <- 0 - pval <- pr - } - if (!stars) { - pval <- psych::con2cat(pval, cuts = cuts) - pval <- (length(cuts) + 1 - pval)/length(cuts) - } - pval <- t(pval) - } - if (is.null(labels)) { - if (is.null(rownames(r))) - rownames(r) <- paste("V", 1:nvar) - if (is.null(colnames(r))) - colnames(r) <- paste("V", 1:nf) - } - else { - rownames(r) <- colnames(r) <- labels - } - if (!is.null(min.length)) { - rownames(r) <- abbreviate(rownames(r), minlength = min.length) - colnames(r) <- abbreviate(colnames(r), minlength = min.length) - } - max.len <- max(nchar(rownames(r)))/6 - if (is.null(zlim)) { - zlim <- range(r) - } - if (colors) { - if (missing(gr)) { - gr <- grDevices::colorRampPalette(c("red", "white", "blue")) - } - if (max(r, na.rm = TRUE) > 1) { - maxr <- max(r) - n1 <- n * (zlim[2] - zlim[1])/(maxr - zlim[1]) - colramp <- rep(NA, n) - n1 <- ceiling(n1) - colramp[1:(n1 + 1)] <- gr(n1 + 1) - colramp[(n1 + 1):n] <- colramp[n1 + 1] - zlim[2] <- maxr - } - else { - colramp <- gr(n) - } - } - else { - colramp <- grDevices::grey((n:0)/n) - } - colramp <- grDevices::adjustcolor(colramp, alpha.f = alpha) - if (nvar != nf) { - r <- t(r) - } - ord1 <- seq(nvar, 1, -1) - if (nf == nvar) { - r <- r[, ord1] - pval <- pval[, ord1] - } - else { - r <- r[, ord1] - pval <- t(pval[ord1, ]) - } - # graphics::par(mar = c(MAR + max.len, MAR + max.len, 4, 0.5)) - graphics::par(mar = c(MAR + max.len, MAR + max.len, 2.5, 0.5)) - if (show.legend) { - graphics::layout(matrix(c(1, 2), nrow = 1), widths = c(0.9, 0.1), - heights = c(1, 1)) - } - graphics::image(r, col = colramp, axes = FALSE, main = main, zlim = zlim) - graphics::box() - at1 <- (0:(nf - 1))/(nf - 1) - at2 <- (0:(nvar - 1))/(nvar - 1) - lab1 <- rownames(r) - lab2 <- 
colnames(r) - if (xaxis == 3) { - line <- -0.5 - tick <- FALSE - } - else { - line <- NA - tick <- TRUE - } - if (max.len > 0.5) { - graphics::axis(2, at = at2, labels = lab2, las = ylas, ...) - graphics::axis(xaxis, at = at1, labels = lab1, las = xlas, line = line, - tick = tick, ...) - } - else { - graphics::axis(2, at = at2, labels = lab2, las = ylas, ...) - graphics::axis(xaxis, at = at1, labels = lab1, las = xlas, line = line, - tick = tick, ...) - } - if (numbers) { - rx <- rep(at1, ncol(r)) - ry <- rep(at2, each = nrow(r)) - # rv <- round(r, 2) # modified in bruceR - rv <- formatF(r, nsmall) # modified in bruceR - if (stars) { - symp <- stats::symnum(pval, corr = FALSE, cutpoints = c(0, - 0.001, 0.01, 0.05, 1), symbols = c("***", "**", - "*", " "), legend = FALSE) - rv[!is.na(rv)] <- paste0(rv[!is.na(rv)], symp[!is.na(rv)]) - rv <- gsub("NA.*", "", rv) # modified in bruceR - if (missing(cex)) - cex = 9/max(nrow(r), ncol(r)) - graphics::text(rx, ry, rv, cex = cex, ...) - } - else { - if (missing(cex)) - cex = 9/max(nrow(r), ncol(r)) - if (scale) { - graphics::text(rx, ry, rv, cex = pval * cex, ...) - } - else { - graphics::text(rx, ry, rv, cex = cex, ...) 
- } + p = label = r = x = y = NULL + if(plot) { + dcor = cbind(expand.grid(y=row.names(cor$r), x=row.names(cor$r)), + data.frame(r=as.numeric(cor$r), p=as.numeric(cor$p))) + dcor$label = str_replace(str_replace( + str_trim(formatF(dcor$r, digits)), "0\\.", "."), "-", "\u2013") %^% + sig.trans2(dcor$p) + colors = c("#67001f", "#b2182b", "#d6604d", "#f4a582", + "#fddbc7", "#f7f7f7", "#d1e5f0", "#92c5de", + "#4393c3", "#2166ac", "#053061") + if(is.null(plot.colors)) + plot.colors = grDevices::colorRampPalette(colors)(100) + p = ggplot(dcor[which(dcor$x!=dcor$y),], + aes(x=x, y=y, fill=r)) + + geom_tile() + + geom_text(aes(label=label), size=plot.r.size) + + scale_x_discrete(position="top", expand=expansion(add=0)) + + scale_y_discrete(limits=rev, expand=expansion(add=0)) + + # scale_fill_fermenter( + # palette="RdBu", direction=1, + # limits=c(-1, 1), breaks=seq(-1, 1, 0.2), + # guide=guide_colorsteps(barwidth=0.5, barheight=10)) + + scale_fill_stepsn( + colors=plot.colors, + limits=c(-1, 1), breaks=seq(-1, 1, 0.1), + labels=function(x) ifelse(x %in% seq(-1, 1, 0.2), x, ""), + guide=guide_colorsteps(barwidth=0.5, barheight=10)) + + coord_equal() + + labs(x=NULL, y=NULL, fill=NULL) + + theme_bruce(border=TRUE, line.x=FALSE, line.y=FALSE, + tick.x=FALSE, tick.y=FALSE) + + theme(axis.text.x=element_text(hjust=0, angle=45)) + if(is.null(plot.file)) { + print(p) + Print("Correlation matrix is displayed in the RStudio `Plots` Pane.") + if(p.adjust!="none") + Print("<> values ABOVE the diagonal are adjusted using the \"{p.adjust}\" method.>>") + cat("\n") + } else { + ggsave(plot=p, filename=plot.file, + width=plot.width, height=plot.height, dpi=plot.dpi) + plot.file = str_split(plot.file, "/", simplify=TRUE) + plot.path = paste0(getwd(), '/', plot.file[length(plot.file)]) + Print("<> Plot saved to <>") + cat("\n") } } - if (show.legend) { - leg <- matrix(seq(from = zlim[1], to = zlim[2], by = (zlim[2] - - zlim[1])/n), nrow = 1) - # graphics::par(mar = c(MAR, 0, 4, 3)) - 
graphics::par(mar = c(MAR, 0, 2.5, 3)) - graphics::image(leg, col = colramp, axes = FALSE, zlim = zlim) - at2 <- seq(0, 1, 1/n.legend) - labels = seq(zlim[1], zlim[2], (zlim[2] - zlim[1])/(length(at2) - - 1)) - graphics::axis(4, at = at2, labels = labels, las = 2, ...) - } - invisible(R) + + invisible(list(corr=cor, plot=p)) } @@ -1052,6 +869,94 @@ cor_diff = function(r1, n1, r2, n2, n=NULL, rcov=NULL) { } +#' Multilevel correlations (within-level and between-level). +#' +#' Multilevel correlations (within-level and between-level). +#' For details, see description in \code{\link{HLM_ICC_rWG}}. +#' +#' @inheritParams HLM_ICC_rWG +#' +#' @return Invisibly return a list of results. +#' +#' @seealso +#' \code{\link{Corr}} +#' +#' \code{\link{HLM_ICC_rWG}} +#' +#' @examples +#' # see https://psychbruce.github.io/supp/CEM +#' +#' @export +cor_multilevel = function( + data, group, digits=3 +) { + Print("<>") + + cors = suppressWarnings(psych::statsBy(data, group, cors=TRUE)) + + # Correlations between and within groups + rw = cors$rwg + rb = cors$rbg + rbw = rw + vars = rownames(cors$pooled) + + # Reliabliity + # diag(rbw) = reliability + + # Combine rwg & rbg + rbw[upper.tri(rbw)] = rb[upper.tri(rb)] + rownames(rbw) = vars + colnames(rbw) = vars + print_table(rbw, digits=digits) + + # P values and 95% CI + pwg = cors$pwg + pbg = cors$pbg + rwg.ci = cors$ci.wg$r.ci + rbg.ci = cors$ci.bg$r.ci + if(!is.null(rwg.ci)) { + lower = upper = CI = NULL + rwg.ci = mutate( + rwg.ci, + CI = ifelse( + is.na(lower), "", + paste0("[", formatF(lower, digits), + ", ", formatF(upper, digits), + "]")), + pval = pwg[lower.tri(pwg)] + ) %>% rename(`[95% CI]`=CI) + print_table(rwg.ci[c("r", "[95% CI]", "pval")], digits, + title="\n\n\n<>") + } + if(!is.null(rbg.ci)) { + rbg.ci = mutate( + rbg.ci, + CI = ifelse( + is.na(lower), "", + paste0("[", formatF(lower, digits), + ", ", formatF(upper, digits), + "]")), + pval = pbg[lower.tri(pbg)] + ) %>% rename(`[95% CI]`=CI) + print_table(rbg.ci[c("r", 
"[95% CI]", "pval")], digits, + title="\n\n\n<>") + } + + # ICC1 & ICC2 + ICC = as.data.frame(rbind(cors$ICC1, cors$ICC2))[vars] + row.names(ICC) = c("ICC1", "ICC2") + print_table(ICC, digits, + title="\n\n\n<>") + + invisible(list(cors=rbw, + rwg.ci=rwg.ci, + rbg.ci=rbg.ci, + icc=ICC)) +} + + #### T-Tests #### #' One-sample, independent-samples, and paired-samples t-test. @@ -1092,10 +997,10 @@ cor_diff = function(r1, n1, r2, n2, n=NULL, rcov=NULL) { #' Multiple variables should be included in a character vector \code{c()}. #' #' Only necessary for independent-samples \emph{t}-test. -#' @param paired For paired-samples \emph{t}-test, set it as \code{TRUE}. Default is \code{FALSE}. +#' @param paired For paired-samples \emph{t}-test, set it as \code{TRUE}. Defaults to \code{FALSE}. #' @param paired.d.type Type of Cohen's \emph{d} for paired-samples \emph{t}-test (see Lakens, 2013). #' -#' Default is \code{"dz"}. Options include: +#' Defaults to \code{"dz"}. Options include: #' \describe{ #' \item{\code{"dz"} (\emph{d} for standardized difference)}{ #' Cohen's \eqn{d_{z} = \frac{M_{diff}}{SD_{diff}}} @@ -1109,15 +1014,15 @@ cor_diff = function(r1, n1, r2, n2, n=NULL, rcov=NULL) { #' } #' } #' @param var.equal If Levene's test indicates a violation of the homogeneity of variance, -#' then you should better set this argument as \code{FALSE}. Default is \code{TRUE}. -#' @param mean.diff Whether to display results of mean difference and its 95\% CI. Default is \code{TRUE}. -#' @param test.value The true value of the mean (or difference in means for a two-samples test). Default is \code{0}. +#' then you should better set this argument as \code{FALSE}. Defaults to \code{TRUE}. +#' @param mean.diff Whether to display results of mean difference and its 95\% CI. Defaults to \code{TRUE}. +#' @param test.value The true value of the mean (or difference in means for a two-samples test). Defaults to \code{0}. 
#' @param test.sided Any of \code{"="} (two-sided, the default), \code{"<"} (one-sided), or \code{">"} (one-sided). #' @param factor.rev Whether to reverse the levels of factor (X) -#' such that the test compares higher vs. lower level. Default is \code{TRUE}. -#' @param bayes.prior Prior scale in Bayesian \emph{t}-test. Default is 0.707. +#' such that the test compares higher vs. lower level. Defaults to \code{TRUE}. +#' @param bayes.prior Prior scale in Bayesian \emph{t}-test. Defaults to 0.707. #' See details in \code{\link[BayesFactor:ttestBF]{BayesFactor::ttestBF()}}. -#' @param digits,nsmall Number of decimal places of output. Default is \code{2}. +#' @param digits Number of decimal places of output. Defaults to \code{2}. #' @param file File name of MS Word (\code{.doc}). #' #' @examples @@ -1200,17 +1105,19 @@ cor_diff = function(r1, n1, r2, n2, n=NULL, rcov=NULL) { #' A practical primer for \emph{t}-tests and ANOVAs. \emph{Frontiers in Psychology, 4}, Article 863. #' #' @export -TTEST = function(data, y, x=NULL, - paired=FALSE, - paired.d.type="dz", - var.equal=TRUE, - mean.diff=TRUE, - test.value=0, - test.sided=c("=", "<", ">"), - factor.rev=TRUE, - bayes.prior="medium", - digits=2, nsmall=digits, - file=NULL) { +TTEST = function( + data, y, x=NULL, + paired=FALSE, + paired.d.type="dz", + var.equal=TRUE, + mean.diff=TRUE, + test.value=0, + test.sided=c("=", "<", ">"), + factor.rev=TRUE, + bayes.prior="medium", + digits=2, + file=NULL +) { data = as.data.frame(data) if(paired) { @@ -1275,18 +1182,18 @@ TTEST = function(data, y, x=NULL, } ## Print (nmsd) - nmsd$MeanSD = paste0(formatF(nmsd$Mean, nsmall), " (", - formatF(nmsd$S.D., nsmall), ")") + nmsd$MeanSD = paste0(formatF(nmsd$Mean, digits), " (", + formatF(nmsd$S.D., digits), ")") names(nmsd)[length(nmsd)] = "Mean (S.D.)" nmsd$Mean = nmsd$S.D. 
= NULL Print("Descriptives:") - print_table(nmsd, row.names=FALSE, nsmalls=0) + print_table(nmsd, row.names=FALSE, digits=0) cat("\n") ## Print (check) if(nrow(lev)>0) { Print("Levene\u2019s test for homogeneity of variance:") - print_table(lev, nsmalls=c(2, 0, 0, 0)) + print_table(lev, digits=c(2, 0, 0, 0)) Print("<>: H0 = equal variance (homoscedasticity). If significant (violation of the assumption), then you should better set `var.equal=FALSE`.") @@ -1295,9 +1202,9 @@ TTEST = function(data, y, x=NULL, ## Print (t-test) RES = res[,1:3] # t, df, pval - RES$Diff = cc_m_ci(res$diff, res$llci, res$ulci, nsmall) - RES$Cohen = cc_m_ci(res$Cohen_d, res$LLCI, res$ULCI, nsmall) - RES$BF10 = sprintf(Glue("%.{nsmall}e"), res$BF10) + RES$Diff = cc_m_ci(res$diff, res$llci, res$ulci, digits) + RES$Cohen = cc_m_ci(res$Cohen_d, res$LLCI, res$ULCI, digits) + RES$BF10 = sprintf(Glue("%.{digits}e"), res$BF10) if(!is.null(file)) { RES$Diff = str_replace_all(RES$Diff, "-", "\u2013") RES$Cohen = str_replace_all(RES$Cohen, "-", "\u2013") @@ -1316,7 +1223,7 @@ TTEST = function(data, y, x=NULL, RES[,4] = NULL print_table( RES, - nsmalls=c(nsmall, ifelse(var.equal, 0, nsmall), 0, 0, 0, 0, 0), + digits=c(digits, ifelse(var.equal, 0, digits), 0, 0, 0, 0, 0), title=ifelse(is.null(file), Glue("Results of t-test{ifelse(var.equal, '', ' (adjusted df)')}:"), type), diff --git a/R/bruceR-stats_2_scale.R b/R/bruceR-stats_2_scale.R index 73eb22b..976bcd8 100644 --- a/R/bruceR-stats_2_scale.R +++ b/R/bruceR-stats_2_scale.R @@ -30,8 +30,8 @@ #' (2) a numeric vector specifying the item number of reverse-scoring variables (not recommended). #' @param range,likert [Optional] Range of likert scale (e.g., \code{1:5}, \code{c(1, 5)}). #' If not provided, it will be automatically estimated from the given data (BUT you should use this carefully). -#' @param na.rm Ignore missing values. Default is \code{TRUE}. -#' @param values [Only for \code{CONSEC}] Values to be counted as consecutive identical values. 
Default is all numbers (\code{0:9}). +#' @param na.rm Ignore missing values. Defaults to \code{TRUE}. +#' @param values [Only for \code{CONSEC}] Values to be counted as consecutive identical values. Defaults to all numbers (\code{0:9}). #' #' @return A vector of computed values. #' @@ -302,7 +302,7 @@ CONSEC = function(data, #' } #' #' @inheritParams %%COMPUTE%% -#' @param digits,nsmall Number of decimal places of output. Default is \code{3}. +#' @param digits Number of decimal places of output. Defaults to \code{3}. #' #' @return #' A list of results obtained from @@ -325,8 +325,10 @@ CONSEC = function(data, #' \code{\link{MEAN}}, \code{\link{EFA}}, \code{\link{CFA}} #' #' @export -Alpha = function(data, var, items, vars=NULL, varrange=NULL, rev=NULL, - digits=3, nsmall=digits) { +Alpha = function( + data, var, items, vars=NULL, varrange=NULL, rev=NULL, + digits=3 +) { if(!is.null(varrange)) { dn = names(data) varrange = gsub(" ", "", strsplit(varrange, ":")[[1]]) @@ -374,13 +376,13 @@ Alpha = function(data, var, items, vars=NULL, varrange=NULL, rev=NULL, Valid Cases: {n.valid} ({100*n.valid/n.total:.1}%) Scale Statistics: - <> = {alpha$total$mean:.{nsmall}} - <> = {alpha$total$sd:.{nsmall}} - Cronbach\u2019s \u03b1 = {alpha$total$raw_alpha:.{nsmall}} - McDonald\u2019s \u03c9 = {omega$omega.tot:.{nsmall}} + <> = {alpha$total$mean:.{digits}} + <> = {alpha$total$sd:.{digits}} + Cronbach\u2019s \u03b1 = {alpha$total$raw_alpha:.{digits}} + McDonald\u2019s \u03c9 = {omega$omega.tot:.{digits}} ") - # Cronbach's \u03b1: {alpha$total$raw_alpha:.{nsmall}} (based on raw scores) - # Cronbach's \u03b1: {alpha$total$std.alpha:.{nsmall}} (based on standardized items) + # Cronbach's \u03b1: {alpha$total$raw_alpha:.{digits}} (based on raw scores) + # Cronbach's \u03b1: {alpha$total$std.alpha:.{digits}} (based on standardized items) if(alpha$total$raw_alpha<0.5 | length(items.need.rev)>0) { cat("\n") @@ -395,7 +397,7 @@ Alpha = function(data, var, items, vars=NULL, varrange=NULL, 
rev=NULL, } cat("\n") - print_table(items, nsmalls=nsmall, + print_table(items, digits=digits, title="Item Statistics (Cronbach\u2019s \u03b1 If Item Deleted):", note="Item-Rest Cor. = Corrected Item-Total Correlation") cat("\n") @@ -451,14 +453,14 @@ Alpha = function(data, var, items, vars=NULL, varrange=NULL, rev=NULL, #' \item \code{"parallel"} - based on parallel analysis #' \item (any number >= 1) - user-defined fixed number #' } -#' @param sort.loadings Sort factor/component loadings by size? Default is \code{TRUE}. +#' @param sort.loadings Sort factor/component loadings by size? Defaults to \code{TRUE}. #' @param hide.loadings A number (0~1) for hiding absolute factor/component loadings below this value. -#' Default is \code{0} (does not hide any loading). -#' @param plot.scree Display the scree plot? Default is \code{TRUE}. -#' @param kaiser Do the Kaiser normalization (as in SPSS)? Default is \code{TRUE}. -#' @param max.iter Maximum number of iterations for convergence. Default is \code{25} (the same as in SPSS). -#' @param min.eigen Minimum eigenvalue (used if \code{nfactors="eigen"}). Default is \code{1}. -#' @param digits,nsmall Number of decimal places of output. Default is \code{3}. +#' Defaults to \code{0} (does not hide any loading). +#' @param plot.scree Display the scree plot? Defaults to \code{TRUE}. +#' @param kaiser Do the Kaiser normalization (as in SPSS)? Defaults to \code{TRUE}. +#' @param max.iter Maximum number of iterations for convergence. Defaults to \code{25} (the same as in SPSS). +#' @param min.eigen Minimum eigenvalue (used if \code{nfactors="eigen"}). Defaults to \code{1}. +#' @param digits Number of decimal places of output. Defaults to \code{3}. #' @param file File name of MS Word (\code{.doc}). #' @param ... Arguments passed from \code{PCA()} to \code{EFA()}. 
#' @@ -504,19 +506,21 @@ Alpha = function(data, var, items, vars=NULL, varrange=NULL, rev=NULL, #' hide.loadings=0.45) # hide loadings < 0.45 #' #' @export -EFA = function(data, var, items, vars=NULL, varrange=NULL, rev=NULL, - method=c("pca", "pa", "ml", "minres", "uls", "ols", "wls", "gls", "alpha"), - rotation=c("none", "varimax", "oblimin", "promax", "quartimax", "equamax"), - nfactors=c("eigen", "parallel", "(any number >= 1)"), - sort.loadings=TRUE, - hide.loadings=0.00, - plot.scree=TRUE, - # plot.factor=TRUE, - kaiser=TRUE, - max.iter=25, - min.eigen=1, - digits=3, nsmall=digits, - file=NULL) { +EFA = function( + data, var, items, vars=NULL, varrange=NULL, rev=NULL, + method=c("pca", "pa", "ml", "minres", "uls", "ols", "wls", "gls", "alpha"), + rotation=c("none", "varimax", "oblimin", "promax", "quartimax", "equamax"), + nfactors=c("eigen", "parallel", "(any number >= 1)"), + sort.loadings=TRUE, + hide.loadings=0.00, + plot.scree=TRUE, + # plot.factor=TRUE, + kaiser=TRUE, + max.iter=25, + min.eigen=1, + digits=3, + file=NULL +) { if(!is.null(varrange)) { dn = names(data) varrange = gsub(" ", "", strsplit(varrange, ":")[[1]]) @@ -643,7 +647,7 @@ EFA = function(data, var, items, vars=NULL, varrange=NULL, rev=NULL, - {Method.Rotation} KMO and Bartlett's Test: - - Kaiser-Meyer-Olkin (KMO) Measure of Sampling Adequacy: MSA = {kmo:.{nsmall}} + - Kaiser-Meyer-Olkin (KMO) Measure of Sampling Adequacy: MSA = {kmo:.{digits}} - Bartlett's Test of Sphericity: Approx. 
{p(chi2=btl$chisq, df=btl$df)} ") @@ -662,7 +666,7 @@ EFA = function(data, var, items, vars=NULL, varrange=NULL, rev=NULL, names(eigen) = c("Eigenvalue", "Variance %", "Cumulative %", "SS Loading", "Variance %", "Cumulative %") cat("\n") - print_table(eigen, nsmalls=nsmall, + print_table(eigen, digits=digits, title="Total Variance Explained:") # factor loadings @@ -680,7 +684,7 @@ EFA = function(data, var, items, vars=NULL, varrange=NULL, rev=NULL, info2 = ifelse(sort.loadings, " (Sorted by Size)", "") loadings.info = info1%^%info2 cat("\n") - print_table(loadings, nsmalls=nsmall, + print_table(loadings, digits=digits, title=Glue("{tag} Loadings{loadings.info}:")) Print(" Communality = Sum of Squared (SS) Factor Loadings @@ -688,7 +692,7 @@ EFA = function(data, var, items, vars=NULL, varrange=NULL, rev=NULL, \n ") if(!is.null(file)) - print_table(loadings, nsmalls=nsmall, file=file, + print_table(loadings, digits=digits, file=file, title=Glue("{tag} Loadings{loadings.info}:"), note=Glue("Extraction Method: {Method}.

Rotation Method: {Method.Rotation}.")) @@ -829,7 +833,7 @@ modelCFA.trans = function(style=c("jmv", "lavaan"), #' @param model Model formula. See examples. #' @param estimator The estimator to be used #' (for details, see \link[lavaan:lavOptions]{lavaan options}). -#' Default is \code{"ML"}. +#' Defaults to \code{"ML"}. #' Can be one of the following: #' \describe{ #' \item{\code{"ML"}}{Maximum Likelihood (can be extended to @@ -841,12 +845,12 @@ modelCFA.trans = function(style=c("jmv", "lavaan"), #' \item{\code{"DWLS"}}{Diagonally Weighted Least Squares} #' \item{\code{"DLS"}}{Distributionally-weighted Least Squares} #' } -#' @param highorder High-order factor. Default is \code{""}. -#' @param orthogonal Default is \code{FALSE}. If \code{TRUE}, all covariances among latent variables are set to zero. -#' @param missing Default is \code{"listwise"}. Alternative is \code{"fiml"} ("Full Information Maximum Likelihood"). +#' @param highorder High-order factor. Defaults to \code{""}. +#' @param orthogonal Defaults to \code{FALSE}. If \code{TRUE}, all covariances among latent variables are set to zero. +#' @param missing Defaults to \code{"listwise"}. Alternative is \code{"fiml"} ("Full Information Maximum Likelihood"). ## @param CI \code{TRUE} or \code{FALSE} (default), provide confidence intervals for the model estimates. ## @param MI \code{TRUE} or \code{FALSE} (default), provide modification indices for the parameters not included in the model. -#' @param digits,nsmall Number of decimal places of output. Default is \code{3}. +#' @param digits Number of decimal places of output. Defaults to \code{3}. #' @param file File name of MS Word (\code{.doc}). 
#' #' @return @@ -868,13 +872,15 @@ modelCFA.trans = function(style=c("jmv", "lavaan"), #' CFA(data.bfi, "E =~ E[1:5]; A =~ A[1:5]; C =~ C[1:5]; N =~ N[1:5]; O =~ O[1:5]") #' } #' @export -CFA = function(data, - model="A =~ a[1:5]; B =~ b[c(1,3,5)]; C =~ c1 + c2 + c3", - estimator="ML", - highorder="", orthogonal=FALSE, missing="listwise", - # CI=FALSE, MI=FALSE, - digits=3, nsmall=digits, - file=NULL) { +CFA = function( + data, + model="A =~ a[1:5]; B =~ b[c(1,3,5)]; C =~ c1 + c2 + c3", + estimator="ML", + highorder="", orthogonal=FALSE, missing="listwise", + # CI=FALSE, MI=FALSE, + digits=3, + file=NULL +) { # model.jmv = modelCFA.trans("jmv", model) model.lav = modelCFA.trans("lavaan", model, highorder) # if(orthogonal==TRUE | highorder!="") style = "lavaan" @@ -910,7 +916,7 @@ CFA = function(data, orthogonal=orthogonal, missing=missing) # fiml, listwise (default) # cat("\n#### lavaan output ####\n\n") - lavaan_summary(fit.lav, ci="raw", nsmall=nsmall, file=file) + lavaan_summary(fit.lav, ci="raw", digits=digits, file=file) # lavaan::summary(fit.lav, # fit.measures=TRUE, # standardized=TRUE, diff --git a/R/bruceR-stats_3_manova.R b/R/bruceR-stats_3_manova.R index 5ddb392..74f3715 100644 --- a/R/bruceR-stats_3_manova.R +++ b/R/bruceR-stats_3_manova.R @@ -93,7 +93,7 @@ levene_test = function(data, id, dvs, ivs.between) { row.names(lev) = paste("DV:", dv) levene = rbind(levene, lev) } - print_table(levene, nsmalls=c(3, 0, 0, 0)) + print_table(levene, digits=c(3, 0, 0, 0)) } } @@ -203,21 +203,21 @@ fix_long_data = function(data.long, ivs) { #' @param between Between-subjects factor(s). Multiple variables should be included in a character vector \code{c()}. #' @param within Within-subjects factor(s). Multiple variables should be included in a character vector \code{c()}. #' @param covariate Covariates. Multiple variables should be included in a character vector \code{c()}. -#' @param ss.type Type of sums of squares (SS) for ANOVA. Default is \code{"III"}. 
+#' @param ss.type Type of sums of squares (SS) for ANOVA. Defaults to \code{"III"}. #' Possible values are \code{"II"}, \code{"III"}, \code{2}, or \code{3}. #' @param sph.correction [Only for repeated measures with >= 3 levels] #' -#' Sphericity correction method for adjusting the degrees of freedom (\emph{df}) when the sphericity assumption is violated. Default is \code{"none"}. +#' Sphericity correction method for adjusting the degrees of freedom (\emph{df}) when the sphericity assumption is violated. Defaults to \code{"none"}. #' If Mauchly's test of sphericity is significant, you may set it to \code{"GG"} (Greenhouse-Geisser) or \code{"HF"} (Huynh-Feldt). #' @param aov.include Include the \code{aov} object in the returned object? -#' Default is \code{FALSE}, as suggested by \code{\link[afex:aov_car]{afex::aov_ez()}} +#' Defaults to \code{FALSE}, as suggested by \code{\link[afex:aov_car]{afex::aov_ez()}} #' (please see the \code{include_aov} argument in this help page, which provides a detailed explanation). #' If \code{TRUE}, you should also specify \code{model.type="univariate"} in \code{\link{EMMEANS}}. -#' @param digits,nsmall Number of decimal places of output. Default is \code{3}. +#' @param digits Number of decimal places of output. Defaults to \code{3}. #' @param file File name of MS Word (\code{.doc}). ## @param which.observed \strong{[only effective for computing generalized \eqn{\eta^2}]} ## -## Factors that are observed or measured (e.g., gender, age group, measured covariates) but not experimentally manipulated. Default is \code{NULL}. +## Factors that are observed or measured (e.g., gender, age group, measured covariates) but not experimentally manipulated. Defaults to \code{NULL}. ## The generalized \eqn{\eta^2} requires correct specification of the observed (vs. manipulated) variables. ## (If all the variables in \code{between} and \code{within} are set to \code{observed}, then generalized \eqn{\eta^2} will be equal to \eqn{\eta^2}.) 
#' @@ -319,15 +319,17 @@ fix_long_data = function(data.long, ivs) { #' @seealso \code{\link{TTEST}}, \code{\link{EMMEANS}}, \code{\link{bruceR-demodata}} #' #' @export -MANOVA = function(data, subID=NULL, dv=NULL, - dvs=NULL, dvs.pattern=NULL, - between=NULL, within=NULL, covariate=NULL, - ss.type="III", - sph.correction="none", - # which.observed=NULL, - aov.include=FALSE, - digits=3, nsmall=digits, - file=NULL) { +MANOVA = function( + data, subID=NULL, dv=NULL, + dvs=NULL, dvs.pattern=NULL, + between=NULL, within=NULL, covariate=NULL, + ss.type="III", + sph.correction="none", + # which.observed=NULL, + aov.include=FALSE, + digits=3, + file=NULL +) { ## Initialize data = as.data.frame(data) if(is.null(within)) { @@ -436,16 +438,16 @@ MANOVA = function(data, subID=NULL, dv=NULL, at$MS = at$`F`*at$`MSE` eta2 = effectsize::F_to_eta2(at$`F`, at$df1, at$df2, ci=0.90, alternative="two.sided") - at$p.eta2 = cc_m_ci(eta2$Eta2_partial, eta2$CI_low, eta2$CI_high, nsmall) %>% + at$p.eta2 = cc_m_ci(eta2$Eta2_partial, eta2$CI_low, eta2$CI_high, digits) %>% str_replace_all("0\\.", ".") - at$g.eta2 = str_replace_all(formatF(at$ges, nsmall), "0\\.", ".") + at$g.eta2 = str_replace_all(formatF(at$ges, digits), "0\\.", ".") at = at[c("MS", "MSE", "df1", "df2", "F", "Pr(>F)", "p.eta2", "g.eta2")] names(at)[7:8] = c("\u03b7\u00b2p [90% CI of \u03b7\u00b2p]", "\u03b7\u00b2G") row.names(at) = row.names(aov.ez$anova_table) %>% str_replace_all(":", " * ") - df.nsmall = ifelse(sph.correction=="none", 0, nsmall) - at.nsmalls = c(nsmall, nsmall, df.nsmall, df.nsmall, nsmall, 0, 0, 0) + df.nsmall = ifelse(sph.correction=="none", 0, digits) + at.nsmalls = c(digits, digits, df.nsmall, df.nsmall, digits, 0, 0, 0) ## Descriptive Statistics nsub = nrow(data.wide) @@ -469,12 +471,12 @@ MANOVA = function(data, subID=NULL, dv=NULL, cat("\n") Print("Descriptives:") print_table(nmsd, row.names=FALSE, - nsmalls=c(rep(nsmall, ncol.nmsd-1), 0)) + digits=c(rep(digits, ncol.nmsd-1), 0)) Print("Total sample 
size: <> = {N.info}") cat("\n") - nmsd$Mean = formatF(nmsd$Mean, nsmall) - nmsd$S.D. = formatF(nmsd$S.D., nsmall) + nmsd$Mean = formatF(nmsd$Mean, digits) + nmsd$S.D. = formatF(nmsd$S.D., digits) names(nmsd)[(ncol.nmsd-2):ncol.nmsd] = c("M", "SD", "n") nmsd.html = paste0( "



", @@ -499,7 +501,7 @@ MANOVA = function(data, subID=NULL, dv=NULL, Within-subjects factor(s): {WIT} Covariate(s): {COV} ") - print_table(at, nsmalls=at.nsmalls) + print_table(at, digits=at.nsmalls) if(sph.correction %in% c("GG", "HF")) { sph.text=switch(sph.correction, "GG"="GG (Greenhouse-Geisser)", @@ -533,7 +535,7 @@ MANOVA = function(data, subID=NULL, dv=NULL, sph = as.data.frame(sph) names(sph) = c("Mauchly's W", "pval") row.names(sph) = str_replace_all(row.names(sph), ":", " * ") - print_table(sph, nsmalls=4) + print_table(sph, digits=4) if(min(sph[,2])<.05 & sph.correction=="none") { Print("<>") @@ -545,7 +547,7 @@ MANOVA = function(data, subID=NULL, dv=NULL, if(!is.null(file)) { print_table( at, - nsmalls=at.nsmalls, + digits=at.nsmalls, col.names=c("MS", "MSE", "df1", @@ -679,18 +681,18 @@ MANOVA = function(data, subID=NULL, dv=NULL, #' it reports the results of omnibus test or simple main effect. #' If set to a character vector (e.g., \code{c("A", "B")}), #' it also reports the results of simple interaction effect. -#' @param by Moderator variable(s). Default is \code{NULL}. +#' @param by Moderator variable(s). Defaults to \code{NULL}. #' @param contrast Contrast method for multiple comparisons. -#' Default is \code{"pairwise"}. +#' Defaults to \code{"pairwise"}. #' #' Alternatives can be \code{"pairwise"} (\code{"revpairwise"}), #' \code{"seq"} (\code{"consec"}), \code{"poly"}, \code{"eff"}. #' For details, see \code{?emmeans::`contrast-methods`}. #' @param reverse The order of levels to be contrasted. -#' Default is \code{TRUE} (higher level vs. lower level). +#' Defaults to \code{TRUE} (higher level vs. lower level). #' @param p.adjust Adjustment method of \emph{p} values for multiple comparisons. -#' Default is \code{"bonferroni"}. -#' For polynomial contrasts, default is \code{"none"}. +#' Defaults to \code{"bonferroni"}. +#' For polynomial contrasts, defaults to \code{"none"}. 
#' #' Alternatives can be \code{"none"}, \code{"fdr"}, \code{"hochberg"}, #' \code{"hommel"}, \code{"holm"}, \code{"tukey"}, \code{"mvt"}, @@ -708,7 +710,7 @@ MANOVA = function(data, subID=NULL, dv=NULL, #' #' \code{"univariate"} requires also specifying \code{aov.include=TRUE} in \code{\link{MANOVA}} #' (not recommended by the \code{afex} package; for details, see \code{\link[afex:aov_car]{afex::aov_ez()}}). -#' @param digits,nsmall Number of decimal places of output. Default is \code{3}. +#' @param digits Number of decimal places of output. Defaults to \code{3}. #' #' @return #' The same model object as returned by @@ -822,13 +824,15 @@ MANOVA = function(data, subID=NULL, dv=NULL, #' @seealso \code{\link{TTEST}}, \code{\link{MANOVA}}, \code{\link{bruceR-demodata}} #' #' @export -EMMEANS = function(model, effect=NULL, by=NULL, - contrast="pairwise", - reverse=TRUE, - p.adjust="bonferroni", - sd.pooled=NULL, - model.type="multivariate", - digits=3, nsmall=digits) { +EMMEANS = function( + model, effect=NULL, by=NULL, + contrast="pairwise", + reverse=TRUE, + p.adjust="bonferroni", + sd.pooled=NULL, + model.type="multivariate", + digits=3 +) { # IMPORTANT: If include 'aov', the 'emmeans' results of # within-subjects design will not be equal to those in SPSS! 
# So we do not include 'aov' object but instead use 'lm' and 'mlm' @@ -865,7 +869,7 @@ EMMEANS = function(model, effect=NULL, by=NULL, sim$Effect = str_replace_all(sim$Effect, ":", " * ") eta2 = effectsize::F_to_eta2(sim$F.ratio, sim$df1, sim$df2, ci=0.90, alternative="two.sided") - sim$p.eta2 = cc_m_ci(eta2$Eta2_partial, eta2$CI_low, eta2$CI_high, nsmall) %>% + sim$p.eta2 = cc_m_ci(eta2$Eta2_partial, eta2$CI_low, eta2$CI_high, digits) %>% str_replace_all("0\\.", ".") if(length(by)>0) { vns = names(sim)[2:(length(by)+1)] @@ -875,8 +879,8 @@ EMMEANS = function(model, effect=NULL, by=NULL, c("F", "pval", "\u03b7\u00b2p [90% CI of \u03b7\u00b2p]") Print("Joint Tests of \"{effect.text}\":") - print_table(sim, nsmalls=c(rep(0, length(by)+3), - nsmall, 0, 0), + print_table(sim, digits=c(rep(0, length(by)+3), + digits, 0, 0), row.names=FALSE) Print(" <>. Simple effects of <> with 3 or more levels @@ -907,7 +911,7 @@ EMMEANS = function(model, effect=NULL, by=NULL, "pval") row.names(pht) = str_replace_all(row.names(pht), " : ", " & ") %^% ": " %^% Glue("\"{effect.text}\"") - print_table(pht, nsmalls=nsmall, + print_table(pht, digits=digits, title=Glue("Multivariate Tests of \"{effect.text}\":"), note=Glue("<>. Identical to the results obtained with SPSS GLM EMMEANS syntax.")) cat("\n") @@ -919,7 +923,7 @@ EMMEANS = function(model, effect=NULL, by=NULL, names(pht) = c("Sum of Squares", "df", "Mean Square", "F", "pval") row.names(pht)[1:(nrow(pht)-1)] = str_replace_all(row.names(pht)[1:(nrow(pht)-1)], " : ", " & ") %^% ": " %^% Glue("\"{effect.text}\"") - print_table(pht, nsmalls=c(nsmall, 0, nsmall, nsmall, 0), + print_table(pht, digits=c(digits, 0, digits, digits, 0), title=Glue("Univariate Tests of \"{effect.text}\":"), note=Glue("<>. 
Identical to the results obtained with SPSS GLM EMMEANS syntax.")) cat("\n") @@ -934,14 +938,14 @@ EMMEANS = function(model, effect=NULL, by=NULL, model=model.type) }) emm = summary(emm) # to a data.frame (class 'summary_emm') - emm$MeanCI = cc_m_ci(emm$emmean, emm$lower.CL, emm$upper.CL, nsmall) + emm$MeanCI = cc_m_ci(emm$emmean, emm$lower.CL, emm$upper.CL, digits) vns = names(emm)[1:(length(by)+1)] names(emm)[1:(length(by)+1)] = "\"" %^% vns %^% "\"" emm = cbind(emm[1:(length(by)+1)], emm[c("MeanCI", "SE")]) names(emm)[length(emm)-1] = "Mean [95% CI of Mean]" Print("Estimated Marginal Means of \"{effect.text}\":") - print_table(emm, nsmalls=nsmall, row.names=FALSE) + print_table(emm, digits=digits, row.names=FALSE) cat(paste(attr(emm, "mesg"), collapse="\n")) cat("\n") @@ -993,12 +997,12 @@ EMMEANS = function(model, effect=NULL, by=NULL, # sd.pooled = sqrt(model$anova_table[term, "MSE"]) if(contrast!="poly") attr(con, "mesg") = c( - Glue("Pooled SD for computing Cohen\u2019s d: {formatF(sd.pooled, nsmall)}"), + Glue("Pooled SD for computing Cohen\u2019s d: {formatF(sd.pooled, digits)}"), attr(con, "mesg")) con$cohen.d.ci = cc_m_ci(con$estimate/sd.pooled, conCI$lower.CL/sd.pooled, conCI$upper.CL/sd.pooled, - nsmall) + digits) if(length(by)>0) { vns = names(con)[2:(length(con)-6)] names(con)[2:(length(con)-6)] = "\"" %^% vns %^% "\"" @@ -1024,7 +1028,7 @@ EMMEANS = function(model, effect=NULL, by=NULL, Print("{contr.method} of \"{effect.text}\":") con$df = formatF(con$df, 0) - print_table(con, nsmalls=nsmall, row.names=FALSE) + print_table(con, digits=digits, row.names=FALSE) cat(paste(attr(con, "mesg"), collapse="\n")) cat("\n\n") Print("<2) { FE.vif = jtools::summ(model, vif=TRUE) @@ -622,7 +626,7 @@ GLM_summary = function(model, robust=FALSE, cluster=NULL, } else { FE = cbind(FE, VIF=NA) } - print_table(FE, nsmalls=nsmall, + print_table(FE, digits=digits, title=Glue(" Unstandardized Coefficients: Outcome Variable: {dv} @@ -636,9 +640,9 @@ GLM_summary = 
function(model, robust=FALSE, cluster=NULL, FE.rob = as.data.frame(summ.rob$coeftable) FE.rob$CI = cc_ci(FE.rob[,1] + qt(0.025, df) * FE.rob[,2], FE.rob[,1] + qt(0.975, df) * FE.rob[,2], - nsmall) + digits) names(FE.rob) = c("b", "S.E.", "t", "pval", "[95% CI of b]") - print_table(FE.rob, nsmalls=nsmall, + print_table(FE.rob, digits=digits, title=Glue("{ifelse(is.null(cluster), 'Heteroskedasticity', 'Cluster')}-Robust Standard Errors:"), note=Glue("<>")) cat("\n") @@ -655,13 +659,13 @@ GLM_summary = function(model, robust=FALSE, cluster=NULL, pval = p.t(t, df), CI.std = cc_ci(FE.std[,1] + qt(0.025, df) * FE.std[,2], FE.std[,1] + qt(0.975, df) * FE.std[,2], - nsmall), + digits), r.partial = FE.rp$coeftable[-1, "partial.r"], r.part = FE.rp$coeftable[-1, "part.r"]) names(FE.std) = c("\u03b2", "S.E.", "t", "pval", "[95% CI of \u03b2]", "r(partial)", "r(part)") - print_table(FE.std, nsmalls=nsmall, + print_table(FE.std, digits=digits, title=Glue(" Standardized Coefficients (\u03b2): Outcome Variable: {dv} @@ -679,8 +683,8 @@ GLM_summary = function(model, robust=FALSE, cluster=NULL, <> Model Fit: - AIC = {AIC(model):.{nsmall}} - BIC = {BIC(model):.{nsmall}} + AIC = {AIC(model):.{digits}} + BIC = {BIC(model):.{digits}} {p(chi2={Chi2}, df={Df})} {rep_char('\u2500', 7)} Pseudo-<>\u00b2 {rep_char('\u2500', 7)} McFadden\u2019s <>\u00b2 = {1 - model$deviance/model$null.deviance:.5} <> @@ -692,7 +696,7 @@ GLM_summary = function(model, robust=FALSE, cluster=NULL, FE = as.data.frame(sumModel[["coefficients"]]) FE$CI = cc_ci(FE[,1] + qnorm(0.025) * FE[,2], FE[,1] + qnorm(0.975) * FE[,2], - nsmall) + digits) FE$OR = exp(FE[,1]) if(length(model[["model"]])>2) { FE$VIF = jtools::summ(model, vif=TRUE)$coeftable[,"VIF"] @@ -700,7 +704,7 @@ GLM_summary = function(model, robust=FALSE, cluster=NULL, FE$VIF = NA } names(FE) = c("b", "S.E.", "z", "pval", "[95% CI of b]", "OR", "VIF") - print_table(FE, nsmalls=nsmall, + print_table(FE, digits=digits, title=Glue(" Unstandardized Coefficients: 
Outcome Variable: {dv} (family: {model$family$family}; link function: {model$family$link}) @@ -715,9 +719,9 @@ GLM_summary = function(model, robust=FALSE, cluster=NULL, FE.rob = as.data.frame(summ.rob$coeftable) FE.rob$CI = cc_ci(FE.rob[,1] + qnorm(0.025) * FE.rob[,2], FE.rob[,1] + qnorm(0.975) * FE.rob[,2], - nsmall) + digits) names(FE.rob) = c("b", "S.E.", "z", "pval", "[95% CI of b]") - print_table(FE.rob, nsmalls=nsmall, + print_table(FE.rob, digits=digits, title=Glue("{ifelse(is.null(cluster), 'Heteroskedasticity', 'Cluster')}-Robust Standard Errors:"), note=Glue("<>")) cat("\n") @@ -734,7 +738,7 @@ GLM_summary = function(model, robust=FALSE, cluster=NULL, ## Testing random effects and computing intraclass correlation coefficient (ICC) for HLM -HLM_ICC = function(model, nsmall=3) { +HLM_ICC = function(model, digits=3) { ## Extract components from model ## sumModel = summary(model) data = as.data.frame(model@frame) @@ -768,13 +772,13 @@ HLM_ICC = function(model, nsmall=3) { # icc.wald.z = icc/icc.se ## Combine results ## - ICC$K = formatF(ICC$K, nsmall=0) - ICC$Variance = formatF(ICC$Variance, nsmall=5) - # ICC$S.E. = formatF(var.se, nsmall=nsmall) - # ICC$Wald.Z = formatF(var.wald.z, nsmall=2) + ICC$K = formatF(ICC$K, digits=0) + ICC$Variance = formatF(ICC$Variance, digits=5) + # ICC$S.E. = formatF(var.se, digits=digits) + # ICC$Wald.Z = formatF(var.wald.z, digits=2) # ICC$p = p.trans(var.p) # ICC$sig = sig.trans(var.p) - ICC$ICC = formatF(icc, nsmall=5) + ICC$ICC = formatF(icc, digits=5) ICC[ICC$Group=="Residual", c("K", "Parameter", "ICC")] = "" ICC[ICC$Parameter!="(Intercept)" & ICC$Group!="Residual", c("Group", "K", "ICC")] = "" names(ICC)[1] = paste0("Cluster", rep_char(" ", max(nchar(ICC$Group))-7)) @@ -810,7 +814,7 @@ HLM_ICC = function(model, nsmall=3) { ## the \code{HLM} software provides \emph{df}s that totally depend on the variable types (i.e., a theory-driven approach). 
#' #' @param model A model fitted with \code{lmer} or \code{glmer} function using the \code{lmerTest} package. -## @param level2.predictors \strong{[Only for \code{lmer}]} [Optional] Default is \code{NULL}. +## @param level2.predictors \strong{[Only for \code{lmer}]} [Optional] Defaults to \code{NULL}. ## If you have predictors at level 2, ## you may also specify the level-2 grouping/clustering variables ## and the corresponding level-2 predictor variables. @@ -825,7 +829,7 @@ HLM_ICC = function(model, nsmall=3) { #' Test random effects (i.e., variance components) by using the likelihood-ratio test (LRT), #' which is asymptotically chi-square distributed. #' For large datasets, it is much time-consuming. -#' @param digits,nsmall Number of decimal places of output. Default is \code{3}. +#' @param digits Number of decimal places of output. Defaults to \code{3}. #' @param ... Other arguments. You may re-define \code{formula}, \code{data}, or \code{family}. #' #' @return No return value. @@ -878,12 +882,14 @@ HLM_ICC = function(model, nsmall=3) { #' \code{\link{regress}} #' #' @export -HLM_summary = function(model=NULL, - # level2.predictors=NULL, - # vartypes=NULL, - test.rand=FALSE, # time-consuming in big datasets - digits=3, nsmall=digits, - ...) { +HLM_summary = function( + model=NULL, + # level2.predictors=NULL, + # vartypes=NULL, + test.rand=FALSE, # time-consuming in big datasets + digits=3, + ... +) { dots = list(...) 
if(c("formula", "data") %allin% names(dots)) { # re-modeling @@ -957,8 +963,8 @@ HLM_summary = function(model=NULL, R2.glmm = suppressWarnings( MuMIn::r.squaredGLMM(model) ) # R2.glmm[1,1]; R2.glmm[1,2] Print(" Model Fit: - AIC = {AIC(model):.{nsmall}} - BIC = {BIC(model):.{nsmall}} + AIC = {AIC(model):.{digits}} + BIC = {BIC(model):.{digits}} <>_(m)\u00b2 = {R2.glmm[1,1]:.5} <>\u00b2: fixed effects)>> <>_(c)\u00b2 = {R2.glmm[1,2]:.5} <>\u00b2: fixed + random effects)>> Omega\u00b2 = {Omg2:.5} <> @@ -969,7 +975,7 @@ HLM_summary = function(model=NULL, # aov.hlm=car::Anova(model, type=3) aov.hlm = stats::anova(model) if(nrow(aov.hlm)>0) { - print_table(aov.hlm, nsmalls=2, + print_table(aov.hlm, digits=2, title="ANOVA Table:") cat("\n") } @@ -980,10 +986,10 @@ HLM_summary = function(model=NULL, FE[c(1,2,4,3,5)], CI = cc_ci(FE[,1] + qt(0.025, FE[,3]) * FE[,2], FE[,1] + qt(0.975, FE[,3]) * FE[,2], - nsmall)) + digits)) names(FE) = c("b/\u03b3", "S.E.", "t", "df", "pval", "[95% CI of b/\u03b3]") - print_table(FE, nsmalls=c(nsmall, nsmall, 2, 1, 0, 0), + print_table(FE, digits=c(digits, digits, 2, 1, 0, 0), title=Glue(" Fixed Effects: Unstandardized Coefficients (b or \u03b3): @@ -1002,10 +1008,10 @@ HLM_summary = function(model=NULL, FE.std, t = t, df = df, pval = p.t(t, df), CI = cc_ci(FE.std[,1] + qt(0.025, df) * FE.std[,2], FE.std[,1] + qt(0.975, df) * FE.std[,2], - nsmall)) + digits)) names(FE.std) = c("\u03b2", "S.E.", "t", "df", "pval", "[95% CI of \u03b2]") - print_table(FE.std, nsmalls=c(nsmall, nsmall, 2, 1, 0, 0), + print_table(FE.std, digits=c(digits, digits, 2, 1, 0, 0), title=Glue(" Standardized Coefficients (\u03b2): Outcome Variable: {dv}")) @@ -1016,18 +1022,18 @@ HLM_summary = function(model=NULL, # RE = sumModel[["varcor"]] # res = sumModel[["sigma"]]^2 # print(RE, comp="Variance") - RE = HLM_ICC(model, nsmall=nsmall) + RE = HLM_ICC(model, digits=digits) print_table(RE, row.names=FALSE, title="Random Effects:") cat("\n") } else if(inherits(model, 
"glmerMod")) { - summ = jtools::summ(model, digits=nsmall, re.variance="var") + summ = jtools::summ(model, digits=digits, re.variance="var") ## Print: Model Fit (Omega^2, Pseudo-R^2, and Information Criteria) ## R2.glmm = suppressWarnings( MuMIn::r.squaredGLMM(model) ) # R2.glmm[1,1]; R2.glmm[1,2] Print(" Model Fit: - AIC = {AIC(model):.{nsmall}} - BIC = {BIC(model):.{nsmall}} + AIC = {AIC(model):.{digits}} + BIC = {BIC(model):.{digits}} <>_(m)\u00b2 = {R2.glmm[1,1]:.5} <>\u00b2: fixed effects)>> <>_(c)\u00b2 = {R2.glmm[1,2]:.5} <>\u00b2: fixed + random effects)>> \n @@ -1037,11 +1043,11 @@ HLM_summary = function(model=NULL, FE = as.data.frame(sumModel[["coefficients"]]) FE$CI = cc_ci(FE[,1] + qnorm(0.025) * FE[,2], FE[,1] + qnorm(0.975) * FE[,2], - nsmall) + digits) FE$OR = exp(FE[,1]) names(FE) = c("b/\u03b3", "S.E.", "z", "pval", "[95% CI of b/\u03b3]", "OR") - print_table(FE, nsmalls=c(nsmall, nsmall, 2, 0, 0, nsmall), + print_table(FE, digits=c(digits, digits, 2, 0, 0, digits), title=Glue(" Fixed Effects: Unstandardized Coefficients (b or \u03b3): @@ -1138,7 +1144,7 @@ HLM_summary = function(model=NULL, #' @param data Data frame. #' @param group Grouping variable. #' @param icc.var Key variable for analysis (usually the dependent variable). -#' @param rwg.vars Default is \code{icc.var}. It can be: +#' @param rwg.vars Defaults to \code{icc.var}. It can be: #' \itemize{ #' \item A single variable (\emph{single-item} measure), then computing rWG. #' \item Multiple variables (\emph{multi-item} measure), then computing rWG(J), where J = the number of items. @@ -1153,7 +1159,7 @@ HLM_summary = function(model=NULL, #' Then \code{rwg.levels} should be provided (= A in the above formula). #' For example, if the measure is a 5-point Likert scale, you should set \code{rwg.levels=5}. #' } -#' @param digits,nsmall Number of decimal places of output. Default is 3. +#' @param digits Number of decimal places of output. Defaults to \code{3}. 
#' #' @return Invisibly return a list of results. #' @@ -1170,6 +1176,8 @@ HLM_summary = function(model=NULL, #' \emph{Journal of Applied Psychology, 69}, 85--98. #' #' @seealso +#' \code{\link{cor_multilevel}} +#' #' \href{https://CRAN.R-project.org/package=multilevel}{R package "multilevel"} #' #' @examples @@ -1188,7 +1196,7 @@ HLM_summary = function(model=NULL, HLM_ICC_rWG = function(data, group, icc.var, rwg.vars=icc.var, rwg.levels=0, - digits=3, nsmall=digits) { + digits=3) { data = as.data.frame(data) ## ICC(1) and ICC(2) @@ -1261,8 +1269,8 @@ HLM_ICC_rWG = function(data, group, icc.var, ICC variable: \"{icc.var}\" - ICC(1) = {formatF(ICC1, nsmall)} <> - ICC(2) = {formatF(ICC2, nsmall)} <> + ICC(1) = {formatF(ICC1, digits)} <> + ICC(2) = {formatF(ICC2, digits)} <> {rwg.name} variable{ifelse(length(rwg.vars)==1, '', 's')}: \"{paste(rwg.vars, collapse='\", \"')}\" @@ -1270,7 +1278,7 @@ HLM_ICC_rWG = function(data, group, icc.var, ") summ_rwg = as.data.frame(t(as.matrix(summary(rwg)))) rownames(summ_rwg) = rwg.name - print_table(summ_rwg, nsmalls=nsmall) + print_table(summ_rwg, digits=digits) cat("\n") invisible(list(ICC1=ICC1, ICC2=ICC2, rwg=rwg.out)) diff --git a/R/bruceR-stats_5_advance.R b/R/bruceR-stats_5_advance.R index 8c46327..db0455a 100644 --- a/R/bruceR-stats_5_advance.R +++ b/R/bruceR-stats_5_advance.R @@ -377,7 +377,7 @@ boot_ci = function(boot, #' \code{mod1.val=c(1, 3, 5)} or \code{mod1.val=c("A", "B", "C")}. #' @param ci Method for estimating the standard error (SE) and #' 95\% confidence interval (CI) of indirect effect(s). -#' Default is \code{"boot"} for (generalized) linear models or +#' Defaults to \code{"boot"} for (generalized) linear models or #' \code{"mcmc"} for (generalized) linear mixed models (i.e., multilevel models). 
#' \describe{ #' \item{\code{"boot"}}{Percentile Bootstrap} @@ -389,10 +389,10 @@ boot_ci = function(boot, #' You \emph{should not} report the 95\% CIs of simple slopes as Bootstrap or Monte Carlo CIs, #' because they are just standard CIs without any resampling method. #' @param nsim Number of simulation samples (bootstrap resampling or Monte Carlo simulation) -#' for estimating SE and 95\% CI. Default is \code{100} for running examples faster. +#' for estimating SE and 95\% CI. Defaults to \code{100} for running examples faster. #' In formal analyses, however, \strong{\code{nsim=1000} (or larger)} is strongly suggested! #' @param seed Random seed for obtaining reproducible results. -#' Default is \code{NULL}. +#' Defaults to \code{NULL}. #' You may set to any number you prefer #' (e.g., \code{seed=1234}, just an uncountable number). #' @@ -403,12 +403,12 @@ boot_ci = function(boot, #' get exactly the same results across different R packages #' (e.g., \code{\link[lavaan:lavaan-class]{lavaan}} vs. \code{\link[mediation:mediate]{mediation}}) #' and software (e.g., SPSS, Mplus, R, jamovi). -#' @param center Centering numeric (continuous) predictors? Default is \code{TRUE} (suggested). -#' @param std Standardizing variables to get standardized coefficients? Default is \code{FALSE}. +#' @param center Centering numeric (continuous) predictors? Defaults to \code{TRUE} (suggested). +#' @param std Standardizing variables to get standardized coefficients? Defaults to \code{FALSE}. #' If \code{TRUE}, it will standardize all numeric (continuous) variables #' before building regression models. #' However, it is \emph{not suggested} to set \code{std=TRUE} for \emph{generalized} linear (mixed) models. -#' @param digits,nsmall Number of decimal places of output. Default is \code{3}. +#' @param digits Number of decimal places of output. Defaults to \code{3}. #' @param file File name of MS Word (\code{.doc}). #' Currently, only regression model summary can be saved. 
#' @@ -539,30 +539,31 @@ boot_ci = function(boot, #' ## https://github.com/psychbruce/bruceR/tree/main/note #' } #' @export -PROCESS = function(data, - y="", - x="", - meds=c(), - mods=c(), - covs=c(), - clusters=c(), - hlm.re.m="", - hlm.re.y="", - hlm.type=c("1-1-1", "2-1-1", "2-2-1"), - med.type=c("parallel", "serial"), # "p"*, "s" - mod.type=c("2-way", "3-way"), # "2"*, "3" - mod.path=c("x-y", "x-m", "m-y", "all"), - cov.path=c("y", "m", "both"), - mod1.val=NULL, - mod2.val=NULL, - ci=c("boot", "bc.boot", "bca.boot", "mcmc"), - nsim=100, - seed=NULL, - center=TRUE, - std=FALSE, - digits=3, - nsmall=digits, - file=NULL) { +PROCESS = function( + data, + y="", + x="", + meds=c(), + mods=c(), + covs=c(), + clusters=c(), + hlm.re.m="", + hlm.re.y="", + hlm.type=c("1-1-1", "2-1-1", "2-2-1"), + med.type=c("parallel", "serial"), # "p"*, "s" + mod.type=c("2-way", "3-way"), # "2"*, "3" + mod.path=c("x-y", "x-m", "m-y", "all"), + cov.path=c("y", "m", "both"), + mod1.val=NULL, + mod2.val=NULL, + ci=c("boot", "bc.boot", "bca.boot", "mcmc"), + nsim=100, + seed=NULL, + center=TRUE, + std=FALSE, + digits=3, + file=NULL +) { ## Default Setting warning.y.class = "\"y\" should be a numeric variable or a factor variable with only 2 levels." warning.x.class = "\"x\" should be a numeric variable or a factor variable with only 2 levels." 
@@ -944,7 +945,7 @@ CAUTION: rm(data.v.temp, data.c.temp) } model_summary(c(list(model.t), model.m, list(model.y)), - nsmall=nsmall, std=std, file=file) + digits=digits, std=std, file=file) file = NULL ## PROCESS Model Summary @@ -983,7 +984,7 @@ CAUTION: x.label=\"X\", y.label=\"Y\", eff.tag=\"{eff.tag}\", - nsmall, file=file) + digits, file=file) RES = c(RES, list(res))") } run.process.mod.xm = function(i, eff.tag="") { @@ -994,8 +995,8 @@ CAUTION: mod.type, x.label=\"X\", y.label=\"M\", - eff.tag=\"{eff.tag}\", - nsmall, file=file) + eff.tag=\"{eff.tag}\", + digits, file=file) RES = c(RES, list(res))") } run.process.mod.my = function(i, eff.tag="") { @@ -1007,7 +1008,7 @@ CAUTION: x.label=\"M\", y.label=\"Y\", eff.tag=\"{eff.tag}\", - nsmall, file=file) + digits, file=file) RES = c(RES, list(res))") } run.process.med = function(eff.tag="") { @@ -1019,7 +1020,7 @@ CAUTION: direct=ifelse(length(mods)==0, TRUE, FALSE), total=ifelse(length(meds)==1, TRUE, FALSE), eff.tag=\"{eff.tag}\", - nsmall, file=file) + digits, file=file) RES = c(RES, list(res))") } conditional = NULL @@ -1033,7 +1034,7 @@ CAUTION: # res = process_lav(data.v, y, x, meds, covs, # med.type, cov.path, # ci, nsim, seed, - # nsmall=nsmall, + # digits=digits, # file=file) # RES = c(RES, list(res)) # lavaan ERROR: unordered factor(s) detected; make them numeric or ordered: pass gender @@ -1046,7 +1047,7 @@ CAUTION: res = process_lav(data.v, y, x, meds, covs, clusters, med.type, cov.path, ci, nsim, seed, - nsmall=nsmall, + digits=digits, file=file) RES = c(RES, list(res)) } else { @@ -1060,11 +1061,11 @@ CAUTION: de$df = NULL conf.int = confint(model.y) conf.int = conf.int[which(row.names(conf.int)==row.names(de)),] - de$CI = paste0("[", paste(formatF(conf.int, nsmall), collapse=", "), "]") + de$CI = paste0("[", paste(formatF(conf.int, digits), collapse=", "), "]") names(de)[1] = "Effect" names(de)[5] = "[95% CI]" row.names(de)[1] = "Direct (c')" - print_table(de, nsmalls=nsmall, file=file) + 
print_table(de, digits=digits, file=file) cat("\n") } for(i in 1:length(meds)) { @@ -1104,7 +1105,7 @@ CAUTION: #' @param ci Method for estimating standard error (SE) and #' 95\% confidence interval (CI). #' -#' Default is \code{"raw"} (the standard approach of \code{lavaan}). +#' Defaults to \code{"raw"} (the standard approach of \code{lavaan}). #' Other options: #' \describe{ #' \item{\code{"boot"}}{Percentile Bootstrap} @@ -1114,10 +1115,10 @@ CAUTION: #' @param nsim Number of simulation samples (bootstrap resampling) #' for estimating SE and 95\% CI. #' In formal analyses, \strong{\code{nsim=1000} (or larger)} is strongly suggested. -#' @param seed Random seed for obtaining reproducible results. Default is \code{NULL}. -#' @param digits,nsmall Number of decimal places of output. Default is \code{3}. -#' @param print Print results. Default is \code{TRUE}. -#' @param covariance Print (co)variances. Default is \code{FALSE}. +#' @param seed Random seed for obtaining reproducible results. Defaults to \code{NULL}. +#' @param digits Number of decimal places of output. Defaults to \code{3}. +#' @param print Print results. Defaults to \code{TRUE}. +#' @param covariance Print (co)variances. Defaults to \code{FALSE}. #' @param file File name of MS Word (\code{.doc}). 
#' #' @return @@ -1192,14 +1193,16 @@ CAUTION: #' # lavaan_summary(lv1, ci="boot", nsim=1000, seed=1) #' #' @export -lavaan_summary = function(lavaan, - ci=c("raw", "boot", "bc.boot", "bca.boot"), - nsim=100, - seed=NULL, - digits=3, nsmall=digits, - print=TRUE, - covariance=FALSE, - file=NULL) { +lavaan_summary = function( + lavaan, + ci=c("raw", "boot", "bc.boot", "bca.boot"), + nsim=100, + seed=NULL, + digits=3, + print=TRUE, + covariance=FALSE, + file=NULL +) { FIT = lavaan::fitMeasures(lavaan) try({ @@ -1289,21 +1292,21 @@ lavaan_summary = function(lavaan, cat("\n") Print(" <> - {p(chi2=FIT['chisq'], df=FIT['df'], n=FIT['ntotal'], nsmall=nsmall)} - \u03c7\u00b2/<> = {FIT['chisq']/FIT['df']:.{nsmall}}{ifelse(FIT['df']==0, ' <>', '')} - AIC = {FIT['aic']:.{nsmall}} <> - BIC = {FIT['bic']:.{nsmall}} <> - CFI = {FIT['cfi']:.{nsmall}} <> - TLI = {FIT['tli']:.{nsmall}} <> - NFI = {FIT['nfi']:.{nsmall}} <> - IFI = {FIT['ifi']:.{nsmall}} <> - GFI = {FIT['gfi']:.{nsmall}} <> - AGFI = {FIT['agfi']:.{nsmall}} <> - RMSEA = {FIT['rmsea']:.{nsmall}}, 90% CI [{FIT['rmsea.ci.lower']:.{nsmall}}, {FIT['rmsea.ci.upper']:.{nsmall}}] <> - SRMR = {FIT['srmr']:.{nsmall}} <> + {p(chi2=FIT['chisq'], df=FIT['df'], n=FIT['ntotal'], digits=digits)} + \u03c7\u00b2/<> = {FIT['chisq']/FIT['df']:.{digits}}{ifelse(FIT['df']==0, ' <>', '')} + AIC = {FIT['aic']:.{digits}} <> + BIC = {FIT['bic']:.{digits}} <> + CFI = {FIT['cfi']:.{digits}} <> + TLI = {FIT['tli']:.{digits}} <> + NFI = {FIT['nfi']:.{digits}} <> + IFI = {FIT['ifi']:.{digits}} <> + GFI = {FIT['gfi']:.{digits}} <> + AGFI = {FIT['agfi']:.{digits}} <> + RMSEA = {FIT['rmsea']:.{digits}}, 90% CI [{FIT['rmsea.ci.lower']:.{digits}}, {FIT['rmsea.ci.upper']:.{digits}}] <> + SRMR = {FIT['srmr']:.{digits}} <> ") cat("\n") - print_table(ALL, row.names=TRUE, nsmalls=nsmall, + print_table(ALL, row.names=TRUE, digits=digits, title="<>", note=Glue("<>. 
{CI} Confidence Interval (CI) and SE.")) cat("\n") @@ -1311,7 +1314,7 @@ lavaan_summary = function(lavaan, if(!is.null(file)) { print_table( - ALL, nsmalls=nsmall, row.names=TRUE, + ALL, digits=digits, row.names=TRUE, title="Table. Model Estimates.", note="Note. * p < .05. ** p < .01. *** p < .001.", file=file, @@ -1329,12 +1332,14 @@ lavaan_summary = function(lavaan, ## Model 4 and 6 -process_lav = function(data, y, x, meds, covs, clusters, - med.type, cov.path, - ci, nsim, seed, - nsmall=3, - file=NULL, - print=TRUE) { +process_lav = function( + data, y, x, meds, covs, clusters, + med.type, cov.path, + ci, nsim, seed, + digits=3, + file=NULL, + print=TRUE +) { if(length(clusters)>=1) stop("Multilevel serial mediation is not supported currently.", call.=TRUE) @@ -1375,13 +1380,13 @@ process_lav = function(data, y, x, meds, covs, clusters, if(print) { MED.print = MED MED.print$Beta = NULL - MED.print$CI = cc_ci(MED[["BootLLCI"]], MED[["BootULCI"]], nsmall) + MED.print$CI = cc_ci(MED[["BootLLCI"]], MED[["BootULCI"]], digits) names(MED.print)[length(MED.print)] = "[Boot 95% CI]" MED.print$Beta = MED$Beta print_table( dplyr::select(MED.print, !c("BootLLCI", "BootULCI")), row.names=TRUE, - nsmalls=nsmall, + digits=digits, line=is.null(file), file=file, title=Glue(" @@ -1397,7 +1402,7 @@ process_lav = function(data, y, x, meds, covs, clusters, } -extract_med = function(med, ci, nsmall=3, direct=TRUE, total=TRUE) { +extract_med = function(med, ci, digits=3, direct=TRUE, total=TRUE) { boot = ifelse(grepl("boot", ci), TRUE, FALSE) # MED = rbind( # c(med$d.avg, sd(med$d.avg.sims), med$d.avg.ci, med$d.avg.p), @@ -1418,7 +1423,7 @@ extract_med = function(med, ci, nsmall=3, direct=TRUE, total=TRUE) { names(MED) = c("Effect", "S.E.", "LLCI", "ULCI", "p") MED$z = MED$Effect / MED$S.E. 
MED$pval = p.z(MED$z) - MED$CI = cc_ci(MED[[3]], MED[[4]], nsmall) + MED$CI = cc_ci(MED[[3]], MED[[4]], digits) names(MED)[8] = ifelse(boot, "[Boot 95% CI]", "[MCMC 95% CI]") if(direct==FALSE) total = FALSE if(direct==FALSE) MED = MED[which(row.names(MED)!="Direct (c')"),] @@ -1427,21 +1432,23 @@ extract_med = function(med, ci, nsmall=3, direct=TRUE, total=TRUE) { } -process_med = function(model.m, - model.y, - x, y, - medi, - conditional=NULL, # process_mod => RES0 - simple.slopes=NULL, # process_mod => RES - ci, - nsim=100, - seed=1, - direct=TRUE, - total=TRUE, - eff.tag="", - nsmall=3, - file=NULL, - print=TRUE) { +process_med = function( + model.m, + model.y, + x, y, + medi, + conditional=NULL, # process_mod => RES0 + simple.slopes=NULL, # process_mod => RES + ci, + nsim=100, + seed=1, + direct=TRUE, + total=TRUE, + eff.tag="", + digits=3, + file=NULL, + print=TRUE +) { if(inherits(model.m, "lmerModLmerTest")) class(model.m) = "lmerMod" if(inherits(model.y, "lmerModLmerTest")) class(model.y) = "lmerMod" boot = ifelse(grepl("boot", ci), TRUE, FALSE) @@ -1464,7 +1471,7 @@ process_med = function(model.m, boot=boot, boot.ci.type="perc", # "bca", "perc" sims=nsim) - MED = extract_med(med, ci, nsmall, direct, total) + MED = extract_med(med, ci, digits, direct, total) } else { # moderated mediation COV.list = list() @@ -1492,7 +1499,7 @@ process_med = function(model.m, boot=boot, boot.ci.type="perc", # "bca", "perc" sims=nsim) - MEDi = extract_med(med, ci, nsmall, direct=FALSE, total=FALSE) + MEDi = extract_med(med, ci, digits, direct=FALSE, total=FALSE) # MEDi = cbind(data.frame(Path=row.names(MEDi)), MEDi) # names(MEDi)[1] = " " row.names(MEDi) = NULL @@ -1515,7 +1522,7 @@ process_med = function(model.m, print_table( dplyr::select(MED, !c("LLCI", "ULCI", "p")), row.names=is.null(conditional), - nsmalls=nsmall, + digits=digits, line=is.null(file), title=Glue(" <> \"{x}\" (X) ==> \"{medi}\" (M) ==> \"{y}\" (Y)>>{eff.tag}"), @@ -1529,21 +1536,23 @@ process_med = 
function(model.m, } -process_mod = function(model0, - model, - data.c, - x, y, - mod1, - mod2=NULL, - mod1.val=NULL, - mod2.val=NULL, - mod.type=c("2-way", "3-way"), - x.label="X", - y.label="Y", - eff.tag="", - nsmall=3, - file=NULL, - print=TRUE) { +process_mod = function( + model0, + model, + data.c, + x, y, + mod1, + mod2=NULL, + mod1.val=NULL, + mod2.val=NULL, + mod.type=c("2-way", "3-way"), + x.label="X", + y.label="Y", + eff.tag="", + digits=3, + file=NULL, + print=TRUE +) { data.c = data.c suppressWarnings({ simple.slopes = interactions::sim_slopes( @@ -1588,17 +1597,17 @@ process_mod = function(model0, if(is.null(mod2)) { res0 = res = cbind(data.frame(Mod1=mod1.vals), res[-1]) if(c("- 1 SD", "Mean", "+ 1 SD") %allin% names(mod1.vals)) - res[[1]] = str_trim(formatF(res[[1]], nsmall)) %^% + res[[1]] = str_trim(formatF(res[[1]], digits)) %^% c(" (- SD)", " (Mean)", " (+ SD)") names(res0)[1] = mod1 names(res)[1] = "\"" %^% mod1 %^% "\"" } else { res0 = res = cbind(data.frame(Mod2=mod2.val), res) if(c("- 1 SD", "Mean", "+ 1 SD") %allin% names(mod2.vals)) - res[[1]] = str_trim(formatF(res[[1]], nsmall)) %^% + res[[1]] = str_trim(formatF(res[[1]], digits)) %^% c(" (- SD)", " (Mean)", " (+ SD)")[i] if(c("- 1 SD", "Mean", "+ 1 SD") %allin% names(mod1.vals)) - res[[2]] = str_trim(formatF(res[[2]], nsmall)) %^% + res[[2]] = str_trim(formatF(res[[2]], digits)) %^% c(" (- SD)", " (Mean)", " (+ SD)") names(res0)[1] = mod2 names(res0)[2] = mod1 @@ -1609,12 +1618,12 @@ process_mod = function(model0, RES = rbind(RES, res) RES0 = rbind(RES0, res0) } - RES$`[95% CI]` = cc_ci(RES[["LLCI"]], RES[["ULCI"]], nsmall) - RES[[1]] = format(str_trim(formatF(RES[[1]], nsmall)), + RES$`[95% CI]` = cc_ci(RES[["LLCI"]], RES[["ULCI"]], digits) + RES[[1]] = format(str_trim(formatF(RES[[1]], digits)), width=nchar(names(RES)[1])) names(RES)[1] = format(names(RES)[1], width=max(nchar(RES[[1]]))) if(!is.null(mod2)) { - RES[[2]] = format(str_trim(formatF(RES[[2]], nsmall)), + RES[[2]] = 
format(str_trim(formatF(RES[[2]], digits)), width=nchar(names(RES)[2])) names(RES)[2] = format(names(RES)[2], width=max(nchar(RES[[2]]))) } @@ -1648,20 +1657,20 @@ process_mod = function(model0, if(print) { if(eff.tag!="") eff.tag="\n" %^% eff.tag print_table( - MOD, row.names=TRUE, nsmalls=c(2, 0, 0, 0), + MOD, row.names=TRUE, digits=c(2, 0, 0, 0), line=is.null(file), title=Glue("Interaction Effect{ifelse(is.null(mod2), '', 's')} on \"{y}\" ({y.label})")) cat("\n") if(!is.null(mod2) & mod.type=="3-way") { print_table( - MOD.MOD, row.names=FALSE, nsmalls=c(0, 0, 2, 0, 0, 0), + MOD.MOD, row.names=FALSE, digits=c(0, 0, 2, 0, 0, 0), line=is.null(file), title=Glue("Conditional Interaction Effects on \"{y}\" ({y.label})")) cat("\n") } print_table( dplyr::select(RES, !c("LLCI", "ULCI")), - row.names=FALSE, nsmalls=nsmall, + row.names=FALSE, digits=digits, line=is.null(file), title=Glue("<> \"{x}\" ({x.label}) ==> \"{y}\" ({y.label})>>{eff.tag}")) cat("\n") @@ -1727,7 +1736,7 @@ process_mod = function(model0, #' which is performed using the \code{\link[mediation]{mediation}} package. #' #' @param model Mediation model built using \code{\link[mediation:mediate]{mediation::mediate()}}. -#' @param digits,nsmall Number of decimal places of output. Default is \code{3}. +#' @param digits Number of decimal places of output. Defaults to \code{3}. #' @param file File name of MS Word (\code{.doc}). ## @param print.avg Just set as \code{TRUE} for a concise output. ## For details, see the "Value" section in \code{\link[mediation:mediate]{mediation::mediate()}}. 
@@ -1773,7 +1782,7 @@ process_mod = function(model0, #' } #' #' @export -med_summary = function(model, digits=3, nsmall=digits, file=NULL) { +med_summary = function(model, digits=3, file=NULL) { # for raw function, see: # edit(mediation::mediate) # edit(mediation:::print.summary.mediate) @@ -1814,14 +1823,14 @@ med_summary = function(model, digits=3, nsmall=digits, file=NULL) { c(x$tau.coef, sd(x$tau.sims), x$tau.ci, x$tau.p)) smat = as.data.frame(smat) row.names(smat) = c("Indirect", "Direct", "Total") - smat$CI = cc_ci(smat[[3]], smat[[4]], nsmall) + smat$CI = cc_ci(smat[[3]], smat[[4]], digits) names(smat) = c("Effect", "S.E.", ifelse(x$boot, "Boot LLCI", "LLCI"), ifelse(x$boot, "Boot ULCI", "ULCI"), "pval", ifelse(x$boot, "[Boot 95% CI]", "[MCMC 95% CI]")) - print_table(smat[c(1, 2, 6, 5)], nsmalls=nsmall) + print_table(smat[c(1, 2, 6, 5)], digits=digits) Print(ci.type) Print("Sample Size: {x$nobs}") Print("Simulations: {x$sims} ({ifelse(x$boot, 'Bootstrap', 'Monte Carlo')})") @@ -1833,7 +1842,7 @@ med_summary = function(model, digits=3, nsmall=digits, file=NULL) { smat.new.names[5] = "p" smat.new[[6]] = str_replace_all(smat.new[[6]], "-", "\u2013") print_table( - smat.new[c(1, 2, 6, 5)], nsmalls=nsmall, + smat.new[c(1, 2, 6, 5)], digits=digits, col.names=c(smat.new.names[c(1, 2, 6, 5)], " "), file=file, file.align.head=c("left", "right", "right", "right", "right", "left"), @@ -1876,21 +1885,21 @@ med_summary = function(model, digits=3, nsmall=digits, file=NULL) { #' #' @param formula Model formula like \code{y ~ x}. #' @param data Data frame. -#' @param lag.max Maximum time lag. Default is \code{30}. -#' @param sig.level Significance level. Default is \code{0.05}. +#' @param lag.max Maximum time lag. Defaults to \code{30}. +#' @param sig.level Significance level. Defaults to \code{0.05}. #' @param xbreaks X-axis breaks. #' @param ybreaks Y-axis breaks. -#' @param ylim Y-axis limits. Default is \code{NULL} to automatically estimate. 
+#' @param ylim Y-axis limits. Defaults to \code{NULL} to automatically estimate. #' @param alpha.ns Color transparency (opacity: 0~1) for non-significant values. -#' Default is \code{1} for no transparency (i.e., opaque color). -#' @param pos.color Color for positive values. Default is \code{"black"}. -#' @param neg.color Color for negative values. Default is \code{"black"}. +#' Defaults to \code{1} for no transparency (i.e., opaque color). +#' @param pos.color Color for positive values. Defaults to \code{"black"}. +#' @param neg.color Color for negative values. Defaults to \code{"black"}. #' @param ci.color Color for upper and lower bounds of significant values. -#' Default is \code{"blue"}. -#' @param title Plot title. Default is an illustration of the formula. +#' Defaults to \code{"blue"}. +#' @param title Plot title. Defaults to an illustration of the formula. #' @param subtitle Plot subtitle. -#' @param xlab X-axis title. Default is \code{"Lag"}. -#' @param ylab Y-axis title. Default is \code{"Cross-Correlation"}. +#' @param xlab X-axis title. Defaults to \code{"Lag"}. +#' @param ylab Y-axis title. Defaults to \code{"Cross-Correlation"}. #' #' @return #' A \code{gg} object, which you can further modify using @@ -1967,8 +1976,8 @@ ccf_plot = function(formula, data, #' Granger causality does not necessarily constitute a true causal effect. #' #' @inheritParams ccf_plot -#' @param lags Time lags. Default is \code{1:5}. -#' @param test.reverse Whether to test reverse causality. Default is \code{TRUE}. +#' @param lags Time lags. Defaults to \code{1:5}. +#' @param test.reverse Whether to test reverse causality. Defaults to \code{TRUE}. #' @param file File name of MS Word (\code{.doc}). #' @param ... Further arguments passed to \code{\link[lmtest:grangertest]{lmtest::grangertest()}}. 
#' For example, you may use \emph{robust} standard errors by specifying @@ -2110,14 +2119,14 @@ vargranger = function(varmodel, var.y, var.x) { #' Granger causality does not necessarily constitute a true causal effect. #' #' @param varmodel VAR model fitted using the \code{\link[vars:VAR]{vars::VAR()}} function. -#' @param var.y,var.x [Optional] Default is \code{NULL} (all variables). +#' @param var.y,var.x [Optional] Defaults to \code{NULL} (all variables). #' If specified, then perform tests for specific variables. #' Values can be a single variable (e.g., \code{"X"}), #' a vector of variables (e.g., \code{c("X1", "X2")}), #' or a string containing regular expression (e.g., \code{"X1|X2"}). -#' @param test \emph{F} test and/or Wald \eqn{\chi}^2 test. Default is both: \code{c("F", "Chisq")}. +#' @param test \emph{F} test and/or Wald \eqn{\chi}^2 test. Defaults to both: \code{c("F", "Chisq")}. #' @param file File name of MS Word (\code{.doc}). -#' @param check.dropped Check dropped variables. Default is \code{FALSE}. +#' @param check.dropped Check dropped variables. Defaults to \code{FALSE}. #' #' @return A data frame of results. #' @@ -2224,7 +2233,7 @@ granger_causality = function(varmodel, var.y=NULL, var.x=NULL, {paste(test.text, collapse=' and ')} based on VAR({varmodel$p}) model: ") print_table(result[c(12:13, test.which)], - nsmalls=0, + digits=0, row.names=FALSE) cat("\n") @@ -2234,7 +2243,7 @@ granger_causality = function(varmodel, var.y=NULL, var.x=NULL, str_replace_all("^-+$", "") print_table( result[c(12, test.which)], - nsmalls=0, + digits=0, row.names=FALSE, col.names=col.which, title=paste0("Table. 
Granger Causality Test (Multivariate) Based on VAR(", varmodel$p, ") Model."), diff --git a/R/bruceR-stats_6_plot.R b/R/bruceR-stats_6_plot.R index 97bf92b..e20e768 100644 --- a/R/bruceR-stats_6_plot.R +++ b/R/bruceR-stats_6_plot.R @@ -19,12 +19,12 @@ #' \item \code{\link[ggtext:element_textbox]{ggtext::element_textbox()}} #' } #' -#' @param markdown Use \code{element_markdown()} instead of \code{element_text()}. Default is \code{FALSE}. +#' @param markdown Use \code{element_markdown()} instead of \code{element_text()}. Defaults to \code{FALSE}. #' If set to \code{TRUE}, then you should also use \code{element_markdown()} in \code{theme()} (if any). -#' @param base.size Basic font size. Default is 12. -#' @param line.size Line width. Default is 0.5. +#' @param base.size Basic font size. Defaults to \code{12}. +#' @param line.size Line width. Defaults to \code{0.5}. #' @param border \code{TRUE}, \code{FALSE}, or \code{"black"} (default). -#' @param bg Background color of whole plot. Default is \code{"white"}. +#' @param bg Background color of whole plot. Defaults to \code{"white"}. #' You can use any colors or choose from some pre-set color palettes: #' \code{"stata", "stata.grey", "solar", "wsj", "light", "dust"}. #' @@ -32,7 +32,7 @@ #' #' \code{ggthemr::colour_plot(c(stata="#EAF2F3", stata.grey="#E8E8E8", #' solar="#FDF6E3", wsj="#F8F2E4", light="#F6F1EB", dust="#FAF7F2"))} -#' @param panel.bg Background color of panel. Default is \code{"white"}. +#' @param panel.bg Background color of panel. Defaults to \code{"white"}. #' @param tag Font face of tag. Choose from \code{"plain", "italic", "bold", "bold.italic"}. #' @param plot.title Font face of title. Choose from \code{"plain", "italic", "bold", "bold.italic"}. #' @param axis.title Font face of axis text. Choose from \code{"plain", "italic", "bold", "bold.italic"}. @@ -42,10 +42,10 @@ #' @param font Text font. Only applicable to Windows system. 
#' @param grid.x \code{FALSE}, \code{""} (default), or a color (e.g., \code{"grey90"}) to set the color of panel grid (x). #' @param grid.y \code{FALSE}, \code{""} (default), or a color (e.g., \code{"grey90"}) to set the color of panel grid (y). -#' @param line.x Draw the x-axis line. Default is \code{TRUE}. -#' @param line.y Draw the y-axis line. Default is \code{TRUE}. -#' @param tick.x Draw the x-axis ticks. Default is \code{TRUE}. -#' @param tick.y Draw the y-axis ticks. Default is \code{TRUE}. +#' @param line.x Draw the x-axis line. Defaults to \code{TRUE}. +#' @param line.y Draw the y-axis line. Defaults to \code{TRUE}. +#' @param tick.x Draw the x-axis ticks. Defaults to \code{TRUE}. +#' @param tick.y Draw the y-axis ticks. Defaults to \code{TRUE}. #' #' @return A theme object that should be used for \code{ggplot2}. #' diff --git a/R/bruceR.R b/R/bruceR.R index fa70032..d9c8abc 100644 --- a/R/bruceR.R +++ b/R/bruceR.R @@ -167,6 +167,8 @@ if(FALSE) { #' \code{\link{Corr}} #' #' \code{\link{cor_diff}} +#' +#' \code{\link{cor_multilevel}} #' } #' #' \item{\strong{(5) T-Test, Multi-Factor ANOVA, Simple-Effect Analysis, and Post-Hoc Multiple Comparison}}{ @@ -231,7 +233,7 @@ NULL #' @importFrom stats sd var cor median na.omit complete.cases #' @importFrom stats p.adjust pnorm pt pf pchisq qnorm qt quantile rnorm anova update terms drop1 #' @importFrom stats lm coef confint residuals df.residual sigma as.formula terms.formula model.response model.frame -#' @importFrom dplyr %>% select left_join sym group_by summarise mutate transmute across +#' @importFrom dplyr %>% select left_join sym group_by summarise mutate transmute across rename #' @importFrom data.table data.table is.data.table as.data.table #' @importFrom data.table := .BY .EACHI .GRP .I .N .NGRP .SD .onAttach = function(libname, pkgname) { diff --git a/R/bruceR_utils.R b/R/bruceR_utils.R index 7e99c2d..0174e49 100644 --- a/R/bruceR_utils.R +++ b/R/bruceR_utils.R @@ -340,7 +340,7 @@ sprintf_transformer = 
function(text, envir) { #' #' @param ... Character string(s) to run. #' You can use \code{"{ }"} to insert any R object in the environment. -#' @param silent Suppress error/warning messages. Default is \code{FALSE}. +#' @param silent Suppress error/warning messages. Defaults to \code{FALSE}. #' #' @return Invisibly return the running expression(s). #' @@ -369,10 +369,10 @@ Run = function(..., silent=FALSE) { #' #' @param ... Character string(s). #' @param sep Pattern for separation. -#' Default is \code{"auto"}: +#' Defaults to \code{"auto"}: #' \code{,} \code{;} \code{|} \code{\\n} \code{\\t} #' @param trim Remove whitespace from start and end of string(s)? -#' Default is \code{TRUE}. +#' Defaults to \code{TRUE}. #' #' @return Character vector. #' @@ -409,17 +409,17 @@ cc = function(..., sep="auto", trim=TRUE) { } -cc_ci = function(llci, ulci, nsmall) { +cc_ci = function(llci, ulci, digits) { paste0("[", - formatF(llci, nsmall), ", ", - formatF(ulci, nsmall), "]") + formatF(llci, digits), ", ", + formatF(ulci, digits), "]") } -cc_m_ci = function(mean, llci, ulci, nsmall) { - paste0(formatF(mean, nsmall), " [", - formatF(llci, nsmall), ", ", - formatF(ulci, nsmall), "]") +cc_m_ci = function(mean, llci, ulci, digits) { + paste0(formatF(mean, digits), " [", + formatF(llci, digits), ", ", + formatF(ulci, digits), "]") } @@ -466,9 +466,9 @@ fct_rev = function(f) { #' It has been used in many other functions of \code{bruceR} (see below). #' #' @param x Matrix, data.frame (or data.table), or any model object (e.g., \code{lm, glm, lmer, glmer, ...}). -#' @param digits,nsmalls Numeric vector specifying the number of decimal places of output. Default is \code{3}. -#' @param nspaces Number of whitespaces between columns. Default is \code{1}. -#' @param row.names,col.names Print row/column names. Default is \code{TRUE} (column names are always printed). +#' @param digits Numeric vector specifying the number of decimal places of output. Defaults to \code{3}. 
+#' @param nspaces Number of whitespaces between columns. Defaults to \code{1}. +#' @param row.names,col.names Print row/column names. Defaults to \code{TRUE} (column names are always printed). #' To modify the names, you can use a character vector with the same length as the raw names. #' @param title Title text, which will be inserted in

(HTML code). #' @param note Note text, which will be inserted in

(HTML code). @@ -514,15 +514,17 @@ fct_rev = function(f) { #' unlink("model.doc") # delete file for code check #' #' @export -print_table = function(x, digits=3, nsmalls=digits, - nspaces=1, - row.names=TRUE, - col.names=TRUE, - title="", note="", append="", - line=TRUE, - file=NULL, - file.align.head="auto", - file.align.text="auto") { +print_table = function( + x, digits=3, + nspaces=1, + row.names=TRUE, + col.names=TRUE, + title="", note="", append="", + line=TRUE, + file=NULL, + file.align.head="auto", + file.align.text="auto" +) { ## Preprocess data.frame ## if(!inherits(x, c("matrix", "data.frame", "data.table"))) { coef.table = coef(summary(x)) @@ -530,7 +532,7 @@ print_table = function(x, digits=3, nsmalls=digits, } x = as.data.frame(x) sig = NULL - if(length(nsmalls)==1) nsmalls = rep(nsmalls, length(x)) + if(length(digits)==1) digits = rep(digits, length(x)) for(j in 1:length(x)) { if(inherits(x[,j], "factor")) x[,j] = as.character(x[,j]) @@ -540,7 +542,7 @@ print_table = function(x, digits=3, nsmalls=digits, names(x)[j] = "p" x[,j] = p.trans(x[,j]) } else { - x[,j] = formatF(x[,j], nsmalls[j]) + x[,j] = formatF(x[,j], digits[j]) } if(grepl("^S\\.E\\.$|^Std\\. Error$|^se$|^SE$|^BootSE$", names(x)[j])) { x[,j] = paste0("(", x[,j], ")") # add ( ) to S.E. @@ -760,7 +762,7 @@ formatN = function(x, mark=",") { #' Format numeric values. #' #' @param x A number or numeric vector. -#' @param digits,nsmall Number of decimal places of output. Default is \code{3}. +#' @param digits Number of decimal places of output. Defaults to \code{3}. #' #' @return Formatted character string. 
#' @@ -770,12 +772,12 @@ formatN = function(x, mark=",") { #' @seealso \code{\link[base:format]{format}}, \code{\link{formatN}} #' #' @export -formatF = function(x, digits=3, nsmall=digits) { - # format(x, digits=0, nsmall=nsmall, scientific=FALSE) +formatF = function(x, digits=3) { + # format(x, digits=0, nsmall=digits, scientific=FALSE) if(inherits(x, "character")) { xf = sprintf(paste0("%-", max(nchar(x), na.rm=TRUE), "s"), x) # left adjustment } else { - x = sprintf(paste0("%.", nsmall, "f"), x) + x = sprintf(paste0("%.", digits, "f"), x) xf = sprintf(paste0("%", max(nchar(x), na.rm=TRUE), "s"), x) } return(xf) @@ -803,8 +805,8 @@ RGB = function(r, g, b, alpha) { #' Timer (compute time difference). #' #' @param t0 Time at the beginning. -#' @param unit Options: \code{"auto", "secs", "mins", "hours", "days", "weeks"}. Default is \code{"secs"}. -#' @param digits,nsmall Number of decimal places of output. Default is \code{0}. +#' @param unit Options: \code{"auto", "secs", "mins", "hours", "days", "weeks"}. Defaults to \code{"secs"}. +#' @param digits Number of decimal places of output. Defaults to \code{0}. #' #' @return A character string of time difference. #' @@ -816,9 +818,9 @@ RGB = function(r, g, b, alpha) { #' } #' #' @export -dtime = function(t0, unit="secs", digits=0, nsmall=digits) { +dtime = function(t0, unit="secs", digits=0) { dt = difftime(Sys.time(), t0, units=unit) - format(dt, digits=1, nsmall=nsmall) + format(dt, digits=1, nsmall=digits) } @@ -835,7 +837,7 @@ dtime = function(t0, unit="secs", digits=0, nsmall=digits) { #' (version >= 1.2) is required for running this function. #' #' @param path \code{NULL} (default) or a specific path. -#' Default is to extract the path of the currently opened file +#' Defaults to extract the path of the currently opened file #' (usually .R or .Rmd) using the \code{rstudioapi::getSourceEditorContext} function. #' @param ask \code{TRUE} or \code{FALSE} (default). 
#' If \code{TRUE}, you can select a folder with the prompt of a dialog. @@ -937,18 +939,19 @@ file_ext = function(filename) { #' @param file File name (with extension). #' If unspecified, then data will be imported from clipboard. #' @param sheet [Only for Excel] Excel sheet name (or sheet number). -#' Default is the first sheet. +#' Defaults to the first sheet. #' Ignored if the sheet is specified via \code{range}. -#' @param range [Only for Excel] Excel cell range. Default are all cells in a sheet. +#' @param range [Only for Excel] Excel cell range. Defaults to all cells in a sheet. #' You may specify it as \code{range="A1:E100"} or \code{range="Sheet1!A1:E100"}. -#' @param encoding File encoding. Default is \code{NULL}. +#' @param encoding File encoding. Defaults to \code{NULL}. #' Alternatives can be \code{"UTF-8"}, \code{"GBK"}, \code{"CP936"}, etc. #' #' If you find messy code for Chinese text in the imported data, #' it is usually effective to set \code{encoding="UTF-8"}. -#' @param header Does the first row contain column names (\code{TRUE} or \code{FALSE})? Default is \code{"auto"}. -#' @param setclass,as Class of the imported data. Default is \code{"data.frame"}. +#' @param header Does the first row contain column names (\code{TRUE} or \code{FALSE})? Defaults to \code{"auto"}. +#' @param setclass,as Class of the imported data. Defaults to \code{"data.frame"}. #' Ignored if the data file is R object (.rds, .rda, .rdata, .Rdata). +#' @param verbose Print data information? Defaults to \code{FALSE}. 
#' #' Alternatives can be: #' \itemize{ @@ -976,10 +979,13 @@ file_ext = function(filename) { #' @seealso \code{\link{export}} #' #' @export -import = function(file, - sheet=NULL, range=NULL, - encoding=NULL, header="auto", - setclass=as, as="data.frame") { +import = function( + file, + sheet=NULL, range=NULL, + encoding=NULL, header="auto", + setclass=as, as="data.frame", + verbose=FALSE +) { ## initialize if(missing(file)) { file = "clipboard" @@ -1061,10 +1067,12 @@ import = function(file, } ## report data - if(is.data.frame(data)) - Print("<> Successfully imported: {nrow(data)} obs. of {ncol(data)} variables") - else - Print("<> Successfully imported: {length(data)} values of class `{class(data)[1]}`") + if(verbose) { + if(is.data.frame(data)) + Print("<> Successfully imported: {nrow(data)} obs. of {ncol(data)} variables") + else + Print("<> Successfully imported: {length(data)} values of class `{class(data)[1]}`") + } ## return data if(is.null(setclass) | fmt %in% c("rds", "rda", "rdata")) { @@ -1113,16 +1121,17 @@ import = function(file, #' @param file File name (with extension). #' If unspecified, then data will be exported to clipboard. #' @param sheet [Only for Excel] Excel sheet name(s). -#' Default is Sheet1, Sheet2, ... +#' Defaults to Sheet1, Sheet2, ... #' You may specify multiple sheet names in a character vector #' \code{c()} with the \emph{same length} as \code{x} (see examples). -#' @param encoding File encoding. Default is \code{NULL}. +#' @param encoding File encoding. Defaults to \code{NULL}. #' Alternatives can be \code{"UTF-8"}, \code{"GBK"}, \code{"CP936"}, etc. #' #' If you find messy code for Chinese text in the exported data (often in CSV when opened with Excel), #' it is usually effective to set \code{encoding="GBK"} or \code{encoding="CP936"}. -#' @param header Does the first row contain column names (\code{TRUE} or \code{FALSE})? Default is \code{"auto"}. -#' @param overwrite Overwrite the existing file (if any)? 
Default is \code{TRUE}. +#' @param header Does the first row contain column names (\code{TRUE} or \code{FALSE})? Defaults to \code{"auto"}. +#' @param overwrite Overwrite the existing file (if any)? Defaults to \code{TRUE}. +#' @param verbose Print output information? Defaults to \code{FALSE}. #' #' @return No return value. #' @@ -1153,9 +1162,12 @@ import = function(file, #' @seealso \code{\link{import}}, \code{\link{print_table}} #' #' @export -export = function(x, file, sheet=NULL, - encoding=NULL, header="auto", - overwrite=TRUE) { +export = function( + x, file, sheet=NULL, + encoding=NULL, header="auto", + overwrite=TRUE, + verbose=FALSE +) { ## initialize if(missing(file)) { file = "clipboard" @@ -1272,10 +1284,12 @@ export = function(x, file, sheet=NULL, } ## report status - if(fmt=="clipboard") - Print("<> Successfully paste to clipboard") - else - Print("<> Successfully saved to <>") + if(verbose) { + if(fmt=="clipboard") + Print("<> Successfully paste to clipboard") + else + Print("<> Successfully saved to <>") + } } diff --git a/R/deprecated.R b/R/deprecated.R index 9b51eb0..118621e 100644 --- a/R/deprecated.R +++ b/R/deprecated.R @@ -686,3 +686,253 @@ # results=as.data.table(results)[order(singular, BIC, AIC, raw.id),] # return(results) # } + + +## modified `psych::cor.plot()` +## see comment lines +# cor_plot <- function (r, numbers = TRUE, colors = TRUE, n = 51, main = NULL, +# zlim = c(-1, 1), show.legend = TRUE, labels = NULL, n.legend = 10, +# select = NULL, pval = NULL, cuts = c(0.001, 0.01), scale = TRUE, +# cex, MAR, upper = TRUE, diag = TRUE, +# symmetric = TRUE, stars = FALSE, adjust = "holm", xaxis = 1, +# xlas = 0, ylas = 2, gr = NULL, alpha = 0.75, min.length = NULL, +# digits=2, # added in bruceR +# ...) 
+# { +# oldpar <- graphics::par(no.readonly = TRUE) +# on.exit(graphics::par(oldpar)) +# if (missing(MAR)) +# # MAR <- 5 +# MAR <- 4 +# if (!is.matrix(r) & (!is.data.frame(r))) { +# if ((length(class(r)) > 1) & (inherits(r, "psych"))) { +# switch(class(r)[2], omega = { +# r <- r$schmid$sl +# nff <- ncol(r) +# r <- r[, 1:(nff - 3)] +# if (is.null(main)) { +# main <- "Omega plot" +# } +# }, cor.ci = { +# pval <- 2 * (1 - r$ptci) +# r <- r$rho +# }, fa = { +# r <- r$loadings +# if (is.null(main)) { +# main <- "Factor Loadings plot" +# } +# }, pc = { +# r <- r$loadings +# if (is.null(main)) { +# main <- "PCA Loadings plot" +# } +# }, principal = { +# r <- r$loadings +# if (is.null(main)) { +# main <- "PCA Loadings plot" +# } +# }) +# } +# } +# else { +# if (symmetric & !psych::isCorrelation(r) & (nrow(r) != ncol(r))) { +# cp <- psych::corr.test(r, adjust = adjust) +# r <- cp$r +# pval <- cp$p +# if (is.null(main)) { +# main <- "Correlation plot" +# } +# } +# } +# R <- r <- as.matrix(r) +# if (!is.null(select)) +# r <- r[select, select] +# if (min(dim(r)) < 2) { +# stop("You need at least two dimensions to make a meaningful plot.", call.=TRUE) +# } +# if (is.null(n)) { +# n <- dim(r)[2] +# } +# nf <- dim(r)[2] +# nvar <- dim(r)[1] +# if (!upper) +# r[col(r) > row(r)] <- NA +# if (!diag) +# r[col(r) == row(r)] <- NA +# if (nf == nvar) +# r <- t(r) +# if (missing(pval) | is.null(pval)) { +# pval <- matrix(rep(1, nvar * nf), nvar) +# } +# else { +# if (length(pval) != nvar * nf) { +# pr = matrix(0, nvar, nf) +# pr[row(pr) > col(pr)] <- pval +# pr <- pr + t(pr) +# diag(pr) <- 0 +# pval <- pr +# } +# if (!stars) { +# pval <- psych::con2cat(pval, cuts = cuts) +# pval <- (length(cuts) + 1 - pval)/length(cuts) +# } +# pval <- t(pval) +# } +# if (is.null(labels)) { +# if (is.null(rownames(r))) +# rownames(r) <- paste("V", 1:nvar) +# if (is.null(colnames(r))) +# colnames(r) <- paste("V", 1:nf) +# } +# else { +# rownames(r) <- colnames(r) <- labels +# } +# if 
(!is.null(min.length)) { +# rownames(r) <- abbreviate(rownames(r), minlength = min.length) +# colnames(r) <- abbreviate(colnames(r), minlength = min.length) +# } +# max.len <- max(nchar(rownames(r)))/6 +# if (is.null(zlim)) { +# zlim <- range(r) +# } +# if (colors) { +# if (missing(gr)) { +# gr <- grDevices::colorRampPalette(c("red", "white", "blue")) +# } +# if (max(r, na.rm = TRUE) > 1) { +# maxr <- max(r) +# n1 <- n * (zlim[2] - zlim[1])/(maxr - zlim[1]) +# colramp <- rep(NA, n) +# n1 <- ceiling(n1) +# colramp[1:(n1 + 1)] <- gr(n1 + 1) +# colramp[(n1 + 1):n] <- colramp[n1 + 1] +# zlim[2] <- maxr +# } +# else { +# colramp <- gr(n) +# } +# } +# else { +# colramp <- grDevices::grey((n:0)/n) +# } +# colramp <- grDevices::adjustcolor(colramp, alpha.f = alpha) +# if (nvar != nf) { +# r <- t(r) +# } +# ord1 <- seq(nvar, 1, -1) +# if (nf == nvar) { +# r <- r[, ord1] +# pval <- pval[, ord1] +# } +# else { +# r <- r[, ord1] +# pval <- t(pval[ord1, ]) +# } +# # graphics::par(mar = c(MAR + max.len, MAR + max.len, 4, 0.5)) +# graphics::par(mar = c(MAR + max.len, MAR + max.len, 2.5, 0.5)) +# if (show.legend) { +# graphics::layout(matrix(c(1, 2), nrow = 1), widths = c(0.9, 0.1), +# heights = c(1, 1)) +# } +# graphics::image(r, col = colramp, axes = FALSE, main = main, zlim = zlim) +# graphics::box() +# at1 <- (0:(nf - 1))/(nf - 1) +# at2 <- (0:(nvar - 1))/(nvar - 1) +# lab1 <- rownames(r) +# lab2 <- colnames(r) +# if (xaxis == 3) { +# line <- -0.5 +# tick <- FALSE +# } +# else { +# line <- NA +# tick <- TRUE +# } +# if (max.len > 0.5) { +# graphics::axis(2, at = at2, labels = lab2, las = ylas, ...) +# graphics::axis(xaxis, at = at1, labels = lab1, las = xlas, line = line, +# tick = tick, ...) +# } +# else { +# graphics::axis(2, at = at2, labels = lab2, las = ylas, ...) +# graphics::axis(xaxis, at = at1, labels = lab1, las = xlas, line = line, +# tick = tick, ...) 
+# } +# if (numbers) { +# rx <- rep(at1, ncol(r)) +# ry <- rep(at2, each = nrow(r)) +# # rv <- round(r, 2) # modified in bruceR +# rv <- formatF(r, digits) # modified in bruceR +# if (stars) { +# symp <- stats::symnum(pval, corr = FALSE, cutpoints = c(0, +# 0.001, 0.01, 0.05, 1), symbols = c("***", "**", +# "*", " "), legend = FALSE) +# rv[!is.na(rv)] <- paste0(rv[!is.na(rv)], symp[!is.na(rv)]) +# rv <- gsub("NA.*", "", rv) # modified in bruceR +# if (missing(cex)) +# cex = 9/max(nrow(r), ncol(r)) +# graphics::text(rx, ry, rv, cex = cex, ...) +# } +# else { +# if (missing(cex)) +# cex = 9/max(nrow(r), ncol(r)) +# if (scale) { +# graphics::text(rx, ry, rv, cex = pval * cex, ...) +# } +# else { +# graphics::text(rx, ry, rv, cex = cex, ...) +# } +# } +# } +# if (show.legend) { +# leg <- matrix(seq(from = zlim[1], to = zlim[2], by = (zlim[2] - +# zlim[1])/n), nrow = 1) +# # graphics::par(mar = c(MAR, 0, 4, 3)) +# graphics::par(mar = c(MAR, 0, 2.5, 3)) +# graphics::image(leg, col = colramp, axes = FALSE, zlim = zlim) +# at2 <- seq(0, 1, 1/n.legend) +# labels = seq(zlim[1], zlim[2], (zlim[2] - zlim[1])/(length(at2) - +# 1)) +# graphics::axis(4, at = at2, labels = labels, las = 2, ...) 
+# } +# invisible(R) +# } + +# cor_plot(r=cor$r, adjust="none", digits=digits, +# numbers=TRUE, zlim=plot.range, +# diag=FALSE, xlas=2, n=plot.color.levels, +# pval=cor$p, stars=TRUE, +# alpha=1, gr=grDevices::colorRampPalette(plot.palette), +# main="Correlation Matrix") + + +# if(!is.null(plot.file)) { +# if(str_detect(plot.file, "\\.png$")) +# grDevices::png(plot.file, width=plot.width, height=plot.height, +# units="in", res=plot.dpi) +# if(str_detect(plot.file, "\\.pdf$")) +# grDevices::pdf(plot.file, width=plot.width, height=plot.height) +# } +# corrplot::corrplot( +# cor$r, +# p.mat=cor$p, +# diag=FALSE, +# method="color", +# tl.col="black", +# tl.srt=45, +# cl.align.text="l", +# addCoef.col="black", +# number.digits=digits, +# insig="label_sig", +# sig.level=c(0.001, 0.01, 0.05), +# pch="*", +# pch.cex=2, +# pch.col="grey20") +# if(!is.null(plot.file)) { +# grDevices::dev.off() +# plot.file = str_split(plot.file, "/", simplify=TRUE) +# plot.path = paste0(getwd(), '/', plot.file[length(plot.file)]) +# Print("<> Plot saved to <>") +# cat("\n") +# } + diff --git a/README.md b/README.md index 243336a..d4c84d6 100644 --- a/README.md +++ b/README.md @@ -25,13 +25,13 @@ This package includes easy-to-use functions for: Han-Wu-Shuang (Bruce) Bao 包寒吴霜 -Email: [baohws\@foxmail.com](mailto:baohws@foxmail.com) +📬 [baohws\@foxmail.com](mailto:baohws@foxmail.com) -Homepage: [psychbruce.github.io](https://psychbruce.github.io) +📋 [psychbruce.github.io](https://psychbruce.github.io) ## Citation -- Bao, H.-W.-S. (2023). bruceR: Broadly useful convenient and efficient R functions. R package version 0.8.x. +- Bao, H.-W.-S. (2023). *bruceR: Broadly useful convenient and efficient R functions*. R package version 2023.8. ## User Guide @@ -118,6 +118,7 @@ Loading `bruceR` with `library(bruceR)` will also load these R packages for you: - `Freq()` - `Corr()` - `cor_diff()` + - `cor_multilevel()` 5. 
**T-Test, Multi-Factor ANOVA, Simple-Effect Analysis, and Post-Hoc Multiple Comparison** diff --git a/_pkgdown.yml b/_pkgdown.yml index 814c11b..4839a13 100644 --- a/_pkgdown.yml +++ b/_pkgdown.yml @@ -3,5 +3,7 @@ template: bootstrap: 5 bootswatch: simplex bslib: - heading_font: {google: "Nunito Sans"} - base_font: {google: "Nunito"} + base_font: + google: "Roboto" + heading_font: + google: "Lexend" # Anton, Oswald, Ubuntu, Lexend, Fjalla One diff --git a/cran-comments.md b/cran-comments.md index 6044be9..68d2827 100644 --- a/cran-comments.md +++ b/cran-comments.md @@ -1,11 +1,11 @@ ## News -In this version (0.8.10), I have fixed all problems and improved some functions. +Fixed bugs and improved functions. ## Test environments -- Windows 11 (local installation), R 4.2.2 -- Mac OS 11.5 (check_mac_release), R 4.2.1 +- Windows 11 (local installation), R 4.3.0 +- Mac OS 13.3 (check_mac_release), R 4.3.0 ## Package check results @@ -13,8 +13,8 @@ passing `devtools::check_win_devel()` ## R CMD check results -passing (0 errors | 0 warnings | 0 note) +passing (0 errors | 0 warnings | 0 notes) ## Reverse dependencies -Checks on downstream dependencies (`ChineseNames` and `PsychWordVec`) found no problems. +Checks on downstream dependencies (`ChineseNames`, `PsychWordVec`, and `FMAT`) found no problems. diff --git a/man/Alpha.Rd b/man/Alpha.Rd index 9adbf9a..61286ab 100644 --- a/man/Alpha.Rd +++ b/man/Alpha.Rd @@ -4,16 +4,7 @@ \alias{Alpha} \title{Reliability analysis (Cronbach's \eqn{\alpha} and McDonald's \eqn{\omega}).} \usage{ -Alpha( - data, - var, - items, - vars = NULL, - varrange = NULL, - rev = NULL, - digits = 3, - nsmall = digits -) +Alpha(data, var, items, vars = NULL, varrange = NULL, rev = NULL, digits = 3) } \arguments{ \item{data}{Data frame.} @@ -34,7 +25,7 @@ A character string specifying the positions ("starting:stopping") of variables. 
(1) a character vector specifying the reverse-scoring variables (recommended), or (2) a numeric vector specifying the item number of reverse-scoring variables (not recommended).} -\item{digits, nsmall}{Number of decimal places of output. Default is \code{3}.} +\item{digits}{Number of decimal places of output. Defaults to \code{3}.} } \value{ A list of results obtained from diff --git a/man/CFA.Rd b/man/CFA.Rd index 7e7b65c..ca3019d 100644 --- a/man/CFA.Rd +++ b/man/CFA.Rd @@ -12,7 +12,6 @@ CFA( orthogonal = FALSE, missing = "listwise", digits = 3, - nsmall = digits, file = NULL ) } @@ -23,7 +22,7 @@ CFA( \item{estimator}{The estimator to be used (for details, see \link[lavaan:lavOptions]{lavaan options}). -Default is \code{"ML"}. +Defaults to \code{"ML"}. Can be one of the following: \describe{ \item{\code{"ML"}}{Maximum Likelihood (can be extended to @@ -36,13 +35,13 @@ Can be one of the following: \item{\code{"DLS"}}{Distributionally-weighted Least Squares} }} -\item{highorder}{High-order factor. Default is \code{""}.} +\item{highorder}{High-order factor. Defaults to \code{""}.} -\item{orthogonal}{Default is \code{FALSE}. If \code{TRUE}, all covariances among latent variables are set to zero.} +\item{orthogonal}{Defaults to \code{FALSE}. If \code{TRUE}, all covariances among latent variables are set to zero.} -\item{missing}{Default is \code{"listwise"}. Alternative is \code{"fiml"} ("Full Information Maximum Likelihood").} +\item{missing}{Defaults to \code{"listwise"}. Alternative is \code{"fiml"} ("Full Information Maximum Likelihood").} -\item{digits, nsmall}{Number of decimal places of output. Default is \code{3}.} +\item{digits}{Number of decimal places of output. 
Defaults to \code{3}.} \item{file}{File name of MS Word (\code{.doc}).} } diff --git a/man/Corr.Rd b/man/Corr.Rd index 346bf8c..5fd6781 100644 --- a/man/Corr.Rd +++ b/man/Corr.Rd @@ -10,12 +10,10 @@ Corr( p.adjust = "none", all.as.numeric = TRUE, digits = 2, - nsmall = digits, file = NULL, plot = TRUE, - plot.range = c(-1, 1), - plot.palette = NULL, - plot.color.levels = 201, + plot.r.size = 4, + plot.colors = NULL, plot.file = NULL, plot.width = 8, plot.height = 6, @@ -34,37 +32,37 @@ For details, see \code{\link[stats:p.adjust]{stats::p.adjust()}}.} \item{all.as.numeric}{\code{TRUE} (default) or \code{FALSE}. Transform all variables into numeric (continuous).} -\item{digits, nsmall}{Number of decimal places of output. Default is \code{2}.} +\item{digits}{Number of decimal places of output. Defaults to \code{2}.} \item{file}{File name of MS Word (\code{.doc}).} \item{plot}{\code{TRUE} (default) or \code{FALSE}. Plot the correlation matrix.} -\item{plot.range}{Range of correlation coefficients for plot. Default is \code{c(-1, 1)}.} +\item{plot.r.size}{Font size of correlation text label. Defaults to \code{4}.} -\item{plot.palette}{Color gradient for plot. Default is \code{c("#B52127", "white", "#2171B5")}. -You may also set it to, e.g., \code{c("red", "white", "blue")}.} - -\item{plot.color.levels}{Default is \code{201}.} +\item{plot.colors}{Plot colors (character vector). Defaults to "RdBu" of the Color Brewer Palette.} \item{plot.file}{\code{NULL} (default, plot in RStudio) or a file name (\code{"xxx.png"}).} -\item{plot.width}{Width (in "inch") of the saved plot. Default is \code{8}.} +\item{plot.width}{Width (in "inch") of the saved plot. Defaults to \code{8}.} -\item{plot.height}{Height (in "inch") of the saved plot. Default is \code{6}.} +\item{plot.height}{Height (in "inch") of the saved plot. Defaults to \code{6}.} -\item{plot.dpi}{DPI (dots per inch) of the saved plot. Default is \code{500}.} +\item{plot.dpi}{DPI (dots per inch) of the saved plot. 
Defaults to \code{500}.} } \value{ -Invisibly return the correlation results obtained from -\code{\link[psych:corr.test]{psych::corr.test()}}. +Invisibly return a list with +(1) correlation results from +\code{\link[psych:corr.test]{psych::corr.test()}} and +(2) a \code{ggplot2} object if \code{plot=TRUE}. } \description{ Correlation analysis. } \examples{ Corr(airquality) -Corr(airquality, p.adjust="bonferroni") +Corr(airquality, p.adjust="bonferroni", + plot.colors=c("#b2182b", "white", "#2166ac")) d = as.data.table(psych::bfi) added(d, { @@ -81,4 +79,6 @@ Corr(d[, .(age, gender, education, E, A, C, N, O)]) } \seealso{ \code{\link{Describe}} + +\code{\link{cor_multilevel}} } diff --git a/man/Describe.Rd b/man/Describe.Rd index 969a914..896a3af 100644 --- a/man/Describe.Rd +++ b/man/Describe.Rd @@ -8,7 +8,6 @@ Describe( data, all.as.numeric = TRUE, digits = 2, - nsmall = digits, file = NULL, plot = FALSE, upper.triangle = FALSE, @@ -25,7 +24,7 @@ Describe( \item{all.as.numeric}{\code{TRUE} (default) or \code{FALSE}. Transform all variables into numeric (continuous).} -\item{digits, nsmall}{Number of decimal places of output. Default is \code{2}.} +\item{digits}{Number of decimal places of output. Defaults to \code{2}.} \item{file}{File name of MS Word (\code{.doc}).} @@ -40,16 +39,16 @@ Add fitting lines to scatter plots (if any).} \item{plot.file}{\code{NULL} (default, plot in RStudio) or a file name (\code{"xxx.png"}).} -\item{plot.width}{Width (in "inch") of the saved plot. Default is \code{8}.} +\item{plot.width}{Width (in "inch") of the saved plot. Defaults to \code{8}.} -\item{plot.height}{Height (in "inch") of the saved plot. Default is \code{6}.} +\item{plot.height}{Height (in "inch") of the saved plot. Defaults to \code{6}.} -\item{plot.dpi}{DPI (dots per inch) of the saved plot. Default is \code{500}.} +\item{plot.dpi}{DPI (dots per inch) of the saved plot. 
Defaults to \code{500}.} } \value{ -Invisibly return a list consisting of +Invisibly return a list with (1) a data frame of descriptive statistics and -(2) a \code{ggplot2} object if users set \code{plot=TRUE}. +(2) a \code{ggplot2} object if \code{plot=TRUE}. } \description{ Descriptive statistics. diff --git a/man/EFA.Rd b/man/EFA.Rd index d96f813..4380235 100644 --- a/man/EFA.Rd +++ b/man/EFA.Rd @@ -22,7 +22,6 @@ EFA( max.iter = 25, min.eigen = 1, digits = 3, - nsmall = digits, file = NULL ) @@ -77,20 +76,20 @@ A character string specifying the positions ("starting:stopping") of variables. \item (any number >= 1) - user-defined fixed number }} -\item{sort.loadings}{Sort factor/component loadings by size? Default is \code{TRUE}.} +\item{sort.loadings}{Sort factor/component loadings by size? Defaults to \code{TRUE}.} \item{hide.loadings}{A number (0~1) for hiding absolute factor/component loadings below this value. -Default is \code{0} (does not hide any loading).} +Defaults to \code{0} (does not hide any loading).} -\item{plot.scree}{Display the scree plot? Default is \code{TRUE}.} +\item{plot.scree}{Display the scree plot? Defaults to \code{TRUE}.} -\item{kaiser}{Do the Kaiser normalization (as in SPSS)? Default is \code{TRUE}.} +\item{kaiser}{Do the Kaiser normalization (as in SPSS)? Defaults to \code{TRUE}.} -\item{max.iter}{Maximum number of iterations for convergence. Default is \code{25} (the same as in SPSS).} +\item{max.iter}{Maximum number of iterations for convergence. Defaults to \code{25} (the same as in SPSS).} -\item{min.eigen}{Minimum eigenvalue (used if \code{nfactors="eigen"}). Default is \code{1}.} +\item{min.eigen}{Minimum eigenvalue (used if \code{nfactors="eigen"}). Defaults to \code{1}.} -\item{digits, nsmall}{Number of decimal places of output. Default is \code{3}.} +\item{digits}{Number of decimal places of output. 
Defaults to \code{3}.} \item{file}{File name of MS Word (\code{.doc}).} diff --git a/man/EMMEANS.Rd b/man/EMMEANS.Rd index 9cf04c5..801ebb6 100644 --- a/man/EMMEANS.Rd +++ b/man/EMMEANS.Rd @@ -13,8 +13,7 @@ EMMEANS( p.adjust = "bonferroni", sd.pooled = NULL, model.type = "multivariate", - digits = 3, - nsmall = digits + digits = 3 ) } \arguments{ @@ -26,21 +25,21 @@ it reports the results of omnibus test or simple main effect. If set to a character vector (e.g., \code{c("A", "B")}), it also reports the results of simple interaction effect.} -\item{by}{Moderator variable(s). Default is \code{NULL}.} +\item{by}{Moderator variable(s). Defaults to \code{NULL}.} \item{contrast}{Contrast method for multiple comparisons. -Default is \code{"pairwise"}. +Defaults to \code{"pairwise"}. Alternatives can be \code{"pairwise"} (\code{"revpairwise"}), \code{"seq"} (\code{"consec"}), \code{"poly"}, \code{"eff"}. For details, see \code{?emmeans::`contrast-methods`}.} \item{reverse}{The order of levels to be contrasted. -Default is \code{TRUE} (higher level vs. lower level).} +Defaults to \code{TRUE} (higher level vs. lower level).} \item{p.adjust}{Adjustment method of \emph{p} values for multiple comparisons. -Default is \code{"bonferroni"}. -For polynomial contrasts, default is \code{"none"}. +Defaults to \code{"bonferroni"}. +For polynomial contrasts, defaults to \code{"none"}. Alternatives can be \code{"none"}, \code{"fdr"}, \code{"hochberg"}, \code{"hommel"}, \code{"holm"}, \code{"tukey"}, \code{"mvt"}, @@ -61,7 +60,7 @@ for \code{\link[emmeans:joint_tests]{emmeans::joint_tests()}} and \code{\link[em \code{"univariate"} requires also specifying \code{aov.include=TRUE} in \code{\link{MANOVA}} (not recommended by the \code{afex} package; for details, see \code{\link[afex:aov_car]{afex::aov_ez()}}).} -\item{digits, nsmall}{Number of decimal places of output. Default is \code{3}.} +\item{digits}{Number of decimal places of output. 
Defaults to \code{3}.} } \value{ The same model object as returned by diff --git a/man/Freq.Rd b/man/Freq.Rd index 9ecae9a..d49ab5b 100644 --- a/man/Freq.Rd +++ b/man/Freq.Rd @@ -4,7 +4,7 @@ \alias{Freq} \title{Frequency statistics.} \usage{ -Freq(x, varname, labels, sort = "", digits = 1, nsmall = digits, file = NULL) +Freq(x, varname, labels, sort = "", digits = 1, file = NULL) } \arguments{ \item{x}{A vector of values (or a data frame).} @@ -16,7 +16,7 @@ Freq(x, varname, labels, sort = "", digits = 1, nsmall = digits, file = NULL) \item{sort}{\code{""} (default, sorted by the order of variable values/labels), \code{"-"} (decreasing by N), or \code{"+"} (increasing by N).} -\item{digits, nsmall}{Number of decimal places of output. Default is \code{1}.} +\item{digits}{Number of decimal places of output. Defaults to \code{1}.} \item{file}{File name of MS Word (\code{.doc}).} } diff --git a/man/GLM_summary.Rd b/man/GLM_summary.Rd index 03a74a2..c8b5144 100644 --- a/man/GLM_summary.Rd +++ b/man/GLM_summary.Rd @@ -4,14 +4,7 @@ \alias{GLM_summary} \title{Tidy report of GLM (\code{lm} and \code{glm} models).} \usage{ -GLM_summary( - model, - robust = FALSE, - cluster = NULL, - digits = 3, - nsmall = digits, - ... -) +GLM_summary(model, robust = FALSE, cluster = NULL, digits = 3, ...) } \arguments{ \item{model}{A model fitted with \code{lm} or \code{glm} function.} @@ -27,7 +20,7 @@ For details, see \code{?sandwich::vcovHC} and \code{?jtools::summ.lm}. \item{cluster}{[Only for \code{lm} and \code{glm}] Cluster-robust standard errors are computed if cluster is set to the name of the input data's cluster variable or is a vector of clusters.} -\item{digits, nsmall}{Number of decimal places of output. Default is 3.} +\item{digits}{Number of decimal places of output. Defaults to \code{3}.} \item{...}{Other arguments. 
You may re-define \code{formula}, \code{data}, or \code{family}.} } diff --git a/man/HLM_ICC_rWG.Rd b/man/HLM_ICC_rWG.Rd index 5764448..2db0b83 100644 --- a/man/HLM_ICC_rWG.Rd +++ b/man/HLM_ICC_rWG.Rd @@ -10,8 +10,7 @@ HLM_ICC_rWG( icc.var, rwg.vars = icc.var, rwg.levels = 0, - digits = 3, - nsmall = digits + digits = 3 ) } \arguments{ @@ -21,7 +20,7 @@ HLM_ICC_rWG( \item{icc.var}{Key variable for analysis (usually the dependent variable).} -\item{rwg.vars}{Default is \code{icc.var}. It can be: +\item{rwg.vars}{Defaults to \code{icc.var}. It can be: \itemize{ \item A single variable (\emph{single-item} measure), then computing rWG. \item Multiple variables (\emph{multi-item} measure), then computing rWG(J), where J = the number of items. @@ -38,7 +37,7 @@ it is required to specify which type of uniform distribution is. For example, if the measure is a 5-point Likert scale, you should set \code{rwg.levels=5}. }} -\item{digits, nsmall}{Number of decimal places of output. Default is 3.} +\item{digits}{Number of decimal places of output. Defaults to \code{3}.} } \value{ Invisibly return a list of results. @@ -112,5 +111,7 @@ Estimating within-group interrater reliability with and without response bias. \emph{Journal of Applied Psychology, 69}, 85--98. } \seealso{ +\code{\link{cor_multilevel}} + \href{https://CRAN.R-project.org/package=multilevel}{R package "multilevel"} } diff --git a/man/HLM_summary.Rd b/man/HLM_summary.Rd index 5591ea7..d24ccce 100644 --- a/man/HLM_summary.Rd +++ b/man/HLM_summary.Rd @@ -4,7 +4,7 @@ \alias{HLM_summary} \title{Tidy report of HLM (\code{lmer} and \code{glmer} models).} \usage{ -HLM_summary(model = NULL, test.rand = FALSE, digits = 3, nsmall = digits, ...) +HLM_summary(model = NULL, test.rand = FALSE, digits = 3, ...) 
} \arguments{ \item{model}{A model fitted with \code{lmer} or \code{glmer} function using the \code{lmerTest} package.} @@ -15,7 +15,7 @@ Test random effects (i.e., variance components) by using the likelihood-ratio te which is asymptotically chi-square distributed. For large datasets, it is much time-consuming.} -\item{digits, nsmall}{Number of decimal places of output. Default is \code{3}.} +\item{digits}{Number of decimal places of output. Defaults to \code{3}.} \item{...}{Other arguments. You may re-define \code{formula}, \code{data}, or \code{family}.} } diff --git a/man/MANOVA.Rd b/man/MANOVA.Rd index 707c0fe..565875c 100644 --- a/man/MANOVA.Rd +++ b/man/MANOVA.Rd @@ -17,7 +17,6 @@ MANOVA( sph.correction = "none", aov.include = FALSE, digits = 3, - nsmall = digits, file = NULL ) } @@ -79,20 +78,20 @@ Tips on regular expression: \item{covariate}{Covariates. Multiple variables should be included in a character vector \code{c()}.} -\item{ss.type}{Type of sums of squares (SS) for ANOVA. Default is \code{"III"}. +\item{ss.type}{Type of sums of squares (SS) for ANOVA. Defaults to \code{"III"}. Possible values are \code{"II"}, \code{"III"}, \code{2}, or \code{3}.} \item{sph.correction}{[Only for repeated measures with >= 3 levels] -Sphericity correction method for adjusting the degrees of freedom (\emph{df}) when the sphericity assumption is violated. Default is \code{"none"}. +Sphericity correction method for adjusting the degrees of freedom (\emph{df}) when the sphericity assumption is violated. Defaults to \code{"none"}. If Mauchly's test of sphericity is significant, you may set it to \code{"GG"} (Greenhouse-Geisser) or \code{"HF"} (Huynh-Feldt).} \item{aov.include}{Include the \code{aov} object in the returned object? 
-Default is \code{FALSE}, as suggested by \code{\link[afex:aov_car]{afex::aov_ez()}} +Defaults to \code{FALSE}, as suggested by \code{\link[afex:aov_car]{afex::aov_ez()}} (please see the \code{include_aov} argument in this help page, which provides a detailed explanation). If \code{TRUE}, you should also specify \code{model.type="univariate"} in \code{\link{EMMEANS}}.} -\item{digits, nsmall}{Number of decimal places of output. Default is \code{3}.} +\item{digits}{Number of decimal places of output. Defaults to \code{3}.} \item{file}{File name of MS Word (\code{.doc}).} } diff --git a/man/PROCESS.Rd b/man/PROCESS.Rd index 5b10481..f79899a 100644 --- a/man/PROCESS.Rd +++ b/man/PROCESS.Rd @@ -27,7 +27,6 @@ PROCESS( center = TRUE, std = FALSE, digits = 3, - nsmall = digits, file = NULL ) } @@ -109,7 +108,7 @@ You may manually specify a vector of certain values: e.g., \item{ci}{Method for estimating the standard error (SE) and 95\% confidence interval (CI) of indirect effect(s). -Default is \code{"boot"} for (generalized) linear models or +Defaults to \code{"boot"} for (generalized) linear models or \code{"mcmc"} for (generalized) linear mixed models (i.e., multilevel models). \describe{ \item{\code{"boot"}}{Percentile Bootstrap} @@ -122,11 +121,11 @@ You \emph{should not} report the 95\% CIs of simple slopes as Bootstrap or Monte because they are just standard CIs without any resampling method.} \item{nsim}{Number of simulation samples (bootstrap resampling or Monte Carlo simulation) -for estimating SE and 95\% CI. Default is \code{100} for running examples faster. +for estimating SE and 95\% CI. Defaults to \code{100} for running examples faster. In formal analyses, however, \strong{\code{nsim=1000} (or larger)} is strongly suggested!} \item{seed}{Random seed for obtaining reproducible results. -Default is \code{NULL}. +Defaults to \code{NULL}. You may set to any number you prefer (e.g., \code{seed=1234}, just an uncountable number). 
@@ -138,14 +137,14 @@ get exactly the same results across different R packages (e.g., \code{\link[lavaan:lavaan-class]{lavaan}} vs. \code{\link[mediation:mediate]{mediation}}) and software (e.g., SPSS, Mplus, R, jamovi).} -\item{center}{Centering numeric (continuous) predictors? Default is \code{TRUE} (suggested).} +\item{center}{Centering numeric (continuous) predictors? Defaults to \code{TRUE} (suggested).} -\item{std}{Standardizing variables to get standardized coefficients? Default is \code{FALSE}. +\item{std}{Standardizing variables to get standardized coefficients? Defaults to \code{FALSE}. If \code{TRUE}, it will standardize all numeric (continuous) variables before building regression models. However, it is \emph{not suggested} to set \code{std=TRUE} for \emph{generalized} linear (mixed) models.} -\item{digits, nsmall}{Number of decimal places of output. Default is \code{3}.} +\item{digits}{Number of decimal places of output. Defaults to \code{3}.} \item{file}{File name of MS Word (\code{.doc}). Currently, only regression model summary can be saved.} diff --git a/man/Run.Rd b/man/Run.Rd index 41a492a..8c97875 100644 --- a/man/Run.Rd +++ b/man/Run.Rd @@ -10,7 +10,7 @@ Run(..., silent = FALSE) \item{...}{Character string(s) to run. You can use \code{"{ }"} to insert any R object in the environment.} -\item{silent}{Suppress error/warning messages. Default is \code{FALSE}.} +\item{silent}{Suppress error/warning messages. Defaults to \code{FALSE}.} } \value{ Invisibly return the running expression(s). diff --git a/man/TTEST.Rd b/man/TTEST.Rd index 7453f28..a893cd0 100644 --- a/man/TTEST.Rd +++ b/man/TTEST.Rd @@ -17,7 +17,6 @@ TTEST( factor.rev = TRUE, bayes.prior = "medium", digits = 2, - nsmall = digits, file = NULL ) } @@ -34,11 +33,11 @@ Multiple variables should be included in a character vector \code{c()}. Only necessary for independent-samples \emph{t}-test.} -\item{paired}{For paired-samples \emph{t}-test, set it as \code{TRUE}. 
Default is \code{FALSE}.} +\item{paired}{For paired-samples \emph{t}-test, set it as \code{TRUE}. Defaults to \code{FALSE}.} \item{paired.d.type}{Type of Cohen's \emph{d} for paired-samples \emph{t}-test (see Lakens, 2013). -Default is \code{"dz"}. Options include: +Defaults to \code{"dz"}. Options include: \describe{ \item{\code{"dz"} (\emph{d} for standardized difference)}{ Cohen's \eqn{d_{z} = \frac{M_{diff}}{SD_{diff}}} @@ -53,21 +52,21 @@ Default is \code{"dz"}. Options include: }} \item{var.equal}{If Levene's test indicates a violation of the homogeneity of variance, -then you should better set this argument as \code{FALSE}. Default is \code{TRUE}.} +then you should better set this argument as \code{FALSE}. Defaults to \code{TRUE}.} -\item{mean.diff}{Whether to display results of mean difference and its 95\% CI. Default is \code{TRUE}.} +\item{mean.diff}{Whether to display results of mean difference and its 95\% CI. Defaults to \code{TRUE}.} -\item{test.value}{The true value of the mean (or difference in means for a two-samples test). Default is \code{0}.} +\item{test.value}{The true value of the mean (or difference in means for a two-samples test). Defaults to \code{0}.} \item{test.sided}{Any of \code{"="} (two-sided, the default), \code{"<"} (one-sided), or \code{">"} (one-sided).} \item{factor.rev}{Whether to reverse the levels of factor (X) -such that the test compares higher vs. lower level. Default is \code{TRUE}.} +such that the test compares higher vs. lower level. Defaults to \code{TRUE}.} -\item{bayes.prior}{Prior scale in Bayesian \emph{t}-test. Default is 0.707. +\item{bayes.prior}{Prior scale in Bayesian \emph{t}-test. Defaults to 0.707. See details in \code{\link[BayesFactor:ttestBF]{BayesFactor::ttestBF()}}.} -\item{digits, nsmall}{Number of decimal places of output. Default is \code{2}.} +\item{digits}{Number of decimal places of output. 
Defaults to \code{2}.} \item{file}{File name of MS Word (\code{.doc}).} } diff --git a/man/add.Rd b/man/add.Rd index e638b66..74031b6 100644 --- a/man/add.Rd +++ b/man/add.Rd @@ -34,7 +34,7 @@ Passing to \code{\link[data.table:data.table]{data.table}}: \code{DT[ , , by]}} \item{drop}{Drop existing variables and return only new variables? -Default is \code{FALSE}, which returns all variables.} +Defaults to \code{FALSE}, which returns all variables.} } \value{ \code{add()} returns a new diff --git a/man/bruceR-package.Rd b/man/bruceR-package.Rd index b38653b..6a9f433 100644 --- a/man/bruceR-package.Rd +++ b/man/bruceR-package.Rd @@ -127,6 +127,8 @@ Loading \code{bruceR} by \code{library(bruceR)} will also load these R packages \code{\link{Corr}} \code{\link{cor_diff}} + + \code{\link{cor_multilevel}} } \item{\strong{(5) T-Test, Multi-Factor ANOVA, Simple-Effect Analysis, and Post-Hoc Multiple Comparison}}{ diff --git a/man/cc.Rd b/man/cc.Rd index 35422fa..ad1edf0 100644 --- a/man/cc.Rd +++ b/man/cc.Rd @@ -10,11 +10,11 @@ cc(..., sep = "auto", trim = TRUE) \item{...}{Character string(s).} \item{sep}{Pattern for separation. -Default is \code{"auto"}: +Defaults to \code{"auto"}: \code{,} \code{;} \code{|} \code{\\n} \code{\\t}} \item{trim}{Remove whitespace from start and end of string(s)? -Default is \code{TRUE}.} +Defaults to \code{TRUE}.} } \value{ Character vector. diff --git a/man/ccf_plot.Rd b/man/ccf_plot.Rd index 1ed149a..2afa8a3 100644 --- a/man/ccf_plot.Rd +++ b/man/ccf_plot.Rd @@ -27,33 +27,33 @@ ccf_plot( \item{data}{Data frame.} -\item{lag.max}{Maximum time lag. Default is \code{30}.} +\item{lag.max}{Maximum time lag. Defaults to \code{30}.} -\item{sig.level}{Significance level. Default is \code{0.05}.} +\item{sig.level}{Significance level. Defaults to \code{0.05}.} \item{xbreaks}{X-axis breaks.} \item{ybreaks}{Y-axis breaks.} -\item{ylim}{Y-axis limits. Default is \code{NULL} to automatically estimate.} +\item{ylim}{Y-axis limits. 
Defaults to \code{NULL} to automatically estimate.} \item{alpha.ns}{Color transparency (opacity: 0~1) for non-significant values. -Default is \code{1} for no transparency (i.e., opaque color).} +Defaults to \code{1} for no transparency (i.e., opaque color).} -\item{pos.color}{Color for positive values. Default is \code{"black"}.} +\item{pos.color}{Color for positive values. Defaults to \code{"black"}.} -\item{neg.color}{Color for negative values. Default is \code{"black"}.} +\item{neg.color}{Color for negative values. Defaults to \code{"black"}.} \item{ci.color}{Color for upper and lower bounds of significant values. -Default is \code{"blue"}.} +Defaults to \code{"blue"}.} -\item{title}{Plot title. Default is an illustration of the formula.} +\item{title}{Plot title. Defaults to an illustration of the formula.} \item{subtitle}{Plot subtitle.} -\item{xlab}{X-axis title. Default is \code{"Lag"}.} +\item{xlab}{X-axis title. Defaults to \code{"Lag"}.} -\item{ylab}{Y-axis title. Default is \code{"Cross-Correlation"}.} +\item{ylab}{Y-axis title. Defaults to \code{"Cross-Correlation"}.} } \value{ A \code{gg} object, which you can further modify using diff --git a/man/cor_multilevel.Rd b/man/cor_multilevel.Rd new file mode 100644 index 0000000..60637be --- /dev/null +++ b/man/cor_multilevel.Rd @@ -0,0 +1,31 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/bruceR-stats_1_basic.R +\name{cor_multilevel} +\alias{cor_multilevel} +\title{Multilevel correlations (within-level and between-level).} +\usage{ +cor_multilevel(data, group, digits = 3) +} +\arguments{ +\item{data}{Data frame.} + +\item{group}{Grouping variable.} + +\item{digits}{Number of decimal places of output. Defaults to \code{3}.} +} +\value{ +Invisibly return a list of results. +} +\description{ +Multilevel correlations (within-level and between-level). +For details, see description in \code{\link{HLM_ICC_rWG}}. 
+} +\examples{ +# see https://psychbruce.github.io/supp/CEM + +} +\seealso{ +\code{\link{Corr}} + +\code{\link{HLM_ICC_rWG}} +} diff --git a/man/dtime.Rd b/man/dtime.Rd index 90b00cb..dc2e090 100644 --- a/man/dtime.Rd +++ b/man/dtime.Rd @@ -4,14 +4,14 @@ \alias{dtime} \title{Timer (compute time difference).} \usage{ -dtime(t0, unit = "secs", digits = 0, nsmall = digits) +dtime(t0, unit = "secs", digits = 0) } \arguments{ \item{t0}{Time at the beginning.} -\item{unit}{Options: \code{"auto", "secs", "mins", "hours", "days", "weeks"}. Default is \code{"secs"}.} +\item{unit}{Options: \code{"auto", "secs", "mins", "hours", "days", "weeks"}. Defaults to \code{"secs"}.} -\item{digits, nsmall}{Number of decimal places of output. Default is \code{0}.} +\item{digits}{Number of decimal places of output. Defaults to \code{0}.} } \value{ A character string of time difference. diff --git a/man/export.Rd b/man/export.Rd index e952107..e23c42d 100644 --- a/man/export.Rd +++ b/man/export.Rd @@ -10,7 +10,8 @@ export( sheet = NULL, encoding = NULL, header = "auto", - overwrite = TRUE + overwrite = TRUE, + verbose = FALSE ) } \arguments{ @@ -24,19 +25,21 @@ you'd better specify \code{file} with extensions .rda, .rdata, or .Rdata.} If unspecified, then data will be exported to clipboard.} \item{sheet}{[Only for Excel] Excel sheet name(s). -Default is Sheet1, Sheet2, ... +Defaults to Sheet1, Sheet2, ... You may specify multiple sheet names in a character vector \code{c()} with the \emph{same length} as \code{x} (see examples).} -\item{encoding}{File encoding. Default is \code{NULL}. +\item{encoding}{File encoding. Defaults to \code{NULL}. Alternatives can be \code{"UTF-8"}, \code{"GBK"}, \code{"CP936"}, etc. If you find messy code for Chinese text in the exported data (often in CSV when opened with Excel), it is usually effective to set \code{encoding="GBK"} or \code{encoding="CP936"}.} -\item{header}{Does the first row contain column names (\code{TRUE} or \code{FALSE})? 
Default is \code{"auto"}.} +\item{header}{Does the first row contain column names (\code{TRUE} or \code{FALSE})? Defaults to \code{"auto"}.} -\item{overwrite}{Overwrite the existing file (if any)? Default is \code{TRUE}.} +\item{overwrite}{Overwrite the existing file (if any)? Defaults to \code{TRUE}.} + +\item{verbose}{Print output information? Defaults to \code{FALSE}.} } \value{ No return value. diff --git a/man/formatF.Rd b/man/formatF.Rd index 7d85b29..d9342ee 100644 --- a/man/formatF.Rd +++ b/man/formatF.Rd @@ -4,12 +4,12 @@ \alias{formatF} \title{Format numeric values.} \usage{ -formatF(x, digits = 3, nsmall = digits) +formatF(x, digits = 3) } \arguments{ \item{x}{A number or numeric vector.} -\item{digits, nsmall}{Number of decimal places of output. Default is \code{3}.} +\item{digits}{Number of decimal places of output. Defaults to \code{3}.} } \value{ Formatted character string. diff --git a/man/formula_expand.Rd b/man/formula_expand.Rd index e5c2150..71df5a3 100644 --- a/man/formula_expand.Rd +++ b/man/formula_expand.Rd @@ -9,7 +9,7 @@ formula_expand(formula, as.char = FALSE) \arguments{ \item{formula}{R formula or a character string indicating the formula.} -\item{as.char}{Return character? Default is \code{FALSE}.} +\item{as.char}{Return character? Defaults to \code{FALSE}.} } \value{ A formula/character object including all expanded terms. diff --git a/man/grand_mean_center.Rd b/man/grand_mean_center.Rd index f54f960..249cf2e 100644 --- a/man/grand_mean_center.Rd +++ b/man/grand_mean_center.Rd @@ -11,10 +11,10 @@ grand_mean_center(data, vars = names(data), std = FALSE, add.suffix = "") \item{vars}{Variable(s) to be centered.} -\item{std}{Standardized or not. Default is \code{FALSE}.} +\item{std}{Standardized or not. Defaults to \code{FALSE}.} \item{add.suffix}{The suffix of the centered variable(s). -Default is \code{""}. You may set it to \code{"_c"}, \code{"_center"}, etc.} +Defaults to \code{""}. 
You may set it to \code{"_c"}, \code{"_center"}, etc.} } \value{ A new data object containing the centered variable(s). diff --git a/man/granger_causality.Rd b/man/granger_causality.Rd index 2f54888..065c8c6 100644 --- a/man/granger_causality.Rd +++ b/man/granger_causality.Rd @@ -16,17 +16,17 @@ granger_causality( \arguments{ \item{varmodel}{VAR model fitted using the \code{\link[vars:VAR]{vars::VAR()}} function.} -\item{var.y, var.x}{[Optional] Default is \code{NULL} (all variables). +\item{var.y, var.x}{[Optional] Defaults to \code{NULL} (all variables). If specified, then perform tests for specific variables. Values can be a single variable (e.g., \code{"X"}), a vector of variables (e.g., \code{c("X1", "X2")}), or a string containing regular expression (e.g., \code{"X1|X2"}).} -\item{test}{\emph{F} test and/or Wald \eqn{\chi}^2 test. Default is both: \code{c("F", "Chisq")}.} +\item{test}{\emph{F} test and/or Wald \eqn{\chi}^2 test. Defaults to both: \code{c("F", "Chisq")}.} \item{file}{File name of MS Word (\code{.doc}).} -\item{check.dropped}{Check dropped variables. Default is \code{FALSE}.} +\item{check.dropped}{Check dropped variables. Defaults to \code{FALSE}.} } \value{ A data frame of results. diff --git a/man/granger_test.Rd b/man/granger_test.Rd index 70dcd00..deada01 100644 --- a/man/granger_test.Rd +++ b/man/granger_test.Rd @@ -11,9 +11,9 @@ granger_test(formula, data, lags = 1:5, test.reverse = TRUE, file = NULL, ...) \item{data}{Data frame.} -\item{lags}{Time lags. Default is \code{1:5}.} +\item{lags}{Time lags. Defaults to \code{1:5}.} -\item{test.reverse}{Whether to test reverse causality. Default is \code{TRUE}.} +\item{test.reverse}{Whether to test reverse causality. 
Defaults to \code{TRUE}.} \item{file}{File name of MS Word (\code{.doc}).} diff --git a/man/grapes-grapes-COMPUTE-grapes-grapes.Rd b/man/grapes-grapes-COMPUTE-grapes-grapes.Rd index 191fe48..7ca26ed 100644 --- a/man/grapes-grapes-COMPUTE-grapes-grapes.Rd +++ b/man/grapes-grapes-COMPUTE-grapes-grapes.Rd @@ -107,9 +107,9 @@ A character string specifying the positions ("starting:stopping") of variables. \item{range, likert}{[Optional] Range of likert scale (e.g., \code{1:5}, \code{c(1, 5)}). If not provided, it will be automatically estimated from the given data (BUT you should use this carefully).} -\item{na.rm}{Ignore missing values. Default is \code{TRUE}.} +\item{na.rm}{Ignore missing values. Defaults to \code{TRUE}.} -\item{values}{[Only for \code{CONSEC}] Values to be counted as consecutive identical values. Default is all numbers (\code{0:9}).} +\item{values}{[Only for \code{CONSEC}] Values to be counted as consecutive identical values. Defaults to all numbers (\code{0:9}).} } \value{ A vector of computed values. diff --git a/man/group_mean_center.Rd b/man/group_mean_center.Rd index 521f551..e4a8bae 100644 --- a/man/group_mean_center.Rd +++ b/man/group_mean_center.Rd @@ -20,13 +20,13 @@ group_mean_center( \item{by}{Grouping variable.} -\item{std}{Standardized or not. Default is \code{FALSE}.} +\item{std}{Standardized or not. Defaults to \code{FALSE}.} \item{add.suffix}{The suffix of the centered variable(s). -Default is \code{""}. You may set it to \code{"_c"}, \code{"_center"}, etc.} +Defaults to \code{""}. You may set it to \code{"_c"}, \code{"_center"}, etc.} \item{add.group.mean}{The suffix of the variable name(s) of group means. -Default is \code{"_mean"} (see Examples).} +Defaults to \code{"_mean"} (see Examples).} } \value{ A new data object containing the centered variable(s). 
diff --git a/man/import.Rd b/man/import.Rd index 104181d..b324a67 100644 --- a/man/import.Rd +++ b/man/import.Rd @@ -11,7 +11,8 @@ import( encoding = NULL, header = "auto", setclass = as, - as = "data.frame" + as = "data.frame", + verbose = FALSE ) } \arguments{ @@ -19,22 +20,24 @@ import( If unspecified, then data will be imported from clipboard.} \item{sheet}{[Only for Excel] Excel sheet name (or sheet number). -Default is the first sheet. +Defaults to the first sheet. Ignored if the sheet is specified via \code{range}.} -\item{range}{[Only for Excel] Excel cell range. Default are all cells in a sheet. +\item{range}{[Only for Excel] Excel cell range. Defaults to all cells in a sheet. You may specify it as \code{range="A1:E100"} or \code{range="Sheet1!A1:E100"}.} -\item{encoding}{File encoding. Default is \code{NULL}. +\item{encoding}{File encoding. Defaults to \code{NULL}. Alternatives can be \code{"UTF-8"}, \code{"GBK"}, \code{"CP936"}, etc. If you find messy code for Chinese text in the imported data, it is usually effective to set \code{encoding="UTF-8"}.} -\item{header}{Does the first row contain column names (\code{TRUE} or \code{FALSE})? Default is \code{"auto"}.} +\item{header}{Does the first row contain column names (\code{TRUE} or \code{FALSE})? Defaults to \code{"auto"}.} -\item{setclass, as}{Class of the imported data. Default is \code{"data.frame"}. -Ignored if the data file is R object (.rds, .rda, .rdata, .Rdata). +\item{setclass, as}{Class of the imported data. Defaults to \code{"data.frame"}. +Ignored if the data file is R object (.rds, .rda, .rdata, .Rdata).} + +\item{verbose}{Print data information? Defaults to \code{FALSE}. 
Alternatives can be: \itemize{ diff --git a/man/lavaan_summary.Rd b/man/lavaan_summary.Rd index 1af3c89..807a70a 100644 --- a/man/lavaan_summary.Rd +++ b/man/lavaan_summary.Rd @@ -10,7 +10,6 @@ lavaan_summary( nsim = 100, seed = NULL, digits = 3, - nsmall = digits, print = TRUE, covariance = FALSE, file = NULL @@ -22,7 +21,7 @@ lavaan_summary( \item{ci}{Method for estimating standard error (SE) and 95\% confidence interval (CI). -Default is \code{"raw"} (the standard approach of \code{lavaan}). +Defaults to \code{"raw"} (the standard approach of \code{lavaan}). Other options: \describe{ \item{\code{"boot"}}{Percentile Bootstrap} @@ -34,13 +33,13 @@ Other options: for estimating SE and 95\% CI. In formal analyses, \strong{\code{nsim=1000} (or larger)} is strongly suggested.} -\item{seed}{Random seed for obtaining reproducible results. Default is \code{NULL}.} +\item{seed}{Random seed for obtaining reproducible results. Defaults to \code{NULL}.} -\item{digits, nsmall}{Number of decimal places of output. Default is \code{3}.} +\item{digits}{Number of decimal places of output. Defaults to \code{3}.} -\item{print}{Print results. Default is \code{TRUE}.} +\item{print}{Print results. Defaults to \code{TRUE}.} -\item{covariance}{Print (co)variances. Default is \code{FALSE}.} +\item{covariance}{Print (co)variances. Defaults to \code{FALSE}.} \item{file}{File name of MS Word (\code{.doc}).} } diff --git a/man/med_summary.Rd b/man/med_summary.Rd index 4479190..7737a8e 100644 --- a/man/med_summary.Rd +++ b/man/med_summary.Rd @@ -4,12 +4,12 @@ \alias{med_summary} \title{Tidy report of mediation analysis.} \usage{ -med_summary(model, digits = 3, nsmall = digits, file = NULL) +med_summary(model, digits = 3, file = NULL) } \arguments{ \item{model}{Mediation model built using \code{\link[mediation:mediate]{mediation::mediate()}}.} -\item{digits, nsmall}{Number of decimal places of output. Default is \code{3}.} +\item{digits}{Number of decimal places of output. 
Defaults to \code{3}.} \item{file}{File name of MS Word (\code{.doc}).} } diff --git a/man/model_summary.Rd b/man/model_summary.Rd index 44f7d80..f04a510 100644 --- a/man/model_summary.Rd +++ b/man/model_summary.Rd @@ -8,7 +8,6 @@ model_summary( model.list, std = FALSE, digits = 3, - nsmall = digits, file = NULL, check = TRUE, zero = ifelse(std, FALSE, TRUE), @@ -23,11 +22,11 @@ model_summary( \item{model.list}{A single model or a list of (various types of) models. Most types of regression models are supported!} -\item{std}{Standardized coefficients? Default is \code{FALSE}. +\item{std}{Standardized coefficients? Defaults to \code{FALSE}. Only applicable to linear models and linear mixed models. Not applicable to generalized linear (mixed) models.} -\item{digits, nsmall}{Number of decimal places of output. Default is \code{3}.} +\item{digits}{Number of decimal places of output. Defaults to \code{3}.} \item{file}{File name of MS Word (\code{.doc}).} @@ -35,7 +34,7 @@ Not applicable to generalized linear (mixed) models.} using \code{\link[performance:check_collinearity]{performance::check_collinearity()}}. You may turn it off by setting \code{check=FALSE}.} -\item{zero}{Display "0" before "."? Default is \code{TRUE}.} +\item{zero}{Display "0" before "."? Defaults to \code{TRUE}.} \item{modify.se}{Replace standard errors. Useful if you need to replace raw SEs with robust SEs. diff --git a/man/p.Rd b/man/p.Rd index c142563..b179fe1 100644 --- a/man/p.Rd +++ b/man/p.Rd @@ -19,8 +19,7 @@ p( df = NULL, df1 = NULL, df2 = NULL, - digits = 2, - nsmall = digits + digits = 2 ) p.z(z) @@ -38,7 +37,7 @@ p.chi2(chi2, df) \item{n, df, df1, df2}{Sample size or degree of freedom.} -\item{digits, nsmall}{Number of decimal places of output. Default is \code{2}.} +\item{digits}{Number of decimal places of output. Defaults to \code{2}.} } \value{ \emph{p} value statistics. 
diff --git a/man/print_table.Rd index 7c9539f..dad4dab 100644 --- a/man/print_table.Rd +++ b/man/print_table.Rd @@ -7,7 +7,6 @@ print_table( x, digits = 3, - nsmalls = digits, nspaces = 1, row.names = TRUE, col.names = TRUE, @@ -23,11 +22,11 @@ print_table( \arguments{ \item{x}{Matrix, data.frame (or data.table), or any model object (e.g., \code{lm, glm, lmer, glmer, ...}).} -\item{digits, nsmalls}{Numeric vector specifying the number of decimal places of output. Default is \code{3}.} +\item{digits}{Numeric vector specifying the number of decimal places of output. Defaults to \code{3}.} -\item{nspaces}{Number of whitespaces between columns. Default is \code{1}.} +\item{nspaces}{Number of whitespaces between columns. Defaults to \code{1}.} -\item{row.names, col.names}{Print row/column names. Default is \code{TRUE} (column names are always printed). +\item{row.names, col.names}{Print row/column names. Defaults to \code{TRUE} (column names are always printed). To modify the names, you can use a character vector with the same length as the raw names.} \item{title}{Title text, which will be inserted in <p></p>

(HTML code).} diff --git a/man/regress.Rd index f15309c..c26c99a 100644 --- a/man/regress.Rd +++ b/man/regress.Rd @@ -9,7 +9,6 @@ regress( data, family = NULL, digits = 3, - nsmall = digits, robust = FALSE, cluster = NULL, test.rand = FALSE @@ -20,9 +19,10 @@ regress( \item{data}{Data frame.} -\item{family}{[Optional] The same as in \code{glm} and \code{glmer} (e.g., \code{family=binomial} fits a logistic regression model).} +\item{family}{[Optional] The same as in \code{glm} and \code{glmer} +(e.g., \code{family=binomial} fits a logistic regression model).} -\item{digits, nsmall}{Number of decimal places of output. Default is 3.} +\item{digits}{Number of decimal places of output. Defaults to \code{3}.} \item{robust}{[Only for \code{lm} and \code{glm}] \code{FALSE} (default), \code{TRUE} (then the default is \code{"HC1"}), diff --git a/man/scaler.Rd index dd3bfc6..1965c7e 100644 --- a/man/scaler.Rd +++ b/man/scaler.Rd @@ -9,9 +9,9 @@ scaler(v, min = 0, max = 1) \arguments{ \item{v}{Variable (numeric vector).} -\item{min}{Minimum value (default is 0).} +\item{min}{Minimum value (defaults to 0).} -\item{max}{Maximum value (default is 1).} +\item{max}{Maximum value (defaults to 1).} } \value{ A vector of rescaled variable. diff --git a/man/set.wd.Rd index 25ce0ee..cf04740 100644 --- a/man/set.wd.Rd +++ b/man/set.wd.Rd @@ -11,7 +11,7 @@ set_wd(path = NULL, ask = FALSE) } \arguments{ \item{path}{\code{NULL} (default) or a specific path. -Default is to extract the path of the currently opened file +Defaults to extracting the path of the currently opened file (usually .R or .Rmd) using the \code{rstudioapi::getSourceEditorContext} function.} \item{ask}{\code{TRUE} or \code{FALSE} (default). 
diff --git a/man/theme_bruce.Rd b/man/theme_bruce.Rd index fa5895b..b699643 100644 --- a/man/theme_bruce.Rd +++ b/man/theme_bruce.Rd @@ -27,16 +27,16 @@ theme_bruce( ) } \arguments{ -\item{markdown}{Use \code{element_markdown()} instead of \code{element_text()}. Default is \code{FALSE}. +\item{markdown}{Use \code{element_markdown()} instead of \code{element_text()}. Defaults to \code{FALSE}. If set to \code{TRUE}, then you should also use \code{element_markdown()} in \code{theme()} (if any).} -\item{base.size}{Basic font size. Default is 12.} +\item{base.size}{Basic font size. Defaults to \code{12}.} -\item{line.size}{Line width. Default is 0.5.} +\item{line.size}{Line width. Defaults to \code{0.5}.} \item{border}{\code{TRUE}, \code{FALSE}, or \code{"black"} (default).} -\item{bg}{Background color of whole plot. Default is \code{"white"}. +\item{bg}{Background color of whole plot. Defaults to \code{"white"}. You can use any colors or choose from some pre-set color palettes: \code{"stata", "stata.grey", "solar", "wsj", "light", "dust"}. @@ -45,7 +45,7 @@ To see these colors, you can type: \code{ggthemr::colour_plot(c(stata="#EAF2F3", stata.grey="#E8E8E8", solar="#FDF6E3", wsj="#F8F2E4", light="#F6F1EB", dust="#FAF7F2"))}} -\item{panel.bg}{Background color of panel. Default is \code{"white"}.} +\item{panel.bg}{Background color of panel. Defaults to \code{"white"}.} \item{tag}{Font face of tag. Choose from \code{"plain", "italic", "bold", "bold.italic"}.} @@ -65,13 +65,13 @@ solar="#FDF6E3", wsj="#F8F2E4", light="#F6F1EB", dust="#FAF7F2"))}} \item{grid.y}{\code{FALSE}, \code{""} (default), or a color (e.g., \code{"grey90"}) to set the color of panel grid (y).} -\item{line.x}{Draw the x-axis line. Default is \code{TRUE}.} +\item{line.x}{Draw the x-axis line. Defaults to \code{TRUE}.} -\item{line.y}{Draw the y-axis line. Default is \code{TRUE}.} +\item{line.y}{Draw the y-axis line. Defaults to \code{TRUE}.} -\item{tick.x}{Draw the x-axis ticks. 
Default is \code{TRUE}.} +\item{tick.x}{Draw the x-axis ticks. Defaults to \code{TRUE}.} -\item{tick.y}{Draw the y-axis ticks. Default is \code{TRUE}.} +\item{tick.y}{Draw the y-axis ticks. Defaults to \code{TRUE}.} } \value{ A theme object that should be used for \code{ggplot2}.