
Commit

Merge pull request #84 from konfound-project/newitcv
Newitcv merge at last!
jrosen48 authored Aug 8, 2024
2 parents 10ab141 + d458d3f commit 757cc1a
Showing 35 changed files with 3,089 additions and 1,077 deletions.
3 changes: 3 additions & 0 deletions NAMESPACE
@@ -4,6 +4,7 @@ export(konfound)
export(mkonfound)
export(pkonfound)
export(tkonfound)
export(tkonfound_fig)
importFrom(broom,glance)
importFrom(broom,tidy)
importFrom(broom.mixed,tidy)
@@ -58,6 +59,8 @@ importFrom(stats,chisq.test)
importFrom(stats,cor)
importFrom(stats,fisher.test)
importFrom(stats,glm)
importFrom(stats,pt)
importFrom(stats,qt)
importFrom(stats,var)
importFrom(tidyr,gather)
importFrom(utils,globalVariables)
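
Among the exports added above, tkonfound_fig() is new. A minimal sketch of calling it, assuming it takes the four cell counts of a 2x2 treatment-by-outcome table in the same a/b/c/d style used elsewhere in the package (the counts below are invented for illustration):

# Hypothetical call to the newly exported figure function; the a/b/c/d
# cell-count interface is assumed from the package's 2x2-table convention.
library(konfound)
tkonfound_fig(a = 35, b = 17, c = 17, d = 38)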
5 changes: 3 additions & 2 deletions R/cop_pse_auxiliary.R
@@ -9,7 +9,7 @@ cal_ryz <- function(ryxGz, R2){
if (R2yz >= 0) {
ryz <- sqrt(R2yz)
} else {
stop("Error! R2yz < 0!")
stop("The calculated variance in Y explained by Z is less than 0. This can occur if Z\n suppresses the relationship between X and Y. That is, if partialling on Z increases\n the relationship between X and Y. Note: the unconditional ITCV is not conceptualized\n for this scenario.")
}
return(ryz)
}
@@ -25,7 +25,8 @@ cal_ryz <- function(ryxGz, R2){
#' @importFrom lavaan parameterEstimates
cal_rxz <- function(var_x, var_y, R2, df, std_err){
R2xz <- 1 - ((var_y * (1 - R2))/(var_x * df * std_err^2))
if (R2xz <= 0) {stop("Error! R2xz < 0!")}
if (R2xz <= 0) {stop("Error! R2xz < 0!
Consider adding more significant digits to your input values.")}
## Note output the number of R2xz
rxz <- sqrt(R2xz)
return(rxz)
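
As a standalone check of the R2xz formula used in cal_rxz() above, the same quantity can be reproduced directly from summary statistics; a minimal sketch with invented numbers:

# Toy reproduction of the R2xz calculation in cal_rxz(); all values are invented.
var_x   <- 4      # variance of the focal predictor X
var_y   <- 25     # variance of the outcome Y
R2      <- 0.30   # model R-squared
df      <- 97     # residual degrees of freedom
std_err <- 0.25   # standard error of the coefficient on X
R2xz <- 1 - ((var_y * (1 - R2)) / (var_x * df * std_err^2))
if (R2xz <= 0) stop("R2xz <= 0; consider supplying inputs with more significant digits.")
sqrt(R2xz)   # rxz, approximately 0.53 with these numbers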
48 changes: 48 additions & 0 deletions R/helper_output_list.R
@@ -0,0 +1,48 @@
output_list <- function(obs_r, act_r,
critical_r, r_final, rxcv, rycv, rxcvGz, rycvGz,
itcvGz, itcv, r2xz, r2yz,
delta_star, delta_star_restricted, delta_exact, delta_pctbias,
cor_oster, cor_exact,
beta_threshold, beta_threshold_verify,
perc_bias_to_change,
RIR_primary, RIR_supplemental, RIR_perc,
fragility_primary, fragility_supplemental,
starting_table, final_table,
user_SE, analysis_SE,
Fig_ITCV, Fig_RIR) {
result <- list(obs_r = obs_r,
act_r = act_r,
critical_r = critical_r,
r_final = r_final,
rxcv = rxcv,
rycv = rycv,
rxcvGz = rxcvGz,
rycvGz = rycvGz,
itcvGz = itcvGz,
itcv = itcv,
r2xz = r2xz,
r2yz = r2yz,
delta_star = delta_star,
delta_star_restricted = delta_star_restricted,
delta_exact = delta_exact,
delta_pctbias = delta_pctbias,
cor_oster = cor_oster,
cor_exact = cor_exact,
beta_threshold = beta_threshold,
beta_threshold_verify = beta_threshold_verify,
perc_bias_to_change = perc_bias_to_change,
RIR_primary = RIR_primary,
RIR_supplemental = RIR_supplemental,
RIR_perc = RIR_perc,
fragility_primary = fragility_primary,
fragility_supplemental = fragility_supplemental,
starting_table = starting_table,
final_table = final_table,
user_SE = user_SE,
analysis_SE = analysis_SE,
Fig_ITCV = Fig_ITCV,
Fig_RIR = Fig_RIR)
result <- result[!is.na(result)]
message("For interpretation, check out to_return = 'print'.")
return(result)
}
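
Note that the pruning step result[!is.na(result)] only drops elements that are a single NA; a standalone illustration of that behaviour (not part of the package):

# Elements that are a length-one NA are removed; longer elements such as
# tables or figures are always kept, even if they contain NA values.
result <- list(obs_r = 0.35, itcv = NA, final_table = matrix(1:4, 2))
result <- result[!is.na(result)]
names(result)
#> [1] "obs_r"       "final_table"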
324 changes: 198 additions & 126 deletions R/helper_output_print.R

Large diffs are not rendered by default.

63 changes: 36 additions & 27 deletions R/konfound-lm.R
@@ -2,7 +2,7 @@

#' Konfound Analysis for Linear Models
#'
#' This function performs konfound analysis on a linear model object
#' This function performs konfound analysis on a linear model object
#' produced by lm.
#' It calculates the sensitivity of inferences for coefficients in the model.
#' It supports analysis for a single variable or multiple variables.
@@ -16,35 +16,44 @@
#' @return The results of the konfound analysis for the specified variable(s).
#' @importFrom broom tidy glance
#' @importFrom dplyr select filter bind_cols
konfound_lm <- function(model_object,
tested_variable_string,
alpha,
tails,
index,
#' @importFrom stats var
konfound_lm <- function(model_object,
tested_variable_string,
alpha,
tails,
index,
to_return) {
tidy_output <- broom::tidy(model_object) # tidying output
glance_output <- broom::glance(model_object)

coef_df <- tidy_output[tidy_output$term == tested_variable_string, ]

est_eff <- coef_df$estimate
std_err <- coef_df$std.error
n_obs <- glance_output$nobs
n_covariates <- glance_output$df

out <- test_sensitivity(
est_eff = est_eff,
std_err = std_err,
n_obs = n_obs,
n_covariates = n_covariates,
alpha = alpha,
tails = tails,
index = index,
nu = 0,
to_return = to_return,
model_object = model_object,
tested_variable = tested_variable_string)

sdx <- unname(sqrt(diag(stats::var(model_object$model)))[tested_variable_string])

est_eff <- coef_df$estimate
std_err <- coef_df$std.error
n_obs <- glance_output$nobs
n_covariates <- unname(glance_output$df)
sdy <- unname(sqrt(diag(var(model_object$model)))[1])
R2 <- summary(model_object)$r.squared

out <- test_sensitivity(
est_eff = est_eff,
std_err = std_err,
n_obs = n_obs,
n_covariates = n_covariates - 1,
sdx = sdx,
sdy = sdy,
R2 = R2,
alpha = alpha,
tails = tails,
index = index,
nu = 0,
far_bound = 0,
eff_thr = NA,
to_return = to_return,
model_object = model_object,
tested_variable = tested_variable_string
)

return(out)

}
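
The user-facing call is unchanged by these internals; a toy example of the path through konfound_lm() (output omitted, mtcars used purely for illustration):

# konfound() dispatches lm objects to konfound_lm(), which now also pulls
# sdx, sdy, and R2 from the fitted model before calling test_sensitivity().
m <- lm(mpg ~ wt + hp, data = mtcars)
konfound(m, wt)                            # printed sensitivity analysis for wt
konfound(m, wt, to_return = "raw_output")  # full list assembled by output_list()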
49 changes: 22 additions & 27 deletions R/konfound.R
@@ -1,10 +1,10 @@
#' Konfound Analysis for Various Model Types
#'
#' Performs sensitivity analysis on fitted models including
#' linear models (`lm`), generalized linear models (`glm`),
#' Performs sensitivity analysis on fitted models including
#' linear models (`lm`), generalized linear models (`glm`),
#' and linear mixed-effects models (`lmerMod`).
#' It calculates the amount of bias required to invalidate or
#' sustain an inference, and the impact of an omitted variable
#' It calculates the amount of bias required to invalidate or
#' sustain an inference, and the impact of an omitted variable
#' necessary to affect the inference.
#'
#' @param model_object A model object produced by `lm`, `glm`, or `lme4::lmer`.
@@ -13,13 +13,13 @@
#' @param tails Number of tails for the test (1 or 2).
#' @param index Type of sensitivity analysis ('RIR' by default).
#' @param to_return Type of output to return ('print', 'raw_output', 'table').
#' @param two_by_two Boolean; if `TRUE`, uses a 2x2 table approach
#' @param two_by_two Boolean; if `TRUE`, uses a 2x2 table approach
#' for `glm` dichotomous variables.
#' @param n_treat Number of treatment cases
#' @param n_treat Number of treatment cases
#' (used only if `two_by_two` is `TRUE`).
#' @param switch_trm Boolean; switch treatment and control in the analysis.
#' @param replace Replacement method for treatment cases ('control' by default).
#' @return Depending on `to_return`, prints the result, returns a raw output,
#' @return Depending on `to_return`, prints the result, returns a raw output,
#' or a summary table.
#' @importFrom rlang enquo quo_name
#' @importFrom lme4 fixef lmer
@@ -69,13 +69,13 @@ konfound <- function(model_object,

# Stop messages
if (!(class(model_object)[1] %in% c("lm", "glm", "lmerMod"))) {
stop("konfound() is currently implemented for models estimated with
stop("konfound() is currently implemented for models estimated with
lm(), glm(), and lme4::lmer(); consider using pkonfound() instead")
}

# Dealing with non-standard evaluation
tested_variable_enquo <- rlang::enquo(tested_variable)
# dealing with non-standard evaluation
tested_variable_enquo <- rlang::enquo(tested_variable)
# dealing with non-standard evaluation
#(so unquoted names for tested_variable can be used)
tested_variable_string <- rlang::quo_name(tested_variable_enquo)

@@ -94,14 +94,9 @@ konfound <- function(model_object,
}

if (inherits(model_object, "glm") & two_by_two == FALSE) {
message("Note that for a non-linear model,
impact threshold should not be interpreted.")
message("Note that this is only an approximation. For exact results
in terms of the number of cases that must be switched from treatment
success to treatment failure to invalidate the inference see:
https://msu.edu/~kenfrank/non%20linear%20replacement%20treatment.xlsm")
message("If a dichotomous independent variable is used, consider using
the 2X2 table approach enabled with the argument two_by_two = TRUE")
message("Note that if your model is a logistic regression, we recommend using the pkonfound command for logistic regression with manually entered parameter estimates and other quantities.")
message("Note that this is only an approximation. For exact results in terms of the number of cases that must be switched from treatment success to treatment failure to invalidate the inference see: https://msu.edu/~kenfrank/non%20linear%20replacement%20treatment.xlsm")
message("If a dichotomous independent variable is used, consider using the 2X2 table approach enabled with the argument two_by_two = TRUE")
output <- konfound_glm(
model_object = model_object,
tested_variable_string = tested_variable_string,
@@ -115,9 +110,9 @@ konfound <- function(model_object,

if (inherits(model_object, "glm") & two_by_two == TRUE) {

if(is.null(n_treat)) stop("Please provide a value for n_treat to use
if(is.null(n_treat)) stop("Please provide a value for n_treat to use
this functionality with a dichotomous predictor")

output <- konfound_glm_dichotomous(
model_object = model_object,
tested_variable_string = tested_variable_string,
@@ -143,14 +138,14 @@ konfound <- function(model_object,
to_return = to_return
)

message("Note that the Kenward-Roger approximation is used to
estimate degrees of freedom for the predictor(s) of interest.
We are presently working to add other methods for calculating
the degrees of freedom for the predictor(s) of interest.
If you wish to use other methods now, consider those detailed here:
message("Note that the Kenward-Roger approximation is used to
estimate degrees of freedom for the predictor(s) of interest.
We are presently working to add other methods for calculating
the degrees of freedom for the predictor(s) of interest.
If you wish to use other methods now, consider those detailed here:
https://bbolker.github.io/mixedmodels-misc/glmmFAQ.html
#why-doesnt-lme4-display-denominator-degrees-of-freedomp-values-what-other-options-do-i-have.
You can then enter degrees of freedom obtained from another method along with the coefficient,
#why-doesnt-lme4-display-denominator-degrees-of-freedomp-values-what-other-options-do-i-have.
You can then enter degrees of freedom obtained from another method along with the coefficient,
number of observations, and number of covariates to the pkonfound() function to quantify the robustness of the inference.")

return(output)
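
For the mixed-model case, the closing message suggests re-running the analysis through pkonfound() with quantities obtained elsewhere; a sketch of that follow-up call (all numbers invented for illustration):

# Sketch of the pkonfound() follow-up suggested in the message above;
# the estimate, standard error, and counts are invented.
pkonfound(est_eff = 2, std_err = 0.4, n_obs = 100, n_covariates = 3)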
13 changes: 7 additions & 6 deletions R/nonlinear_auxiliary.R
@@ -131,7 +131,8 @@ getswitch <- function(table_bstart, thr_t, switch_trm, n_obs) {
b_tryall <- b - (b - 1) * (1 - as.numeric(switch_trm))
tryall_t <- get_t_kfnl(a_tryall, b_tryall, c_tryall, d_tryall)
tryall_est <- log(a_tryall*d_tryall/c_tryall/b_tryall)
allnotenough <- isTRUE(thr_t - tryall_t > 0 & tryall_est*est_eff_start > 0)
allnotenough <- isTRUE(thr_t - tryall_t > 0 &
tryall_est * est_eff_start >= 0)
}
if (t_start > thr_t) {
# transfer cases from A to B or D to C to decrease odds ratio
@@ -141,7 +142,7 @@ getswitch <- function(table_bstart, thr_t, switch_trm, n_obs) {
b_tryall <- b + (a - 1) * (1 - as.numeric(switch_trm))
tryall_t <- get_t_kfnl(a_tryall, b_tryall, c_tryall, d_tryall)
tryall_est <- log(a_tryall*d_tryall/c_tryall/b_tryall)
allnotenough <- isTRUE(tryall_t - thr_t > 0 & tryall_est*est_eff_start > 0)
allnotenough <- isTRUE(tryall_t - thr_t > 0 & tryall_est*est_eff_start >= 0)
}

### run following if transfering one row is enough
@@ -402,7 +403,7 @@ if (!dcroddsratio_start) {
b_tryall <- b - (b - 1) * (1 - as.numeric(switch_trm))
tryall_p <- chisq_p(a_tryall, b_tryall, c_tryall, d_tryall)
tryall_est <- log(a_tryall*d_tryall/c_tryall/b_tryall)
allnotenough <- isTRUE((thr_p-tryall_p)*tryall_est< 0 & tryall_est*est > 0)
allnotenough <- isTRUE((thr_p-tryall_p)*tryall_est< 0 & tryall_est*est >= 0)
}
if (dcroddsratio_start ) {
# transfer cases from A to B or D to C to decrease odds ratio
@@ -412,7 +413,7 @@ if (dcroddsratio_start ) {
b_tryall <- b + (a - 1) * (1 - as.numeric(switch_trm))
tryall_p <- chisq_p(a_tryall, b_tryall, c_tryall, d_tryall)
tryall_est <- log(a_tryall*d_tryall/c_tryall/b_tryall)
allnotenough <- isTRUE((thr_p-tryall_p)*tryall_est> 0 & tryall_est*est > 0)
allnotenough <- isTRUE((thr_p-tryall_p)*tryall_est> 0 & tryall_est*est >= 0)
}

### run following if transfering one row is enough
@@ -748,7 +749,7 @@ getswitch_fisher <- function(a, b, c, d, thr_p = 0.05, switch_trm = TRUE){
if (d_tryall == 0) {d_tryall <- d_tryall + 0.5}
tryall_p <- fisher_p(a_tryall, b_tryall, c_tryall, d_tryall)
tryall_est <- log(a_tryall*d_tryall/c_tryall/b_tryall)
allnotenough <- isTRUE((thr_p-tryall_p)*tryall_est< 0 & tryall_est*est > 0)
allnotenough <- isTRUE((thr_p-tryall_p)*tryall_est< 0 & tryall_est*est >= 0)
}
if (dcroddsratio_start ) {
# transfer cases from A to B or D to C to decrease odds ratio
@@ -762,7 +763,7 @@ getswitch_fisher <- function(a, b, c, d, thr_p = 0.05, switch_trm = TRUE){
if (d_tryall == 0) {d_tryall <- d_tryall + 0.5}
tryall_p <- fisher_p(a_tryall, b_tryall, c_tryall, d_tryall)
tryall_est <- log(a_tryall*d_tryall/c_tryall/b_tryall)
allnotenough <- isTRUE((thr_p-tryall_p)*tryall_est> 0 & tryall_est*est > 0)
allnotenough <- isTRUE((thr_p-tryall_p)*tryall_est> 0 & tryall_est*est >= 0)
}

### run following if transfering one row is enough
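
The relaxed comparisons in this file (>= 0 rather than > 0) mean that a trial table whose log odds ratio is exactly zero still counts as pointing in the original direction; a standalone illustration of the sign check (toy cell counts, not package code):

# Toy illustration of the sign check used in the allnotenough conditions.
a <- 10; b <- 10; c <- 5; d <- 5        # 2x2 cell counts after a trial transfer
tryall_est <- log(a * d / c / b)        # log odds ratio of the trial table: 0 here
est_eff_start <- 0.7                    # sign carrier for the original estimate (invented)
tryall_est * est_eff_start >= 0         # TRUE under the new >= 0 rule, FALSE under > 0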

