
Minor Edits
1. Added calculation code and print output for the ITCV benchmark
2. Added unconditional and partial impact tables for R model objects
3. Suppressed the chi-square warning message in the non-linear model (RIR)
JihoonChoi26 committed Dec 2, 2024
1 parent 84e66ba commit 19c743f
Showing 5 changed files with 126 additions and 8 deletions.
4 changes: 3 additions & 1 deletion R/helper_output_list.R
@@ -1,6 +1,6 @@
output_list <- function(obs_r, act_r,
critical_r, r_final, rxcv, rycv, rxcvGz, rycvGz,
itcvGz, itcv, r2xz, r2yz,
itcvGz, itcv, r2xz, r2yz, benchmark_corr_product = NA, itcv_ratio_to_benchmark = NA,
delta_star, delta_star_restricted, delta_exact, delta_pctbias,
cor_oster, cor_exact,
beta_threshold, beta_threshold_verify,
@@ -26,6 +26,8 @@ output_list <- function(obs_r, act_r,
itcv = itcv,
r2xz = r2xz,
r2yz = r2yz,
benchmark_corr_product = benchmark_corr_product,
itcv_ratio_to_benchmark = itcv_ratio_to_benchmark,
delta_star = delta_star,
delta_star_restricted = delta_star_restricted,
delta_exact = delta_exact,
26 changes: 25 additions & 1 deletion R/helper_output_print.R
@@ -34,6 +34,8 @@
#' @param rycv the correlation between y and CV.
#' @param rxcvGz the correlation between predictor of interest and CV necessary to nullify the inference for smallest impact, conditioning on all observed covariates.
#' @param rycvGz the correlation between outcome and CV necessary to nullify the inference for smallest impact, conditioning on all observed covariates.
#' @param benchmark_corr_product the product of the correlations of covariates Z with X and Y (Rxz * Ryz), measuring the observed association strength.
#' @param itcv_ratio_to_benchmark the ratio of the ITCV to the benchmark_corr_product, indicating the robustness of inference.
#' @importFrom crayon bold underline italic

output_print <- function(n_covariates,
@@ -57,7 +59,9 @@ output_print <- function(n_covariates,
rxcv = NA,
rycv = NA,
rxcvGz,
rycvGz) {
rycvGz,
benchmark_corr_product = NA,
itcv_ratio_to_benchmark = NA) {
if (index == "RIR"){
cat(crayon::bold("Robustness of Inference to Replacement (RIR):\n"))
if ((abs(est_eff) > abs(beta_threshhold)) & is.na(eff_thr) == TRUE) {
@@ -343,6 +347,26 @@ output_print <- function(n_covariates,
cat("\n")
}

if (!is.na(benchmark_corr_product) && !is.na(itcv_ratio_to_benchmark)) {
cat("Interpretation of Benchmark Correlations for ITCV:")
cat("\n")
cat(paste0("Benchmark correlation product ('benchmark_corr_product') is Rxz*Ryz = ", round(benchmark_corr_product, 4),
", showing"))
cat("\n")
cat("the association strength of all observed covariates Z with X and Y.\n")
cat("\n")
cat(paste0("The ratio ('itcv_ratio_to_benchmark') is ", round(itcv_ratio_to_benchmark, 4),
", indicating the robustness of inference."))
cat("\n")
cat("A smaller ratio means required correlations to nullify the inference would need to be")
cat("\n")
cat("much stronger than observed.\n")
cat("\n")
cat("If Z includes pretests or fixed effects, the benchmark may be inflated, making the ratio\n")
cat("unusually small. Interpret robustness cautiously in such cases.\n")
cat("\n")
}

cat("See Frank (2000) for a description of the method.")
cat("\n")
cat("\n")
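To make the printed interpretation above concrete, here is a standalone sketch of the arithmetic with made-up correlations; the values are illustrative and simply mirror the formulas added in test_sensitivity.R below.

# Hypothetical values, for illustration only
r2xz <- 0.30          # association of the observed covariates Z with X
r2yz <- 0.40          # association of the observed covariates Z with Y
uncond_rxcv <- 0.25   # correlation with X an omitted CV would need to nullify the inference
uncond_rycv <- 0.20   # correlation with Y an omitted CV would need to nullify the inference

benchmark_corr_product <- r2xz * r2yz                                                  # 0.12
itcv_ratio_to_benchmark <- abs((uncond_rxcv * uncond_rycv) / benchmark_corr_product)   # 0.05 / 0.12 ~= 0.417

# A ratio well below 1 (as here) says a confounder weaker than the observed
# covariates could already nullify the inference; a ratio well above 1 says it
# would have to be much stronger than anything observed, i.e. the inference is
# more robust.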
77 changes: 75 additions & 2 deletions R/helper_output_table.R
@@ -14,7 +14,8 @@
#' @importFrom broom tidy
#' @importFrom purrr modify_if
#' @importFrom stats cor
#' @importFrom dplyr select filter mutate
#' @importFrom ppcor pcor
#' @importFrom dplyr select filter mutate arrange %>%
#' @importFrom tibble tibble
#' @importFrom rlang !! enquo
output_table <- function(model_object, tested_variable) {
p <- all.vars(model_object$call)[1]
Expand Down Expand Up @@ -46,5 +47,77 @@ output_table <- function(model_object, tested_variable) {
round,
digits = 3)

return(model_output)
options(pillar.neg = FALSE)

# Observed Impact Table
impact_table <- tibble(
term = covariate_names,
`Cor(vX)` = NA_real_,
`Cor(vY)` = NA_real_,
Impact = NA_real_
)

for (i in seq_along(covariate_names)) {
covariate <- covariate_names[i]
if (all(c(covariate, tested_variable, p) %in% colnames(cor_df))) {
cor_vx <- cor_df[covariate, tested_variable]
cor_vy <- cor_df[covariate, p]
impact <- cor_vx * cor_vy
impact_table$`Cor(vX)`[i] <- round(cor_vx, 4)
impact_table$`Cor(vY)`[i] <- round(cor_vy, 4)
impact_table$Impact[i] <- round(impact, 4)
}
}

# Partial Impact Table
impact_table_partial <- tibble(
term = covariate_names,
`Partial Cor(vX)` = NA_real_,
`Partial Cor(vY)` = NA_real_,
Partial_Impact = NA_real_
)

# Compute partial correlations with error handling
for (i in seq_along(covariate_names)) {
covariate <- covariate_names[i]
tryCatch({
# unique() drops the duplicated covariate column so the correlation matrix is not singular
pcor_vx <- suppressWarnings(ppcor::pcor(model_object$model[, unique(c(covariate, tested_variable, covariate_names))])$estimate[1, 2])
pcor_vy <- suppressWarnings(ppcor::pcor(model_object$model[, unique(c(covariate, p, covariate_names))])$estimate[1, 2])
partial_impact <- pcor_vx * pcor_vy
impact_table_partial$`Partial Cor(vX)`[i] <- round(pcor_vx, 4)
impact_table_partial$`Partial Cor(vY)`[i] <- round(pcor_vy, 4)
impact_table_partial$Partial_Impact[i] <- round(partial_impact, 4)
}, error = function(e) {
# If the partial correlation cannot be computed, keep the NA placeholders
# already set in impact_table_partial for this row (assignments inside this
# handler would not modify the table in the enclosing environment)
NULL
})
}

# Sort Partial Impact Table in descending order
impact_table_partial <- impact_table_partial %>% arrange(desc(Partial_Impact))

cat(paste0("X represents ", tested_variable, ", Y represents ", p,
", v represents each covariate.\n",
"First table is based on unconditional correlations, second table is based on\n",
"partial correlations.\n\n"))

# Check if any row has all Partial Correlation components as NA
if (any(is.na(impact_table_partial$`Partial Cor(vX)`) &
is.na(impact_table_partial$`Partial Cor(vY)`) &
is.na(impact_table_partial$Partial_Impact))) {
stop(
paste0(
"Numerical instability detected in partial correlation. This indicates potential multicollinearity or scaling issues.\n",
"To resolve this issue, consider:\n",
"1) Standardize predictors with scale().\n",
"2) Remove or combine highly correlated predictors.\n",
"3) Apply regularization (e.g., ridge regression)."
)
)
}

# Return all three tables as a list
return(list(Main_Output = model_output, Unconditional_Impact = impact_table, Partial_Impact = impact_table_partial))
}
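As a usage sketch for the new return value: this assumes the package's top-level konfound() entry point with to_return = "table" dispatches to output_table(); the model, dataset, and variable below are illustrative, not part of this commit.

library(konfound)

# Any lm fit works; mtcars ships with base R
m <- lm(mpg ~ wt + hp + qsec, data = mtcars)

tables <- konfound(m, wt, to_return = "table")

tables$Main_Output            # the original tidied model/ITCV table
tables$Unconditional_Impact   # Cor(vX), Cor(vY), and Impact = Cor(vX) * Cor(vY) per covariate
tables$Partial_Impact         # the same quantities as partial correlations, sorted by impact

The partial table conditions each correlation on the remaining covariates, so the two tables can rank covariates differently.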
21 changes: 20 additions & 1 deletion R/test_sensitivity.R
@@ -290,6 +290,20 @@ test_sensitivity <- function(est_eff,
r_final <- (act_r_forVF - r_con * rycvGz)/sqrt((1 - r_con^2) * (1 - rycvGz^2))
## compare r_final with critical_r

# calculate ITCV benchmark only if r2xz and r2yz are not NA
benchmark_corr_product <- if (!is.na(r2xz) && !is.na(r2yz)) {
r2xz * r2yz
} else {
NA
}

# calculate itcv_ratio_to_benchmark only if benchmark_corr_product, uncond_rxcv, and uncond_rycv are not NA
itcv_ratio_to_benchmark <- if (!is.na(benchmark_corr_product) && !is.na(uncond_rxcv) && !is.na(uncond_rycv)) {
abs((uncond_rxcv * uncond_rycv) / benchmark_corr_product)
} else {
NA
}

# output dispatch

if (length(to_return) > 1) {
@@ -331,6 +345,8 @@
rxcvGz = rxcvGz, rycvGz = rycvGz,
itcvGz = itcvGz, itcv = uncond_rxcv * uncond_rycv,
r2xz = r2xz, r2yz = r2yz,
benchmark_corr_product = NA,
itcv_ratio_to_benchmark = NA,
delta_star = NA, delta_star_restricted = NA,
delta_exact = NA, delta_pctbias = NA,
cor_oster = NA, cor_exact = NA,
@@ -353,7 +369,10 @@
} else if (to_return == "corr_plot") {
return(plot_correlation(r_con = r_con, obs_r = obs_r, critical_r = critical_r))
} else if (to_return == "print") {
return(output_print(n_covariates, est_eff, beta_threshold, bias, sustain, nu, eff_thr, recase, obs_r, critical_r, r_con, itcv, alpha, index, far_bound, sdx, sdy, R2, uncond_rxcv, uncond_rycv, rxcvGz, rycvGz))
return(output_print(n_covariates, est_eff, beta_threshold, bias, sustain, nu, eff_thr,
recase, obs_r, critical_r, r_con, itcv, alpha, index, far_bound,
sdx, sdy, R2, uncond_rxcv, uncond_rycv, rxcvGz, rycvGz,
benchmark_corr_product, itcv_ratio_to_benchmark))
} else if (to_return == "table") {
return(output_table(model_object, tested_variable))
} else {
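End to end, the new fields reach the console through the "print" branch of the dispatch above. A hedged usage sketch: it assumes index = "IT" selects the ITCV output and that r2xz/r2yz are available for the fitted model; otherwise the benchmark block is skipped because both fields stay NA.

library(konfound)

m <- lm(mpg ~ wt + hp + qsec, data = mtcars)

# Routes through output_print(); when benchmark_corr_product is not NA the
# interpretation block added in helper_output_print.R is appended to the output.
konfound(m, wt, index = "IT", to_return = "print")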
6 changes: 3 additions & 3 deletions R/test_sensitivity_ln.R
@@ -322,9 +322,9 @@ test_sensitivity_ln <- function(est_eff,


### chi-square p
p_start_chi <- chisq.test(final_solution$table_start,correct = FALSE)$p.value
p_final_chi <- chisq.test(final_solution$table_final,correct = FALSE)$p.value

p_start_chi <- suppressWarnings(chisq.test(final_solution$table_start,correct = FALSE)$p.value)
p_final_chi <- suppressWarnings(chisq.test(final_solution$table_final,correct = FALSE)$p.value)
### Fisher's p
p_start_fisher <- suppressWarnings(fisher.test(final_solution$table_start)$p.value)
p_final_fisher <- suppressWarnings(fisher.test(final_solution$table_final)$p.value)
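For reference, the message being silenced here is R's small-expected-count warning from chisq.test(); a minimal reproduction with made-up counts:

# A 2x2 table with small expected cell counts triggers
# "Chi-squared approximation may be incorrect"
tbl <- matrix(c(3, 1, 2, 4), nrow = 2)

chisq.test(tbl, correct = FALSE)$p.value                    # emits the warning
suppressWarnings(chisq.test(tbl, correct = FALSE)$p.value)  # same p-value, no warning
fisher.test(tbl)$p.value                                    # exact test, already wrapped above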
