From 8e4e3d93077db664e5548b03e9053da06d6bf684 Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:15 -0400 Subject: [PATCH 01/31] DESCRIPTION: bump roxygen2 version --- DESCRIPTION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DESCRIPTION b/DESCRIPTION index ec27b49..31adbb7 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -52,4 +52,4 @@ Suggests: testthat (>= 2.1.0), withr Roxygen: list(markdown = TRUE) -RoxygenNote: 7.3.1 +RoxygenNote: 7.3.2 From eeaa1b9fbfe2a38bdec5730b58aeb02983fc7a16 Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:15 -0400 Subject: [PATCH 02/31] test-format-report.R: simplify an assertion The test of format_scores_for_render() constructs the set of expected criteria from the object read from *.scorecard.json, tweaking for the renames of the testing criteria. This makes it difficult for a reader to know what's expected. Reduce the amount of code logic in the test by replacing this logic with a hard-coded list of expected criteria. --- tests/testthat/test-format-report.R | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/testthat/test-format-report.R b/tests/testthat/test-format-report.R index 7044996..b91fe44 100644 --- a/tests/testthat/test-format-report.R +++ b/tests/testthat/test-format-report.R @@ -27,9 +27,13 @@ describe("formatting functions", { expect_true(all(grepl(paste0(RISK_LEVELS, collapse = "|"), scores_df$risk))) # ensure all category criteria are present - expected_criteria <- c(names(purrr::list_c(pkg_scores$scores)), "R CMD CHECK", "coverage") - expected_criteria <- expected_criteria[!grepl("check|covr", expected_criteria)] - expect_true(all(scores_df$criteria %in% expected_criteria)) + expect_setequal( + scores_df[["criteria"]], + c( + "R CMD CHECK", "coverage", + DOCUMENTATION_METRICS, MAINTENANCE_METRICS, TRANSPARENCY_METRICS + ) + ) # Check overall category scores overall_scores_df <- formatted_pkg_scores$formatted$overall_scores From bd85be5c3cbc5a98b6047d7897b7d1074705e90f Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:15 -0400 Subject: [PATCH 03/31] scorecard-template.Rmd: fix default parameter values The scorecard template uses values like "r NULL" and "`r NA`" for the parameter defaults. If these values aren't specified during the rmarkdown::render(), they come through as strings (e.g., "r NULL" and "`r NA`"), not the intended R objects. At the moment, this doesn't matter because all the values are supplied to the rmarkdown::render() call. But that will change when support for externally scored packages is added and dep_versions_df is left unspecified. Set the defaults to all these values to null, allowing downstream R code to check whether the parameter is specified with is.null(). 
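For example, with the old defaults an unsupplied dep_versions_df reaches
the template as a string, so a guard written against the R object never
fires; with a null default the check is simply is.null():

    # old default "`r NA`": unsupplied value arrives as the literal string
    identical(params$dep_versions_df, NA)  # FALSE
    # new default null: unsupplied value arrives as a real NULL
    is.null(params$dep_versions_df)        # TRUE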
--- inst/templates/scorecard-template.Rmd | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/inst/templates/scorecard-template.Rmd b/inst/templates/scorecard-template.Rmd index 386c8c1..12e4b00 100644 --- a/inst/templates/scorecard-template.Rmd +++ b/inst/templates/scorecard-template.Rmd @@ -21,11 +21,11 @@ header-includes: params: set_title: "Scorecard" scorecard_footer: "Generated with mpn.scorecard" - pkg_scores: "`r list()`" - comments_block: "r NULL" - extra_notes_data: "r NULL" - exports_df: "r NULL" - dep_versions_df: "`r NA`" + pkg_scores: null + comments_block: null + extra_notes_data: null + exports_df: null + dep_versions_df: null title: > `r params$set_title` subtitle: 'MPN Scorecard' @@ -98,7 +98,7 @@ format_metadata(formatted_pkg_scores$metadata) ``` ```{r, results = "asis"} -if (identical(params$dep_versions_df, NA)) { +if (is.null(params$dep_versions_df)) { stop("Required versions_df parameter not specified.") } format_dependency_versions(params$dep_versions_df) From 01ad5f7bae93772b5eedea6d39c9e9851f0b3f40 Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:15 -0400 Subject: [PATCH 04/31] render: allow omitting the dependency table The dependency table, at least as coded, is very tied to R. An upcoming commit will add support for rendering a scorecard for non-R packages. Place the guard in format_dependency_versions() rather than the scorecard-template.Rmd block for consistency with format_comments() and format_traceability_matrix(). --- R/format-report.R | 4 ++++ inst/templates/scorecard-template.Rmd | 3 --- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/R/format-report.R b/R/format-report.R index cdc7799..6c40673 100644 --- a/R/format-report.R +++ b/R/format-report.R @@ -414,6 +414,10 @@ format_metadata <- function(metadata_list){ # format()), but "format_" is used for consistency with other functions in this # file. format_dependency_versions <- function(df) { + if (is.null(df)) { + return(invisible(NULL)) + } + out <- prepare_dependency_versions(df) if (inherits(out, "flextable")) { # Note: knit_print.flextable() does _not_ print to stdout. diff --git a/inst/templates/scorecard-template.Rmd b/inst/templates/scorecard-template.Rmd index 12e4b00..b27ef49 100644 --- a/inst/templates/scorecard-template.Rmd +++ b/inst/templates/scorecard-template.Rmd @@ -98,9 +98,6 @@ format_metadata(formatted_pkg_scores$metadata) ``` ```{r, results = "asis"} -if (is.null(params$dep_versions_df)) { - stop("Required versions_df parameter not specified.") -} format_dependency_versions(params$dep_versions_df) ``` From 606fb494a695c9aeee50cc3356760058a961ea72 Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:16 -0400 Subject: [PATCH 05/31] render: don't assume coverage values are already rounded format_appendix() and format_scores_for_render() rely on the coverage values coming in rounded. An upcoming commit will allow rendering a scorecard for non-R packages, with coverage values coming from an external source. In that case, we can't expect the values to come in rounded, and, in any case, it makes sense for rendering code to be responsible for the rendered format. With this change, the values are always rendered to two decimal places (e.g., "100.00%" instead of "100%"). That has the advantage of nicer alignment in the table. 
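For example, coverage values now render as:

    sprintf("%.2f%%", 100)          # "100.00%" (previously "100%")
    sprintf("%.2f%%", 87.5)         # "87.50%"
    sprintf("%.2f%%", 0.755 * 100)  # "75.50%"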
--- R/format-report.R | 2 +- R/render-scorecard.R | 4 +++- tests/testthat/test-format-report.R | 4 ++-- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/R/format-report.R b/R/format-report.R index 6c40673..8ecb8e1 100644 --- a/R/format-report.R +++ b/R/format-report.R @@ -758,7 +758,7 @@ format_appendix <- function(extra_notes_data, return_vals = FALSE){ covr_results_df <- extra_notes_data$covr_results_df if (is.numeric(covr_results_df$test_coverage)) { covr_results_df <- covr_results_df %>% - dplyr::mutate(test_coverage = paste0(.data$test_coverage, "%")) %>% + dplyr::mutate(test_coverage = sprintf("%.2f%%", .data$test_coverage)) %>% format_colnames_to_title() # Create flextable and format diff --git a/R/render-scorecard.R b/R/render-scorecard.R index fa2129e..a2e5e7c 100644 --- a/R/render-scorecard.R +++ b/R/render-scorecard.R @@ -126,7 +126,9 @@ format_scores_for_render <- function(pkg_scores, risk_breaks = c(0.3, 0.7)) { if(category_name == "testing"){ formatted_df <- formatted_df %>% mutate( result = ifelse( - (.data$criteria == "covr" & !is.na(.data$score)), paste0(.data$score*100, "%"), .data$result + .data$criteria == "covr" & !is.na(.data$score), + sprintf("%.2f%%", .data$score * 100), + .data$result ), criteria = ifelse(.data$criteria == "check", "R CMD CHECK", "coverage") ) diff --git a/tests/testthat/test-format-report.R b/tests/testthat/test-format-report.R index b91fe44..f74b5e8 100644 --- a/tests/testthat/test-format-report.R +++ b/tests/testthat/test-format-report.R @@ -85,7 +85,7 @@ describe("formatting functions", { expect_true(all(c("Criteria", "Score", "Result", "Risk") %in% names(flex_df$body$dataset))) expect_true(all(c("Criteria", "Result", "Risk") %in% flex_df$col_keys)) expect_equal(flex_df$body$dataset$Score, c(1, 1)) - expect_equal(flex_df$body$dataset$Result, c("Passing (score: 1)", "100%")) + expect_equal(flex_df$body$dataset$Result, c("Passing (score: 1)", "100.00%")) ## High rcmdcheck score ## result_dir <- result_dirs_select[["pass_no_docs"]] @@ -97,7 +97,7 @@ describe("formatting functions", { flex_df <- format_testing_scores(formatted_pkg_scores) expect_equal(flex_df$body$dataset$Score, c(0.75, 1)) - expect_equal(flex_df$body$dataset$Result, c("Passing (score: 0.75)", "100%")) + expect_equal(flex_df$body$dataset$Result, c("Passing (score: 0.75)", "100.00%")) ## Failing rcmdcheck score ## From 7d809f6a13a9f6f3990387137c1243b0382f9c53 Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:16 -0400 Subject: [PATCH 06/31] format_traceability_matrix: don't require test_dirs column An upcoming commit will add support for externally scored packages, and test_dirs won't be provided for that input.
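A rough sketch of the two input shapes format_traceability_matrix() now
handles (values are invented for illustration):

    internal <- tibble::tibble(
      exported_function = "foo",
      code_file = "R/foo.R",
      documentation = "foo.Rd",
      test_files = list("test-foo.R"),
      test_dirs = list("tests/testthat")
    )
    external <- dplyr::select(internal, -"test_dirs")
    # Both now render; the "Testing directories" footer is simply omitted
    # when the test_dirs column is absent.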
--- R/format-report.R | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/R/format-report.R b/R/format-report.R index 8ecb8e1..86a35cf 100644 --- a/R/format-report.R +++ b/R/format-report.R @@ -687,11 +687,14 @@ format_traceability_matrix <- function( ) # Get testing directories for caption - test_dirs <- exported_func_df %>% tidyr::unnest(test_dirs) %>% dplyr::pull(test_dirs) %>% unique() - test_dirs <- test_dirs[test_dirs != ""] %>% paste(collapse = ", ") - - # Remove testing directory column (not a column due to horizontal space limits) - exported_func_df <- exported_func_df %>% dplyr::select(-"test_dirs") + if ("test_dirs" %in% names(exported_func_df)) { + test_dirs <- exported_func_df %>% tidyr::unnest(test_dirs) %>% dplyr::pull(test_dirs) %>% unique() + test_dirs <- test_dirs[test_dirs != ""] %>% paste(collapse = ", ") + # Remove testing directory column (not a column due to horizontal space limits) + exported_func_df <- exported_func_df %>% dplyr::select(-"test_dirs") + } else { + test_dirs <- NULL + } # Format Table if(isTRUE(wrap_cols)){ @@ -709,11 +712,15 @@ format_traceability_matrix <- function( # Create flextable exported_func_flex <- flextable_formatted(exported_func_df, pg_width = 7, font_size = 9) %>% - flextable::set_caption("Traceability Matrix") %>% - flextable::add_footer_row( + flextable::set_caption("Traceability Matrix") + + if (!is.null(test_dirs)) { + exported_func_flex <- flextable::add_footer_row( + exported_func_flex, values = flextable::as_paragraph(glue::glue("Testing directories: {test_dirs}")), colwidths = c(4) ) + } # Add stripe and other formatting details exported_func_flex <- exported_func_flex %>% From b6527fc15b8741936cfc99f659a335d0d472e566 Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:16 -0400 Subject: [PATCH 07/31] aaa.R: assign *.scorecard.json keys to a variable --- R/aaa.R | 12 ++++++++++++ tests/testthat/test-score-pkg.R | 12 ++---------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/R/aaa.R b/R/aaa.R index 72ddcab..d179877 100644 --- a/R/aaa.R +++ b/R/aaa.R @@ -17,3 +17,15 @@ DOCUMENTATION_METRICS <- c("has_vignettes", "has_website", "has_news") #, export MAINTENANCE_METRICS <- c("has_maintainer", "news_current")#, "last_30_bugs_status") TRANSPARENCY_METRICS <- c("has_source_control", "has_bug_reports_url") TESTING_METRICS <- c("check", "covr") + +SCORECARD_JSON_KEYS <- c( + "mpn_scorecard_version", + "pkg_name", + "pkg_version", + "out_dir", + "pkg_tar_path", + "md5sum_check", + "scores", + "metadata", + "category_scores" +) diff --git a/tests/testthat/test-score-pkg.R b/tests/testthat/test-score-pkg.R index d88cf1f..a098da3 100644 --- a/tests/testthat/test-score-pkg.R +++ b/tests/testthat/test-score-pkg.R @@ -25,11 +25,7 @@ describe("score_pkg", { pkg_scores <- jsonlite::fromJSON(json_path) # Check json attributes - expect_equal( - names(pkg_scores), - c("mpn_scorecard_version","pkg_name", "pkg_version", "out_dir", - "pkg_tar_path", "md5sum_check", "scores", "metadata", "category_scores") - ) + expect_identical(names(pkg_scores), SCORECARD_JSON_KEYS) # These tests also serve to confirm the correct environment vars in `local_check_envvar` were set # Check category scores @@ -70,11 +66,7 @@ describe("score_pkg", { pkg_scores <- jsonlite::fromJSON(json_path) # Check json attributes - expect_equal( - names(pkg_scores), - c("mpn_scorecard_version", "pkg_name", "pkg_version", "out_dir", - "pkg_tar_path", "md5sum_check", "scores", "metadata", "category_scores") - ) + 
expect_identical(names(pkg_scores), SCORECARD_JSON_KEYS) expect_equal(pkg_scores$scores$testing$check, 0) expect_equal(pkg_scores$scores$testing$covr, "NA") # NA character when read from json From 65b029fbe70b6ed6cbb124dfbe01b3bb0454e5ba Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:16 -0400 Subject: [PATCH 08/31] score_pkg: rename 'covr' metric to 'coverage' 'covr' is tied to a particular package and language. Instead choose a name that is based on the result type. --- R/aaa.R | 2 +- R/calc-overall-scores.R | 4 ++-- R/render-scorecard.R | 21 +++++++++++++++++++-- R/score-pkg.R | 2 +- tests/testthat/test-score-pkg.R | 8 ++++---- 5 files changed, 27 insertions(+), 10 deletions(-) diff --git a/R/aaa.R b/R/aaa.R index d179877..dff4f6e 100644 --- a/R/aaa.R +++ b/R/aaa.R @@ -16,7 +16,7 @@ utils::globalVariables(c(".")) DOCUMENTATION_METRICS <- c("has_vignettes", "has_website", "has_news") #, export_help) MAINTENANCE_METRICS <- c("has_maintainer", "news_current")#, "last_30_bugs_status") TRANSPARENCY_METRICS <- c("has_source_control", "has_bug_reports_url") -TESTING_METRICS <- c("check", "covr") +TESTING_METRICS <- c("check", "coverage") SCORECARD_JSON_KEYS <- c( "mpn_scorecard_version", diff --git a/R/calc-overall-scores.R b/R/calc-overall-scores.R index c8616e3..9b0e128 100644 --- a/R/calc-overall-scores.R +++ b/R/calc-overall-scores.R @@ -14,8 +14,8 @@ calc_overall_scores <- function(scorelist) { scorelist$category_scores <- purrr::map(categories, ~{ indiv_scores <- unlist(scorelist$scores[[.x]]) # Penalize coverage failures: NA --> 0 - if("covr" %in% names(indiv_scores) && is.na(indiv_scores[["covr"]])){ - indiv_scores[["covr"]] <- 0 + if ("coverage" %in% names(indiv_scores) && is.na(indiv_scores[["coverage"]])) { + indiv_scores[["coverage"]] <- 0 } round(mean(indiv_scores), 3) }) %>% purrr::set_names(categories) diff --git a/R/render-scorecard.R b/R/render-scorecard.R index a2e5e7c..0eed202 100644 --- a/R/render-scorecard.R +++ b/R/render-scorecard.R @@ -35,6 +35,7 @@ render_scorecard <- function( # load scores from JSON pkg_scores <- jsonlite::fromJSON(json_path) + pkg_scores <- scorecard_json_compat(pkg_scores, json_path) check_scores_valid(pkg_scores, json_path) # map scores to risk and format into tables to be written to PDF @@ -84,6 +85,22 @@ render_scorecard <- function( } +scorecard_json_compat <- function(data, path) { + # Handle files written by score_pkg() before it renamed "covr" to "coverage". 
+ tscores <- data[["scores"]][["testing"]] + if (is.null(tscores[["coverage"]])) { + + covr_val <- tscores[["covr"]] + if (is.null(covr_val)) { + abort(paste("Expected either 'coverage' or 'covr' value in", path)) + } + tscores[["coverage"]] <- covr_val + tscores[["covr"]] <- NULL + data[["scores"]][["testing"]] <- tscores + } + + return(data) +} #' Prepare the raw risk scores to be rendered into PDF #' @@ -126,7 +143,7 @@ format_scores_for_render <- function(pkg_scores, risk_breaks = c(0.3, 0.7)) { if(category_name == "testing"){ formatted_df <- formatted_df %>% mutate( result = ifelse( - .data$criteria == "covr" & !is.na(.data$score), + .data$criteria == "coverage" & !is.na(.data$score), sprintf("%.2f%%", .data$score * 100), .data$result ), criteria = ifelse(.data$criteria == "check", "R CMD CHECK", "coverage") ) @@ -187,7 +204,7 @@ map_answer <- function(scores, criteria, answer_breaks = c(0, 1)) { } else { paste0("Passing (score: ", .x, ")") } -}else if(.y != "covr"){ + } else if (.y != "coverage") { if (.x == answer_breaks[1]) { "No" } else if(.x == answer_breaks[2]) { diff --git a/R/score-pkg.R b/R/score-pkg.R index 9d6d5b3..5f6f632 100644 --- a/R/score-pkg.R +++ b/R/score-pkg.R @@ -75,7 +75,7 @@ score_pkg <- function( # run check and covr and write results to disk rcmdcheck_args$path <- pkg res$scores$testing$check <- add_rcmdcheck(out_dir, rcmdcheck_args) # use tarball - res$scores$testing$covr <- add_coverage( + res$scores$testing$coverage <- add_coverage( pkg_source_path, # must use untarred package dir out_dir, covr_timeout ) diff --git a/tests/testthat/test-score-pkg.R b/tests/testthat/test-score-pkg.R index a098da3..1c9cb9b 100644 --- a/tests/testthat/test-score-pkg.R +++ b/tests/testthat/test-score-pkg.R @@ -37,7 +37,7 @@ describe("score_pkg", { # check individual scores expect_equal(pkg_scores$scores$testing$check, 1) - expect_equal(pkg_scores$scores$testing$covr, 1) + expect_equal(pkg_scores$scores$testing$coverage, 1) }) @@ -69,7 +69,7 @@ describe("score_pkg", { expect_identical(names(pkg_scores), SCORECARD_JSON_KEYS) expect_equal(pkg_scores$scores$testing$check, 0) - expect_equal(pkg_scores$scores$testing$covr, "NA") # NA character when read from json + expect_equal(pkg_scores$scores$testing$coverage, "NA") # NA character when read from json expect_equal(pkg_scores$category_scores$testing, 0) # confirm overall category score is still a number }) @@ -91,7 +91,7 @@ describe("score_pkg", { pkg_scores <- jsonlite::fromJSON(json_path) expect_equal(pkg_scores$scores$testing$check, 1) - expect_identical(pkg_scores$scores$testing$covr, "NA") + expect_identical(pkg_scores$scores$testing$coverage, "NA") covr_res <- readRDS(get_result_path(result_dir, "covr.rds")) expect_s3_class(covr_res$errors, "callr_timeout_error") @@ -155,7 +155,7 @@ describe("score_pkg", { expect_equal(res$category_scores$overall, 0.5) # With failed coverage - pkg_scores$scores$testing$covr <- NA + pkg_scores$scores$testing$coverage <- NA res <- calc_overall_scores(pkg_scores) expect_equal(res$category_scores$testing, 0.5) expect_equal(res$category_scores$overall, 0.3) From 4602372ca82723cf817b0ab10f409d2ad4c2c6ea Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:16 -0400 Subject: [PATCH 09/31] format_appendix: rename 'R Script' column to 'Code File' An upcoming commit will add support for externally scored non-R packages, in which case 'R Script' doesn't work for the column name. We could switch from 'R script' to something else at runtime based on the type of package being scored.
However, 'Code File' seems appropriate enough for the coverage result of any type of package, so go with that to avoid unnecessary branching in the code and divergence in the report. --- R/create-extra-notes.R | 8 ++++---- R/format-report.R | 2 +- tests/testthat/test-create-extra-notes.R | 6 +++--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/R/create-extra-notes.R b/R/create-extra-notes.R index cd3a8bb..2200d50 100644 --- a/R/create-extra-notes.R +++ b/R/create-extra-notes.R @@ -16,18 +16,18 @@ create_extra_notes <- function( covr_results <- readRDS(covr_path) if (inherits(covr_results$errors, "error")) { covr_results_df <- data.frame( - r_script = "File coverage failed", + code_file = "File coverage failed", test_coverage = conditionMessage(covr_results$errors) ) } else if (length(covr_results$coverage$filecoverage)) { covr_results_df <- covr_results$coverage$filecoverage %>% as.data.frame() covr_results_df <- covr_results_df %>% - mutate(r_script = row.names(covr_results_df)) %>% - dplyr::select("r_script", "test_coverage" = ".") + mutate(code_file = row.names(covr_results_df)) %>% + dplyr::select("code_file", "test_coverage" = ".") row.names(covr_results_df) <- NULL } else { covr_results_df <- data.frame( - r_script = "No coverage results", + code_file = "No coverage results", test_coverage = covr_results$notes ) } diff --git a/R/format-report.R b/R/format-report.R index 86a35cf..b4902a6 100644 --- a/R/format-report.R +++ b/R/format-report.R @@ -807,7 +807,7 @@ format_appendix <- function(extra_notes_data, return_vals = FALSE){ cat("\n") if (is.null(covr_results_flex)) { - err_type <- covr_results_df$r_script + err_type <- covr_results_df$code_file if (identical(err_type, "File coverage failed")) { cat("\n\nCalculating code coverage failed with following error:\n\n") cat_verbatim(covr_results_df$test_coverage) diff --git a/tests/testthat/test-create-extra-notes.R b/tests/testthat/test-create-extra-notes.R index 935207a..4ddbbb5 100644 --- a/tests/testthat/test-create-extra-notes.R +++ b/tests/testthat/test-create-extra-notes.R @@ -9,7 +9,7 @@ describe("creating extra notes", { # Confirm values - covr expect_equal(unique(extra_notes_data$covr_results_df$test_coverage), 100) - expect_equal(unique(extra_notes_data$covr_results_df$r_script), "R/myscript.R") + expect_equal(unique(extra_notes_data$covr_results_df$code_file), "R/myscript.R") # Confirm values - R CMD Check expect_true(grepl("Status: OK", extra_notes_data$check_output)) @@ -23,7 +23,7 @@ describe("creating extra notes", { # Confirm values - covr expect_true(grepl("cannot open", unique(extra_notes_data$covr_results_df$test_coverage))) - expect_identical(extra_notes_data$covr_results_df$r_script, "File coverage failed") + expect_identical(extra_notes_data$covr_results_df$code_file, "File coverage failed") # Confirm values - R CMD Check expect_true(grepl("ERROR", extra_notes_data$check_output)) }) @@ -35,7 +35,7 @@ describe("creating extra notes", { result_dir <- setups$pkg_result_dir[case] res <- create_extra_notes(result_dir) expect_identical( - res$covr_results_df$r_script, + res$covr_results_df$code_file, "No coverage results" ) expect_identical( From 8022da52e6b54ce32dcaf9ea364b98ccbf3abb05 Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:16 -0400 Subject: [PATCH 10/31] create_extra_notes: rename covr_results_df to cov_results_df Use a key that can also work for the coverage data coming in from externally scored non-R packages. 
--- R/create-extra-notes.R | 2 +- R/format-report.R | 2 +- tests/testthat/test-create-extra-notes.R | 12 ++++++------ 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/R/create-extra-notes.R b/R/create-extra-notes.R index 2200d50..fdfa725 100644 --- a/R/create-extra-notes.R +++ b/R/create-extra-notes.R @@ -34,7 +34,7 @@ create_extra_notes <- function( return( list( - covr_results_df = covr_results_df, + cov_results_df = covr_results_df, check_output = check_results$stdout ) ) diff --git a/R/format-report.R b/R/format-report.R index b4902a6..8a96658 100644 --- a/R/format-report.R +++ b/R/format-report.R @@ -762,7 +762,7 @@ format_appendix <- function(extra_notes_data, return_vals = FALSE){ ### Covr Results ### # Format Table - covr_results_df <- extra_notes_data$covr_results_df + covr_results_df <- extra_notes_data$cov_results_df if (is.numeric(covr_results_df$test_coverage)) { covr_results_df <- covr_results_df %>% dplyr::mutate(test_coverage = sprintf("%.2f%%", .data$test_coverage)) %>% diff --git a/tests/testthat/test-create-extra-notes.R b/tests/testthat/test-create-extra-notes.R index 4ddbbb5..944361f 100644 --- a/tests/testthat/test-create-extra-notes.R +++ b/tests/testthat/test-create-extra-notes.R @@ -8,8 +8,8 @@ describe("creating extra notes", { extra_notes_data <- create_extra_notes(result_dir_x) # Confirm values - covr - expect_equal(unique(extra_notes_data$covr_results_df$test_coverage), 100) - expect_equal(unique(extra_notes_data$covr_results_df$code_file), "R/myscript.R") + expect_equal(unique(extra_notes_data$cov_results_df$test_coverage), 100) + expect_equal(unique(extra_notes_data$cov_results_df$code_file), "R/myscript.R") # Confirm values - R CMD Check expect_true(grepl("Status: OK", extra_notes_data$check_output)) @@ -22,8 +22,8 @@ describe("creating extra notes", { extra_notes_data <- create_extra_notes(result_dir_x) # Confirm values - covr - expect_true(grepl("cannot open", unique(extra_notes_data$covr_results_df$test_coverage))) - expect_identical(extra_notes_data$covr_results_df$code_file, "File coverage failed") + expect_true(grepl("cannot open", unique(extra_notes_data$cov_results_df$test_coverage))) + expect_identical(extra_notes_data$cov_results_df$code_file, "File coverage failed") # Confirm values - R CMD Check expect_true(grepl("ERROR", extra_notes_data$check_output)) }) @@ -35,11 +35,11 @@ describe("creating extra notes", { result_dir <- setups$pkg_result_dir[case] res <- create_extra_notes(result_dir) expect_identical( - res$covr_results_df$code_file, + res$cov_results_df$code_file, "No coverage results" ) expect_identical( - res$covr_results_df$test_coverage, + res$cov_results_df$test_coverage, "no testable functions found" ) }) From af60b6db4cbe04875dc3033a781a361b3461b811 Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:16 -0400 Subject: [PATCH 11/31] format_appendix: rename variables to replace "covr" with "cov" format_appendix() will also process coverage data from non-R packages, so choose a name that reflects that. 
--- R/format-report.R | 24 ++++++++++++------------ tests/testthat/test-format-report.R | 6 +++--- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/R/format-report.R b/R/format-report.R index 8a96658..96638b2 100644 --- a/R/format-report.R +++ b/R/format-report.R @@ -762,14 +762,14 @@ format_appendix <- function(extra_notes_data, return_vals = FALSE){ ### Covr Results ### # Format Table - covr_results_df <- extra_notes_data$cov_results_df - if (is.numeric(covr_results_df$test_coverage)) { - covr_results_df <- covr_results_df %>% + cov_results_df <- extra_notes_data$cov_results_df + if (is.numeric(cov_results_df$test_coverage)) { + cov_results_df <- cov_results_df %>% dplyr::mutate(test_coverage = sprintf("%.2f%%", .data$test_coverage)) %>% format_colnames_to_title() # Create flextable and format - covr_results_flex <- flextable_formatted(covr_results_df, pg_width = 4) %>% + cov_results_flex <- flextable_formatted(cov_results_df, pg_width = 4) %>% flextable::set_caption("Test Coverage") %>% flextable::align(align = "right", part = "all", j=2) %>% flextable::add_footer_row( @@ -779,11 +779,11 @@ format_appendix <- function(extra_notes_data, return_vals = FALSE){ )), colwidths = c(2) ) - covr_results_flex <- covr_results_flex %>% flex_header() %>% + cov_results_flex <- cov_results_flex %>% flex_header() %>% flex_footer(footer_bg = "transparent", footer_ft = "black") %>% flex_stripe(border = FALSE) } else { - covr_results_flex <- NULL + cov_results_flex <- NULL } ### R CMD Check Results ### @@ -792,7 +792,7 @@ format_appendix <- function(extra_notes_data, return_vals = FALSE){ if(isTRUE(return_vals)){ return( list( - covr_results_flex = covr_results_flex, + cov_results_flex = cov_results_flex, check_output = check_output ) ) @@ -806,21 +806,21 @@ format_appendix <- function(extra_notes_data, return_vals = FALSE){ cat(sub_header_strs[2]) cat("\n") - if (is.null(covr_results_flex)) { - err_type <- covr_results_df$code_file + if (is.null(cov_results_flex)) { + err_type <- cov_results_df$code_file if (identical(err_type, "File coverage failed")) { cat("\n\nCalculating code coverage failed with following error:\n\n") - cat_verbatim(covr_results_df$test_coverage) + cat_verbatim(cov_results_df$test_coverage) } else if (identical(err_type, "No coverage results")) { cat( "\n\n", "Unable to calculate coverage: ", - covr_results_df$test_coverage, "\n\n" + cov_results_df$test_coverage, "\n\n" ) } else { stop("Unknown error type: ", err_type) } } else { - cat(knitr::knit_print(covr_results_flex)) + cat(knitr::knit_print(cov_results_flex)) } cat("\n") diff --git a/tests/testthat/test-format-report.R b/tests/testthat/test-format-report.R index f74b5e8..4d7ff29 100644 --- a/tests/testthat/test-format-report.R +++ b/tests/testthat/test-format-report.R @@ -193,13 +193,13 @@ describe("formatting functions", { extra_notes_frmt <- format_appendix(extra_notes_data, return_vals = TRUE) # Test covr dataframe - covr_results_df <- extra_notes_frmt$covr_results_flex$body$dataset + covr_results_df <- extra_notes_frmt$cov_results_flex$body$dataset expect_equal( - names(format_colnames_to_title(extra_notes_data$covr_results_df)), + names(format_colnames_to_title(extra_notes_data$cov_results_df)), names(covr_results_df) ) expect_equal( - unique(unname(unlist(extra_notes_frmt$covr_results_flex$footer$dataset))), + unique(unname(unlist(extra_notes_frmt$cov_results_flex$footer$dataset))), paste( "Test coverage is calculated per script, rather than per function.", "See Traceability Matrix for 
function-to-test-file mapping." From 599394ff34ce5f5f83b28601dfb30e80a89f3d46 Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:16 -0400 Subject: [PATCH 12/31] map_answer: make including check score optional The 'R CMD check' score factors in the numbers of notes/warnings/errors in addition to whether the tests passed or failed, so it's a score in the 0 to 1 range. However, package types other than R will likely only have a pass/fail, in which case it doesn't make sense to display the specific score. --- R/render-scorecard.R | 10 ++++++++-- man/map_answer.Rd | 9 ++++++++- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/R/render-scorecard.R b/R/render-scorecard.R index 0eed202..7d2b533 100644 --- a/R/render-scorecard.R +++ b/R/render-scorecard.R @@ -184,6 +184,7 @@ map_risk <- function(scores, risk_breaks) { #' @param scores vector of risk scores #' @param criteria vector of criteria names #' @param answer_breaks breaks determining 'Yes'/'Passing' or 'No'/'Failed'. `NA` has special handling. See details. +#' @param include_check_score Whether to include score in the result check. #' #' @details #' If value is not found in `answer_breaks`, it is skipped over @@ -193,7 +194,8 @@ map_risk <- function(scores, risk_breaks) { #' covr is skipped over unless it is `NA` (indicates test failures), as this is formatted as a percent separately #' #' @keywords internal -map_answer <- function(scores, criteria, answer_breaks = c(0, 1)) { +map_answer <- function(scores, criteria, answer_breaks = c(0, 1), + include_check_score = TRUE) { checkmate::assert_numeric(scores, lower = 0, upper = 1) answer_breaks <- sort(answer_breaks) purrr::map2_chr(scores, criteria, ~ { @@ -202,7 +204,11 @@ map_answer <- function(scores, criteria, answer_breaks = c(0, 1)) { if(is.na(.x) || .x == answer_breaks[1]) { "Failed" } else { - paste0("Passing (score: ", .x, ")") + if (isTRUE(include_check_score)) { + paste0("Passing (score: ", .x, ")") + } else { + "Passing" + } } } else if (.y != "coverage") { if (.x == answer_breaks[1]) { diff --git a/man/map_answer.Rd b/man/map_answer.Rd index 0cc0e98..ae59439 100644 --- a/man/map_answer.Rd +++ b/man/map_answer.Rd @@ -4,7 +4,12 @@ \alias{map_answer} \title{Use answer_breaks to map results into character strings} \usage{ -map_answer(scores, criteria, answer_breaks = c(0, 1)) +map_answer( + scores, + criteria, + answer_breaks = c(0, 1), + include_check_score = TRUE +) } \arguments{ \item{scores}{vector of risk scores} @@ -12,6 +17,8 @@ map_answer(scores, criteria, answer_breaks = c(0, 1)) \item{criteria}{vector of criteria names} \item{answer_breaks}{breaks determining 'Yes'/'Passing' or 'No'/'Failed'. \code{NA} has special handling. See details.} + +\item{include_check_score}{Whether to include score in the result check.} } \description{ Use answer_breaks to map results into character strings From 693d1c5e7cae31fa45f50072191c5f5f62799394 Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:16 -0400 Subject: [PATCH 13/31] format_traceability_matrix: avoid formula for purrr function Formulas prevent 'R CMD check' from detecting issues like undefined variables. Use a standard anonymous function instead. 
--- R/format-report.R | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/R/format-report.R b/R/format-report.R index 96638b2..eea0535 100644 --- a/R/format-report.R +++ b/R/format-report.R @@ -700,8 +700,10 @@ format_traceability_matrix <- function( if(isTRUE(wrap_cols)){ exported_func_df <- exported_func_df %>% dplyr::mutate( - dplyr::across("exported_function":"documentation", ~ - wrap_text(.x, width = 24, indent = TRUE, strict = TRUE)), + dplyr::across( + "exported_function":"documentation", + function(x) wrap_text(x, width = 24, indent = TRUE, strict = TRUE) + ), # Tests can be longer due to page width (pg_width) settings (we make it wider) test_files = purrr::map_chr(.data$test_files, function(tests){ wrap_text(tests, width = 40, strict = TRUE, wrap_sym = NULL) From dcd2ee2ceeca22e490ced251cf2d3a2c3ed84adb Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:16 -0400 Subject: [PATCH 14/31] format_traceability_matrix: avoid range for column selection format_traceability_matrix() selects columns for text wrapping with a range. Instead use a vector, which makes it easier for the reader to know which columns were intended and makes it less likely to behave unexpectedly if the data frame structure changes. --- NAMESPACE | 1 + R/aaa.R | 2 +- R/format-report.R | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/NAMESPACE b/NAMESPACE index 33e98ba..7f27c55 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -25,4 +25,5 @@ importFrom(rlang,abort) importFrom(rlang,inform) importFrom(rlang,sym) importFrom(rlang,warn) +importFrom(tidyselect,all_of) importFrom(tidyselect,everything) diff --git a/R/aaa.R b/R/aaa.R index dff4f6e..3e620fa 100644 --- a/R/aaa.R +++ b/R/aaa.R @@ -1,5 +1,5 @@ -#' @importFrom tidyselect everything +#' @importFrom tidyselect everything all_of NULL diff --git a/R/format-report.R b/R/format-report.R index eea0535..3334350 100644 --- a/R/format-report.R +++ b/R/format-report.R @@ -701,7 +701,7 @@ format_traceability_matrix <- function( exported_func_df <- exported_func_df %>% dplyr::mutate( dplyr::across( - "exported_function":"documentation", + all_of(c("exported_function", "code_file", "documentation")), function(x) wrap_text(x, width = 24, indent = TRUE, strict = TRUE) ), # Tests can be longer due to page width (pg_width) settings (we make it wider) From 91f88b88f6e23a80216256c55c79ef22ec2434cb Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:16 -0400 Subject: [PATCH 15/31] format_metadata: don't error if sys or env_vars is missing If either sys or env_vars is missing, format_metadata() errors with Error in t.default(unlist(NULL)) : argument is not a matrix To prepare for the upcoming introduction of the externally scored input, rework format_metadata() to not require sys and env_vars and to abort if date or executor isn't specified. 
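A sketch of the reworked behavior, borrowing values from the external
metadata example introduced later in this series:

    format_metadata(list(
      date = "2024-08-01 08:19:12",
      executor = "Bobert",
      info = list(
        sys = list(sysname = "Linux", machine = "x86_64"),
        env_vars = list(METWORX_VERSION = "22.09")
      )
    ))
    # Builds the two-column Category/Value table from date, executor, and
    # whatever sys/env_vars entries are present; leaving out `info` entirely
    # no longer errors.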
--- R/format-report.R | 22 ++++++++++++++-------- tests/testthat/test-format-report.R | 11 +++++++++++ 2 files changed, 25 insertions(+), 8 deletions(-) diff --git a/R/format-report.R b/R/format-report.R index 3334350..7d3cc02 100644 --- a/R/format-report.R +++ b/R/format-report.R @@ -388,16 +388,22 @@ format_testing_scores <- function(formatted_pkg_scores){ #' #' @keywords internal format_metadata <- function(metadata_list){ - # Create system info table - executor_tbl <- data.frame(executor = metadata_list$executor) - data_tbl <- data.frame(date = metadata_list$date) - env_vars_tbl <- as.data.frame(t(unlist(metadata_list$info$env_vars))) - system_info_tbl <- as.data.frame(t(unlist(metadata_list$info$sys))) + date <- metadata_list[["date"]] + if (is.null(date)) { + abort("`date` required in `metadata_list`") + } + executor <- metadata_list[["executor"]] + if (is.null(executor)) { + abort("`executor` required in `metadata_list`") + } + info <- metadata_list[["info"]] + data <- c(date = date, executor = executor, info[["sys"]], info[["env_vars"]]) - all_info_tbl <- cbind(data_tbl, executor_tbl, system_info_tbl, env_vars_tbl) - all_info_tbl <- data.frame(Category = stringr::str_to_title(names(all_info_tbl)), - Value = unlist(all_info_tbl)) + all_info_tbl <- data.frame( + Category = stringr::str_to_title(names(data)), + Value = unname(unlist(data)) + ) # Create flextable system_info_flextable <- diff --git a/tests/testthat/test-format-report.R b/tests/testthat/test-format-report.R index 4d7ff29..d01e1b4 100644 --- a/tests/testthat/test-format-report.R +++ b/tests/testthat/test-format-report.R @@ -238,6 +238,17 @@ describe("formatting functions", { "Unable to calculate R dependency table due to failing `R CMD check`." ) }) + + it("format_metadata handles NULL input", { + flex_df <- format_metadata(list(date = "2024-01-01", executor = "foo")) + expect_identical( + flex_df[["body"]][["dataset"]], + data.frame( + Category = c("Date", "Executor"), + Value = c("2024-01-01", "foo") + ) + ) + }) }) describe("cat_verbatim", { From f81aa48a1a62c34ffe7ec5d7ee40b0bac16e381f Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:16 -0400 Subject: [PATCH 16/31] score_pkg: record scorecard_type An upcoming commit will add support for non-R packages. In practice, we can detect an R package versus non-R package by looking at the files in the results directory, but we might as well explicitly record a type when dumping the scores for an R package. --- R/aaa.R | 1 + R/render-scorecard.R | 5 +++++ R/score-pkg.R | 1 + 3 files changed, 7 insertions(+) diff --git a/R/aaa.R b/R/aaa.R index 3e620fa..e139b30 100644 --- a/R/aaa.R +++ b/R/aaa.R @@ -22,6 +22,7 @@ SCORECARD_JSON_KEYS <- c( "mpn_scorecard_version", "pkg_name", "pkg_version", + "scorecard_type", "out_dir", "pkg_tar_path", "md5sum_check", diff --git a/R/render-scorecard.R b/R/render-scorecard.R index 7d2b533..5559213 100644 --- a/R/render-scorecard.R +++ b/R/render-scorecard.R @@ -86,6 +86,11 @@ render_scorecard <- function( } scorecard_json_compat <- function(data, path) { + # Handle files written by score_pkg() before it included scorecard_type. + if (is.null(data[["scorecard_type"]])) { + data[["scorecard_type"]] <- "R" + } + # Handle files written by score_pkg() before it renamed "covr" to "coverage". 
tscores <- data[["scores"]][["testing"]] if (is.null(tscores[["coverage"]])) { diff --git a/R/score-pkg.R b/R/score-pkg.R index 5f6f632..b79112a 100644 --- a/R/score-pkg.R +++ b/R/score-pkg.R @@ -57,6 +57,7 @@ score_pkg <- function( mpn_scorecard_version = mpn_scorecard_ver, pkg_name = pkg_name, pkg_version = pkg_ver, + scorecard_type = "R", out_dir = out_dir, pkg_tar_path = pkg, md5sum_check = tools::md5sum(pkg), From 66236188977e6b6fbbae5271fb171fbb685e8ac8 Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:16 -0400 Subject: [PATCH 17/31] score_pkg: add mpn_scorecard_format key to *.scorecard.json Version the on-disk format. If we need to do a major rework of the format, this provides a mechanism to signal the change and to detect older formats. --- R/aaa.R | 3 +++ R/score-pkg.R | 1 + 2 files changed, 4 insertions(+) diff --git a/R/aaa.R b/R/aaa.R index e139b30..3d12264 100644 --- a/R/aaa.R +++ b/R/aaa.R @@ -18,7 +18,10 @@ MAINTENANCE_METRICS <- c("has_maintainer", "news_current")#, "last_30_bugs_statu TRANSPARENCY_METRICS <- c("has_source_control", "has_bug_reports_url") TESTING_METRICS <- c("check", "coverage") +MPN_SCORECARD_FORMAT <- "1.0" + SCORECARD_JSON_KEYS <- c( + "mpn_scorecard_format", "mpn_scorecard_version", "pkg_name", "pkg_version", diff --git a/R/score-pkg.R b/R/score-pkg.R index b79112a..ba113ea 100644 --- a/R/score-pkg.R +++ b/R/score-pkg.R @@ -54,6 +54,7 @@ score_pkg <- function( # start building up scorecard list res <- list( + mpn_scorecard_format = MPN_SCORECARD_FORMAT, mpn_scorecard_version = mpn_scorecard_ver, pkg_name = pkg_name, pkg_version = pkg_ver, From 7f3fe88b63c1ff8766bda7a259ad86e1a9e18809 Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:16 -0400 Subject: [PATCH 18/31] render_scorecard: fix a checkmate assertion results_dir is fed directly to get_result_path(), so NULL is not okay: > get_result_path(NULL, "scorecard.pdf") Error in basename(out_dir) : a character vector argument expected --- R/render-scorecard.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R/render-scorecard.R b/R/render-scorecard.R index 5559213..603b294 100644 --- a/R/render-scorecard.R +++ b/R/render-scorecard.R @@ -44,7 +44,7 @@ render_scorecard <- function( comments_block <- check_for_comments(results_dir) # Output file - checkmate::assert_string(results_dir, null.ok = TRUE) + checkmate::assert_string(results_dir) out_file <- get_result_path(results_dir, "scorecard.pdf") check_exists_and_overwrite(out_file, overwrite) From 7371fddc5cfbe3ad930b5099115c24fd8171caa9 Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:16 -0400 Subject: [PATCH 19/31] render_scorecard: drop note about traceability matrix file This note isn't relevant for the user. The *.export_doc.rds file is not something that they should ever write without the help of make_traceability_matrix(), and we do not advertise the underlying structure. And with upcoming support for externally scored package, a user-defined matrix can come in through a YAML file. --- R/render-scorecard.R | 1 - man/render_scorecard.Rd | 1 - 2 files changed, 2 deletions(-) diff --git a/R/render-scorecard.R b/R/render-scorecard.R index 603b294..36dc090 100644 --- a/R/render-scorecard.R +++ b/R/render-scorecard.R @@ -16,7 +16,6 @@ #' **Note** that it must follow the naming convention of `_.comments.txt` #' #' If a traceability matrix is found in `results_dir`, it will automatically be included unless overridden via `add_traceability`. 
-#' **Note** that it must follow the naming convention of `_.export_doc.rds` #' #' @export render_scorecard <- function( diff --git a/man/render_scorecard.Rd b/man/render_scorecard.Rd index ecdec07..6e5e149 100644 --- a/man/render_scorecard.Rd +++ b/man/render_scorecard.Rd @@ -33,5 +33,4 @@ If a plain text comments file is found in \code{results_dir}, it will automatica \strong{Note} that it must follow the naming convention of \verb{_.comments.txt} If a traceability matrix is found in \code{results_dir}, it will automatically be included unless overridden via \code{add_traceability}. -\strong{Note} that it must follow the naming convention of \verb{_.export_doc.rds} } From aa2cb8faf67a224aeb25bf603397d084d243a513 Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:16 -0400 Subject: [PATCH 20/31] render_scorecard: move preparation of params to dedicated function Externally scored packages are going to pass in different parameters. Move the parameter preparation for "internally" scored packages outside of render_scorecard() so that render_scorecard() can handle both internally or externally scored packages by selecting which param function to call. --- R/render-scorecard.R | 53 +++++++++++++++++++++----------------------- 1 file changed, 25 insertions(+), 28 deletions(-) diff --git a/R/render-scorecard.R b/R/render-scorecard.R index 36dc090..ab1b0be 100644 --- a/R/render-scorecard.R +++ b/R/render-scorecard.R @@ -24,13 +24,27 @@ render_scorecard <- function( overwrite = FALSE, add_traceability = "auto" ) { + checkmate::assert_numeric(risk_breaks, lower = 0, upper = 1, len = 2) - json_path <- get_result_path(results_dir, "scorecard.json") + checkmate::assert_string(results_dir) + out_file <- get_result_path(results_dir, "scorecard.pdf") + check_exists_and_overwrite(out_file, overwrite) + + rendered_file <- rmarkdown::render( + system.file(SCORECARD_RMD_TEMPLATE, package = "mpn.scorecard", mustWork = TRUE), # TODO: do we want to expose this to users, to pass their own custom template? + output_dir = results_dir, + output_file = basename(out_file), + quiet = TRUE, + params = get_render_params(results_dir, risk_breaks, add_traceability) + ) - # input checking + return(invisible(rendered_file)) +} + +get_render_params <- function(results_dir, risk_breaks, add_traceability) { + json_path <- get_result_path(results_dir, "scorecard.json") checkmate::assert_string(json_path) checkmate::assert_file_exists(json_path) - checkmate::assert_numeric(risk_breaks, lower = 0, upper = 1, len = 2) # load scores from JSON pkg_scores <- jsonlite::fromJSON(json_path) @@ -42,12 +56,6 @@ render_scorecard <- function( comments_block <- check_for_comments(results_dir) - # Output file - checkmate::assert_string(results_dir) - out_file <- get_result_path(results_dir, "scorecard.pdf") - check_exists_and_overwrite(out_file, overwrite) - - # Appendix extra_notes_data <- create_extra_notes(results_dir) @@ -62,26 +70,15 @@ render_scorecard <- function( as.character(utils::packageVersion("mpn.scorecard")) ) - # Render rmarkdown - rendered_file <- rmarkdown::render( - system.file(SCORECARD_RMD_TEMPLATE, package = "mpn.scorecard", mustWork = TRUE), # TODO: do we want to expose this to users, to pass their own custom template? 
- output_dir = results_dir, - output_file = basename(out_file), - quiet = TRUE, - params = list( - set_title = paste("Scorecard:", pkg_scores$pkg_name, pkg_scores$pkg_version), - scorecard_footer = mpn_scorecard_ver, - pkg_scores = formatted_pkg_scores, - comments_block = comments_block, - extra_notes_data = extra_notes_data, - exports_df = exports_df, - dep_versions_df = dep_versions_df - ) + list( + set_title = paste("Scorecard:", pkg_scores$pkg_name, pkg_scores$pkg_version), + scorecard_footer = mpn_scorecard_ver, + pkg_scores = formatted_pkg_scores, + comments_block = comments_block, + extra_notes_data = extra_notes_data, + exports_df = exports_df, + dep_versions_df = dep_versions_df ) - - # Render to PDF, invisibly return the path to the PDF - return(invisible(rendered_file)) - } scorecard_json_compat <- function(data, path) { From 66e0b02e74e57ec8d9779a1f1ada6ebc0bb72425 Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:16 -0400 Subject: [PATCH 21/31] get_result_path: add extensions for externally scored packages --- R/util.R | 8 ++++++-- man/get_result_path.Rd | 5 +++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/R/util.R b/R/util.R index 50888ab..fb20cf8 100644 --- a/R/util.R +++ b/R/util.R @@ -30,8 +30,12 @@ check_exists_and_overwrite <- function(path, overwrite) { get_result_path <- function( out_dir, ext = c( - "scorecard.json", "scorecard.pdf", "check.rds", "covr.rds", "comments.txt", - "summary.pdf", "export_doc.rds", "mitigation.txt" + "scorecard.json", "scorecard.pdf", "comments.txt", "mitigation.txt", + # Internally scored + "check.rds", "covr.rds", "export_doc.rds", "summary.pdf", + # Externally scored + "check.txt", "coverage.json", "matrix.yaml", "metadata.json", "pkg.json", + "scores.json" ) ){ diff --git a/man/get_result_path.Rd b/man/get_result_path.Rd index 134f11e..19c86c9 100644 --- a/man/get_result_path.Rd +++ b/man/get_result_path.Rd @@ -6,8 +6,9 @@ \usage{ get_result_path( out_dir, - ext = c("scorecard.json", "scorecard.pdf", "check.rds", "covr.rds", "comments.txt", - "summary.pdf", "export_doc.rds", "mitigation.txt") + ext = c("scorecard.json", "scorecard.pdf", "comments.txt", "mitigation.txt", + "check.rds", "covr.rds", "export_doc.rds", "summary.pdf", "check.txt", + "coverage.json", "matrix.yaml", "metadata.json", "pkg.json", "scores.json") ) } \arguments{ From 3f0a2f14cdcaa08f578359b928ba6504608b92b0 Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:16 -0400 Subject: [PATCH 22/31] render_scorecard: support externally scored packages With recent commits, all of the formatting functions have been decoupled, at least conditionally, from the assumption that it is an R package that's being scored. Define a set of files that any type of package can use to render a scorecard. The structure of the results directory for external scores is similar to that of the directory for "internal" scores. check.txt replaces check.rds, coverage.json replaces covr.rds, and matrix.yaml replaces export_doc.rds. For scorecard.json, an external _could_ write that file. But doing so is awkward because that file includes overall scores derived from other scores in the file. To compute those, the external scorer would need to either duplicate the scoring logic or depend on mpn.scorecard. So instead have the external scorer produce parts of scorecard.json, and make mpn.scorecard responsible for stitching those parts together at render time. 
A notable limitation of the external input is that the check values must be 0 or 1; 0 through 1, like is done for R packages, is not supported. There's a number of ways we could add support for this, but punt on that given that all our initial non-R packages of interest need only a pass/fail result. --- DESCRIPTION | 3 +- R/external.R | 245 ++++++++++++++++++ R/format-report.R | 31 ++- R/render-scorecard.R | 27 +- _pkgdown.yml | 1 + inst/templates/scorecard-template.Rmd | 9 +- man/check_for_comments.Rd | 3 +- man/check_for_traceability.Rd | 3 +- man/create_extra_notes.Rd | 3 +- man/external_scores.Rd | 155 +++++++++++ man/format_appendix.Rd | 2 +- man/format_traceability_matrix.Rd | 2 +- man/make_traceability_matrix.Rd | 3 +- man/render_scorecard.Rd | 8 +- tests/testthat/helpers-external.R | 116 +++++++++ tests/testthat/test-format-report-external.R | 69 +++++ .../testthat/test-render-scorecard-external.R | 41 +++ 17 files changed, 700 insertions(+), 21 deletions(-) create mode 100644 R/external.R create mode 100644 man/external_scores.Rd create mode 100644 tests/testthat/helpers-external.R create mode 100644 tests/testthat/test-format-report-external.R create mode 100644 tests/testthat/test-render-scorecard-external.R diff --git a/DESCRIPTION b/DESCRIPTION index 31adbb7..28b608a 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -44,7 +44,8 @@ Imports: stringr, tibble, tidyselect, - tidyr + tidyr, + yaml Suggests: devtools, pdftools, diff --git a/R/external.R b/R/external.R new file mode 100644 index 0000000..c9573bd --- /dev/null +++ b/R/external.R @@ -0,0 +1,245 @@ +#' Rendering externally scored packages +#' +#' @description +#' +#' For R packages, mpn.scorecard handles both scoring and rendering. The +#' workflow is to first score the package with [score_pkg()], then to optionally +#' generate a traceability matrix with [make_traceability_matrix()], and finally +#' to render the scorecard with [render_scorecard()]. +#' +#' [render_scorecard()] also supports rendering packages that are scored outside +#' of mpn.scorecard. The scorer is responsible for preparing a results directory +#' with the set of files described below. +#' +#' @details +#' +#' ## Input files +#' +#' The following input files define results for the scorecard to render. These +#' must reside in a directory named `_`, following the naming +#' of the output directory returned by [score_pkg()]. +#' +#' * `_.pkg.json`: This file provides general information +#' about the package being scored. It requires the following keys: +#' +#' * `mpn_scorecard_format`: The version of the format in which these input +#' files are specified. This should be "1.0". +#' +#' * `pkg_name`, `pkg_version`: The name and version of the package. +#' +#' * `scorecard_type`: The type of package. Two types are currently +#' recognized and receive special handling: "R" and "cli". Everything else +#' falls back to default handling. +#' +#' If you're specifying "R" here, you should probably use [score_pkg()] +#' instead. +#' +#' Example: +#' +#' ``` +#' { +#' "mpn_scorecard_format": "1.0", +#' "pkg_name": "foo", +#' "pkg_version": "1.2.3", +#' "scorecard_type": "cli" +#' } +#' ``` +#' +#' * `_.check.txt`: Output from the package check. This is +#' included in the appendix verbatim. +#' +#' * `_.coverage.json`: Code coverage percentages. The values +#' will be rounded to two decimal places when rendering. This file is +#' optional. 
+#' +#' Example: +#' +#' ``` +#' { +#' "overall": 91.54265, +#' "files": [ +#' { +#' "file": "cmd/foo.go", +#' "coverage": 98.7643 +#' }, +#' { +#' "file": "cmd/bar.go", +#' "coverage": 84.321 +#' } +#' ] +#' } +#' ``` +#' +#' * `_.scores.json`: Scores for individual metrics grouped +#' into four categories: "testing", "documentation", "maintenance", and +#' "transparency". Each category must have a least one score. +#' +#' For the testing category, both "check and "coverage" scores are required. +#' "check" should be 1 if the tests passed and 0 if they failed. "coverage" +#' should match the "overall" value from `_.coverage.json`, +#' divided by 100. +#' +#' Example: +#' +#' ``` +#' { +#' "testing": { +#' "check": 1, +#' "coverage": 0.9154265 +#' }, +#' "documentation": { +#' "has_website": 1, +#' "has_news": 1 +#' }, +#' "maintenance": { +#' "has_maintainer": 1, +#' "news_current": 1 +#' }, +#' "transparency": { +#' "has_source_control": 1, +#' "has_bug_reports_url": 1 +#' } +#' } +#' ``` +#' +#' * `_.metadata.json`: Information to include in the +#' "System Info" table. The table will include the "date" and "executor" +#' value, as well as any key-value pairs defined under "info.env_vars" and +#' "info.sys". The "date" and "executor" keys are required. +#' +#' Example: +#' +#' ``` +#' { +#' "date": "2024-08-01 08:19:12", +#' "executor": "Bobert", +#' "info": { +#' "env_vars": { +#' "METWORX_VERSION": "22.09" +#' }, +#' "sys": { +#' "sysname": "Linux", +#' "machine": "x86_64" +#' } +#' } +#' } +#' ``` +#' +#' * `_.matrix.yaml`: A file defining entries to render as +#' the traceability matrix table. The traceability matrix table is meant to +#' map all user-facing entry points (e.g., exported functions or available +#' commands for a command-line executable) to the relevant documentation and +#' test files. +#' +#' The file should consist of a sequence of entries with the following items: +#' +#' * `entrypoint`: The name of the entry point. +#' +#' * `code`: The path to where the entry point is defined. +#' +#' * `doc`: The path to the entry point's main documentation. +#' +#' * `tests`: A list of paths where the entry point is tested. +#' +#' What the entry point is called in the table depends on `scorecard_type`. +#' For "cli", the column name is "Command" and, for "R", it is +#' "Exported Function". For all other types, it is "Entry Point". +#' +#' This file is optional if the `add_traceability` argument of +#' [render_scorecard()] is "auto" or `FALSE`. 
+#' +#' Example: +#' +#' ``` +#' - entrypoint: foo +#' skip: true +#' +#' - entrypoint: foo bar +#' code: cmd/bar.go +#' doc: docs/commands/foo_bar.md +#' tests: +#' - cmd/bar_test.go +#' - integration/bar_test.go +#' ``` +#' +#' @name external_scores +#' @aliases render_external +NULL + +get_render_params_external <- function(results_dir, risk_breaks, add_traceability) { + pkg_scores <- build_pkg_scores(results_dir) + + if (identical(add_traceability, "auto")) { + add_traceability <- file.exists(get_result_path(results_dir, "matrix.yaml")) + } + if (isTRUE(add_traceability)) { + tmat <- read_traceability_matrix(results_dir) + } else { + tmat <- NULL + } + + list( + set_title = paste("Scorecard:", pkg_scores$pkg_name, pkg_scores$pkg_version), + scorecard_footer = format_scorecard_version( + scorecard_ver = utils::packageVersion("mpn.scorecard") + ), + pkg_scores = format_scores_for_render(pkg_scores, risk_breaks), + comments_block = check_for_comments(results_dir), + extra_notes_data = list( + cov_results_df = read_coverage_results(results_dir), + check_output = read_check_output(results_dir) + ), + exports_df = tmat + ) +} + +build_pkg_scores <- function(results_dir) { + pkg_json <- get_result_path(results_dir, "pkg.json") + checkmate::assert_file_exists(pkg_json) + meta_json <- get_result_path(results_dir, "metadata.json") + checkmate::assert_file_exists(meta_json) + scores_json <- get_result_path(results_dir, "scores.json") + checkmate::assert_file_exists(scores_json) + + res <- c( + jsonlite::read_json(pkg_json), + scores = list(jsonlite::read_json(scores_json)), + metadata = list(jsonlite::read_json(meta_json)) + ) + + return(calc_overall_scores(res)) +} + +read_traceability_matrix <- function(results_dir) { + fname <- get_result_path(results_dir, "matrix.yaml") + checkmate::assert_file_exists(fname) + + entries <- yaml::read_yaml(fname) + entries <- purrr::discard(entries, function(e) isTRUE(e[["skip"]])) + tibble::tibble( + entrypoint = purrr::map_chr(entries, "entrypoint"), + code_file = purrr::map_chr(entries, "code"), + documentation = purrr::map_chr(entries, "doc"), + test_files = purrr::map(entries, function(x) { + if (length(x[["tests"]])) x[["tests"]] else character() + }) + ) +} + +read_check_output <- function(results_dir) { + fname <- get_result_path(results_dir, "check.txt") + checkmate::assert_file_exists(fname) + return(readChar(fname, file.size(fname))) +} + +read_coverage_results <- function(results_dir) { + fname <- get_result_path(results_dir, "coverage.json") + checkmate::assert_file_exists(fname) + + data <- jsonlite::read_json(fname) + filecov <- data[["files"]] + tibble::tibble( + code_file = purrr::map_chr(filecov, "file"), + test_coverage = purrr::map_dbl(filecov, "coverage") + ) +} diff --git a/R/format-report.R b/R/format-report.R index 7d3cc02..93e1404 100644 --- a/R/format-report.R +++ b/R/format-report.R @@ -680,7 +680,8 @@ format_colnames_to_title <- function(df){ #' @keywords internal format_traceability_matrix <- function( exports_df, - wrap_cols = TRUE + wrap_cols = TRUE, + scorecard_type = "R" ){ checkmate::assert_logical(wrap_cols) @@ -702,12 +703,29 @@ format_traceability_matrix <- function( test_dirs <- NULL } + if ("exported_function" %in% names(exported_func_df)) { + # Align internal scoring with external format. 
+ exported_func_df <- dplyr::rename(exported_func_df, + entrypoint = "exported_function" + ) + } + + entry_name <- switch(scorecard_type, + "R" = "Exported Function", + "cli" = "Command", + "Entry Point" + ) + exported_func_df <- dplyr::rename( + exported_func_df, + !!entry_name := "entrypoint" + ) + # Format Table if(isTRUE(wrap_cols)){ exported_func_df <- exported_func_df %>% dplyr::mutate( dplyr::across( - all_of(c("exported_function", "code_file", "documentation")), + all_of(c(entry_name, "code_file", "documentation")), function(x) wrap_text(x, width = 24, indent = TRUE, strict = TRUE) ), # Tests can be longer due to page width (pg_width) settings (we make it wider) @@ -765,8 +783,13 @@ trace_matrix_notes <- function(exports_df){ #' @param return_vals Logical (T/F). If `TRUE`, return the objects instead of printing them out for `rmarkdown`. Used for testing. #' #' @keywords internal -format_appendix <- function(extra_notes_data, return_vals = FALSE){ - sub_header_strs <- c("\n## R CMD Check\n\n", "\n## Test coverage\n\n") +format_appendix <- function(extra_notes_data, return_vals = FALSE, scorecard_type = "R") { + check_title <- if (identical(scorecard_type, "R")) { + "R CMD Check" + } else { + "Check output" + } + sub_header_strs <- c(paste0("\n## ", check_title, "\n\n"), "\n## Test coverage\n\n") ### Covr Results ### # Format Table diff --git a/R/render-scorecard.R b/R/render-scorecard.R index ab1b0be..49f9c08 100644 --- a/R/render-scorecard.R +++ b/R/render-scorecard.R @@ -1,6 +1,10 @@ -#' Take a JSON from score_pkg() and render a pdf +#' Render a scorecard PDF from a directory of results #' -#' @param results_dir directory containing json file and individual results. Output file path from [score_pkg()] +#' Create a scorecard from a results directory prepared by [score_pkg()] or an +#' external scorer (see [external_scores]). +#' +#' @param results_dir Directory with scoring results. This is the path returned +#' by [score_pkg()]. #' @param risk_breaks A numeric vector of length 2, with both numbers being #' between 0 and 1. These are used for the "breaks" when classifying scores #' into Risk categories. For example, for the default value of `c(0.3, 0.7)`, @@ -30,12 +34,18 @@ render_scorecard <- function( out_file <- get_result_path(results_dir, "scorecard.pdf") check_exists_and_overwrite(out_file, overwrite) + if (file.exists(get_result_path(results_dir, "pkg.json"))) { + param_fn <- get_render_params_external + } else { + param_fn <- get_render_params + } + rendered_file <- rmarkdown::render( system.file(SCORECARD_RMD_TEMPLATE, package = "mpn.scorecard", mustWork = TRUE), # TODO: do we want to expose this to users, to pass their own custom template? 
output_dir = results_dir, output_file = basename(out_file), quiet = TRUE, - params = get_render_params(results_dir, risk_breaks, add_traceability) + params = param_fn(results_dir, risk_breaks, add_traceability) ) return(invisible(rendered_file)) @@ -113,6 +123,11 @@ scorecard_json_compat <- function(data, path) { #' #' @keywords internal format_scores_for_render <- function(pkg_scores, risk_breaks = c(0.3, 0.7)) { + stype <- pkg_scores[["scorecard_type"]] + if (is.null(stype)) { + abort("bug: scorecard_type is unexpectedly absent from pkg_scores") + } + check_label <- if (identical(stype, "R")) "R CMD CHECK" else "check" # build list of formatted tibbles pkg_scores$formatted <- list() @@ -135,7 +150,9 @@ format_scores_for_render <- function(pkg_scores, risk_breaks = c(0.3, 0.7)) { score = ifelse(.x == "NA", NA_integer_, .x) ) %>% mutate( - result = map_answer(.data$score, .data$criteria), + result = map_answer(.data$score, .data$criteria, + include_check_score = identical(stype, "R") + ), risk = map_risk(.data$score, risk_breaks) ) }) %>% purrr::list_rbind() @@ -148,7 +165,7 @@ format_scores_for_render <- function(pkg_scores, risk_breaks = c(0.3, 0.7)) { sprintf("%.2f%%", .data$score * 100), .data$result ), - criteria = ifelse(.data$criteria == "check", "R CMD CHECK", "coverage") + criteria = ifelse(.data$criteria == "check", check_label, "coverage") ) } diff --git a/_pkgdown.yml b/_pkgdown.yml index bd15915..3edc577 100644 --- a/_pkgdown.yml +++ b/_pkgdown.yml @@ -22,3 +22,4 @@ reference: contents: - render_scorecard - render_scorecard_summary + - external_scores diff --git a/inst/templates/scorecard-template.Rmd b/inst/templates/scorecard-template.Rmd index b27ef49..6145e83 100644 --- a/inst/templates/scorecard-template.Rmd +++ b/inst/templates/scorecard-template.Rmd @@ -40,6 +40,11 @@ knitr::opts_chunk$set(echo = FALSE, warning = FALSE, message = FALSE, library(dplyr) library(flextable) set_flextable_defaults(font.color = "#333333", border.color = "#999999", padding = 4, fonts_ignore = TRUE) + +scorecard_type <- params[["pkg_scores"]][["scorecard_type"]] +if (is.null(scorecard_type)) { + stop("pkg_scores unexpectedly does not have 'scorecard_type' field") +} ``` ```{r setup_data} @@ -85,7 +90,7 @@ trace_matrix_notes(params$exports_df) ``` ```{r} -format_traceability_matrix(params$exports_df) +format_traceability_matrix(params$exports_df, scorecard_type = scorecard_type) ``` \newpage @@ -102,7 +107,7 @@ format_dependency_versions(params$dep_versions_df) ``` ```{r, results = "asis"} -format_appendix(params$extra_notes_data) +format_appendix(params$extra_notes_data, scorecard_type = scorecard_type) ``` diff --git a/man/check_for_comments.Rd b/man/check_for_comments.Rd index aa4fa1f..c6380d7 100644 --- a/man/check_for_comments.Rd +++ b/man/check_for_comments.Rd @@ -7,7 +7,8 @@ check_for_comments(results_dir) } \arguments{ -\item{results_dir}{directory containing json file and individual results. Output file path from \code{\link[=score_pkg]{score_pkg()}}} +\item{results_dir}{Directory with scoring results. This is the path returned +by \code{\link[=score_pkg]{score_pkg()}}.} } \description{ Look for comment file and return contents if is found diff --git a/man/check_for_traceability.Rd b/man/check_for_traceability.Rd index dcd9330..ac0e815 100644 --- a/man/check_for_traceability.Rd +++ b/man/check_for_traceability.Rd @@ -7,7 +7,8 @@ check_for_traceability(results_dir, add_traceability) } \arguments{ -\item{results_dir}{directory containing json file and individual results. 
Output file path from \code{\link[=score_pkg]{score_pkg()}}} +\item{results_dir}{Directory with scoring results. This is the path returned +by \code{\link[=score_pkg]{score_pkg()}}.} \item{add_traceability}{Logical (T/F). If \code{TRUE}, append a table that links package functionality to the documentation and test files. Defaults to "auto", which will include the matrix if found.} diff --git a/man/create_extra_notes.Rd b/man/create_extra_notes.Rd index 2a8fc07..cf43b08 100644 --- a/man/create_extra_notes.Rd +++ b/man/create_extra_notes.Rd @@ -7,7 +7,8 @@ create_extra_notes(results_dir) } \arguments{ -\item{results_dir}{directory containing json file and individual results. Output file path from \code{\link[=score_pkg]{score_pkg()}}} +\item{results_dir}{Directory with scoring results. This is the path returned +by \code{\link[=score_pkg]{score_pkg()}}.} } \description{ Create extra notes summarizing the covr & rcmdcheck outputs, and documentation diff --git a/man/external_scores.Rd b/man/external_scores.Rd new file mode 100644 index 0000000..8d05510 --- /dev/null +++ b/man/external_scores.Rd @@ -0,0 +1,155 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/external.R +\name{external_scores} +\alias{external_scores} +\alias{render_external} +\title{Rendering externally scored packages} +\description{ +For R packages, mpn.scorecard handles both scoring and rendering. The +workflow is to first score the package with \code{\link[=score_pkg]{score_pkg()}}, then to optionally +generate a traceability matrix with \code{\link[=make_traceability_matrix]{make_traceability_matrix()}}, and finally +to render the scorecard with \code{\link[=render_scorecard]{render_scorecard()}}. + +\code{\link[=render_scorecard]{render_scorecard()}} also supports rendering packages that are scored outside +of mpn.scorecard. The scorer is responsible for preparing a results directory +with the set of files described below. +} +\details{ +\subsection{Input files}{ + +The following input files define results for the scorecard to render. These +must reside in a directory named \verb{_}, following the naming +of the output directory returned by \code{\link[=score_pkg]{score_pkg()}}. +\itemize{ +\item \verb{_.pkg.json}: This file provides general information +about the package being scored. It requires the following keys: +\itemize{ +\item \code{mpn_scorecard_format}: The version of the format in which these input +files are specified. This should be "1.0". +\item \code{pkg_name}, \code{pkg_version}: The name and version of the package. +\item \code{scorecard_type}: The type of package. Two types are currently +recognized and receive special handling: "R" and "cli". Everything else +falls back to default handling. + +If you're specifying "R" here, you should probably use \code{\link[=score_pkg]{score_pkg()}} +instead. +} + +Example: + +\if{html}{\out{
}}\preformatted{\{ + "mpn_scorecard_format": "1.0", + "pkg_name": "foo", + "pkg_version": "1.2.3", + "scorecard_type": "cli" +\} +}\if{html}{\out{
}} +\item \verb{_.check.txt}: Output from the package check. This is +included in the appendix verbatim. +\item \verb{_.coverage.json}: Code coverage percentages. The values +will be rounded to two decimal places when rendering. This file is +optional. + +Example: + +\if{html}{\out{
}}\preformatted{\{ + "overall": 91.54265, + "files": [ + \{ + "file": "cmd/foo.go", + "coverage": 98.7643 + \}, + \{ + "file": "cmd/bar.go", + "coverage": 84.321 + \} + ] +\} +}\if{html}{\out{
}}
+\item \verb{_.scores.json}: Scores for individual metrics grouped
+into four categories: "testing", "documentation", "maintenance", and
+"transparency". Each category must have at least one score.
+
+For the testing category, both "check" and "coverage" scores are required.
+"check" should be 1 if the tests passed and 0 if they failed. "coverage"
+should match the "overall" value from \verb{_.coverage.json},
+divided by 100.
+
+Example:
+
+\if{html}{\out{
}}\preformatted{\{ + "testing": \{ + "check": 1, + "coverage": 0.9154265 + \}, + "documentation": \{ + "has_website": 1, + "has_news": 1 + \}, + "maintenance": \{ + "has_maintainer": 1, + "news_current": 1 + \}, + "transparency": \{ + "has_source_control": 1, + "has_bug_reports_url": 1 + \} +\} +}\if{html}{\out{
}}
+\item \verb{_.metadata.json}: Information to include in the
+"System Info" table. The table will include the "date" and "executor"
+values, as well as any key-value pairs defined under "info.env_vars" and
+"info.sys". The "date" and "executor" keys are required.
+
+Example:
+
+\if{html}{\out{
}}\preformatted{\{ + "date": "2024-08-01 08:19:12", + "executor": "Bobert", + "info": \{ + "env_vars": \{ + "METWORX_VERSION": "22.09" + \}, + "sys": \{ + "sysname": "Linux", + "machine": "x86_64" + \} + \} +\} +}\if{html}{\out{
}} +\item \verb{_.matrix.yaml}: A file defining entries to render as +the traceability matrix table. The traceability matrix table is meant to +map all user-facing entry points (e.g., exported functions or available +commands for a command-line executable) to the relevant documentation and +test files. + +The file should consist of a sequence of entries with the following items: +\itemize{ +\item \code{entrypoint}: The name of the entry point. +\item \code{code}: The path to where the entry point is defined. +\item \code{doc}: The path to the entry point's main documentation. +\item \code{tests}: A list of paths where the entry point is tested. +} + +What the entry point is called in the table depends on \code{scorecard_type}. +For "cli", the column name is "Command" and, for "R", it is +"Exported Function". For all other types, it is "Entry Point". + +This file is optional if the \code{add_traceability} argument of +\code{\link[=render_scorecard]{render_scorecard()}} is "auto" or \code{FALSE}. + +Example: + +\if{html}{\out{
}}\preformatted{- entrypoint: foo + skip: true + +- entrypoint: foo bar + code: cmd/bar.go + doc: docs/commands/foo_bar.md + tests: + - cmd/bar_test.go + - integration/bar_test.go +}\if{html}{\out{
}} +} +} +} diff --git a/man/format_appendix.Rd b/man/format_appendix.Rd index c0966de..136af72 100644 --- a/man/format_appendix.Rd +++ b/man/format_appendix.Rd @@ -4,7 +4,7 @@ \alias{format_appendix} \title{Format Appendix} \usage{ -format_appendix(extra_notes_data, return_vals = FALSE) +format_appendix(extra_notes_data, return_vals = FALSE, scorecard_type = "R") } \arguments{ \item{extra_notes_data}{named list. Output of \code{\link[=create_extra_notes]{create_extra_notes()}}} diff --git a/man/format_traceability_matrix.Rd b/man/format_traceability_matrix.Rd index 72460f0..f3f4609 100644 --- a/man/format_traceability_matrix.Rd +++ b/man/format_traceability_matrix.Rd @@ -4,7 +4,7 @@ \alias{format_traceability_matrix} \title{Format Traceability Matrix} \usage{ -format_traceability_matrix(exports_df, wrap_cols = TRUE) +format_traceability_matrix(exports_df, wrap_cols = TRUE, scorecard_type = "R") } \arguments{ \item{exports_df}{tibble. Output of \code{\link[=make_traceability_matrix]{make_traceability_matrix()}}} diff --git a/man/make_traceability_matrix.Rd b/man/make_traceability_matrix.Rd index d973640..d7110f3 100644 --- a/man/make_traceability_matrix.Rd +++ b/man/make_traceability_matrix.Rd @@ -9,7 +9,8 @@ make_traceability_matrix(pkg_tar_path, results_dir = NULL, verbose = FALSE) \arguments{ \item{pkg_tar_path}{path to a tarball} -\item{results_dir}{directory containing json file and individual results. Output file path from \code{\link[=score_pkg]{score_pkg()}}} +\item{results_dir}{Directory with scoring results. This is the path returned +by \code{\link[=score_pkg]{score_pkg()}}.} \item{verbose}{Logical (\code{TRUE}/\code{FALSE}). If \code{TRUE}, show any warnings/messages per function.} } diff --git a/man/render_scorecard.Rd b/man/render_scorecard.Rd index 6e5e149..5ed31c5 100644 --- a/man/render_scorecard.Rd +++ b/man/render_scorecard.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/render-scorecard.R \name{render_scorecard} \alias{render_scorecard} -\title{Take a JSON from score_pkg() and render a pdf} +\title{Render a scorecard PDF from a directory of results} \usage{ render_scorecard( results_dir, @@ -12,7 +12,8 @@ render_scorecard( ) } \arguments{ -\item{results_dir}{directory containing json file and individual results. Output file path from \code{\link[=score_pkg]{score_pkg()}}} +\item{results_dir}{Directory with scoring results. This is the path returned +by \code{\link[=score_pkg]{score_pkg()}}.} \item{risk_breaks}{A numeric vector of length 2, with both numbers being between 0 and 1. These are used for the "breaks" when classifying scores @@ -26,7 +27,8 @@ is "Medium Risk", and \verb{0.7 <= score < 1} is "High Risk".} Defaults to "auto", which will include the matrix if found.} } \description{ -Take a JSON from score_pkg() and render a pdf +Create a scorecard from a results directory prepared by \code{\link[=score_pkg]{score_pkg()}} or an +external scorer (see \link{external_scores}). } \details{ If a plain text comments file is found in \code{results_dir}, it will automatically be included. diff --git a/tests/testthat/helpers-external.R b/tests/testthat/helpers-external.R new file mode 100644 index 0000000..639b550 --- /dev/null +++ b/tests/testthat/helpers-external.R @@ -0,0 +1,116 @@ +#' Create a temporary directory with external results +#' +#' @param pattern,clean,.local_envir Arguments passed to +#' `withr::local_tempdir()`. 
+local_create_external_results <- function(pattern = "mpn-scorecard-tests-", + .local_envir = parent.frame(), + clean = TRUE) { + tdir <- withr::local_tempdir( + pattern = pattern, .local_envir = .local_envir, clean = clean + ) + + pkg <- "foo" + version <- "1.2.3" + + prefix <- paste0(pkg, "_", version) + rdir <- file.path(tdir, prefix) + fs::dir_create(rdir) + + stem <- file.path(tdir, prefix, prefix) + + jsonlite::write_json( + list( + mpn_scorecard_format = MPN_SCORECARD_FORMAT, + pkg_name = pkg, + pkg_version = version, + scorecard_type = "cli" + ), + paste0(stem, ".pkg.json"), + auto_unbox = TRUE + ) + + cat("check", "output", sep = "\n", file = paste0(stem, ".check.txt")) + + jsonlite::write_json( + list( + overall = 91.54265, + files = list( + list( + file = "cmd/foo.go", + coverage = 98.7643 + ), + list( + file = "cmd/bar.go", + coverage = 84.321 + ) + ) + ), + paste0(stem, ".coverage.json"), + auto_unbox = TRUE + ) + + jsonlite::write_json( + list( + testing = list( + check = 1, + coverage = 0.9154265 + ), + documentation = list( + has_website = 1, + has_news = 1 + ), + maintenance = list( + has_maintainer = 1, + news_current = 1 + ), + transparency = list( + has_source_control = 1, + has_bug_reports_url = 1 + ) + ), + paste0(stem, ".scores.json"), + auto_unbox = TRUE + ) + + jsonlite::write_json( + list( + date = "2024-08-01 08:19:12", + executor = "Bobert", + info = list( + env_vars = list( + METWORX_VERSION = "22.09" + ), + sys = list( + sysname = "Linux", + machine = "x86_64" + ) + ) + ), + paste0(stem, ".metadata.json"), + auto_unbox = TRUE + ) + + yaml::write_yaml( + list( + list( + entrypoint = "foo", + skip = TRUE + ), + list( + entrypoint = "foo bar", + code = "cmd/bar.go", + doc = "docs/commands/foo_bar.md", + tests = c("cmd/bar_test.go", "integration/bar_test.go") + ), + list( + entrypoint = "foo baz", + code = "cmd/baz.go", + doc = "docs/commands/foo_baz.md", + tests = "cmd/baz_test.go" + ) + ), + paste0(stem, ".matrix.yaml") + ) + + return(rdir) +} diff --git a/tests/testthat/test-format-report-external.R b/tests/testthat/test-format-report-external.R new file mode 100644 index 0000000..30302ea --- /dev/null +++ b/tests/testthat/test-format-report-external.R @@ -0,0 +1,69 @@ +# These are slimmed down variants of some tests from test-format-report.R, +# focused on checking if the formatting functions are compatible with the +# objects read in from external results. 
+ +results_dir <- local_create_external_results() + +describe("formatting functions with external scores", { + it("format_scores_for_render", { + pkg_scores <- build_pkg_scores(results_dir) + formatted <- format_scores_for_render(pkg_scores)[["formatted"]] + + overall_scores <- formatted[["overall_scores"]] + expect_identical( + names(overall_scores), + c("category", "category_score", "risk") + ) + expect_identical( + overall_scores[["category"]], + c(METRIC_CATEGORIES, "overall") + ) + + expect_identical( + purrr::map(formatted[["category_scores"]], "criteria"), + list( + testing = TESTING_METRICS, + documentation = c("has_website", "has_news"), + maintenance = MAINTENANCE_METRICS, + transparency = TRANSPARENCY_METRICS + ) + ) + }) + + it("format_testing_scores", { + pkg_scores <- build_pkg_scores(results_dir) + flex_df <- format_testing_scores(format_scores_for_render(pkg_scores)) + res <- flex_df[["body"]][["dataset"]] + expect_identical(res[["Result"]], c("Passing", "91.54%")) + }) + + it("format_traceability_matrix", { + pkg_scores <- build_pkg_scores(results_dir) + tmat <- read_traceability_matrix(results_dir) + formatted <- format_traceability_matrix(tmat, + scorecard_type = pkg_scores[["scorecard_type"]] + ) + res <- formatted[["body"]][["dataset"]] + expect_identical( + names(res), + c("Command", "Code File", "Documentation", "Test Files") + ) + expect_identical(res[["Command"]], c("foo bar", "foo baz")) + }) + + it("format_appendix", { + input <- list( + check_output = read_check_output(results_dir), + cov_results_df = read_coverage_results(results_dir) + ) + formatted <- format_appendix(input, return_vals = TRUE) + + cov_res <- formatted[["cov_results_flex"]][["body"]][["dataset"]] + expect_identical(names(cov_res), c("Code File", "Test Coverage")) + + expect_identical( + formatted[["check_output"]], + "check\noutput\n" + ) + }) +}) diff --git a/tests/testthat/test-render-scorecard-external.R b/tests/testthat/test-render-scorecard-external.R new file mode 100644 index 0000000..349050f --- /dev/null +++ b/tests/testthat/test-render-scorecard-external.R @@ -0,0 +1,41 @@ +# These are slimmed down variants of some tests from test-render-scorecard.R to +# check that render_scorecard() doesn't choke on external results. 
+
+skip_if_render_pdf()
+
+describe("render scorecard for external scores", {
+  it("render_scorecard - with traceability YAML", {
+    rdir <- local_create_external_results()
+
+    pdf_path <- render_scorecard(rdir)
+    toc <- pdftools::pdf_toc(pdf = pdf_path)[["children"]]
+    expect_identical(
+      purrr::map_chr(toc, "title"),
+      c("Overview", "Details", "Traceability Matrix", "Appendix")
+    )
+
+    pdf_path <- render_scorecard(rdir,
+      add_traceability = FALSE, overwrite = TRUE
+    )
+    toc <- pdftools::pdf_toc(pdf = pdf_path)[["children"]]
+    expect_identical(
+      purrr::map_chr(toc, "title"),
+      c("Overview", "Details", "Appendix")
+    )
+  })
+
+  it("render_scorecard - without traceability YAML", {
+    rdir <- local_create_external_results()
+    fs::file_delete(get_result_path(rdir, "matrix.yaml"))
+
+    pdf_path <- render_scorecard(rdir)
+    toc <- pdftools::pdf_toc(pdf = pdf_path)[["children"]]
+    expect_identical(
+      purrr::map_chr(toc, "title"),
+      c("Overview", "Details", "Appendix")
+    )
+
+    fs::file_delete(pdf_path)
+    expect_error(render_scorecard(rdir, add_traceability = TRUE))
+  })
+})

From 2d5c2ec101438e3dea07808aff809f9212c1e8fc Mon Sep 17 00:00:00 2001
From: Kyle Meyer
Date: Tue, 6 Aug 2024 16:38:16 -0400
Subject: [PATCH 23/31] external: make coverage optional

For some externally scored packages, it may not be feasible to
calculate coverage.

Don't bother making coverage optional for internally scored R
packages, because in that case we'll try to calculate coverage with
covr and, at the very least, record the error.
---
 R/external.R                                  | 20 ++++++++++++++---
 R/format-report.R                             |  6 +++++
 man/external_scores.Rd                        |  5 +++--
 .../testthat/test-render-scorecard-external.R | 22 +++++++++++++++++++
 4 files changed, 48 insertions(+), 5 deletions(-)

diff --git a/R/external.R b/R/external.R
index c9573bd..a74876a 100644
--- a/R/external.R
+++ b/R/external.R
@@ -74,8 +74,9 @@
 #' into four categories: "testing", "documentation", "maintenance", and
 #' "transparency". Each category must have a least one score.
 #'
-#' For the testing category, both "check and "coverage" scores are required.
-#' "check" should be 1 if the tests passed and 0 if they failed. "coverage"
+#' For the testing category, "check" is required. "check" should be 1 if the
+#' tests passed and 0 if they failed. "coverage" is required if the
+#' `_.coverage.json` file exists. The value
 #' should match the "overall" value from `_.coverage.json`,
 #' divided by 100.
 #'
 #'
@@ -178,6 +179,19 @@ get_render_params_external <- function(results_dir, risk_breaks, add_traceabilit
     tmat <- NULL
   }
 
+  has_cov_score <- !is.null(pkg_scores[["scores"]][["testing"]][["coverage"]])
+  cov_file <- get_result_path(results_dir, "coverage.json")
+  has_cov_file <- file.exists(cov_file)
+  if (has_cov_score && has_cov_file) {
+    cov <- read_coverage_results(results_dir)
+  } else if (has_cov_score) {
+    abort(c("Coverage is in scores but coverage file is missing:", cov_file))
+  } else if (has_cov_file) {
+    abort("Coverage file exists but scores do not include a coverage value.")
+  } else {
+    cov <- NULL
+  }
+
   list(
     set_title = paste("Scorecard:", pkg_scores$pkg_name, pkg_scores$pkg_version),
     scorecard_footer = format_scorecard_version(
@@ -186,7 +200,7 @@ get_render_params_external <- function(results_dir, risk_breaks, add_traceabilit
     pkg_scores = format_scores_for_render(pkg_scores, risk_breaks),
     comments_block = check_for_comments(results_dir),
     extra_notes_data = list(
-      cov_results_df = read_coverage_results(results_dir),
+      cov_results_df = cov,
       check_output = read_check_output(results_dir)
     ),
     exports_df = tmat
diff --git a/R/format-report.R b/R/format-report.R
index 93e1404..d8289a6 100644
--- a/R/format-report.R
+++ b/R/format-report.R
@@ -832,6 +832,12 @@ format_appendix <- function(extra_notes_data, return_vals = FALSE, scorecard_typ
   # R CMD Check
   cat(sub_header_strs[1])
   cat_verbatim(check_output)
+
+  if (is.null(cov_results_df)) {
+    # This is an externally scored package without coverage.
+    return(invisible(NULL))
+  }
+
   cat("\\newpage")
   # Coverage
   cat(sub_header_strs[2])
diff --git a/man/external_scores.Rd b/man/external_scores.Rd
index 8d05510..dc02571 100644
--- a/man/external_scores.Rd
+++ b/man/external_scores.Rd
@@ -70,8 +70,9 @@ Example:
 into four categories: "testing", "documentation", "maintenance", and
 "transparency". Each category must have at least one score.

-For the testing category, both "check" and "coverage" scores are required.
-"check" should be 1 if the tests passed and 0 if they failed. "coverage"
+For the testing category, "check" is required. "check" should be 1 if the
+tests passed and 0 if they failed. "coverage" is required if the
+\verb{_.coverage.json} file exists. The value
 should match the "overall" value from \verb{_.coverage.json},
 divided by 100.
diff --git a/tests/testthat/test-render-scorecard-external.R b/tests/testthat/test-render-scorecard-external.R index 349050f..cd5fe53 100644 --- a/tests/testthat/test-render-scorecard-external.R +++ b/tests/testthat/test-render-scorecard-external.R @@ -38,4 +38,26 @@ describe("render scorecard for external scores", { fs::file_delete(pdf_path) expect_error(render_scorecard(rdir, add_traceability = TRUE)) }) + + it("render_scorecard - without coverage", { + rdir <- local_create_external_results() + fs::file_delete(get_result_path(rdir, "coverage.json")) + + expect_error(render_scorecard(rdir), "coverage file is missing") + + scores <- jsonlite::read_json(get_result_path(rdir, "scores.json")) + scores[["testing"]][["coverage"]] <- NULL + jsonlite::write_json(scores, get_result_path(rdir, "scores.json"), + auto_unbox = TRUE + ) + + pdf_path <- render_scorecard(rdir) + toc <- pdftools::pdf_toc(pdf = pdf_path)[["children"]] + appendix <- toc[[4]] + expect_identical(appendix[["title"]], "Appendix") + expect_identical( + purrr::map_chr(appendix[["children"]], "title"), + c("System Info", "Check output") + ) + }) }) From fb70eeb251d205916c45d78cfd35cf13ac1fd701 Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:16 -0400 Subject: [PATCH 24/31] external: add some basic validation of inputs --- R/aaa.R | 1 + R/external.R | 44 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/R/aaa.R b/R/aaa.R index 3d12264..b9c1dcf 100644 --- a/R/aaa.R +++ b/R/aaa.R @@ -12,6 +12,7 @@ RISK_LEVELS <- c("NA - unexpected", "High Risk", "Medium Risk", "Low Risk") utils::globalVariables(c(".")) +METRIC_CATEGORIES <- c("testing", "documentation", "maintenance", "transparency") DOCUMENTATION_METRICS <- c("has_vignettes", "has_website", "has_news") #, export_help) MAINTENANCE_METRICS <- c("has_maintainer", "news_current")#, "last_30_bugs_status") diff --git a/R/external.R b/R/external.R index a74876a..2474b7f 100644 --- a/R/external.R +++ b/R/external.R @@ -221,6 +221,26 @@ build_pkg_scores <- function(results_dir) { metadata = list(jsonlite::read_json(meta_json)) ) + to_drop <- c( + "mpn_scorecard_version", "out_dir", "pkg_tar_path", "md5sum_check", + "category_scores" + ) + extkeys <- SCORECARD_JSON_KEYS[!SCORECARD_JSON_KEYS %in% to_drop] + assert_json_keys(res, extkeys) + assert_json_keys(res[["metadata"]], c("date", "executor")) + + scores <- res[["scores"]] + # In general, these score names aren't a hard-coded set, but there should at + # least be one per category... + assert_json_keys(scores, METRIC_CATEGORIES) + nscores_per_cat <- purrr::map_int(scores[METRIC_CATEGORIES], length) + if (any(nscores_per_cat < 1)) { + no_scores <- names(scores[METRIC_CATEGORIES])[nscores_per_cat < 1] + abort(c("The following categories do not have any scores:", no_scores)) + } + # ... and check score should be present. 
+ assert_json_keys(scores[["testing"]], "check") + return(calc_overall_scores(res)) } @@ -230,6 +250,10 @@ read_traceability_matrix <- function(results_dir) { entries <- yaml::read_yaml(fname) entries <- purrr::discard(entries, function(e) isTRUE(e[["skip"]])) + for (e in entries) { + assert_keys(e, c("entrypoint", "code", "doc", "tests"), yaml::as.yaml) + } + tibble::tibble( entrypoint = purrr::map_chr(entries, "entrypoint"), code_file = purrr::map_chr(entries, "code"), @@ -252,8 +276,28 @@ read_coverage_results <- function(results_dir) { data <- jsonlite::read_json(fname) filecov <- data[["files"]] + for (e in filecov) { + assert_json_keys(e, c("file", "coverage")) + } + tibble::tibble( code_file = purrr::map_chr(filecov, "file"), test_coverage = purrr::map_dbl(filecov, "coverage") ) } + +assert_json_keys <- function(entry, keys) { + assert_keys( + entry, + keys, + function(...) jsonlite::toJSON(..., auto_unbox = TRUE, pretty = TRUE) + ) +} + +assert_keys <- function(entry, keys, render_fn) { + for (k in keys) { + if (is.null(entry[[k]])) { + abort(paste0("entry is missing key: ", k, "\n", render_fn(entry))) + } + } +} From 00231b954aa441b9e1a08555d00ad6654f3b187c Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 6 Aug 2024 16:38:16 -0400 Subject: [PATCH 25/31] format_appendix: wrap file column in coverage table For bbi, two code files exceed the column width, leading to them being truncated and, for reasons I don't understand, the heading for the next column ("Test Coverage") being split into two lines. Based on trial and error, all of bbi's files fit in the column when wrapped to a width of 45, including bbi's 45-character "parsers/nmparser/parse_parameter_structure.go". --- R/format-report.R | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/R/format-report.R b/R/format-report.R index d8289a6..8819361 100644 --- a/R/format-report.R +++ b/R/format-report.R @@ -796,7 +796,12 @@ format_appendix <- function(extra_notes_data, return_vals = FALSE, scorecard_typ cov_results_df <- extra_notes_data$cov_results_df if (is.numeric(cov_results_df$test_coverage)) { cov_results_df <- cov_results_df %>% - dplyr::mutate(test_coverage = sprintf("%.2f%%", .data$test_coverage)) %>% + dplyr::mutate( + code_file = wrap_text(.data$code_file, + width = 45, indent = TRUE, strict = TRUE + ), + test_coverage = sprintf("%.2f%%", .data$test_coverage) + ) %>% format_colnames_to_title() # Create flextable and format From 24bdf6ac9e19bae3223449ca1bc760aa7d3b05ac Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Sat, 10 Aug 2024 12:36:32 -0400 Subject: [PATCH 26/31] test-format-report.R: trigger covr failure case format_appendix() has a good amount of conditional handling. Add a test to make sure the covr failure arm isn't unintentionally made unreachable by upstream changes. 
--- tests/testthat/test-format-report.R | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tests/testthat/test-format-report.R b/tests/testthat/test-format-report.R index d01e1b4..44d6801 100644 --- a/tests/testthat/test-format-report.R +++ b/tests/testthat/test-format-report.R @@ -207,6 +207,18 @@ describe("formatting functions", { ) }) + it("format_appendix: covr failure", { + expect_output( + format_appendix( + list( + check_output = "anything", + cov_results_df = list(code_file = "File coverage failed") + ) + ), + "Calculating code coverage failed" + ) + }) + it("dependency versions", { setups <- pkg_dirs$pkg_setups_df From e17a7c9edc1dafc4da5f5beaed6545ac3e0edb64 Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Sat, 10 Aug 2024 11:58:08 -0400 Subject: [PATCH 27/31] format_appendix: replace empty coverage table with note External scorers are responsible for providing the *.coverage.json, and that file can have an overall coverage score but not per file scores. If there are no rows in the coverage table, replace the table with a note to make it clear that this is expected rather than a rendering error. Thanks to @seth127 for the suggestion. Re: https://github.com/metrumresearchgroup/mpn.scorecard/pull/70#discussion_r1712232315 --- R/format-report.R | 2 ++ tests/testthat/test-format-report-external.R | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/R/format-report.R b/R/format-report.R index 8819361..f36c51e 100644 --- a/R/format-report.R +++ b/R/format-report.R @@ -861,6 +861,8 @@ format_appendix <- function(extra_notes_data, return_vals = FALSE, scorecard_typ } else { stop("Unknown error type: ", err_type) } + } else if (nrow(cov_results_df) == 0) { + cat("Per file test coverage not provided.") } else { cat(knitr::knit_print(cov_results_flex)) } diff --git a/tests/testthat/test-format-report-external.R b/tests/testthat/test-format-report-external.R index 30302ea..a4323a6 100644 --- a/tests/testthat/test-format-report-external.R +++ b/tests/testthat/test-format-report-external.R @@ -66,4 +66,22 @@ describe("formatting functions with external scores", { "check\noutput\n" ) }) + + it("format_appendix: no per file coverage", { + results_dir <- local_create_external_results() + covfile <- get_result_path(results_dir, "coverage.json") + cov <- jsonlite::read_json(covfile) + cov[["files"]] <- list() + jsonlite::write_json(cov, covfile) + + expect_output( + format_appendix( + list( + check_output = "anything", + cov_results_df = read_coverage_results(results_dir) + ) + ), + "Per file test coverage not provided" + ) + }) }) From cc0b6963d9a53b63a61a6aeb7b329f4cf1e955c1 Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Mon, 12 Aug 2024 07:57:28 -0400 Subject: [PATCH 28/31] format_traceability_matrix: wrap test files at default characters When wrapping the entry point, code file, and documentation file, format_traceability_matrix() uses wrap_text's default wrap_sym value so that breaks are made at "/", "_", and "-" to get under the width. If those splits fail to get the string under the width, then the file is split at any character. For tests files, `wrap_sym = NULL` is passed, so wrap_text() goes straight for splitting at any character, leading to entries like .../foo/bar/baz_tes t.go Switch to preferring splits at "/", "_", and "-" so that intra "word" breaks are less likely. 
.../foo/bar/ baz_test.go Re: https://github.com/metrumresearchgroup/mpn.scorecard/pull/70#discussion_r1712216162 --- R/format-report.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R/format-report.R b/R/format-report.R index f36c51e..bee53bf 100644 --- a/R/format-report.R +++ b/R/format-report.R @@ -730,7 +730,7 @@ format_traceability_matrix <- function( ), # Tests can be longer due to page width (pg_width) settings (we make it wider) test_files = purrr::map_chr(.data$test_files, function(tests){ - wrap_text(tests, width = 40, strict = TRUE, wrap_sym = NULL) + wrap_text(tests, width = 40, strict = TRUE) }) ) } From bc4ab1aaea5dc2686833dd8b89fa7098d91492a2 Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Thu, 8 Aug 2024 10:27:08 -0400 Subject: [PATCH 29/31] summary: guard against external scores render_scorecard_summary() and summarize_package_results() have not been reworked to support external scores. We use these functions for MPN. For non-R packages, we don't yet have a need for these functions. Document that these functions don't accept external scores, and add a custom guard so that a more informative error is given if any results directory with external scores is passed. --- R/render-scorecard-summary.R | 13 +++++++++++-- man/build_risk_summary.Rd | 3 ++- man/render_scorecard_summary.Rd | 3 ++- man/summarize_package_results.Rd | 3 ++- tests/testthat/test-summarize-package-results.R | 4 ++++ 5 files changed, 21 insertions(+), 5 deletions(-) diff --git a/R/render-scorecard-summary.R b/R/render-scorecard-summary.R index d68e36d..86041a2 100644 --- a/R/render-scorecard-summary.R +++ b/R/render-scorecard-summary.R @@ -1,7 +1,8 @@ #' Render PDF summary of scorecards #' -#' @param result_dirs A vector of output directories +#' @param result_dirs A vector of output directories, each one produced by +#' [score_pkg()]. [external_scores] are not supported. #' @param snapshot A report subtitle indicating the grouping of these packages, such as an MPN snapshot. See details. #' @param out_dir Output directory for saving scorecard summary. If `NULL`, assumes all `result_dirs` point to the same output directory #' @inheritParams render_scorecard @@ -23,6 +24,8 @@ render_scorecard_summary <- function(result_dirs, snapshot <- paste('MPN Snapshot', snapshot) } + assert_no_external(result_dirs) + # Format overall scores and risk overall_risk_summary <- build_risk_summary(result_dirs, risk_breaks, out_dir) @@ -142,7 +145,7 @@ build_risk_summary <- function(result_dirs, #' @returns a dataframe #' @export summarize_package_results <- function(result_dirs){ - + assert_no_external(result_dirs) json_paths <- get_result_path(result_dirs, "scorecard.json") risk_summary_df <- tibble::tibble( @@ -170,3 +173,9 @@ summarize_package_results <- function(result_dirs){ return(risk_summary_df) } + +assert_no_external <- function(result_dirs, call = rlang::caller_env()) { + if (any(fs::file_exists(get_result_path(result_dirs, "pkg.json")))) { + abort("External scores are not supported.", call = call) + } +} diff --git a/man/build_risk_summary.Rd b/man/build_risk_summary.Rd index bc2d9f6..8ca72ab 100644 --- a/man/build_risk_summary.Rd +++ b/man/build_risk_summary.Rd @@ -7,7 +7,8 @@ build_risk_summary(result_dirs, risk_breaks, out_dir, append_out_dir = FALSE) } \arguments{ -\item{result_dirs}{A vector of output directories} +\item{result_dirs}{A vector of output directories, each one produced by +\code{\link[=score_pkg]{score_pkg()}}. 
\link{external_scores} are not supported.} \item{risk_breaks}{A numeric vector of length 2, with both numbers being between 0 and 1. These are used for the "breaks" when classifying scores diff --git a/man/render_scorecard_summary.Rd b/man/render_scorecard_summary.Rd index a7ef7b5..6c0d63c 100644 --- a/man/render_scorecard_summary.Rd +++ b/man/render_scorecard_summary.Rd @@ -13,7 +13,8 @@ render_scorecard_summary( ) } \arguments{ -\item{result_dirs}{A vector of output directories} +\item{result_dirs}{A vector of output directories, each one produced by +\code{\link[=score_pkg]{score_pkg()}}. \link{external_scores} are not supported.} \item{risk_breaks}{A numeric vector of length 2, with both numbers being between 0 and 1. These are used for the "breaks" when classifying scores diff --git a/man/summarize_package_results.Rd b/man/summarize_package_results.Rd index c0c0c90..52446b6 100644 --- a/man/summarize_package_results.Rd +++ b/man/summarize_package_results.Rd @@ -7,7 +7,8 @@ summarize_package_results(result_dirs) } \arguments{ -\item{result_dirs}{A vector of output directories} +\item{result_dirs}{A vector of output directories, each one produced by +\code{\link[=score_pkg]{score_pkg()}}. \link{external_scores} are not supported.} } \value{ a dataframe diff --git a/tests/testthat/test-summarize-package-results.R b/tests/testthat/test-summarize-package-results.R index 56e58aa..0a4f0d7 100644 --- a/tests/testthat/test-summarize-package-results.R +++ b/tests/testthat/test-summarize-package-results.R @@ -72,4 +72,8 @@ describe("summarize_package_results", { ) }) + it("aborts on external scores", { + rdir <- local_create_external_results() + expect_error(render_scorecard_summary(rdir), "not supported") + }) }) From 43d391ee68925090bd9046569e741df1578b4a52 Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Thu, 8 Aug 2024 10:27:08 -0400 Subject: [PATCH 30/31] readme: add short section on external scores --- README.Rmd | 4 ++++ README.md | 9 +++++++++ 2 files changed, 13 insertions(+) diff --git a/README.Rmd b/README.Rmd index c00efe0..bce77f9 100644 --- a/README.Rmd +++ b/README.Rmd @@ -147,3 +147,7 @@ browseURL(pdf_sum_path) The report provides additional context, session info, proof points, etc., but will render a table that looks like the one below: + +## Rendering scorecards for non-R packages + +The workflow described above focuses on **R** packages. Generating scorecards for other types of packages is also supported. In this case, the scoring happens outside of `mpn.scorecard`, and a directory of results is fed to `render_scorecard`. The required input format is described at [`?external_scores`](https://metrumresearchgroup.github.io/mpn.scorecard/reference/external_scores.html). diff --git a/README.md b/README.md index 498dc32..78a9e07 100644 --- a/README.md +++ b/README.md @@ -172,3 +172,12 @@ The report provides additional context, session info, proof points, etc., but will render a table that looks like the one below: + +## Rendering scorecards for non-R packages + +The workflow described above focuses on **R** packages. Generating +scorecards for other types of packages is also supported. In this case, +the scoring happens outside of `mpn.scorecard`, and a directory of +results is fed to `render_scorecard`. The required input format is +described at +[`?external_scores`](https://metrumresearchgroup.github.io/mpn.scorecard/reference/external_scores.html). 
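
For reference, a minimal sketch of the workflow the README section above describes, assuming an external scorer has already written the input files documented in `?external_scores`; the directory path and package name below are hypothetical:

```r
library(mpn.scorecard)

# Hypothetical results directory prepared by an external scorer. It must be
# named <package>_<version> and contain the <package>_<version>.pkg.json,
# .check.txt, .scores.json, and .metadata.json files (plus optional
# .coverage.json and .matrix.yaml).
results_dir <- "/data/scorecards/foo_1.2.3"

# Render the scorecard PDF into the same directory. With the default
# add_traceability = "auto", the traceability matrix section is included
# only when foo_1.2.3.matrix.yaml is present.
pdf_path <- render_scorecard(results_dir, add_traceability = "auto")
```
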
From 909459483b775207cdcd4acfeb46df38e9ad817d Mon Sep 17 00:00:00 2001 From: Kyle Meyer Date: Tue, 13 Aug 2024 08:39:42 -0400 Subject: [PATCH 31/31] format_appendix: decrease width for file in coverage table For bbi, the "o" in parsers/nmparser/parse_parameter_comments.go (44 characters) is partially truncated. Decrease the width by one to prevent this case, and, given these aren't fixed-width fonts, by one more to hopefully provide enough margin to avoid this in other cases. --- R/format-report.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R/format-report.R b/R/format-report.R index bee53bf..9e66beb 100644 --- a/R/format-report.R +++ b/R/format-report.R @@ -798,7 +798,7 @@ format_appendix <- function(extra_notes_data, return_vals = FALSE, scorecard_typ cov_results_df <- cov_results_df %>% dplyr::mutate( code_file = wrap_text(.data$code_file, - width = 45, indent = TRUE, strict = TRUE + width = 43, indent = TRUE, strict = TRUE ), test_coverage = sprintf("%.2f%%", .data$test_coverage) ) %>%
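
As a supplement to the format description above, a small sketch of the relationship between the external `*.coverage.json` and `*.scores.json` inputs: the testing "coverage" score is the "overall" coverage percentage divided by 100, and "check" is a pass/fail flag. The file paths are hypothetical, and only `jsonlite` is assumed:

```r
library(jsonlite)

# Hypothetical files written by an external scorer for package "foo" 1.2.3.
cov    <- read_json("foo_1.2.3/foo_1.2.3.coverage.json")
scores <- read_json("foo_1.2.3/foo_1.2.3.scores.json")

# "coverage" under "testing" should be the overall percentage divided by 100,
# e.g. an "overall" of 91.54265 corresponds to a score of 0.9154265 (rendered
# as "91.54%" in the scorecard).
stopifnot(isTRUE(all.equal(scores$testing$coverage, cov$overall / 100)))

# "check" records whether the package check passed (1) or failed (0).
stopifnot(scores$testing$check %in% c(0, 1))
```
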