diff --git a/DESCRIPTION b/DESCRIPTION index ec27b49..28b608a 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -44,7 +44,8 @@ Imports: stringr, tibble, tidyselect, - tidyr + tidyr, + yaml Suggests: devtools, pdftools, @@ -52,4 +53,4 @@ Suggests: testthat (>= 2.1.0), withr Roxygen: list(markdown = TRUE) -RoxygenNote: 7.3.1 +RoxygenNote: 7.3.2 diff --git a/NAMESPACE b/NAMESPACE index 33e98ba..7f27c55 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -25,4 +25,5 @@ importFrom(rlang,abort) importFrom(rlang,inform) importFrom(rlang,sym) importFrom(rlang,warn) +importFrom(tidyselect,all_of) importFrom(tidyselect,everything) diff --git a/R/aaa.R b/R/aaa.R index 72ddcab..b9c1dcf 100644 --- a/R/aaa.R +++ b/R/aaa.R @@ -1,5 +1,5 @@ -#' @importFrom tidyselect everything +#' @importFrom tidyselect everything all_of NULL @@ -12,8 +12,25 @@ RISK_LEVELS <- c("NA - unexpected", "High Risk", "Medium Risk", "Low Risk") utils::globalVariables(c(".")) +METRIC_CATEGORIES <- c("testing", "documentation", "maintenance", "transparency") DOCUMENTATION_METRICS <- c("has_vignettes", "has_website", "has_news") #, export_help) MAINTENANCE_METRICS <- c("has_maintainer", "news_current")#, "last_30_bugs_status") TRANSPARENCY_METRICS <- c("has_source_control", "has_bug_reports_url") -TESTING_METRICS <- c("check", "covr") +TESTING_METRICS <- c("check", "coverage") + +MPN_SCORECARD_FORMAT <- "1.0" + +SCORECARD_JSON_KEYS <- c( + "mpn_scorecard_format", + "mpn_scorecard_version", + "pkg_name", + "pkg_version", + "scorecard_type", + "out_dir", + "pkg_tar_path", + "md5sum_check", + "scores", + "metadata", + "category_scores" +) diff --git a/R/calc-overall-scores.R b/R/calc-overall-scores.R index c8616e3..9b0e128 100644 --- a/R/calc-overall-scores.R +++ b/R/calc-overall-scores.R @@ -14,8 +14,8 @@ calc_overall_scores <- function(scorelist) { scorelist$category_scores <- purrr::map(categories, ~{ indiv_scores <- unlist(scorelist$scores[[.x]]) # Penalize coverage failures: NA --> 0 - if("covr" %in% names(indiv_scores) && is.na(indiv_scores[["covr"]])){ - indiv_scores[["covr"]] <- 0 + if ("coverage" %in% names(indiv_scores) && is.na(indiv_scores[["coverage"]])) { + indiv_scores[["coverage"]] <- 0 } round(mean(indiv_scores), 3) }) %>% purrr::set_names(categories) diff --git a/R/create-extra-notes.R b/R/create-extra-notes.R index cd3a8bb..fdfa725 100644 --- a/R/create-extra-notes.R +++ b/R/create-extra-notes.R @@ -16,25 +16,25 @@ create_extra_notes <- function( covr_results <- readRDS(covr_path) if (inherits(covr_results$errors, "error")) { covr_results_df <- data.frame( - r_script = "File coverage failed", + code_file = "File coverage failed", test_coverage = conditionMessage(covr_results$errors) ) } else if (length(covr_results$coverage$filecoverage)) { covr_results_df <- covr_results$coverage$filecoverage %>% as.data.frame() covr_results_df <- covr_results_df %>% - mutate(r_script = row.names(covr_results_df)) %>% - dplyr::select("r_script", "test_coverage" = ".") + mutate(code_file = row.names(covr_results_df)) %>% + dplyr::select("code_file", "test_coverage" = ".") row.names(covr_results_df) <- NULL } else { covr_results_df <- data.frame( - r_script = "No coverage results", + code_file = "No coverage results", test_coverage = covr_results$notes ) } return( list( - covr_results_df = covr_results_df, + cov_results_df = covr_results_df, check_output = check_results$stdout ) ) diff --git a/R/external.R b/R/external.R new file mode 100644 index 0000000..2474b7f --- /dev/null +++ b/R/external.R @@ -0,0 +1,303 @@ +#' Rendering externally scored 
packages +#' +#' @description +#' +#' For R packages, mpn.scorecard handles both scoring and rendering. The +#' workflow is to first score the package with [score_pkg()], then to optionally +#' generate a traceability matrix with [make_traceability_matrix()], and finally +#' to render the scorecard with [render_scorecard()]. +#' +#' [render_scorecard()] also supports rendering packages that are scored outside +#' of mpn.scorecard. The scorer is responsible for preparing a results directory +#' with the set of files described below. +#' +#' @details +#' +#' ## Input files +#' +#' The following input files define results for the scorecard to render. These +#' must reside in a directory named `_`, following the naming +#' of the output directory returned by [score_pkg()]. +#' +#' * `_.pkg.json`: This file provides general information +#' about the package being scored. It requires the following keys: +#' +#' * `mpn_scorecard_format`: The version of the format in which these input +#' files are specified. This should be "1.0". +#' +#' * `pkg_name`, `pkg_version`: The name and version of the package. +#' +#' * `scorecard_type`: The type of package. Two types are currently +#' recognized and receive special handling: "R" and "cli". Everything else +#' falls back to default handling. +#' +#' If you're specifying "R" here, you should probably use [score_pkg()] +#' instead. +#' +#' Example: +#' +#' ``` +#' { +#' "mpn_scorecard_format": "1.0", +#' "pkg_name": "foo", +#' "pkg_version": "1.2.3", +#' "scorecard_type": "cli" +#' } +#' ``` +#' +#' * `_.check.txt`: Output from the package check. This is +#' included in the appendix verbatim. +#' +#' * `_.coverage.json`: Code coverage percentages. The values +#' will be rounded to two decimal places when rendering. This file is +#' optional. +#' +#' Example: +#' +#' ``` +#' { +#' "overall": 91.54265, +#' "files": [ +#' { +#' "file": "cmd/foo.go", +#' "coverage": 98.7643 +#' }, +#' { +#' "file": "cmd/bar.go", +#' "coverage": 84.321 +#' } +#' ] +#' } +#' ``` +#' +#' * `_.scores.json`: Scores for individual metrics grouped +#' into four categories: "testing", "documentation", "maintenance", and +#' "transparency". Each category must have at least one score. +#' +#' For the testing category, "check" is required. "check" should be 1 if the +#' tests passed and 0 if they failed. "coverage" is required if the +#' `_.coverage.json` file exists. The value +#' should match the "overall" value from `_.coverage.json`, +#' divided by 100. +#' +#' Example: +#' +#' ``` +#' { +#' "testing": { +#' "check": 1, +#' "coverage": 0.9154265 +#' }, +#' "documentation": { +#' "has_website": 1, +#' "has_news": 1 +#' }, +#' "maintenance": { +#' "has_maintainer": 1, +#' "news_current": 1 +#' }, +#' "transparency": { +#' "has_source_control": 1, +#' "has_bug_reports_url": 1 +#' } +#' } +#' ``` +#' +#' * `_.metadata.json`: Information to include in the +#' "System Info" table. The table will include the "date" and "executor" +#' values, as well as any key-value pairs defined under "info.env_vars" and +#' "info.sys". The "date" and "executor" keys are required. +#' +#' Example: +#' +#' ``` +#' { +#' "date": "2024-08-01 08:19:12", +#' "executor": "Bobert", +#' "info": { +#' "env_vars": { +#' "METWORX_VERSION": "22.09" +#' }, +#' "sys": { +#' "sysname": "Linux", +#' "machine": "x86_64" +#' } +#' } +#' } +#' ``` +#' +#' * `_.matrix.yaml`: A file defining entries to render as +#' the traceability matrix table. 
The traceability matrix table is meant to +#' map all user-facing entry points (e.g., exported functions or available +#' commands for a command-line executable) to the relevant documentation and +#' test files. +#' +#' The file should consist of a sequence of entries with the following items: +#' +#' * `entrypoint`: The name of the entry point. +#' +#' * `code`: The path to where the entry point is defined. +#' +#' * `doc`: The path to the entry point's main documentation. +#' +#' * `tests`: A list of paths where the entry point is tested. +#' +#' What the entry point is called in the table depends on `scorecard_type`. +#' For "cli", the column name is "Command" and, for "R", it is +#' "Exported Function". For all other types, it is "Entry Point". +#' +#' This file is optional if the `add_traceability` argument of +#' [render_scorecard()] is "auto" or `FALSE`. +#' +#' Example: +#' +#' ``` +#' - entrypoint: foo +#' skip: true +#' +#' - entrypoint: foo bar +#' code: cmd/bar.go +#' doc: docs/commands/foo_bar.md +#' tests: +#' - cmd/bar_test.go +#' - integration/bar_test.go +#' ``` +#' +#' @name external_scores +#' @aliases render_external +NULL + +get_render_params_external <- function(results_dir, risk_breaks, add_traceability) { + pkg_scores <- build_pkg_scores(results_dir) + + if (identical(add_traceability, "auto")) { + add_traceability <- file.exists(get_result_path(results_dir, "matrix.yaml")) + } + if (isTRUE(add_traceability)) { + tmat <- read_traceability_matrix(results_dir) + } else { + tmat <- NULL + } + + has_cov_score <- !is.null(pkg_scores[["scores"]][["testing"]][["coverage"]]) + cov_file <- get_result_path(results_dir, "coverage.json") + has_cov_file <- file.exists(cov_file) + if (has_cov_score && has_cov_file) { + cov <- read_coverage_results(results_dir) + } else if (has_cov_score) { + abort(c("Coverage is in scores but coverage file is missing:", cov_file)) + } else if (has_cov_file) { + abort("Coverage file exists but scores do not include a coverage value.") + } else { + cov <- NULL + } + + list( + set_title = paste("Scorecard:", pkg_scores$pkg_name, pkg_scores$pkg_version), + scorecard_footer = format_scorecard_version( + scorecard_ver = utils::packageVersion("mpn.scorecard") + ), + pkg_scores = format_scores_for_render(pkg_scores, risk_breaks), + comments_block = check_for_comments(results_dir), + extra_notes_data = list( + cov_results_df = cov, + check_output = read_check_output(results_dir) + ), + exports_df = tmat + ) +} + +build_pkg_scores <- function(results_dir) { + pkg_json <- get_result_path(results_dir, "pkg.json") + checkmate::assert_file_exists(pkg_json) + meta_json <- get_result_path(results_dir, "metadata.json") + checkmate::assert_file_exists(meta_json) + scores_json <- get_result_path(results_dir, "scores.json") + checkmate::assert_file_exists(scores_json) + + res <- c( + jsonlite::read_json(pkg_json), + scores = list(jsonlite::read_json(scores_json)), + metadata = list(jsonlite::read_json(meta_json)) + ) + + to_drop <- c( + "mpn_scorecard_version", "out_dir", "pkg_tar_path", "md5sum_check", + "category_scores" + ) + extkeys <- SCORECARD_JSON_KEYS[!SCORECARD_JSON_KEYS %in% to_drop] + assert_json_keys(res, extkeys) + assert_json_keys(res[["metadata"]], c("date", "executor")) + + scores <- res[["scores"]] + # In general, these score names aren't a hard-coded set, but there should at + # least be one per category... 
+ assert_json_keys(scores, METRIC_CATEGORIES) + nscores_per_cat <- purrr::map_int(scores[METRIC_CATEGORIES], length) + if (any(nscores_per_cat < 1)) { + no_scores <- names(scores[METRIC_CATEGORIES])[nscores_per_cat < 1] + abort(c("The following categories do not have any scores:", no_scores)) + } + # ... and check score should be present. + assert_json_keys(scores[["testing"]], "check") + + return(calc_overall_scores(res)) +} + +read_traceability_matrix <- function(results_dir) { + fname <- get_result_path(results_dir, "matrix.yaml") + checkmate::assert_file_exists(fname) + + entries <- yaml::read_yaml(fname) + entries <- purrr::discard(entries, function(e) isTRUE(e[["skip"]])) + for (e in entries) { + assert_keys(e, c("entrypoint", "code", "doc", "tests"), yaml::as.yaml) + } + + tibble::tibble( + entrypoint = purrr::map_chr(entries, "entrypoint"), + code_file = purrr::map_chr(entries, "code"), + documentation = purrr::map_chr(entries, "doc"), + test_files = purrr::map(entries, function(x) { + if (length(x[["tests"]])) x[["tests"]] else character() + }) + ) +} + +read_check_output <- function(results_dir) { + fname <- get_result_path(results_dir, "check.txt") + checkmate::assert_file_exists(fname) + return(readChar(fname, file.size(fname))) +} + +read_coverage_results <- function(results_dir) { + fname <- get_result_path(results_dir, "coverage.json") + checkmate::assert_file_exists(fname) + + data <- jsonlite::read_json(fname) + filecov <- data[["files"]] + for (e in filecov) { + assert_json_keys(e, c("file", "coverage")) + } + + tibble::tibble( + code_file = purrr::map_chr(filecov, "file"), + test_coverage = purrr::map_dbl(filecov, "coverage") + ) +} + +assert_json_keys <- function(entry, keys) { + assert_keys( + entry, + keys, + function(...) jsonlite::toJSON(..., auto_unbox = TRUE, pretty = TRUE) + ) +} + +assert_keys <- function(entry, keys, render_fn) { + for (k in keys) { + if (is.null(entry[[k]])) { + abort(paste0("entry is missing key: ", k, "\n", render_fn(entry))) + } + } +} diff --git a/R/format-report.R b/R/format-report.R index cdc7799..9e66beb 100644 --- a/R/format-report.R +++ b/R/format-report.R @@ -388,16 +388,22 @@ format_testing_scores <- function(formatted_pkg_scores){ #' #' @keywords internal format_metadata <- function(metadata_list){ - # Create system info table - executor_tbl <- data.frame(executor = metadata_list$executor) - data_tbl <- data.frame(date = metadata_list$date) - env_vars_tbl <- as.data.frame(t(unlist(metadata_list$info$env_vars))) - system_info_tbl <- as.data.frame(t(unlist(metadata_list$info$sys))) + date <- metadata_list[["date"]] + if (is.null(date)) { + abort("`date` required in `metadata_list`") + } + executor <- metadata_list[["executor"]] + if (is.null(executor)) { + abort("`executor` required in `metadata_list`") + } + info <- metadata_list[["info"]] + data <- c(date = date, executor = executor, info[["sys"]], info[["env_vars"]]) - all_info_tbl <- cbind(data_tbl, executor_tbl, system_info_tbl, env_vars_tbl) - all_info_tbl <- data.frame(Category = stringr::str_to_title(names(all_info_tbl)), - Value = unlist(all_info_tbl)) + all_info_tbl <- data.frame( + Category = stringr::str_to_title(names(data)), + Value = unname(unlist(data)) + ) # Create flextable system_info_flextable <- @@ -414,6 +420,10 @@ format_metadata <- function(metadata_list){ # format()), but "format_" is used for consistency with other functions in this # file. 
format_dependency_versions <- function(df) { + if (is.null(df)) { + return(invisible(NULL)) + } + out <- prepare_dependency_versions(df) if (inherits(out, "flextable")) { # Note: knit_print.flextable() does _not_ print to stdout. @@ -670,7 +680,8 @@ format_colnames_to_title <- function(df){ #' @keywords internal format_traceability_matrix <- function( exports_df, - wrap_cols = TRUE + wrap_cols = TRUE, + scorecard_type = "R" ){ checkmate::assert_logical(wrap_cols) @@ -683,21 +694,43 @@ format_traceability_matrix <- function( ) # Get testing directories for caption - test_dirs <- exported_func_df %>% tidyr::unnest(test_dirs) %>% dplyr::pull(test_dirs) %>% unique() - test_dirs <- test_dirs[test_dirs != ""] %>% paste(collapse = ", ") + if ("test_dirs" %in% names(exported_func_df)) { + test_dirs <- exported_func_df %>% tidyr::unnest(test_dirs) %>% dplyr::pull(test_dirs) %>% unique() + test_dirs <- test_dirs[test_dirs != ""] %>% paste(collapse = ", ") + # Remove testing directory column (not a column due to horizontal space limits) + exported_func_df <- exported_func_df %>% dplyr::select(-"test_dirs") + } else { + test_dirs <- NULL + } + + if ("exported_function" %in% names(exported_func_df)) { + # Align internal scoring with external format. + exported_func_df <- dplyr::rename(exported_func_df, + entrypoint = "exported_function" + ) + } - # Remove testing directory column (not a column due to horizontal space limits) - exported_func_df <- exported_func_df %>% dplyr::select(-"test_dirs") + entry_name <- switch(scorecard_type, + "R" = "Exported Function", + "cli" = "Command", + "Entry Point" + ) + exported_func_df <- dplyr::rename( + exported_func_df, + !!entry_name := "entrypoint" + ) # Format Table if(isTRUE(wrap_cols)){ exported_func_df <- exported_func_df %>% dplyr::mutate( - dplyr::across("exported_function":"documentation", ~ - wrap_text(.x, width = 24, indent = TRUE, strict = TRUE)), + dplyr::across( + all_of(c(entry_name, "code_file", "documentation")), + function(x) wrap_text(x, width = 24, indent = TRUE, strict = TRUE) + ), # Tests can be longer due to page width (pg_width) settings (we make it wider) test_files = purrr::map_chr(.data$test_files, function(tests){ - wrap_text(tests, width = 40, strict = TRUE, wrap_sym = NULL) + wrap_text(tests, width = 40, strict = TRUE) }) ) } @@ -705,11 +738,15 @@ format_traceability_matrix <- function( # Create flextable exported_func_flex <- flextable_formatted(exported_func_df, pg_width = 7, font_size = 9) %>% - flextable::set_caption("Traceability Matrix") %>% - flextable::add_footer_row( + flextable::set_caption("Traceability Matrix") + + if (!is.null(test_dirs)) { + exported_func_flex <- flextable::add_footer_row( + exported_func_flex, values = flextable::as_paragraph(glue::glue("Testing directories: {test_dirs}")), colwidths = c(4) ) + } # Add stripe and other formatting details exported_func_flex <- exported_func_flex %>% @@ -746,19 +783,29 @@ trace_matrix_notes <- function(exports_df){ #' @param return_vals Logical (T/F). If `TRUE`, return the objects instead of printing them out for `rmarkdown`. Used for testing. 
#' #' @keywords internal -format_appendix <- function(extra_notes_data, return_vals = FALSE){ - sub_header_strs <- c("\n## R CMD Check\n\n", "\n## Test coverage\n\n") +format_appendix <- function(extra_notes_data, return_vals = FALSE, scorecard_type = "R") { + check_title <- if (identical(scorecard_type, "R")) { + "R CMD Check" + } else { + "Check output" + } + sub_header_strs <- c(paste0("\n## ", check_title, "\n\n"), "\n## Test coverage\n\n") ### Covr Results ### # Format Table - covr_results_df <- extra_notes_data$covr_results_df - if (is.numeric(covr_results_df$test_coverage)) { - covr_results_df <- covr_results_df %>% - dplyr::mutate(test_coverage = paste0(.data$test_coverage, "%")) %>% + cov_results_df <- extra_notes_data$cov_results_df + if (is.numeric(cov_results_df$test_coverage)) { + cov_results_df <- cov_results_df %>% + dplyr::mutate( + code_file = wrap_text(.data$code_file, + width = 43, indent = TRUE, strict = TRUE + ), + test_coverage = sprintf("%.2f%%", .data$test_coverage) + ) %>% format_colnames_to_title() # Create flextable and format - covr_results_flex <- flextable_formatted(covr_results_df, pg_width = 4) %>% + cov_results_flex <- flextable_formatted(cov_results_df, pg_width = 4) %>% flextable::set_caption("Test Coverage") %>% flextable::align(align = "right", part = "all", j=2) %>% flextable::add_footer_row( @@ -768,11 +815,11 @@ format_appendix <- function(extra_notes_data, return_vals = FALSE){ )), colwidths = c(2) ) - covr_results_flex <- covr_results_flex %>% flex_header() %>% + cov_results_flex <- cov_results_flex %>% flex_header() %>% flex_footer(footer_bg = "transparent", footer_ft = "black") %>% flex_stripe(border = FALSE) } else { - covr_results_flex <- NULL + cov_results_flex <- NULL } ### R CMD Check Results ### @@ -781,7 +828,7 @@ format_appendix <- function(extra_notes_data, return_vals = FALSE){ if(isTRUE(return_vals)){ return( list( - covr_results_flex = covr_results_flex, + cov_results_flex = cov_results_flex, check_output = check_output ) ) @@ -790,26 +837,34 @@ format_appendix <- function(extra_notes_data, return_vals = FALSE){ # R CMD Check cat(sub_header_strs[1]) cat_verbatim(check_output) + + if (is.null(cov_results_df)) { + # This is an externally scored package without coverage. + return(invisible(NULL)) + } + cat("\\newpage") # Coverage cat(sub_header_strs[2]) cat("\n") - if (is.null(covr_results_flex)) { - err_type <- covr_results_df$r_script + if (is.null(cov_results_flex)) { + err_type <- cov_results_df$code_file if (identical(err_type, "File coverage failed")) { cat("\n\nCalculating code coverage failed with following error:\n\n") - cat_verbatim(covr_results_df$test_coverage) + cat_verbatim(cov_results_df$test_coverage) } else if (identical(err_type, "No coverage results")) { cat( "\n\n", "Unable to calculate coverage: ", - covr_results_df$test_coverage, "\n\n" + cov_results_df$test_coverage, "\n\n" ) } else { stop("Unknown error type: ", err_type) } + } else if (nrow(cov_results_df) == 0) { + cat("Per file test coverage not provided.") } else { - cat(knitr::knit_print(covr_results_flex)) + cat(knitr::knit_print(cov_results_flex)) } cat("\n") diff --git a/R/render-scorecard-summary.R b/R/render-scorecard-summary.R index d68e36d..86041a2 100644 --- a/R/render-scorecard-summary.R +++ b/R/render-scorecard-summary.R @@ -1,7 +1,8 @@ #' Render PDF summary of scorecards #' -#' @param result_dirs A vector of output directories +#' @param result_dirs A vector of output directories, each one produced by +#' [score_pkg()]. 
[external_scores] are not supported. #' @param snapshot A report subtitle indicating the grouping of these packages, such as an MPN snapshot. See details. #' @param out_dir Output directory for saving scorecard summary. If `NULL`, assumes all `result_dirs` point to the same output directory #' @inheritParams render_scorecard @@ -23,6 +24,8 @@ render_scorecard_summary <- function(result_dirs, snapshot <- paste('MPN Snapshot', snapshot) } + assert_no_external(result_dirs) + # Format overall scores and risk overall_risk_summary <- build_risk_summary(result_dirs, risk_breaks, out_dir) @@ -142,7 +145,7 @@ build_risk_summary <- function(result_dirs, #' @returns a dataframe #' @export summarize_package_results <- function(result_dirs){ - + assert_no_external(result_dirs) json_paths <- get_result_path(result_dirs, "scorecard.json") risk_summary_df <- tibble::tibble( @@ -170,3 +173,9 @@ summarize_package_results <- function(result_dirs){ return(risk_summary_df) } + +assert_no_external <- function(result_dirs, call = rlang::caller_env()) { + if (any(fs::file_exists(get_result_path(result_dirs, "pkg.json")))) { + abort("External scores are not supported.", call = call) + } +} diff --git a/R/render-scorecard.R b/R/render-scorecard.R index fa2129e..49f9c08 100644 --- a/R/render-scorecard.R +++ b/R/render-scorecard.R @@ -1,6 +1,10 @@ -#' Take a JSON from score_pkg() and render a pdf +#' Render a scorecard PDF from a directory of results #' -#' @param results_dir directory containing json file and individual results. Output file path from [score_pkg()] +#' Create a scorecard from a results directory prepared by [score_pkg()] or an +#' external scorer (see [external_scores]). +#' +#' @param results_dir Directory with scoring results. This is the path returned +#' by [score_pkg()]. #' @param risk_breaks A numeric vector of length 2, with both numbers being #' between 0 and 1. These are used for the "breaks" when classifying scores #' into Risk categories. For example, for the default value of `c(0.3, 0.7)`, @@ -16,7 +20,6 @@ #' **Note** that it must follow the naming convention of `_.comments.txt` #' #' If a traceability matrix is found in `results_dir`, it will automatically be included unless overridden via `add_traceability`. -#' **Note** that it must follow the naming convention of `_.export_doc.rds` #' #' @export render_scorecard <- function( @@ -25,16 +28,37 @@ render_scorecard <- function( overwrite = FALSE, add_traceability = "auto" ) { + checkmate::assert_numeric(risk_breaks, lower = 0, upper = 1, len = 2) - json_path <- get_result_path(results_dir, "scorecard.json") + checkmate::assert_string(results_dir) + out_file <- get_result_path(results_dir, "scorecard.pdf") + check_exists_and_overwrite(out_file, overwrite) - # input checking + if (file.exists(get_result_path(results_dir, "pkg.json"))) { + param_fn <- get_render_params_external + } else { + param_fn <- get_render_params + } + + rendered_file <- rmarkdown::render( + system.file(SCORECARD_RMD_TEMPLATE, package = "mpn.scorecard", mustWork = TRUE), # TODO: do we want to expose this to users, to pass their own custom template? 
+ output_dir = results_dir, + output_file = basename(out_file), + quiet = TRUE, + params = param_fn(results_dir, risk_breaks, add_traceability) + ) + + return(invisible(rendered_file)) +} + +get_render_params <- function(results_dir, risk_breaks, add_traceability) { + json_path <- get_result_path(results_dir, "scorecard.json") checkmate::assert_string(json_path) checkmate::assert_file_exists(json_path) - checkmate::assert_numeric(risk_breaks, lower = 0, upper = 1, len = 2) # load scores from JSON pkg_scores <- jsonlite::fromJSON(json_path) + pkg_scores <- scorecard_json_compat(pkg_scores, json_path) check_scores_valid(pkg_scores, json_path) # map scores to risk and format into tables to be written to PDF @@ -42,12 +66,6 @@ render_scorecard <- function( comments_block <- check_for_comments(results_dir) - # Output file - checkmate::assert_string(results_dir, null.ok = TRUE) - out_file <- get_result_path(results_dir, "scorecard.pdf") - check_exists_and_overwrite(out_file, overwrite) - - # Appendix extra_notes_data <- create_extra_notes(results_dir) @@ -62,28 +80,38 @@ render_scorecard <- function( as.character(utils::packageVersion("mpn.scorecard")) ) - # Render rmarkdown - rendered_file <- rmarkdown::render( - system.file(SCORECARD_RMD_TEMPLATE, package = "mpn.scorecard", mustWork = TRUE), # TODO: do we want to expose this to users, to pass their own custom template? - output_dir = results_dir, - output_file = basename(out_file), - quiet = TRUE, - params = list( - set_title = paste("Scorecard:", pkg_scores$pkg_name, pkg_scores$pkg_version), - scorecard_footer = mpn_scorecard_ver, - pkg_scores = formatted_pkg_scores, - comments_block = comments_block, - extra_notes_data = extra_notes_data, - exports_df = exports_df, - dep_versions_df = dep_versions_df - ) + list( + set_title = paste("Scorecard:", pkg_scores$pkg_name, pkg_scores$pkg_version), + scorecard_footer = mpn_scorecard_ver, + pkg_scores = formatted_pkg_scores, + comments_block = comments_block, + extra_notes_data = extra_notes_data, + exports_df = exports_df, + dep_versions_df = dep_versions_df ) +} - # Render to PDF, invisibly return the path to the PDF - return(invisible(rendered_file)) +scorecard_json_compat <- function(data, path) { + # Handle files written by score_pkg() before it included scorecard_type. + if (is.null(data[["scorecard_type"]])) { + data[["scorecard_type"]] <- "R" + } -} + # Handle files written by score_pkg() before it renamed "covr" to "coverage". 
+ tscores <- data[["scores"]][["testing"]] + if (is.null(tscores[["coverage"]])) { + covr_val <- tscores[["covr"]] + if (is.null(covr_val)) { + abort(paste("Expected either 'coverage' or 'covr' value in", path)) + } + tscores[["coverage"]] <- covr_val + tscores[["covr"]] <- NULL + data[["scores"]][["testing"]] <- tscores + } + + return(data) +} #' Prepare the raw risk scores to be rendered into PDF #' @@ -95,6 +123,11 @@ render_scorecard <- function( #' #' @keywords internal format_scores_for_render <- function(pkg_scores, risk_breaks = c(0.3, 0.7)) { + stype <- pkg_scores[["scorecard_type"]] + if (is.null(stype)) { + abort("bug: scorecard_type is unexpectedly absent from pkg_scores") + } + check_label <- if (identical(stype, "R")) "R CMD CHECK" else "check" # build list of formatted tibbles pkg_scores$formatted <- list() @@ -117,7 +150,9 @@ format_scores_for_render <- function(pkg_scores, risk_breaks = c(0.3, 0.7)) { score = ifelse(.x == "NA", NA_integer_, .x) ) %>% mutate( - result = map_answer(.data$score, .data$criteria), + result = map_answer(.data$score, .data$criteria, + include_check_score = identical(stype, "R") + ), risk = map_risk(.data$score, risk_breaks) ) }) %>% purrr::list_rbind() @@ -126,9 +161,11 @@ format_scores_for_render <- function(pkg_scores, risk_breaks = c(0.3, 0.7)) { if(category_name == "testing"){ formatted_df <- formatted_df %>% mutate( result = ifelse( - (.data$criteria == "covr" & !is.na(.data$score)), paste0(.data$score*100, "%"), .data$result + .data$criteria == "coverage" & !is.na(.data$score), + sprintf("%.2f%%", .data$score * 100), + .data$result ), - criteria = ifelse(.data$criteria == "check", "R CMD CHECK", "coverage") + criteria = ifelse(.data$criteria == "check", check_label, "coverage") ) } @@ -165,6 +202,7 @@ map_risk <- function(scores, risk_breaks) { #' @param scores vector of risk scores #' @param criteria vector of criteria names #' @param answer_breaks breaks determining 'Yes'/'Passing' or 'No'/'Failed'. `NA` has special handling. See details. +#' @param include_check_score Whether to include score in the result check. 
#' #' @details #' If value is not found in `answer_breaks`, it is skipped over @@ -174,7 +212,8 @@ map_risk <- function(scores, risk_breaks) { #' covr is skipped over unless it is `NA` (indicates test failures), as this is formatted as a percent separately #' #' @keywords internal -map_answer <- function(scores, criteria, answer_breaks = c(0, 1)) { +map_answer <- function(scores, criteria, answer_breaks = c(0, 1), + include_check_score = TRUE) { checkmate::assert_numeric(scores, lower = 0, upper = 1) answer_breaks <- sort(answer_breaks) purrr::map2_chr(scores, criteria, ~ { @@ -183,9 +222,13 @@ map_answer <- function(scores, criteria, answer_breaks = c(0, 1)) { if(is.na(.x) || .x == answer_breaks[1]) { "Failed" } else { - paste0("Passing (score: ", .x, ")") + if (isTRUE(include_check_score)) { + paste0("Passing (score: ", .x, ")") + } else { + "Passing" + } } - }else if(.y != "covr"){ + } else if (.y != "coverage") { if (.x == answer_breaks[1]) { "No" } else if(.x == answer_breaks[2]) { diff --git a/R/score-pkg.R b/R/score-pkg.R index 9d6d5b3..ba113ea 100644 --- a/R/score-pkg.R +++ b/R/score-pkg.R @@ -54,9 +54,11 @@ score_pkg <- function( # start building up scorecard list res <- list( + mpn_scorecard_format = MPN_SCORECARD_FORMAT, mpn_scorecard_version = mpn_scorecard_ver, pkg_name = pkg_name, pkg_version = pkg_ver, + scorecard_type = "R", out_dir = out_dir, pkg_tar_path = pkg, md5sum_check = tools::md5sum(pkg), @@ -75,7 +77,7 @@ score_pkg <- function( # run check and covr and write results to disk rcmdcheck_args$path <- pkg res$scores$testing$check <- add_rcmdcheck(out_dir, rcmdcheck_args) # use tarball - res$scores$testing$covr <- add_coverage( + res$scores$testing$coverage <- add_coverage( pkg_source_path, # must use untarred package dir out_dir, covr_timeout diff --git a/R/util.R b/R/util.R index 50888ab..fb20cf8 100644 --- a/R/util.R +++ b/R/util.R @@ -30,8 +30,12 @@ check_exists_and_overwrite <- function(path, overwrite) { get_result_path <- function( out_dir, ext = c( - "scorecard.json", "scorecard.pdf", "check.rds", "covr.rds", "comments.txt", - "summary.pdf", "export_doc.rds", "mitigation.txt" + "scorecard.json", "scorecard.pdf", "comments.txt", "mitigation.txt", + # Internally scored + "check.rds", "covr.rds", "export_doc.rds", "summary.pdf", + # Externally scored + "check.txt", "coverage.json", "matrix.yaml", "metadata.json", "pkg.json", + "scores.json" ) ){ diff --git a/README.Rmd b/README.Rmd index c00efe0..bce77f9 100644 --- a/README.Rmd +++ b/README.Rmd @@ -147,3 +147,7 @@ browseURL(pdf_sum_path) The report provides additional context, session info, proof points, etc., but will render a table that looks like the one below: + +## Rendering scorecards for non-R packages + +The workflow described above focuses on **R** packages. Generating scorecards for other types of packages is also supported. In this case, the scoring happens outside of `mpn.scorecard`, and a directory of results is fed to `render_scorecard`. The required input format is described at [`?external_scores`](https://metrumresearchgroup.github.io/mpn.scorecard/reference/external_scores.html). diff --git a/README.md b/README.md index 498dc32..78a9e07 100644 --- a/README.md +++ b/README.md @@ -172,3 +172,12 @@ The report provides additional context, session info, proof points, etc., but will render a table that looks like the one below: + +## Rendering scorecards for non-R packages + +The workflow described above focuses on **R** packages. Generating +scorecards for other types of packages is also supported. 
In this case, +the scoring happens outside of `mpn.scorecard`, and a directory of +results is fed to `render_scorecard`. The required input format is +described at +[`?external_scores`](https://metrumresearchgroup.github.io/mpn.scorecard/reference/external_scores.html). diff --git a/_pkgdown.yml b/_pkgdown.yml index bd15915..3edc577 100644 --- a/_pkgdown.yml +++ b/_pkgdown.yml @@ -22,3 +22,4 @@ reference: contents: - render_scorecard - render_scorecard_summary + - external_scores diff --git a/inst/templates/scorecard-template.Rmd b/inst/templates/scorecard-template.Rmd index 386c8c1..6145e83 100644 --- a/inst/templates/scorecard-template.Rmd +++ b/inst/templates/scorecard-template.Rmd @@ -21,11 +21,11 @@ header-includes: params: set_title: "Scorecard" scorecard_footer: "Generated with mpn.scorecard" - pkg_scores: "`r list()`" - comments_block: "r NULL" - extra_notes_data: "r NULL" - exports_df: "r NULL" - dep_versions_df: "`r NA`" + pkg_scores: null + comments_block: null + extra_notes_data: null + exports_df: null + dep_versions_df: null title: > `r params$set_title` subtitle: 'MPN Scorecard' @@ -40,6 +40,11 @@ knitr::opts_chunk$set(echo = FALSE, warning = FALSE, message = FALSE, library(dplyr) library(flextable) set_flextable_defaults(font.color = "#333333", border.color = "#999999", padding = 4, fonts_ignore = TRUE) + +scorecard_type <- params[["pkg_scores"]][["scorecard_type"]] +if (is.null(scorecard_type)) { + stop("pkg_scores unexpectedly does not have 'scorecard_type' field") +} ``` ```{r setup_data} @@ -85,7 +90,7 @@ trace_matrix_notes(params$exports_df) ``` ```{r} -format_traceability_matrix(params$exports_df) +format_traceability_matrix(params$exports_df, scorecard_type = scorecard_type) ``` \newpage @@ -98,14 +103,11 @@ format_metadata(formatted_pkg_scores$metadata) ``` ```{r, results = "asis"} -if (identical(params$dep_versions_df, NA)) { - stop("Required versions_df parameter not specified.") -} format_dependency_versions(params$dep_versions_df) ``` ```{r, results = "asis"} -format_appendix(params$extra_notes_data) +format_appendix(params$extra_notes_data, scorecard_type = scorecard_type) ``` diff --git a/man/build_risk_summary.Rd b/man/build_risk_summary.Rd index bc2d9f6..8ca72ab 100644 --- a/man/build_risk_summary.Rd +++ b/man/build_risk_summary.Rd @@ -7,7 +7,8 @@ build_risk_summary(result_dirs, risk_breaks, out_dir, append_out_dir = FALSE) } \arguments{ -\item{result_dirs}{A vector of output directories} +\item{result_dirs}{A vector of output directories, each one produced by +\code{\link[=score_pkg]{score_pkg()}}. \link{external_scores} are not supported.} \item{risk_breaks}{A numeric vector of length 2, with both numbers being between 0 and 1. These are used for the "breaks" when classifying scores diff --git a/man/check_for_comments.Rd b/man/check_for_comments.Rd index aa4fa1f..c6380d7 100644 --- a/man/check_for_comments.Rd +++ b/man/check_for_comments.Rd @@ -7,7 +7,8 @@ check_for_comments(results_dir) } \arguments{ -\item{results_dir}{directory containing json file and individual results. Output file path from \code{\link[=score_pkg]{score_pkg()}}} +\item{results_dir}{Directory with scoring results. 
This is the path returned +by \code{\link[=score_pkg]{score_pkg()}}.} } \description{ Look for comment file and return contents if is found diff --git a/man/check_for_traceability.Rd b/man/check_for_traceability.Rd index dcd9330..ac0e815 100644 --- a/man/check_for_traceability.Rd +++ b/man/check_for_traceability.Rd @@ -7,7 +7,8 @@ check_for_traceability(results_dir, add_traceability) } \arguments{ -\item{results_dir}{directory containing json file and individual results. Output file path from \code{\link[=score_pkg]{score_pkg()}}} +\item{results_dir}{Directory with scoring results. This is the path returned +by \code{\link[=score_pkg]{score_pkg()}}.} \item{add_traceability}{Logical (T/F). If \code{TRUE}, append a table that links package functionality to the documentation and test files. Defaults to "auto", which will include the matrix if found.} diff --git a/man/create_extra_notes.Rd b/man/create_extra_notes.Rd index 2a8fc07..cf43b08 100644 --- a/man/create_extra_notes.Rd +++ b/man/create_extra_notes.Rd @@ -7,7 +7,8 @@ create_extra_notes(results_dir) } \arguments{ -\item{results_dir}{directory containing json file and individual results. Output file path from \code{\link[=score_pkg]{score_pkg()}}} +\item{results_dir}{Directory with scoring results. This is the path returned +by \code{\link[=score_pkg]{score_pkg()}}.} } \description{ Create extra notes summarizing the covr & rcmdcheck outputs, and documentation diff --git a/man/external_scores.Rd b/man/external_scores.Rd new file mode 100644 index 0000000..dc02571 --- /dev/null +++ b/man/external_scores.Rd @@ -0,0 +1,156 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/external.R +\name{external_scores} +\alias{external_scores} +\alias{render_external} +\title{Rendering externally scored packages} +\description{ +For R packages, mpn.scorecard handles both scoring and rendering. The +workflow is to first score the package with \code{\link[=score_pkg]{score_pkg()}}, then to optionally +generate a traceability matrix with \code{\link[=make_traceability_matrix]{make_traceability_matrix()}}, and finally +to render the scorecard with \code{\link[=render_scorecard]{render_scorecard()}}. + +\code{\link[=render_scorecard]{render_scorecard()}} also supports rendering packages that are scored outside +of mpn.scorecard. The scorer is responsible for preparing a results directory +with the set of files described below. +} +\details{ +\subsection{Input files}{ + +The following input files define results for the scorecard to render. These +must reside in a directory named \verb{_}, following the naming +of the output directory returned by \code{\link[=score_pkg]{score_pkg()}}. +\itemize{ +\item \verb{_.pkg.json}: This file provides general information +about the package being scored. It requires the following keys: +\itemize{ +\item \code{mpn_scorecard_format}: The version of the format in which these input +files are specified. This should be "1.0". +\item \code{pkg_name}, \code{pkg_version}: The name and version of the package. +\item \code{scorecard_type}: The type of package. Two types are currently +recognized and receive special handling: "R" and "cli". Everything else +falls back to default handling. + +If you're specifying "R" here, you should probably use \code{\link[=score_pkg]{score_pkg()}} +instead. +} + +Example: + +\if{html}{\out{
}}\preformatted{\{ + "mpn_scorecard_format": "1.0", + "pkg_name": "foo", + "pkg_version": "1.2.3", + "scorecard_type": "cli" +\} +}\if{html}{\out{
}} +\item \verb{_.check.txt}: Output from the package check. This is +included in the appendix verbatim. +\item \verb{_.coverage.json}: Code coverage percentages. The values +will be rounded to two decimal places when rendering. This file is +optional. + +Example: + +\if{html}{\out{
}}\preformatted{\{ + "overall": 91.54265, + "files": [ + \{ + "file": "cmd/foo.go", + "coverage": 98.7643 + \}, + \{ + "file": "cmd/bar.go", + "coverage": 84.321 + \} + ] +\} +}\if{html}{\out{
}} +\item \verb{_.scores.json}: Scores for individual metrics grouped +into four categories: "testing", "documentation", "maintenance", and +"transparency". Each category must have at least one score. + +For the testing category, "check" is required. "check" should be 1 if the +tests passed and 0 if they failed. "coverage" is required if the +\verb{_.coverage.json} file exists. The value +should match the "overall" value from \verb{_.coverage.json}, +divided by 100. + +Example: + +\if{html}{\out{
}}\preformatted{\{ + "testing": \{ + "check": 1, + "coverage": 0.9154265 + \}, + "documentation": \{ + "has_website": 1, + "has_news": 1 + \}, + "maintenance": \{ + "has_maintainer": 1, + "news_current": 1 + \}, + "transparency": \{ + "has_source_control": 1, + "has_bug_reports_url": 1 + \} +\} +}\if{html}{\out{
}} +\item \verb{_.metadata.json}: Information to include in the +"System Info" table. The table will include the "date" and "executor" +values, as well as any key-value pairs defined under "info.env_vars" and +"info.sys". The "date" and "executor" keys are required. + +Example: + +\if{html}{\out{
}}\preformatted{\{ + "date": "2024-08-01 08:19:12", + "executor": "Bobert", + "info": \{ + "env_vars": \{ + "METWORX_VERSION": "22.09" + \}, + "sys": \{ + "sysname": "Linux", + "machine": "x86_64" + \} + \} +\} +}\if{html}{\out{
}} +\item \verb{_.matrix.yaml}: A file defining entries to render as +the traceability matrix table. The traceability matrix table is meant to +map all user-facing entry points (e.g., exported functions or available +commands for a command-line executable) to the relevant documentation and +test files. + +The file should consist of a sequence of entries with the following items: +\itemize{ +\item \code{entrypoint}: The name of the entry point. +\item \code{code}: The path to where the entry point is defined. +\item \code{doc}: The path to the entry point's main documentation. +\item \code{tests}: A list of paths where the entry point is tested. +} + +What the entry point is called in the table depends on \code{scorecard_type}. +For "cli", the column name is "Command" and, for "R", it is +"Exported Function". For all other types, it is "Entry Point". + +This file is optional if the \code{add_traceability} argument of +\code{\link[=render_scorecard]{render_scorecard()}} is "auto" or \code{FALSE}. + +Example: + +\if{html}{\out{
}}\preformatted{- entrypoint: foo + skip: true + +- entrypoint: foo bar + code: cmd/bar.go + doc: docs/commands/foo_bar.md + tests: + - cmd/bar_test.go + - integration/bar_test.go +}\if{html}{\out{
}} +} +} +} diff --git a/man/format_appendix.Rd b/man/format_appendix.Rd index c0966de..136af72 100644 --- a/man/format_appendix.Rd +++ b/man/format_appendix.Rd @@ -4,7 +4,7 @@ \alias{format_appendix} \title{Format Appendix} \usage{ -format_appendix(extra_notes_data, return_vals = FALSE) +format_appendix(extra_notes_data, return_vals = FALSE, scorecard_type = "R") } \arguments{ \item{extra_notes_data}{named list. Output of \code{\link[=create_extra_notes]{create_extra_notes()}}} diff --git a/man/format_traceability_matrix.Rd b/man/format_traceability_matrix.Rd index 72460f0..f3f4609 100644 --- a/man/format_traceability_matrix.Rd +++ b/man/format_traceability_matrix.Rd @@ -4,7 +4,7 @@ \alias{format_traceability_matrix} \title{Format Traceability Matrix} \usage{ -format_traceability_matrix(exports_df, wrap_cols = TRUE) +format_traceability_matrix(exports_df, wrap_cols = TRUE, scorecard_type = "R") } \arguments{ \item{exports_df}{tibble. Output of \code{\link[=make_traceability_matrix]{make_traceability_matrix()}}} diff --git a/man/get_result_path.Rd b/man/get_result_path.Rd index 134f11e..19c86c9 100644 --- a/man/get_result_path.Rd +++ b/man/get_result_path.Rd @@ -6,8 +6,9 @@ \usage{ get_result_path( out_dir, - ext = c("scorecard.json", "scorecard.pdf", "check.rds", "covr.rds", "comments.txt", - "summary.pdf", "export_doc.rds", "mitigation.txt") + ext = c("scorecard.json", "scorecard.pdf", "comments.txt", "mitigation.txt", + "check.rds", "covr.rds", "export_doc.rds", "summary.pdf", "check.txt", + "coverage.json", "matrix.yaml", "metadata.json", "pkg.json", "scores.json") ) } \arguments{ diff --git a/man/make_traceability_matrix.Rd b/man/make_traceability_matrix.Rd index d973640..d7110f3 100644 --- a/man/make_traceability_matrix.Rd +++ b/man/make_traceability_matrix.Rd @@ -9,7 +9,8 @@ make_traceability_matrix(pkg_tar_path, results_dir = NULL, verbose = FALSE) \arguments{ \item{pkg_tar_path}{path to a tarball} -\item{results_dir}{directory containing json file and individual results. Output file path from \code{\link[=score_pkg]{score_pkg()}}} +\item{results_dir}{Directory with scoring results. This is the path returned +by \code{\link[=score_pkg]{score_pkg()}}.} \item{verbose}{Logical (\code{TRUE}/\code{FALSE}). If \code{TRUE}, show any warnings/messages per function.} } diff --git a/man/map_answer.Rd b/man/map_answer.Rd index 0cc0e98..ae59439 100644 --- a/man/map_answer.Rd +++ b/man/map_answer.Rd @@ -4,7 +4,12 @@ \alias{map_answer} \title{Use answer_breaks to map results into character strings} \usage{ -map_answer(scores, criteria, answer_breaks = c(0, 1)) +map_answer( + scores, + criteria, + answer_breaks = c(0, 1), + include_check_score = TRUE +) } \arguments{ \item{scores}{vector of risk scores} @@ -12,6 +17,8 @@ map_answer(scores, criteria, answer_breaks = c(0, 1)) \item{criteria}{vector of criteria names} \item{answer_breaks}{breaks determining 'Yes'/'Passing' or 'No'/'Failed'. \code{NA} has special handling. 
See details.} + +\item{include_check_score}{Whether to include score in the result check.} } \description{ Use answer_breaks to map results into character strings diff --git a/man/render_scorecard.Rd b/man/render_scorecard.Rd index ecdec07..5ed31c5 100644 --- a/man/render_scorecard.Rd +++ b/man/render_scorecard.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/render-scorecard.R \name{render_scorecard} \alias{render_scorecard} -\title{Take a JSON from score_pkg() and render a pdf} +\title{Render a scorecard PDF from a directory of results} \usage{ render_scorecard( results_dir, @@ -12,7 +12,8 @@ render_scorecard( ) } \arguments{ -\item{results_dir}{directory containing json file and individual results. Output file path from \code{\link[=score_pkg]{score_pkg()}}} +\item{results_dir}{Directory with scoring results. This is the path returned +by \code{\link[=score_pkg]{score_pkg()}}.} \item{risk_breaks}{A numeric vector of length 2, with both numbers being between 0 and 1. These are used for the "breaks" when classifying scores @@ -26,12 +27,12 @@ is "Medium Risk", and \verb{0.7 <= score < 1} is "High Risk".} Defaults to "auto", which will include the matrix if found.} } \description{ -Take a JSON from score_pkg() and render a pdf +Create a scorecard from a results directory prepared by \code{\link[=score_pkg]{score_pkg()}} or an +external scorer (see \link{external_scores}). } \details{ If a plain text comments file is found in \code{results_dir}, it will automatically be included. \strong{Note} that it must follow the naming convention of \verb{_.comments.txt} If a traceability matrix is found in \code{results_dir}, it will automatically be included unless overridden via \code{add_traceability}. -\strong{Note} that it must follow the naming convention of \verb{_.export_doc.rds} } diff --git a/man/render_scorecard_summary.Rd b/man/render_scorecard_summary.Rd index a7ef7b5..6c0d63c 100644 --- a/man/render_scorecard_summary.Rd +++ b/man/render_scorecard_summary.Rd @@ -13,7 +13,8 @@ render_scorecard_summary( ) } \arguments{ -\item{result_dirs}{A vector of output directories} +\item{result_dirs}{A vector of output directories, each one produced by +\code{\link[=score_pkg]{score_pkg()}}. \link{external_scores} are not supported.} \item{risk_breaks}{A numeric vector of length 2, with both numbers being between 0 and 1. These are used for the "breaks" when classifying scores diff --git a/man/summarize_package_results.Rd b/man/summarize_package_results.Rd index c0c0c90..52446b6 100644 --- a/man/summarize_package_results.Rd +++ b/man/summarize_package_results.Rd @@ -7,7 +7,8 @@ summarize_package_results(result_dirs) } \arguments{ -\item{result_dirs}{A vector of output directories} +\item{result_dirs}{A vector of output directories, each one produced by +\code{\link[=score_pkg]{score_pkg()}}. \link{external_scores} are not supported.} } \value{ a dataframe diff --git a/tests/testthat/helpers-external.R b/tests/testthat/helpers-external.R new file mode 100644 index 0000000..639b550 --- /dev/null +++ b/tests/testthat/helpers-external.R @@ -0,0 +1,116 @@ +#' Create a temporary directory with external results +#' +#' @param pattern,clean,.local_envir Arguments passed to +#' `withr::local_tempdir()`. 
+local_create_external_results <- function(pattern = "mpn-scorecard-tests-", + .local_envir = parent.frame(), + clean = TRUE) { + tdir <- withr::local_tempdir( + pattern = pattern, .local_envir = .local_envir, clean = clean + ) + + pkg <- "foo" + version <- "1.2.3" + + prefix <- paste0(pkg, "_", version) + rdir <- file.path(tdir, prefix) + fs::dir_create(rdir) + + stem <- file.path(tdir, prefix, prefix) + + jsonlite::write_json( + list( + mpn_scorecard_format = MPN_SCORECARD_FORMAT, + pkg_name = pkg, + pkg_version = version, + scorecard_type = "cli" + ), + paste0(stem, ".pkg.json"), + auto_unbox = TRUE + ) + + cat("check", "output", sep = "\n", file = paste0(stem, ".check.txt")) + + jsonlite::write_json( + list( + overall = 91.54265, + files = list( + list( + file = "cmd/foo.go", + coverage = 98.7643 + ), + list( + file = "cmd/bar.go", + coverage = 84.321 + ) + ) + ), + paste0(stem, ".coverage.json"), + auto_unbox = TRUE + ) + + jsonlite::write_json( + list( + testing = list( + check = 1, + coverage = 0.9154265 + ), + documentation = list( + has_website = 1, + has_news = 1 + ), + maintenance = list( + has_maintainer = 1, + news_current = 1 + ), + transparency = list( + has_source_control = 1, + has_bug_reports_url = 1 + ) + ), + paste0(stem, ".scores.json"), + auto_unbox = TRUE + ) + + jsonlite::write_json( + list( + date = "2024-08-01 08:19:12", + executor = "Bobert", + info = list( + env_vars = list( + METWORX_VERSION = "22.09" + ), + sys = list( + sysname = "Linux", + machine = "x86_64" + ) + ) + ), + paste0(stem, ".metadata.json"), + auto_unbox = TRUE + ) + + yaml::write_yaml( + list( + list( + entrypoint = "foo", + skip = TRUE + ), + list( + entrypoint = "foo bar", + code = "cmd/bar.go", + doc = "docs/commands/foo_bar.md", + tests = c("cmd/bar_test.go", "integration/bar_test.go") + ), + list( + entrypoint = "foo baz", + code = "cmd/baz.go", + doc = "docs/commands/foo_baz.md", + tests = "cmd/baz_test.go" + ) + ), + paste0(stem, ".matrix.yaml") + ) + + return(rdir) +} diff --git a/tests/testthat/test-create-extra-notes.R b/tests/testthat/test-create-extra-notes.R index 935207a..944361f 100644 --- a/tests/testthat/test-create-extra-notes.R +++ b/tests/testthat/test-create-extra-notes.R @@ -8,8 +8,8 @@ describe("creating extra notes", { extra_notes_data <- create_extra_notes(result_dir_x) # Confirm values - covr - expect_equal(unique(extra_notes_data$covr_results_df$test_coverage), 100) - expect_equal(unique(extra_notes_data$covr_results_df$r_script), "R/myscript.R") + expect_equal(unique(extra_notes_data$cov_results_df$test_coverage), 100) + expect_equal(unique(extra_notes_data$cov_results_df$code_file), "R/myscript.R") # Confirm values - R CMD Check expect_true(grepl("Status: OK", extra_notes_data$check_output)) @@ -22,8 +22,8 @@ describe("creating extra notes", { extra_notes_data <- create_extra_notes(result_dir_x) # Confirm values - covr - expect_true(grepl("cannot open", unique(extra_notes_data$covr_results_df$test_coverage))) - expect_identical(extra_notes_data$covr_results_df$r_script, "File coverage failed") + expect_true(grepl("cannot open", unique(extra_notes_data$cov_results_df$test_coverage))) + expect_identical(extra_notes_data$cov_results_df$code_file, "File coverage failed") # Confirm values - R CMD Check expect_true(grepl("ERROR", extra_notes_data$check_output)) }) @@ -35,11 +35,11 @@ describe("creating extra notes", { result_dir <- setups$pkg_result_dir[case] res <- create_extra_notes(result_dir) expect_identical( - res$covr_results_df$r_script, + 
res$cov_results_df$code_file, "No coverage results" ) expect_identical( - res$covr_results_df$test_coverage, + res$cov_results_df$test_coverage, "no testable functions found" ) }) diff --git a/tests/testthat/test-format-report-external.R b/tests/testthat/test-format-report-external.R new file mode 100644 index 0000000..a4323a6 --- /dev/null +++ b/tests/testthat/test-format-report-external.R @@ -0,0 +1,87 @@ +# These are slimmed down variants of some tests from test-format-report.R, +# focused on checking if the formatting functions are compatible with the +# objects read in from external results. + +results_dir <- local_create_external_results() + +describe("formatting functions with external scores", { + it("format_scores_for_render", { + pkg_scores <- build_pkg_scores(results_dir) + formatted <- format_scores_for_render(pkg_scores)[["formatted"]] + + overall_scores <- formatted[["overall_scores"]] + expect_identical( + names(overall_scores), + c("category", "category_score", "risk") + ) + expect_identical( + overall_scores[["category"]], + c(METRIC_CATEGORIES, "overall") + ) + + expect_identical( + purrr::map(formatted[["category_scores"]], "criteria"), + list( + testing = TESTING_METRICS, + documentation = c("has_website", "has_news"), + maintenance = MAINTENANCE_METRICS, + transparency = TRANSPARENCY_METRICS + ) + ) + }) + + it("format_testing_scores", { + pkg_scores <- build_pkg_scores(results_dir) + flex_df <- format_testing_scores(format_scores_for_render(pkg_scores)) + res <- flex_df[["body"]][["dataset"]] + expect_identical(res[["Result"]], c("Passing", "91.54%")) + }) + + it("format_traceability_matrix", { + pkg_scores <- build_pkg_scores(results_dir) + tmat <- read_traceability_matrix(results_dir) + formatted <- format_traceability_matrix(tmat, + scorecard_type = pkg_scores[["scorecard_type"]] + ) + res <- formatted[["body"]][["dataset"]] + expect_identical( + names(res), + c("Command", "Code File", "Documentation", "Test Files") + ) + expect_identical(res[["Command"]], c("foo bar", "foo baz")) + }) + + it("format_appendix", { + input <- list( + check_output = read_check_output(results_dir), + cov_results_df = read_coverage_results(results_dir) + ) + formatted <- format_appendix(input, return_vals = TRUE) + + cov_res <- formatted[["cov_results_flex"]][["body"]][["dataset"]] + expect_identical(names(cov_res), c("Code File", "Test Coverage")) + + expect_identical( + formatted[["check_output"]], + "check\noutput\n" + ) + }) + + it("format_appendix: no per file coverage", { + results_dir <- local_create_external_results() + covfile <- get_result_path(results_dir, "coverage.json") + cov <- jsonlite::read_json(covfile) + cov[["files"]] <- list() + jsonlite::write_json(cov, covfile) + + expect_output( + format_appendix( + list( + check_output = "anything", + cov_results_df = read_coverage_results(results_dir) + ) + ), + "Per file test coverage not provided" + ) + }) +}) diff --git a/tests/testthat/test-format-report.R b/tests/testthat/test-format-report.R index 7044996..44d6801 100644 --- a/tests/testthat/test-format-report.R +++ b/tests/testthat/test-format-report.R @@ -27,9 +27,13 @@ describe("formatting functions", { expect_true(all(grepl(paste0(RISK_LEVELS, collapse = "|"), scores_df$risk))) # ensure all category criteria are present - expected_criteria <- c(names(purrr::list_c(pkg_scores$scores)), "R CMD CHECK", "coverage") - expected_criteria <- expected_criteria[!grepl("check|covr", expected_criteria)] - expect_true(all(scores_df$criteria %in% expected_criteria)) + 
expect_setequal( + scores_df[["criteria"]], + c( + "R CMD CHECK", "coverage", + DOCUMENTATION_METRICS, MAINTENANCE_METRICS, TRANSPARENCY_METRICS + ) + ) # Check overall category scores overall_scores_df <- formatted_pkg_scores$formatted$overall_scores @@ -81,7 +85,7 @@ describe("formatting functions", { expect_true(all(c("Criteria", "Score", "Result", "Risk") %in% names(flex_df$body$dataset))) expect_true(all(c("Criteria", "Result", "Risk") %in% flex_df$col_keys)) expect_equal(flex_df$body$dataset$Score, c(1, 1)) - expect_equal(flex_df$body$dataset$Result, c("Passing (score: 1)", "100%")) + expect_equal(flex_df$body$dataset$Result, c("Passing (score: 1)", "100.00%")) ## High rcmdcheck score ## result_dir <- result_dirs_select[["pass_no_docs"]] @@ -93,7 +97,7 @@ describe("formatting functions", { flex_df <- format_testing_scores(formatted_pkg_scores) expect_equal(flex_df$body$dataset$Score, c(0.75, 1)) - expect_equal(flex_df$body$dataset$Result, c("Passing (score: 0.75)", "100%")) + expect_equal(flex_df$body$dataset$Result, c("Passing (score: 0.75)", "100.00%")) ## Failing rcmdcheck score ## @@ -189,13 +193,13 @@ describe("formatting functions", { extra_notes_frmt <- format_appendix(extra_notes_data, return_vals = TRUE) # Test covr dataframe - covr_results_df <- extra_notes_frmt$covr_results_flex$body$dataset + covr_results_df <- extra_notes_frmt$cov_results_flex$body$dataset expect_equal( - names(format_colnames_to_title(extra_notes_data$covr_results_df)), + names(format_colnames_to_title(extra_notes_data$cov_results_df)), names(covr_results_df) ) expect_equal( - unique(unname(unlist(extra_notes_frmt$covr_results_flex$footer$dataset))), + unique(unname(unlist(extra_notes_frmt$cov_results_flex$footer$dataset))), paste( "Test coverage is calculated per script, rather than per function.", "See Traceability Matrix for function-to-test-file mapping." @@ -203,6 +207,18 @@ describe("formatting functions", { ) }) + it("format_appendix: covr failure", { + expect_output( + format_appendix( + list( + check_output = "anything", + cov_results_df = list(code_file = "File coverage failed") + ) + ), + "Calculating code coverage failed" + ) + }) + it("dependency versions", { setups <- pkg_dirs$pkg_setups_df @@ -234,6 +250,17 @@ describe("formatting functions", { "Unable to calculate R dependency table due to failing `R CMD check`." ) }) + + it("format_metadata handles NULL input", { + flex_df <- format_metadata(list(date = "2024-01-01", executor = "foo")) + expect_identical( + flex_df[["body"]][["dataset"]], + data.frame( + Category = c("Date", "Executor"), + Value = c("2024-01-01", "foo") + ) + ) + }) }) describe("cat_verbatim", { diff --git a/tests/testthat/test-render-scorecard-external.R b/tests/testthat/test-render-scorecard-external.R new file mode 100644 index 0000000..cd5fe53 --- /dev/null +++ b/tests/testthat/test-render-scorecard-external.R @@ -0,0 +1,63 @@ +# These are slimmed down variants of some tests from test-render-scorecard.R to +# check that render_scorecard() doesn't choke on external results. 
+ +skip_if_render_pdf() + +describe("render scorecard for external scores", { + it("render_scorecard - with traceability YAML", { + rdir <- local_create_external_results() + + pdf_path <- render_scorecard(rdir) + toc <- pdftools::pdf_toc(pdf = pdf_path)[["children"]] + expect_identical( + purrr::map_chr(toc, "title"), + c("Overview", "Details", "Traceability Matrix", "Appendix") + ) + + pdf_path <- render_scorecard(rdir, + add_traceability = FALSE, overwrite = TRUE + ) + toc <- pdftools::pdf_toc(pdf = pdf_path)[["children"]] + expect_identical( + purrr::map_chr(toc, "title"), + c("Overview", "Details", "Appendix") + ) + }) + + it("render_scorecard - without traceability YAML", { + rdir <- local_create_external_results() + fs::file_delete(get_result_path(rdir, "matrix.yaml")) + + pdf_path <- render_scorecard(rdir) + toc <- pdftools::pdf_toc(pdf = pdf_path)[["children"]] + expect_identical( + purrr::map_chr(toc, "title"), + c("Overview", "Details", "Appendix") + ) + + fs::file_delete(pdf_path) + expect_error(render_scorecard(rdir, add_traceability = TRUE)) + }) + + it("render_scorecard - without coverage", { + rdir <- local_create_external_results() + fs::file_delete(get_result_path(rdir, "coverage.json")) + + expect_error(render_scorecard(rdir), "coverage file is missing") + + scores <- jsonlite::read_json(get_result_path(rdir, "scores.json")) + scores[["testing"]][["coverage"]] <- NULL + jsonlite::write_json(scores, get_result_path(rdir, "scores.json"), + auto_unbox = TRUE + ) + + pdf_path <- render_scorecard(rdir) + toc <- pdftools::pdf_toc(pdf = pdf_path)[["children"]] + appendix <- toc[[4]] + expect_identical(appendix[["title"]], "Appendix") + expect_identical( + purrr::map_chr(appendix[["children"]], "title"), + c("System Info", "Check output") + ) + }) +}) diff --git a/tests/testthat/test-score-pkg.R b/tests/testthat/test-score-pkg.R index d88cf1f..1c9cb9b 100644 --- a/tests/testthat/test-score-pkg.R +++ b/tests/testthat/test-score-pkg.R @@ -25,11 +25,7 @@ describe("score_pkg", { pkg_scores <- jsonlite::fromJSON(json_path) # Check json attributes - expect_equal( - names(pkg_scores), - c("mpn_scorecard_version","pkg_name", "pkg_version", "out_dir", - "pkg_tar_path", "md5sum_check", "scores", "metadata", "category_scores") - ) + expect_identical(names(pkg_scores), SCORECARD_JSON_KEYS) # These tests also serve to confirm the correct environment vars in `local_check_envvar` were set # Check category scores @@ -41,7 +37,7 @@ describe("score_pkg", { # check individual scores expect_equal(pkg_scores$scores$testing$check, 1) - expect_equal(pkg_scores$scores$testing$covr, 1) + expect_equal(pkg_scores$scores$testing$coverage, 1) }) @@ -70,14 +66,10 @@ describe("score_pkg", { pkg_scores <- jsonlite::fromJSON(json_path) # Check json attributes - expect_equal( - names(pkg_scores), - c("mpn_scorecard_version", "pkg_name", "pkg_version", "out_dir", - "pkg_tar_path", "md5sum_check", "scores", "metadata", "category_scores") - ) + expect_identical(names(pkg_scores), SCORECARD_JSON_KEYS) expect_equal(pkg_scores$scores$testing$check, 0) - expect_equal(pkg_scores$scores$testing$covr, "NA") # NA character when read from json + expect_equal(pkg_scores$scores$testing$coverage, "NA") # NA character when read from json expect_equal(pkg_scores$category_scores$testing, 0) # confirm overall category score is still a number }) @@ -99,7 +91,7 @@ describe("score_pkg", { pkg_scores <- jsonlite::fromJSON(json_path) expect_equal(pkg_scores$scores$testing$check, 1) - expect_identical(pkg_scores$scores$testing$covr, 
"NA") + expect_identical(pkg_scores$scores$testing$coverage, "NA") covr_res <- readRDS(get_result_path(result_dir, "covr.rds")) expect_s3_class(covr_res$errors, "callr_timeout_error") @@ -163,7 +155,7 @@ describe("score_pkg", { expect_equal(res$category_scores$overall, 0.5) # With failed coverage - pkg_scores$scores$testing$covr <- NA + pkg_scores$scores$testing$coverage <- NA res <- calc_overall_scores(pkg_scores) expect_equal(res$category_scores$testing, 0.5) expect_equal(res$category_scores$overall, 0.3) diff --git a/tests/testthat/test-summarize-package-results.R b/tests/testthat/test-summarize-package-results.R index 56e58aa..0a4f0d7 100644 --- a/tests/testthat/test-summarize-package-results.R +++ b/tests/testthat/test-summarize-package-results.R @@ -72,4 +72,8 @@ describe("summarize_package_results", { ) }) + it("aborts on external scores", { + rdir <- local_create_external_results() + expect_error(render_scorecard_summary(rdir), "not supported") + }) })