diff --git a/.github/.gitignore b/.github/.gitignore new file mode 100644 index 0000000..2d19fc7 --- /dev/null +++ b/.github/.gitignore @@ -0,0 +1 @@ +*.html diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md new file mode 100644 index 0000000..8d9d9c8 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -0,0 +1,20 @@ +--- +name: Feature Request +about: Suggest an idea for the course templates +title: '' +labels: '' +assignees: cansavvy + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/ISSUE_TEMPLATE/problem-report.md b/.github/ISSUE_TEMPLATE/problem-report.md new file mode 100644 index 0000000..5175adf --- /dev/null +++ b/.github/ISSUE_TEMPLATE/problem-report.md @@ -0,0 +1,30 @@ +--- +name: Problem Report +about: Create a report to help improve ottrpal and its documentation +title: problem +labels: bug +assignees: cansavvy + +--- + +**Describe what is not working with the template or is unclear in the documentation** +A clear and concise description of what the bug is. + +**Please link to the specific course repository you are working on** + +**To Reproduce** +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Screenshots** +If applicable, add screenshots to help explain your problem. +What does the render look like versus what did you write and supply to `ottrpal`? 
+ +**Additional context** +Add any other context about the problem here. diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..9c3808e --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,17 @@ + + +### Purpose/implementation Section + +#### What changes are being implemented in this Pull Request? + + + +#### What was your approach? + + + +#### What GitHub issue does your pull request address? + + + +### Tell potential reviewers what kind of feedback you are soliciting. diff --git a/.github/workflows/R-CMD-check.yaml b/.github/workflows/R-CMD-check.yaml new file mode 100644 index 0000000..d894284 --- /dev/null +++ b/.github/workflows/R-CMD-check.yaml @@ -0,0 +1,86 @@ +# For help debugging build failures open an issue on the RStudio community with the 'github-actions' tag. +# https://community.rstudio.com/new-topic?category=Package%20development&tags=github-actions +on: + push: + branches: + - main + - master + pull_request: + branches: + - main + - master + +name: R-CMD-check + +jobs: + R-CMD-check: + runs-on: ${{ matrix.config.os }} + + name: ${{ matrix.config.os }} (${{ matrix.config.r }}) + + strategy: + fail-fast: false + matrix: + config: + - {os: windows-latest, r: 'release'} + - {os: macOS-latest, r: 'release'} + - {os: ubuntu-20.04, r: 'release', rspm: "https://packagemanager.rstudio.com/cran/__linux__/focal/latest"} + - {os: ubuntu-20.04, r: 'devel', rspm: "https://packagemanager.rstudio.com/cran/__linux__/focal/latest"} + + env: + R_REMOTES_NO_ERRORS_FROM_WARNINGS: true + RSPM: ${{ matrix.config.rspm }} + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + + steps: + - uses: actions/checkout@v2 + + - uses: r-lib/actions/setup-r@v1 + with: + r-version: ${{ matrix.config.r }} + + - uses: r-lib/actions/setup-pandoc@v1 + + - name: Query dependencies + run: | + install.packages('remotes') + saveRDS(remotes::dev_package_deps(dependencies = TRUE), ".github/depends.Rds", version = 2) + 
writeLines(sprintf("R-%i.%i", getRversion()$major, getRversion()$minor), ".github/R-version") + shell: Rscript {0} + + - name: Restore R package cache + if: runner.os != 'Windows' + uses: actions/cache@v2 + with: + path: ${{ env.R_LIBS_USER }} + key: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1-${{ hashFiles('.github/depends.Rds') }} + restore-keys: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1- + + - name: Install system dependencies + if: runner.os == 'Linux' + run: | + while read -r cmd + do + eval sudo $cmd + done < <(Rscript -e 'writeLines(remotes::system_requirements("ubuntu", "20.04"))') + + - name: Install dependencies + run: | + remotes::install_deps(dependencies = TRUE) + remotes::install_cran("rcmdcheck") + shell: Rscript {0} + + - name: Check + env: + _R_CHECK_CRAN_INCOMING_REMOTE_: false + run: | + options(crayon.enabled = TRUE) + rcmdcheck::rcmdcheck(args = c("--no-manual", "--as-cran"), check_dir = "check") + shell: Rscript {0} + + - name: Upload check results + if: failure() + uses: actions/upload-artifact@main + with: + name: ${{ runner.os }}-r${{ matrix.config.r }}-results + path: check diff --git a/R/ari_burn_subtitles.R b/R/ari_burn_subtitles.R index 38673e4..06c7339 100644 --- a/R/ari_burn_subtitles.R +++ b/R/ari_burn_subtitles.R @@ -1,31 +1,32 @@ #' Burn Subtitles into a video -#' -#' @note This needs \code{ffmpeg} that was compiled with -#' \code{--enable-libass} as per +#' +#' @note This needs \code{ffmpeg} that was compiled with +#' \code{--enable-libass} as per #' \url{https://trac.ffmpeg.org/wiki/HowToBurnSubtitlesIntoVideo} #' #' @param video Video in \code{mp4} format #' @param srt Subtitle file in \code{srt} format -#' @param verbose print diagnostic messages. If > 1, +#' @param verbose print diagnostic messages. 
If > 1, #' then more are printed #' #' @return Name of output video -ari_burn_subtitles = function(video, srt, verbose = FALSE) { - ffmpeg = ffmpeg_exec(quote = TRUE) +ari_burn_subtitles <- function(video, srt, verbose = FALSE) { + ffmpeg <- ffmpeg_exec(quote = TRUE) if (verbose > 0) { message("Burning in Subtitles") } command <- paste( ffmpeg, "-y -i", video, paste0("-vf subtitles=", srt), - video) + video + ) if (verbose > 0) { message(command) } - res = system(command) + res <- system(command) if (res != 0) { warning("Result was non-zero for ffmpeg") } - + return(video) -} \ No newline at end of file +} diff --git a/R/ari_example.R b/R/ari_example.R index 9cbc9c7..e7601c9 100644 --- a/R/ari_example.R +++ b/R/ari_example.R @@ -5,15 +5,15 @@ #' #' @param path The name of the file. If no argument is provided then #' all of the example files will be listed. -#' +#' #' @return A character string #' @export #' @examples #' ari_example("ari_intro.Rmd") -ari_example <- function(path = NULL){ - if(is.null(path)) { +ari_example <- function(path = NULL) { + if (is.null(path)) { list.files(system.file("test", package = "ari")) } else { system.file("test", path, package = "ari", mustWork = TRUE) } -} \ No newline at end of file +} diff --git a/R/ari_narrate.R b/R/ari_narrate.R index 63f4a4c..12557db 100644 --- a/R/ari_narrate.R +++ b/R/ari_narrate.R @@ -1,22 +1,22 @@ #' Create a video from slides and a script -#' +#' #' \code{ari_narrate} creates a video from a script written in markdown and HTML #' slides created with \code{\link[rmarkdown]{rmarkdown}} or a similar package. -#' This function uses \href{https://aws.amazon.com/polly/}{Amazon Polly} +#' This function uses \href{https://aws.amazon.com/polly/}{Amazon Polly} #' via \code{\link{ari_spin}}. #' #' @param script Either a markdown file where every paragraph will be read over #' a corresponding slide, or an \code{.Rmd} file where each HTML comment will #' be used for narration. 
-#' @param slides A path or URL for an HTML slideshow created with -#' \code{\link[rmarkdown]{rmarkdown}}, \code{xaringan}, or a +#' @param slides A path or URL for an HTML slideshow created with +#' \code{\link[rmarkdown]{rmarkdown}}, \code{xaringan}, or a #' similar package. #' @param output The path to the video file which will be created. -#' @param voice The voice you want to use. See -#' \code{\link[text2speech]{tts_voices}} for more information +#' @param voice The voice you want to use. See +#' \code{\link[text2speech]{tts_voices}} for more information #' about what voices are available. #' @param service speech synthesis service to use, -#' passed to \code{\link[text2speech]{tts}}. +#' passed to \code{\link[text2speech]{tts}}. #' Either \code{"amazon"} or \code{"google"}. #' @param capture_method Either \code{"vectorized"} or \code{"iterative"}. #' The vectorized mode is faster though it can cause screens to repeat. If @@ -33,7 +33,7 @@ #' @param video_codec The video encoder for the splicing. 
If this #' fails, see \code{ffmpeg -codecs} #' @param cleanup If \code{TRUE}, interim files are deleted -#' +#' #' @return The output from \code{\link{ari_spin}} #' @importFrom xml2 read_html #' @importFrom rvest html_nodes html_text @@ -42,48 +42,48 @@ #' @importFrom webshot webshot #' @importFrom tools file_ext #' @export -#' @examples +#' @examples #' \dontrun{ -#' -#' # +#' +#' # #' ari_narrate(system.file("test", "ari_intro_script.md", package = "ari"), -#' system.file("test", "ari_intro.html", package = "ari"), -#' voice = "Joey") -#' +#' system.file("test", "ari_intro.html", package = "ari"), +#' voice = "Joey" +#' ) #' } -ari_narrate <- function(script, slides, +ari_narrate <- function(script, slides, output = tempfile(fileext = ".mp4"), - voice = text2speech::tts_default_voice(service = service), + voice = text2speech::tts_default_voice(service = service), service = "amazon", capture_method = c("vectorized", "iterative"), subtitles = FALSE, ..., verbose = FALSE, audio_codec = get_audio_codec(), video_codec = get_video_codec(), - cleanup = TRUE){ - - auth = text2speech::tts_auth(service = service) + cleanup = TRUE) { + auth <- text2speech::tts_auth(service = service) if (!auth) { - stop(paste0("It appears you're not authenticated with ", - service, ". Make sure you've ", - "set the appropriate environmental variables ", - "before you proceed.") - ) + stop(paste0( + "It appears you're not authenticated with ", + service, ". Make sure you've ", + "set the appropriate environmental variables ", + "before you proceed." 
+ )) } - - - capture_method = match.arg(capture_method) + + + capture_method <- match.arg(capture_method) if (!(capture_method %in% c("vectorized", "iterative"))) { stop('capture_method must be either "vectorized" or "iterative"') } - + output_dir <- normalizePath(dirname(output)) script <- normalizePath(script) if (file_ext(script) %in% c("Rmd", "rmd") & missing(slides)) { - tfile = tempfile(fileext = ".html") - slides = rmarkdown::render(input = script, output_file = tfile) - } - + tfile <- tempfile(fileext = ".html") + slides <- rmarkdown::render(input = script, output_file = tfile) + } + if (file.exists(slides)) { slides <- normalizePath(slides) if (.Platform$OS.type == "windows") { @@ -96,7 +96,7 @@ ari_narrate <- function(script, slides, file.exists(script), dir.exists(output_dir) ) - + if (file_ext(script) %in% c("Rmd", "rmd")) { paragraphs <- parse_html_comments(script) } else { @@ -105,16 +105,24 @@ ari_narrate <- function(script, slides, on.exit(unlink(html_path, force = TRUE), add = TRUE) } render(script, output_format = html_document(), output_file = html_path) - paragraphs <- map_chr(html_text(html_nodes(read_html(html_path), "p")), - function(x){gsub("\u2019", "'", x)}) + paragraphs <- map_chr( + html_text(html_nodes(read_html(html_path), "p")), + function(x) { + gsub("\u2019", "'", x) + } + ) } - + slide_nums <- seq_along(paragraphs) - img_paths <- file.path(output_dir, - paste0("ari_img_", - slide_nums, "_", - grs(), ".jpeg")) - + img_paths <- file.path( + output_dir, + paste0( + "ari_img_", + slide_nums, "_", + grs(), ".jpeg" + ) + ) + if (capture_method == "vectorized") { webshot(url = paste0(slides, "#", slide_nums), file = img_paths, ...) } else { @@ -122,12 +130,14 @@ ari_narrate <- function(script, slides, webshot(url = paste0(slides, "#", i), file = img_paths[i], ...) 
} } - + if (cleanup) { on.exit(walk(img_paths, unlink, force = TRUE), add = TRUE) } - ari_spin(images = img_paths, paragraphs = paragraphs, - output = output, voice = voice, - service = service, subtitles = subtitles, - verbose = verbose, cleanup = cleanup) -} \ No newline at end of file + ari_spin( + images = img_paths, paragraphs = paragraphs, + output = output, voice = voice, + service = service, subtitles = subtitles, + verbose = verbose, cleanup = cleanup + ) +} diff --git a/R/ari_spin.R b/R/ari_spin.R index 7712468..19b8b71 100644 --- a/R/ari_spin.R +++ b/R/ari_spin.R @@ -50,35 +50,38 @@ #' \dontrun{ #' #' slides <- system.file("test", c("mab2.png", "mab1.png"), -#' package = "ari") -#' sentences <- c("Welcome to my very interesting lecture.", -#' "Here are some fantastic equations I came up with.") +#' package = "ari" +#' ) +#' sentences <- c( +#' "Welcome to my very interesting lecture.", +#' "Here are some fantastic equations I came up with." +#' ) #' ari_spin(slides, sentences, voice = "Joey") -#' #' } #' -ari_spin <- function( - images, paragraphs, - output = tempfile(fileext = ".mp4"), - voice = text2speech::tts_default_voice(service = service), - service = ifelse(have_polly(), "amazon", "google"), - subtitles = FALSE, - duration = NULL, - tts_args = NULL, - key_or_json_file = NULL, - ...){ +ari_spin <- function(images, paragraphs, + output = tempfile(fileext = ".mp4"), + voice = text2speech::tts_default_voice(service = service), + service = ifelse(have_polly(), "amazon", "google"), + subtitles = FALSE, + duration = NULL, + tts_args = NULL, + key_or_json_file = NULL, + ...) { # check for ffmpeg before any synthesizing ffmpeg_exec() - auth = text2speech::tts_auth( + auth <- text2speech::tts_auth( service = service, - key_or_json_file = key_or_json_file) + key_or_json_file = key_or_json_file + ) if (!auth) { - stop(paste0("It appears you're not authenticated with ", - service, ". 
Make sure you've ", - "set the appropriate environmental variables ", - "before you proceed.") - ) + stop(paste0( + "It appears you're not authenticated with ", + service, ". Make sure you've ", + "set the appropriate environmental variables ", + "before you proceed." + )) } stopifnot(length(images) > 0) @@ -87,15 +90,17 @@ ari_spin <- function( if (length(paragraphs) == 1) { if (file.exists(paragraphs)) { - paragraphs = readLines(paragraphs, warn = FALSE) - paragraphs = paragraphs[ !paragraphs %in% "" ] + paragraphs <- readLines(paragraphs, warn = FALSE) + paragraphs <- paragraphs[!paragraphs %in% ""] } } - semi_colon = trimws(paragraphs) == ";" + semi_colon <- trimws(paragraphs) == ";" if (any(semi_colon)) { - warning(paste0("Some paragraphs are simply a semicolon - ", - "likely needs to be replaced or slide removed!")) + warning(paste0( + "Some paragraphs are simply a semicolon - ", + "likely needs to be replaced or slide removed!" + )) } stopifnot( @@ -111,47 +116,48 @@ ari_spin <- function( pb <- progress_bar$new( format = "Fetching Narration [:bar] :percent", - total = length(par_along)) + total = length(par_along) + ) for (i in par_along) { - args = tts_args - args$text = paragraphs[i] - args$voice = voice - args$service = service - args$bind_audio = TRUE + args <- tts_args + args$text <- paragraphs[i] + args$voice <- voice + args$service <- service + args$bind_audio <- TRUE wav <- do.call(text2speech::tts, args = args) - wav = reduce(wav$wav, bind) - wav = pad_wav(wav, duration = duration[i]) - ideal_duration[i] = length(wav@left) / wav@samp.rate + wav <- reduce(wav$wav, bind) + wav <- pad_wav(wav, duration = duration[i]) + ideal_duration[i] <- length(wav@left) / wav@samp.rate wavs[[i]] <- wav pb$tick() } if (subtitles) { - sub_file = paste0(file_path_sans_ext(output), ".srt") + sub_file <- paste0(file_path_sans_ext(output), ".srt") ari_subtitles(paragraphs, wavs, sub_file) } - res = ari_stitch(images, wavs, output, ...) - args = list(...) 
- cleanup = args$cleanup + res <- ari_stitch(images, wavs, output, ...) + args <- list(...) + cleanup <- args$cleanup if (is.null(cleanup)) { - cleanup = TRUE + cleanup <- TRUE } if (!cleanup) { - attr(res, "wavs") = wavs + attr(res, "wavs") <- wavs } - attr(res, "voice") = voice + attr(res, "voice") <- voice if (subtitles) { - attr(res, "subtitles") = sub_file + attr(res, "subtitles") <- sub_file } - attr(res, "service") = service + attr(res, "service") <- service return(res) } #' @rdname ari_spin #' @export -have_polly = function() { +have_polly <- function() { requireNamespace("aws.polly", quietly = TRUE) } diff --git a/R/ari_stitch.R b/R/ari_stitch.R index 21c96e3..e557bda 100644 --- a/R/ari_stitch.R +++ b/R/ari_stitch.R @@ -57,37 +57,38 @@ #' @examples #' \dontrun{ #' if (ffmpeg_version_sufficient()) { -#' result = ari_stitch( -#' ari_example(c("mab1.png", "mab2.png")), -#' list(tuneR::noise(), tuneR::noise())) -#' result = ari_stitch( -#' ari_example(c("mab1.png", "mab2.png")), -#' list(tuneR::noise(), tuneR::noise()), ffmpeg_opts = "-qscale 0", -#' verbose = 2) -#' # system2("open", attributes(result)$outfile) +#' result <- ari_stitch( +#' ari_example(c("mab1.png", "mab2.png")), +#' list(tuneR::noise(), tuneR::noise()) +#' ) +#' result <- ari_stitch( +#' ari_example(c("mab1.png", "mab2.png")), +#' list(tuneR::noise(), tuneR::noise()), +#' ffmpeg_opts = "-qscale 0", +#' verbose = 2 +#' ) +#' # system2("open", attributes(result)$outfile) #' } #' } -ari_stitch <- function( - images, audio, - output = tempfile(fileext = ".mp4"), - verbose = FALSE, - cleanup = TRUE, - ffmpeg_opts = "", - divisible_height = TRUE, - audio_codec = get_audio_codec(), - video_codec = get_video_codec(), - video_sync_method = "2", - audio_bitrate = NULL, - video_bitrate = NULL, - pixel_format = "yuv420p", - fast_start = FALSE, - deinterlace = FALSE, - stereo_audio = TRUE, - duration = NULL, - video_filters = NULL, - frames_per_second = NULL, - check_inputs = TRUE -){ +ari_stitch <- 
function(images, audio, + output = tempfile(fileext = ".mp4"), + verbose = FALSE, + cleanup = TRUE, + ffmpeg_opts = "", + divisible_height = TRUE, + audio_codec = get_audio_codec(), + video_codec = get_video_codec(), + video_sync_method = "2", + audio_bitrate = NULL, + video_bitrate = NULL, + pixel_format = "yuv420p", + fast_start = FALSE, + deinterlace = FALSE, + stereo_audio = TRUE, + duration = NULL, + video_filters = NULL, + frames_per_second = NULL, + check_inputs = TRUE) { stopifnot(length(images) > 0) images <- normalizePath(images) output_dir <- normalizePath(dirname(output)) @@ -102,16 +103,16 @@ ari_stitch <- function( ) } if (is.character(audio)) { - - audio = lapply(audio, function(x) { - ext = tolower(tools::file_ext(x)) - func = switch(ext, - wav = tuneR::readWave, - mp3 = tuneR::readMP3, - tuneR::readMP3) + audio <- lapply(audio, function(x) { + ext <- tolower(tools::file_ext(x)) + func <- switch(ext, + wav = tuneR::readWave, + mp3 = tuneR::readMP3, + tuneR::readMP3 + ) func(x) }) - audio = pad_wav(audio, duration = duration) + audio <- pad_wav(audio, duration = duration) # # audio = lapply(audio, function(wav) { # ideal_duration <- ceiling(length(wav@left) / wav@samp.rate) @@ -130,7 +131,7 @@ ari_stitch <- function( # }) } # Make a hard path - output = file.path(output_dir, basename(output)) + output <- file.path(output_dir, basename(output)) if (verbose > 0) { message("Writing out Wav for audio") @@ -148,30 +149,34 @@ ari_stitch <- function( # converting all to gif - img_ext = tolower(tools::file_ext(images)) - any_gif = any(img_ext %in% "gif") + img_ext <- tolower(tools::file_ext(images)) + any_gif <- any(img_ext %in% "gif") if (any_gif & !all(img_ext %in% "gif")) { if (verbose > 0) { message("Converting All files to gif!") } for (i in seq_along(images)) { - iext = img_ext[i] + iext <- img_ext[i] if (iext != "gif") { - tfile = tempfile(fileext = ".gif") + tfile <- tempfile(fileext = ".gif") ffmpeg_convert(images[i], outfile = tfile) - images[i] = 
tfile + images[i] <- tfile } } } - input_txt_path <- file.path(output_dir, - paste0("ari_input_", - grs(), - ".txt")) + input_txt_path <- file.path( + output_dir, + paste0( + "ari_input_", + grs(), + ".txt" + ) + ) ## on windows ffmpeg cancats names adding the working directory, so if ## complete url is provided it adds it twice. if (.Platform$OS.type == "windows") { - new_image_names = file.path(output_dir, basename(images)) + new_image_names <- file.path(output_dir, basename(images)) if (!any(file.exists(new_image_names))) { file.copy(images, to = new_image_names) } else { @@ -181,51 +186,54 @@ ari_stitch <- function( } for (i in seq_along(images)) { cat(paste0("file ", "'", images[i], "'", "\n"), - file = input_txt_path, append = TRUE) + file = input_txt_path, append = TRUE + ) cat(paste0("duration ", duration(audio[[i]]), "\n"), - file = input_txt_path, append = TRUE) + file = input_txt_path, append = TRUE + ) } cat(paste0("file ", "'", images[i], "'", "\n"), - file = input_txt_path, append = TRUE) - input_txt_path = normalizePath(input_txt_path, winslash = "/") + file = input_txt_path, append = TRUE + ) + input_txt_path <- normalizePath(input_txt_path, winslash = "/") # needed for users as per # https://superuser.com/questions/718027/ # ffmpeg-concat-doesnt-work-with-absolute-path # input_txt_path = normalizePath(input_txt_path, winslash = "\\") - ffmpeg = ffmpeg_exec(quote = TRUE) + ffmpeg <- ffmpeg_exec(quote = TRUE) if (!is.null(frames_per_second)) { - video_filters = c(video_filters, paste0("fps=", frames_per_second)) + video_filters <- c(video_filters, paste0("fps=", frames_per_second)) } else { - video_filters = c(video_filters, "fps=5") + video_filters <- c(video_filters, "fps=5") } if (divisible_height) { - video_filters = c(video_filters, '"scale=trunc(iw/2)*2:trunc(ih/2)*2"') + video_filters <- c(video_filters, '"scale=trunc(iw/2)*2:trunc(ih/2)*2"') } # workaround for older ffmpeg # https://stackoverflow.com/questions/32931685/ # 
the-encoder-aac-is-experimental-but-experimental-codecs-are-not-enabled - experimental = FALSE + experimental <- FALSE if (!is.null(audio_codec)) { if (audio_codec == "aac") { - experimental = TRUE + experimental <- TRUE } } if (deinterlace) { - video_filters = c(video_filters, "yadif") + video_filters <- c(video_filters, "yadif") } - video_filters = paste(video_filters, collapse = ",") - video_filters = paste0("-vf ", video_filters) + video_filters <- paste(video_filters, collapse = ",") + video_filters <- paste0("-vf ", video_filters) if (any(grepl("-vf", ffmpeg_opts))) { warning("Found video filters in ffmpeg_opts, may not be used correctly!") } - ffmpeg_opts = c(video_filters, ffmpeg_opts) - ffmpeg_opts = paste(ffmpeg_opts, collapse = " ") + ffmpeg_opts <- c(video_filters, ffmpeg_opts) + ffmpeg_opts <- paste(ffmpeg_opts, collapse = " ") # shQuote should seankross/ari#5 @@ -234,27 +242,34 @@ ari_stitch <- function( "-f concat -safe 0 -i", shQuote(input_txt_path), "-i", shQuote(wav_path), ifelse(!is.null(video_codec), paste("-c:v", video_codec), - ""), + "" + ), ifelse(!is.null(audio_codec), paste("-c:a", audio_codec), - ""), + "" + ), ifelse(stereo_audio, "-ac 2", ""), ifelse(!is.null(audio_bitrate), paste("-b:a", audio_bitrate), - ""), + "" + ), ifelse(!is.null(video_bitrate), paste("-b:v", video_bitrate), - ""), + "" + ), " -shortest", # ifelse(deinterlace, "-vf yadif", ""), ifelse(!is.null(video_sync_method), paste("-vsync", video_sync_method), - ""), + "" + ), ifelse(!is.null(pixel_format), paste("-pix_fmt", pixel_format), - ""), + "" + ), ifelse(fast_start, "-movflags +faststart", ""), ffmpeg_opts, ifelse(!is.null(frames_per_second), paste0("-r ", frames_per_second), ""), ifelse(experimental, "-strict experimental", ""), "-max_muxing_queue_size 9999", "-threads 2", - shQuote(output)) + shQuote(output) + ) if (verbose > 0) { message(command) } @@ -262,7 +277,7 @@ ari_stitch <- function( message("Input text path is:") cat(readLines(input_txt_path), sep = 
"\n") } - res = system(command) + res <- system(command) if (res != 0) { warning("Result was non-zero for ffmpeg") } @@ -270,13 +285,13 @@ ari_stitch <- function( if (cleanup) { on.exit(unlink(input_txt_path, force = TRUE), add = TRUE) } - res = file.exists(output) && file.size(output) > 0 + res <- file.exists(output) && file.size(output) > 0 if (!cleanup) { - attr(res, "txt_path") = input_txt_path - attr(res, "wav_path") = wav_path - attr(res, "cmd") = command + attr(res, "txt_path") <- input_txt_path + attr(res, "wav_path") <- wav_path + attr(res, "cmd") <- command } - attr(res, "outfile") = output - attr(res, "images") = images + attr(res, "outfile") <- output + attr(res, "images") <- images invisible(res) } diff --git a/R/ari_subtitles.R b/R/ari_subtitles.R index 3a7c641..5dac5d2 100644 --- a/R/ari_subtitles.R +++ b/R/ari_subtitles.R @@ -5,33 +5,35 @@ #' @importFrom hms hms ari_subtitles <- function(paragraphs, wavs, path, width = 42) { durations <- map_dbl(wavs, ~ length(.x@left) / .x@samp.rate) - + # Break down paragraphs so that they are more readable paragraphs <- map(paragraphs, strwrap, width = width) lines_per_paragraph <- map_dbl(paragraphs, length) durations <- rep(durations / lines_per_paragraph, times = lines_per_paragraph) paragraphs <- unlist(paragraphs) - + cumdur <- cumsum(durations) cumdur <- map(cumdur, hms) cumdur <- map(cumdur, as.character) cumdur <- map(cumdur, substr, start = 0, stop = 12) cumdur <- map(cumdur, gsub, pattern = "\\.", replacement = ",") # need to check to see if no , so 00:00:27 doesn't happen - cumdur = unlist(cumdur) - add = !grepl(",", cumdur) - cumdur[ add ] = paste0(cumdur[add], ",000") + cumdur <- unlist(cumdur) + add <- !grepl(",", cumdur) + cumdur[add] <- paste0(cumdur[add], ",000") cumdur <- c("00:00:00,000", unlist(cumdur)) - cumdur = strsplit(cumdur, ",") + cumdur <- strsplit(cumdur, ",") cumdur <- map_chr(cumdur, function(x) { - x[2] = sprintf("%03.0f", as.numeric(x[2])) - x = paste(x, collapse = ",") + x[2] <- 
sprintf("%03.0f", as.numeric(x[2])) + x <- paste(x, collapse = ",") }) - - + + result <- seq_along(paragraphs) - result <- map(result, ~ c(.x, paste(cumdur[.x], "-->", cumdur[.x + 1]), - paragraphs[.x], "")) + result <- map(result, ~ c( + .x, paste(cumdur[.x], "-->", cumdur[.x + 1]), + paragraphs[.x], "" + )) result <- unlist(result) writeLines(result, path) -} \ No newline at end of file +} diff --git a/R/ari_talk.R b/R/ari_talk.R index 48c0c1b..2429e9d 100644 --- a/R/ari_talk.R +++ b/R/ari_talk.R @@ -1,10 +1,10 @@ #' Create spoken audio files -#' +#' #' A simple function for demoing how spoken text will sound. #' @param paragraphs A vector strings that will be spoken by Amazon Polly. #' @param output A path to the audio file which will be created. -#' @param voice The voice you want to use. See -#' \code{\link[text2speech]{tts_voices}} for more information +#' @param voice The voice you want to use. See +#' \code{\link[text2speech]{tts_voices}} for more information #' about what voices are available. #' @param service speech synthesis service to use, #' passed to \code{\link[text2speech]{tts}} @@ -15,39 +15,41 @@ #' @importFrom tuneR bind Wave writeWave #' @importFrom purrr map reduce #' @export -ari_talk <- function(paragraphs, +ari_talk <- function(paragraphs, output = tempfile(fileext = ".wav"), voice = text2speech::tts_default_voice(service = service), service = "amazon") { - auth = text2speech::tts_auth(service = service) + auth <- text2speech::tts_auth(service = service) if (!auth) { - stop(paste0("It appears you're not authenticated with ", - service, ". Make sure you've ", - "set the appropriate environmental variables ", - "before you proceed.") - ) + stop(paste0( + "It appears you're not authenticated with ", + service, ". Make sure you've ", + "set the appropriate environmental variables ", + "before you proceed." 
+ )) } output_dir <- normalizePath(dirname(output)) stopifnot( length(paragraphs) > 0, dir.exists(output_dir) ) - + wavs <- vector(mode = "list", length = length(paragraphs)) par_along <- seq_along(paragraphs) - + for (i in par_along) { wav <- text2speech::tts( - text = paragraphs[i], + text = paragraphs[i], voice = voice, service = service, - bind_audio = TRUE) - wav = reduce(wav$wav, bind) + bind_audio = TRUE + ) + wav <- reduce(wav$wav, bind) wavs[[i]] <- wav } - + audio <- reduce(wavs, bind) writeWave(audio, output) - attr(audio, "outfile") = output + attr(audio, "outfile") <- output return(audio) -} \ No newline at end of file +} diff --git a/R/ffmpeg_codecs.R b/R/ffmpeg_codecs.R index e8b58d0..20377f6 100644 --- a/R/ffmpeg_codecs.R +++ b/R/ffmpeg_codecs.R @@ -6,71 +6,72 @@ #' @examples #' \dontrun{ #' if (ffmpeg_version_sufficient()) { -#' ffmpeg_codecs() -#' ffmpeg_video_codecs() -#' ffmpeg_audio_codecs() +#' ffmpeg_codecs() +#' ffmpeg_video_codecs() +#' ffmpeg_audio_codecs() #' } #' } -ffmpeg_codecs = function() { - ffmpeg = ffmpeg_exec(quote = TRUE) - cmd = paste(ffmpeg, "-codecs") - result = system(cmd, ignore.stderr = TRUE, ignore.stdout = TRUE) - res = system(cmd, intern = TRUE, ignore.stderr = TRUE) - res = trimws(res) +ffmpeg_codecs <- function() { + ffmpeg <- ffmpeg_exec(quote = TRUE) + cmd <- paste(ffmpeg, "-codecs") + result <- system(cmd, ignore.stderr = TRUE, ignore.stdout = TRUE) + res <- system(cmd, intern = TRUE, ignore.stderr = TRUE) + res <- trimws(res) if (length(res) == 0) { - res = "" + res <- "" } if (result != 0 & all(res %in% "")) { warning("No codecs output from ffmpeg for codecs") return(NULL) } - res = res[grepl("^([.]|D)", res)] - res = strsplit(res, " ") - res = t(vapply(res, function(x) { - x = trimws(x) - x = x[ x != ""] + res <- res[grepl("^([.]|D)", res)] + res <- strsplit(res, " ") + res <- t(vapply(res, function(x) { + x <- trimws(x) + x <- x[x != ""] if (length(x) >= 3) { - x[3:length(x)] = paste(x[3:length(x)], collapse = 
" ") + x[3:length(x)] <- paste(x[3:length(x)], collapse = " ") } return(x[seq(3)]) }, FUN.VALUE = character(3))) - colnames(res) = c("capabilities", "codec", "codec_name") - res = as.data.frame(res, stringsAsFactors = FALSE) - + colnames(res) <- c("capabilities", "codec", "codec_name") + res <- as.data.frame(res, stringsAsFactors = FALSE) + if (nrow(res) == 0) { warning("No codecs output from ffmpeg for codecs") return(NULL) - } - res$capabilities = trimws(res$capabilities) - - cap_defns = res[ res$codec == "=", ] - res = res[ res$codec != "=", ] - - cap = do.call("rbind", strsplit(res$capabilities, split = "")) - - cap_defns$codec_name = tolower(cap_defns$codec_name) - cap_defns$codec_name = gsub(" ", "_", cap_defns$codec_name) - cap_defns$codec_name = gsub("-", "_", cap_defns$codec_name) - cap_def = do.call("rbind", strsplit(cap_defns$capabilities, split = "")) - - mat = matrix(NA, ncol = nrow(cap_defns), nrow = nrow(cap)) - colnames(mat) = cap_defns$codec_name - - icol = 4 - indices = apply(cap_def, 1, function(x) which(x != ".")) + } + res$capabilities <- trimws(res$capabilities) + + cap_defns <- res[res$codec == "=", ] + res <- res[res$codec != "=", ] + + cap <- do.call("rbind", strsplit(res$capabilities, split = "")) + + cap_defns$codec_name <- tolower(cap_defns$codec_name) + cap_defns$codec_name <- gsub(" ", "_", cap_defns$codec_name) + cap_defns$codec_name <- gsub("-", "_", cap_defns$codec_name) + cap_def <- do.call("rbind", strsplit(cap_defns$capabilities, split = "")) + + mat <- matrix(NA, ncol = nrow(cap_defns), nrow = nrow(cap)) + colnames(mat) <- cap_defns$codec_name + + icol <- 4 + indices <- apply(cap_def, 1, function(x) which(x != ".")) for (icol in seq(nrow(cap_def))) { - x = cap[, indices[icol]] - mat[, icol] = x %in% cap_def[icol, indices[icol]] + x <- cap[, indices[icol]] + mat[, icol] <- x %in% cap_def[icol, indices[icol]] } - mat = as.data.frame(mat, stringsAsFactors = FALSE) - - res = cbind(res, mat) + mat <- as.data.frame(mat, 
stringsAsFactors = FALSE) + + res <- cbind(res, mat) if (any(rowSums( - res[, c("video_codec", "audio_codec", "subtitle_codec")]) - > 1)) { + res[, c("video_codec", "audio_codec", "subtitle_codec")] + ) + > 1)) { warning("Format may have changed, please post this issue") } - + # L = list(capabilities = cap_defns, # codecs = res) # return(L) @@ -79,25 +80,25 @@ ffmpeg_codecs = function() { #' @rdname ffmpeg_codecs #' @export -ffmpeg_video_codecs = function() { - res = ffmpeg_codecs() +ffmpeg_video_codecs <- function() { + res <- ffmpeg_codecs() if (is.null(res)) { return(NULL) } - res = res[ res$video_codec, ] - res$video_codec = res$audio_codec = res$subtitle_codec = NULL + res <- res[res$video_codec, ] + res$video_codec <- res$audio_codec <- res$subtitle_codec <- NULL res } #' @rdname ffmpeg_codecs #' @export -ffmpeg_audio_codecs = function() { - res = ffmpeg_codecs() +ffmpeg_audio_codecs <- function() { + res <- ffmpeg_codecs() if (is.null(res)) { return(NULL) - } - res = res[ res$audio_codec, ] - res$video_codec = res$audio_codec = res$subtitle_codec = NULL + } + res <- res[res$audio_codec, ] + res$video_codec <- res$audio_codec <- res$subtitle_codec <- NULL res } @@ -105,102 +106,105 @@ ffmpeg_audio_codecs = function() { #' @rdname ffmpeg_codecs #' @export -ffmpeg_muxers = function() { - ffmpeg = ffmpeg_exec(quote = TRUE) - cmd = paste(ffmpeg, "-muxers") - result = system(cmd, ignore.stderr = TRUE, ignore.stdout = TRUE) - res = system(cmd, intern = TRUE, ignore.stderr = TRUE) - res = trimws(res) +ffmpeg_muxers <- function() { + ffmpeg <- ffmpeg_exec(quote = TRUE) + cmd <- paste(ffmpeg, "-muxers") + result <- system(cmd, ignore.stderr = TRUE, ignore.stdout = TRUE) + res <- system(cmd, intern = TRUE, ignore.stderr = TRUE) + res <- trimws(res) if (length(res) == 0) { - res = "" - } + res <- "" + } if (result != 0 & all(res %in% "")) { warning("No codecs output from ffmpeg for muxers") return(NULL) - } - res = res[grepl("^E", res)] - res = strsplit(res, " ") - res 
= t(vapply(res, function(x) { - x = trimws(x) - x = x[ x != ""] + } + res <- res[grepl("^E", res)] + res <- strsplit(res, " ") + res <- t(vapply(res, function(x) { + x <- trimws(x) + x <- x[x != ""] if (length(x) >= 3) { - x[3:length(x)] = paste(x[3:length(x)], collapse = " ") + x[3:length(x)] <- paste(x[3:length(x)], collapse = " ") } return(x[seq(3)]) }, FUN.VALUE = character(3))) - colnames(res) = c("capabilities", "muxer", "muxer_name") - res = as.data.frame(res, stringsAsFactors = FALSE) + colnames(res) <- c("capabilities", "muxer", "muxer_name") + res <- as.data.frame(res, stringsAsFactors = FALSE) if (nrow(res) == 0) { warning("No codecs output from ffmpeg for muxers") return(NULL) - } - res$capabilities = trimws(res$capabilities) - + } + res$capabilities <- trimws(res$capabilities) + return(res) } #' @rdname ffmpeg_codecs #' @export -ffmpeg_version = function() { - ffmpeg = ffmpeg_exec(quote = TRUE) - cmd = paste(ffmpeg, "-version") - result = system(cmd, ignore.stderr = TRUE, ignore.stdout = TRUE) - res = system(cmd, intern = TRUE, ignore.stderr = TRUE) - res = trimws(res) +ffmpeg_version <- function() { + ffmpeg <- ffmpeg_exec(quote = TRUE) + cmd <- paste(ffmpeg, "-version") + result <- system(cmd, ignore.stderr = TRUE, ignore.stdout = TRUE) + res <- system(cmd, intern = TRUE, ignore.stderr = TRUE) + res <- trimws(res) if (length(res) == 0) { - res = "" - } + res <- "" + } if (result != 0 & all(res %in% "")) { warning("No codecs output from ffmpeg for version") return(NULL) - } - res = res[grepl("^ffmpeg version", res)] - res = sub("ffmpeg version (.*) Copyright .*", "\\1", res) - res = sub("(ubuntu|debian).*", "", res) - res = sub("-.*", "", res) - res = sub("[+].*", "", res) - res = trimws(res) + } + res <- res[grepl("^ffmpeg version", res)] + res <- sub("ffmpeg version (.*) Copyright .*", "\\1", res) + res <- sub("(ubuntu|debian).*", "", res) + res <- sub("-.*", "", res) + res <- sub("[+].*", "", res) + res <- trimws(res) return(res) } #' @rdname 
ffmpeg_codecs #' @export -ffmpeg_version_sufficient = function() { +ffmpeg_version_sufficient <- function() { if (have_ffmpeg_exec()) { - ver = package_version("3.2.4") - ff_ver = ffmpeg_version() + ver <- package_version("3.2.4") + ff_ver <- ffmpeg_version() if (is.null(ff_ver)) { - warning(paste0("Cannot get ffmpeg version from ", - "ffmpeg_version, returning FALSE")) + warning(paste0( + "Cannot get ffmpeg version from ", + "ffmpeg_version, returning FALSE" + )) return(FALSE) } - ff_ver_char = ff_ver - ff_ver = package_version(ff_ver, strict = FALSE) + ff_ver_char <- ff_ver + ff_ver <- package_version(ff_ver, strict = FALSE) if (is.na(ff_ver)) { warning( paste0( "ffmpeg version is not parsed, probably a development version,", - "version was ", ff_ver_char, ", make sure you have >= ", + "version was ", ff_ver_char, ", make sure you have >= ", as.character(ver) ) ) return(TRUE) } - res = ff_ver >= ver + res <- ff_ver >= ver } else { - res = FALSE + res <- FALSE } res } #' @rdname ffmpeg_codecs #' @export -check_ffmpeg_version = function() { +check_ffmpeg_version <- function() { if (!ffmpeg_version_sufficient()) { - ff = ffmpeg_version() + ff <- ffmpeg_version() stop(paste0( - "ffmpeg version is not high enough,", - " ffmpeg version is: ", ff)) + "ffmpeg version is not high enough,", + " ffmpeg version is: ", ff + )) } return(invisible(NULL)) -} \ No newline at end of file +} diff --git a/R/ffmpeg_convert.R b/R/ffmpeg_convert.R index 9fd22af..935e3d3 100644 --- a/R/ffmpeg_convert.R +++ b/R/ffmpeg_convert.R @@ -3,42 +3,40 @@ #' @param file Video/PNG file to convert #' @param outfile output file #' @param overwrite should output file be overwritten? 
-#' @param args arguments to pass to \code{\link{system2}} to pass to +#' @param args arguments to pass to \code{\link{system2}} to pass to #' \code{ffmpeg} #' #' @return A character string of the output file with different attributes #' @export #' #' @examples -#' pngfile = tempfile(fileext = ".png") +#' pngfile <- tempfile(fileext = ".png") #' png(pngfile) #' plot(0, 0) #' dev.off() #' if (have_ffmpeg_exec()) { -#' res = ffmpeg_convert(pngfile) +#' res <- ffmpeg_convert(pngfile) #' } -ffmpeg_convert = function( - file, - outfile = tempfile( - fileext = paste0(".", tools::file_ext(file)) - ), - overwrite = TRUE, - args = NULL -) { - ffmpeg = ffmpeg_exec(quote = FALSE) - file = normalizePath(file) - add_y = NULL +ffmpeg_convert <- function(file, + outfile = tempfile( + fileext = paste0(".", tools::file_ext(file)) + ), + overwrite = TRUE, + args = NULL) { + ffmpeg <- ffmpeg_exec(quote = FALSE) + file <- normalizePath(file) + add_y <- NULL if (file.exists(outfile)) { if (!overwrite) { stop("outfile already exists, overwrite = FALSE") } else { - add_y = "-y" + add_y <- "-y" } } - args = c(add_y, "-i", file, args, outfile) + args <- c(add_y, "-i", file, args, outfile) suppressWarnings({ - res = system2(ffmpeg, args = args, stdout = TRUE, stderr = TRUE) + res <- system2(ffmpeg, args = args, stdout = TRUE, stderr = TRUE) }) - attr(outfile, "result") = res + attr(outfile, "result") <- res return(outfile) -} \ No newline at end of file +} diff --git a/R/ffmpeg_exec.R b/R/ffmpeg_exec.R index e73ef37..1f8330f 100644 --- a/R/ffmpeg_exec.R +++ b/R/ffmpeg_exec.R @@ -10,33 +10,40 @@ #' @examples #' \dontrun{ #' if (have_ffmpeg_exec()) { -#' ffmpeg_exec() +#' ffmpeg_exec() #' } #' } -ffmpeg_exec = function(quote = FALSE) { - ffmpeg <- discard(c(Sys.getenv("ffmpeg"), - Sys.which("ffmpeg")), ~ nchar(.x) == 0)[1] +ffmpeg_exec <- function(quote = FALSE) { + ffmpeg <- discard(c( + Sys.getenv("ffmpeg"), + Sys.which("ffmpeg") + ), ~ nchar(.x) == 0)[1] if (is.na(ffmpeg)) { - 
stop(paste("Could not find ffmpeg. See the documentation ", - "for ari_stitch() ", - "for more details.")) + stop(paste( + "Could not find ffmpeg. See the documentation ", + "for ari_stitch() ", + "for more details." + )) } if (!ffmpeg %in% c("ffmpeg", "ffmpeg.exe")) { - ffmpeg = normalizePath(ffmpeg, winslash = "/") + ffmpeg <- normalizePath(ffmpeg, winslash = "/") } if (quote) { - ffmpeg = shQuote(ffmpeg) + ffmpeg <- shQuote(ffmpeg) } return(ffmpeg) } #' @export #' @rdname ffmpeg_exec -have_ffmpeg_exec = function() { - exec = try({ - ari::ffmpeg_exec() - }, silent = TRUE) +have_ffmpeg_exec <- function() { + exec <- try( + { + ari::ffmpeg_exec() + }, + silent = TRUE + ) !inherits(exec, "try-error") } @@ -47,11 +54,11 @@ have_ffmpeg_exec = function() { #' @param verbose print diagnostic messages #' @export #' -ffmpeg_error_log = function(file, verbose = TRUE) { - ffmpeg = ffmpeg_exec(quote = TRUE) +ffmpeg_error_log <- function(file, verbose = TRUE) { + ffmpeg <- ffmpeg_exec(quote = TRUE) - file = normalizePath(file, winslash = "/", mustWork = TRUE) - error_file = tempfile(fileext = ".txt") + file <- normalizePath(file, winslash = "/", mustWork = TRUE) + error_file <- tempfile(fileext = ".txt") command <- paste( ffmpeg, "-v error", "-i", shQuote(file), @@ -61,7 +68,7 @@ ffmpeg_error_log = function(file, verbose = TRUE) { if (verbose > 0) { message(command) } - res = system(command) + res <- system(command) if (!file.exists(error_file)) { stop("Error file not generated") } diff --git a/R/pad_wav.R b/R/pad_wav.R index 1a28559..33af86a 100644 --- a/R/pad_wav.R +++ b/R/pad_wav.R @@ -2,8 +2,8 @@ #' #' @param wav list of Wave objects #' @param duration If \code{NULL}, the duration will simply round -#' the Wave up to the next whole integer. If not, these are the -#' duration to pad the Wave *to*. For example 12 means the output +#' the Wave up to the next whole integer. If not, these are the +#' duration to pad the Wave *to*. 
For example 12 means the output #' Wave will have a length of 12 seconds. Pass \code{NA} to those #' Waves that you want simple rounding. #' @@ -11,53 +11,55 @@ #' @export #' #' @examples -#' wavs = list( -#' tuneR::noise(duration = 1.85*44100), -#' tuneR::noise()) -#' out = pad_wav(wavs) -#' dur = sapply(out, function(x)length(x@left)/ x@samp.rate) -#' duration = c(2, 2) -#' out = pad_wav(wavs, duration = duration) -#' dur = sapply(out, function(x)length(x@left)/ x@samp.rate) +#' wavs <- list( +#' tuneR::noise(duration = 1.85 * 44100), +#' tuneR::noise() +#' ) +#' out <- pad_wav(wavs) +#' dur <- sapply(out, function(x) length(x@left) / x@samp.rate) +#' duration <- c(2, 2) +#' out <- pad_wav(wavs, duration = duration) +#' dur <- sapply(out, function(x) length(x@left) / x@samp.rate) #' stopifnot(all(dur == duration)) -#' duration = c(2, 2.5) -#' out = pad_wav(wavs, duration = duration) -#' dur = sapply(out, function(x)length(x@left)/ x@samp.rate) +#' duration <- c(2, 2.5) +#' out <- pad_wav(wavs, duration = duration) +#' dur <- sapply(out, function(x) length(x@left) / x@samp.rate) #' stopifnot(isTRUE(all.equal(dur, duration))) -pad_wav = function(wav, duration = NULL) { - is_Wave = inherits(wav, "Wave") +pad_wav <- function(wav, duration = NULL) { + is_Wave <- inherits(wav, "Wave") if (is_Wave) { - wav = list(wav) + wav <- list(wav) } if (is.null(duration)) { - duration = rep(NA, length(wav)) + duration <- rep(NA, length(wav)) } stopifnot(length(duration) == length(wav)) - duration = mapply(function(wav, dur) { - ideal_duration = ceiling(length(wav@left)/wav@samp.rate) + duration <- mapply(function(wav, dur) { + ideal_duration <- ceiling(length(wav@left) / wav@samp.rate) if (!is.na(dur)) { - ideal_duration = max(ideal_duration, dur) + ideal_duration <- max(ideal_duration, dur) } ideal_duration }, wav, duration) - - out_wav = mapply(function(wav, ideal_duration) { - left = rep(0, wav@samp.rate * ideal_duration - length(wav@left)) - right = numeric(0) + + out_wav <- 
mapply(function(wav, ideal_duration) { + left <- rep(0, wav@samp.rate * ideal_duration - length(wav@left)) + right <- numeric(0) if (wav@stereo) { - right = left + right <- left } - end_wav = tuneR::Wave( + end_wav <- tuneR::Wave( left = left, right = right, - bit = wav@bit, + bit = wav@bit, samp.rate = wav@samp.rate, - pcm = wav@pcm) - wav <- tuneR::bind(wav, end_wav) + pcm = wav@pcm + ) + wav <- tuneR::bind(wav, end_wav) wav }, wav, duration, SIMPLIFY = FALSE) if (is_Wave) { - out_wav = out_wav[[1]] + out_wav <- out_wav[[1]] } return(out_wav) -} \ No newline at end of file +} diff --git a/R/set_encoders.R b/R/set_encoders.R index 0fb72a3..4cf82d4 100644 --- a/R/set_encoders.R +++ b/R/set_encoders.R @@ -1,6 +1,6 @@ -get_os = function() { - sys_info = Sys.info() - os = tolower(sys_info[["sysname"]]) +get_os <- function() { + sys_info <- Sys.info() + os <- tolower(sys_info[["sysname"]]) return(os) } @@ -12,41 +12,41 @@ get_os = function() { #' @seealso [ffmpeg_codecs()] for options #' @return A `NULL` output #' -#' +#' #' @rdname codecs #' @export -#' +#' #' @examples #' \dontrun{ #' if (have_ffmpeg_exec()) { -#' print(ffmpeg_version()) -#' get_audio_codec() -#' set_audio_codec(codec = "libfdk_aac") -#' get_audio_codec() -#' set_audio_codec(codec = "aac") -#' get_audio_codec() +#' print(ffmpeg_version()) +#' get_audio_codec() +#' set_audio_codec(codec = "libfdk_aac") +#' get_audio_codec() +#' set_audio_codec(codec = "aac") +#' get_audio_codec() #' } #' if (have_ffmpeg_exec()) { -#' get_video_codec() -#' set_video_codec(codec = "libx265") -#' get_video_codec() -#' set_video_codec(codec = "libx264") -#' get_video_codec() +#' get_video_codec() +#' set_video_codec(codec = "libx265") +#' get_video_codec() +#' set_video_codec(codec = "libx264") +#' get_video_codec() #' } #' ## empty thing #' if (have_ffmpeg_exec()) { -#' video_codec_encode("libx264") -#' -#' audio_codec_encode("aac") +#' video_codec_encode("libx264") +#' +#' audio_codec_encode("aac") #' } #' } 
-set_audio_codec = function(codec) { +set_audio_codec <- function(codec) { if (missing(codec)) { - os = get_os() - codec = switch(os, - darwin = "libfdk_aac", - windows = "ac3", - linux = "aac" + os <- get_os() + codec <- switch(os, + darwin = "libfdk_aac", + windows = "ac3", + linux = "aac" ) } options(ffmpeg_audio_codec = codec) @@ -54,43 +54,43 @@ set_audio_codec = function(codec) { #' @export #' @rdname codecs -set_video_codec = function(codec = "libx264") { +set_video_codec <- function(codec = "libx264") { options(ffmpeg_video_codec = codec) } #' @export #' @rdname codecs -get_audio_codec = function() { - codec = getOption("ffmpeg_audio_codec") +get_audio_codec <- function() { + codec <- getOption("ffmpeg_audio_codec") if (is.null(codec)) { - os = get_os() - res = ffmpeg_audio_codecs() + os <- get_os() + res <- ffmpeg_audio_codecs() if (is.null(res)) { - fdk_enabled = FALSE + fdk_enabled <- FALSE } else { - fdk_enabled = grepl("fdk", res[ res$codec == "aac", "codec_name"]) + fdk_enabled <- grepl("fdk", res[res$codec == "aac", "codec_name"]) } if (fdk_enabled) { - os_audio_codec = "libfdk_aac" + os_audio_codec <- "libfdk_aac" } else { - os_audio_codec = "aac" + os_audio_codec <- "aac" } - codec = switch(os, - darwin = os_audio_codec, - windows = "ac3", - linux = "aac" + codec <- switch(os, + darwin = os_audio_codec, + windows = "ac3", + linux = "aac" ) set_audio_codec(codec = codec) - } + } return(codec) } #' @export #' @rdname codecs -get_video_codec = function() { - codec = getOption("ffmpeg_video_codec") +get_video_codec <- function() { + codec <- getOption("ffmpeg_video_codec") if (is.null(codec)) { - codec = "libx264" + codec <- "libx264" set_video_codec(codec = codec) } return(codec) @@ -99,30 +99,28 @@ get_video_codec = function() { #' @rdname codecs #' @export -audio_codec_encode = function(codec) { - res = ffmpeg_audio_codecs() +audio_codec_encode <- function(codec) { + res <- ffmpeg_audio_codecs() if (is.null(res)) { warning("Codec could not be 
checked") return(NA) - } + } stopifnot(length(codec) == 1) - res = res[ res$codec %in% codec | - grepl(codec, res$codec_name), ] + res <- res[res$codec %in% codec | + grepl(codec, res$codec_name), ] res$encoding_supported } #' @rdname codecs #' @export -video_codec_encode = function(codec) { - res = ffmpeg_video_codecs() +video_codec_encode <- function(codec) { + res <- ffmpeg_video_codecs() if (is.null(res)) { warning("Codec could not be checked") return(NA) - } + } stopifnot(length(codec) == 1) - res = res[ res$codec %in% codec | - grepl(codec, res$codec_name), ] + res <- res[res$codec %in% codec | + grepl(codec, res$codec_name), ] res$encoding_supported } - - diff --git a/R/utilities.R b/R/utilities.R index 992f5f5..687a513 100644 --- a/R/utilities.R +++ b/R/utilities.R @@ -1,43 +1,48 @@ -make_same_sample_rate = function(audio, verbose = TRUE) { - if (inherits(audio, "Wave")) return(audio) +make_same_sample_rate <- function(audio, verbose = TRUE) { + if (inherits(audio, "Wave")) { + return(audio) + } - sample_rate = sapply(audio, function(r) r@samp.rate) + sample_rate <- sapply(audio, function(r) r@samp.rate) if (!all(sample_rate == sample_rate[[1]]) && verbose) { message("enforcing same sample rate, using minimum") } - sample_rate = min(sample_rate, na.rm = TRUE) + sample_rate <- min(sample_rate, na.rm = TRUE) if (verbose) { message(paste0("Sample rate downsampled to ", sample_rate)) } - audio = lapply(audio, function(x) { - if (x@samp.rate == sample_rate) return(x) + audio <- lapply(audio, function(x) { + if (x@samp.rate == sample_rate) { + return(x) + } tuneR::downsample(x, samp.rate = sample_rate) }) - sample_rate = sapply(audio, function(r) r@samp.rate) + sample_rate <- sapply(audio, function(r) r@samp.rate) stopifnot(all(sample_rate == sample_rate[[1]])) return(audio) } -is_Wave <- function(x){ +is_Wave <- function(x) { identical(suppressWarnings(as.character(class(x))), "Wave") } # get random string -grs <- function(){ +grs <- function() { 
paste(sample(c(seq(10), letters, LETTERS), - size = 12, replace = TRUE), collapse = "") + size = 12, replace = TRUE + ), collapse = "") } # how long is a wav? -duration <- function(wav){ +duration <- function(wav) { stopifnot(is_Wave(wav)) length(wav@left) / wav@samp.rate } # get from list # list, name of element, default -gfl <- function(l, n, d){ - if(is.null(l[[n]])){ +gfl <- function(l, n, d) { + if (is.null(l[[n]])) { d } else { l[[n]] @@ -45,16 +50,16 @@ gfl <- function(l, n, d){ } #' @importFrom purrr map_chr compose -string_tirm <- function(s){ - str_rev <- function(t){ +string_tirm <- function(s) { + str_rev <- function(t) { paste(rev(strsplit(t, NULL)[[1]]), collapse = "") } - str_trim_right <- function(x){ + str_trim_right <- function(x) { sub("\\s+$", "", x) } - str_trim_left <- function(x){ + str_trim_left <- function(x) { x <- str_rev(x) x <- str_trim_right(x) str_rev(x) @@ -65,24 +70,25 @@ string_tirm <- function(s){ } # get text from html comments in an Rmd -parse_html_comments <- function(path){ +parse_html_comments <- function(path) { lines_ <- readLines(path, warn = FALSE) starts <- grep("<!--", lines_) ends <- grep("-->", lines_) - if(length(starts) != length(ends)){ + if (length(starts) != length(ends)) { stop("There's a comment open/close mismatch.") } result <- rep(NA, length(starts)) - for(i in seq_along(starts)){ - if(starts[i] == ends[i]){ # Single line + for (i in seq_along(starts)) { + if (starts[i] == ends[i]) { # Single line result[i] <- lines_[starts[i]] } else { # Multiple lines result[i] <- paste(string_tirm(lines_[starts[i]:ends[i]]), - collapse = " ") + collapse = " " + ) } result[i] <- sub("<!--", "", result[i]) result[i] <- sub("-->", "", result[i]) @@ -93,9 +99,9 @@ parse_html_comments <- function(path){ } # split a big string into equal-ish sized pieces #' @importFrom purrr map -split_up_text <- function(text){ - pieces <- ceiling(nchar(text)/1500) +split_up_text <- function(text) { + pieces <- ceiling(nchar(text) / 1500) words <- strsplit(text, " ")[[1]] - chunks <- split(words, 
ceiling(seq_along(words)/(length(words)/pieces))) + chunks <- split(words, ceiling(seq_along(words) / (length(words) / pieces))) map(chunks, paste, collapse = " ") }