From 66e8ef68b834d1987f855113b278fc8be5cdc781 Mon Sep 17 00:00:00 2001 From: "Jeffrey S. Evans" Date: Wed, 15 Nov 2023 20:37:32 -0700 Subject: [PATCH] updates Cleaned up documentation --- DESCRIPTION | 5 +- NAMESPACE | 17 +- R/all_pairwise.R | 9 +- R/annulus.matrix.R | 6 +- R/aspline.downscale.R | 37 ++-- R/background.R | 8 +- R/bbox_poly.R | 45 +++-- R/bearing.distance.R | 4 +- R/breeding.density.R | 18 +- R/built.index.R | 13 +- R/cgls_urls.R | 19 +- R/class.comparison.R | 18 -- R/classBreaks.R | 9 +- R/collinear.R | 10 +- R/combine.R | 28 ++- R/concordance.R | 6 +- R/conf.interval.R | 14 +- R/correlogram.R | 3 +- R/cross.tab.R | 12 +- R/crossCorrelation.R | 90 +++++----- R/csi.R | 22 +-- R/curvature.R | 15 +- R/dahi.R | 14 +- R/date_seq.R | 2 +- R/daymet.point.R | 8 +- R/dispersion.R | 8 +- R/dissection.R | 6 +- R/download.daymet.R | 34 ---- R/download.hansen.R | 12 -- R/download.prism.R | 13 -- R/erase.points.R | 19 +- R/explode.R | 26 --- R/extract_vertices.R | 23 ++- R/focal.lmetrics.R | 27 --- R/fuzzySum.R | 6 +- R/gaussian.kernel.R | 3 +- R/geo.buffer.R | 31 ++-- R/group.pdf.R | 12 +- R/hexagons.R | 30 ++-- R/hli.R | 10 +- R/hli.pt.R | 12 +- R/hsp.R | 8 +- R/hybrid.kmeans.R | 8 +- R/idw.smoothing.R | 15 +- R/impute.loess.R | 10 +- R/insert.R | 26 ++- R/insert.values.R | 12 +- R/is.empty.R | 15 +- R/kde2D.R | 11 -- R/kendall.R | 20 +-- R/kl.divergence.R | 11 +- R/knn.R | 33 ++-- R/lai.R | 10 +- R/land.metrics.R | 30 ---- R/local.min.max.R | 8 +- R/loess.boot.R | 28 +-- R/loess.ci.R | 6 +- R/logistic.regression.R | 33 ++-- R/max_extent.R | 10 +- R/mean_angle.R | 8 +- R/moments.R | 6 +- R/morans.plot.R | 48 ++--- R/mwCorr.R | 13 -- R/nni.R | 14 +- R/nth.values.R | 5 +- R/o.ring.R | 16 +- R/oli.aws.R | 28 +-- R/optimal.k.R | 8 +- R/optimized.sample.variance.R | 13 +- R/outliers.R | 4 +- R/overlap.R | 6 +- R/parea.sample.R | 15 +- R/parse.bits.R | 63 +++---- R/partial.cor.R | 18 +- R/plot.effect.size.R | 2 + R/plot.loess.boot.R | 13 +- R/point.in.poly.R | 19 -- R/poly.regression.R | 18 +- R/polyPerimeter.R | 3 +- R/poly_trend.R | 10 +- R/polygon_extract.R | 17 -- R/pp.subsample.R | 25 +-- R/print.cross.cor.R | 18 ++ R/print.effect.size.R | 5 + R/print.loess.boot.R | 19 ++ R/print.poly.trend.R | 3 + R/proximity.index.R | 5 +- R/pseudo.absence.R | 62 +++---- R/pu-data.R | 2 +- R/quadrats.R | 32 ++-- R/random.raster.R | 7 +- R/raster.Zscore.R | 8 +- R/raster.change.R | 49 +++-- R/raster.deviation.R | 14 +- R/raster.downscale.R | 53 +++--- R/raster.entropy.R | 8 +- R/raster.gaussian.smooth.R | 6 +- R/raster.invert.R | 10 +- R/raster.kendall.R | 24 +-- R/raster.mds.R | 10 +- R/raster.modified.ttest.R | 43 ++--- R/raster.moments.R | 7 +- R/raster.transformations.R | 8 +- R/raster.vol.R | 7 +- R/rasterCorrelation.R | 11 +- R/rasterDistance.R | 25 +-- R/remove.holes.R | 6 +- R/remove_duplicates.R | 6 +- R/rotate.polygon.R | 6 +- R/sa.trans.R | 16 +- R/sample.annulus.R | 18 +- R/sample.line.R | 18 -- R/sample.poly.R | 18 -- R/{sample.transect.R => sampleTransect.R} | 206 +++++++++++----------- R/sar.R | 5 +- R/se.news.R | 8 - R/separability.R | 28 +-- R/sf.kde.R | 4 +- R/sf_dissolve.R | 7 +- R/sg.smooth.R | 60 +++---- R/shannons.R | 12 +- R/shift.R | 4 +- R/sieve.R | 11 +- R/similarity.R | 12 +- R/smooth.time.series.R | 8 +- R/sobel.R | 6 +- R/sp.na.omit.R | 12 -- R/spatial.select.R | 39 ++-- R/spatialEcoNews.R | 7 +- R/spectral.separability.R | 14 +- R/spherical.sd.R | 4 +- R/squareBuffer.R | 4 +- R/srr.R | 10 +- R/stratified.random.R | 14 +- R/subsample.distance.R | 7 +- R/summary.cross.cor.R 
| 19 ++ R/summary.effect.size.R | 4 + R/summary.loess.boot.R | 22 ++- R/swvi.R | 63 +++---- R/time_to_event.R | 7 +- R/topo.distance.R | 15 +- R/trasp.R | 16 +- R/tri.R | 26 +-- R/vrm.R | 11 +- R/winsorize.R | 10 +- R/wt.centroids.R | 12 +- R/zonal.stats.R | 18 -- man/all_pairwise.Rd | 2 +- man/annulus.matrix.Rd | 2 +- man/aspline.downscale.Rd | 17 +- man/background.Rd | 2 +- man/bbox_poly.Rd | 40 ++--- man/bearing.distance.Rd | 2 +- man/breeding.density.Rd | 6 +- man/built.index.Rd | 11 +- man/cgls_urls.Rd | 4 +- man/class.comparison.Rd | 24 --- man/classBreaks.Rd | 7 +- man/collinear.Rd | 2 + man/combine.Rd | 22 +-- man/concordance.Rd | 2 +- man/conf.interval.Rd | 26 ++- man/correlogram.Rd | 2 +- man/cross.tab.Rd | 6 +- man/crossCorrelation.Rd | 51 +++--- man/csi.Rd | 20 +-- man/curvature.Rd | 4 +- man/dahi.Rd | 3 +- man/daymet.point.Rd | 4 +- man/dispersion.Rd | 2 +- man/dissection.Rd | 2 +- man/download.daymet.Rd | 35 ---- man/download.hansen.Rd | 17 -- man/download.prism.Rd | 17 -- man/erase.point.Rd | 17 +- man/explode.Rd | 30 ---- man/extract.vertices.Rd | 19 +- man/focal.lmetrics.Rd | 29 --- man/fuzzySum.Rd | 2 +- man/gaussian.kernel.Rd | 3 +- man/geo.buffer.Rd | 27 ++- man/group.pdf.Rd | 6 +- man/hexagons.Rd | 26 +-- man/hli.Rd | 4 +- man/hli.pt.Rd | 6 +- man/hsp.Rd | 2 +- man/idw.smoothing.Rd | 7 +- man/impute.loess.Rd | 3 +- man/insert.Rd | 20 +-- man/insert.values.Rd | 5 +- man/is.empty.Rd | 13 +- man/kde.2D.Rd | 14 -- man/kl.divergence.Rd | 3 + man/knn.Rd | 20 ++- man/lai.Rd | 12 +- man/land.metrics.Rd | 32 ---- man/local.min.max.Rd | 2 +- man/loess.boot.Rd | 3 +- man/logistic.regression.Rd | 3 +- man/max_extent.Rd | 6 +- man/mean_angle.Rd | 4 +- man/moments.Rd | 6 +- man/morans.plot.Rd | 44 ++--- man/mwCorr.Rd | 17 -- man/nni.Rd | 3 +- man/nth.values.Rd | 2 +- man/o.ring.Rd | 13 +- man/oli.asw.Rd | 4 +- man/optimal.k.Rd | 6 +- man/optimized.sample.variance.Rd | 12 +- man/outliers.Rd | 2 +- man/overlap.Rd | 5 +- man/parea.sample.Rd | 15 +- man/parse.bits.Rd | 63 +++---- man/plot.effect.size.Rd | 3 + man/plot.loess.boot.Rd | 12 +- man/point.in.poly.Rd | 24 --- man/poly.regression.Rd | 3 +- man/polyPerimeter.Rd | 3 +- man/poly_trend.Rd | 2 +- man/polygon_extract.Rd | 23 --- man/pp.subsample.Rd | 12 +- man/print.cross.cor.Rd | 20 +++ man/print.effect.size.Rd | 4 + man/print.loess.boot.Rd | 22 +++ man/print.poly.trend.Rd | 3 + man/proximity.index.Rd | 5 +- man/pseudo.absence.Rd | 109 ++++++------ man/pu.Rd | 2 +- man/quadrats.Rd | 31 ++-- man/random.raster.Rd | 3 +- man/raster.Zscore.Rd | 3 +- man/raster.change.Rd | 67 ++++--- man/raster.deviation.Rd | 5 +- man/raster.downscale.Rd | 38 ++-- man/raster.entropy.Rd | 6 +- man/raster.gaussian.smooth.Rd | 2 +- man/raster.invert.Rd | 6 +- man/raster.mds.Rd | 6 +- man/raster.modified.ttest.Rd | 56 +++--- man/raster.moments.Rd | 5 +- man/raster.transformation.Rd | 3 +- man/raster.vol.Rd | 3 +- man/rasterCorrelation.Rd | 5 +- man/rasterDistance.Rd | 15 +- man/remove.holes.Rd | 2 +- man/remove_duplicates.Rd | 2 +- man/rotate.polygon.Rd | 2 +- man/sa.trans.Rd | 10 +- man/sample.annulus.Rd | 15 +- man/sample.line.Rd | 24 --- man/sample.poly.Rd | 24 --- man/sampleTransect.Rd | 16 +- man/sar.Rd | 2 +- man/separability.Rd | 3 +- man/sf_dissolve.Rd | 2 +- man/sg.smooth.Rd | 59 +++---- man/shannons.Rd | 2 +- man/shift.Rd | 3 +- man/sieve.Rd | 10 +- man/similarity.Rd | 2 +- man/smooth.time.series.Rd | 2 +- man/sobal.Rd | 3 +- man/sp.na.omit.Rd | 17 -- man/spatial.select.Rd | 30 ++-- man/{se.news.Rd => spatialEcoNews.Rd} | 11 +- 
man/spectral.separability.Rd | 8 +- man/squareBuffer.Rd | 2 +- man/srr.Rd | 6 +- man/stratified.random.Rd | 12 +- man/subsample.distance.Rd | 7 +- man/summary.cross.cor.Rd | 20 +++ man/summary.effect.size.Rd | 4 + man/summary.loess.boot.Rd | 22 +++ man/swvi.Rd | 48 +++-- man/time_to_event.Rd | 4 +- man/topo.distance.Rd | 10 +- man/trasp.Rd | 10 +- man/tri.Rd | 8 +- man/vrm.Rd | 7 +- man/winsorize.Rd | 3 +- man/wt.centroid.Rd | 7 +- man/zonal.stats.Rd | 23 --- 283 files changed, 2000 insertions(+), 2384 deletions(-) delete mode 100644 R/class.comparison.R delete mode 100644 R/download.daymet.R delete mode 100644 R/download.hansen.R delete mode 100644 R/download.prism.R delete mode 100644 R/explode.R delete mode 100644 R/focal.lmetrics.R delete mode 100644 R/kde2D.R delete mode 100644 R/land.metrics.R delete mode 100644 R/mwCorr.R delete mode 100644 R/point.in.poly.R delete mode 100644 R/polygon_extract.R delete mode 100644 R/sample.line.R delete mode 100644 R/sample.poly.R rename R/{sample.transect.R => sampleTransect.R} (93%) delete mode 100644 R/se.news.R delete mode 100644 R/sp.na.omit.R delete mode 100644 R/zonal.stats.R delete mode 100644 man/class.comparison.Rd delete mode 100644 man/download.daymet.Rd delete mode 100644 man/download.hansen.Rd delete mode 100644 man/download.prism.Rd delete mode 100644 man/explode.Rd delete mode 100644 man/focal.lmetrics.Rd delete mode 100644 man/kde.2D.Rd delete mode 100644 man/land.metrics.Rd delete mode 100644 man/mwCorr.Rd delete mode 100644 man/point.in.poly.Rd delete mode 100644 man/polygon_extract.Rd delete mode 100644 man/sample.line.Rd delete mode 100644 man/sample.poly.Rd delete mode 100644 man/sp.na.omit.Rd rename man/{se.news.Rd => spatialEcoNews.Rd} (50%) delete mode 100644 man/zonal.stats.Rd diff --git a/DESCRIPTION b/DESCRIPTION index 9a1b82b..20b44e5 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -2,7 +2,7 @@ Package: spatialEco Type: Package Title: Spatial Analysis and Modelling Utilities Version: 2.0-2 -Date: 2023-11-10 +Date: 2023-11-15 Authors@R: c( person(family="Evans", given="Jeffrey S.", email = "jeffrey_evans@tnc.org", role = c("aut", "cre"), comment = c(ORCID = "0000-0002-5533-7044")), @@ -44,7 +44,8 @@ Suggests: units, sp, stringr, - lwgeom + lwgeom, + geodata Maintainer: Jeffrey S. 
Evans License: GPL-3 URL: https://github.com/jeffreyevans/spatialEco, https://jeffreyevans.github.io/spatialEco/ diff --git a/NAMESPACE b/NAMESPACE index 63e5a60..b769100 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -19,7 +19,6 @@ export(breeding.density) export(built.index) export(cgls_urls) export(chae) -export(class.comparison) export(classBreaks) export(collinear) export(combine) @@ -38,14 +37,9 @@ export(daymet.tiles) export(dispersion) export(dissection) export(divergence) -export(download.daymet) -export(download.hansen) -export(download.prism) export(effect.size) export(erase.point) -export(explode) export(extract.vertices) -export(focal.lmetrics) export(fuzzySum) export(gaussian.kernel) export(geo.buffer) @@ -60,12 +54,10 @@ export(impute.loess) export(insert) export(insert.values) export(is.empty) -export(kde.2D) export(kendall) export(kl.divergence) export(knn) export(lai) -export(land.metrics) export(local.min.max) export(loess.boot) export(loess.ci) @@ -74,7 +66,6 @@ export(max_extent) export(mean_angle) export(moments) export(morans.plot) -export(mwCorr) export(nni) export(nth.values) export(o.ring) @@ -86,11 +77,9 @@ export(overlap) export(parea.sample) export(parse.bits) export(partial.cor) -export(point.in.poly) export(poly.regression) export(polyPerimeter) export(poly_trend) -export(polygon_extract) export(pp.subsample) export(proximity.index) export(pseudo.absence) @@ -117,11 +106,8 @@ export(rm.ext) export(rotate.polygon) export(sa.trans) export(sample.annulus) -export(sample.line) -export(sample.poly) export(sampleTransect) export(sar) -export(se.news) export(separability) export(sf.kde) export(sf_dissolve) @@ -133,8 +119,8 @@ export(similarity) export(smooth.time.series) export(sobal) export(sp.kde) -export(sp.na.omit) export(spatial.select) +export(spatialEcoNews) export(spectral.separability) export(spherical.sd) export(squareBuffer) @@ -151,5 +137,4 @@ export(tri) export(vrm) export(winsorize) export(wt.centroid) -export(zonal.stats) import(terra) diff --git a/R/all_pairwise.R b/R/all_pairwise.R index e111439..3abbd56 100644 --- a/R/all_pairwise.R +++ b/R/all_pairwise.R @@ -2,14 +2,15 @@ #' @description Creates all pairwise combinations list for iteration #' #' @param x A numeric or character vector -#' -#' @return A list object with increasing all combination objects, -#' the first list element are the pairwise comparisons #' -#' @note This returns a list of vector combinations starting with +#' @details +#' This returns a list of vector combinations starting with #' pairwise, as the first nested list element, then in groups of #' threes, fours, to length of the vector. #' +#' @return A list object with increasing all combination objects, +#' the first list element being the pairwise comparisons +#' #' @author Jeffrey S. Evans <jeffrey_evans@tnc.org> #' #' @examples
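The all_pairwise behavior documented in the hunk above (combinations of increasing size, starting with the pairwise set) can be reproduced with base R's combn. A minimal sketch of the documented behavior, not the package implementation:

    # All combinations of increasing size, starting with the pairwise set
    x <- c("a", "b", "c", "d")
    combos <- lapply(2:length(x), function(k) combn(x, k, simplify = FALSE))
    combos[[1]]  # first list element: the pairwise comparisons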
diff --git a/R/annulus.matrix.R b/R/annulus.matrix.R index 280c1dd..aa86ade 100644 --- a/R/annulus.matrix.R +++ b/R/annulus.matrix.R @@ -7,9 +7,9 @@ #' @param outer.scale Number of outer rings to set to null.value #' @param null.value Value to set inner and outer scale(s) to #' -#' @return A matrix object with defined null.value and 1, representing retained rings -#' -#' @note +#' @details #' This function will return a matrix of 1 and defined null.value based on a specification #' of the scale, inner scale and outer scale. The scale defines how many rings will be #' represented in the matrix based on (2 * scale - 1). So, a scale of 3 will result in a @@ -17,6 +15,8 @@ #' will be set to the defined null.value (see examples). The resulting matrix can be used #' as the specified window in a focal function. #' +#' @return A matrix object with defined null.value and 1, representing retained rings +#' #' @author Jeffrey S. Evans #' #' @examples diff --git a/R/aspline.downscale.R b/R/aspline.downscale.R index 37999f4..44a55a4 100644 --- a/R/aspline.downscale.R +++ b/R/aspline.downscale.R @@ -10,6 +10,15 @@ #' @param plot (FALSE/TRUE) Plot results #' @param ... Additional arguments passed to earth #' +#' @details +#' This function uses Multivariate Adaptive Regression Splines to downscale a raster based +#' on higher-resolution or more detailed raster data specified as covariate(s). This is similar +#' to the raster.downscale function which uses a robust regression and is a frequentist model for +#' fitting linear asymptotic relationships whereas this approach is for fitting nonparametric +#' functions and should be used when the distributional relationships are complex/nonlinear. +#' Using add.coords adds spatial coordinates to the model, including creating the associated +#' rasters for prediction. +#' #' @return A list object containing: #' * downscale Downscaled terra SpatRaster object #' * GCV Generalized Cross Validation (GCV) @@ -19,15 +28,6 @@ #' * model earth MARS model object (if keep.model = TRUE) #' @md #' -#' @note -#' This function uses Multivariate Adaptive Regression Splines, to downscale a raster based -#' on higher-resolution or more detailed raster data specified as covariate(s). This is similar -#' to the raster.downsample function which uses a robust regression and is a frequentest model for -#' fitting linear asymptotic relationships whereas, this approach is for fitting nonparametric -#' functions and should be used when the distributional relationship are complex/nonlinear. -#' Using add.coords adds spatial coordinates to the model, including creating the associated -#' rasters for prediction. -#' #' @references #' Friedman (1991) Multivariate Adaptive Regression Splines (with discussion) #' Annals of Statistics 19(1):1–141 @@ -35,16 +35,17 @@ #' @author Jeffrey S. Evans #' #' @examples -#' \dontrun{ -#' library(geodata) +#' \donttest{ +#' if (require(geodata, quietly = TRUE)) { #' library(terra) +#' library(geodata) #' #' # Download example data (requires geodata package) -#' elev <- geodata::elevation_30s(country="SWZ", path=tempdir()) +#' elev <- elevation_30s(country="SWZ", path=tempdir()) #' slp <- terrain(elev, v="slope") #' x <- c(elev,slp) #' names(x) <- c("elev","slope") -#' tmax <- geodata::worldclim_country(country="SWZ", var="tmax", +#' tmax <- worldclim_country(country="SWZ", var="tmax", #' path=tempdir()) #' tmax <- crop(tmax[[1]], ext(elev)) #' names(tmax) <- "tmax" @@ -60,8 +61,12 @@ #' plot(x[[2]], main="slope") #' plot(tmax.ds$downscale, main="Downscaled Temp max") #' par(opar) -#' +#' +#' } else { +#' cat("Please install geodata package to run example", "\n") #' } +#' } +#' #' @export aspline.downscale <- function(x, y, add.coords = TRUE, keep.model = FALSE, grid.search = FALSE, plot = FALSE, ...)
{ @@ -75,7 +80,7 @@ aspline.downscale <- function(x, y, add.coords = TRUE, keep.model = FALSE, y <- y[[1]] } if(grid.search) { - if(!any(which(utils::installed.packages()[,1] %in% "caret"))) + if(length(find.package("caret", quiet = TRUE)) == 0) stop("please install caret package to implement grid search") } sub.samp.sp <- terra::as.points(x, na.rm=TRUE) @@ -101,7 +106,7 @@ aspline.downscale <- function(x, y, add.coords = TRUE, keep.model = FALSE, names(x)[(nlyr(x)-1):nlyr(x)] <- c("xcoord", "ycoord") } if(grid.search) { - if(!any(which(utils::installed.packages()[,1] %in% "caret"))) + if(length(find.package("caret", quiet = TRUE)) == 0) stop("please install caret package to implement grid search") hyper_grid <- expand.grid( degree = 1:3, diff --git a/R/background.R b/R/background.R index 2526bcf..a8b5bd5 100644 --- a/R/background.R +++ b/R/background.R @@ -9,9 +9,7 @@ #' @param d Threshold distance for known proximity #' @param type Type of sample c("systematic", "random", "hexagon", "nonaligned") #' -#' @return A sf POINT feature class or data.frame with x,y coordinates -#' -#' @note +#' @details #' This function creates a background point sample based on an extent #' or polygon sampling region. The known argument can be used with d #' to remove sample points based on distance-based proximity to existing @@ -19,6 +17,8 @@ #' sample will be dependent on the known locations and the influence of #' the distance threshold (d). As such, if the know and d arguments are #' provided the exact value provided in p will not be returned. +#' +#' @return A sf POINT feature class or data.frame with x,y coordinates #' #' @author Jeffrey S. Evans #' @@ -66,7 +66,7 @@ background <- function(x, p=1000, known=NULL, d=NULL, if(as.character(unique(sf::st_geometry_type(x))) != "POLYGON") stop(deparse(substitute(x)), " x must be an sf POLYGON object") } - if(!any(which(utils::installed.packages()[,1] %in% "lwgeom"))) + if(length(find.package("lwgeom", quiet = TRUE)) == 0) stop("please install lwgeom package before running this function") if(!is.null(known)){ if(!inherits(known, c("sf", "sfc"))) diff --git a/R/bbox_poly.R b/R/bbox_poly.R index 2b0aa82..1adb404 100644 --- a/R/bbox_poly.R +++ b/R/bbox_poly.R @@ -1,50 +1,49 @@ #' @title Bounding box polygon +#' #' @description Creates a polygon from a vector or raster extent #' #' @param x An sf or terra object or vector of bounding coordinates #' -#' @return A single feature sf class polygon object -#' -#' @note +#' @details #' If not a spatial object, expected order of input for x is: xmin, ymin, #' xmax, ymax. Where; xmin, ymin and the coordinates of top left corner of the #' bounding box and xmax, ymax represent the bottom right corner. The maximum #' value of xmax is width of the extent while maximum value of ymax is the height #' of the extent. #' +#' @return A single feature sf class polygon object +#' #' @author Jeffrey S. 
Evans #' #' @examples -#' p = c("sf", "sp", "terra") -#' if(any(!unlist(lapply(p, requireNamespace, quietly=TRUE)))) { -#' m = which(!unlist(lapply(p, requireNamespace, quietly=TRUE))) -#' message("Can't run examples, please install ", paste(p[m], collapse = " ")) -#' } else { -#' invisible(lapply(p, require, character.only=TRUE)) -#' +#' if(require(sp, quietly = TRUE)) { +#' library(terra) +#' library(sf) #' data(meuse, package = "sp") #' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, #' agr = "constant") #' +#' # raster (terra) +#' r <- rast(ext(meuse)) +#' r[] <- runif(ncell(r)) +#' crs(r) <- "epsg:28992" +#' e <- bbox_poly(r) +#' +#' plot(r) +#' plot(st_geometry(e), border="red", add=TRUE) +#' +#' # extent vector +#' e <- bbox_poly(c(178605, 329714, 181390, 333611)) +#' plot(e) +#' #' # vector bounding box #' e <- bbox_poly(meuse) #' #' plot(st_geometry(meuse), pch=20) #' plot(st_geometry(e), add=TRUE) #' -#' # raster (terra) -#' r <- rast(ext(meuse)) -#' r[] <- runif(ncell(r)) -#' crs(r) <- "epsg:28992" -#' e <- bbox_poly(r) -#' -#' plot(r) -#' plot(st_geometry(e), border="red", add=TRUE) -#' -#' # extent vector -#' e <- bbox_poly(c(178605, 329714, 181390, 333611)) -#' plot(e) -#' +#' } else { +#' cat("Please install sp package to run this example", "\n") #' } #' #' @export bbox_poly diff --git a/R/bearing.distance.R b/R/bearing.distance.R index 748174f..71d70ae 100644 --- a/R/bearing.distance.R +++ b/R/bearing.distance.R @@ -8,9 +8,9 @@ #' @param azimuth Azimuth to new point #' @param EastOfNorth Specified surveying convention #' -#' @return a new point representing location of baring and distance +#' @details East of north is a surveying convention and defaults to true. #' -#' @note East of north is a surveying convention and defaults to true. +#' @return a new point representing location of bearing and distance #' #' @author Jeffrey S. Evans #'
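The bearing.distance documentation above describes projecting a new point from a bearing and distance; under the East-of-North convention this is the standard planar survey equation. A hedged sketch (hypothetical helper, not the package implementation):

    # Destination point from origin (x, y), distance d (map units), and
    # azimuth in degrees, East of North (0 = north, increasing clockwise)
    destination <- function(x, y, d, azimuth) {
      rad <- azimuth * pi / 180
      c(x = x + d * sin(rad), y = y + d * cos(rad))
    }
    destination(0, 0, d = 100, azimuth = 45)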
diff --git a/R/breeding.density.R b/R/breeding.density.R index d2fae85..d9a4979 100644 --- a/R/breeding.density.R +++ b/R/breeding.density.R @@ -10,6 +10,13 @@ #' @param self (TRUE/FALSE) Should source observations be included in #' density (default TRUE) #' +#' @details +#' The breeding density areas model identifies the Nth-percent population exhibiting +#' the highest spatial density and counts/frequency. It then buffers these points by +#' a specified distance to produce breeding area polygons. If you would like to recreate +#' the results in Doherty et al., (2010), then define bw = 6400m and b[if p < 0.75 +#' b = 6400m, | p >= 0.75 b = 8500m] +#' #' @return A list object with: #' \itemize{ #' \item pop.pts sf POINT object with points identified within the specified p @@ -19,13 +26,6 @@ #' \item p Specified population percent #' } #' -#' @note -#' The breeding density areas model identifies the Nth-percent population exhibiting -#' the highest spatial density and counts/frequency. It then buffers these points by -#' a specified distance to produce breeding area polygons. If you would like to recreate -#' the results in Doherty et al., (2010), then define bw = 6400m and b[if p < 0.75 -#' b = 6400m, | p >= 0.75 b = 8500m] -#' #' @author Jeffrey S. Evans #' #' @references #' Doherty, K.E., J.D. Tack, J.S. Evans, D.E. Naugle (2010) Mapping breeding densities of #' greater sage-grouse: A tool for range-wide conservation planning. #' Bureau of Land Management. Number L10PG00911 #' #' @examples -#' if(require(sf, quietly = TRUE)) { +#' library(sf) #' #' n=1500 #' bb <- rbind(c(-1281299,-761876.5),c(1915337,2566433.5)) @@ -53,8 +53,6 @@ #' plot(st_geometry(bd75$pop.pts), pch=20, col='red', add=TRUE) #' legend("bottomright", legend=c("selected areas","selected sites", "all sites"), #' bg="white", fill=c("grey","red", "black"), pt.cex = 2) -#' -#' } #' #' @export breeding.density <- function(x, pop, p = 0.75, bw = 6400, diff --git a/R/built.index.R b/R/built.index.R index c125058..217e1bb 100644 --- a/R/built.index.R +++ b/R/built.index.R @@ -14,22 +14,20 @@ #' @param L The L factor for the savi index #' @param method Method to use for index options are "Bouhennache", "Zha", "Xu" #' -#' @description +#' @details #' This function calculates the built-up index. Three methods are available: #' * Bouhennache is a new method that uses a larger portion of the VIR/NIR #' following OLI bands (((b3+b4+b7)-b6)/3) / (((b3+b4+b7)+b6)/3) #' * Zha is the original band ratio method using TM5 ndbi = (b5 - b4) / (b5 + b4) #' * Xu is a modification to eliminate noise using ETM+7 #' (ndbi-((savi-nndwi)/2) / (ndbi+((savi-nndwi)/2) -#' -#' @description +#' #' Generally water has the highest values where built-up areas will occur in the #' mid portion of the distribution. Since Bouhennache et al (2018) index exploits #' a larger portion of the visible (Vis) and infra red spectrum, vegetation will #' occur as the lowest values and barren will exhibit greater values than the #' vegetation and lower values than the built-up areas. #' -#' @description #' Band wavelength (nanometers) designations for landsat #' TM4, TM5 and ETM+7 #' * band-2 0.52-0.60 (green) @@ -46,6 +44,8 @@ #' * band-7 2.11-2.29 (SWIR 2) #' @md #' +#' @return A terra raster object of the built index +#' #' @references #' Bouhennache, R., T. Bouden, A. Taleb-Ahmed & A. Chaddad(2018) A new spectral index #' for the extraction of built-up land features from Landsat 8 satellite imagery, @@ -61,9 +61,9 @@ #' @author Jeffrey S. Evans #' #' @examples -#' \dontrun{ +#' \donttest{ #' library(terra) -#' lsat <- rast(system.file("extdata/Landsat_TM5", package="spatialEco")) +#' lsat <- rast(system.file("/extdata/Landsat_TM5.tif", package="spatialEco")) #' plotRGB(lsat, r=3, g=2, b=1, scale=1.0, stretch="lin") #' #' # Using Bouhennache et al., (2018) method (needs green, red, swir1 and swir2) @@ -83,7 +83,6 @@ #' swir1 = lsat[[5]], , method = "Xu") ) #' plotRGB(lsat, r=3, g=2, b=1, scale=1, stretch="lin") #' plot(xu, legend=FALSE, col=rev(terrain.colors(100, alpha=0.35)), add=TRUE ) -#' #' } #' #' @export built.index diff --git a/R/cgls_urls.R b/R/cgls_urls.R index 5fcacff..815221c 100644 --- a/R/cgls_urls.R +++ b/R/cgls_urls.R @@ -7,16 +7,13 @@ # "fcover", "lai", "ndvi" #' @param ver Product version options are "newest", "v1", "v2", "v3" #' -#' @return A vector of download URL's for the products -#' -#' @details +#' @details #' Provides a query of the ESA's Copernicus Global Land Service global # datasets which can then be used to download product(s). #' The query is performed on the manifest files and return URL's #' however, to download data you will need login credentials which, #' can be acquired from: http://land.copernicus.eu #' -#' @details #' If provided, dates need to be in a "YYYY-MM-DD" format. The dates #' are an explicit search string and can contain dates that are not in #' the imagery.
As such, the user should generate a daily date string @@ -27,18 +24,16 @@ #' It is highly recommended that you do not change the default #' ver="newest" argument unless there is a specific reason to. #' -#' @details Available products +#' Available products #' * fapar Fraction of photosynthetically active radiation #' absorbed by the vegetation #' * fcover Fraction of green vegetation cover #' * lai Leaf Area index #' * ndvi Normalized Difference Vegetation Index -#' @md #' -#' @details -#' Not yet implemented; Soil Water Index, Surface Soil Moisture, -# and Land Surface Temperature. -#' Copernicus product details: http://land.copernicus.eu/global/products/ +#' Not yet implemented; Soil Water Index, Surface Soil Moisture, and Land Surface Temperature. +#' +#' @return A vector of download URL's for the products #' #' @author Jeffrey S. Evans #' @@ -72,12 +67,14 @@ #' # basename(all.urls[i])), mode = 'wb') #' # } #' } +#' +#' @md #' @export cgls_urls cgls_urls <- function(dates = NULL, resolution = c(1000, 300), product = c("fapar", "fcover", "lai", "ndvi"), ver = c("newest", "v1", "v2", "v3")) { manifest.url <- "https://land.copernicus.vgt.vito.be/manifest/" - if(!any(which(utils::installed.packages()[,1] %in% "stringr"))) + if(length(find.package("stringr", quiet = TRUE)) == 0) stop("please install stringr package before running this function") if(resolution[1] == 300) { r1 = "333m" diff --git a/R/class.comparison.R b/R/class.comparison.R deleted file mode 100644 index a54b0fa..0000000 --- a/R/class.comparison.R +++ /dev/null @@ -1,18 +0,0 @@ -#' @title Class comparison between two nominal rasters -#' @description Compares two categorical rasters using Cohen's Kappa (d) -#' or paired t-test statistic(s) -#' -#' @param ... arguments passed to raster.change -#' @return NA -#' -#' @examples -#' \dontrun{ -#' raster.change() -#' } -#' -#' @export -class.comparison <- function(...) { - .Deprecated("class.comparison", package="spatialEco", - msg="Function is deprecated because it is now the same as - the raster.change function, using stats = t.test or kappa ") -} diff --git a/R/classBreaks.R b/R/classBreaks.R index 3aa0bdc..1e378cd 100644 --- a/R/classBreaks.R +++ b/R/classBreaks.R @@ -5,11 +5,12 @@ #' @param n Number of breaks #' @param type Statistic used to find breaks c("equal", "quantile", "std", "geometric") #' -#' @return A vector containing class break values the length is n+1 to allow for -#' specification of ranges +#' @details +#' The robust std method uses sqrt(sum(x^2)/(n-1)) to center the data before deriving "pretty" breaks. #' -#' @note The robust std method uses sqrt(sum(x^2)/(n-1)) to center the data before -#' deriving "pretty" breaks. +#' @return +#' A vector containing class break values the length is n+1 to allow for +#' specification of ranges #' #' @author Jeffrey S. Evans #' diff --git a/R/collinear.R b/R/collinear.R index 5dc56e6..f86c091 100644 --- a/R/collinear.R +++ b/R/collinear.R @@ -7,8 +7,6 @@ #' (FALSE/TRUE) #' @param p.value If nonlinear is TRUE, the p value to accept as the #' significance of the correlation -#' -#' @return Messages and a vector of correlated variables #' #' @author Jeffrey S. Evans <jeffrey_evans@tnc.org> #' @@ -21,7 +19,11 @@ #' whether or not that functional estimate is constant, which would indicate #' no relationship between y and x thus, avoiding potentially arbitrary decisions #' regarding the order in a polynomial regression. -#' +#' +#' @return Messages and a vector of correlated variables +#' +#' @author Jeffrey S.
Evans +#' +#' @examples #' data(cor.data) #' @@ -58,7 +60,7 @@ collinear <- function (x, p = 0.85, nonlinear = FALSE, p.value = 0.001) { x <- x[,-which(names(x) %in% cn)] } if(nonlinear == TRUE) { - if(!any(which(utils::installed.packages()[,1] %in% "mgcv"))) + if(length(find.package("mgcv", quiet = TRUE)) == 0) stop("please install mgcv package for nonlinear option") nlcor <- function(x, y, pv = 0.05) { g <- mgcv::gam(y ~ s(x)) diff --git a/R/combine.R b/R/combine.R index 142e467..087d606 100644 --- a/R/combine.R +++ b/R/combine.R @@ -3,27 +3,21 @@ #' #' @param x raster stack/brick or SpatialPixelsDataFrame object #' -#' @return -#' A ratified rasterLayer or a list containing a SpatialPixelsDataFrame -#' and a data.frame of unique combinations. -#' -#' @details -#' Please note that this is not a memory safe function that utilizes -#' rasters out of memory in the manner that the raster package does. -#' @details -#' If sp = TRUE the object will be a list with "combine", containing -#' the SpatialPixelsDataFrame with the value attribute containing the -#' unique combinations, and "summary" with the summary table of collapsed -#' combinations and associated attributes. #' @details -#' If sp = FALSE the a single ratified rasterLayer class object is returned -#' with the summary table as the raster attribute table, this is most similar -#' to the ESRI format resulting from their combine function. +#' A single ratified raster object is returned with the summary table +#' as the raster attribute table; this is most similar to the ESRI +#' format resulting from their combine function. +#' +#' Please note that this is not a memory safe function that utilizes +#' rasters out of memory in the manner that the terra package does. +#' +#' @return +#' A ratified (factor) terra SpatRaster representing unique combinations. +#' #' @author Jeffrey S. Evans #' #' @examples -#' if(require(terra, quietly = TRUE)) { +#' library(terra) #' #' # Create example data (with a few NA's introduced) #' r1 <- rast(nrows=100, ncol=100) @@ -48,8 +42,6 @@ #' #' # or, from separate layers #' cr <- combine(c(r1,r3)) -#' -#' } #' #' @export combine combine <- function(x) { diff --git a/R/concordance.R b/R/concordance.R index 86a35dc..1e1ec7b 100644 --- a/R/concordance.R +++ b/R/concordance.R @@ -5,9 +5,7 @@ #' @param y vector of binomial response variable used in model #' @param p estimated probabilities from fit binomial model #' -#' @return list object with: concordance, discordance, tied and pairs -#' -#' @note +#' @details #' Test of binomial regression for the hypothesis that probabilities of all #' positives [1], are greater than the probabilities of the nulls [0]. The #' concordance would be 100% for a perfect model where, disconcordance is the @@ -19,6 +17,8 @@ #' representing the null class, tied - number of tied probabilities and #' pairs - number of pairs compared #' +#' @return list object with: concordance, discordance, tied and pairs +#' #' @author Jeffrey S.
Evans #' #' @references diff --git a/R/conf.interval.R b/R/conf.interval.R index a8ab822..4cbea0c 100644 --- a/R/conf.interval.R +++ b/R/conf.interval.R @@ -7,12 +7,14 @@ #' @param stat Statistic (mean or median) #' @param std.error Return standard error (TRUE/FALSE) #' -#' @return lci Lower confidence interval value -#' @return uci Upper confidence interval value -#' @return mean If stat = "mean", mean value of distribution -#' @return mean Value of the mean or median -#' @return conf.level Confidence level used for confidence interval -#' @return std.error If std.error = TRUE standard error of distribution +#' @return data.frame containing: +#' * lci - Lower confidence interval value +#' * uci - Upper confidence interval value +#' * mean - If stat = "mean", mean value of distribution +#' * mean - Value of the mean or median +#' * conf.level - Confidence level used for confidence interval +#' * std.error - If std.error = TRUE standard error of distribution +#' @md #' #' @author Jeffrey S. Evans #' diff --git a/R/correlogram.R b/R/correlogram.R index 4c69952..a5183c2 100644 --- a/R/correlogram.R +++ b/R/correlogram.R @@ -8,7 +8,8 @@ #' @param ns Number of simulations to derive simulation envelope #' @param ... Arguments passed to cor ('pearson', 'kendall' or 'spearman') #' -#' @return A list object containing: +#' @return +#' Plot of correlogram and a list object containing: #' * autocorrelation is a data.frame object with the following components #' * autocorrelation - Autocorrelation value for each distance lag #' * dist - Value of distance lag diff --git a/R/cross.tab.R b/R/cross.tab.R index 90dab0b..913ab0e 100644 --- a/R/cross.tab.R +++ b/R/cross.tab.R @@ -8,19 +8,19 @@ #' @param pct (TRUE/FALSE) return proportions rather than counts #' @param ... Additional arguments #' -#' @return a table with the cross tabulated counts -#' -#' @note +#' @details #' This function returns a cross tabulation between two nominal rasters. #' Arguments allow for labeling the results and returning proportions #' rather than counts. It also accounts for asymmetrical classes between #' the two rasters +#' +#' @return a table with the cross tabulated counts #' #' @author Jeffrey S. Evans #' -#' @references Pontius Jr, R.G., Shusas, E., McEachern, M. (2004). Detecting -#' important categorical land changes while accounting for persistence. -#' Agriculture, Ecosystems & Environment 101(2):251-268. +#' @references +#' Pontius Jr, R.G., Shusas, E., McEachern, M. (2004). Detecting important categorical land changes #' while accounting for persistence. Agriculture, Ecosystems & Environment 101(2):251-268. #' #' @examples #' library(terra)
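The conf.interval return fields documented above (lci, uci, the mean/median estimate, conf.level, std.error) describe a distribution-based interval. A minimal base-R sketch under that reading (hypothetical helper, not the package code):

    # Quantile-based confidence interval around a chosen statistic
    ci_sketch <- function(x, conf.level = 0.95, stat = mean) {
      a <- (1 - conf.level) / 2
      q <- stats::quantile(x, probs = c(a, 1 - a), na.rm = TRUE)
      data.frame(lci = unname(q[1]), uci = unname(q[2]), estimate = stat(x),
                 conf.level = conf.level,
                 std.error = stats::sd(x) / sqrt(length(x)))
    }
    ci_sketch(rnorm(1000))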
diff --git a/R/crossCorrelation.R b/R/crossCorrelation.R index f8ae75c..418eb44 100644 --- a/R/crossCorrelation.R +++ b/R/crossCorrelation.R @@ -27,25 +27,6 @@ #' @param clust (FALSE/TRUE) Return approximated lisa clusters #' @param return.sims (FALSE/TRUE) Return randomizations vector n = k #' -#' @return When not simulated k=0, a list containing: -#' * I - Global autocorrelation statistic -#' * SCI - - A data.frame with two columns representing the xy and yx autocorrelation -#' * nsim - value of NULL to represent p values were derived from observed data (k=0) -#' * p - Probability based observations above/below confidence interval -#' * t.test - Probability based on t-test -#' \item clusters - If "clust" argument TRUE, vector representing LISA clusters -#' @md -#' -#' @return when simulated (k>0), a list containing: -#' * I - Global autocorrelation statistic -#' * SCI - A data.frame with two columns representing the xy and yx autocorrelation -#' * nsim - value representing number of simulations -#' * global.p - p-value of global autocorrelation statistic -#' * local.p - Probability based simulated data using successful rejection of t-test -#' * range.p - Probability based on range of probabilities resulting from paired t-test -#' * clusters - If "clust" argument TRUE, vector representing lisa clusters -#' @md -#' #' #' @details #' In specifying a distance matrix, you can pass a coordinates matrix or spatial @@ -64,8 +45,25 @@ #' one of the dist.function options #' * IF cords=NULL, w=x, dist.function="none" #' It is assumed that the matrix passed to w already represents -#' the spatial weights -#' @md +#' the spatial weights +#' +#' @return +#' When not simulated k=0, a list containing: +#' * I - Global autocorrelation statistic +#' * SCI - A data.frame with two columns representing the xy and yx autocorrelation +#' * nsim - value of NULL to represent p values were derived from observed data (k=0) +#' * p - Probability based on observations above/below confidence interval +#' * t.test - Probability based on t-test +#' * clusters - If "clust" argument TRUE, vector representing LISA clusters +#' +#' When simulated (k>0), a list containing: +#' * I - Global autocorrelation statistic +#' * SCI - A data.frame with two columns representing the xy and yx autocorrelation +#' * nsim - value representing number of simulations +#' * global.p - p-value of global autocorrelation statistic +#' * local.p - Probability based simulated data using successful rejection of t-test +#' * range.p - Probability based on range of probabilities resulting from paired t-test +#' * clusters - If "clust" argument TRUE, vector representing lisa clusters #' #' @references #' Chen, Y.G.
(2012) On the four types of weight functions for spatial contiguity @@ -85,32 +83,36 @@ #' dist.function = "inv.power") ) #' #' \donttest{ -#' library(sp) -#' library(spdep) -#' -#' data(meuse) -#' coordinates(meuse) <- ~x+y -#' -#' #### Using a default spatial weights matrix method (inverse power function) -#' ( I <- crossCorrelation(meuse$zinc, meuse$copper, -#' coords = coordinates(meuse), k=99) ) -#' meuse$lisa <- I$SCI[,"lsci.xy"] -#' spplot(meuse, "lisa") +#' library(sf) +#' library(spdep) #' -#' #### Providing a distance matrix -#' Wij <- spDists(meuse) -#' ( I <- crossCorrelation(meuse$zinc, meuse$copper, w = Wij, k=99) ) -#' -#' #### Providing an inverse power function weights matrix -#' Wij <- spDists(meuse) -#' Wij <- 1 / Wij -#' diag(Wij) <- 0 -#' Wij <- Wij / sum(Wij) -#' diag(Wij) <- 0 -#' ( I <- crossCorrelation(meuse$zinc, meuse$copper, w = Wij, -#' dist.function = "none", k=99) ) +#' if (require(sp, quietly = TRUE)) { +#' data(meuse, package = "sp") +#' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, agr = "constant") +#' } +#' +#' #### Using a default spatial weights matrix method (inverse power function) +#' ( I <- crossCorrelation(meuse$zinc, meuse$copper, +#' coords = st_coordinates(meuse)[,1:2], k=99) ) +#' meuse$lisa <- I$SCI[,"lsci.xy"] +#' plot(meuse["lisa"], pch=20) +#' +#' #### Providing a distance matrix +#' if (require(units, quietly = TRUE)) { +#' Wij <- units::drop_units(st_distance(meuse)) +#' ( I <- crossCorrelation(meuse$zinc, meuse$copper, w = Wij, k=99) ) +#' +#' #### Providing an inverse power function weights matrix +#' Wij <- 1 / Wij +#' diag(Wij) <- 0 +#' Wij <- Wij / sum(Wij) +#' diag(Wij) <- 0 +#' ( I <- crossCorrelation(meuse$zinc, meuse$copper, w = Wij, +#' dist.function = "none", k=99) ) +#' } #' } #' +#' @md #' @export crossCorrelation crossCorrelation <- function(x, y = NULL, coords = NULL, w = NULL, type = c("LSCI", "GSCI"), k = 999, dist.function = c("inv.power", "neg.exponent", "none"), diff --git a/R/csi.R b/R/csi.R index f6a4e14..993a9a8 100644 --- a/R/csi.R +++ b/R/csi.R @@ -5,10 +5,7 @@ #' @param x A vector or matrix object #' @param y If x is a vector, then a vector object #' -#' @return If x is a matrix, a list object with: similarity and angular.similarity -#' matrices or, if x and y are vectors, a vector of similarity and angular.similarity -#' -#' @note +#' @details #' The cosine similarity index is a measure of similarity between two #' vectors of an inner product space. This index is bested suited for high-dimensional #' positive variable space. One useful application of the index is to measure separability @@ -17,24 +14,27 @@ #' cosine similarity index is mathematically, and often numerically, equivalent to the #' Pearson's correlation coefficient #' -#' @note #' The cosine similarity index is derived: #' s(xy) = x * y / ||x|| * ||y||, where the expected is 1.0 (perfect similarity) #' to -1.0 (perfect dissimilarity). A normalized angle between the vectors can #' be used as a bounded similarity function within [0,1] #' angular similarity = 1 - (cos(s)^-1/pi) #' +#' @return +#' If x is a matrix, a list object with: similarity and angular.similarity matrices or, +#' if x and y are vectors, a vector of similarity and angular.similarity +#' #' @author Jeffrey S. 
Evans #' #' @examples #' # Compare two vectors (centered using scale) -#' x=runif(100) -#' y=runif(100)^2 -#' csi(as.vector(scale(x)),as.vector(scale(y))) +#' x=runif(100) +#' y=runif(100)^2 +#' csi(as.vector(scale(x)),as.vector(scale(y))) #' -#' #' # Compare columns (vectors) in a matrix (centered using scale) -#' x <- matrix(round(runif(100),0),nrow=20,ncol=5) -#' ( s <- csi(scale(x)) ) +#' # Compare columns (vectors) in a matrix (centered using scale) +#' x <- matrix(round(runif(100),0),nrow=20,ncol=5) +#' ( s <- csi(scale(x)) ) #' #' # Compare vector (x) to each column in a matrix (y) #' y <- matrix(round(runif(500),3),nrow=100,ncol=5) diff --git a/R/curvature.R b/R/curvature.R index e69f1a2..ee9b5bb 100644 --- a/R/curvature.R +++ b/R/curvature.R @@ -4,10 +4,8 @@ #' @param x A terra SpatRaster object #' @param type Method used c("planform", "profile", "total", "mcnab", "bolstad") #' @param ... Additional arguments passed to focal -#' -#' @return raster class object of surface curvature #' -#' @note +#' @details #' The planform and profile curvatures are the second derivative(s) of the #' elevation surface, or the slope of the slope. Profile curvature is in #' the direction of the maximum slope, and the planform curvature is @@ -24,30 +22,31 @@ #' Zevenbergen & Thorne (1987) via a quadratic equation fit to eight neighbors #' as such, the s (focal window size) argument is ignored. #' -#' @note #' McNab's and Bolstad's variants of the surface curvature (concavity/convexity) #' index (McNab 1993; Bolstad & Lillesand 1992; McNab 1989). The index is based #' on features that confine the view from the center of a #' 3x3 window. In the Bolstad equation, edge correction is addressed #' by dividing by the radius distance to the outermost cell (36.2m). #' +#' @return raster class object of surface curvature +#' #' @author Jeffrey S. Evans #' #' @references #' Bolstad, P.V., and T.M. Lillesand (1992). Improved classification of forest #' vegetation in northern Wisconsin through a rule-based combination of soils, #' terrain, and Landsat TM data. Forest Science. 38(1):5-20. -#' @references +#' #' Florinsky, I.V. (1998). Accuracy of Local Topographic Variables Derived from #' Digital Elevation Models. International Journal of Geographical Information #' Science, 12(1):47-62. -#' @references +#' #' McNab, H.W. (1989). Terrain shape index: quantifying effect of minor landforms #' on tree height. Forest Science. 35(1):91-104. -#' @references +#' #' McNab, H.W. (1993). A topographic index to quantify the effect of mesoscale #' landform on site productivity. Canadian Journal of Forest Research. 23:1100-1107. -#' @references +#' #' Zevenbergen, L.W. & C.R. Thorne (1987). Quantitative Analysis of Land Surface #' Topography. Earth Surface Processes and Landforms, 12:47-56. #' diff --git a/R/dahi.R b/R/dahi.R index f50944d..1070e3f 100644 --- a/R/dahi.R +++ b/R/dahi.R @@ -6,22 +6,22 @@ #' @param amax The Alpha Max (amax) parameter in degrees defined #' as: minimum = 0, maximum = 360 with the default = 202.500 #' -#' @return terra SpatRaster class object Diurnal Anisotropic Heat Index -#' -#' @description +#' @details #' The Diurnal Anisotropic Heat Index is based on this equation. #' Ha = cos(amax - a) * arctan(b) #' Where; amax defines the aspect with the maximum total heat #' surplus, a is the aspect and b is the slope angle. -#' +#' +#' @return terra SpatRaster class object Diurnal Anisotropic Heat Index +#' +#' @author Jeffrey S. Evans +#' #' @references #' Boehner, J., and Antonic, O. 
(2009) Land-surface parameters specific to #' topo-climatology. In: Hengl, T., & Reuter, H. (Eds.), Geomorphometry - #' Concepts, Software, Applications. Developments in Soil Science, #' 33:195-226 -#' -#' @author Jeffrey S. Evans -#' + #' @examples #' library(terra) #' elev <- rast(system.file("extdata/elev.tif", package="spatialEco")) diff --git a/R/date_seq.R b/R/date_seq.R index 8089ed7..07450d4 100644 --- a/R/date_seq.R +++ b/R/date_seq.R @@ -50,6 +50,6 @@ date_seq <- function(start, end, step = c("day", "week", "month", "quarter", "ye y <- unique(as.numeric(format(d,"%Y"))) leap.idx <- which(sapply(y, is.leap)) if(length(leap.idx) > 0) - cat("The following are leap years", y[leap.idx], "\n",sep="\t") + message("The following are leap years ", y[leap.idx], "\n" ,sep="\t") return(d) } diff --git a/R/daymet.point.R b/R/daymet.point.R index 17d93ac..2b8e43e 100644 --- a/R/daymet.point.R +++ b/R/daymet.point.R @@ -9,9 +9,7 @@ #' @param files (TRUE/FALSE) Write file to disk #' @param echo (TRUE/FALSE) Echo progress #' -#' @return A data.frame with climate results -#' -#' @note +#' @details #' data is available for Long -131.0 W and -53.0 W; lat 52.0 N and 14.5 N #' Function uses the Single Pixel Extraction tool and returns year, yday, #' dayl(s), prcp (mm/day), srad (W/m^2), swe (kg/m^2), tmax (deg c), @@ -19,6 +17,8 @@ #' Metadata for DAYMET single pixel extraction: #' \url{ https://daymet.ornl.gov/files/UserGuides/current/readme_singlepointextraction.pdf } #' +#' @return A data.frame with geographic coordinate point-level climate results +#' #' @author Jeffrey S. Evans #' #' @examples @@ -30,7 +30,7 @@ #' @export daymet.point <- function (lat, long, start.year, end.year, site=NULL, files = FALSE, echo = FALSE) { - if(!any(which(utils::installed.packages()[,1] %in% "RCurl"))) + if(length(find.package("RCurl", quiet = TRUE)) == 0) stop("please install RCurl package before running this function") if(missing(lat)) stop("Please define lat") if(missing(long)) stop("Please define long") diff --git a/R/dispersion.R b/R/dispersion.R index 5c146c5..1158d0c 100644 --- a/R/dispersion.R +++ b/R/dispersion.R @@ -4,14 +4,14 @@ #' #' @param x data.frame object of target values #' -#' @return -#' data.frame with columns H values for each target, H , sH, sHmax -#' -#' @note +#' @details #' The dispersion index (H-prime) is calculated H = sum( sqrt(p) / sqrt(a) ) #' where; P = (sum of target in planning unit / sum of target across all #' planning units) and a = (count of planning units containing #' target / number of planning units) +#' +#' @return +#' data.frame with columns H values for each target, H , sH, sHmax #' #' @author Jeffrey S. Evans #' diff --git a/R/dissection.R b/R/dissection.R index 03dfb56..f025720 100644 --- a/R/dissection.R +++ b/R/dissection.R @@ -6,12 +6,12 @@ #' @param s Focal window size #' @param ... Additional arguments passed to terra::lapp #' -#' @return A SpatRaster class object of Martonne's modified dissection -#' -#' @note +#' @details #' Dissection is calculated as: #' ( z(s) - min(z(s)) ) / ( max(z(s)) - min(z(s)) ) #' +#' @return A SpatRaster class object of Martonne's modified dissection +#' #' @author Jeffrey S. Evans #' #' @examples diff --git a/R/download.daymet.R b/R/download.daymet.R deleted file mode 100644 index cd00c07..0000000 --- a/R/download.daymet.R +++ /dev/null @@ -1,34 +0,0 @@ -#' @title Download DAYMET -#' @description Batch download of daily gridded DAYMET climate data -#' -#' @param ... 
ignored -#' -#' @details -#' DAYMET website: \url{http://daymet.ornl.gov}, -#' path structure: /year/tile_year/file.nc -#' -#' @author Jeffrey S. Evans -#' -#' @references -#' Thornton P.E., S.W. Running and M.A. White (1997) Generating surfaces of daily -#' meteorological variables over large regions of complex terrain. Journal of -#' Hydrology 190: 214-251. -#' @references -#' Thornton, P.E. and S.W. Running (1999) An improved algorithm for estimating -#' incident daily solar radiation from measurements of temperature, humidity, -#' and precipitation. Agriculture and Forest Meteorology. 93:211-228. -#' @references -#' Thornton, P.E., H. Hasenauer and M.A. White (2000) Simultaneous estimation -#' of daily solar radiation and humidity from observed temperature and -#' precipitation: An application over complex terrain in Austria. -#' Agricultural and Forest Meteorology 104:255-271. -#' -#' @export -download.daymet <- function(...) { - message("Because THREDDS has moved to a NetCDF Markup Language (NcML) query service, - this function is currently not able to access the server") - message("You can access the North America NetCDF subset tool here: - https://thredds.daac.ornl.gov/thredds/ncss/grid/daymet-v3-agg/na.ncml/dataset.html") - .Deprecated("download.daymet", package="spatialEco", - msg="Function is deprecated due to significant changes to the ORNL DAAC THREDDS server") -} diff --git a/R/download.hansen.R b/R/download.hansen.R deleted file mode 100644 index 16c9001..0000000 --- a/R/download.hansen.R +++ /dev/null @@ -1,12 +0,0 @@ -#' @title Download Hansen Forest 2000-2013 Change -#' @description Download of Hansen Global Forest Change 2000-2013 -#' -#' @param ... Nonexistent parameters -#' -#' @author Jeffrey S. Evans -#' -#' @export -download.hansen <- function(...) { - .Deprecated("download.hansen", package="spatialEco", - msg="Function is deprecated, due to instability in URL's and methods ") -} diff --git a/R/download.prism.R b/R/download.prism.R deleted file mode 100644 index 845ea3f..0000000 --- a/R/download.prism.R +++ /dev/null @@ -1,13 +0,0 @@ -#' @title Download PRISM -#' @description Batch download of monthly gridded PRISM climate data -#' -#' @param ... Nonexistent parameters -#' -#' @author Jeffrey S. Evans -#' -#' @export -download.prism <- function(...) { - .Deprecated("download.prism", package="spatialEco", - msg="Function is deprecated, for methodological consistency, - please use functions in the prism package (on CRAN) ") -} diff --git a/R/erase.points.R b/R/erase.points.R index ae50167..97d24dd 100644 --- a/R/erase.points.R +++ b/R/erase.points.R @@ -5,28 +5,23 @@ #' @param x A sf POLYGON object #' @param inside (TRUE/FALSE) Remove points inside polygon, else outside polygon #' -#' @return A sf POINT object -#' -#' @note +#' @details #' Used to erase points that intersect polygon(s). The default of inside=TRUE #' erases points inside the polygons however, if inside=FALSE then #' the function results in an intersection where points that #' intersect the polygon are retained. #' +#' @return An sf POINT object +#' #' @author Jeffrey S. 
Evans <jeffrey_evans@tnc.org> #' #' @examples #' -#' p = c("sf", "sp") -#' if(any(!unlist(lapply(p, requireNamespace, quietly=TRUE)))) { -#' m = which(!unlist(lapply(p, requireNamespace, quietly=TRUE))) -#' message("Can't run examples, please install ", paste(p[m], collapse = " ")) -#' } else { -#' invisible(lapply(p, require, character.only=TRUE)) +#' library(sf) #' +#' if (require(sp, quietly = TRUE)) { #' data(meuse, package = "sp") -#' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, -#' agr = "constant") +#' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, agr = "constant") #' #' s <- st_as_sf(st_sample(st_as_sfc(st_bbox(meuse)), size=1000, #' type = "regular")) @@ -49,6 +44,8 @@ #' plot(st_geometry(b),add=TRUE) #' par(opar) #' +#' } else { +#' cat("Please install sp package to run example", "\n") #' } #' #' @export erase.point diff --git a/R/explode.R b/R/explode.R deleted file mode 100644 index 87a4b93..0000000 --- a/R/explode.R +++ /dev/null @@ -1,26 +0,0 @@ -#' @title Explodes multipart features -#' @description Explodes multipart features into single part -#' -#' @param ... Parameters to be passed to st_cast -#' -#' @note -#' Multipart geometries are a data structure where a single attribute -#' shares multiple features (polygons, points, lines). This function -#' dissaggregates the data into a one-to-one match. -#' -#' @author Jeffrey S. Evans -#' -#' @examples -#' \donttest{ -#' library(sf) -#' nc <- st_read(system.file("shape/nc.shp", package="sf")) -#' nc <- suppressWarnings(st_cast(nc, "POLYGON")) -#' } -#' -#' @export -explode <- function(...) { - .Deprecated("explode", package="spatialEco", - msg="Function is deprecated because sf provides the ability to explode - multipart geometries using the sf::st_cast function ") - message("An example for polygons is: st_cast(x, POLYGON) ") -} diff --git a/R/extract_vertices.R b/R/extract_vertices.R index c3ef54d..a0e6b77 100644 --- a/R/extract_vertices.R +++ b/R/extract_vertices.R @@ -4,26 +4,25 @@ #' @param x An sf line or polygon class object #' @param join (TRUE/FALSE) Joint attributes from original object #' -#' @return -#' An sf POINT object -#' -#' @note +#' @details #' This function returns the vertices of a line or polygon object, as opposed #' to the polygon centroids or line start/stop coordinates #' +#' @return +#' An sf POINT object of extracted line or polygon vertices +#' #' @author Jeffrey S. Evans #' #' @examples #' -#' if(require(sf, quietly = TRUE)) { -#' nc <- sf::st_read(system.file("shape/nc.shp", package="sf")) -#' nc <- suppressWarnings(sf::st_cast(nc, "POLYGON")) -#' nc <- nc[c(10,50),] +#' library(sf) +#' nc <- sf::st_read(system.file("shape/nc.shp", package="sf")) +#' nc <- suppressWarnings(sf::st_cast(nc, "POLYGON")) +#' nc <- nc[c(10,50),] #' -#' ( v <- extract.vertices(nc) ) -#' plot(st_geometry(nc)) -#' plot(st_geometry(v), pch=20, cex=2, col="red", add=TRUE) -#' } +#' ( v <- extract.vertices(nc) ) +#' plot(st_geometry(nc)) +#' plot(st_geometry(v), pch=20, cex=2, col="red", add=TRUE) #' #' @export extract.vertices extract.vertices <- function(x, join = TRUE) {
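The vertex extraction that extract.vertices documents (vertices rather than centroids or line endpoints) can be approximated with sf alone. A minimal sketch using standard sf functions; the join of the original attributes is omitted here:

    library(sf)
    nc <- st_read(system.file("shape/nc.shp", package = "sf"), quiet = TRUE)
    # Vertex coordinates with feature/ring indices (X, Y, L1, L2, L3 columns)
    xy <- st_coordinates(nc)
    # Rebuild the vertices as POINT geometries
    pts <- st_as_sf(as.data.frame(xy), coords = c("X", "Y"), crs = st_crs(nc))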
diff --git a/R/focal.lmetrics.R b/R/focal.lmetrics.R deleted file mode 100644 index 2a5d476..0000000 --- a/R/focal.lmetrics.R +++ /dev/null @@ -1,27 +0,0 @@ -#' @title Focal landscape metrics -#' @description Calculates a variety of landscape metrics on -#' integer rasters using focal approach -#' -#' @param ... Parameters to be passed to the modern version -#' of the function -#' -#' @examples -#' \dontrun{ -#' library(landscapemetrics) -#' library(raster) -#' -#' data(landscape) -#' -#' s <- matrix(1, nrow = 3, ncol = 3) -#' ( result <- do.call(stack, window_lsm(landscape, window = s, -#' what = c("lsm_l_pr", "lsm_l_joinent"))) ) -#' plot(result) -#' } -#' @export -focal.lmetrics <- function(...) { - .Deprecated("focal.lmetrics", package="spatialEco", - msg="Function is deprecated, for methodological consistency, - please use functions in the landscapemetrics package (on CRAN) ") - message("Please see focal.lmetrics help for example of replicating function - using window_lsm in landscapemetrics package") -} diff --git a/R/fuzzySum.R b/R/fuzzySum.R index 1122df9..49927ab 100644 --- a/R/fuzzySum.R +++ b/R/fuzzySum.R @@ -3,13 +3,13 @@ #' #' @param x Vector of values to apply fuzzy sum #' -#' @return Value of fuzzy sum -#' -#' @note +#' @details #' The fuzzy sum is an increasing linear combination of values. #' This can be used to sum probabilities or results of multiple #' density functions. #' +#' @return Value of fuzzy sum +#' #' @author Jeffrey S. Evans #' #' @examples diff --git a/R/gaussian.kernel.R b/R/gaussian.kernel.R index 71ddd59..e7871f6 100644 --- a/R/gaussian.kernel.R +++ b/R/gaussian.kernel.R @@ -9,6 +9,7 @@ #' @author Jeffrey S. Evans #' #' @examples +#' opar <- par() #' par(mfrow=c(2,2)) #' persp(gaussian.kernel(sigma=1, s=27), theta = 135, #' phi = 30, col = "grey", ltheta = -120, shade = 0.6, @@ -19,7 +20,7 @@ #' col = "grey", ltheta = -120, shade = 0.6, border=NA ) #' persp(gaussian.kernel(sigma=4, s=27), theta = 135, phi = 30, #' col = "grey", ltheta = -120, shade = 0.6, border=NA ) -#' +#' par(opar) #' @export gaussian.kernel <- function(sigma=2, s=5) { m <- matrix(ncol=s, nrow=s) diff --git a/R/geo.buffer.R b/R/geo.buffer.R index 3e17570..66b3e3c 100644 --- a/R/geo.buffer.R +++ b/R/geo.buffer.R @@ -5,29 +5,28 @@ #' @param r Buffer radius in meters #' @param ... Additional arguments passed to sf::st_buffer #' -#' @return an sp or sf polygon class object representing buffer for each feature -#' -#' @note +#' @details #' Projects (Latitude/Longitude) data in decimal-degree geographic projection #' using an on-the-fly azimuthal equidistant projection in meters centered on # each feature. #' +#' @return an sp or sf polygon class object representing buffer for each feature +#' #' @author Jeffrey S. Evans #' #' @examples -#' if(require(sf, quietly = TRUE)) { -#' e <- c(61.87125, 23.90153, 76.64458, 37.27042) -#' names(e) <- c("xmin", "ymin", "xmax", "ymax") -#' s <- st_as_sf(st_sample(st_as_sfc(st_bbox(e)), size=100, -#' type = "regular")) -#' st_crs(s) <- st_crs(4326) -#' s$id <- 1:nrow(s) -#' -#' b <- geo.buffer(x=s, r=1000) -#' plot(st_geometry(b[1,])) -#' plot(st_geometry(s[1,]), pch=20,cex=2, add=TRUE) -#' } -#' +#' library(sf) +#' e <- c(61.87125, 23.90153, 76.64458, 37.27042) +#' names(e) <- c("xmin", "ymin", "xmax", "ymax") +#' s <- st_as_sf(st_sample(st_as_sfc(st_bbox(e)), size=100, +#' type = "regular")) +#' st_crs(s) <- st_crs(4326) +#' s$id <- 1:nrow(s) +#' +#' b <- geo.buffer(x=s, r=1000) +#' plot(st_geometry(b[1,])) +#' plot(st_geometry(s[1,]), pch=20,cex=2, add=TRUE) +#' #' @seealso \code{\link[sf]{st_buffer}} for st_buffer ...
arguments #' #' @export geo.buffer diff --git a/R/group.pdf.R b/R/group.pdf.R index faa29be..b74e1d6 100644 --- a/R/group.pdf.R +++ b/R/group.pdf.R @@ -1,6 +1,4 @@ #' @title Probability density plot by group -#' @description Creates a probability density plot of y for -#' each group of x #' #' @param y Numeric vector (density variable) #' @param x Numeric, character or factor vector of grouping @@ -13,11 +11,15 @@ #' @param ly Position of legend (y coordinate) #' @param ... Additional arguments passed to plot #' +#' @description Creates a probability density plot of y for +#' each group of x +#' +#' @return Plot of grouped PDFs +#' #' @author Jeffrey S. Evans <jeffrey_evans@tnc.org> #' #' @references -#' Simonoff, J. S. (1996). Smoothing Methods in Statistics. Springer-Verlag, -#' New York. +#' Simonoff, J. S. (1996). Smoothing Methods in Statistics. Springer-Verlag, New York. #' #' @examples #' y=dnorm(runif(100)) @@ -28,6 +30,8 @@ #' @export group.pdf <- function(x, y, col = NULL, lty = NULL, lwd = NULL, lx = "topleft", ly = NULL, ...) { + oops <- options() + on.exit(options(oops)) if (!is.numeric(y)) stop("y MUST BE NUMERIC") if (length(x) != length(y)) diff --git a/R/hexagons.R b/R/hexagons.R index 855da4a..49e4e59 100644 --- a/R/hexagons.R +++ b/R/hexagons.R @@ -1,20 +1,20 @@ #' @title Hexagons #' @description Create hexagon polygons #' -#' @param x sp SpatialDataFrame class object -#' @param res Area of resulting hexagons -#' @param ... Additional arguments passed to spsample +#' @param x sf class object indicating extent +#' @param res Area of resulting hexagons #' -#' @return SpatialPolygonsDataFrame OBJECT -#' -#' @note depends: sp -#' +#' @details +#' Based on extent of x, creates a hexagon mesh with size of hexagons defined by res argument +#' +#' @return sf POLYGONS object +#' #' @examples -#' library(sf) -#' if(require(sp, quietly = TRUE)) { -#' data(meuse, package = "sp") -#' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, -#' agr = "constant") +#' library(sf) +#' if(require(sp, quietly = TRUE)) { +#' data(meuse, package = "sp") +#' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, +#' agr = "constant") #' #' hex <- hexagons(meuse, res=300) #' plot(st_geometry(hex)) @@ -25,9 +25,13 @@ #' hex.sub <- hex[idx,] #' plot(st_geometry(hex.sub)) #' plot(st_geometry(meuse),pch=20,add=TRUE) +#' +#' } else { +#' cat("Please install sp package to run example", "\n") #' } +#' #' @export -hexagons <- function(x, res = 100, ...) { +hexagons <- function(x, res = 100) { if(!inherits(x, "sf")) stop(deparse(substitute(x)), " must be an sf object") if(sf::st_is_longlat(x)) diff --git a/R/hli.R b/R/hli.R index 801f598..f61849b 100644 --- a/R/hli.R +++ b/R/hli.R @@ -8,9 +8,7 @@ #' @param force.hemisphere If country is split at the equator, force southern #' or northern hemisphere equation c("southern", "northern") #' -#' @return terra SpatRaster class object of McCune & Keon (2002) Heat Load Index -#' -#' @note +#' @details #' Describes A southwest facing slope should have warmer temperatures than a #' southeast facing slope, even though the amount of solar radiation they receive #' is equivalent. The McCune and Keon (2002) method accounts for this by "folding" @@ -18,20 +16,22 @@ #' northeast. Additionally, this method account for steepness of slope, which is #' not addressed in most other aspect rescaling equations. HLI values range #' from 0 (coolest) to 1 (hottest). -#' @note +#' #' The equations follow McCune (2007) and support northern and southern hemisphere #' calculations.
The folded aspect for northern hemispheres use (180 - (Aspect – 225) ) #' and for Southern hemisphere ( 180 - ( Aspect – 315) ). If a country is split at the #' equator you can use the force.hemisphere argument to choose which equation to use. #' Valid values for this argument are "southern" and "northern" with the default "none". #' +#' @return terra SpatRaster class object of McCune & Keon (2002) Heat Load Index +#' #' @author Jeffrey S. Evans #' #' @references #' McCune, B., and D. Keon (2002) Equations for potential annual direct #' incident radiation and heat load index. Journal of Vegetation #' Science. 13:603-606. -#' @references +#' #' McCune, B. (2007). Improved estimates of incident radiation and heat load #' using non-parametric regression against topographic variables. Journal #' of Vegetation Science 18:751-754. diff --git a/R/hli.pt.R b/R/hli.pt.R index 89e4c26..b5ff104 100644 --- a/R/hli.pt.R +++ b/R/hli.pt.R @@ -10,9 +10,7 @@ #' @param force.hemisphere If country is split at the equator, force southern #' or northern hemisphere equation c("southern", "northern") #' -#' @return Vector of McCune & Keon (2002) Heat Load Index -#' -#' @note +#' @details #' Describes A southwest facing slope should have warmer temperatures than a #' southeast facing slope, even though the amount of solar radiation they receive #' is equivalent. The McCune and Keon (2002) method accounts for this by "folding" @@ -20,27 +18,29 @@ #' northeast. Additionally, this method account for steepness of slope, which is #' not addressed in most other aspect rescaling equations. HLI values range #' from 0 (coolest) to 1 (hottest). -#' @note +#' #' The equations follow McCune (2007) and support northern and southern hemisphere #' calculations. The folded aspect for northern hemispheres use (180 - (Aspect – 225) ) #' and for Southern hemisphere ( 180 - ( Aspect – 315) ). If a country is split at the #' equator you can use the force.hemisphere argument to choose which equation to use. #' Valid values for this argument are "southern" and "northern" with the default "none". #' +#' @return Vector of McCune & Keon (2002) Heat Load Index +#' #' @author Jeffrey S. Evans #' #' @references #' McCune, B., and D. Keon (2002) Equations for potential annual direct #' incident radiation and heat load index. Journal of Vegetation #' Science. 13:603-606. -#' @references +#' #' McCune, B. (2007). Improved estimates of incident radiation and heat load #' using non-parametric regression against topographic variables. Journal #' of Vegetation Science 18:751-754. #' #' @examples #' -#' # Single input +#' # Single point input #' hli.pt(theta=180, alpha=30, latitude=40) #' #' # Multiple input, returns results from diff --git a/R/hsp.R b/R/hsp.R index b24fc5c..4c0f366 100644 --- a/R/hsp.R +++ b/R/hsp.R @@ -9,13 +9,13 @@ #' @param inc Increment to increase scales #' @param win Window type, options are "rectangle" or "circle" #' @param normalize Normalize results to 0-1 scale (FALSE | TRUE) -#' -#' @return terra SpatRaster class object of slope position #' -#' @note +#' @details #' if win = "circle" units are distance, if win = "rectangle" units #' are number of cells -#' +#' +#' @return terra SpatRaster class object of slope position +#' #' @references #' Murphy M.A., J.S. Evans, and A.S. Storfer (2010) Quantify Bufo boreas #' connectivity in Yellowstone National Park with landscape genetics. 
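To make the folded-aspect transform documented above for hli and hli.pt concrete, here is a minimal sketch in R (an illustration only, not the package code; fold.aspect is a hypothetical helper, and the absolute values follow the McCune & Keon 2002 formulation):

    # Fold aspect about the SW-NE axis so that SW (225 deg) maps to 180 and
    # NE (45 deg) maps to 0; the southern hemisphere folds about 315 deg instead
    fold.aspect <- function(aspect, hemisphere = c("northern", "southern")) {
      hemisphere <- match.arg(hemisphere)
      a <- ifelse(hemisphere == "northern", 225, 315)
      abs(180 - abs(aspect - a))
    }
    fold.aspect(c(45, 135, 225, 315))  # 0 90 180 90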
diff --git a/R/hybrid.kmeans.R b/R/hybrid.kmeans.R index de8f034..9204261 100644 --- a/R/hybrid.kmeans.R +++ b/R/hybrid.kmeans.R @@ -9,9 +9,6 @@ #' @param stat The statistic to aggregate class centers (mean or median) #' @param ... Additional arguments passed to \code{\link[stats]{kmeans}} #' -#' @return -#' returns an object of class "kmeans" which has a print and a fitted method -#' #' @details #' This method uses hierarchical clustering to define the cluster-centers in the K-means #' clustering algorithm. This mitigates some of the know convergence issues in K-means. @@ -20,12 +17,15 @@ #' options for hmethod are: "ward.D", "ward.D2", "single", #' "complete", "average", mcquitty", "median", "centroid" #' +#' @return +#' returns an object of class "kmeans" which has a print and a fitted method +#' #' @author Jeffrey S. Evans #' #' @references #' Singh, H., & K. Kaur (2013) New Method for Finding Initial Cluster Centroids in #' K-means Algorithm. International Journal of Computer Application. 74(6):27-30 -#' @references +#' #' Ward, J.H., (1963) Hierarchical grouping to optimize an objective function. Journal #' of the American Statistical Association. 58:236-24 #' diff --git a/R/idw.smoothing.R b/R/idw.smoothing.R index 0e15321..9eaee6b 100644 --- a/R/idw.smoothing.R +++ b/R/idw.smoothing.R @@ -8,16 +8,16 @@ #' @param d Distance constraint #' @param k Maximum number of k-nearest neighbors within d #' -#' @return A vector, same length as nrow(x), of smoothed y values -#' -#' @note +#' @details #' Smoothing is conducted with a weighted-mean where; weights represent inverse #' standardized distance lags Distance-based or neighbour-based smoothing can be #' specified by setting the desired neighbour smoothing method to a specified value #' then the other parameter to the potential maximum. For example; a constraint #' distance, including all neighbors within 1000 (d=1000) would require k to equal #' all of the potential neighbors (n-1 or k=nrow(x)-1). -#' +#' +#' @return A vector, same length as nrow(x), of smoothed y values +#' #' @examples #' #' library(sf) @@ -25,7 +25,6 @@ #' data(meuse, package = "sp") #' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, #' agr = "constant") -#' } #' #' # Calculate distance weighted mean on cadmium variable in meuse data #' cadmium.idw <- idw.smoothing(meuse, 'cadmium', k=nrow(meuse), d = 1000) @@ -39,9 +38,13 @@ #' #' plot(meuse[c("cadmium","cadmium.wm")], pch=20) #' +#' } else { +#' cat("Please install sp package to run example", "\n") +#' } +#' #' @export idw.smoothing <- function(x, y, d, k) { - if(!any(which(utils::installed.packages()[,1] %in% "RANN"))) + if(length(find.package("RANN", quiet = TRUE)) == 0) stop("please install RANN package before running this function") if (!inherits(x, "sf")) stop(deparse(substitute(x)), " Must be an sf object") diff --git a/R/impute.loess.R b/R/impute.loess.R index 48f12e1..d031fbc 100644 --- a/R/impute.loess.R +++ b/R/impute.loess.R @@ -5,11 +5,6 @@ #' @param s Smoothing parameter () #' @param smooth (FALSE/TRUE) Smooth data, else only replace NA's #' -#' @return a vector the same length as x with NA values filled or the -#' data smoothed (or both).. -#' -#' @author Jeffrey S. Evans tnc.org> -#' #' @details #' Performs a local polynomial regression to smooth data or to #' impute NA values. The minimal number of non-NA observations to reliably @@ -19,6 +14,11 @@ #' that smooth needs to be TRUE to return a smoothed vector, else only #' NA's will be imputed. 
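As a rough illustration of the loess imputation idea described in the impute.loess documentation above, a sketch using base stats::loess over a simple index (the span of 0.4 and the toy data are arbitrary assumptions, not the package implementation):

    set.seed(42)
    y <- sin(seq(0, 2 * pi, length.out = 50)) + rnorm(50, sd = 0.1)
    y[c(5, 18, 30)] <- NA                     # gaps to impute
    idx <- seq_along(y)
    fit <- stats::loess(y ~ idx, span = 0.4)  # NA rows are dropped from the fit
    y[is.na(y)] <- stats::predict(fit, data.frame(idx = idx[is.na(y)]))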
#' +#' @return +#' A vector the same length as x with NA values filled or the data smoothed (or both). +#' +#' @author Jeffrey S. Evans <jeffrey_evans@tnc.org> +#' #' @examples #' data(cor.data) #' d <- cor.data[[1]][,2] diff --git a/R/insert.R b/R/insert.R index c7b7da0..6a9a10a 100644 --- a/R/insert.R +++ b/R/insert.R @@ -1,25 +1,21 @@ #' @title Insert a row or column into a data.frame -#' @description Inserts a new row or column into a data.frame -#' at a specified location +#' @description Inserts a new row or column into a data.frame at a specified location #' #' @param x Existing data.frame #' @param MARGIN Insert a 1 = row or 2 = column #' @param value A vector of values equal to the length of MARGIN, #' if nothing specified values will be NA #' @param idx Index position to insert row or column -#' @param name Name of new column (not used for rows, -#' MARGIN=1) +#' @param name Name of new column (not used for rows, MARGIN=1) #' -#' @return A data.frame with the new row or column inserted +#' @details +#' Where there are methods to easily add a row/column to the end or beginning of a data.frame, +#' it is not straightforward to insert data at a specific location within the data.frame. +#' This function allows for inserting a vector at a specific location e.g., between columns or +#' rows 1 and 2 where row/column 2 is moved to the 3rd position and a new vector of values is +#' inserted into the 2nd position. #' -#' @note -#' Where there are methods to easily add a row/column to -#' the end or beginning of a data.frame, it is not straight -#' forward to insert data at a specific location within the -#' data.frame. This function allows for inserting a vector -#' at a specific location eg., between columns or rows 1 and 2 -#' where row/column 2 is moved to the 3rd position and a new -#' vector of values is inserted into the 2nd position. +#' @return A data.frame with the new row or column inserted #' #' @author Jeffrey S. Evans #' @@ -42,14 +38,14 @@ insert <- function(x, MARGIN = 1, value = NULL, idx, name=NULL) { stop("idx argument must be supplied") idx = idx[1] if(MARGIN == 1) { - cat("Inserting row", "\n") + message("Inserting row") if(is.null(value)) value = rep(NA, ncol(x)) if(length(value) != ncol(x)) stop("specified values not equal number of columns") x[seq(idx+1, nrow(x)+1),] <- x[seq(idx, nrow(x)),] x[idx,] <- value } else if(MARGIN == 2) { - cat("Inserting column", "\n") + message("Inserting column") n <- names(x) if(is.null(value)) value = rep(NA, nrow(x)) if(length(value) != nrow(x)) diff --git a/R/insert.values.R b/R/insert.values.R index a4fcf57..2ad4abf 100644 --- a/R/insert.values.R +++ b/R/insert.values.R @@ -6,20 +6,20 @@ #' @param value Values to insert into x #' @param index Index position(s) to insert y values into x #' -#' @return -#' A vector with values of y inserted into x and the position(s) -#' defined by the index -#' -#' @description +#' @details #' This function inserts new values at specified positions in a vector. It #' does not replace existing values. If a single value is provided for y #' and l represents multiple positions y will be replicated for the length #' of l. In this way you can insert the same value at multiple locations. +#' +#' @return +#' A vector with values of y inserted into x and the position(s) +#' defined by the index #' #' @author Jeffrey S.
Evans #' #' @examples -#' (x=1:10) +#' (x=1:10) #' #' # Insert single value in one location #' insert.values(x, 100, 2) diff --git a/R/is.empty.R b/R/is.empty.R index 307d433..d960b80 100644 --- a/R/is.empty.R +++ b/R/is.empty.R @@ -6,15 +6,16 @@ #' @param na.empty (TRUE / FALSE) Return TRUE if element is NA #' @param trim (TRUE / FALSE) Trim empty strings #' +#' @details +#' This function evaluates if an element in a vector is empty; the na.empty argument +#' allows for evaluating NA values (TRUE if NA) and all.na returns a TRUE if all elements +#' are NA. The trim argument trims a character string to account for the fact that c(" ") +#' is not empty but a vector with c("") is empty. Using trim = TRUE will force both +#' to return TRUE. +#' #' @return A Boolean indicating empty elements in a vector, if all.na = FALSE #' a TRUE/FALSE value will be returned for each element in the vector -#' -#' @description This function evaluates if an element in a vector is empty -#' the na.empty argument allows for evaluating NA values (TRUE if NA) and -#' all.na returns a TRUE if all elements are NA. The trim argument trims -#' a character string to account for the fact that c(" ") is not empty but, -#' a vector with c("") is empty. Using trim = TRUE will force both to return TRUE -#' +#' #' @author Jeffrey S. Evans #' #' @examples diff --git a/R/kde2D.R b/R/kde2D.R deleted file mode 100644 index 9702b1a..0000000 --- a/R/kde2D.R +++ /dev/null @@ -1,11 +0,0 @@ -#' @title 2-dimensional kernel density estimate -#' @description Calculates 2-dimensional kernel density estimate over specified extent -#' -#' @param ... Parameters to be passed to the modern version of the function -#' -#' @export -kde.2D <- function(...) { - .Deprecated("mwCorr", package="spatialEco", - msg="kde2D is deprecated, please use sp.kde which provides a weighted option") - kde.2D(...) -} diff --git a/R/kendall.R b/R/kendall.R index b890000..b8d1dd0 100644 --- a/R/kendall.R +++ b/R/kendall.R @@ -12,15 +12,6 @@ #' @param threshold The threshold for number of minimum observations in the time-series #' @param ... Not used #' -#' @return Depending on arguments, a vector containing: -#' * Theil-Sen slope, always returned -#' * Kendall's tau two-sided test, if tau TRUE -#' * intercept for trend if intercept TRUE -#' * p value for trend fit if p.value TRUE -#' * lower confidence level at 95-pct if confidence TRUE -#' * upper confidence level at 95-pct if confidence TRUE -#' @md -#' #' @details #' This function implements Kendall's nonparametric test for a monotonic trend #' using the Theil-Sen (Theil 1950; Sen 1968; Siegel 1982) method to estimate @@ -34,6 +25,15 @@ #' prevent the function from failing but, will likely invalidate the statistic. #' A threshold of <=4 will yield all NA values. If method= "none" a modification of the #' EnvStats::kendallTrendTest code is implemented. +#' +#' @return Depending on arguments, a vector containing: +#' * Theil-Sen slope, always returned +#' * Kendall's tau two-sided test, if tau TRUE +#' * intercept for trend if intercept TRUE +#' * p value for trend fit if p.value TRUE +#' * lower confidence level at 95-pct if confidence TRUE +#' * upper confidence level at 95-pct if confidence TRUE +#' @md #' #' @author Jeffrey S. Evans #' @@ -68,7 +68,7 @@ kendall <- function(y, tau = TRUE, intercept = TRUE, p.value = TRUE, confidence = TRUE, method=c("zhang", "yuepilon", "none"), threshold = 6, ...)
{ if(any(method %in% c("zhang", "yuepilon"))) { - if(!any(which(utils::installed.packages()[,1] %in% "zyp"))) + if(length(find.package("zyp", quiet = TRUE)) == 0) stop("please install zyp package before running this function") } if(threshold < 6) diff --git a/R/kl.divergence.R b/R/kl.divergence.R index c62c62e..7c576c5 100644 --- a/R/kl.divergence.R +++ b/R/kl.divergence.R @@ -1,10 +1,6 @@ #' @title Kullback-Leibler divergence (relative entropy) #' -#' @description -#' Calculates the Kullback-Leibler divergence (relative entropy) between -#' unweighted theoretical component distributions. Divergence is calculated -#' as: int [f(x) (log f(x) - log g(x)) dx] for distributions with densities -#' f() and g(). +#' @description Calculates the Kullback-Leibler divergence (relative entropy) #' #' @param object Matrix or dataframe object with >=2 columns #' @param eps Probabilities below this threshold are replaced by this @@ -13,6 +9,11 @@ #' pairs where for each point at least one of the densities #' has a value smaller than eps. #' +#' @details +#' Calculates the Kullback-Leibler divergence (relative entropy) between +#' unweighted theoretical component distributions. Divergence is calculated +#' as: int [f(x) (log f(x) - log g(x)) dx] for distributions with densities +#' f() and g(). #' @return pairwise Kullback-Leibler divergence index (matrix) #' #' @author Jeffrey S. Evans diff --git a/R/knn.R b/R/knn.R index 0616e4b..3d4abe9 100644 --- a/R/knn.R +++ b/R/knn.R @@ -11,11 +11,7 @@ #' @param weights.x A vector or matrix representing covariates of x #' @param indexes (FALSE/TRUE) Return row indexes of x neighbors #' -#' @return -#' A data.frame with row indexes (optional), rownames, ids (optional) and -#' distance of k -#' -#' @description +#' @details #' Finds nearest neighbor in x based on y and returns rownames, index and distance, #' If ids is NULL, rownames of x are returned. If coordinate matrix provided, #' columns need to be ordered [X,Y]. If a radius for d is specified than a maximum @@ -25,16 +21,19 @@ #' must match row dimensions with x and y as well as columns matching between weights. #' In other words, the covariates must match and be numeric. #' +#' @return +#' A data.frame with row indexes (optional), rownames, ids (optional) and +#' distance of k +#' #' @author Jeffrey S. 
Evans #' #' @examples #' \donttest{ -#' library(sf) -#' #' if(require(sp, quietly = TRUE)) { -#' data(meuse, package = "sp") -#' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, -#' agr = "constant") +#' library(sf) +#' data(meuse, package = "sp") +#' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, +#' agr = "constant") #' #' # create reference and target obs #' idx <- sample(1:nrow(meuse), 10) @@ -60,14 +59,18 @@ #' y <- st_coordinates(pts)[,1:2] #' x <- st_coordinates(meuse)[,1:2] #' knn(y, x, k=2) -#' } -#' } +#' +#' } else { +#' cat("Please install sp package to run example", "\n") +#' } +#' } +#' #' @seealso \code{\link[RANN]{nn2}} for details on search algorithm #' @export knn knn <- function(y, x, k = 1, d = NULL, ids = NULL, weights.y = NULL, weights.x = NULL, indexes = FALSE) { - if(!any(which(utils::installed.packages()[,1] %in% "RANN"))) + if(length(find.package("RANN", quiet = TRUE)) == 0) stop("please install RANN package before running this function") gtypes = c("POLYGON", "POINT") @@ -94,7 +97,7 @@ knn <- function(y, x, k = 1, d = NULL, ids = NULL, stop(deparse(substitute(x)), " must be one of ", paste(gtypes, collopse="")) if(xgeom == gtypes[1]) { - cat("Warning, x has polygon geometry using centroid coordinates", "\n") + warning("x has polygon geometry using centroid coordinates") xmat <- sf::st_coordinates(sf::st_centroid(x))[,1:2] } else { xmat <- sf::st_coordinates(x)[,1:2] @@ -116,7 +119,7 @@ knn <- function(y, x, k = 1, d = NULL, ids = NULL, stop(deparse(substitute(y)), " must be one of ", paste(gtypes, collopse="")) if(ygeom == gtypes[1]) { - cat("Warning, y has polygon geometry using centroid coordinates", "\n") + warning("y has polygon geometry using centroid coordinates") ymat <- sf::st_coordinates(sf::st_centroid(y))[,1:2] } else { ymat <- sf::st_coordinates(y)[,1:2] diff --git a/R/lai.R b/R/lai.R index 4106d7d..7f0677e 100644 --- a/R/lai.R +++ b/R/lai.R @@ -4,7 +4,7 @@ #' @param ndvi NDVI in floating point standard scale range (-1 to 1) #' @param method Method to use for index options c("Jonckheere", "Chen") #' -#' @description +#' @details #' This function calculates the Leaf Area Index (LAI) representing the amount of leaf area #' per unit of ground area. This is an important parameter for understanding the structure #' and function of vegetation, as it affects processes such as photosynthesis, transpiration, @@ -16,20 +16,22 @@ #' between NDVI and LAI can vary depending on factors such as vegetation type, canopy structure, #' and environmental conditions. #' +#' @return A terra SpatRaster object with derived LAI values +#' #' @references #' Jonckheere, I., Fleck, S., Nackaerts, K., Muys, B., Coppin, P. (2004). A comparison of two #' methods to retrieve the leaf area index (LAI) from SPOT-4 HRVIR data. International #' Journal of Remote Sensing, 25(21):4407–4425. -#' @references +#' #' Chen, J. M., Liu, R., & Ju, W. (2014). A simple and effective method for estimating #' leaf area index from Landsat imagery. Remote Sensing of Environment, 152:538–548. #' #' @author Jeffrey S.
Evans #' #' @examples -#' \dontrun{ +#' \donttest{ #' library(terra) -#' lsat <- rast(system.file("extdata/Landsat_TM5", package="spatialEco")) +#' lsat <- rast(system.file("/extdata/Landsat_TM5.tif", package="spatialEco")) #' plotRGB(lsat, r=3, g=2, b=1, scale=1.0, stretch="lin") #' #' ndvi <- ( lsat[[4]] - lsat[[3]] ) / (lsat[[4]] + lsat[[3]]) diff --git a/R/land.metrics.R b/R/land.metrics.R deleted file mode 100644 index f5463e3..0000000 --- a/R/land.metrics.R +++ /dev/null @@ -1,30 +0,0 @@ -#' @title Landscape metrics for points and polygons -#' @description Calculates a variety of landscape metrics, on -#' binary rasters, for polygons or points with a -#' buffer distance -#' -#' @param ... Parameters to be passed to the modern version -#' of the function -#' -#' @examples -#' \dontrun{ -#' library(landscapemetrics) -#' library(raster) -#' -#' data(landscape) -#' points <- matrix(c(10, 5, 25, 15, 5, 25), -#' ncol = 2, byrow = TRUE) -#' -#' sample_lsm(landscape, y = points, size = 10, -#' level = "landscape", type = "diversity metric", -#' classes_max = 3, -#' verbose = FALSE) -#' } -#' @export -land.metrics <- function(...) { - .Deprecated("land.metrics", package="spatialEco", - msg="Function is deprecated, for methodological consistency, - please use functions in the landscapemetrics package (on CRAN) ") - message("Please see land.metrics help for example of replicating function - using sample_lsm in landscapemetrics package") -} diff --git a/R/local.min.max.R b/R/local.min.max.R index 0b34267..fcaea92 100644 --- a/R/local.min.max.R +++ b/R/local.min.max.R @@ -11,6 +11,10 @@ #' plot (TRUE/FALSE) #' @param ... Arguments passed to plot #' +#' @details +#' Useful function for identifying inflection or enveloping points in +#' a distribution +#' #' @return A list object with: #' * minima - minimum local values of x #' * maxima - maximum local values of x @@ -20,10 +24,6 @@ #' statistic (dev argument) #' @md #' -#' @note -#' Useful function for identifying inflection or enveloping points in -#' a distribution -#' #' @author Jeffrey S. Evans #' #' @examples diff --git a/R/loess.boot.R b/R/loess.boot.R index c0ebaf4..4c34579 100644 --- a/R/loess.boot.R +++ b/R/loess.boot.R @@ -7,7 +7,17 @@ #' @param confidence Fraction of replicates contained in confidence #' region #' @param ... Additional arguments passed to loess function -#' +#' +#' @details +#' The function fits a loess curve and then calculates a symmetric nonparametric +#' bootstrap with a confidence region. Fitted curves are evaluated at a fixed number +#' of equally-spaced x values, regardless of the number of x values in the data. Some +#' replicates do not include the values at the lower and upper end of the range of x +#' values. If the number of such replicates is too large, it becomes impossible to +#' construct a confidence region that includes a fraction "confidence" of the bootstrap +#' replicates. In such cases, the left and/or right portion of the confidence region +#' is truncated. +#' #' @return list object containing #' * nreps Number of bootstrap replicates #' * confidence Confidence interval (region) @@ -26,27 +36,17 @@ #' 5) stddev - Standard deviation of loess fit at each x value #' @md #' -#' @description -#' The function fits a loess curve and then calculates a symmetric nonparametric -#' bootstrap with a confidence region. Fitted curves are evaluated at a fixed number -#' of equally-spaced x values, regardless of the number of x values in the data. 
Some -#' replicates do not include the values at the lower and upper end of the range of x -#' values. If the number of such replicates is too large, it becomes impossible to -#' construct a confidence region that includes a fraction "confidence" of the bootstrap -#' replicates. In such cases, the left and/or right portion of the confidence region -#' is truncated. -#' #' @author Jeffrey S. Evans #' #' @references #' Cleveland, WS, (1979) Robust Locally Weighted Regression and Smoothing Plots Journal #' of the American Statistical Association 74:829-836 -#' @references +#' #' Efron, B., and R. Tibshirani (1993) An Introduction to the Bootstrap Chapman and #' Hall, New York -#' @references +#' #' Hardle, W., (1989) Applied Nonparametric Regression Cambridge University Press, NY. -#' @references +#' #' Tibshirani, R. (1988) Variance stabilization and the bootstrap. #' Biometrika 75(3):433-44. #' diff --git a/R/loess.ci.R b/R/loess.ci.R index bc7f398..46dc12e 100644 --- a/R/loess.ci.R +++ b/R/loess.ci.R @@ -1,6 +1,6 @@ #' @title Loess with confidence intervals #' @description Calculates a local polynomial regression fit -#' with associated confidence intervals +#' with associated confidence intervals #' #' @param y Dependent variable, vector #' @param x Independent variable, vector @@ -41,11 +41,13 @@ #' par(opar) #' #' @export -loess.ci <- function(y, x, p=0.95, plot=FALSE, ...) { +loess.ci <- function(y, x, p = 0.95, plot = FALSE, ...) { plx <- stats::predict(stats::loess(y ~ x, ...), se=TRUE) lci = plx$fit - stats::qt(p, plx$df) * plx$se.fit uci = plx$fit + stats::qt(p, plx$df) * plx$se.fit if(plot == TRUE) { + oops <- options() + on.exit(options(oops)) graphics::plot(x, y, type="n", main="Loess fit", sub=paste("confidence intervals at", p)) graphics::polygon(c(x,rev(x)), c(lci, rev(uci)), col="grey86") diff --git a/R/logistic.regression.R b/R/logistic.regression.R index e5df401..9d5e97a 100644 --- a/R/logistic.regression.R +++ b/R/logistic.regression.R @@ -19,18 +19,7 @@ #' @param longlat Are coordinates (coords) in geographic, lat/long (TRUE/FALSE) #' @param ... Additional arguments passed to lrm #' -#' @return A list class object with the following components: -#' * model - lrm model object (rms class) -#' * bandwidth - If AutoCov = TRUE returns the distance bandwidth used for the -#' auto-covariance function -#' * diagTable - data.frame of regression diagnostics -#' * coefTable - data.frame of regression coefficients (log-odds) -#' * Residuals - data.frame of residuals and standardized residuals -#' * AutoCov - If an auto-logistic model, AutoCov represents lagged -#' auto-covariance term -#' @md -#' -#' @description +#' @details #' It should be noted that the auto-logistic model (Besag 1972) is intended for #' exploratory analysis of spatial effects. Auto-logistic are know to underestimate #' the effect of environmental variables and tend to be unreliable (Dormann 2007). @@ -42,18 +31,30 @@ #' If the object passed to the function is an sp class there is no need to call the data #' slot directly via "object@data", just pass the object name. 
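For readers unfamiliar with the lagged auto-covariance term referenced above, a minimal sketch of how such a term can be constructed with spdep::autocov_dist (an assumed illustration, not the internal code; the meuse lime response and the 1000 m bandwidth are arbitrary choices):

    library(sf)
    library(spdep)
    data(meuse, package = "sp")
    meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992)
    # inverse distance-weighted mean of the binary response among
    # neighbors within a 1000 m bandwidth
    ac <- autocov_dist(as.numeric(meuse$lime) - 1, st_coordinates(meuse),
                       nbs = 1000, type = "inverse", style = "W")
    # ac would then enter the model as an additional covariate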
#' +#' @return +#' A list class object with the following components: +#' * model - lrm model object (rms class) +#' * bandwidth - If AutoCov = TRUE returns the distance bandwidth used for the +#' auto-covariance function +#' * diagTable - data.frame of regression diagnostics +#' * coefTable - data.frame of regression coefficients (log-odds) +#' * Residuals - data.frame of residuals and standardized residuals +#' * AutoCov - If an auto-logistic model, AutoCov represents lagged +#' auto-covariance term +#' @md +#' #' @author Jeffrey S. Evans #' #' @references #' Besag, J.E., (1972) Nearest-neighbour systems and the auto-logistic model for binary #' data. Journal of the Royal Statistical Society, Series B Methodological 34:75-83 -#' @references +#' #' Dormann, C.F., (2007) Assessing the validity of autologistic regression. Ecological #' Modelling 207:234-242 -#' @references +#' #' Le Cessie, S., Van Houwelingen, J.C., (1992) Ridge estimators in logistic regression. #' Applied Statistics 41:191-201 -#' @references +#' #' Shao, J., (1993) Linear model selection by cross-validation. JASA 88:486-494 #' #' @examples @@ -128,7 +129,7 @@ logistic.regression <- function(ldata, y, x, penalty = TRUE, autologistic = FALSE, coords = NULL, bw = NULL, type = "inverse", style = "W", longlat = FALSE, ...) { - if(!any(which(utils::installed.packages()[,1] %in% "rms"))) + if(length(find.package("rms", quiet = TRUE)) == 0) stop("please install rms package before running this function") if(inherits(ldata, "sf")) ldata <- sf::st_drop_geometry(ldata) diff --git a/R/max_extent.R b/R/max_extent.R index 70bc123..83fef2f 100644 --- a/R/max_extent.R +++ b/R/max_extent.R @@ -5,15 +5,15 @@ #' @param x terra SpatRaster class object #' @param ... additional SpatRaster class objects in same projection #' -#' @return An sf POLYGON class object representing maximum extents -#' -#' @note +#' @details #' Creates a maximum extent polygon of all specified rasters +#' +#' @return An sf POLYGON class object representing maximum extents #' #' @author Jeffrey S. Evans #' #' @examples -#' if(require(terra, quietly = TRUE)) { +#' library(terra) #' #' r1 <- rast(ext(61.87125, 76.64458, 23.90153, 37.27042)) #' r2 <- rast(ext(67.66625, 81.56847, 20.38458, 35.67347)) @@ -28,8 +28,6 @@ #' #' sf::st_bbox(e) # full extent #' -#' } -#' #' @export max_extent max_extent <- function(x, ...) { if(length(list(...))){ diff --git a/R/mean_angle.R b/R/mean_angle.R index 3803590..7e01496 100644 --- a/R/mean_angle.R +++ b/R/mean_angle.R @@ -4,16 +4,17 @@ #' @param a vector of angle values #' @param angle ("degree", "radians") to define angle in degrees or radians #' -#' @return A vector of mean angle -#' -#' @note +#' @details #' The arithmetic mean is not correct for calculating the central tendency of #' angles. This function is intended to return the mean angle for slope or aspect, #' which could be used in a focal or zonal function. #' +#' @return A vector of mean angle +#' #' @author Jeffrey S. Evans #' #' @examples +#' library(terra) #' mean_angle(c(180, 10)) #' mean(c(180, 10)) #' mean_angle(c(90, 180, 70, 60)) @@ -21,7 +22,6 @@ #' mean_angle(c(90, 180, 270, 360)) #' mean(c(90, 180, 270, 360)) #' -#' library(terra) #' elev <- rast(system.file("extdata/elev.tif", package="spatialEco")) #' asp <- terrain(elev, v="aspect") #' s <- buffer(spatSample(asp, 20, as.points=TRUE, diff --git a/R/moments.R b/R/moments.R index 5b57c7d..53e203b 100644 --- a/R/moments.R +++ b/R/moments.R @@ -27,9 +27,9 @@ #' @author Jeffrey S. 
Evans #' #' @examples -#' x <- runif(1000,0,100) -#' ( d <- moments(x, plot=TRUE) ) -#' ( mode.x <- moments(x, plot=FALSE)[16] ) +#' x <- runif(1000,0,100) +#' ( d <- moments(x, plot=TRUE) ) +#' ( mode.x <- moments(x, plot=FALSE)[16] ) #' #' @export moments <- function(x, plot = FALSE) { diff --git a/R/morans.plot.R b/R/morans.plot.R index 4e178d8..f42f3be 100644 --- a/R/morans.plot.R +++ b/R/morans.plot.R @@ -12,8 +12,6 @@ #' expected [-1 to 1]? #' @param ... Additional arguments passed to plot #' -#' @return A plot of the scaled variable against its spatially lagged values. -#' #' @details #' The argument "type" controls the plot for x influencing y (type="xy") or y #' influencing x (type="yx"). If y is not defined then the statistic is univariate @@ -27,44 +25,50 @@ #' surrounded by high and the lower-right quadrant represents negative associations of #' high values surrounded by low. #' -#' @note -#' if y is not specified the univariate statistic for x is returned. the coords argument +#' If y is not specified the univariate statistic for x is returned. the coords argument #' is only used if k = NULL. Can also be an sp object with relevant x,y coordinate slot #' (ie., points or polygons). If w = NULL, the default method for deriving spatial weights #' matrix, options are: inverse power or negative exponent. If scale.xy = FALSE it is #' assumed that they are already scaled following Chen (2015). #' +#' @return A plot of the scaled variable against its spatially lagged values. +#' #' @author Jeffrey S. Evans #' #' @references #' Chen., Y. (2015) A New Methodology of Spatial Cross-Correlation Analysis. #' PLoS One 10(5):e0126158. doi:10.1371/journal.pone.0126158 -#' @references +#' #' Anselin, L. (1996) The Moran scatterplot as an ESDA tool to assess local instability #' in spatial association. pp. 111-125 in M. M. Fischer, H. J. Scholten and D. Unwin (eds) #' Spatial analytical perspectives on GIS, London, Taylor and Francis -#' @references +#' #' Anselin, L. 
(1995) Local indicators of spatial association, Geographical Analysis, #' 27:93-115 #' #' @examples -#' library(sp) -#' library(spdep) -#' data(meuse) -#' coordinates(meuse) <- ~x+y -#' -#' # Autocorrelation (univariate) -#' morans.plot(meuse$zinc, coords = coordinates(meuse)) +#' p = c("sf", "sp", "spdep") +#' if(any(!unlist(lapply(p, requireNamespace, quietly=TRUE)))) { +#' m = which(!unlist(lapply(p, requireNamespace, quietly=TRUE))) +#' message("Can't run examples, please install ", paste(p[m], collapse = " ")) +#' } else { +#' invisible(lapply(p, require, character.only=TRUE)) +#' +#' data(meuse, package = "sp") +#' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, agr = "constant") +#' +#' # Autocorrelation (univariate) +#' morans.plot(meuse$zinc, coords = st_coordinates(meuse)[,1:2]) #' -#' # Cross-correlation of: x influencing y and y influencing x -#' opar <- par(no.readonly=TRUE) -#' par(mfrow=c(1,2)) -#' morans.plot(x=meuse$zinc, y=meuse$copper, coords = coordinates(meuse), -#' scale.morans = TRUE) -#' morans.plot(x=meuse$zinc, y=meuse$copper, coords = coordinates(meuse), -#' scale.morans = TRUE, type.ac="yx") -#' par(opar) -#' +#' # Cross-correlation of: x influencing y and y influencing x +#' opar <- par(no.readonly=TRUE) +#' par(mfrow=c(1,2)) +#' morans.plot(x=meuse$zinc, y=meuse$copper, coords = st_coordinates(meuse)[,1:2], +#' scale.morans = TRUE) +#' morans.plot(x=meuse$zinc, y=meuse$copper, coords = st_coordinates(meuse)[,1:2], +#' scale.morans = TRUE, type.ac="yx") +#' par(opar) +#' } #' @export morans.plot morans.plot <- function(x, y = NULL, coords = NULL, type.ac = c("xy", "yx"), diff --git a/R/mwCorr.R b/R/mwCorr.R deleted file mode 100644 index 818d823..0000000 --- a/R/mwCorr.R +++ /dev/null @@ -1,13 +0,0 @@ -#' @title Dutilleul moving window bivariate raster -#' correlation -#' @description A bivarate raster correlation using Dutilleul's -#' modified t-test -#' -#' @param ... Parameters to be passed to the modern version -#' of the function -#' -#' @export -mwCorr <- function(...) { - .Deprecated("mwCorr", package="spatialEco", - msg="Function is deprecated, please use raster.modified.ttest") -} diff --git a/R/nni.R b/R/nni.R index 5ad687c..5807d41 100644 --- a/R/nni.R +++ b/R/nni.R @@ -4,12 +4,7 @@ #' @param x An sf point object #' @param win Type of window 'hull' or 'extent' #' -#' @return -#' list object containing NNI = nearest neighbor index, z.score = Z -#' Score value, p = p value, expected.mean.distance = Expected mean -#' distance, observed.mean.distance = Observed meand distance. -#' -#' @description +#' @details #' The nearest neighbor index is expressed as the ratio of the observed distance #' divided by the expected distance. The expected distance is the average distance #' between neighbors in a hypothetical random distribution. If the index is less than 1, @@ -21,12 +16,17 @@ #' Where; D=neighbor distance, A=Area #' @md #' +#' @return +#' list object containing NNI = nearest neighbor index, z.score = Z +#' Score value, p = p value, expected.mean.distance = Expected mean +#' distance, observed.mean.distance = Observed mean distance. +#' #' @author Jeffrey S. Evans #' #' @references #' Clark, P.J., and F.C. Evans (1954) Distance to nearest neighbour as a measure #' of spatial relationships in populations. Ecology 35:445-453 -#' @references +#' #' Cressie, N (1991) Statistics for spatial data. Wiley & Sons, New York.
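Because the expected mean distance under complete spatial randomness reduces to 0.5 / sqrt(n / A), the index described above can be sketched in a few lines (an illustration assuming a projected sf POINT object; the package version also returns the z-score and p-value):

    library(sf)
    nn.index <- function(pts) {
      xy <- st_coordinates(pts)
      d <- as.matrix(dist(xy)); diag(d) <- NA
      d.obs <- mean(apply(d, 1, min, na.rm = TRUE))  # observed mean NN distance
      A <- as.numeric(st_area(st_convex_hull(st_union(pts))))
      d.exp <- 0.5 / sqrt(nrow(xy) / A)              # expected mean under CSR
      d.obs / d.exp                                  # < 1 clustered, > 1 dispersed
    }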
#' #' @examples diff --git a/R/nth.values.R b/R/nth.values.R index 2e948d7..496b862 100644 --- a/R/nth.values.R +++ b/R/nth.values.R @@ -5,9 +5,10 @@ #' @param N Number of (Nth) values returned #' @param smallest (FALSE/TRUE) Return the highest, else smallest values #' -#' @return Numeric vector of Nth values +#' @details +#' This function returns n lowest or highest elements in a vector #' -#' @note This function returns n lowest or highest elements in a vector +#' @return Numeric vector of Nth values #' #' @author Jeffrey S. Evans #' diff --git a/R/o.ring.R b/R/o.ring.R index fc2e31c..9af26f1 100644 --- a/R/o.ring.R +++ b/R/o.ring.R @@ -7,9 +7,7 @@ #' (pcfinhom) #' @param ... additional arguments passed to pcf or pcfinhom #' -#' @return plot of o-ring and data.frame with plot labels and descriptions -#' -#' @description +#' @details #' The function K(r) is the expected number of points in a circle of radius r centered #' at an arbitrary point (which is not counted), divided by the intensity l of the pattern. #' The alternative pair correlation function g(r), which arises if the circles of @@ -29,6 +27,8 @@ #' conditioned probability spectrum) with the interpretation of a neighborhood #' density, which is more intuitive than an accumulative measure. #' +#' @return plot of o-ring and data.frame with plot labels and descriptions +#' #' @author Jeffrey S. Evans #' #' @references @@ -36,10 +36,14 @@ #' pattern analysis in ecology. Oikos 104:209-229 #' #' @examples -#' library(spatstat.explore) +#' if (require(spatstat.explore, quietly = TRUE)) { #' data(lansing) -#' x <- spatstat.geom::unmark(split(lansing)$maple) -#' o.ring(x) +#' x <- spatstat.geom::unmark(split(lansing)$maple) +#' o.ring(x) +#' +#' } else { +#' cat("Please install spatstat.explore package to run example", "\n") +#' } #' #' @export o.ring <- function(x, inhomogeneous = FALSE, ...) 
{ diff --git a/R/oli.aws.R b/R/oli.aws.R index 0220a5d..0d49ee4 100644 --- a/R/oli.aws.R +++ b/R/oli.aws.R @@ -7,6 +7,18 @@ #' @param cloud.cover percent cloud cover #' @param processing processing level ("L1GT" or "L1T") #' +#' @details +#' Amazon's AWS cloud service is hosting OLI Landsat 8 data granules +#' \url{https://registry.opendata.aws/landsat-8} +#' \url{https://aws.amazon.com/blogs/aws/start-using-landsat-on-aws/} +#' +#' USGS Landsat collections: \url{https://www.usgs.gov/landsat-missions} +#' Pre-collection processing levels: "L1T", "L1GT", "L1G" +#' Collection 1 processing levels: "L1TP", "L1GT", "L1GS" +#' "L1T" and "L1TP" - Radiometrically calibrated and orthorectified (highest level processing) +#' "L1GT" and "L1GT" - Radiometrically calibrated and systematic geometric corrections +#' "L1G" and "L1GS" - Radiometrically calibrated with systematic ephemeris correction +#' #' @return #' data.frame object with: #' \itemize{ @@ -27,22 +39,10 @@ #' \item row - Landsat row #' } #' -#' @note -#' Amazons AWS cloud service is hosting OLI Landsat 8 data granules -#' \url{https://registry.opendata.aws/landsat-8} -#' \url{https://aws.amazon.com/blogs/aws/start-using-landsat-on-aws/} -#' -#' USGS Landsat collections: \url{https://www.usgs.gov/landsat-missions} -#' Pre-collection processing levels: "L1T", "L1GT", "L1G" -#' Collection 1 processing levels: "L1TP", "L1GT", "L1GS" -#' "L1T" and "L1TP" - Radiomertically calibrated and orthorectified (highest level processing) -#' "L1GT" and "L1GT" - Radiomertically calibrated and systematic geometric corrections -#' "L1G" and "L1GS" - Radiomertically calibrated with systematic ephemeris correction -#' #' @author Jeffrey S. Evans #' #' @examples -#' \dontrun{ +#' \donttest{ #' # Query path 126, row 59, 2013-04-15 to 2017-03-09, <20% cloud cover #' ( p126r59.oli <- oli.asw(path=126, row=59, dates = c("2013-04-15", "2017-03-09"), #' cloud.cover = 20) ) @@ -64,7 +64,7 @@ oli.asw <- function(path, row, dates, cloud.cover = 10, processing) { aws.url <- "http://landsat-pds.s3.amazonaws.com/scene_list.gz" - if(!any(which(utils::installed.packages()[,1] %in% "readr"))) + if(length(find.package("readr", quiet = TRUE)) == 0) stop("please install readr package before running this function") if( missing(path) ) stop("Must specify landsat path") if( missing(row) ) stop("Must specify landsat row") diff --git a/R/optimal.k.R b/R/optimal.k.R index 68fdfb8..db36c04 100644 --- a/R/optimal.k.R +++ b/R/optimal.k.R @@ -17,7 +17,7 @@ #' Theodoridis, S. & K. Koutroumbas(2006) Pattern Recognition 3rd ed. #' #' @examples -#' library(cluster) +#' if (require(cluster, quietly = TRUE)) { #' x <- rbind(cbind(rnorm(10,0,0.5), rnorm(10,0,0.5)), #' cbind(rnorm(15,5,0.5), rnorm(15,5,0.5))) #' @@ -32,12 +32,16 @@ #' # join clusters to data #' x <- data.frame(x, k=clust$model$clustering) #' +#' } else { +#' cat("Please install cluster package to run example", "\n") +#' } +#' #' @seealso \code{\link[cluster]{pam}} for details on Partitioning Around Medoids (PAM) #' @seealso \code{\link[cluster]{clara}} for details on Clustering Large Applications (clara) #' #' @export optimal.k <- function(x, nk = 10, plot = TRUE, cluster = TRUE, clara = FALSE, ...)
{ - if(!any(which(utils::installed.packages()[,1] %in% "cluster"))) + if(length(find.package("cluster", quiet = TRUE)) == 0) stop("please install cluster package before running this function") asw <- numeric(nk) for (k in 2:nk) { diff --git a/R/optimized.sample.variance.R b/R/optimized.sample.variance.R index 3f12c37..0e25cd1 100644 --- a/R/optimized.sample.variance.R +++ b/R/optimized.sample.variance.R @@ -1,6 +1,6 @@ #' @title Optimized sample variance -#' @description Draws an optimal sample that minimizes or maximizes the -#' sample variance +#' @description +#' Draws an optimal sample that minimizes or maximizes the sample variance #' #' @param x A vector to draw a sample from #' @param n Number of samples to draw @@ -15,9 +15,9 @@ #' #' @examples #' library(sf) -#' data(meuse, package = "sp") -#' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, -#' agr = "constant") +#' if (require(sp, quietly = TRUE)) { +#' data(meuse, package = "sp") +#' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, agr = "constant") #' #' n = 15 #' # Draw n samples that maximize the variance of y @@ -36,6 +36,9 @@ #' "minimized variance"), col=c("grey","red","blue"), #' pch=c(19,19,19)) #' +#' } else { +#' cat("Please install sp package to run example", "\n") +#' } #' @export optimized.sample.variance optimized.sample.variance <- function(x, n, type = "maximized") { if(!is.numeric(x)) stop("x is not a numeric vector") diff --git a/R/outliers.R b/R/outliers.R index ec104e7..81a4561 100644 --- a/R/outliers.R +++ b/R/outliers.R @@ -9,8 +9,8 @@ #' @author Jeffrey S. Evans #' #' @references -#' Iglewicz, B. & D.C. Hoaglin (1993) How to Detect and Handle Outliers, -#' American Society for Quality Control, Milwaukee, WI. +#' Iglewicz, B. & D.C. Hoaglin (1993) How to Detect and Handle Outliers, +#' American Society for Quality Control, Milwaukee, WI. #' #' @examples #' # Create data with 3 outliers diff --git a/R/overlap.R b/R/overlap.R index 907e3c0..536ebfd 100644 --- a/R/overlap.R +++ b/R/overlap.R @@ -5,9 +5,7 @@ #' @param y A matrix or SpatRaster raster class object #' with the same dimensions of x #' -#' @return A value representing the I similarity statistic -#' -#' @description +#' @details #' The overlap function computes the I similarity statistic (Warren et al. 2008) #' of two overlapping niche estimates. Similarity is based on the Hellenger distance. #' It is assumed that the input data share the same extent and cellsize and all values @@ -20,6 +18,8 @@ #' identical (Warren et al., 2008). The function is based on code #' from Jeremy VanDerWal #' +#' @return A vector (single value) representing the I similarity statistic +#' #' @author Jeffrey Evans and Jeremy VanDerWal #' #' @references diff --git a/R/parea.sample.R b/R/parea.sample.R index c415421..dc75853 100644 --- a/R/parea.sample.R +++ b/R/parea.sample.R @@ -9,7 +9,7 @@ #' @param stype Sampling type ('random', 'regular', 'nonaligned', 'hexagonal') #' @param ... Additional arguments passed to spsample #' -#' @note +#' @details #' This function results in an adaptive sample based on the area of #' each polygon. The default scaling factor (sf) converts meters to #' acres. You can set sf=1 to stay in the native projection units @@ -19,14 +19,13 @@ #' @author Jeffrey S. 
Evans #' #' @examples -#' if(require(sf, quietly = TRUE)) { -#' nc <- st_read(system.file("shape/nc.shp", package="sf")) -#' nc <- suppressWarnings(st_cast(nc[c(10,100),], "POLYGON")) +#' library(sf) +#' nc <- st_read(system.file("shape/nc.shp", package="sf")) +#' nc <- suppressWarnings(st_cast(nc[c(10,100),], "POLYGON")) #' -#' ( ars <- parea.sample(nc, pct=0.001, join = TRUE, stype='random') ) -#' plot(st_geometry(nc)) -#' plot(st_geometry(ars), pch=19, add=TRUE) -#' } +#' ( ars <- parea.sample(nc, pct=0.001, join = TRUE, stype='random') ) +#' plot(st_geometry(nc)) +#' plot(st_geometry(ars), pch=19, add=TRUE) #' #' @export parea.sample <- function(x, pct = 0.1, join = FALSE, sf = 4046.86, diff --git a/R/parse.bits.R b/R/parse.bits.R index 4ae23a7..dfb5bcb 100644 --- a/R/parse.bits.R +++ b/R/parse.bits.R @@ -14,46 +14,47 @@ #' With this function you can parse the values for each bit to assign the #' flag values. #' +#' @return A vector or data.frame of parsed integer value(s) associated with input bit +#' #' @author Jeffrey S. Evans #' #' @examples -#' -#' # Return value for bit 5 for integer value 100 -#' parse.bits(100, 5) +#' # Return value for bit 5 for integer value 100 +#' parse.bits(100, 5) #' -#' # Return value(s) for bits 0 and 1 for integer value 100 -#' parse.bits(100, c(0,1)) +#' # Return value(s) for bits 0 and 1 for integer value 100 +#' parse.bits(100, c(0,1)) #' -#' # Return value(s) for bits 0 and 1 for integer values 0-255 -#' for(i in 0:255) { print(parse.bits(i, c(0,1))) } +#' # Return value(s) for bits 0 and 1 for integer values 0-255 +#' for(i in 0:255) { print(parse.bits(i, c(0,1))) } #' -#' \dontrun{ +#' \donttest{ #' #### Applied Example using Harmonized Landsat Sentinel-2 QC #' #' # Create dummy data and qc band -#' library(raster) -#' r <- raster(nrow=100, ncol=100) -#' r[] <- round(runif(ncell(r), 0,1)) -#' qc <- raster(nrow=100, ncol=100) -#' qc[] <- round(runif(ncell(qc), 64,234)) -#' -#' # Calculate bit values from QC table -#' ( qc_bits <- data.frame(int=0:255, -#' cloud = unlist(lapply(0:255, FUN=parse.bits, bit=1)), -#' shadow = unlist(lapply(0:255, FUN=parse.bits, bit=3)), -#' acloud = unlist(lapply(0:255, FUN=parse.bits, bit=2)), -#' cirrus = unlist(lapply(0:255, FUN=parse.bits, bit=0)), -#' aerosol = unlist(lapply(0:255, FUN=parse.bits, bit=c(7,6)))) ) -#' -#' # Query the results to create a vector of integer values indicating what to mask -# # cloud is bit 1 and shadow bit 3 -#' m <- sort(unique(qc_bits[c(which(qc_bits$cloud == 1), -#' which(qc_bits$shadow == 1) -#' ),]$int)) -#' -#' # Apply queried integer values to mask image with QA band -#' qc[qc %in% m] <- NA -#' r <- mask(r, qc) +#' library(terra) +#' r <- rast(nrow=100, ncol=100) +#' r[] <- round(runif(ncell(r), 0,1)) +#' qc <- rast(nrow=100, ncol=100) +#' qc[] <- round(runif(ncell(qc), 64,234)) +#' +#' # Calculate bit values from QC table +#' ( qc_bits <- data.frame(int=0:255, +#' cloud = unlist(lapply(0:255, FUN=parse.bits, bit=1)), +#' shadow = unlist(lapply(0:255, FUN=parse.bits, bit=3)), +#' acloud = unlist(lapply(0:255, FUN=parse.bits, bit=2)), +#' cirrus = unlist(lapply(0:255, FUN=parse.bits, bit=0)), +#' aerosol = unlist(lapply(0:255, FUN=parse.bits, bit=c(7,6)))) ) +#' +#' # Query the results to create a vector of integer values indicating what to mask +#' # cloud is bit 1 and shadow bit 3 +#' m <- sort(unique(qc_bits[c(which(qc_bits$cloud == 1), +#' which(qc_bits$shadow == 1) +#' ),]$int)) +#' +#' # Apply queried integer values to mask image with QA band +#' qc[qc %in% m] <- NA +#' r
<- mask(r, qc) #' } #' #' @export parse.bits diff --git a/R/partial.cor.R b/R/partial.cor.R index d15ca47..ccd25e2 100644 --- a/R/partial.cor.R +++ b/R/partial.cor.R @@ -9,15 +9,6 @@ #' @param statistic Correlation statistic, options are: "kendall", #' "pearson", "spearman" #' -#' @return data.frame containing: -#' * correlation - correlation coefficient -#' * p.value - p-value of correlation -#' * test.statistic - test statistic -#' * n - sample size -#' * Method - indicating partial or semipartial correlation -#' * Statistic - the correlation statistic used -#' @md -#' #' @details #' Partial and semipartial correlations show the association between two #' variables when one or more peripheral variables are controlled @@ -30,6 +21,15 @@ #' for Z. Semipartial correlation holds Z constant for either X or Y, but not #' both, so if we wanted to control X for Z, we could compute the semipartial #' correlation between X and Y holding Z constant for X. +#' +#' @return data.frame containing: +#' * correlation - correlation coefficient +#' * p.value - p-value of correlation +#' * test.statistic - test statistic +#' * n - sample size +#' * Method - indicating partial or semipartial correlation +#' * Statistic - the correlation statistic used +#' @md #' #' @author Jeffrey S. Evans #' #' @examples diff --git a/R/plot.effect.size.R b/R/plot.effect.size.R index 6a46fd8..f36a397 100644 --- a/R/plot.effect.size.R +++ b/R/plot.effect.size.R @@ -4,6 +4,8 @@ #' @param x A effect.size object #' @param ... Additional arguments passed to plot #' +#' @return Plot of effect size object with group effect sizes and 95% confidence intervals +#' #' @author Jeffrey S. Evans #' #' @method plot effect.size diff --git a/R/plot.loess.boot.R b/R/plot.loess.boot.R index 079615f..1713662 100644 --- a/R/plot.loess.boot.R +++ b/R/plot.loess.boot.R @@ -4,17 +4,10 @@ #' @param x A loess.boot object #' @param ... Additional arguments passed to plot #' +#' @return Plot of the loess bootstrap fit +#' #' @author Jeffrey S. Evans #' -#' @references -#' Cleveland, WS, (1979) Robust Locally Weighted Regression and Smoothing Plots Journal of the American Statistical Association 74:829-836 -#' @references -#' Efron, B., and R. Tibshirani (1993) An Introduction to the Bootstrap Chapman and Hall, New York -#' @references -#' Hardle, W., (1989) Applied Nonparametric Regression Cambridge University Press, NY. -#' @references -#' Tibshirani, R. (1988) Variance stabilization and the bootstrap. Biometrika 75(3):433-44. -#' #' @examples #' n=1000 #' x <- seq(0, 4, length.out=n) #' y <- sin(2*x)+rnorm(n, sd=0.3) #' sb <- loess.boot(x, y, nreps=99, confidence=0.90, span=0.40) #' plot(sb) #' #' @method plot loess.boot #' @export plot.loess.boot <- function(x, ...) { + oops <- options() + on.exit(options(oops)) dots <- as.list(match.call(expand.dots = TRUE)[-1]) dots[["x"]] <- x$data$x dots[["y"]] <- x$data$y diff --git a/R/point.in.poly.R b/R/point.in.poly.R deleted file mode 100644 index e6c3f37..0000000 --- a/R/point.in.poly.R +++ /dev/null @@ -1,19 +0,0 @@ -#' @title Point and Polygon Intersect -#' @description Intersects point and polygon feature classes and adds polygon -#' attributes to points -#' -#' @param ... arguments passed to sf::st_intersection -#' @return NA -#' -#' @examples -#' \dontrun{ -#' sf::st_intersection() -#' } -#' -#' @export -point.in.poly <- function(...)
{ - .Deprecated("point.in.poly", package="spatialEco", - msg="Function is deprecated because sf::st_intersection - intersections points an polygons and returns associated - attributes ") -} diff --git a/R/poly.regression.R b/R/poly.regression.R index 4bb62cb..fdef9cd 100644 --- a/R/poly.regression.R +++ b/R/poly.regression.R @@ -10,15 +10,7 @@ #' @param ci (FALSE/TRUE) Should confidence intervals be returned #' @param ... Additional arguments passed to loess #' -#' @return If ci = FALSE, a vector of smoothed values, -#' otherwise a list object with: -#' * loess - A vector, same length of y, representing the smoothed or -#' inputed data -#' * lower.ci - Lower confidence interval -#' * upper.ci - Upper confidence interval -#' @md -#' -#' @description +#' @details #' This is a wrapper function for loess that simplifies data smoothing and imputation #' of missing values. The function allows for smoothing a vector, based on an index #' (derived automatically) or covariates. If the impute option is TRUE NA values are @@ -28,6 +20,14 @@ #' function w(x) = (1-|x|^3)^3 where; x is the distance of a data point from the point #' the curve being fitted. #' +#' @return If ci = FALSE, a vector of smoothed values, +#' otherwise a list object with: +#' * loess - A vector, same length of y, representing the smoothed or +#' inputed data +#' * lower.ci - Lower confidence interval +#' * upper.ci - Upper confidence interval +#' @md +#' #' @author Jeffrey S. Evans #' #' @examples diff --git a/R/polyPerimeter.R b/R/polyPerimeter.R index 8e88798..5c7c41c 100644 --- a/R/polyPerimeter.R +++ b/R/polyPerimeter.R @@ -8,12 +8,11 @@ #' @author Jeffrey S. Evans #' #' @examples -#' if(require(sf, quietly = TRUE)) { +#' library(sf) #' polys <- st_read(system.file("shape/nc.shp", package="sf")) #' polys <- suppressWarnings(st_cast(polys[c(10,100),], "POLYGON")) #' #' polyPerimeter(polys) -#' } #' #' @export polyPerimeter <- function(x) { diff --git a/R/poly_trend.R b/R/poly_trend.R index 490620e..68a2bd8 100644 --- a/R/poly_trend.R +++ b/R/poly_trend.R @@ -8,17 +8,17 @@ #' @param plot Plot results (TRUE/FALSE) #' @param ... Additional arguments passed to plot #' +#' @details +#' A fit using a lm(y ~ x + I(X^2) + I(X^3)) form will be correlated which, +#' can cause problems. The function avoids undue correlation using orthogonal +#' polynomials +#' #' @return A poly.trend class (list) containing #' * trend data.frame of fit polynomial and upper/lower confidence intervals #' * model Class lm model object fit with poly term #' * prameterCI Intercept confidence intervals of Nth order polynomials #' * order Specified polynomial order #' @md -#' -#' @note -#' A fit using a lm(y ~ x + I(X^2) + I(X^3)) form will be correlated which, -#' can cause problems. The function avoids undue correlation using orthogonal -#' polynomials #' #' @author Jeffrey S. Evans #' diff --git a/R/polygon_extract.R b/R/polygon_extract.R deleted file mode 100644 index 9409162..0000000 --- a/R/polygon_extract.R +++ /dev/null @@ -1,17 +0,0 @@ -#' @title polygon raster extract -#' @description Fast method for extracting raster values to polygons -#' -#' @param ... arguments passed to terra::extract -#' @return NA -#' -#' @examples -#' \dontrun{ -#' terra::extract() -#' } -#' -#' @export -polygon_extract <- function(...) 
{ - .Deprecated("polygon_extract", package="spatialEco", - msg="Function is deprecated because terra::extract or - exactextractr::exact_extract benchmarks about the same ") -} diff --git a/R/pp.subsample.R b/R/pp.subsample.R index f792927..59800dd 100644 --- a/R/pp.subsample.R +++ b/R/pp.subsample.R @@ -14,9 +14,7 @@ #' default is 1, for no adjustment (downweight < 1 | upweight > 1) #' @param edge Apply Diggle edge correction (TRUE/FALSE) #' -#' @return sf class POINT geometry containing random subsamples -#' -#' @description +#' @details #' The window type creates a convex hull by default or, optionally, uses the maximum #' extent (envelope). The resulting bandwidth can vary widely by method. the 'diggle' #' method is intended for bandwidth representing 2nd order spatial variation whereas @@ -25,7 +23,6 @@ #' order 'likelihood' approach, as it is slow and computationally expensive. finally, #' the 'stoyan' method will produce very strong 2nd order results. ' #' -#' @note #' Available bandwidth selection methods are: #' * Scott - (Scott 1992), Scott's Rule for Bandwidth Selection (1st order) #' * Diggle - (Berman & Diggle 1989), Minimise the mean-square error via cross @@ -36,34 +33,36 @@ #' * User defined - using a numeric value for sigma #' @md #' +#' @return sf class POINT geometry containing random subsamples +#' #' @author Jeffrey S. Evans #' #' @references #' Berman, M. and Diggle, P. (1989) Estimating weighted integrals of the second-order #' intensity of a spatial point process. Journal of the Royal Statistical Society, #' series B 51, 81-92. -#' @references +#' #' Fithian, W & T. Hastie (2013) Finite-sample equivalence in statistical models for #' presence-only data. Annals of Applied Statistics 7(4): 1917-1939 -#' @references +#' #' Hengl, T., H. Sierdsema, A. Radovic, and A. Dilo (2009) Spatial prediction of species #' distributions from occurrence-only records: combining point pattern analysis, #' ENFA and regression-kriging. Ecological Modelling, 220(24):3499-3511 -#' @references +#' #' Loader, C. (1999) Local Regression and Likelihood. Springer, New York. -#' @references +#' #' Scott, D.W. (1992) Multivariate Density Estimation. Theory, Practice and Visualization. #' New York, Wiley. -#' @references +#' #' Stoyan, D. and Stoyan, H. (1995) Fractals, random shapes and point fields: methods of #' geometrical statistics. John Wiley and Sons. -#' @references +#' #' Warton, D.i., and L.C. Shepherd (2010) Poisson Point Process Models Solve the Pseudo-Absence #' Problem for Presence-only Data in Ecology. The Annals of Applied Statistics, 4(3):1383-1402 #' #' @examples #' library(sf) -#' library(spatstat.explore) +#' if(require(spatstat.explore, quietly = TRUE)) { #' data(bei, package = "spatstat.data") #' #' trees <- st_as_sf(bei) @@ -78,6 +77,10 @@ #' legend('bottomright', legend=c('Original sample', 'Subsample'), #' col=c('black','red'),pch=c(19,19)) #' +#' } else { +#' cat("Please install spatstat.explore package to run example", "\n") +#' } +#' #' @export pp.subsample pp.subsample <- function(x, n, window = "hull", sigma = "Scott", wts = NULL, gradient = 1, edge = FALSE) { diff --git a/R/print.cross.cor.R b/R/print.cross.cor.R index 50bcefd..abc60a6 100644 --- a/R/print.cross.cor.R +++ b/R/print.cross.cor.R @@ -1,10 +1,28 @@ #' @title Print spatial cross correlation #' @description print method for class "cross.cor" +#' #' @param x Object of class cross.cor #' @param ... 
Ignored #' +#' @return +#' When not simulated (k=0), prints the function's list object containing: +#' * I - Global autocorrelation statistic +#' * SCI - A data.frame with two columns representing the xy and yx autocorrelation +#' * nsim - NULL, indicating that p-values were derived from observed data (k=0) +#' * p - Probability based on observations above/below the confidence interval +#' * t.test - Probability based on t-test +#' +#' When simulated (k>0), prints the function's list object containing: +#' * I - Global autocorrelation statistic +#' * SCI - A data.frame with two columns representing the xy and yx autocorrelation +#' * nsim - value representing number of simulations +#' * global.p - p-value of global autocorrelation statistic +#' * local.p - Probability based on simulated data using successful rejection of the t-test +#' * range.p - Probability based on the range of probabilities resulting from the paired t-test +#' #' @method print cross.cor #' +#' @md #' @export print.cross.cor <- function(x, ...) { if(!is.null(x$nsim)) { diff --git a/R/print.effect.size.R b/R/print.effect.size.R index e8b07cc..ed4aa99 100644 --- a/R/print.effect.size.R +++ b/R/print.effect.size.R @@ -1,8 +1,13 @@ #' @title Print effect size #' @description print method for class "effect.size" +#' #' @param x Object of class effect.size #' @param ... Ignored #' +#' @return +#' Prints the output data.frame containing: effect size with upper and lower confidence +#' intervals, and mean and sd by group +#' #' @method print effect.size #' @export print.effect.size <- function(x, ...) { diff --git a/R/print.loess.boot.R b/R/print.loess.boot.R index 6a480cd..f2039b0 100644 --- a/R/print.loess.boot.R +++ b/R/print.loess.boot.R @@ -1,8 +1,27 @@ #' @title Print Loess bootstrap model #' @description print method for class "loess.boot" +#' #' @param x Object of class loess.boot #' @param ... Ignored #' +#' @return Same as summary of loess.boot, a data.frame including: +#' * nreps Number of bootstrap replicates +#' * confidence Confidence interval (region) +#' * span alpha (span) parameter used in loess fit +#' * degree polynomial degree used in loess fit +#' * normalize Normalized data (TRUE/FALSE) +#' * family Family of statistic used in fit +#' * parametric Parametric approximation (TRUE/FALSE) +#' * surface Surface fit, see loess.control +#' * data data.frame of x,y used in model +#' * fit data.frame including: +#' 1) x - Equally-spaced x index +#' 2) y.fit - loess fit +#' 3) up.lim - Upper confidence interval +#' 4) low.lim - Lower confidence interval +#' 5) stddev - Standard deviation of loess fit at each x value +#' @md +#' #' @method print loess.boot #' @export print.loess.boot <- function(x, ...) { diff --git a/R/print.poly.trend.R b/R/print.poly.trend.R index 292c3ba..68d5b7d 100644 --- a/R/print.poly.trend.R +++ b/R/print.poly.trend.R @@ -1,8 +1,11 @@ #' @title Print poly_trend #' @description print method for class "poly.trend" +#' #' @param x Object of class poly.trend #' @param ... Ignored #' +#' @return Prints trend model summary, order and trend confidence intervals +#' #' @method print poly.trend #' @export print.poly.trend <- function(x, ...)
{ diff --git a/R/proximity.index.R b/R/proximity.index.R index fb834ac..b4e6b63 100644 --- a/R/proximity.index.R +++ b/R/proximity.index.R @@ -46,7 +46,10 @@ #' # plot index for just forest class #' forest <- meuse[meuse$LU == "forest",] #' plot(forest["cpidx"]) -#' } +#' +#' } else { +#' cat("Please install sp package to run example", "\n") +#' } #' } #' @export proximity.index proximity.index <- function(x, y = NULL, min.dist = 0, max.dist = 1000, diff --git a/R/pseudo.absence.R b/R/pseudo.absence.R index 5b0966c..ef009f1 100644 --- a/R/pseudo.absence.R +++ b/R/pseudo.absence.R @@ -21,14 +21,6 @@ #' default is 1, for no adjustment (downweight < 1 | upweight > 1) #' @param p Minimum value for probability distribution (must be > 0) #' @param edge Apply Diggle edge correction (TRUE/FALSE) -#' -#' @return A list class object with the following components: -#' \itemize{ -#' \item sample A sf POINT geometry object containing random samples -#' \item kde A terra SpatRaster class of inverted Isotropic KDE estimates -#' used as sample weights (IF KDE = TRUE) -#' \item sigma Selected bandwidth of KDE -#' } #' #' @details #' The window type creates a convex hull by default or, optionally, uses the @@ -36,26 +28,31 @@ #' areas defined by the mask and defines the area that pseudo absence data #' will be generated. #' -#' @details #' Available bandwidth selection methods are: -#' \itemize{ -#' \item Scott (Scott 1992), Scott's Rule for Bandwidth Selection (1st order) -#' \item Diggle (Berman & Diggle 1989), Minimize the mean-square error via cross -#' validation (2nd order) -#' \item likelihood (Loader 1999), Maximum likelihood cross validation (2nd order) -#' \item geometry, Bandwidth is based on simple window geometry (1st order) -#' \item Stoyan (Stoyan & Stoyan 1995), Based on pair-correlation function -#' (strong 2nd order) -#' \item User defined numeric distance bandwidth -#' } +#' * Scott (Scott 1992), Scott's Rule for Bandwidth Selection (1st order) +#' * Diggle (Berman & Diggle 1989), Minimize the mean-square error via cross +#'   validation (2nd order) +#' * likelihood (Loader 1999), Maximum likelihood cross validation (2nd order) +#' * geometry, Bandwidth is based on simple window geometry (1st order) +#' * Stoyan (Stoyan & Stoyan 1995), Based on pair-correlation function (strong 2nd order) +#' * User defined numeric distance bandwidth +#' @md #' -#' @details -#' Note; resulting bandwidth can vary widely by method. the 'diggle' method +#' @note +#' Resulting bandwidth can vary widely by method. The 'diggle' method #' is intended for selecting bandwidth representing 2nd order spatial variation #' whereas the 'scott' method will represent 1st order trend. the 'geometry' approach #' will also represent 1st order trend. For large datasets, caution should be used with #' the 2nd order 'likelihood' approach, as it is slow and computationally expensive. #' finally, the 'stoyan' method will produce very strong 2nd order results. +#' +#' @return +#' A list class object with the following components: +#' * sample A sf POINT geometry object containing random samples +#' * kde A terra SpatRaster class of inverted Isotropic KDE estimates +#' used as sample weights (IF KDE = TRUE) +#' * sigma Selected bandwidth of KDE +#' @md #' #' @author Jeffrey S. Evans #' #' @references #' Berman, M. and Diggle, P. (1989) Estimating weighted integrals of the second-order #' intensity of a spatial point process. Journal of the Royal Statistical Society, #' series B 51, 81-92.
-#' @references +#' #' Fithian, W & T. Hastie (2013) Finite-sample equivalence in statistical models for #' presence-only data. Annals of Applied Statistics 7(4): 1917-1939 -#' @references +#' #' Hengl, T., H. Sierdsema, A. Radovic, and A. Dilo (2009) Spatial prediction of species #' distributions from occurrence-only records: combining point pattern analysis, #' ENFA and regression-kriging. Ecological Modelling, 220(24):3499-3511 -#' @references +#' #' Loader, C. (1999) Local Regression and Likelihood. Springer, New York. -#' @references +#' #' Scott, D.W. (1992) Multivariate Density Estimation. Theory, Practice and Visualization. #' New York, Wiley. -#' @references +#' #' Stoyan, D. and Stoyan, H. (1995) Fractals, random shapes and point fields: methods of #' geometrical statistics. John Wiley and Sons. -#' @references +#' #' Warton, D.i., and L.C. Shepherd (2010) Poisson Point Process Models Solve the Pseudo-Absence #' Problem for Presence-only Data in Ecology. The Annals of Applied Statistics, 4(3):1383-1402 #' #' @examples -#' library(sf) -#' library(terra) -#' library(spatstat.data) +#' p = c("sf", "sp", "terra", "spatstat.data") +#' if(any(!unlist(lapply(p, requireNamespace, quietly=TRUE)))) { +#' m = which(!unlist(lapply(p, requireNamespace, quietly=TRUE))) +#' message("Can't run examples, please install ", paste(p[m], collapse = " ")) +#' } else { +#' invisible(lapply(p, require, character.only=TRUE)) #' #' data(meuse, package = "sp") #' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, @@ -116,7 +116,7 @@ #' plot(st_geometry(trees.abs$sample), col='red', pch=20, cex=1, add=TRUE) #' legend('top', legend=c('Presence', 'Pseudo-absence'), #' pch=c(20,20),col=c('black','red'),bg="white") -#' +#' } #' @export pseudo.absence <- function(x, n, window = "hull", ref = NULL, s = NULL, sigma = "Scott", wts = NULL, KDE = FALSE, gradient = 1, p = NULL, edge = FALSE) { diff --git a/R/pu-data.R b/R/pu-data.R index b03d2df..ae44e58 100644 --- a/R/pu-data.R +++ b/R/pu-data.R @@ -58,7 +58,7 @@ #' @references #' Evans, J.S., S.R. Schill, G.T. Raber (2015) A Systematic Framework for Spatial Conservation Planning and Ecological Priority Design in St. Lucia, Eastern Caribbean. Chapter 26 in Central American Biodiversity : Conservation, Ecology and a Sustainable Future. F. Huettman (eds). Springer, NY. #' -#' @source \url{https://www.conservationgateway.org} +#' @source "The Nature Conservancy" NULL diff --git a/R/quadrats.R b/R/quadrats.R index 2a9f937..6dee10b 100644 --- a/R/quadrats.R +++ b/R/quadrats.R @@ -1,44 +1,50 @@ #' @title Quadrats #' @description Creates quadrat polygons for sampling or analysis #' -#' @param x A sp or sf polygon object defining extent +#' @param x An sf POLYGONS object defining extent #' @param s Radius defining single or range of sizes of quadrats #' @param n Number of quadrats #' @param r A rotation factor for random rotation, default is NULL #' @param sp (FALSE | TRUE) Output sp class object #' -#' @return an sf or sp polygon object with rotated polygon(s) -#' -#' @note +#' @details #' The radius (s) parameter can be a single value or a range of values, #' representing a randomization range of resulting quadrat sizes. The #' rotation (r) parameter can also be used to defined a fixed rotation or #' random range of quadrat rotations. You can specify each of these parameters #' using an explicit vector that will be sampled eg., seq(100,300,0.5) #' +#' @return an sf POLYGONS object with rotated polygon(s) +#' +#' @author Jeffrey S. 
Evans +#' #' @examples #' library(sf) #' library(terra) #' #' # read meuse data and create convex hull -#' data(meuse, package = "sp") -#' meuse <- st_as_sf(meuse, coords = c("x", "y"), -#' crs = 28992, agr = "constant") -#' e <- st_convex_hull(st_union(meuse)) +#' if (require(sp, quietly = TRUE)) { +#' data(meuse, package = "sp") +#' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, agr = "constant") +#' e <- st_convex_hull(st_union(meuse)) #' #' # Fixed size 250 and no rotation #' s <- quadrats(e, s = 250, n = 10) #' plot(st_geometry(s)) #' #' \donttest{ -#' # Variable sizes 100-300 and rotation of 0-45 degrees -#' s <- quadrats(e, s = c(100,300), n = 10, r = c(0,45)) -#' plot(st_geometry(s)) +#' # Variable sizes 100-300 and rotation of 0-45 degrees +#' s <- quadrats(e, s = c(100,300), n = 10, r = c(0,45)) +#' plot(st_geometry(s)) #' -#' # Variable sizes 100-300 and no rotation -#' s <- quadrats(e, s = c(100,300), n = 10) +#' # Variable sizes 100-300 and no rotation +#' s <- quadrats(e, s = c(100,300), n = 10) #' plot(st_geometry(s)) #' } +#' +#' } else { +#' cat("Please install sp package to run example", "\n") +#' } #' #' @export quadrats quadrats <- function(x, s = 250, n = 100, r = NULL, sp = FALSE) { diff --git a/R/random.raster.R b/R/random.raster.R index db9fb1d..c075f7b 100644 --- a/R/random.raster.R +++ b/R/random.raster.R @@ -17,11 +17,10 @@ #' @param distribution Available distributions, c("random", "normal", #' "seq", "binominal", "gaussian", "sample") #' -#' @return terra SpatRaster object with random rasters -#' #' @details -#' Options for distributions are for random, normal, seq, binominal, -#' gaussian and sample raster(s) +#' Options for distributions are: random, normal, seq, binominal, gaussian and sample raster(s) +#' +#' @return terra SpatRaster object with random rasters #' #' @author Jeffrey S. Evans #' diff --git a/R/raster.Zscore.R b/R/raster.Zscore.R index 4ee3e57..63aa64d 100644 --- a/R/raster.Zscore.R +++ b/R/raster.Zscore.R @@ -1,6 +1,6 @@ #' @title Modified z-score for a raster -#' @description Calculates the modified z-score for all cells -#' in a raster +#' @description +#' Calculates the modified z-score for raster values #' #' @param x A raster class object #' @param p.value Return p-value rather than z-score @@ -8,12 +8,12 @@ #' @param file.name Name of raster written to disk #' @param ... Additional arguments passed to writeRaster #' -#' @return raster class object or raster written to disk -#' #' @note #' Since this function needs to operate on all of the raster values, #' it is not memory safe #' +#' @return raster class object or raster written to disk +#' #' @author Jeffrey S. Evans #' #' @examples diff --git a/R/raster.change.R b/R/raster.change.R index db8454f..80fe73e 100644 --- a/R/raster.change.R +++ b/R/raster.change.R @@ -1,6 +1,6 @@ #' @title Raster change between two nominal rasters -#' @description Compares two categorical rasters with a variety of -#' statistical options +#' @description +#' Compares two categorical rasters with a variety of statistical options #' #' @param x A terra SpatRaster #' @param y A terra SpatRaster for comparison to x @@ -10,46 +10,42 @@ #' options. #' @param ...
Additional arguments passed to terra::focalPairs #' -#' @return A terra SpatRaster layer containing one of the following layers: -#' \itemize{ -#' \item kappa Kappa or Weighted Kappa statistic (if stat = "kappa") -#' \item correlation Paired t.test statistic (if stat = "cor") -#' \item entropy Local entropy (if stat = "entropy") -#' \item divergence Kullback-Leibler divergence (if stat = "divergence") -#' \item cross.entropy Local Cross-entropy (if stat = "cross.entropy") -#' \item t.test Paired t.test statistic (if stat = "t.test") -#' \item p.value p-value of the paired t.test statistic (if stat = "t.test") -#' } -#' -#' @description +#' @details #' This function provides various statistics for comparing two classified maps. -#' Valid options are: -#' \itemize{ -#' \item kappa - Cohen's Kappa -#' \item t.test - Two-tailed paired t-test -#' \item cor - Persons Correlation -#' \item entropy - Delta entropy -#' \item cross-entropy - Cross-entropy loss function -#' \item divergence - Kullback-Leibler divergence (relative entropy) -#' } +#' Valid options are: +#' * kappa - Cohen's Kappa +#' * t.test - Two-tailed paired t-test +#' * cor - Pearson's correlation +#' * entropy - Delta entropy +#' * cross-entropy - Cross-entropy loss function +#' * divergence - Kullback-Leibler divergence (relative entropy) #' -#' @note #' Kappa and t-test values < 0 are reported as 0. For a weighted kappa, a matrix must #' be provided that corresponds to the pairwise weights for all values in both rasters. #' Delta entropy is derived by calculating Shannon's on each focal window then #' differencing them (e(x) - e(y)). The s argument can be a single scalar, defining #' a symmetrical kernel, two scalars defining the dimensions of the kernel eg., c(3,5) #' or a matrix defining the kernel say, resulting from terra::focalMat +#' +#' @return +#' A terra SpatRaster layer containing one of the following layers: +#' * kappa - Kappa or Weighted Kappa statistic (if stat = "kappa") +#' * correlation - Pearson's correlation (if stat = "cor") +#' * entropy - Local entropy (if stat = "entropy") +#' * divergence - Kullback-Leibler divergence (if stat = "divergence") +#' * cross.entropy - Local Cross-entropy (if stat = "cross.entropy") +#' * t.test - Paired t.test statistic (if stat = "t.test") +#' * p.value - p-value of the paired t.test statistic (if stat = "t.test") #' #' @author Jeffrey S. Evans #' #' @references #' Cohen, J. (1960). A coefficient of agreement for nominal scales. Educational #' and Psychological Measurement, 20:37-46 -#' @references +#' #' McHugh M.L. (2012) Interrater reliability: the kappa statistic. #' Biochemia medica, 22(3):276–282. -#' @references +#' #' Kullback, S., R.A. Leibler (1951). On information and sufficiency. Annals of #' Mathematical Statistics. 22(1):79–86 #' @@ -83,6 +79,7 @@ #' par(opar) #' } #' +#' @md #' @export raster.change raster.change <- function(x, y, s = 3, stat = c("kappa", "t.test", "cor", "entropy", "cross-entropy", diff --git a/R/raster.deviation.R b/R/raster.deviation.R index cd56f08..be01f06 100644 --- a/R/raster.deviation.R +++ b/R/raster.deviation.R @@ -11,11 +11,7 @@ #' @param global Use single global value for deviation or cell-level values #' (FALSE/TRUE). Argument is ignored for type="trend" #' -#' @return -#' A SpatRaster class object representing local deviation from the raster or the -#' specified global statistic -#' -#' @description +#' @details #' The deviation from the trend is derived as [y-hat - y] where; y-hat is the #' Nth-order polynomial.
Whereas the deviation from a global statistic is [y - y-hat] #' where; y-hat is the local (focal) statistic. The global = TRUE argument allows @@ -23,12 +19,16 @@ #' where; stat(x) is the global value of the specified statistic and y-hat is the #' specified focal statistic. #' +#' @return +#' A SpatRaster class object representing local deviation from the raster or the +#' specified global statistic +#' #' @author Jeffrey S. Evans #' #' @references #' Magee, Lonnie (1998). Nonlocal Behavior in Polynomial Regressions. The American #' Statistician. American Statistical Association. 52(1):20-22 -#' @references +#' #' Fan, J. (1996). Local Polynomial Modelling and Its Applications: From linear #' regression to nonlinear regression. Monographs on Statistics and Applied #' Probability. Chapman and Hall/CRC. ISBN 0-412-98321-4 @@ -86,7 +86,7 @@ raster.deviation <- function(x, type = c("trend", "min", "max", "mean", "median" rsf <- sf::st_as_sf(r, coords = c("xcoord", "ycoord"), agr = "constant") rstat <- terra::rasterize(terra::vect(rsf), x, field="trend") - cat("polynomial confidence intervals", "\n") + message("polynomial confidence intervals") stats::confint(poly.mdl, level=0.95) return( rstat - x ) } diff --git a/R/raster.downscale.R b/R/raster.downscale.R index a3a3cdc..d2d6a16 100644 --- a/R/raster.downscale.R +++ b/R/raster.downscale.R @@ -14,19 +14,7 @@ #' @param uncertainty Output uncertainty raster(s) of confidence or prediction interval, #' at same resolution as y. Options are c("none", "prediction", "confidence") #' -#' @return A list object containing: -#' * downscale downscaled terra SpatRaster object -#' * model MASS rlm model object -#' * MSE Mean Square Error -#' * AIC Akaike information criterion -#' * parm.ci Parameter confidence intervals -#' * residuals If residuals = TRUE, a SpatRaster of the residual error -#' * uncertainty If pred.int = TRUE, SpatRaster's of the -#' lower/upper prediction intervals -#' * std.error If se = TRUE, SpatRaster's of the standard error -#' @md -#' -#' @note +#' @details #' This function uses a robust regression, fit using an M-estimation with Tukey's biweight #' initialized by a specific S-estimator, to downscale a raster based on higher-resolution #' or more detailed raster data specified as covariate(s). You can optionally output residual @@ -42,22 +30,34 @@ #' the prediction interval for standard error defaults to "confidence" else is the same output as #' uncertainty (eg., prediction or confidence). #' -#' @references -#' Bruce, P., & A. Bruce. (2017). Practical Statistics for Data Scientists. O’Reilly Media. +#' @return +#' A list object containing: +#' * downscale - downscaled terra SpatRaster object +#' * model - MASS rlm model object +#' * MSE - Mean Square Error +#' * AIC - Akaike information criterion +#' * parm.ci - Parameter confidence intervals +#' * residuals - If residuals = TRUE, a SpatRaster of the residual error +#' * uncertainty - If pred.int = TRUE, SpatRaster's of the lower/upper prediction intervals +#' * std.error - If se = TRUE, SpatRaster's of the standard error +#' @md #' #' @author Jeffrey S. Evans #' +#' @references +#' Bruce, P., & A. Bruce. (2017). Practical Statistics for Data Scientists. O’Reilly Media. 
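As a point of reference for the robust-regression description above, the core fit inside raster.downscale can be sketched on plain vectors; the following is a minimal, illustrative example with simulated data, mirroring the rlm() arguments used in the function (the variable names are hypothetical, not package objects):

library(MASS)  # rlm, psi.hampel

set.seed(42)
# stand-ins for a coarse-resolution response and a fine-resolution
# covariate extracted at common cell locations
elev <- runif(500, 0, 1000)
tmax <- 25 - 0.01 * elev + rnorm(500)
tmax[sample(500, 10)] <- 40  # outliers the M-estimator should down-weight

# robust M-estimation (Hampel psi, "lts" start), matching the call
# used inside raster.downscale
mdl <- MASS::rlm(tmax ~ elev, scale.est = "Huber",
                 psi = MASS::psi.hampel, init = "lts")

# downscaling then amounts to prediction at fine-resolution covariate values
pred <- predict(mdl, newdata = data.frame(elev = seq(0, 1000, by = 10)))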
+#' #' @examples -#' \dontrun{ -#' library(geodata) +#' \donttest{ +#' if (require(geodata, quietly = TRUE)) { #' library(terra) -#' +#' library(geodata) +#' #' # Download example data (requires geodata package) -#' elev <- geodata::elevation_30s(country="SWZ", path=tempdir()) -#' slp <- terrain(elev, v="slope") -#' tmax <- geodata::worldclim_country(country="SWZ", var="tmax", -#' path=tempdir()) -#' tmax <- crop(tmax[[1]], ext(elev)) +#' elev <- elevation_30s(country="SWZ", path=tempdir()) +#' slp <- terrain(elev, v="slope") +#' tmax <- worldclim_country(country="SWZ", var="tmax", path=tempdir()) +#' tmax <- crop(tmax[[1]], ext(elev)) #' #' # Downscale temperature #' x=c(elev,slp) @@ -96,7 +96,10 @@ #' plot(tmax.ds$downscale - tmax.ds$uncertainty[[2]], #' main="upper prediction interval") #' par(opar) -#' +#' +#' } else { +#' cat("Please install geodata package to run example", "\n") +#' } #' } #' @export raster.downscale raster.downscale <- function(x, y, scatter = FALSE, full.res = FALSE, @@ -138,6 +141,8 @@ raster.downscale <- function(x, y, scatter = FALSE, full.res = FALSE, rrr <- MASS::rlm(stats::as.formula(paste(names(sub.samp)[1], xnames, sep=" ~ ")), data=sub.samp, scale.est="Huber", psi=MASS::psi.hampel, init="lts") if(scatter == TRUE) { + oops <- options() + on.exit(options(oops)) n = terra::nlyr(x) if(n > 1) { graphics::par(mfrow=c(n,n/2)) diff --git a/R/raster.entropy.R b/R/raster.entropy.R index 7219101..41f76dc 100644 --- a/R/raster.entropy.R +++ b/R/raster.entropy.R @@ -8,15 +8,17 @@ #' entropy (FALSE/TRUE) #' @param ... Optional arguments passed terra focal function #' -#' @return terra SpatRaster class object -#' -#' @description +#' @details #' Entropy calculated as: H = -sum(Pi*ln(Pi)) where; Pi, Proportion of one value #' to total values Pi=n(p)/m and m, Number of unique values. Expected range: #' 0 to log(m) H=0 if window contains the same value in all cells. #' H increases with the number of different values in the window. The ellipsis #' arguments can be used to write to disk using the filename argument. #' +#' @return terra SpatRaster class object +#' +#' @author Jeffrey S. Evans +#' #' @references #' Fuchs M., R. Hoffmann, F. Schwonke (2008) Change Detection with GRASS #' GIS - Comparison of images taken by different sensor. diff --git a/R/raster.gaussian.smooth.R b/R/raster.gaussian.smooth.R index 7c71dbe..af59959 100644 --- a/R/raster.gaussian.smooth.R +++ b/R/raster.gaussian.smooth.R @@ -9,13 +9,13 @@ #' @param type The statistic to use in the smoothing operator; #' "mean", "median", "sd", "convolution" #' @param ... Additional arguments passed to terra::focal -#' -#' @return A terra SpatRaster class object of the local distributional moment #' -#' @note +#' @details #' This applies a Gaussian Kernel smoother. The convolution option performs #' a Gaussian decomposition whereas the other options use the kernel #' as weights for the given statistic. +#' +#' @return A terra SpatRaster class object of the local distributional moment #' #' @author Jeffrey S. 
Evans #' diff --git a/R/raster.invert.R b/R/raster.invert.R index dd564db..a7f3794 100644 --- a/R/raster.invert.R +++ b/R/raster.invert.R @@ -1,14 +1,14 @@ #' @title Invert raster #' @description Inverts (flip) the values of a raster #' -#' @param x raster object +#' @param x A terra SpatRaster object #' -#' @return -#' raster class object with inverted (flipped) raster values -#' -#' @note +#' @details #' Inverts raster values using the formula: ((x - max(x)) * -1) + min(x) #' +#' @return +#' A terra SpatRaster object with inverted (flipped) raster values +#' #' @author Jeffrey S. Evans #' #' @examples diff --git a/R/raster.kendall.R b/R/raster.kendall.R index 003b89e..5e6892d 100644 --- a/R/raster.kendall.R +++ b/R/raster.kendall.R @@ -16,19 +16,19 @@ #' @param method Kendall method to use c("zhang", "yuepilon","none"), see kendall function #' @param ... Additional arguments passed to the terra app function #' -#' @return Depending on arguments, a raster layer or rasterBrick object containing: -#' * raster layer 1 - slope for trend, always returned -#' * raster layer 2 - Kendall's tau two-sided test, reject null at 0, if tau TRUE -#' * raster layer 3 - intercept for trend if intercept TRUE -#' * raster layer 4 - p value for trend fit if p.value TRUE -#' * raster layer 5 - lower confidence level at 95 pct, if confidence TRUE -#' * raster layer 6 - upper confidence level at 95 pct, if confidence TRUE -#' @md -#' #' @details #' This function implements Kendall's nonparametric test for a monotonic trend #' using the Theil-Sen (Theil 1950; Sen 1968; Siegel 1982) method to estimate #' the slope and related confidence intervals. +#' +#' @return Depending on arguments, a raster layer or rasterBrick object containing: +#' * raster layer 1 - slope for trend, always returned +#' * raster layer 2 - Kendall's tau two-sided test, reject null at 0, if tau TRUE +#' * raster layer 3 - intercept for trend if intercept TRUE +#' * raster layer 4 - p value for trend fit if p.value TRUE +#' * raster layer 5 - lower confidence level at 95 pct, if confidence TRUE +#' * raster layer 6 - upper confidence level at 95 pct, if confidence TRUE +#' @md #' #' @author Jeffrey S. Evans #' #' @references #' Theil, H. (1950) A rank invariant method for linear and polynomial regression #' analysis. Nederl. Akad. Wetensch. Proc. Ser. A 53:386-392 (Part I), #' 53:521-525 (Part II), 53:1397-1412 (Part III). -#' @references +#' #' Sen, P.K. (1968) Estimates of Regression Coefficient Based on Kendall's tau. #' Journal of the American Statistical Association. 63(324):1379-1389. -#' @references +#' #' Siegel, A.F. (1982) Robust Regression Using Repeated Medians. #' Biometrika, 69(1):242-244 #' @@ -65,7 +65,7 @@ raster.kendall <- function(x, intercept = TRUE, p.value = TRUE, confidence = TRUE, tau = TRUE, min.obs = 6, method=c("zhang", "yuepilon","none"), ...)
{ - if(!any(which(utils::installed.packages()[,1] %in% "zyp"))) + if(length(find.package("zyp", quiet = TRUE)) == 0) stop("please install zyp package before running this function") if (!inherits(x, "SpatRaster")) stop(deparse(substitute(x)), " must be a terra SpatRaster object") diff --git a/R/raster.mds.R b/R/raster.mds.R index 8799b6b..cd4c77d 100644 --- a/R/raster.mds.R +++ b/R/raster.mds.R @@ -1,6 +1,6 @@ #' @title Raster multidimensional scaling (MDS) -#' @description Multidimensional scaling of raster values within an -#' N x N focal window +#' @description +#' Multidimensional scaling of raster values within an N x N focal window #' #' @param r A terra SpatRaster class object #' @param s Window size (may be a vector of 1 or 2) of @@ -9,12 +9,12 @@ #' matrix values. #' @param ... Additional arguments passed to terra::focal #' -#' @return A terra SpatRaster class object -#' -#' @description +#' @details #' An MDS focal function. If only one value provided for s, then a square matrix #' (window) will be used. If window.median = FALSE then the center value of the #' matrix is returned and not the median of the matrix +#' +#' @return A terra SpatRaster class object #' #' @author Jeffrey S. Evans #' diff --git a/R/raster.modified.ttest.R b/R/raster.modified.ttest.R index c07b5e9..a4f38e6 100644 --- a/R/raster.modified.ttest.R +++ b/R/raster.modified.ttest.R @@ -9,39 +9,41 @@ #' @param p If sample != "none", what proportion of population #' should be sampled #' @param size Fixed sample size (default NULL) -#' -#' @return A terra SpatRaster or sf POINT class object with the -#' following attributes: -#' \itemize{ -#' \item corr Correlation -#' \item Fstat The F-statistic calculated as [degrees of freedom * -#' unscaled F-statistic] -#' \item p.value p-value for the test -#' \item moran.x Moran's-I for x -#' \item moran.y Moran's-I for y -#' } +#' #' #' @description #' This function provides a bivariate moving window correlation using the modified #' t-test to account for spatial autocorrelation. Point based subsampling is provided #' for computation tractability. The hexagon sampling is recommended as it is good #' at capturing spatial process that includes nonstationarity and anisotropy. +#' +#' @return +#' A terra SpatRaster or sf POINT class object with the following attributes: +#' * corr - Correlation +#' * Fstat - The F-statistic, calculated as degrees of freedom multiplied by the unscaled F-statistic +#' * p.value - p-value for the test +#' * moran.x - Moran's-I for x +#' * moran.y - Moran's-I for y +#' @md #' #' @author Jeffrey S. Evans #' #' @references #' Clifford, P., S. Richardson, D. Hemon (1989), Assessing the significance of the #' correlation between two spatial processes. Biometrics 45:123-134. -#' @references +#' #' Dutilleul, P. (1993), Modifying the t test for assessing the correlation between #' two spatial processes. Biometrics 49:305-314.
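Before the raster-based examples, the underlying corrected test can be seen on bare vectors; a brief, illustrative sketch with simulated data (this assumes the SpatialPack package and its modified.ttest(x, y, coords) interface, which is also linked under @seealso below):

if(requireNamespace("SpatialPack", quietly = TRUE)) {
  set.seed(42)
  xy <- cbind(runif(200, 0, 100), runif(200, 0, 100))  # point coordinates
  z1 <- rnorm(200)                        # first spatial variable
  z2 <- 0.6 * z1 + rnorm(200, sd = 0.8)   # correlated second variable

  # modified t-test of the correlation between z1 and z2, adjusting
  # the effective sample size for spatial autocorrelation
  tt <- SpatialPack::modified.ttest(z1, z2, coords = xy)
  print(tt)
}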
#' #' @examples -#' \dontrun{ -#' library(gstat) -#' library(sf) -#' library(terra) -#' +#' \donttest{ +#' p = c("sf", "sp", "terra", "gstat") +#' if(any(!unlist(lapply(p, requireNamespace, quietly=TRUE)))) { +#' m = which(!unlist(lapply(p, requireNamespace, quietly=TRUE))) +#' message("Can't run examples, please install ", paste(p[m], collapse = " ")) +#' } else { +#' invisible(lapply(p, require, character.only=TRUE)) +#' #' data(meuse, package = "sp") #' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, #' agr = "constant") @@ -74,15 +76,14 @@ #' # Sample-based corrected correlation #' ( cor.hex <- raster.modified.ttest(G1, G2, sample = "hexagonal") ) #' plot(cor.hex["corr"], pch=20) -#' #' } -#' +#' } #' @seealso \code{\link[SpatialPack]{modified.ttest}} for test details #' #' @export raster.modified.ttest raster.modified.ttest <- function(x, y, d = "auto", sample = c("none", "random", "hexagonal", "regular"), - p = 0.10, size = NULL) { - if(!any(which(utils::installed.packages()[,1] %in% "SpatialPack"))) + p = 0.10, size = NULL) { + if(length(find.package("SpatialPack", quiet = TRUE)) == 0) stop("please install SpatialPack package before running this function") if(missing(x)) stop("x argument is missing") diff --git a/R/raster.moments.R b/R/raster.moments.R index 8b0b3b5..520db48 100644 --- a/R/raster.moments.R +++ b/R/raster.moments.R @@ -10,11 +10,10 @@ #' @param p if type="quantile", the returned percentile. #' @param ... Additional arguments passed to terra::focal #' -#' @return A terra SpatRaster object representing the local distributional moment +#' @details +#' This is a simple wrapper for the terra focal function, returning local statistical moments #' -#' @note -#' This is a simple wrapper for the focal function, returning local -#' statistical moments +#' @return A terra SpatRaster object representing the local distributional moment #' #' @author Jeffrey S. Evans #' diff --git a/R/raster.transformations.R b/R/raster.transformations.R index 864fc7b..7f323cc 100644 --- a/R/raster.transformations.R +++ b/R/raster.transformations.R @@ -7,9 +7,7 @@ #' @param smin Minimum value for stretch #' @param smax Maximum value for stretch #' -#' @return A terra SpatRaster class object of specified transformation -#' -#' @description +#' @details #' Transformation option details: #' * norm - (Normalization_ (0-1): if min(x) < 0 ( x - min(x) ) / ( max(x) - min(x) ) #' * rstd - (Row standardize) (0-1): if min(x) >= 0 x / max(x) This normalizes data @@ -24,6 +22,8 @@ #' * sr - (Square-root) if min(x) >= 0 sqrt(x) #' @md #' +#' @return A terra SpatRaster class object of specified transformation +#' #' @author Jeffrey S. 
Evans #' #' @examples @@ -65,7 +65,7 @@ raster.transformation <- function(x, trans = "norm", smin=0, smax=255) { stop(" Minimum value < 0, cannot log transform") } if( trans == "norm" && rmin < 0) { - print(" Min value < 0, running row standardization instead") + message(" Min value < 0, running row standardization instead") return( x / rmax ) } if( trans == "norm") { diff --git a/R/raster.vol.R b/R/raster.vol.R index cad4948..7ea4c85 100644 --- a/R/raster.vol.R +++ b/R/raster.vol.R @@ -8,15 +8,14 @@ #' @param spct sample percent, if sample (TRUE) #' @param type If sample=TRUE type of sample, options are "random" or "regular" #' +#' @note +#' Since this model needs to operate on all of the raster values, it is not memory safe +#' #' @return #' if sample (FALSE) binary raster object with 1 representing designated #' percent volume else, if sample (TRUE) n sf POINT object with points #' that represent the percent volume of the sub-sample #' -#' @note -#' Since this model needs to operate on all of the raster values, -#' it is not memory safe -#' #' @author Jeffrey S. Evans #' #' @examples diff --git a/R/rasterCorrelation.R b/R/rasterCorrelation.R index a0d49ad..506fc46 100644 --- a/R/rasterCorrelation.R +++ b/R/rasterCorrelation.R @@ -1,6 +1,6 @@ #' @title Raster correlation -#' @description Performs a moving window correlation between -#' two rasters +#' @description +#' Performs a moving window correlation between two rasters #' #' @param x A terra SpatRaster class object for x #' @param y A terra SpatRaster class object for y @@ -11,11 +11,12 @@ #' @param type Type of output, options are: "pearson", "spearman", #' "covariance" #' +#' @note +#' The NA behavior is set to na.rm = TRUE to make default outputs +#' consistent between the terra and raster packages. +#' #' @return A terra SpatRaster class object #' -#' @note The NA behavior is set to na.rm = TRUE to make default outputs -#' consistent between the terra and raster packages. -#' #' @author Jeffrey S. Evans #' #' @examples diff --git a/R/rasterDistance.R b/R/rasterDistance.R index e91e242..af0198e 100644 --- a/R/rasterDistance.R +++ b/R/rasterDistance.R @@ -6,23 +6,24 @@ #' @param y Value(s) in x to calculate distance to #' @param scale (FALSE/TRUE) Perform a row standardization on results #' -#' @return a distance terra SpatRast raster -#' -#' @note +#' @details #' This replicates the terra distance function but uses the Arya & Mount #' Approximate Near Neighbor (ANN) C++ library for calculating distances. While this #' results in a notable increase in performance, it is not memory safe, needing to read #' in the entire raster and does not use the GeographicLib (Karney, 2013) spheroid #' distance method for geographic data. -#' +#' +#' @return A terra SpatRaster representing distances +#' +#' @author Jeffrey S. Evans +#' #' @references #' Arya S., Mount D. M., Netanyahu N. S., Silverman R. and Wu A. Y (1998), An #' optimal algorithm for approximate nearest neighbor searching, Journal of #' the ACM, 45, 891-923. #' -#' @author Jeffrey S.
Evans -#' #' @examples +#' \donttest{ #' library(sf) #' library(terra) #' @@ -44,21 +45,13 @@ #' plot(rd) #' plot( st_geometry(nc.sub), add=TRUE) #' -#' #### Benchmark rasterDistance and terra::distance -#' #### at res=90m the differences are quite notable -#' # ref <- rast(ext(nc), resolution=500) -#' # rnc <- mask(rasterize(vect(nc.sub), ref, background=2), -#' # vect(nc)) -#' # crs(rnc) <- "ESRI:102008" -#' # system.time({ rasterDistance(rnc, y=1) }) -#' # system.time({ distance(rnc, target=2) }) -#' +#' } #' @seealso \code{\link[terra]{distance}, \link[terra]{distance}} #' #' @import terra #' @export rasterDistance rasterDistance <- function(x, y, scale = FALSE){ - if(!any(which(utils::installed.packages()[,1] %in% "RANN"))) + if(length(find.package("RANN", quiet = TRUE)) == 0) stop("please install RANN package before running this function") if(missing(x)) stop("x argument is missing") diff --git a/R/remove.holes.R b/R/remove.holes.R index 943cb57..fa9b83a 100644 --- a/R/remove.holes.R +++ b/R/remove.holes.R @@ -5,14 +5,14 @@ #' @param x sf POLYGON or MULTIPOLYGON object #' @param only.holes Delete holes (FALSE) or return only holes (TRUE) #' -#' @return sf POLYGON object -#' -#' @note +#' @details #' A hole is considered a polygon within a polygon (island) representing null #' geometry. If you want to return only the holes, use only.holes = TRUE. #' To delete holes use default only.holes = FALSE. Single part features will be #' returned regardless of input. #' +#' @return sf POLYGON object +#' #' @author Jeffrey S. Evans #' #' @examples diff --git a/R/remove_duplicates.R b/R/remove_duplicates.R index d890d7b..034c1a1 100644 --- a/R/remove_duplicates.R +++ b/R/remove_duplicates.R @@ -5,14 +5,14 @@ #' @param threshold A distance threshold indicating fuzzy duplication, #' default is 0.00001 #' -#' @return sf object, of same feature class as x, with duplicate geometries removed -#' -#' @note +#' @details #' This function removes duplicate geometries based on order and not "non null" #' attribution or other factors; the first feature gets to stay. If one needs to #' know which points were removed sf::st_difference can be used between original #' data and results of the function. #' +#' @return sf object, of same feature class as x, with duplicate geometries removed +#' #' @author Jeffrey S. Evans #' #' @examples diff --git a/R/rotate.polygon.R b/R/rotate.polygon.R index 8cef81a..b5df655 100644 --- a/R/rotate.polygon.R +++ b/R/rotate.polygon.R @@ -7,12 +7,12 @@ #' @param anchor Location to rotate polygon on options are "center", #' "lower.left" and "upper.right" #' -#' @return an sp or sf polygon object with rotated polygon -#' -#' @note +#' @details #' The anchor is the location that the rotation is anchored to. The center #' is the centroid where the lower.left and upper.right are based on the #' min or max of the coordinates respectively. +#' +#' @return an sp or sf polygon object with rotated polygon #' #' @examples #' library(sf) diff --git a/R/sa.trans.R b/R/sa.trans.R index 7fb3bb0..6d7ea5a 100644 --- a/R/sa.trans.R +++ b/R/sa.trans.R @@ -10,26 +10,25 @@ #' @param asp.units Units of aspect values, options are: #' "degrees" or "radians" #' -#' @return A vector of the modeled value -#' -#' @description +#' @details #' An a priori assumption of a maximum in the NW quadrant (45 azimuth) #' and a minimum in the SW quadrant can be replaced by an empirically #' determined location of the optimum without repeated calculations of #' the regression fit.
In addition it is argued that expressions for #' the effects of aspect should always be considered as terms involving #' an interaction with slope (Stage, 1976) -#' @description +#' #' For slopes from 0% - 100%, The functions are linearized and #' bounded from -1 to 1. Greater than 100% slopes are treated #' out of the -1 to 1 range. #' -#' @description #' An alternative for slopes with values approaching infinity is #' to take the square root of slope/100 to reduce the range of #' values.By default this model test all values greater than 100% #' to 101% and flat areas (-1) to nodata. #' +#' @return A vector of the modeled value +#' #' @author Jeffrey S. Evans #' #' @references @@ -37,12 +36,11 @@ #' and Habitat Type on Tree Growth. Forest Science 22(3):457-460. #' #' @examples -#' sa.trans(slope = 48.146, aspect = 360.000) -#' #' library(terra) +#' sa.trans(slope = 48.146, aspect = 360.000) +#' +#' # Example of creating slope*cos(aspect) raster #' elev <- rast(system.file("extdata/elev.tif", package="spatialEco")) -#' -#' # Example of slope*cos(aspect) #' sa <- terra::terrain(elev, v=c("slope", "aspect"), unit="degrees") #' scosa <- terra::lapp(c(sa[[1]], sa[[2]]), fun = sa.trans) #' diff --git a/R/sample.annulus.R b/R/sample.annulus.R index 904fa67..3347436 100644 --- a/R/sample.annulus.R +++ b/R/sample.annulus.R @@ -1,6 +1,6 @@ #' @title Sample annulus #' @description Creates sample points based on annulus with defined -#' inner and outer radius +#' inner and outer radius #' #' @param x An sf POINT class object #' @param r1 Numeric value defining inner radius of annulus @@ -10,12 +10,12 @@ #' @param size Number of samples #' @param ... Additional arguments passed to sf::st_sample #' -#' @return sp SpatialPointsataFrame OBJECT -#' -#' @note -#' Function can be used for distance based sampling. This is a sampling method +#' @details +#' Function can be used for distance based sampling which is a sampling method #' that can be used to capture spatially lagged variation. #' +#' @return sf POINTS object +#' #' @author Jeffrey S. Evans #' #' @examples @@ -24,7 +24,6 @@ #' data(meuse, package = "sp") #' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, #' agr = "constant") -#' } #' #' xy <- meuse[2,] #' rs100 <- sample.annulus(xy, r1=50, r2=100, size = 50) @@ -36,7 +35,7 @@ #' legend("topright", legend=c("50-100m", "100-200m", "source"), #' pch=c(20,20,20), col=c("blue","red","black")) #' -#' \dontrun{ +#' \donttest{ #' # Run on multiple points #' rs100 <- sample.annulus(meuse[1:3,], r1=50, r2=100, #' size = 50) @@ -48,7 +47,10 @@ #' legend("topright", legend=c("50-100m", "100-200m", "source"), #' pch=c(20,20,20), col=c("blue","red","black")) #' } - +#' } else { +#' cat("Please install sp package to run example", "\n") +#' } +#' #' @export sample.annulus sample.annulus <- function(x, r1, r2, size = 10, ...) { if(!inherits(x, c("sf", "sfc"))) diff --git a/R/sample.line.R b/R/sample.line.R deleted file mode 100644 index 21ad0bf..0000000 --- a/R/sample.line.R +++ /dev/null @@ -1,18 +0,0 @@ -#' @title Sample Lines -#' @description Creates a sample for each line in a -#' sf LINESTRING class object -#' -#' @param ... arguments passed to sf::st_sample -#' @return NA -#' -#' @examples -#' \dontrun{ -#' sf::sf_sample() -#' } -#' -#' @export -sample.line <- function(...) 
{ - .Deprecated("sample.line", package="spatialEco", - msg="Function is deprecated because sf provides the ability to - sample lines using sf::st_sample function ") -} diff --git a/R/sample.poly.R b/R/sample.poly.R deleted file mode 100644 index dcd795d..0000000 --- a/R/sample.poly.R +++ /dev/null @@ -1,18 +0,0 @@ -#' @title Sample Polygons -#' @description Creates an equal sample of n for each polygon in an -#' sp Polygon class object -#' -#' @param ... arguments passed to sf::st_sample -#' @return NA -#' -#' @examples -#' \dontrun{ -#' sf::sf_sample() -#' } -#' -#' @export -sample.poly <- function(...) { - .Deprecated("sample.poly", package="spatialEco", - msg="Function is deprecated because sf provides the ability to - sample polygons using sf::st_sample function ") -} diff --git a/R/sample.transect.R b/R/sampleTransect.R similarity index 93% rename from R/sample.transect.R rename to R/sampleTransect.R index f76256b..56e5278 100644 --- a/R/sample.transect.R +++ b/R/sampleTransect.R @@ -1,100 +1,106 @@ -#' @title Sample transect -#' @description Creates random transects from points and generates -#' sample points along each transect -#' -#' @param x A sf point object -#' @param min.dist Minimum length of transect(s) -#' @param max.dist Maximum length of transect(s) -#' @param distance A vector of distances, same length as x, used -#' to define transect distances (length) -#' @param azimuth A vector of azimuths, same length as x, used -#' to define transect direction -#' @param id A unique identification column in x -#' @param ... Additional arguments passed to st_sample -#' -#' @note -#' Function create lines and samples using random or defined direction -#' and length transects and then creates a point sample along each transect. -#' The characteristic of the sample points are defined by arguments passed -#' to the sf::st_sample function. The distance and azimuth arguments allow -#' for specifying the exact length and direction for each points transect. -#' -#' @author Jeffrey S. Evans -#' -#' @examples -#' -#' library(sf) -#' if(require(sp, quietly = TRUE)) { -#' data(meuse, package = "sp") -#' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, -#' agr = "constant") -#' } -#' meuse <- meuse[sample(1:nrow(meuse),10),] -#' -#' transects <- sampleTransect(meuse, min.dist=200, max.dist=500, -#' type="regular", size=20) -#' plot(st_geometry(transects$transects)) -#' plot(st_geometry(meuse), pch=19, cex=2, add=TRUE) -#' plot(st_geometry(transects$samples), -#' col="red", pch=19, add=TRUE) -#' -#' @export sampleTransect -sampleTransect <- function(x, min.dist, max.dist, distance = NULL, - azimuth = NULL, id = NULL, ...) 
{ - if (!inherits(x, "sf")) - stop(deparse(substitute(x)), " must be an sf POLYGON object") - if(!is.null(distance)) { - if(length(distance) != nrow(x)) - stop("length of distance does not match x") - } - if(!is.null(azimuth)) { - if(length(azimuth) != nrow(x)) - stop("length of azimuth does not match x") - } - if(!is.null(id)) { - if(!names(x) %in% id) - stop(id, " not present in ", deparse(substitute(x))) - ids <- sf::st_drop_geometry(x[,id])[,1] - } else { - ids <- 1:nrow(x) - } - dots <- as.list(match.call(expand.dots = TRUE)[-1]) - if (is.null(dots[["size"]]) & "size" %in% names(dots) == FALSE) - dots[["size"]] <- 10 - if (is.null(dots[["type"]]) & "type" %in% names(dots) == FALSE) - dots[["type"]] <- "random" - message(paste("Creating", dots$size, dots$type, "samples for each transect")) - tlines <- list() - tpoints <- list() - for(i in 1:nrow(x) ) { - p <- x[i,] - if(!is.null(azimuth)) { - az = azimuth[i] - } else { - az = stats::runif(1, 0, 360) - } - if(!is.null(azimuth)) { - d = distance[i] - } else { - d = stats::runif(1, min.dist, max.dist) - } - samp.pt <- spatialEco::bearing.distance( - sf::st_coordinates(p)[,1], - sf::st_coordinates(p)[,2], - distance = d, azimuth = az) - l <- sf::st_as_sf(sf::st_sfc(sf::st_linestring( - rbind(sf::st_coordinates(p)[,1:2], samp.pt))), - crs=sf::st_crs(x)) - sf::st_geometry(l) <- "geometry" - l$ID <- ids[i] - dots[["x"]] <- l - pts <- suppressWarnings(sf::st_as_sf(sf::st_cast( - do.call(sf::st_sample, dots),"POINT"))) - sf::st_geometry(pts) <- "geometry" - pts$ID <- ids[i] - tpoints[[i]] <- pts - tlines[[i]] <- l - } - return( list( transects = do.call("rbind", tlines), - samples = do.call("rbind", tpoints) ) ) -} +#' @title Sample transect +#' @description Creates random transects from points and generates +#' sample points along each transect +#' +#' @param x A sf point object +#' @param min.dist Minimum length of transect(s) +#' @param max.dist Maximum length of transect(s) +#' @param distance A vector of distances, same length as x, used +#' to define transect distances (length) +#' @param azimuth A vector of azimuths, same length as x, used +#' to define transect direction +#' @param id A unique identification column in x +#' @param ... Additional arguments passed to st_sample +#' +#' @details +#' Function create lines and samples using random or defined direction +#' and length transects and then creates a point sample along each transect. +#' The characteristic of the sample points are defined by arguments passed +#' to the sf::st_sample function. The distance and azimuth arguments allow +#' for specifying the exact length and direction for each points transect. +#' +#' @return +#' A list object contaning sf LINES and POINTS objects representing random transects +#' and sample points along each transect. The "ID" column links the resulting data. +#' +#' @author Jeffrey S. 
Evans +#' +#' @examples +#' if(require(sp, quietly = TRUE)) { +#' library(sf) +#' data(meuse, package = "sp") +#' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, +#' agr = "constant") +#' meuse <- meuse[sample(1:nrow(meuse),10),] +#' +#' transects <- sampleTransect(meuse, min.dist=200, max.dist=500, +#' type="regular", size=20) +#' plot(st_geometry(transects$transects)) +#' plot(st_geometry(meuse), pch=19, cex=2, add=TRUE) +#' plot(st_geometry(transects$samples), +#' col="red", pch=19, add=TRUE) +#' +#' } else { +#' cat("Please install sp package to run example", "\n") +#' } +#' +#' @export sampleTransect +sampleTransect <- function(x, min.dist, max.dist, distance = NULL, + azimuth = NULL, id = NULL, ...) { + if (!inherits(x, "sf")) + stop(deparse(substitute(x)), " must be an sf POLYGON object") + if(!is.null(distance)) { + if(length(distance) != nrow(x)) + stop("length of distance does not match x") + } + if(!is.null(azimuth)) { + if(length(azimuth) != nrow(x)) + stop("length of azimuth does not match x") + } + if(!is.null(id)) { + if(!id %in% names(x)) + stop(id, " not present in ", deparse(substitute(x))) + ids <- sf::st_drop_geometry(x[,id])[,1] + } else { + ids <- 1:nrow(x) + } + dots <- as.list(match.call(expand.dots = TRUE)[-1]) + if (is.null(dots[["size"]]) & "size" %in% names(dots) == FALSE) + dots[["size"]] <- 10 + if (is.null(dots[["type"]]) & "type" %in% names(dots) == FALSE) + dots[["type"]] <- "random" + message(paste("Creating", dots$size, dots$type, "samples for each transect")) + tlines <- list() + tpoints <- list() + for(i in 1:nrow(x) ) { + p <- x[i,] + if(!is.null(azimuth)) { + az = azimuth[i] + } else { + az = stats::runif(1, 0, 360) + } + if(!is.null(distance)) { + d = distance[i] + } else { + d = stats::runif(1, min.dist, max.dist) + } + samp.pt <- spatialEco::bearing.distance( + sf::st_coordinates(p)[,1], + sf::st_coordinates(p)[,2], + distance = d, azimuth = az) + l <- sf::st_as_sf(sf::st_sfc(sf::st_linestring( + rbind(sf::st_coordinates(p)[,1:2], samp.pt))), + crs=sf::st_crs(x)) + sf::st_geometry(l) <- "geometry" + l$ID <- ids[i] + dots[["x"]] <- l + pts <- suppressWarnings(sf::st_as_sf(sf::st_cast( + do.call(sf::st_sample, dots),"POINT"))) + sf::st_geometry(pts) <- "geometry" + pts$ID <- ids[i] + tpoints[[i]] <- pts + tlines[[i]] <- l + } + return( list( transects = do.call("rbind", tlines), + samples = do.call("rbind", tpoints) ) ) +} diff --git a/R/sar.R b/R/sar.R index 0691cd8..9169b25 100644 --- a/R/sar.R +++ b/R/sar.R @@ -6,9 +6,10 @@ #' is in planar units) #' @param scale (TRUE/FALSE) Scale (row standardize) results #' -#' @return A terra SpatRaster class object of the Surface Area Ratio +#' @details +#' SAR is calculated as: resolution^2 * cos( (degrees(slope) * (pi / 180)) ) #' -#' @note SAR is calculated as: resolution^2 * cos( (degrees(slope) * (pi / 180)) ) +#' @return A terra SpatRaster class object of the Surface Area Ratio #' #' @author Jeffrey S. Evans #' diff --git a/R/se.news.R b/R/se.news.R deleted file mode 100644 index 2f7f924..0000000 --- a/R/se.news.R +++ /dev/null @@ -1,8 +0,0 @@ -#' @title spatialEco news -#' @description Displays release notes -#' @param ... not used -#' @export -se.news <- function(...) { - newsfile <- file.path(system.file(package="spatialEco"), "NEWS") - file.show(newsfile) -} diff --git a/R/separability.R b/R/separability.R index 097f74d..f067e90 100644 --- a/R/separability.R +++ b/R/separability.R @@ -8,15 +8,7 @@ #' @param clabs labels for two classes #' @param ...
additional arguments passed to plot #' -#' @return A data.frame with the following separability metrics: -#' * B - Bhattacharryya distance statistic -#' * JM - Jeffries-Matusita distance statistic -#' * M - M-Statistic -#' * D - Divergence index -#' * TD - Transformed Divergence index -#' @md -#' -#' @description +#' @details #' Available statistics: #' * M-Statistic (Kaufman & Remer 1994) - This is a measure of the difference of the #' distributional peaks. A large M-statistic indicates good separation between the @@ -37,28 +29,36 @@ #' #' @md #' +#' @return A data.frame with the following separability metrics: +#' * B - Bhattacharryya distance statistic +#' * JM - Jeffries-Matusita distance statistic +#' * M - M-Statistic +#' * D - Divergence index +#' * TD - Transformed Divergence index +#' @md +#' #' @author Jeffrey S. Evans #' #' @references #' Anderson, M. J., & Clements, A. (2000) Resolving environmental disputes: a #' statistical method for choosing among competing cluster models. Ecological #' Applications 10(5):1341-1355 -#' @references +#' #' Bhattacharyya, A. (1943) On a measure of divergence between two statistical #' populations defined by their probability distributions'. Bulletin of the #' Calcutta Mathematical Society 35:99-109 -#' @references +#' #' Bruzzone, L., F. Roli, S.B. Serpico (1995) An extension to multiclass cases of #' the Jefferys-Matusita distance. IEEE Transactions on Pattern Analysis and #' Machine Intelligence 33:1318-1321 -#' @references +#' #' Du, H., C.I. Chang, H. Ren, F.M. D'Amico, J. O. Jensen, J., (2004) New #' Hyperspectral Discrimination Measure for Spectral Characterization. Optical #' Engineering 43(8):1777-1786. -#' @references +#' #' Kailath, T., (1967) The Divergence and Bhattacharyya measures in signal #' selection. IEEE Transactions on Communication Theory 15:52-60 -#' @references +#' #' Kaufman Y., and L. Remer (1994) Detection of forests using mid-IR reflectance: #' An application for aerosol studies. IEEE T. Geosci.Remote. 32(3):672-683. #' diff --git a/R/sf.kde.R b/R/sf.kde.R index 10950c0..03e1278 100644 --- a/R/sf.kde.R +++ b/R/sf.kde.R @@ -16,14 +16,14 @@ #' @param mask (TRUE/FALSE) mask resulting raster if ref is provided #' as a SpatRaster #' -#' @return a terra SpatRaster class object containing kernel density estimate -#' #' @details #' Please note that ks methods for estimation have been reverted to the Gaussian method proposed #' in Venables & Ripley (2002). There was not enough evidence that the Chacon & Duong (2018) #' multivariate method(s) for bandwidth selection and kernel estimation were suitable for #' spatial random fields. #' +#' @return a terra SpatRaster class object containing kernel density estimate +#' #' @author Jeffrey S. Evans #' #' @references diff --git a/R/sf_dissolve.R b/R/sf_dissolve.R index f1fd6c6..12ebcd8 100644 --- a/R/sf_dissolve.R +++ b/R/sf_dissolve.R @@ -5,14 +5,15 @@ #' @param y An attribute in x to dissolve by, default is NULL #' @param overlaps (FALSE/TRUE) Dissolve overlapping polygons, negates using attribute #' -#' @return A dissolved POLYGON or MULTIPOLYGON object -#' -#' @note If a dissolve attribute is defined, the result will be a +#' @details +#' If a dissolve attribute is defined, the result will be a #' MULTIPOLYGON with the grouping attribute column. If y=NULL all polygons #' will be dissolved into a single attribute, unless there is spatial #' discontinuity (eg., gaps) in the data.
The intent of overlaps=TRUE is to #' provide functionality for dissolving overlapping polygons and should only #' be used in this specialized case. +#' +#' @return A dissolved POLYGON or MULTIPOLYGON object #' #' @author Jeffrey S. Evans #' diff --git a/R/sg.smooth.R b/R/sg.smooth.R index b8c17f2..bf9e8b9 100644 --- a/R/sg.smooth.R +++ b/R/sg.smooth.R @@ -10,8 +10,8 @@ #' @param na.rm NA behavior #' @param ... not used #' -#' @return A vector of the smoothed data equal to length of x. Please note; -#' NA values are retained +#' @return +#' A vector of the smoothed data equal to length of x. Please note; NA values are retained #' #' @author Jeffrey S. Evans tnc.org> #' @@ -20,35 +20,35 @@ #' by Simplified Least Squares Procedures. Analytical Chemistry. 36(8):1627-39 #' #' @examples -#' y <- c(0.112220988, 0.055554941, 0.013333187, 0.055554941, 0.063332640, 0.014444285, -#' 0.015555384, 0.057777140, 0.059999339, 0.034444068, 0.058888242, 0.136665165, -#' 0.038888458, 0.096665606,0.141109571, 0.015555384, 0.012222088, 0.012222088, -#' 0.072221428, 0.052221648, 0.087776810,0.014444285, 0.033332966, 0.012222088, -#' 0.032221869, 0.059999339, 0.011110989, 0.011110989,0.042221759, 0.029999670, -#' 0.018888680, 0.098887801, 0.016666483, 0.031110767, 0.061110441,0.022221979, -#' 0.073332526, 0.012222088, 0.016666483, 0.012222088, 0.122220881, 0.134442955, -#' 0.094443403, 0.128887475, 0.045555055, 0.152220547, 0.071110331, 0.018888680, -#' 0.022221979, 0.029999670, 0.035555165, 0.014444285, 0.049999449, 0.074443623, -#' 0.068888135, 0.062221535, 0.032221869, 0.095554501, 0.143331751, 0.121109776, -#' 0.065554835, 0.074443623, 0.043332856, 0.017777583, 0.016666483, 0.036666263, -#' 0.152220547, 0.032221869, 0.009999890, 0.009999890, 0.021110879, 0.025555275, -#' 0.099998899, 0.015555384, 0.086665712, 0.008888791, 0.062221535, 0.044443958, -#' 0.081110224, 0.015555384, 0.089999005, 0.082221314, 0.056666043, 0.013333187, -#' 0.048888352, 0.075554721, 0.025555275, 0.056666043, 0.146665052, 0.118887581, -#' 0.125554174, 0.024444176, 0.124443069, 0.012222088, 0.126665279, 0.048888352, -#' 0.046666153, 0.141109571, 0.015555384, 0.114443190) +#' y <- c(0.112220988, 0.055554941, 0.013333187, 0.055554941, 0.063332640, 0.014444285, +#' 0.015555384, 0.057777140, 0.059999339, 0.034444068, 0.058888242, 0.136665165, +#' 0.038888458, 0.096665606,0.141109571, 0.015555384, 0.012222088, 0.012222088, +#' 0.072221428, 0.052221648, 0.087776810,0.014444285, 0.033332966, 0.012222088, +#' 0.032221869, 0.059999339, 0.011110989, 0.011110989,0.042221759, 0.029999670, +#' 0.018888680, 0.098887801, 0.016666483, 0.031110767, 0.061110441,0.022221979, +#' 0.073332526, 0.012222088, 0.016666483, 0.012222088, 0.122220881, 0.134442955, +#' 0.094443403, 0.128887475, 0.045555055, 0.152220547, 0.071110331, 0.018888680, +#' 0.022221979, 0.029999670, 0.035555165, 0.014444285, 0.049999449, 0.074443623, +#' 0.068888135, 0.062221535, 0.032221869, 0.095554501, 0.143331751, 0.121109776, +#' 0.065554835, 0.074443623, 0.043332856, 0.017777583, 0.016666483, 0.036666263, +#' 0.152220547, 0.032221869, 0.009999890, 0.009999890, 0.021110879, 0.025555275, +#' 0.099998899, 0.015555384, 0.086665712, 0.008888791, 0.062221535, 0.044443958, +#' 0.081110224, 0.015555384, 0.089999005, 0.082221314, 0.056666043, 0.013333187, +#' 0.048888352, 0.075554721, 0.025555275, 0.056666043, 0.146665052, 0.118887581, +#' 0.125554174, 0.024444176, 0.124443069, 0.012222088, 0.126665279, 0.048888352, +#' 0.046666153, 0.141109571, 0.015555384, 0.114443190) +#' +#' plot(y, 
type="l", lty = 3, main="Savitzky-Golay with l = 51, 25, 10") +#' lines(sg.smooth(y),col="red", lwd=2) +#' lines(sg.smooth(y, l = 25),col="blue", lwd=2) +#' lines(sg.smooth(y, l = 10),col="green", lwd=2) #' -#' plot(y, type="l", lty = 3, main="Savitzky-Golay with l = 51, 25, 10") -#' lines(sg.smooth(y),col="red", lwd=2) -#' lines(sg.smooth(y, l = 25),col="blue", lwd=2) -#' lines(sg.smooth(y, l = 10),col="green", lwd=2) -#' -#' #### function applied to a multi-band raster -#' library(terra) -#' ( r <- spatialEco::random.raster(n.layers=20) ) -#' -#' # raster stack example -#' ( r.sg <- app(r, sg.smooth) ) +#' #### function applied to a multi-band raster +#' library(terra) +#' ( r <- spatialEco::random.raster(n.layers=20) ) +#' +#' # raster stack example +#' ( r.sg <- app(r, sg.smooth) ) #' #' @export sg.smooth sg.smooth <- function(x, f = 4, l = 51, d = 1, na.rm, ...) { diff --git a/R/shannons.R b/R/shannons.R index bfcdd45..6c2c52b 100644 --- a/R/shannons.R +++ b/R/shannons.R @@ -6,22 +6,22 @@ #' @param ens Calculate effective number of species (TRUE/FALSE) #' @param margin Calculate diversity for rows or columns. c("row", "col") #' +#' @details +#' The expected for H is 0-3+ where a value of 2 has been suggested as medium-high diversity, +#' for evenness is 0-1 with 0 signifying no evenness and 1, complete evenness. +#' #' @return #' data.frame with "H" (Shannon's diversity) and "evenness" (Shannon's #' evenness where H / max( sum(x) ) ) and ESN #' -#' @note -#' The expected for H is 0-3+ where a value of 2 has been suggested as medium-high diversity, -#' for evenness is 0-1 with 0 signifying no evenness and 1, complete evenness. -#' #' @author Jeffrey S. Evans #' #' @references #' Shannon, C. E. and W. Weaver (1948) A mathematical theory of communication. The Bell #' System Technical Journal, 27:379-423. -#' @references +#' #' Simpson, E. H. (1949) Measurement of diversity. Nature 163:688 -#' @references +#' #' Roth, D. S., I. Perfecto, and B. Rathcke (1994) The effects of management systems on #' ground-foraging ant diversity in Costa Rica. Ecological Applications 4(3):423-436. #' diff --git a/R/shift.R b/R/shift.R index 824dee8..1e10211 100644 --- a/R/shift.R +++ b/R/shift.R @@ -5,8 +5,8 @@ #' @param lag Number of lagged offsets, default is 1 #' @param pad Value to fill the lagged offset with, default is NA #' -#' @return a vector, length equal to x, with offset length filled -#' with pad values +#' @return +#' A vector, length equal to x, with offset length filled with pad values #' #' @author Jeffrey S. Evans #' diff --git a/R/sieve.R b/R/sieve.R index 03a1d86..42cd770 100644 --- a/R/sieve.R +++ b/R/sieve.R @@ -5,12 +5,15 @@ #' @param a Query area to remove #' @param unit The unit to use for area query options are c("m", "km", "ha") #' +#' @details +#' A sieve can be used to establish a minimal mapping unit where +#' contiguous cells < specified query area are set to NA. These NA +#' values can then be filled using focal (majority, median, mean) +#' #' @return -#' A terra SpatRaster with < a set to NA +#' A terra SpatRaster with cells < a set to NA #' -#' @note -#' A sieve can be used to establish a minimal mapping unit where -#' contiguous cells < specified query area are set to NA +#' @author Jeffrey S. Evans #' #' @examples #' \donttest{ diff --git a/R/similarity.R b/R/similarity.R index 6f84d6e..1dcfa68 100644 --- a/R/similarity.R +++ b/R/similarity.R @@ -10,14 +10,14 @@ #' @param ID Unique ID vector to use as reference ID's (rownames). 
Must be #' unique and same length as number of rows in x #' +#' @details +#' This function uses row-based imputation to identify k similar neighbors for each +#' observation. It has been used to identify offsets based on ecological similarity. +#' #' @return #' data.frame with k similar targets and associated distances. If frequency = TRUE the #' freq column represents the number of times a row (ID) was selected as a neighbor. -#' -#' @note -#' This function uses row-based imputation to identify k similar neighbors for each -#' observation. Has been used to identify offsets based on ecological similarity. -#' +#' #' @author Jeffrey S. Evans #' #' @references @@ -51,7 +51,7 @@ #' @export similarity similarity <- function(x, k=4, method="mahalanobis", frequency = TRUE, scale = TRUE, ID = NULL) { - if(!any(which(utils::installed.packages()[,1] %in% "yaImpute"))) + if(length(find.package("yaImpute", quiet = TRUE)) == 0) stop("please install yaImpute package before running this function") if(!class(x)[1] == "data.frame") stop( "x is not a data.frame") if(!is.null(x)) { diff --git a/R/smooth.time.series.R b/R/smooth.time.series.R index 47afedf..e4dfcc3 100644 --- a/R/smooth.time.series.R +++ b/R/smooth.time.series.R @@ -8,9 +8,6 @@ #' @param ... Additional arguments passed to terra::app (for #' writing results to disk) #' -#' @return -#' A terra SpatRaster containing imputed or smoothed data. -#' #' @details #' This function uses a LOESS regression to smooth the time-series. If the data is #' smoothed (using the smooth.data = TRUE argument) it will be entirely replaced by @@ -20,10 +17,13 @@ #' The results can be dramatically affected by the choice of the smoothing #' parameter (f), so caution is warranted and the effect of this parameter tested. #' +#' @return +#' A terra SpatRaster containing imputed or smoothed data. +#' #' @author Jeffrey S. Evans #' #' @examples -#' \dontrun{ +#' \donttest{ #' library(terra) #' random.raster <- function(rows=50, cols=50, l=20, min=0, max=1){ #' do.call(c, replicate(l, rast(matrix(runif(rows * cols, min, max), diff --git a/R/sobel.R b/R/sobel.R index 8ebf708..927069a 100644 --- a/R/sobel.R +++ b/R/sobel.R @@ -7,9 +7,7 @@ #' if method="edge", raster::focal (if you want a file #' written to disk use filename = "" argument) #' -#' @return A raster class object or raster written to disk -#' -#' @description +#' @details #' The Sobel-Feldman operator is a discrete differentiation operator, deriving an #' approximation of the gradient of the intensity function. Abrupt discontinuities #' in the gradient function represent edges, making this a common approach for edge @@ -21,6 +19,8 @@ #' the down-direction. At each pixel in the raster, the resulting gradient can be combined #' to give the gradient intensity, using: SQRT( Gx^2 + Gy^2 ). This can be expanded into the #' gradient direction using atan(Gx/Gy) +#' +#' @return A raster class object or raster written to disk #' #' @author Jeffrey S. Evans #' diff --git a/R/sp.na.omit.R b/R/sp.na.omit.R deleted file mode 100644 index dcc78af..0000000 --- a/R/sp.na.omit.R +++ /dev/null @@ -1,12 +0,0 @@ -#' @title sp na.omit -#' @description Removes row or column NA's in sp object -#' -#' @param ... arguments passed to stats::na.omit -#' @return NA -#' -#' @export -sp.na.omit <- function(...) 
{ - .Deprecated("sp.na.omit", package="spatialEco", - msg="Function is deprecated because stats::na.omit operates on - sf class objects ") -} diff --git a/R/spatial.select.R b/R/spatial.select.R index 67629d2..779acb4 100644 --- a/R/spatial.select.R +++ b/R/spatial.select.R @@ -11,11 +11,7 @@ #' @param neighbors If predicate = "contingency" type of neighbors options are #' c("queen", "rook") #' -#' @return -#' An sf object representing a subset of y based on the spatial query of x or, -#' if predicate = contingency a sparse matrix representing neighbor indexes -#' -#' @note +#' @details #' Performs a spatial select of features based on an overlay of a polygon (x), #' which can represent multiple features, and a polygon, point or line feature #' classes (y). User can specify a partial or complete intersection, using within @@ -23,25 +19,29 @@ #' query polygon. This function is similar to ArcGIS/Pro spatial select. Please note #' that for point to point neighbor selections use the knn function. #' Valid spatial predicates include: intersect, touches, covers, contains, proximity -#' and contingency. See DE-9IM topology model for detailed information on data predicates. -#' @details -#' * intersection Create a spatial intersection between two features -#' * intersect Boolean evaluation of features intersecting -#' * contains Boolean evaluation of x containing y -#' * covers Boolean evaluation of x covering y -#' * touches Boolean evaluation of x touching y -#' * proximity Evaluation of distance-based proximity of x to y (x and y can be the same) -#' * contingency Evaluation of polygon contingency (eg., 1st, 2nd order) +#' and contingency. +#' See DE-9IM topology model for detailed information on following data predicates. +#' * intersection Create a spatial intersection between two features +#' * intersect Boolean evaluation of features intersecting +#' * contains Boolean evaluation of x containing y +#' * covers Boolean evaluation of x covering y +#' * touches Boolean evaluation of x touching y +#' * proximity Evaluation of distance-based proximity of x to y (x and y can be the same) +#' * contingency Evaluation of polygon contingency (eg., 1st, 2nd order) #' @md #' +#' @return +#' An sf object representing a subset of y based on the spatial query of x or, +#' if predicate = contingency a sparse matrix representing neighbor indexes +#' #' @author Jeffrey S. Evans #' #' @examples +#' if(require(sp, quietly = TRUE)) { #' library(sf) -#' if(require(sp, quietly = TRUE)) { -#' data(meuse, package = "sp") -#' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, -#' agr = "constant") +#' data(meuse, package = "sp") +#' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, +#' agr = "constant") #' #' spolys <- hexagons(meuse, res=100) #' spolys$ID <- 1:nrow(spolys) @@ -67,7 +67,10 @@ #' spatial.select(x=spolys, predicate = "contingency") #' spatial.select(spolys, predicate = "contingency", neighbors = "rook") #' +#' } else { +#' cat("Please install sp package to run example", "\n") #' } +#' #' @seealso \code{\link[sf]{st_intersection}} for details on intersection predicate #' @seealso \code{\link[sf]{st_intersects}} for details on intersect predicate #' @seealso \code{\link[sf]{st_contains}} for details on contain predicate diff --git a/R/spatialEcoNews.R b/R/spatialEcoNews.R index f2de26b..4b4e7af 100644 --- a/R/spatialEcoNews.R +++ b/R/spatialEcoNews.R @@ -1,4 +1,9 @@ -spatialEcoNews <- function() { +#' @title spatialEco news +#' @description Displays release notes +#' @param ... 
not used +#' @return Shows package NEWS file +#' @export +spatialEcoNews <- function(...) { newsfile <- file.path(system.file(package="spatialEco"), "NEWS") file.show(newsfile) } diff --git a/R/spectral.separability.R b/R/spectral.separability.R index 3c46129..8dde5bd 100644 --- a/R/spectral.separability.R +++ b/R/spectral.separability.R @@ -7,10 +7,7 @@ #' row wise values in x #' @param jeffries.matusita (TRUE/FALSE) Return J-M distance (default) else Bhattacharyya #' -#' @return A matrix of class-wise Jeffries-Matusita or Bhattacharyya distance -#' separability values -#' -#' @description +#' @details #' Available statistics: #' * Bhattacharyya distance (Bhattacharyya 1943; Harold 2003) measures the similarity #' of two discrete or continuous probability distributions. @@ -20,23 +17,26 @@ #' where 2 suggests complete separability. #' @md #' +#' @return +#' A matrix of class-wise Jeffries-Matusita or Bhattacharyya distance separability values +#' #' @author Jeffrey S. Evans #' #' @references #' Bhattacharyya, A. (1943) On a measure of divergence between two statistical #' populations defined by their probability distributions. Bulletin of the #' Calcutta Mathematical Society 35:99-109 -#' @references +#' #' Bruzzone, L., F. Roli, S.B. Serpico (1995) An extension to multiclass cases of #' the Jeffries-Matusita distance. IEEE Transactions on Pattern Analysis and #' Machine Intelligence 33:1318-1321 -#' @references +#' #' Kailath, T., (1967) The Divergence and Bhattacharyya measures in signal #' selection. IEEE Transactions on Communication Theory 15:52-60 #' #' @examples -#'#' # Create example data #' require(MASS) +#' # Create example data #' d <- 6 # Number of bands #' n.class <- 5 # Number of classes #' n <- rep(1000, 5) diff --git a/R/spherical.sd.R b/R/spherical.sd.R index 38dc799..7684e4f 100644 --- a/R/spherical.sd.R +++ b/R/spherical.sd.R @@ -6,8 +6,6 @@ #' @param variance (FALSE|TRUE) Output spherical variance rather than standard deviation #' @param ... Additional arguments passed to terra::app (can write raster to disk here) #' -#' @return A terra SpatRaster class object of the spherical standard deviation -#' #' @details #' Surface variability using spherical variance/standard deviation. #' The variation can be assessed using the spherical standard deviation of the normal @@ -26,6 +24,8 @@ #' The value of (1 - R^2), which will lie between 0 and 1, is the spherical variance, #' and its square root can be considered the spherical standard deviation. #' +#' @return A terra SpatRaster class object of the spherical standard deviation +#' #' @author Jeffrey S. Evans <jeffrey_evans@tnc.org> #' #' @examples diff --git a/R/squareBuffer.R b/R/squareBuffer.R index b45d7c4..97a7b36 100644 --- a/R/squareBuffer.R +++ b/R/squareBuffer.R @@ -5,9 +5,9 @@ #' @param a Numeric single or vector indicating buffer distance(s) #' @param ... Additional arguments passed to st_buffer #' -#' @return A single feature sf class polygon object +#' @details Function creates a square buffer of a feature class. #' -#' @note Function creates a square buffer of feature class. +#' @return A single feature sf class polygon object #' #' @author Jeffrey S. Evans #' diff --git a/R/srr.R b/R/srr.R index adafcf2..20e1b6d 100644 --- a/R/srr.R +++ b/R/srr.R @@ -5,18 +5,18 @@ #' @param s Focal window size #' @param ... Additional arguments passed to terra::lapp #' -#' @return A terra SpatRaster object of Pike's (1971) Surface Relief Ratio -#' -#' @note +#' @details #' Describes rugosity in a continuous raster surface within a specified window. 
#' The implementation of SRR can be shown as: (mean(x) - min(x)) / (max(x) - min(x)) #' +#' @return A terra SpatRaster object of Pike's (1971) Surface Relief Ratio +#' #' @author Jeffrey S. Evans #' #' @examples #' \donttest{ -#' library(terra) -#' elev <- rast(system.file("extdata/elev.tif", package="spatialEco")) +#' library(terra) +#' elev <- rast(system.file("extdata/elev.tif", package="spatialEco")) #' r.srr <- srr(elev, s=5) #' plot(r.srr, main="Surface Relief Ratio") #' } diff --git a/R/stratified.random.R b/R/stratified.random.R index 8d589ec..82c6eb1 100644 --- a/R/stratified.random.R +++ b/R/stratified.random.R @@ -7,14 +7,12 @@ #' @param reps Number of replicates per strata #' @param replace (TRUE/FALSE) Sampling with replacement #' -#' @return An sf class object containing random samples -#' -#' @note +#' @details #' If replace=FALSE features are removed from consideration in subsequent replicates. #' Conversely, if replace=TRUE, a feature can be selected multiple times across #' replicates. Not applicable if rep=1. #' -#' @note Depends: sf +#' @return An sf class object containing random samples #' #' @author Jeffrey S. Evans #' @@ -25,8 +23,8 @@ #' Canadian Journal of Remote Sensing 32: 126-138. #' #' @examples +#' if(require(sp, quietly = TRUE)) { #' library(sf) -#' if(require(sp, quietly = TRUE)) { #' data(meuse, package = "sp") #' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, #' agr = "constant") @@ -60,7 +58,11 @@ #' # Plot random samples colored by replacement #' ssample$REP <- factor(ssample$REP) #' plot(ssample['REP'], pch=20) -#' } +#' +#' } else { +#' cat("Please install sp package to run example", "\n") +#' } +#' #' @export stratified.random stratified.random <- function(x, strata, n = 10, reps = 1, replace = FALSE) { gtypes = c("POLYGON", "POINT", "LINESTRING", "MULTIPOLYGON", diff --git a/R/subsample.distance.R b/R/subsample.distance.R index 11ffec0..eb4f351 100644 --- a/R/subsample.distance.R +++ b/R/subsample.distance.R @@ -18,8 +18,8 @@ #' #' @examples #' \donttest{ +#' if(require(sp, quietly = TRUE)) { #' library(sf) -#' if(require(sp, quietly = TRUE)) { #' data(meuse, package = "sp") #' meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, #' agr = "constant") @@ -34,7 +34,10 @@ #' diag(dm) <- NA #' cat("\n", "Min distance for subsample", min(dm, na.rm=TRUE), "\n") #' cat("Max distance for subsample", max(dm, na.rm=TRUE), "\n") -#' } +#' +#' } else { +#' cat("Please install sp package to run example", "\n") +#' } #' } #' @export subsample.distance subsample.distance <- function(x, size, d, d.max = NULL, diff --git a/R/summary.cross.cor.R b/R/summary.cross.cor.R index af3b59c..c00d613 100644 --- a/R/summary.cross.cor.R +++ b/R/summary.cross.cor.R @@ -1,8 +1,27 @@ #' @title Summary of spatial cross correlation #' @description summary method for class "cross.cor" +#' #' @param object Object of class cross.cor #' @param ... 
Ignored #' +#' +#' @return +#' When not simulated (k=0), prints the function's list object containing: +#' * I - Global autocorrelation statistic +#' * SCI - A data.frame with two columns representing the xy and yx autocorrelation +#' * nsim - value of NULL indicating p-values were derived from observed data (k=0) +#' * p - Probability based on observations above/below the confidence interval +#' * t.test - Probability based on t-test +#' +#' When simulated (k>0), prints the function's list object containing: +#' * I - Global autocorrelation statistic +#' * SCI - A data.frame with two columns representing the xy and yx autocorrelation +#' * nsim - value representing number of simulations +#' * global.p - p-value of global autocorrelation statistic +#' * local.p - Probability based on simulated data using successful rejection of t-test +#' * range.p - Probability based on range of probabilities resulting from paired t-test +#' @md +#' #' @method summary cross.cor #' #' @export diff --git a/R/summary.effect.size.R b/R/summary.effect.size.R index 724ff0b..09db95b 100644 --- a/R/summary.effect.size.R +++ b/R/summary.effect.size.R @@ -1,8 +1,12 @@ #' @title Summarizing effect size #' @description Summary method for class "effect.size". +#' #' @param object Object of class effect.size #' @param ... Ignored #' +#' @return Prints the output data.frame containing: effect size with upper and lower confidence +#' limits, and mean and sd by group +#' #' @method summary effect.size #' @export summary.effect.size <- function(object, ...) { diff --git a/R/summary.loess.boot.R b/R/summary.loess.boot.R index 2603219..ea21b73 100644 --- a/R/summary.loess.boot.R +++ b/R/summary.loess.boot.R @@ -1,8 +1,28 @@ #' @title Summarizing Loess bootstrap models #' @description Summary method for class "loess.boot". +#' #' @param object Object of class loess.boot #' @param ... Ignored #' +#' +#' @return Same as the print loess.boot data.frame, including: +#' * nreps Number of bootstrap replicates +#' * confidence Confidence interval (region) +#' * span alpha (span) parameter used in loess fit +#' * degree polynomial degree used in loess fit +#' * normalize Normalized data (TRUE/FALSE) +#' * family Family of statistic used in fit +#' * parametric Parametric approximation (TRUE/FALSE) +#' * surface Surface fit, see loess.control +#' * data data.frame of x,y used in model +#' * fit data.frame including: +#' 1) x - Equally-spaced x index (see NOTES) +#' 2) y.fit - loess fit +#' 3) up.lim - Upper confidence interval +#' 4) low.lim - Lower confidence interval +#' 5) stddev - Standard deviation of loess fit at each x value +#' @md +#' #' @method summary loess.boot #' @export summary.loess.boot <- function(object, ...) { @@ -15,11 +35,9 @@ summary.loess.boot <- function(object, ...) { cat("","\n", sep="") cat("summary of fit: ","\n", sep="") summary(object$fit[,"y.fit"]) - cat("","\n", sep="") cat("summary of lower confidence limits: ","\n", sep="") summary(object$fit[,"low.lim"]) - cat("","\n", sep="") cat("summary of upper confidence limits: ","\n", sep="") summary(object$fit[,"up.lim"]) diff --git a/R/swvi.R b/R/swvi.R index 87fff62..3e3be44 100644 --- a/R/swvi.R +++ b/R/swvi.R @@ -17,18 +17,14 @@ #' @param weight.factor Apply partial weights (w * weight.factor) to the NDSVI weights #' @param ... Additional arguments passed to terra::lapp function #' -#' @return A terra SpatRaster class object of the weighted MSAVI metric -#' -#' @description +#' @details #' The intent of this index is to correct the MSAVI or MTVI index for bias associated #' with senescent vegetation. 
This is done by: -#' * deriving the NDSVI; -#' * applying a threshold to limit NDSVI to values associated with senescent vegetation; -#' * converting the index to inverted weights (-1*(NDSVI/sum(NDSVI))); -#' * applying weights to MSAVI or MTVI -#' @md +#' 1. deriving the NDSVI +#' 2. applying a threshold to limit NDSVI to values associated with senescent vegetation +#' 3. converting the index to inverted weights (-1*(NDSVI/sum(NDSVI))) +#' 4. applying weights to MSAVI or MTVI #' -#' @description #' The MSAVI formula follows the modification proposed by Qi et al. (1994), #' often referred to as MSAVI2. MSAVI index reduces soil noise and increases #' the dynamic range of the vegetation signal. The implemented modified version @@ -43,53 +39,48 @@ #' for the background signature of soils while preserving sensitivity to LAI and resistance #' to the influence of chlorophyll. #' -#' @description #' The Normalized difference senescent vegetation index (NDSVI) follows methods from #' Qi et al. (2000). The senescence argument is used to threshold the NDSVI. Values less than this value #' will be NA. The threshold argument is used to apply a threshold to MSAVI. The default is NULL #' but if specified all values (MSAVI <= threshold) will be NA. Applying a weight.factor can be #' used to change the influence of the weights on MSAVI. +#' @md #' +#' @return A terra SpatRaster class object of the weighted MSAVI metric +#' +#' @author Jeffrey S. Evans +#' #' @references #' Haboudane, D., et al. (2004) Hyperspectral Vegetation Indices and Novel Algorithms #' for Predicting Green LAI of Crop Canopies: Modeling and Validation in the Context #' of Precision Agriculture. Remote Sensing of Environment 90:337-352. -#' @references +#' #' Qi J., Chehbouni A., Huete A.R., Kerr Y.H., (1994). Modified Soil Adjusted Vegetation #' Index (MSAVI). Remote Sens Environ 48:119-126. -#' @references +#' #' Qi J., Kerr Y., Chehbouni A., (1994). External factor consideration in vegetation #' index development. Proc. of Physical Measurements and Signatures in Remote Sensing, #' ISPRS, 723-730. -#' @references +#' #' Qi, J., Marsett, R., Moran, M.S., Goodrich, D.C., Heilman, P., Kerr, Y.H., Dedieu, #' G., Chehbouni, A., Zhang, X.X. (2000). Spatial and temporal dynamics of vegetation #' in the San Pedro River basin area. Agricultural and Forest Meteorology. 105:55-68. #' -#' @author Jeffrey S. 
Evans -#' #' @examples -#' \dontrun{ -#' # library(terra) -#' # if(!unlist(lapply("RStoolbox", requireNamespace, quietly=TRUE))) -#' # message("Can't run examples, please install RStoolbox") -#' # -#' # data(lsat) -#' # lsat <- radCor(lsat, metaData = readMeta(system.file( -#' # "external/landsat/LT52240631988227CUB02_MTL.txt", -#' # package="RStoolbox")), method = "apref") -#' # lsat <- rast(lsat) -#' # -#' # # Using Modified Soil-adjusted Vegetation Index (MSAVI) -#' # ( wmsavi <- swvi(red = lsat[[3]], nir = lsat[[4]], swir = lsat[[5]]) ) -#' # plotRGB(lsat, r=6,g=5,b=2, scale=1, stretch="lin") -#' # plot(wmsavi, legend=FALSE, col=rev(terrain.colors(100, alpha=0.35)), add=TRUE ) -#' # -#' # # Using Modified Triangular Vegetation Index 2 (MTVI) -#' # ( wmtvi <- swvi(red = lsat[[3]], nir = lsat[[4]], swir = lsat[[5]], -#' # green = lsat[[3]], mtvi = TRUE) ) -#' # plotRGB(lsat, r=6,g=5,b=2, scale=1, stretch="lin") -#' # plot(wmtvi, legend=FALSE, col=rev(terrain.colors(100, alpha=0.35)), add=TRUE ) +#' \donttest{ +#' library(terra) +#' lsat <- rast(system.file("/extdata/Landsat_TM5.tif", package="spatialEco")) +#' +#' # Using Modified Soil-adjusted Vegetation Index (MSAVI) +#' ( wmsavi <- swvi(red = lsat[[3]], nir = lsat[[4]], swir = lsat[[5]]) ) +#' plotRGB(lsat, r=6,g=5,b=2, scale=1, stretch="lin") +#' plot(wmsavi, legend=FALSE, col=rev(terrain.colors(100, alpha=0.35)), add=TRUE ) +#' +#' # Using Modified Triangular Vegetation Index 2 (MTVI) +#' ( wmtvi <- swvi(red = lsat[[3]], nir = lsat[[4]], swir = lsat[[5]], +#' green = lsat[[3]], mtvi = TRUE) ) +#' plotRGB(lsat, r=6,g=5,b=2, scale=1, stretch="lin") +#' plot(wmtvi, legend=FALSE, col=rev(terrain.colors(100, alpha=0.35)), add=TRUE ) #' } #' #' @export swvi diff --git a/R/time_to_event.R b/R/time_to_event.R index b3733d2..2952d72 100644 --- a/R/time_to_event.R +++ b/R/time_to_event.R @@ -9,9 +9,7 @@ #' @param na.action c("fail", "ignore"), if "fail" function will return error with NA's #' with "ignore" NA values will be included in count to event #' -#' @return A vector value representing the time to event -#' -#' @description +#' @details #' The time to event represents the sum of positions, in the vector, #' until the specified value is found ie., (0,0,1) would be 3 or, #' 2 with up.to=TRUE. The int argument allows for rounding a continuous @@ -23,10 +21,11 @@ #' function behavior, causing it to fail or count NAs. Note that it makes no #' sense to actually remove NAs as it will make the run uninterpretable. #' +#' @return A vector value representing the time to event +#' #' @author Jeffrey S. Evans #' #' @examples -#' #' # Binomial instance #' time_to_event(c(0,0,0,0,1,0,0,0,1,0)) #' time_to_event(c(0,0,0,0,1,0,0,0,1,0), up.to = TRUE) diff --git a/R/topo.distance.R b/R/topo.distance.R index d0b4881..1973a75 100644 --- a/R/topo.distance.R +++ b/R/topo.distance.R @@ -1,18 +1,17 @@ #' @title Topographic distance -#' @description Calculates topographic corrected distance for a -#' LINESTRING object +#' @description +#' Calculates topographic corrected distance for a line object #' #' @param x sf LINESTRING object -#' @param r terra or raster class elevation raster +#' @param r terra SpatRaster class elevation raster #' @param echo (FALSE/TRUE) print progress to screen #' +#' @details +#' This function corrects straight-line (euclidean) distances for topographic-slope effect. 
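The correction described above amounts to accumulating slope (surface) distance along the line. A minimal base-R sketch of the idea, not the function's internals, using hypothetical segment lengths and elevation changes:

d  <- c(100, 100, 100)   # hypothetical planar segment lengths (m)
dz <- c(5, 20, -10)      # hypothetical elevation change over each segment (m)
sum(sqrt(d^2 + dz^2))    # topographically corrected line length
sum(d)                   # uncorrected euclidean length, for comparison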
+#' +#' @return #' Vector of corrected topographic distances same length as nrow(x) #' -#' @note -#' This function corrects straight-line (euclidean) distances for -#' topographic-slope effect. -#' #' @author Jeffrey S. Evans #' #' @examples @@ -61,7 +60,7 @@ topo.distance <- function(x, r, echo = FALSE) { } line.dist <- vector() for(i in 1:nrow(x)) { - if(echo) cat("Calculating corrected distance for:", i, "of", nrow(x), "\n") + if(echo) message("Calculating corrected distance for: ", i, " of ", nrow(x)) pts <- sf::st_cast(sf::st_line_sample(x[i,], density=1/terra::res(r)[1]), "POINT") pts <- sf::st_as_sf(pts) diff --git a/R/trasp.R b/R/trasp.R index 168d6f6..5c53548 100644 --- a/R/trasp.R +++ b/R/trasp.R @@ -1,14 +1,11 @@ #' @title Solar-radiation Aspect Index -#' @description Calculates the Roberts and Cooper (1989) Solar-radiation -#' Aspect Index +#' @description +#' Calculates the Roberts and Cooper (1989) Solar-radiation Aspect Index #' #' @param x A terra SpatRaster object #' @param ... Additional arguments passed to terra::app #' -#' @return -#' A terra SpatRaster object of Roberts and Cooper (1989) Solar-radiation Aspect Index -#' -#' @description +#' @details #' Roberts and Cooper (1989) rotates (transforms) the circular aspect to assign a #' value of zero to land oriented in a north-northeast direction (typically the #' coolest and wettest orientation), and a value of one on the hotter, drier @@ -16,12 +13,15 @@ #' The metric is defined as: trasp = ( 1 - cos((pi/180)(a-30)) ) / 2 #' where; a = aspect in degrees #' +#' @return +#' A terra SpatRaster object of Roberts and Cooper (1989) Solar-radiation Aspect Index +#' #' @author Jeffrey S. Evans #' #' @references #' Roberts. D.W., and Cooper, S.V. (1989). Concepts and techniques of vegetation mapping. -#' In Land Classifications Based on Vegetation: Applications for Resource Management. -#' USDA Forest Service GTR INT-257, Ogden, UT, pp 90-96 +#' In Land Classifications Based on Vegetation: Applications for Resource Management. +#' USDA Forest Service GTR INT-257, Ogden, UT, pp 90-96 #' #' @examples #' library(terra) diff --git a/R/tri.R b/R/tri.R index 0d67046..8def4bd 100644 --- a/R/tri.R +++ b/R/tri.R @@ -8,24 +8,24 @@ #' approximation. #' @param ... Additional arguments passed to terra::focal or terra::app #' -#' @return A terra SpatRaster class object of the TRI -#' -#' @description +#' @details #' The algebraic approximation is considerably faster. However, because of the #' inclusion of the center cell, the larger the scale the larger the divergence -#' of the minimum value. +#' of the minimum value. Results are driven by local variations, so fixed thresholds +#' are not very reliable. However, there are some recommended breaks (e.g., Riley et al., 1999). #' -#' @description -#' Recommended ranges for classifying Topographic Ruggedness Index: -#' * 0-80 - level terrain surface. -#' * 81-116 - nearly level surface. -#' * 117-161 - slightly rugged surface. -#' * 162-239 - intermediately rugged surface. -#' * 240-497 - moderately rugged surface. -#' * 498-958 - highly rugged surface. -#' * gt 959 - extremely rugged surface. +#' Riley et al. (1999) ranges for classifying the Topographic Ruggedness Index: +#' * 0-80 - level terrain surface. +#' * 81-116 - nearly level surface. +#' * 117-161 - slightly rugged surface. +#' * 162-239 - intermediately rugged surface. +#' * 240-497 - moderately rugged surface. +#' * 498-958 - highly rugged surface. +#' * gt 959 - extremely rugged surface. 
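The Riley et al. (1999) breaks listed above can be applied with terra::classify; a sketch assuming the package elev.tif sample raster and default tri() arguments:

library(terra)
elev <- rast(system.file("extdata/elev.tif", package="spatialEco"))
r.tri <- tri(elev)  # assumes default arguments
# from-to-becomes reclassification matrix following the breaks above
rcl <- matrix(c(0,80,1, 80,116,2, 116,161,3, 161,239,4,
                239,497,5, 497,958,6, 958,Inf,7), ncol=3, byrow=TRUE)
plot(classify(r.tri, rcl, include.lowest=TRUE))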
#' @md #' +#' @return A terra SpatRaster class object of the TRI +#' #' @author Jeffrey S. Evans #' #' @references diff --git a/R/vrm.R b/R/vrm.R index 0d07258..fb65ade 100644 --- a/R/vrm.R +++ b/R/vrm.R @@ -3,16 +3,15 @@ #' ruggedness measure #' #' @param x A terra SpatRaster class object -#' @param s Scale of window. Must be odd number, can -#' represent 2 dimensions (eg., s=c(3,5) would -#' represent a 3 x 5 window) +#' @param s Scale of window. Must be an odd number, can represent 2 dimensions +#' (e.g., s=c(3,5) would represent a 3 x 5 window) #' -#' @return A terra SpatRaster class object of the VRI -#' -#' @note +#' @details #' This function measures terrain ruggedness by calculating the vector #' ruggedness measure. #' +#' @return A terra SpatRaster class object of the VRM +#' #' @author Jeffrey S. Evans #' #' @references diff --git a/R/winsorize.R b/R/winsorize.R index 8b5cf94..d1701a9 100644 --- a/R/winsorize.R +++ b/R/winsorize.R @@ -11,15 +11,15 @@ #' quantile function. #' @param na.rm (FALSE/TRUE) should NAs be omitted? #' -#' @return -#' A transformed vector the same length as x, unless na.rm is TRUE, then x is length -#' minus number of NA's -#' -#' @description +#' @details #' Winsorization is the transformation of a distribution by limiting extreme values #' to reduce the effect of spurious outliers. This is done by shrinking outlying #' observations to the border of the main part of the distribution. #' +#' @return +#' A transformed vector the same length as x, unless na.rm is TRUE, then x is length +#' minus number of NA's +#' #' @author Jeffrey S. Evans #' #' @references diff --git a/R/wt.centroids.R b/R/wt.centroids.R index 73388ea..6a7637e 100644 --- a/R/wt.centroids.R +++ b/R/wt.centroids.R @@ -6,17 +6,17 @@ #' @param p Weights column in x #' @param spatial (TRUE/FALSE) Output sf POINT object #' -#' @return -#' An x,y coordinate or sf POINT object representing the weighted or unweighted -#' coordinate centroid -#' -#' @note +#' @details #' The weighted centroid is calculated as: #' [Xw]=[X]*[p], [Yw]=[Y]*[p], [sXw]=SUM[Xw], [sYw]=SUM[Yw], [sP]=SUM[p] #' wX=[sXw]/[sP], wY=[sYw]/[sP] #' where; X=X coordinate(S), Y=Y coordinate(S), p=WEIGHT #' -#' @note Depends: sp +#' @return +#' An x,y coordinate or sf POINT object representing the weighted or unweighted +#' coordinate centroid +#' +#' @author Jeffrey S. Evans #' #' @examples #' p = c("sf", "sp") diff --git a/R/zonal.stats.R b/R/zonal.stats.R deleted file mode 100644 index 1785799..0000000 --- a/R/zonal.stats.R +++ /dev/null @@ -1,18 +0,0 @@ -#' @title zonal.stats -#' @description Polygon zonal statistics of a raster -#' -#' @param ... arguments passed to terra::extract -#' @return NA -#' -#' @examples -#' \dontrun{ -#' terra::extract() -#' } -#' -#' @export -zonal.stats <- function(...) { - .Deprecated("zonal.stat", package="spatialEco", - msg="Function is deprecated because terra::extract or - exactextractr::exact_extract can accept custom functions - for specialized statistics ") -} diff --git a/man/all_pairwise.Rd b/man/all_pairwise.Rd index 543ba70..41a48d7 100644 --- a/man/all_pairwise.Rd +++ b/man/all_pairwise.Rd @@ -16,7 +16,7 @@ A list object with increasing all combination objects, \description{ Creates all pairwise combinations list for iteration } -\note{ +\details{ This returns a list of vector combinations starting with pairwise, as the first nested list element, then in groups of threes, fours, to the length of the vector. 
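For intuition, the nested combination structure described here can be mimicked with base R combn (a sketch, not the package implementation):

x <- 1:4
combos <- lapply(2:length(x), function(m) combn(x, m, simplify = FALSE))
# combos[[1]] holds the pairwise sets, combos[[2]] the triples, and so on
str(combos, max.level = 1)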
diff --git a/man/annulus.matrix.Rd b/man/annulus.matrix.Rd index 7dbabbc..f121806 100644 --- a/man/annulus.matrix.Rd +++ b/man/annulus.matrix.Rd @@ -22,7 +22,7 @@ A matrix object with defined null.value and 1, representing retained rings Creates a square matrix representing annulus position values of 1 and defined null } -\note{ +\details{ This function will return a matrix of 1 and defined null.value based on a specification of the scale, inner scale and outer scale. The scale defines how many rings will be represented in the matrix based on (2 * scale - 1). So, a scale of 3 will result in a diff --git a/man/aspline.downscale.Rd b/man/aspline.downscale.Rd index 08a1285..91cfb2d 100644 --- a/man/aspline.downscale.Rd +++ b/man/aspline.downscale.Rd @@ -44,7 +44,7 @@ A list object containing: Downscales a raster to a higher resolution raster multivariate adaptive regression splines (MARS). } -\note{ +\details{ This function uses Multivariate Adaptive Regression Splines, to downscale a raster based on higher-resolution or more detailed raster data specified as covariate(s). This is similar to the raster.downsample function which uses a robust regression and is a frequentest model for @@ -54,16 +54,17 @@ Using add.coords adds spatial coordinates to the model, including creating the a rasters for prediction. } \examples{ -\dontrun{ -library(geodata) +\donttest{ +if (require(geodata, quietly = TRUE)) { library(terra) +library(geodata) # Download example data (requires geodata package) - elev <- geodata::elevation_30s(country="SWZ", path=tempdir()) + elev <- elevation_30s(country="SWZ", path=tempdir()) slp <- terrain(elev, v="slope") x <- c(elev,slp) names(x) <- c("elev","slope") - tmax <- geodata::worldclim_country(country="SWZ", var="tmax", + tmax <- worldclim_country(country="SWZ", var="tmax", path=tempdir()) tmax <- crop(tmax[[1]], ext(elev)) names(tmax) <- "tmax" @@ -79,8 +80,12 @@ tmax.ds <- aspline.downscale(x, tmax, add.coords=TRUE, keep.model=TRUE) plot(x[[2]], main="slope") plot(tmax.ds$downscale, main="Downscaled Temp max") par(opar) - + +} else { + cat("Please install geodata package to run example", "\n") } +} + } \references{ Friedman (1991) Multivariate Adaptive Regression Splines (with discussion) diff --git a/man/background.Rd b/man/background.Rd index 1f56b05..1fa62a3 100644 --- a/man/background.Rd +++ b/man/background.Rd @@ -30,7 +30,7 @@ A sf POINT feature class or data.frame with x,y coordinates Creates a point sample that can be used as a NULL for SDM's and other modeling approaches. } -\note{ +\details{ This function creates a background point sample based on an extent or polygon sampling region. The known argument can be used with d to remove sample points based on distance-based proximity to existing diff --git a/man/bbox_poly.Rd b/man/bbox_poly.Rd index 06c61df..5013edb 100644 --- a/man/bbox_poly.Rd +++ b/man/bbox_poly.Rd @@ -15,7 +15,7 @@ A single feature sf class polygon object \description{ Creates a polygon from a vector or raster extent } -\note{ +\details{ If not a spatial object, expected order of input for x is: xmin, ymin, xmax, ymax. Where; xmin, ymin and the coordinates of top left corner of the bounding box and xmax, ymax represent the bottom right corner. The maximum @@ -23,36 +23,34 @@ value of xmax is width of the extent while maximum value of ymax is the height of the extent. 
} \examples{ -p = c("sf", "sp", "terra") - if(any(!unlist(lapply(p, requireNamespace, quietly=TRUE)))) { - m = which(!unlist(lapply(p, requireNamespace, quietly=TRUE))) - message("Can't run examples, please install ", paste(p[m], collapse = " ")) - } else { - invisible(lapply(p, require, character.only=TRUE)) - +if(require(sp, quietly = TRUE)) { +library(terra) +library(sf) data(meuse, package = "sp") meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, agr = "constant") +# raster (terra) +r <- rast(ext(meuse)) + r[] <- runif(ncell(r)) + crs(r) <- "epsg:28992" +e <- bbox_poly(r) + +plot(r) + plot(st_geometry(e), border="red", add=TRUE) + +# extent vector +e <- bbox_poly(c(178605, 329714, 181390, 333611)) + plot(e) + # vector bounding box e <- bbox_poly(meuse) plot(st_geometry(meuse), pch=20) plot(st_geometry(e), add=TRUE) -# raster (terra) - r <- rast(ext(meuse)) - r[] <- runif(ncell(r)) - crs(r) <- "epsg:28992" - e <- bbox_poly(r) - - plot(r) - plot(st_geometry(e), border="red", add=TRUE) - -# extent vector -e <- bbox_poly(c(178605, 329714, 181390, 333611)) -plot(e) - +} else { + cat("Please install sp package to run this example", "\n") } } diff --git a/man/bearing.distance.Rd b/man/bearing.distance.Rd index 4acf645..286279c 100644 --- a/man/bearing.distance.Rd +++ b/man/bearing.distance.Rd @@ -24,7 +24,7 @@ a new point representing location of baring and distance Calculates a new point [X,Y] based on defined bearing and distance } -\note{ +\details{ East of north is a surveying convention and defaults to true. } \examples{ diff --git a/man/breeding.density.Rd b/man/breeding.density.Rd index c6d14bb..46b06a8 100644 --- a/man/breeding.density.Rd +++ b/man/breeding.density.Rd @@ -34,7 +34,7 @@ A list object with: Calculates breeding density areas base on population counts and spatial point density. } -\note{ +\details{ The breeding density areas model identifies the Nth-percent population exhibiting the highest spatial density and counts/frequency. It then buffers these points by a specified distance to produce breeding area polygons. If you would like to recreate @@ -42,7 +42,7 @@ the results in Doherty et al., (2010), then define bw = 6400m and b[if p < 0.75 b = 6400m, | p >= 0.75 b = 8500m] } \examples{ -if(require(sf, quietly = TRUE)) { +library(sf) n=1500 bb <- rbind(c(-1281299,-761876.5),c(1915337,2566433.5)) @@ -62,8 +62,6 @@ bb.mat <- round(cbind(c(bb[1,1], bb[1,2], bb[1,2], bb[1,1]), legend("bottomright", legend=c("selected areas","selected sites", "all sites"), bg="white", fill=c("grey","red", "black"), pt.cex = 2) -} - } \references{ Doherty, K.E., J.D. Tack, J.S. Evans, D.E. Naugle (2010) Mapping breeding densities of diff --git a/man/built.index.Rd b/man/built.index.Rd index ebc36e9..89e6d66 100644 --- a/man/built.index.Rd +++ b/man/built.index.Rd @@ -34,9 +34,13 @@ band 7, OLI (landsat 8) band 7} \item{method}{Method to use for index options are "Bouhennache", "Zha", "Xu"} } +\value{ +A terra raster object of the built index +} \description{ Remote sensing built-up index - +} +\details{ This function calculates the built-up index. 
Three methods are available: \itemize{ \item Bouhennache is a new method that uses a larger portion of the VIR/NIR @@ -72,9 +76,9 @@ OLI (Landsat 8) } } \examples{ -\dontrun{ +\donttest{ library(terra) - lsat <- rast(system.file("extdata/Landsat_TM5", package="spatialEco")) + lsat <- rast(system.file("/extdata/Landsat_TM5.tif", package="spatialEco")) plotRGB(lsat, r=3, g=2, b=1, scale=1.0, stretch="lin") # Using Bouhennache et al., (2018) method (needs green, red, swir1 and swir2) @@ -94,7 +98,6 @@ library(terra) swir1 = lsat[[5]], , method = "Xu") ) plotRGB(lsat, r=3, g=2, b=1, scale=1, stretch="lin") plot(xu, legend=FALSE, col=rev(terrain.colors(100, alpha=0.35)), add=TRUE ) - } } diff --git a/man/cgls_urls.Rd b/man/cgls_urls.Rd index 81751c4..9a062ff 100644 --- a/man/cgls_urls.Rd +++ b/man/cgls_urls.Rd @@ -51,8 +51,7 @@ absorbed by the vegetation \item ndvi Normalized Difference Vegetation Index } -Not yet implemented; Soil Water Index, Surface Soil Moisture, -Copernicus product details: http://land.copernicus.eu/global/products/ +Not yet implemented; Soil Water Index, Surface Soil Moisture, and Land Surface Temperature. } \examples{ \donttest{ @@ -84,6 +83,7 @@ all.urls <- cgls_urls(dates = d, resolution = 300, # basename(all.urls[i])), mode = 'wb') # } } + } \author{ Jeffrey S. Evans \href{mailto:jeffrey_evans@tnc.org}{jeffrey_evans@tnc.org} diff --git a/man/class.comparison.Rd b/man/class.comparison.Rd deleted file mode 100644 index d395f2d..0000000 --- a/man/class.comparison.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/class.comparison.R -\name{class.comparison} -\alias{class.comparison} -\title{Class comparison between two nominal rasters} -\usage{ -class.comparison(...) -} -\arguments{ -\item{...}{arguments passed to raster.change} -} -\value{ -NA -} -\description{ -Compares two categorical rasters using Cohen's Kappa (d) - or paired t-test statistic(s) -} -\examples{ - \dontrun{ - raster.change() -} - -} diff --git a/man/classBreaks.Rd b/man/classBreaks.Rd index 4e39745..485679d 100644 --- a/man/classBreaks.Rd +++ b/man/classBreaks.Rd @@ -15,14 +15,13 @@ classBreaks(x, n, type = c("equal", "quantile", "std", "geometric")) } \value{ A vector containing class break values the length is n+1 to allow for - specification of ranges +specification of ranges } \description{ Finds class breaks in a distribution } -\note{ -The robust std method uses sqrt(sum(x^2)/(n-1)) to center the data before - deriving "pretty" breaks. +\details{ +The robust std method uses sqrt(sum(x^2)/(n-1)) to center the data before deriving "pretty" breaks. } \examples{ y <- rnbinom(100, 10, 0.5) diff --git a/man/collinear.Rd b/man/collinear.Rd index 7a72be4..462fc57 100644 --- a/man/collinear.Rd +++ b/man/collinear.Rd @@ -54,4 +54,6 @@ plot(cor.data[[1]], pch=20) } \author{ Jeffrey S. Evans tnc.org> + +Jeffrey S. Evans } diff --git a/man/combine.Rd b/man/combine.Rd index 8ae23a8..78386b6 100644 --- a/man/combine.Rd +++ b/man/combine.Rd @@ -10,27 +10,21 @@ combine(x) \item{x}{raster stack/brick or SpatialPixelsDataFrame object} } \value{ -A ratified rasterLayer or a list containing a SpatialPixelsDataFrame -and a data.frame of unique combinations. +A ratified (factor) terra SpatRaster representing unique combinations. 
} \description{ Combines rasters into all unique combinations of inputs } \details{ +A single ratified raster object is returned with the summary table +as the raster attribute table; this is most similar to the ESRI +format resulting from their combine function. + Please note that this is not a memory safe function that utilizes -rasters out of memory in the manner that the raster package does. - -If sp = TRUE the object will be a list with "combine", containing -the SpatialPixelsDataFrame with the value attribute containing the -unique combinations, and "summary" with the summary table of collapsed -combinations and associated attributes. - -If sp = FALSE the a single ratified rasterLayer class object is returned -with the summary table as the raster attribute table, this is most similar -to the ESRI format resulting from their combine function. +rasters out of memory in the manner that the terra package does. } \examples{ -if(require(terra, quietly = TRUE)) { +library(terra) # Create example data (with a few NA's introduced) r1 <- rast(nrows=100, ncol=100) @@ -56,8 +50,6 @@ if(require(terra, quietly = TRUE)) { # or, from separate layers cr <- combine(c(r1,r3)) -} - } \author{ Jeffrey S. Evans diff --git a/man/concordance.Rd b/man/concordance.Rd index e676748..a0eb0bb 100644 --- a/man/concordance.Rd +++ b/man/concordance.Rd @@ -18,7 +18,7 @@ list object with: concordance, discordance, tied and pairs Performs a concordance/discordance (C-statistic) test on binomial models. } -\note{ +\details{ Test of binomial regression for the hypothesis that probabilities of all positives [1], are greater than the probabilities of the nulls [0]. The concordance would be 100% for a perfect model where discordance is the diff --git a/man/conf.interval.Rd b/man/conf.interval.Rd index 58af6d5..f2c82d0 100644 --- a/man/conf.interval.Rd +++ b/man/conf.interval.Rd @@ -16,21 +16,19 @@ conf.interval(x, cl = 0.95, stat = "mean", std.error = TRUE) \item{std.error}{Return standard error (TRUE/FALSE)} } \value{ -lci Lower confidence interval value - -uci Upper confidence interval value - -mean If stat = "mean", mean value of distribution - -mean Value of the mean or median - -conf.level Confidence level used for confidence interval - -std.error If std.error = TRUE standard error of distribution +data.frame containing: +\itemize{ +\item lci - Lower confidence interval value +\item uci - Upper confidence interval value +\item mean - If stat = "mean", mean value of distribution +\item mean - Value of the mean or median +\item conf.level - Confidence level used for confidence interval +\item std.error - If std.error = TRUE standard error of distribution +} } \description{ -Calculates confidence interval for the mean or median of a distribution with - unknown population variance +Calculates confidence interval for the mean or median of a distribution with +unknown population variance } \examples{ x <- runif(100) @@ -49,5 +47,5 @@ Calculates confidence interval for the mean or median of a distribution with } \author{ -Jeffrey S. Evans +Jeffrey S. 
Evans \href{mailto:jeffrey_evans@tnc.org}{jeffrey_evans@tnc.org} } diff --git a/man/correlogram.Rd b/man/correlogram.Rd index df48e78..39315e2 100644 --- a/man/correlogram.Rd +++ b/man/correlogram.Rd @@ -19,7 +19,7 @@ great circle in kilometers} \item{...}{Arguments passed to cor ('pearson', 'kendall' or 'spearman')} } \value{ -A list object containing: +A plot of the correlogram and a list object containing: \itemize{ \item autocorrelation is a data.frame object with the following components \item autocorrelation - Autocorrelation value for each distance lag diff --git a/man/cross.tab.Rd b/man/cross.tab.Rd index 1f627e8..def26c6 100644 --- a/man/cross.tab.Rd +++ b/man/cross.tab.Rd @@ -25,7 +25,7 @@ a table with the cross tabulated counts \description{ Creates a labeled cross tabulation between two nominal rasters } -\note{ +\details{ This function returns a cross tabulation between two nominal rasters. Arguments allow for labeling the results and returning proportions rather than counts. It also accounts for asymmetrical classes between @@ -56,9 +56,7 @@ cross.tab(lulc2010, lulc2020, values = v, labs = l, pct=TRUE) } \references{ -Pontius Jr, R.G., Shusas, E., McEachern, M. (2004). Detecting -important categorical land changes while accounting for persistence. -Agriculture, Ecosystems & Environment 101(2):251-268. +Pontius Jr, R.G., Shusas, E., McEachern, M. (2004). Detecting important categorical land changes +while accounting for persistence. Agriculture, Ecosystems & Environment 101(2):251-268. } \author{ Jeffrey S. Evans diff --git a/man/crossCorrelation.Rd b/man/crossCorrelation.Rd index e1a7b6f..4bcad10 100644 --- a/man/crossCorrelation.Rd +++ b/man/crossCorrelation.Rd @@ -69,7 +69,7 @@ When not simulated k=0, a list containing: \item clusters - If "clust" argument TRUE, vector representing LISA clusters } -when simulated (k>0), a list containing: +When simulated (k>0), a list containing: \itemize{ \item I - Global autocorrelation statistic \item SCI - A data.frame with two columns representing the xy and yx autocorrelation @@ -113,30 +113,33 @@ the spatial weights dist.function = "inv.power") ) \donttest{ - library(sp) - library(spdep) - - data(meuse) - coordinates(meuse) <- ~x+y - - #### Using a default spatial weights matrix method (inverse power function) - ( I <- crossCorrelation(meuse$zinc, meuse$copper, - coords = coordinates(meuse), k=99) ) - meuse$lisa <- I$SCI[,"lsci.xy"] - spplot(meuse, "lisa") +library(sf) +library(spdep) - #### Providing a distance matrix - Wij <- spDists(meuse) - ( I <- crossCorrelation(meuse$zinc, meuse$copper, w = Wij, k=99) ) - - #### Providing an inverse power function weights matrix - Wij <- spDists(meuse) - Wij <- 1 / Wij - diag(Wij) <- 0 - Wij <- Wij / sum(Wij) - diag(Wij) <- 0 - ( I <- crossCorrelation(meuse$zinc, meuse$copper, w = Wij, - dist.function = "none", k=99) ) + if (require(sp, quietly = TRUE)) { data(meuse, package = "sp") meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, agr = "constant") } +#### Using a default spatial weights matrix method (inverse power function) +( I <- crossCorrelation(meuse$zinc, meuse$copper, + coords = st_coordinates(meuse)[,1:2], k=99) ) + meuse$lisa <- I$SCI[,"lsci.xy"] + plot(meuse["lisa"], pch=20) +#### Providing a distance matrix +if (require(units, quietly = TRUE)) { + Wij <- units::drop_units(st_distance(meuse)) + ( I <- crossCorrelation(meuse$zinc, meuse$copper, w = Wij, k=99) ) +#### Providing an inverse power function weights matrix + Wij <- 1 / Wij + diag(Wij) <- 0 + Wij <- Wij / sum(Wij) + diag(Wij) <- 0 + ( I <- crossCorrelation(meuse$zinc, meuse$copper, w = Wij, 
dist.function = "none", k=99) ) +} } } diff --git a/man/csi.Rd b/man/csi.Rd index 9b1ae95..eab3e75 100644 --- a/man/csi.Rd +++ b/man/csi.Rd @@ -12,21 +12,21 @@ csi(x, y = NULL) \item{y}{If x is a vector, then a vector object} } \value{ -If x is a matrix, a list object with: similarity and angular.similarity -matrices or, if x and y are vectors, a vector of similarity and angular.similarity +If x is a matrix, a list object with: similarity and angular.similarity matrices or, +if x and y are vectors, a vector of similarity and angular.similarity } \description{ Calculates the cosine similarity and angular similarity on two vectors or a matrix } -\note{ +\details{ The cosine similarity index is a measure of similarity between two vectors of an inner product space. This index is bested suited for high-dimensional positive variable space. One useful application of the index is to measure separability of clusters derived from algorithmic approaches (e.g., k-means). It is a good common practice to center the data before calculating the index. It should be noted that the cosine similarity index is mathematically, and often numerically, equivalent to the -Pearson's correlation coefficient +Pearson's correlation coefficient The cosine similarity index is derived: s(xy) = x * y / ||x|| * ||y||, where the expected is 1.0 (perfect similarity) @@ -36,13 +36,13 @@ angular similarity = 1 - (cos(s)^-1/pi) } \examples{ # Compare two vectors (centered using scale) - x=runif(100) - y=runif(100)^2 - csi(as.vector(scale(x)),as.vector(scale(y))) +x=runif(100) +y=runif(100)^2 +csi(as.vector(scale(x)),as.vector(scale(y))) - #' # Compare columns (vectors) in a matrix (centered using scale) - x <- matrix(round(runif(100),0),nrow=20,ncol=5) - ( s <- csi(scale(x)) ) +# Compare columns (vectors) in a matrix (centered using scale) +x <- matrix(round(runif(100),0),nrow=20,ncol=5) +( s <- csi(scale(x)) ) # Compare vector (x) to each column in a matrix (y) y <- matrix(round(runif(500),3),nrow=100,ncol=5) diff --git a/man/curvature.Rd b/man/curvature.Rd index e8b308d..67b3860 100644 --- a/man/curvature.Rd +++ b/man/curvature.Rd @@ -19,7 +19,7 @@ raster class object of surface curvature \description{ Calculates Zevenbergen & Thorne, McNab's or Bolstad's curvature } -\note{ +\details{ The planform and profile curvatures are the second derivative(s) of the elevation surface, or the slope of the slope. Profile curvature is in the direction of the maximum slope, and the planform curvature is @@ -34,7 +34,7 @@ Total curvature is the sigma of the profile and planform curvatures. A value of 0 in profile, planform or total curvature, indicates the surface is flat. The planform, profile and total curvatures are derived using Zevenbergen & Thorne (1987) via a quadratic equation fit to eight neighbors -as such, the s (focal window size) argument is ignored. +as such, the s (focal window size) argument is ignored. McNab's and Bolstad's variants of the surface curvature (concavity/convexity) index (McNab 1993; Bolstad & Lillesand 1992; McNab 1989). The index is based diff --git a/man/dahi.Rd b/man/dahi.Rd index 95f8d37..c102f3d 100644 --- a/man/dahi.Rd +++ b/man/dahi.Rd @@ -17,7 +17,8 @@ terra SpatRaster class object Diurnal Anisotropic Heat Index } \description{ Simple approximation of the anisotropic diurnal heat (Ha) distribution - +} +\details{ The Diurnal Anisotropic Heat Index is based on this equation. 
Ha = cos(amax - a) * arctan(b) Where; amax defines the aspect with the maximum total heat diff --git a/man/daymet.point.Rd b/man/daymet.point.Rd index df501cd..0ad1210 100644 --- a/man/daymet.point.Rd +++ b/man/daymet.point.Rd @@ -30,12 +30,12 @@ daymet.point( \item{echo}{(TRUE/FALSE) Echo progress} } \value{ -A data.frame with climate results +A data.frame with geographic coordinate point-level climate results } \description{ Downloads DAYMET climate variables for specified point and time-period } -\note{ +\details{ data is available for Long -131.0 W and -53.0 W; lat 52.0 N and 14.5 N Function uses the Single Pixel Extraction tool and returns year, yday, dayl(s), prcp (mm/day), srad (W/m^2), swe (kg/m^2), tmax (deg c), diff --git a/man/dispersion.Rd b/man/dispersion.Rd index a9ff7ff..42efc14 100644 --- a/man/dispersion.Rd +++ b/man/dispersion.Rd @@ -16,7 +16,7 @@ data.frame with columns H values for each target, H , sH, sHmax Calculates the dispersion ("rarity") of targets associated with planning units } -\note{ +\details{ The dispersion index (H-prime) is calculated H = sum( sqrt(p) / sqrt(a) ) where; P = (sum of target in planning unit / sum of target across all planning units) and a = (count of planning units containing diff --git a/man/dissection.Rd b/man/dissection.Rd index 69bb683..37d95b7 100644 --- a/man/dissection.Rd +++ b/man/dissection.Rd @@ -20,7 +20,7 @@ A SpatRaster class object of Martonne's modified dissection Calculates the Evans (1972) Martonne's modified dissection } -\note{ +\details{ Dissection is calculated as: ( z(s) - min(z(s)) ) / ( max(z(s)) - min(z(s)) ) } diff --git a/man/download.daymet.Rd b/man/download.daymet.Rd deleted file mode 100644 index c0e1d90..0000000 --- a/man/download.daymet.Rd +++ /dev/null @@ -1,35 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/download.daymet.R -\name{download.daymet} -\alias{download.daymet} -\title{Download DAYMET} -\usage{ -download.daymet(...) -} -\arguments{ -\item{...}{ignored} -} -\description{ -Batch download of daily gridded DAYMET climate data -} -\details{ -DAYMET website: \url{http://daymet.ornl.gov}, -path structure: /year/tile_year/file.nc -} -\references{ -Thornton P.E., S.W. Running and M.A. White (1997) Generating surfaces of daily - meteorological variables over large regions of complex terrain. Journal of - Hydrology 190: 214-251. - -Thornton, P.E. and S.W. Running (1999) An improved algorithm for estimating - incident daily solar radiation from measurements of temperature, humidity, - and precipitation. Agriculture and Forest Meteorology. 93:211-228. - -Thornton, P.E., H. Hasenauer and M.A. White (2000) Simultaneous estimation - of daily solar radiation and humidity from observed temperature and - precipitation: An application over complex terrain in Austria. - Agricultural and Forest Meteorology 104:255-271. -} -\author{ -Jeffrey S. Evans -} diff --git a/man/download.hansen.Rd b/man/download.hansen.Rd deleted file mode 100644 index 521a24c..0000000 --- a/man/download.hansen.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/download.hansen.R -\name{download.hansen} -\alias{download.hansen} -\title{Download Hansen Forest 2000-2013 Change} -\usage{ -download.hansen(...) -} -\arguments{ -\item{...}{Nonexistent parameters} -} -\description{ -Download of Hansen Global Forest Change 2000-2013 -} -\author{ -Jeffrey S. 
Evans -} diff --git a/man/download.prism.Rd b/man/download.prism.Rd deleted file mode 100644 index de71b4b..0000000 --- a/man/download.prism.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/download.prism.R -\name{download.prism} -\alias{download.prism} -\title{Download PRISM} -\usage{ -download.prism(...) -} -\arguments{ -\item{...}{Nonexistent parameters} -} -\description{ -Batch download of monthly gridded PRISM climate data -} -\author{ -Jeffrey S. Evans -} diff --git a/man/erase.point.Rd b/man/erase.point.Rd index 86349a1..9d51b75 100644 --- a/man/erase.point.Rd +++ b/man/erase.point.Rd @@ -14,12 +14,12 @@ erase.point(y, x, inside = TRUE) \item{inside}{(TRUE/FALSE) Remove points inside polygon, else outside polygon} } \value{ -A sf POINT object +An sf POINT object } \description{ Removes points intersecting a polygon feature class } -\note{ +\details{ Used to erase points that intersect polygon(s). The default of inside=TRUE erases points inside the polygons however, if inside=FALSE then the function results in an intersection where points that @@ -27,16 +27,11 @@ intersect the polygon are retained. } \examples{ -p = c("sf", "sp") - if(any(!unlist(lapply(p, requireNamespace, quietly=TRUE)))) { - m = which(!unlist(lapply(p, requireNamespace, quietly=TRUE))) - message("Can't run examples, please install ", paste(p[m], collapse = " ")) - } else { - invisible(lapply(p, require, character.only=TRUE)) +library(sf) +if (require(sp, quietly = TRUE)) { data(meuse, package = "sp") - meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, - agr = "constant") + meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, agr = "constant") s <- st_as_sf(st_sample(st_as_sfc(st_bbox(meuse)), size=1000, type = "regular")) @@ -59,6 +54,8 @@ out.erase <- erase.point(s, b, inside = FALSE) plot(st_geometry(b),add=TRUE) par(opar) +} else { + cat("Please install sp package to run example", "\n") } } diff --git a/man/explode.Rd b/man/explode.Rd deleted file mode 100644 index 0d47524..0000000 --- a/man/explode.Rd +++ /dev/null @@ -1,30 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/explode.R -\name{explode} -\alias{explode} -\title{Explodes multipart features} -\usage{ -explode(...) -} -\arguments{ -\item{...}{Parameters to be passed to st_cast} -} -\description{ -Explodes multipart features into single part -} -\note{ -Multipart geometries are a data structure where a single attribute -shares multiple features (polygons, points, lines). This function -dissaggregates the data into a one-to-one match. -} -\examples{ -\donttest{ -library(sf) - nc <- st_read(system.file("shape/nc.shp", package="sf")) - nc <- suppressWarnings(st_cast(nc, "POLYGON")) -} - -} -\author{ -Jeffrey S. 
Evans -} diff --git a/man/extract.vertices.Rd b/man/extract.vertices.Rd index 9ea4cdd..07a1695 100644 --- a/man/extract.vertices.Rd +++ b/man/extract.vertices.Rd @@ -12,26 +12,25 @@ extract.vertices(x, join = TRUE) \item{join}{(TRUE/FALSE) Joint attributes from original object} } \value{ -An sf POINT object +An sf POINT object of extracted line or polygon vertices } \description{ Extracts [x,y] vertices from an sf line or polygon object } -\note{ +\details{ This function returns the vertices of a line or polygon object, as opposed to the polygon centroids or line start/stop coordinates } \examples{ -if(require(sf, quietly = TRUE)) { - nc <- sf::st_read(system.file("shape/nc.shp", package="sf")) - nc <- suppressWarnings(sf::st_cast(nc, "POLYGON")) - nc <- nc[c(10,50),] +library(sf) +nc <- sf::st_read(system.file("shape/nc.shp", package="sf")) + nc <- suppressWarnings(sf::st_cast(nc, "POLYGON")) + nc <- nc[c(10,50),] - ( v <- extract.vertices(nc) ) - plot(st_geometry(nc)) - plot(st_geometry(v), pch=20, cex=2, col="red", add=TRUE) -} +( v <- extract.vertices(nc) ) + plot(st_geometry(nc)) + plot(st_geometry(v), pch=20, cex=2, col="red", add=TRUE) } \author{ diff --git a/man/focal.lmetrics.Rd b/man/focal.lmetrics.Rd deleted file mode 100644 index fab5467..0000000 --- a/man/focal.lmetrics.Rd +++ /dev/null @@ -1,29 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/focal.lmetrics.R -\name{focal.lmetrics} -\alias{focal.lmetrics} -\title{Focal landscape metrics} -\usage{ -focal.lmetrics(...) -} -\arguments{ -\item{...}{Parameters to be passed to the modern version -of the function} -} -\description{ -Calculates a variety of landscape metrics on - integer rasters using focal approach -} -\examples{ -\dontrun{ -library(landscapemetrics) -library(raster) - -data(landscape) - -s <- matrix(1, nrow = 3, ncol = 3) -( result <- do.call(stack, window_lsm(landscape, window = s, - what = c("lsm_l_pr", "lsm_l_joinent"))) ) - plot(result) -} -} diff --git a/man/fuzzySum.Rd b/man/fuzzySum.Rd index a5e33e5..c19d5d6 100644 --- a/man/fuzzySum.Rd +++ b/man/fuzzySum.Rd @@ -15,7 +15,7 @@ Value of fuzzy sum \description{ Calculates the fuzzy sum of a vector } -\note{ +\details{ The fuzzy sum is an increasing linear combination of values. This can be used to sum probabilities or results of multiple density functions. diff --git a/man/gaussian.kernel.Rd b/man/gaussian.kernel.Rd index a7e6263..e520351 100644 --- a/man/gaussian.kernel.Rd +++ b/man/gaussian.kernel.Rd @@ -18,6 +18,7 @@ Symmetrical (NxN) matrix of a Gaussian distribution Creates a Gaussian Kernel of specified size and sigma } \examples{ +opar <- par(no.readonly=TRUE) par(mfrow=c(2,2)) persp(gaussian.kernel(sigma=1, s=27), theta = 135, phi = 30, col = "grey", ltheta = -120, shade = 0.6, @@ -28,7 +29,7 @@ Creates a Gaussian Kernel of specified size and sigma col = "grey", ltheta = -120, shade = 0.6, border=NA ) persp(gaussian.kernel(sigma=4, s=27), theta = 135, phi = 30, col = "grey", ltheta = -120, shade = 0.6, border=NA ) - + par(opar) } \author{ Jeffrey S.
Evans diff --git a/man/geo.buffer.Rd b/man/geo.buffer.Rd index 8c971c1..9bdb92c 100644 --- a/man/geo.buffer.Rd +++ b/man/geo.buffer.Rd @@ -19,24 +19,23 @@ an sp or sf polygon class object representing buffer for each feature \description{ Buffers data in geographic (Latitude/Longitude) projection } -\note{ +\details{ Projects (Latitude/Longitude) data in decimal-degree geographic projection using an on-the-fly azimuthal equidistant projection in meters centered on } \examples{ -if(require(sf, quietly = TRUE)) { - e <- c(61.87125, 23.90153, 76.64458, 37.27042) - names(e) <- c("xmin", "ymin", "xmax", "ymax") - s <- st_as_sf(st_sample(st_as_sfc(st_bbox(e)), size=100, - type = "regular")) - st_crs(s) <- st_crs(4326) - s$id <- 1:nrow(s) - - b <- geo.buffer(x=s, r=1000) - plot(st_geometry(b[1,])) - plot(st_geometry(s[1,]), pch=20,cex=2, add=TRUE) -} - +library(sf) +e <- c(61.87125, 23.90153, 76.64458, 37.27042) + names(e) <- c("xmin", "ymin", "xmax", "ymax") + s <- st_as_sf(st_sample(st_as_sfc(st_bbox(e)), size=100, + type = "regular")) + st_crs(s) <- st_crs(4326) + s$id <- 1:nrow(s) + +b <- geo.buffer(x=s, r=1000) + plot(st_geometry(b[1,])) + plot(st_geometry(s[1,]), pch=20,cex=2, add=TRUE) + } \seealso{ \code{\link[sf]{st_buffer}} for st_buffer ... arguments diff --git a/man/group.pdf.Rd b/man/group.pdf.Rd index d3fa9f8..8219f4c 100644 --- a/man/group.pdf.Rd +++ b/man/group.pdf.Rd @@ -34,6 +34,9 @@ variable (must be same length as y)} \item{...}{Additional arguments passed to plot} } +\value{ +Plot of grouped PDFs +} \description{ Creates a probability density plot of y for each group of x @@ -46,8 +49,7 @@ ylab='PDF', xlab='Y', lty=c(1,2,3)) } \references{ -Simonoff, J. S. (1996). Smoothing Methods in Statistics. Springer-Verlag, - New York. +Simonoff, J. S. (1996). Smoothing Methods in Statistics. Springer-Verlag, New York. } \author{ Jeffrey S. Evans <jeffrey_evans@tnc.org> diff --git a/man/hexagons.Rd b/man/hexagons.Rd index 0814cfa..2867b05 100644 --- a/man/hexagons.Rd +++ b/man/hexagons.Rd @@ -4,30 +4,28 @@ \alias{hexagons} \title{Hexagons} \usage{ -hexagons(x, res = 100, ...)
+hexagons(x, res = 100) } \arguments{ -\item{x}{sp SpatialDataFrame class object} +\item{x}{sf class object indicating extent} \item{res}{Area of resulting hexagons} - -\item{...}{Additional arguments passed to spsample} } \value{ -SpatialPolygonsDataFrame OBJECT +sf POLYGONS object } \description{ Create hexagon polygons } -\note{ -depends: sp +\details{ +Based on the extent of x, creates a hexagon mesh with the size of hexagons defined by the res argument } \examples{ - library(sf) - if(require(sp, quietly = TRUE)) { - data(meuse, package = "sp") - meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, - agr = "constant") +library(sf) +if(require(sp, quietly = TRUE)) { + data(meuse, package = "sp") + meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, + agr = "constant") hex <- hexagons(meuse, res=300) plot(st_geometry(hex)) @@ -38,5 +36,9 @@ idx <- which(apply(st_intersects(hex, meuse, sparse=FALSE), 1, any)) hex.sub <- hex[idx,] plot(st_geometry(hex.sub)) plot(st_geometry(meuse),pch=20,add=TRUE) + +} else { + cat("Please install sp package to run example", "\n") } + } diff --git a/man/hli.Rd b/man/hli.Rd index c510ac2..e003414 100644 --- a/man/hli.Rd +++ b/man/hli.Rd @@ -22,14 +22,14 @@ terra SpatRaster class object of McCune & Keon (2002) Heat Load Index \description{ Calculates the McCune & Keon (2002) Heat Load Index } -\note{ +\details{ Describes A southwest facing slope should have warmer temperatures than a southeast facing slope, even though the amount of solar radiation they receive is equivalent. The McCune and Keon (2002) method accounts for this by "folding" the aspect so that the highest values are southwest and the lowest values are northeast. Additionally, this method account for steepness of slope, which is not addressed in most other aspect rescaling equations. HLI values range -from 0 (coolest) to 1 (hottest). +from 0 (coolest) to 1 (hottest). The equations follow McCune (2007) and support northern and southern hemisphere calculations. The folded aspect for northern hemispheres use (180 - (Aspect – 225) ) diff --git a/man/hli.pt.Rd b/man/hli.pt.Rd index a9b92c0..05c926b 100644 --- a/man/hli.pt.Rd +++ b/man/hli.pt.Rd @@ -34,14 +34,14 @@ Vector of McCune & Keon (2002) Heat Load Index \description{ Calculates the McCune & Keon (2002) Heat Load Index } -\note{ +\details{ Describes A southwest facing slope should have warmer temperatures than a southeast facing slope, even though the amount of solar radiation they receive is equivalent. The McCune and Keon (2002) method accounts for this by "folding" the aspect so that the highest values are southwest and the lowest values are northeast. Additionally, this method account for steepness of slope, which is not addressed in most other aspect rescaling equations. HLI values range -from 0 (coolest) to 1 (hottest). +from 0 (coolest) to 1 (hottest). The equations follow McCune (2007) and support northern and southern hemisphere calculations.
The folded aspect for northern hemispheres use (180 - (Aspect – 225) ) @@ -51,7 +51,7 @@ Valid values for this argument are "southern" and "northern" with the default "n } \examples{ -# Single input +# Single point input hli.pt(theta=180, alpha=30, latitude=40) # Multiple input, returns results from diff --git a/man/hsp.Rd b/man/hsp.Rd index fd9d11f..b8ac540 100644 --- a/man/hsp.Rd +++ b/man/hsp.Rd @@ -33,7 +33,7 @@ terra SpatRaster class object of slope position Calculates a hierarchical scale decomposition of topographic position index } -\note{ +\details{ if win = "circle" units are distance, if win = "rectangle" units are number of cells } diff --git a/man/idw.smoothing.Rd b/man/idw.smoothing.Rd index 397c5e1..fd599a2 100644 --- a/man/idw.smoothing.Rd +++ b/man/idw.smoothing.Rd @@ -22,7 +22,7 @@ A vector, same length as nrow(x), of smoothed y values Distance weighted smoothing of a variable in a spatial point object } -\note{ +\details{ Smoothing is conducted with a weighted-mean where; weights represent inverse standardized distance lags Distance-based or neighbour-based smoothing can be specified by setting the desired neighbour smoothing method to a specified value @@ -37,7 +37,6 @@ if(require(sp, quietly = TRUE)) { data(meuse, package = "sp") meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, agr = "constant") -} # Calculate distance weighted mean on cadmium variable in meuse data cadmium.idw <- idw.smoothing(meuse, 'cadmium', k=nrow(meuse), d = 1000) @@ -51,4 +50,8 @@ if(require(sp, quietly = TRUE)) { plot(meuse[c("cadmium","cadmium.wm")], pch=20) +} else { + cat("Please install sp package to run example", "\n") +} + } diff --git a/man/impute.loess.Rd b/man/impute.loess.Rd index 80a5003..a28d976 100644 --- a/man/impute.loess.Rd +++ b/man/impute.loess.Rd @@ -14,8 +14,7 @@ impute.loess(y, s = 0.2, smooth = FALSE) \item{smooth}{(FALSE/TRUE) Smooth data, else only replace NA's} } \value{ -a vector the same length as x with NA values filled or the -data smoothed (or both).. +A vector the same length as x with NA values filled or the data smoothed (or both). } \description{ Imputes missing data or smooths using Loess regression diff --git a/man/insert.Rd b/man/insert.Rd index 9800b4c..f5522dc 100644 --- a/man/insert.Rd +++ b/man/insert.Rd @@ -16,24 +16,20 @@ if nothing specified values with be NA} \item{idx}{Index position to insert row or column} -\item{name}{Name of new column (not used for rows, -MARGIN=1)} +\item{name}{Name of new column (not used for rows, MARGIN=1)} } \value{ A data.frame with the new row or column inserted } \description{ -Inserts a new row or column into a data.frame - at a specified location +Inserts a new row or column into a data.frame at a specified location } -\note{ -Where there are methods to easily add a row/column to -the end or beginning of a data.frame, it is not straight -forward to insert data at a specific location within the -data.frame. This function allows for inserting a vector -at a specific location eg., between columns or rows 1 and 2 -where row/column 2 is moved to the 3rd position and a new -vector of values is inserted into the 2nd position. +\details{ +Where there are methods to easily add a row/column to the end or beginning of a data.frame, +it is not straightforward to insert data at a specific location within the data.frame.
+This function allows for inserting a vector at a specific location e.g., between columns or +rows 1 and 2 where row/column 2 is moved to the 3rd position and a new vector of values is +inserted into the 2nd position. } \examples{ d <- data.frame(ID=1:10, y=runif(10)) diff --git a/man/insert.values.Rd b/man/insert.values.Rd index 8dd2b35..326a549 100644 --- a/man/insert.values.Rd +++ b/man/insert.values.Rd @@ -20,14 +20,15 @@ defined by the index \description{ Inserts new values into a vector at specified positions - +} +\details{ This function inserts new values at specified positions in a vector. It does not replace existing values. If a single value is provided for y and l represents multiple positions y will be replicated for the length of l. In this way you can insert the same value at multiple locations. } \examples{ - (x=1:10) +(x=1:10) # Insert single value in one location insert.values(x, 100, 2) diff --git a/man/is.empty.Rd b/man/is.empty.Rd index 37065ef..2c307b2 100644 --- a/man/is.empty.Rd +++ b/man/is.empty.Rd @@ -21,12 +21,13 @@ a TRUE/FALSE value will be returned for each element in the vector } \description{ evaluates empty elements in a vector - -This function evaluates if an element in a vector is empty -the na.empty argument allows for evaluating NA values (TRUE if NA) and -all.na returns a TRUE if all elements are NA. The trim argument trims -a character string to account for the fact that c(" ") is not empty but, -a vector with c("") is empty. Using trim = TRUE will force both to return TRUE +} +\details{ +This function evaluates if an element in a vector is empty. The na.empty argument +allows for evaluating NA values (TRUE if NA) and all.na returns a TRUE if all elements +are NA. The trim argument trims a character string to account for the fact that c(" ") +is not empty but a vector with c("") is empty. Using trim = TRUE will force both +to return TRUE } \examples{ is.empty( c("") ) diff --git a/man/kde.2D.Rd b/man/kde.2D.Rd deleted file mode 100644 index 109a4f4..0000000 --- a/man/kde.2D.Rd +++ /dev/null @@ -1,14 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/kde2D.R -\name{kde.2D} -\alias{kde.2D} -\title{2-dimensional kernel density estimate} -\usage{ -kde.2D(...) -} -\arguments{ -\item{...}{Parameters to be passed to the modern version of the function} -} -\description{ -Calculates 2-dimensional kernel density estimate over specified extent -} diff --git a/man/kl.divergence.Rd b/man/kl.divergence.Rd index 833beef..ee58603 100644 --- a/man/kl.divergence.Rd +++ b/man/kl.divergence.Rd @@ -20,6 +20,9 @@ has a value smaller than eps.} pairwise Kullback-Leibler divergence index (matrix) } \description{ +Calculates the Kullback-Leibler divergence (relative entropy) +} +\details{ Calculates the Kullback-Leibler divergence (relative entropy) between unweighted theoretical component distributions. Divergence is calculated as: int [f(x) (log f(x) - log g(x)) dx] for distributions with densities diff --git a/man/knn.Rd b/man/knn.Rd index 1ebbee9..a4d0880 100644 --- a/man/knn.Rd +++ b/man/knn.Rd @@ -38,7 +38,8 @@ distance of k } \description{ Find K nearest neighbors for two spatial objects - +} +\details{ Finds nearest neighbor in x based on y and returns rownames, index and distance, If ids is NULL, rownames of x are returned. If coordinate matrix provided, columns need to be ordered [X,Y]. If a radius for d is specified than a maximum @@ -50,12 +51,11 @@ In other words, the covariates must match and be numeric.
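+ +As a minimal sketch (mirroring the example below), where y is a matrix of target +[X,Y] coordinates and x a matrix of reference coordinates: +\preformatted{ + nn <- knn(y, x, k = 2) # index and distance of the 2 nearest + # rows of x for each row of y +}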
} \examples{ \donttest{ -library(sf) - if(require(sp, quietly = TRUE)) { - data(meuse, package = "sp") - meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, - agr = "constant") +library(sf) +if (require(sp, quietly = TRUE)) { + data(meuse, package = "sp") + meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, + agr = "constant") # create reference and target obs idx <- sample(1:nrow(meuse), 10) @@ -81,8 +81,12 @@ wy = as.matrix(st_drop_geometry(pts[,1:3])) y <- st_coordinates(pts)[,1:2] x <- st_coordinates(meuse)[,1:2] knn(y, x, k=2) -} -} + +} else { + cat("Please install sp package to run example", "\n") +} +} + } \seealso{ \code{\link[RANN]{nn2}} for details on search algorithm diff --git a/man/lai.Rd b/man/lai.Rd index 047d552..ca09c6a 100644 --- a/man/lai.Rd +++ b/man/lai.Rd @@ -11,9 +11,13 @@ lai(ndvi, method = c("Jonckheere", "Chen")) \item{method}{Method to use for index options c("Jonckheere", "Chen")} } +\value{ +A terra SpatRaster object with derived LAI values +} \description{ Remote sensing measure of LAI (leaf area per ground-unit area) - +} +\details{ This function calculates the Leaf Area Index (LAI) representing the amount of leaf area per unit of ground area. This is an important parameter for understanding the structure and function of vegetation, as it affects processes such as photosynthesis, transpiration, @@ -26,9 +30,9 @@ between NDVI and LAI can vary depending on factors such as vegetation type, cano and environmental conditions. } \examples{ -\dontrun{ +\donttest{ library(terra) - lsat <- rast(system.file("extdata/Landsat_TM5", package="spatialEco")) +lsat <- rast(system.file("extdata/Landsat_TM5.tif", package="spatialEco")) plotRGB(lsat, r=3, g=2, b=1, scale=1.0, stretch="lin") ndvi <- ( lsat[[4]] - lsat[[3]] ) / (lsat[[4]] + lsat[[3]]) @@ -44,7 +48,7 @@ library(terra) Jonckheere, I., Fleck, S., Nackaerts, K., Muys, B., Coppin, P. (2004). A comparison of two methods to retrieve the leaf area index (LAI) from SPOT-4 HRVIR data. International Journal of Remote Sensing, 25(21):4407–4425. - +
} -\note{ +\details{ Useful function for identifying inflection or enveloping points in a distribution } diff --git a/man/loess.boot.Rd b/man/loess.boot.Rd index f58ec15..8ac0f22 100644 --- a/man/loess.boot.Rd +++ b/man/loess.boot.Rd @@ -42,7 +42,8 @@ list object containing } \description{ Bootstrap of a Local Polynomial Regression (loess) - +} +\details{ The function fits a loess curve and then calculates a symmetric nonparametric bootstrap with a confidence region. Fitted curves are evaluated at a fixed number of equally-spaced x values, regardless of the number of x values in the data. Some diff --git a/man/logistic.regression.Rd b/man/logistic.regression.Rd index 58a9bd4..516c03e 100644 --- a/man/logistic.regression.Rd +++ b/man/logistic.regression.Rd @@ -62,7 +62,8 @@ auto-covariance term Performs a logistic (binomial) or auto-logistic (spatially lagged binomial) regression using maximum likelihood or penalized maximum likelihood estimation. - +} +\details{ It should be noted that the auto-logistic model (Besag 1972) is intended for exploratory analysis of spatial effects. Auto-logistic are know to underestimate the effect of environmental variables and tend to be unreliable (Dormann 2007). diff --git a/man/max_extent.Rd b/man/max_extent.Rd index 6cf8984..e12e1bf 100644 --- a/man/max_extent.Rd +++ b/man/max_extent.Rd @@ -18,11 +18,11 @@ An sf POLYGON class object representing maximum extents returns a extent polygon representing maximum extent of input rasters } -\note{ +\details{ Creates a maximum extent polygon of all specified rasters } \examples{ -if(require(terra, quietly = TRUE)) { +library(terra) r1 <- rast(ext(61.87125, 76.64458, 23.90153, 37.27042)) r2 <- rast(ext(67.66625, 81.56847, 20.38458, 35.67347)) @@ -37,8 +37,6 @@ plot(e, border=NA) sf::st_bbox(e) # full extent -} - } \author{ Jeffrey S. Evans diff --git a/man/mean_angle.Rd b/man/mean_angle.Rd index d9c81d0..752d7c9 100644 --- a/man/mean_angle.Rd +++ b/man/mean_angle.Rd @@ -17,12 +17,13 @@ A vector of mean angle \description{ Calculates the mean angle of a vector } -\note{ +\details{ The arithmetic mean is not correct for calculating the central tendency of angles. This function is intended to return the mean angle for slope or aspect, which could be used in a focal or zonal function. } \examples{ +library(terra) mean_angle(c(180, 10)) mean(c(180, 10)) mean_angle(c(90, 180, 70, 60)) @@ -30,7 +31,6 @@ mean_angle(c(90, 180, 70, 60)) mean_angle(c(90, 180, 270, 360)) mean(c(90, 180, 270, 360)) -library(terra) elev <- rast(system.file("extdata/elev.tif", package="spatialEco")) asp <- terrain(elev, v="aspect") s <- buffer(spatSample(asp, 20, as.points=TRUE, diff --git a/man/moments.Rd b/man/moments.Rd index 0256242..8e4ae7c 100644 --- a/man/moments.Rd +++ b/man/moments.Rd @@ -36,9 +36,9 @@ A vector with the following values: Calculate statistical moments of a distribution } \examples{ - x <- runif(1000,0,100) - ( d <- moments(x, plot=TRUE) ) - ( mode.x <- moments(x, plot=FALSE)[16] ) +x <- runif(1000,0,100) +( d <- moments(x, plot=TRUE) ) +( mode.x <- moments(x, plot=FALSE)[16] ) } \author{ diff --git a/man/morans.plot.Rd b/man/morans.plot.Rd index 6df54b5..05392fd 100644 --- a/man/morans.plot.Rd +++ b/man/morans.plot.Rd @@ -51,38 +51,42 @@ autocorrelation and the red line represents the trend in autocorrelation. The qu in the plot indicate the type of spatial association/interaction (Anselin 1996). 
For example the upper-left quadrant represents negative associations of low values surrounded by high and the lower-right quadrant represents negative associations of -high values surrounded by low. -} -\note{ -if y is not specified the univariate statistic for x is returned. the coords argument +high values surrounded by low. + +If y is not specified the univariate statistic for x is returned. The coords argument is only used if k = NULL. Can also be an sp object with relevant x,y coordinate slot (ie., points or polygons). If w = NULL, the default method for deriving spatial weights matrix, options are: inverse power or negative exponent. If scale.xy = FALSE it is assumed that they are already scaled following Chen (2015). } \examples{ - library(sp) - library(spdep) - data(meuse) - coordinates(meuse) <- ~x+y + p = c("sf", "sp", "spdep") + if(any(!unlist(lapply(p, requireNamespace, quietly=TRUE)))) { + m = which(!unlist(lapply(p, requireNamespace, quietly=TRUE))) + message("Can't run examples, please install ", paste(p[m], collapse = " ")) + } else { + invisible(lapply(p, require, character.only=TRUE)) + + data(meuse, package = "sp") + meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, agr = "constant") - # Autocorrelation (univariate) - morans.plot(meuse$zinc, coords = coordinates(meuse)) +# Autocorrelation (univariate) +morans.plot(meuse$zinc, coords = st_coordinates(meuse)[,1:2]) - # Cross-correlation of: x influencing y and y influencing x - opar <- par(no.readonly=TRUE) - par(mfrow=c(1,2)) - morans.plot(x=meuse$zinc, y=meuse$copper, coords = coordinates(meuse), - scale.morans = TRUE) - morans.plot(x=meuse$zinc, y=meuse$copper, coords = coordinates(meuse), - scale.morans = TRUE, type.ac="yx") - par(opar) - +# Cross-correlation of: x influencing y and y influencing x +opar <- par(no.readonly=TRUE) + par(mfrow=c(1,2)) + morans.plot(x=meuse$zinc, y=meuse$copper, coords = st_coordinates(meuse)[,1:2], + scale.morans = TRUE) + morans.plot(x=meuse$zinc, y=meuse$copper, coords = st_coordinates(meuse)[,1:2], + scale.morans = TRUE, type.ac="yx") +par(opar) +} } \references{ Chen., Y. (2015) A New Methodology of Spatial Cross-Correlation Analysis. PLoS One 10(5):e0126158. doi:10.1371/journal.pone.0126158 - + Anselin, L. (1996) The Moran scatterplot as an ESDA tool to assess local instability in spatial association. pp. 111-125 in M. M. Fischer, H. J. Scholten and D. Unwin (eds) Spatial analytical perspectives on GIS, London, Taylor and Francis diff --git a/man/mwCorr.Rd b/man/mwCorr.Rd deleted file mode 100644 index 66550d0..0000000 --- a/man/mwCorr.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/mwCorr.R -\name{mwCorr} -\alias{mwCorr} -\title{Dutilleul moving window bivariate raster - correlation} -\usage{ -mwCorr(...) -} -\arguments{ -\item{...}{Parameters to be passed to the modern version -of the function} -} -\description{ -A bivarate raster correlation using Dutilleul's - modified t-test -} diff --git a/man/nni.Rd b/man/nni.Rd index 5638601..4a9af8f 100644 --- a/man/nni.Rd +++ b/man/nni.Rd @@ -18,7 +18,8 @@ distance, observed.mean.distance = Observed meand distance. } \description{ Calculates the NNI as a measure of clustering or dispersal - +} +\details{ The nearest neighbor index is expressed as the ratio of the observed distance divided by the expected distance. The expected distance is the average distance between neighbors in a hypothetical random distribution.
If the index is less than 1, diff --git a/man/nth.values.Rd b/man/nth.values.Rd index e4f950b..333ed2e 100644 --- a/man/nth.values.Rd +++ b/man/nth.values.Rd @@ -19,7 +19,7 @@ Numeric vector of Nth values \description{ Returns the Nth highest or lowest values in a vector } -\note{ +\details{ This function returns n lowest or highest elements in a vector } \examples{ diff --git a/man/o.ring.Rd b/man/o.ring.Rd index 0b3157a..0c4f3d5 100644 --- a/man/o.ring.Rd +++ b/man/o.ring.Rd @@ -20,7 +20,8 @@ plot of o-ring and data.frame with plot labels and descriptions \description{ Calculates the inhomogeneous O-ring point pattern statistic (Wiegand & Maloney 2004) - +} +\details{ The function K(r) is the expected number of points in a circle of radius r centered at an arbitrary point (which is not counted), divided by the intensity l of the pattern. The alternative pair correlation function g(r), which arises if the circles of @@ -41,10 +42,14 @@ conditioned probability spectrum) with the interpretation of a neighborhood density, which is more intuitive than an accumulative measure. } \examples{ -library(spatstat.explore) +if (require(spatstat.explore, quietly = TRUE)) { data(lansing) - x <- spatstat.geom::unmark(split(lansing)$maple) - o.ring(x) + x <- spatstat.geom::unmark(split(lansing)$maple) + o.ring(x) + +} else { + cat("Please install spatstat.explore package to run example", "\n") +} } \references{ diff --git a/man/oli.asw.Rd b/man/oli.asw.Rd index e5a0119..79fc15d 100644 --- a/man/oli.asw.Rd +++ b/man/oli.asw.Rd @@ -40,7 +40,7 @@ data.frame object with: \description{ Query of Amazon AWS OLI-Landsat 8 cloud service } -\note{ +\details{ Amazons AWS cloud service is hosting OLI Landsat 8 data granules \url{https://registry.opendata.aws/landsat-8} \url{https://aws.amazon.com/blogs/aws/start-using-landsat-on-aws/} @@ -53,7 +53,7 @@ Collection 1 processing levels: "L1TP", "L1GT", "L1GS" "L1G" and "L1GS" - Radiomertically calibrated with systematic ephemeris correction } \examples{ -\dontrun{ +\donttest{ # Query path 126, row 59, 2013-04-15 to 2017-03-09, <20\% cloud cover ( p126r59.oli <- oli.asw(path=126, row=59, dates = c("2013-04-15", "2017-03-09"), cloud.cover = 20) ) diff --git a/man/optimal.k.Rd b/man/optimal.k.Rd index aff6a02..4a5cb00 100644 --- a/man/optimal.k.Rd +++ b/man/optimal.k.Rd @@ -27,7 +27,7 @@ Find optimal k of k-Medoid partitions using silhouette widths } \examples{ -library(cluster) +if (require(cluster, quietly = TRUE)) { x <- rbind(cbind(rnorm(10,0,0.5), rnorm(10,0,0.5)), cbind(rnorm(15,5,0.5), rnorm(15,5,0.5))) @@ -42,6 +42,10 @@ library(cluster) # join clusters to data x <- data.frame(x, k=clust$model$clustering) +} else { + cat("Please install cluster package to run example", "\n") +} + } \references{ Theodoridis, S. & K. Koutroumbas(2006) Pattern Recognition 3rd ed. 
diff --git a/man/optimized.sample.variance.Rd b/man/optimized.sample.variance.Rd index 09b8f96..be65fe3 100644 --- a/man/optimized.sample.variance.Rd +++ b/man/optimized.sample.variance.Rd @@ -19,14 +19,13 @@ A data.frame with "idx" representing the index of the original vector and "y" is the value of the sampled data } \description{ -Draws an optimal sample that minimizes or maximizes the - sample variance +Draws an optimal sample that minimizes or maximizes the sample variance } \examples{ library(sf) -data(meuse, package = "sp") -meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, - agr = "constant") +if (require(sp, quietly = TRUE)) { + data(meuse, package = "sp") + meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, agr = "constant") n = 15 # Draw n samples that maximize the variance of y @@ -45,6 +44,9 @@ meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, "minimized variance"), col=c("grey","red","blue"), pch=c(19,19,19)) +} else { + cat("Please install sp package to run example", "\n") +} } \author{ Jeffrey S. Evans diff --git a/man/outliers.Rd b/man/outliers.Rd index 43dbd85..c4a1e4b 100644 --- a/man/outliers.Rd +++ b/man/outliers.Rd @@ -34,7 +34,7 @@ Identify outliers using modified Z-score } \references{ Iglewicz, B. & D.C. Hoaglin (1993) How to Detect and Handle Outliers, - American Society for Quality Control, Milwaukee, WI. + American Society for Quality Control, Milwaukee, WI. } \author{ Jeffrey S. Evans diff --git a/man/overlap.Rd b/man/overlap.Rd index a702648..65748ef 100644 --- a/man/overlap.Rd +++ b/man/overlap.Rd @@ -13,11 +13,12 @@ overlap(x, y) with the same dimensions of x} } \value{ -A value representing the I similarity statistic +A vector (single value) representing the I similarity statistic } \description{ Similarity Statistic for Quantifying Niche Overlap using Warren's-I - +} +\details{ The overlap function computes the I similarity statistic (Warren et al. 2008) of two overlapping niche estimates. Similarity is based on the Hellenger distance. It is assumed that the input data share the same extent and cellsize and all values diff --git a/man/parea.sample.Rd b/man/parea.sample.Rd index a024abf..4c561f9 100644 --- a/man/parea.sample.Rd +++ b/man/parea.sample.Rd @@ -26,20 +26,19 @@ An sf POINT object Creates a point sample of polygons where n is based on percent area } -\note{ +\details{ This function results in an adaptive sample based on the area of each polygon. The default scaling factor (sf) converts meters to acres. 
You can set sf=1 to stay in the native projection units } \examples{ -if(require(sf, quietly = TRUE)) { - nc <- st_read(system.file("shape/nc.shp", package="sf")) - nc <- suppressWarnings(st_cast(nc[c(10,100),], "POLYGON")) +library(sf) +nc <- st_read(system.file("shape/nc.shp", package="sf")) + nc <- suppressWarnings(st_cast(nc[c(10,100),], "POLYGON")) - ( ars <- parea.sample(nc, pct=0.001, join = TRUE, stype='random') ) - plot(st_geometry(nc)) - plot(st_geometry(ars), pch=19, add=TRUE) -} + ( ars <- parea.sample(nc, pct=0.001, join = TRUE, stype='random') ) + plot(st_geometry(nc)) + plot(st_geometry(ars), pch=19, add=TRUE) } \author{ diff --git a/man/parse.bits.Rd b/man/parse.bits.Rd index b6996dc..0d27381 100644 --- a/man/parse.bits.Rd +++ b/man/parse.bits.Rd @@ -15,6 +15,9 @@ parse.bits(x, bit, depth = 8, order = c("reverse", "none")) \item{order}{c("reverse", "none") sort order for the bits} } +\value{ +A vector or data.frame of parsed integer value(s) associated with the input bit +} \description{ Returns specified bit value based on integer input @@ -26,42 +29,42 @@ With this function you can parse the values for each bit to assign the flag values. } \examples{ - - # Return value for bit 5 for integer value 100 - parse.bits(100, 5) +# Return value for bit 5 for integer value 100 +parse.bits(100, 5) - # Return value(s) for bits 0 and 1 for integer value 100 - parse.bits(100, c(0,1)) +# Return value(s) for bits 0 and 1 for integer value 100 +parse.bits(100, c(0,1)) - # Return value(s) for bits 0 and 1 for integer values 0-255 - for(i in 0:255) { print(parse.bits(i, c(0,1))) } +# Return value(s) for bits 0 and 1 for integer values 0-255 +for(i in 0:255) { print(parse.bits(i, c(0,1))) } -\dontrun{ +\donttest{ #### Applied Example using Harmonized Landsat Sentinel-2 QC # Create dummy data and qc band - library(raster) - r <- raster(nrow=100, ncol=100) - r[] <- round(runif(ncell(r), 0,1)) - qc <- raster(nrow=100, ncol=100) - qc[] <- round(runif(ncell(qc), 64,234)) - - # Calculate bit values from QC table - ( qc_bits <- data.frame(int=0:255, - cloud = unlist(lapply(0:255, FUN=parse.bits, bit=1)), - shadow = unlist(lapply(0:255, FUN=parse.bits, bit=3)), - acloud = unlist(lapply(0:255, FUN=parse.bits, bit=2)), - cirrus = unlist(lapply(0:255, FUN=parse.bits, bit=0)), - aerosol = unlist(lapply(0:255, FUN=parse.bits, bit=c(7,6)))) ) - - # Query the results to create a vector of integer values indicating what to mask - m <- sort(unique(qc_bits[c(which(qc_bits$cloud == 1), - which(qc_bits$shadow == 1) - ),]$int)) - - # Apply queried integer values to mask image with QA band - qc[qc \%in\% m] <- NA - r <- mask(r, qc) +library(terra) +r <- rast(nrow=100, ncol=100) + r[] <- round(runif(ncell(r), 0,1)) +qc <- rast(nrow=100, ncol=100) + qc[] <- round(runif(ncell(qc), 64,234)) + +# Calculate bit values from QC table +( qc_bits <- data.frame(int=0:255, + cloud = unlist(lapply(0:255, FUN=parse.bits, bit=1)), + shadow = unlist(lapply(0:255, FUN=parse.bits, bit=3)), + acloud = unlist(lapply(0:255, FUN=parse.bits, bit=2)), + cirrus = unlist(lapply(0:255, FUN=parse.bits, bit=0)), + aerosol = unlist(lapply(0:255, FUN=parse.bits, bit=c(7,6)))) ) + +# Query the results to create a vector of integer values indicating what to mask +# cloud is bit 1 and shadow bit 3 +m <- sort(unique(qc_bits[c(which(qc_bits$cloud == 1), + which(qc_bits$shadow == 1) + ),]$int)) + +# Apply queried integer values to mask image with QA band +qc[qc \%in\% m] <- NA +r <- mask(r, qc) } } diff --git a/man/plot.effect.size.Rd
b/man/plot.effect.size.Rd index 86613ff..806893e 100644 --- a/man/plot.effect.size.Rd +++ b/man/plot.effect.size.Rd @@ -11,6 +11,9 @@ \item{...}{Additional arguments passed to plot} } +\value{ +Plot of effect.size object with group effect sizes and 95\% confidence intervals +} \description{ Plot function for effect.size object } diff --git a/man/plot.loess.boot.Rd b/man/plot.loess.boot.Rd index 1e39ff9..eac9032 100644 --- a/man/plot.loess.boot.Rd +++ b/man/plot.loess.boot.Rd @@ -11,6 +11,9 @@ \item{...}{Additional arguments passed to plot} } +\value{ +Plot of loess bootstrap fit +} \description{ Plot function for loess.boot object } @@ -21,15 +24,6 @@ Plot function for loess.boot object sb <- loess.boot(x, y, nreps = 99, confidence = 0.90, span = 0.40) plot(sb) -} -\references{ -Cleveland, WS, (1979) Robust Locally Weighted Regression and Smoothing Plots Journal of the American Statistical Association 74:829-836 - -Efron, B., and R. Tibshirani (1993) An Introduction to the Bootstrap Chapman and Hall, New York - -Hardle, W., (1989) Applied Nonparametric Regression Cambridge University Press, NY. - -Tibshirani, R. (1988) Variance stabilization and the bootstrap. Biometrika 75(3):433-44. } \author{ Jeffrey S. Evans diff --git a/man/point.in.poly.Rd b/man/point.in.poly.Rd deleted file mode 100644 index 8fa75ce..0000000 --- a/man/point.in.poly.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/point.in.poly.R -\name{point.in.poly} -\alias{point.in.poly} -\title{Point and Polygon Intersect} -\usage{ -point.in.poly(...) -} -\arguments{ -\item{...}{arguments passed to sf::st_intersection} -} -\value{ -NA -} -\description{ -Intersects point and polygon feature classes and adds polygon - attributes to points -} -\examples{ - \dontrun{ - sf::st_intersection() -} - -} diff --git a/man/poly.regression.Rd b/man/poly.regression.Rd index e422167..20c5c88 100644 --- a/man/poly.regression.Rd +++ b/man/poly.regression.Rd @@ -42,7 +42,8 @@ inputed data \description{ Calculates a Local Polynomial Regression for smoothing or imputation of missing data. - +} +\details{ This is a wrapper function for loess that simplifies data smoothing and imputation of missing values. The function allows for smoothing a vector, based on an index (derived automatically) or covariates. If the impute option is TRUE NA values are diff --git a/man/polyPerimeter.Rd b/man/polyPerimeter.Rd index e6503b7..d8abf23 100644 --- a/man/polyPerimeter.Rd +++ b/man/polyPerimeter.Rd @@ -16,12 +16,11 @@ A vector of polygon perimeters in projection units Calculates the perimeter length(s) for a polygon object } \examples{ -if(require(sf, quietly = TRUE)) { +library(sf) polys <- st_read(system.file("shape/nc.shp", package="sf")) polys <- suppressWarnings(st_cast(polys[c(10,100),], "POLYGON")) polyPerimeter(polys) -} } \author{ diff --git a/man/poly_trend.Rd b/man/poly_trend.Rd index e3a3f4d..6ea3ad2 100644 --- a/man/poly_trend.Rd +++ b/man/poly_trend.Rd @@ -31,7 +31,7 @@ A poly.trend class (list) containing \description{ Fits a polynomial trend using specified order } -\note{ +\details{ A fit using a lm(y ~ x + I(X^2) + I(X^3)) form will be correlated which, can cause problems.
The function avoids undue correlation using orthogonal polynomials diff --git a/man/polygon_extract.Rd b/man/polygon_extract.Rd deleted file mode 100644 index f4ad0b3..0000000 --- a/man/polygon_extract.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/polygon_extract.R -\name{polygon_extract} -\alias{polygon_extract} -\title{polygon raster extract} -\usage{ -polygon_extract(...) -} -\arguments{ -\item{...}{arguments passed to terra::extract} -} -\value{ -NA -} -\description{ -Fast method for extracting raster values to polygons -} -\examples{ - \dontrun{ - terra::extract() -} - -} diff --git a/man/pp.subsample.Rd b/man/pp.subsample.Rd index 5adf37d..3763744 100644 --- a/man/pp.subsample.Rd +++ b/man/pp.subsample.Rd @@ -39,7 +39,8 @@ sf class POINT geometry containing random subsamples \description{ Generates random subsample based on density estimate of observations - +} +\details{ The window type creates a convex hull by default or, optionally, uses the maximum extent (envelope). The resulting bandwidth can vary widely by method. the 'diggle' method is intended for bandwidth representing 2nd order spatial variation whereas @@ -47,8 +48,7 @@ the 'scott' method will represent 1st order trend. the 'geometry' approach will represent 1st order trend. for large datasets, caution should be used with the 2nd order 'likelihood' approach, as it is slow and computationally expensive. finally, the 'stoyan' method will produce very strong 2nd order results. ' -} -\note{ + Available bandwidth selection methods are: \itemize{ \item Scott - (Scott 1992), Scott's Rule for Bandwidth Selection (1st order) @@ -63,7 +63,7 @@ validation (2nd order) \examples{ library(sf) -library(spatstat.explore) +if(require(spatstat.explore, quietly = TRUE)) { data(bei, package = "spatstat.data") trees <- st_as_sf(bei) @@ -78,6 +78,10 @@ trees.wrs <- pp.subsample(trees, n=n, window='hull') legend('bottomright', legend=c('Original sample', 'Subsample'), col=c('black','red'),pch=c(19,19)) +} else { + cat("Please install spatstat.explore package to run example", "\n") +} + } \references{ Berman, M. and Diggle, P. 
(1989) Estimating weighted integrals of the second-order diff --git a/man/print.cross.cor.Rd b/man/print.cross.cor.Rd index 4b42b49..2307502 100644 --- a/man/print.cross.cor.Rd +++ b/man/print.cross.cor.Rd @@ -11,6 +11,26 @@ \item{...}{Ignored} } +\value{ +When not simulated (k=0), prints the function's list object containing: +\itemize{ +\item I - Global autocorrelation statistic +\item SCI - A data.frame with two columns representing the xy and yx autocorrelation +\item nsim - Value of NULL indicating p-values were derived from observed data (k=0) +\item p - Probability based on observations above/below confidence interval +\item t.test - Probability based on t-test +} + +When simulated (k>0), prints the function's list object containing: +\itemize{ +\item I - Global autocorrelation statistic +\item SCI - A data.frame with two columns representing the xy and yx autocorrelation +\item nsim - Value representing number of simulations +\item global.p - p-value of global autocorrelation statistic +\item local.p - Probability based on simulated data using successful rejection of t-test +\item range.p - Probability based on range of probabilities resulting from paired t-test +} +} \description{ print method for class "cross.cor" } diff --git a/man/print.effect.size.Rd b/man/print.effect.size.Rd index 9bc99bd..f6a8cdc 100644 --- a/man/print.effect.size.Rd +++ b/man/print.effect.size.Rd @@ -11,6 +11,10 @@ \item{...}{Ignored} } +\value{ +Prints the output data.frame containing: effect size with upper and lower confidence +intervals, and mean and sd by group +} \description{ print method for class "effect.size" } diff --git a/man/print.loess.boot.Rd b/man/print.loess.boot.Rd index a367202..5a67b25 100644 --- a/man/print.loess.boot.Rd +++ b/man/print.loess.boot.Rd @@ -11,6 +11,28 @@ \item{...}{Ignored} } +\value{ +Same as summary of the loess.boot object, a list including: +\itemize{ +\item nreps Number of bootstrap replicates +\item confidence Confidence interval (region) +\item span alpha (span) parameter used in loess fit +\item degree polynomial degree used in loess fit +\item normalize Normalized data (TRUE/FALSE) +\item family Family of statistic used in fit +\item parametric Parametric approximation (TRUE/FALSE) +\item surface Surface fit, see loess.control +\item data data.frame of x,y used in model +\item fit data.frame including: +\enumerate{ +\item x - Equally-spaced x index +\item y.fit - loess fit +\item up.lim - Upper confidence interval +\item low.lim - Lower confidence interval +\item stddev - Standard deviation of loess fit at each x value +} +} +} \description{ print method for class "loess.boot" } diff --git a/man/print.poly.trend.Rd b/man/print.poly.trend.Rd index a3be245..23eee5f 100644 --- a/man/print.poly.trend.Rd +++ b/man/print.poly.trend.Rd @@ -11,6 +11,9 @@ \item{...}{Ignored} } +\value{ +Prints trend model summary, order and trend confidence intervals +} \description{ print method for class "poly.trend" } diff --git a/man/proximity.index.Rd b/man/proximity.index.Rd index ec48682..01160c7 100644 --- a/man/proximity.index.Rd +++ b/man/proximity.index.Rd @@ -53,7 +53,10 @@ Calculates proximity index for a set of polygons # plot index for just forest class forest <- meuse[meuse$LU == "forest",] plot(forest["cpidx"]) - } + +} else { + cat("Please install sp package to run example", "\n") +} } } \references{ diff --git a/man/pseudo.absence.Rd b/man/pseudo.absence.Rd index 26947f9..108099e 100644 --- a/man/pseudo.absence.Rd +++ b/man/pseudo.absence.Rd @@ -25,24 +25,24 @@ pseudo.absence( \item{window}{Type of
window (hull OR extent), overridden if mask provided} -\item{ref}{Optional terra SpatRaster class raster. The resolution of the +\item{ref}{Optional terra SpatRaster class raster. The resolution of the density estimate will match mask.} -\item{s}{Optional resolution passed to window argument. Caution should be -used due to long processing times associated with high -resolution. In contrast, coarse resolution can exclude +\item{s}{Optional resolution passed to window argument. Caution should be +used due to long processing times associated with high +resolution. In contrast, coarse resolution can exclude known points.} -\item{sigma}{Bandwidth selection method for KDE, default is 'Scott'. -Options are 'Scott', 'Stoyan', 'Diggle', 'likelihood', +\item{sigma}{Bandwidth selection method for KDE, default is 'Scott'. +Options are 'Scott', 'Stoyan', 'Diggle', 'likelihood', and 'geometry'} \item{wts}{Optional vector of weights corresponding to point pattern} \item{KDE}{Return KDE raster (TRUE/FALSE)} -\item{gradient}{A scaling factor applied to the sigma parameter used to -adjust the gradient decent of the density estimate. The +\item{gradient}{A scaling factor applied to the sigma parameter used to +adjust the gradient descent of the density estimate. The default is 1, for no adjustment (downweight < 1 | upweight > 1)} \item{p}{Minimum value for probability distribution (must be > 0)} @@ -51,46 +51,49 @@ default is 1, for no adjustment (downweight < 1 | upweight > 1)} } \value{ A list class object with the following components: -\itemize{ -\item sample A sf POINT geometry object containing random samples -\item kde A terra SpatRaster class of inverted Isotropic KDE estimates - used as sample weights (IF KDE = TRUE) -\item sigma Selected bandwidth of KDE - } +\itemize{ +\item sample An sf POINT geometry object containing random samples +\item kde A terra SpatRaster class of inverted Isotropic KDE estimates +used as sample weights (IF KDE = TRUE) +\item sigma Selected bandwidth of KDE +} } \description{ -Generates pseudo-absence samples based on density estimate - of known locations +Generates pseudo-absence samples based on density estimate +of known locations } \details{ -The window type creates a convex hull by default or, optionally, uses the -maximum extent (envelope). If a mask is provided the kde will represent -areas defined by the mask and defines the area that pseudo absence data +The window type creates a convex hull by default or, optionally, uses the +maximum extent (envelope). If a mask is provided the kde will represent +areas defined by the mask and defines the area that pseudo absence data will be generated. Available bandwidth selection methods are: \itemize{ -\item Scott (Scott 1992), Scott's Rule for Bandwidth Selection (1st order) -\item Diggle (Berman & Diggle 1989), Minimize the mean-square error via cross - validation (2nd order) -\item likelihood (Loader 1999), Maximum likelihood cross validation (2nd order) -\item geometry, Bandwidth is based on simple window geometry (1st order) -\item Stoyan (Stoyan & Stoyan 1995), Based on pair-correlation function - (strong 2nd order) -\item User defined numeric distance bandwidth - } - -Note; resulting bandwidth can vary widely by method. the 'diggle' method -is intended for selecting bandwidth representing 2nd order spatial variation -whereas the 'scott' method will represent 1st order trend. the 'geometry' approach -will also represent 1st order trend.
For large datasets, caution should be used with -the 2nd order 'likelihood' approach, as it is slow and computationally expensive. +\item Scott (Scott 1992), Scott's Rule for Bandwidth Selection (1st order) +\item Diggle (Berman & Diggle 1989), Minimize the mean-square error via cross +validation (2nd order) +\item likelihood (Loader 1999), Maximum likelihood cross validation (2nd order) +\item geometry, Bandwidth is based on simple window geometry (1st order) +\item Stoyan (Stoyan & Stoyan 1995), Based on pair-correlation function (strong 2nd order) +\item User defined numeric distance bandwidth +} +} +\note{ +Resulting bandwidth can vary widely by method. The 'diggle' method +is intended for selecting bandwidth representing 2nd order spatial variation +whereas the 'scott' method will represent 1st order trend. The 'geometry' approach +will also represent 1st order trend. For large datasets, caution should be used with +the 2nd order 'likelihood' approach, as it is slow and computationally expensive. finally, the 'stoyan' method will produce very strong 2nd order results. } \examples{ -library(sf) -library(terra) -library(spatstat.data) + p = c("sf", "sp", "terra", "spatstat.data") + if(any(!unlist(lapply(p, requireNamespace, quietly=TRUE)))) { + m = which(!unlist(lapply(p, requireNamespace, quietly=TRUE))) + message("Can't run examples, please install ", paste(p[m], collapse = " ")) + } else { + invisible(lapply(p, require, character.only=TRUE)) data(meuse, package = "sp") meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, @@ -121,31 +124,31 @@ trees.abs <- pseudo.absence(trees, n=100, window='extent', KDE=TRUE) plot(st_geometry(trees.abs$sample), col='red', pch=20, cex=1, add=TRUE) legend('top', legend=c('Presence', 'Pseudo-absence'), pch=c(20,20),col=c('black','red'),bg="white") - +} } \references{ -Berman, M. and Diggle, P. (1989) Estimating weighted integrals of the second-order - intensity of a spatial point process. Journal of the Royal Statistical Society, - series B 51, 81-92. +Berman, M. and Diggle, P. (1989) Estimating weighted integrals of the second-order +intensity of a spatial point process. Journal of the Royal Statistical Society, +series B 51, 81-92. -Fithian, W & T. Hastie (2013) Finite-sample equivalence in statistical models for - presence-only data. Annals of Applied Statistics 7(4): 1917-1939 +Fithian, W & T. Hastie (2013) Finite-sample equivalence in statistical models for +presence-only data. Annals of Applied Statistics 7(4): 1917-1939 -Hengl, T., H. Sierdsema, A. Radovic, and A. Dilo (2009) Spatial prediction of species - distributions from occurrence-only records: combining point pattern analysis, - ENFA and regression-kriging. Ecological Modelling, 220(24):3499-3511 +Hengl, T., H. Sierdsema, A. Radovic, and A. Dilo (2009) Spatial prediction of species +distributions from occurrence-only records: combining point pattern analysis, +ENFA and regression-kriging. Ecological Modelling, 220(24):3499-3511 Loader, C. (1999) Local Regression and Likelihood. Springer, New York. -Scott, D.W. (1992) Multivariate Density Estimation. Theory, Practice and Visualization. - New York, Wiley. +Scott, D.W. (1992) Multivariate Density Estimation. Theory, Practice and Visualization. +New York, Wiley. -Stoyan, D. and Stoyan, H. (1995) Fractals, random shapes and point fields: methods of - geometrical statistics. John Wiley and Sons.
-Warton, D.i., and L.C. Shepherd (2010) Poisson Point Process Models Solve the Pseudo-Absence - Problem for Presence-only Data in Ecology. The Annals of Applied Statistics, 4(3):1383-1402 +Warton, D.I., and L.C. Shepherd (2010) Poisson Point Process Models Solve the Pseudo-Absence +Problem for Presence-only Data in Ecology. The Annals of Applied Statistics, 4(3):1383-1402 } \author{ -Jeffrey S. Evans +Jeffrey S. Evans \href{mailto:jeffrey_evans@tnc.org}{jeffrey_evans@tnc.org} } diff --git a/man/pu.Rd b/man/pu.Rd index ac85eb2..26f812c 100644 --- a/man/pu.Rd +++ b/man/pu.Rd @@ -56,7 +56,7 @@ A sp SpatialPolygonsDataFrame with 5919 rows and 46 variables: } } \source{ -\url{https://www.conservationgateway.org} +"The Nature Conservancy" } \description{ Subset of biodiversity planning units for Haiti ecoregional spatial reserve plan } diff --git a/man/quadrats.Rd b/man/quadrats.Rd index 0143c20..2c7d961 100644 --- a/man/quadrats.Rd +++ b/man/quadrats.Rd @@ -7,7 +7,7 @@ quadrats(x, s = 250, n = 100, r = NULL, sp = FALSE) } \arguments{ -\item{x}{A sp or sf polygon object defining extent} +\item{x}{An sf POLYGONS object defining extent} \item{s}{Radius defining single or range of sizes of quadrats} @@ -18,12 +18,12 @@ quadrats(x, s = 250, n = 100, r = NULL, sp = FALSE) \item{sp}{(FALSE | TRUE) Output sp class object} } \value{ -an sf or sp polygon object with rotated polygon(s) +An sf POLYGONS object with rotated polygon(s) } \description{ Creates quadrat polygons for sampling or analysis } -\note{ +\details{ The radius (s) parameter can be a single value or a range of values, representing a randomization range of resulting quadrat sizes. The rotation (r) parameter can also be used to defined a fixed rotation or @@ -35,23 +35,30 @@ library(sf) library(terra) # read meuse data and create convex hull - data(meuse, package = "sp") - meuse <- st_as_sf(meuse, coords = c("x", "y"), - crs = 28992, agr = "constant") - e <- st_convex_hull(st_union(meuse)) +if (require(sp, quietly = TRUE)) { + data(meuse, package = "sp") + meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, agr = "constant") + e <- st_convex_hull(st_union(meuse)) # Fixed size 250 and no rotation s <- quadrats(e, s = 250, n = 10) plot(st_geometry(s)) \donttest{ - # Variable sizes 100-300 and rotation of 0-45 degrees - s <- quadrats(e, s = c(100,300), n = 10, r = c(0,45)) - plot(st_geometry(s)) +# Variable sizes 100-300 and rotation of 0-45 degrees +s <- quadrats(e, s = c(100,300), n = 10, r = c(0,45)) + plot(st_geometry(s)) - # Variable sizes 100-300 and no rotation - s <- quadrats(e, s = c(100,300), n = 10) +# Variable sizes 100-300 and no rotation +s <- quadrats(e, s = c(100,300), n = 10) plot(st_geometry(s)) } + +} else { + cat("Please install sp package to run example", "\n") +} } \author{ Jeffrey S.
Evans +} diff --git a/man/random.raster.Rd b/man/random.raster.Rd index d900c93..e21a9a8 100644 --- a/man/random.raster.Rd +++ b/man/random.raster.Rd @@ -56,8 +56,7 @@ Create a random raster or raster stack using specified distribution } \details{ -Options for distributions are for random, normal, seq, binominal, -gaussian and sample raster(s) +Options for distributions are: random, normal, seq, binominal, gaussian and sample raster(s) } \examples{ library(terra) diff --git a/man/raster.Zscore.Rd b/man/raster.Zscore.Rd index a03ffeb..9464871 100644 --- a/man/raster.Zscore.Rd +++ b/man/raster.Zscore.Rd @@ -20,8 +20,7 @@ raster (FALSE/TRUE)} raster class object or raster written to disk } \description{ -Calculates the modified z-score for all cells - in a raster +Calculates the modified z-score for raster values } \note{ Since this functions needs to operate on all of the raster values, diff --git a/man/raster.change.Rd b/man/raster.change.Rd index e991278..385af5a 100644 --- a/man/raster.change.Rd +++ b/man/raster.change.Rd @@ -17,45 +17,44 @@ raster.change( \item{y}{A terra SpatRaster for comparison to x} -\item{s}{Integer or matrix for defining Kernel, +\item{s}{Integer or matrix for defining Kernel, must be odd but not necessarily square} -\item{stat}{Statistic to use in comparison, please see details for +\item{stat}{Statistic to use in comparison, please see details for options.} \item{...}{Additional arguments passed to terra::focalPairs} } \value{ A terra SpatRaster layer containing one of the following layers: -\itemize{ -\item kappa Kappa or Weighted Kappa statistic (if stat = "kappa") -\item correlation Paired t.test statistic (if stat = "cor") -\item entropy Local entropy (if stat = "entropy") -\item divergence Kullback-Leibler divergence (if stat = "divergence") -\item cross.entropy Local Cross-entropy (if stat = "cross.entropy") -\item t.test Paired t.test statistic (if stat = "t.test") -\item p.value p-value of the paired t.test statistic (if stat = "t.test") - } +\itemize{ +\item kappa - Kappa or Weighted Kappa statistic (if stat = "kappa") +\item correlation - Pearson's correlation (if stat = "cor") +\item entropy - Local entropy (if stat = "entropy") +\item divergence - Kullback-Leibler divergence (if stat = "divergence") +\item cross.entropy - Local Cross-entropy (if stat = "cross.entropy") +\item t.test - Paired t.test statistic (if stat = "t.test") +\item p.value - p-value of the paired t.test statistic (if stat = "t.test") +} } \description{ -Compares two categorical rasters with a variety of - statistical options - -This function provides a various statistics for comparing two classified maps. -Valid options are: -\itemize{ -\item kappa - Cohen's Kappa -\item t.test - Two-tailed paired t-test -\item cor - Persons Correlation -\item entropy - Delta entropy -\item cross-entropy - Cross-entropy loss function -\item divergence - Kullback-Leibler divergence (relative entropy) +Compares two categorical rasters with a variety of statistical options } +\details{ +This function provides various statistics for comparing two classified maps. +Valid options are: +\itemize{ +\item kappa - Cohen's Kappa +\item t.test - Two-tailed paired t-test +\item cor - Pearson's correlation +\item entropy - Delta entropy +\item cross-entropy - Cross-entropy loss function +\item divergence - Kullback-Leibler divergence (relative entropy) } -\note{ -Kappa and t-test values < 0 are reported as 0.
For a weighted kappa, a matrix must -be provided that correspond to the pairwise weights for all values in both rasters. -Delta entropy is derived by calculating Shannon's on each focal window then + +Kappa and t-test values < 0 are reported as 0. For a weighted kappa, a matrix must +be provided that corresponds to the pairwise weights for all values in both rasters. +Delta entropy is derived by calculating Shannon's on each focal window then differencing them (e(x) - e(y)). The s argument can be a single scalar, defining a symmetrical kernel, two scalers defining the dimensions of the kernel eg., c(3,5) or a matrix defining the kernel say, resulting from terra::focalMat @@ -92,15 +91,15 @@ or a matrix defining the kernel say, resulting from terra::focalMat } \references{ -Cohen, J. (1960). A coefficient of agreement for nominal scales. Educational - and Psychological Measurement, 20:37-46 +Cohen, J. (1960). A coefficient of agreement for nominal scales. Educational +and Psychological Measurement, 20:37-46 -McHugh M.L. (2012) Interrater reliability: the kappa statistic. - Biochemia medica, 22(3):276–282. +McHugh M.L. (2012) Interrater reliability: the kappa statistic. +Biochemia medica, 22(3):276–282. -Kullback, S., R.A. Leibler (1951). On information and sufficiency. Annals of - Mathematical Statistics. 22(1):79–86 +Kullback, S., R.A. Leibler (1951). On information and sufficiency. Annals of +Mathematical Statistics. 22(1):79–86 } \author{ -Jeffrey S. Evans +Jeffrey S. Evans \href{mailto:jeffrey_evans@tnc.org}{jeffrey_evans@tnc.org} } diff --git a/man/raster.deviation.Rd b/man/raster.deviation.Rd index df72386..8196089 100644 --- a/man/raster.deviation.Rd +++ b/man/raster.deviation.Rd @@ -32,7 +32,8 @@ specified global statistic \description{ Calculates the local deviation from the raster, a specified global statistic or a polynomial trend of the raster. - +} +\details{ The deviation from the trend is derived as [y-hat - y] where; y-hat is the Nth-order polynomial. Whereas the deviation from a global statistic is [y - y-hat] where; y-hat is the local (focal) statistic. The global = TRUE argument allows @@ -62,7 +63,7 @@ par(opar) Magee, Lonnie (1998). Nonlocal Behavior in Polynomial Regressions. The American Statistician. American Statistical Association. 52(1):20-22 -Fan, J. (1996). Local Polynomial Modelling and Its Applications: From linear + Fan, J. (1996). Local Polynomial Modelling and Its Applications: From linear regression to nonlinear regression. Monographs on Statistics and Applied Probability. Chapman and Hall/CRC. ISBN 0-412-98321-4 } diff --git a/man/raster.downscale.Rd b/man/raster.downscale.Rd index aabcd1e..aa64e3e 100644 --- a/man/raster.downscale.Rd +++ b/man/raster.downscale.Rd @@ -38,22 +38,21 @@ at same resolution as y.
Options are c("none", "prediction", "confidence")} \value{ A list object containing: \itemize{ -\item downscale downscaled terra SpatRaster object -\item model MASS rlm model object -\item MSE Mean Square Error -\item AIC Akaike information criterion -\item parm.ci Parameter confidence intervals -\item residuals If residuals = TRUE, a SpatRaster of the residual error -\item uncertainty If pred.int = TRUE, SpatRaster's of the -lower/upper prediction intervals -\item std.error If se = TRUE, SpatRaster's of the standard error +\item downscale - downscaled terra SpatRaster object +\item model - MASS rlm model object +\item MSE - Mean Square Error +\item AIC - Akaike information criterion +\item parm.ci - Parameter confidence intervals +\item residuals - If residuals = TRUE, a SpatRaster of the residual error +\item uncertainty - If pred.int = TRUE, SpatRaster's of the lower/upper prediction intervals +\item std.error - If se = TRUE, SpatRaster's of the standard error } } \description{ Downscales a raster to a higher resolution raster using a robust regression } -\note{ +\details{ This function uses a robust regression, fit using an M-estimation with Tukey's biweight initialized by a specific S-estimator, to downscale a raster based on higher-resolution or more detailed raster data specified as covariate(s). You can optionally output residual @@ -70,16 +69,16 @@ the prediction interval for standard error defaults to "confidence" else is the uncertainty (eg., prediction or confidence). } \examples{ -\dontrun{ -library(geodata) +\donttest{ +if (require(geodata, quietly = TRUE)) { library(terra) +library(geodata) # Download example data (requires geodata package) - elev <- geodata::elevation_30s(country="SWZ", path=tempdir()) - slp <- terrain(elev, v="slope") - tmax <- geodata::worldclim_country(country="SWZ", var="tmax", - path=tempdir()) - tmax <- crop(tmax[[1]], ext(elev)) +elev <- elevation_30s(country="SWZ", path=tempdir()) +slp <- terrain(elev, v="slope") +tmax <- worldclim_country(country="SWZ", var="tmax", path=tempdir()) + tmax <- crop(tmax[[1]], ext(elev)) # Downscale temperature x=c(elev,slp) @@ -118,7 +117,10 @@ tmax.ds <- raster.downscale(x, y, scatter=TRUE, residuals = TRUE, plot(tmax.ds$downscale - tmax.ds$uncertainty[[2]], main="upper prediction interval") par(opar) - + +} else { + cat("Please install geodata package to run example", "\n") +} } } \references{ diff --git a/man/raster.entropy.Rd b/man/raster.entropy.Rd index c53c401..153e600 100644 --- a/man/raster.entropy.Rd +++ b/man/raster.entropy.Rd @@ -23,7 +23,8 @@ terra SpatRaster class object } \description{ Calculates entropy on integer raster (i.e., 8 bit 0-255) - +} +\details{ Entropy calculated as: H = -sum(Pi*ln(Pi)) where; Pi, Proportion of one value to total values Pi=n(p)/m and m, Number of unique values. Expected range: 0 to log(m) H=0 if window contains the same value in all cells. @@ -50,3 +51,6 @@ rEnt <- raster.entropy(r, d=5, categorical = TRUE, global = TRUE) Fuchs M., R. Hoffmann, F. Schwonke (2008) Change Detection with GRASS GIS - Comparison of images taken by different sensor. } +\author{ +Jeffrey S. Evans +} diff --git a/man/raster.gaussian.smooth.Rd b/man/raster.gaussian.smooth.Rd index df9b6eb..3da1c2f 100644 --- a/man/raster.gaussian.smooth.Rd +++ b/man/raster.gaussian.smooth.Rd @@ -34,7 +34,7 @@ A terra SpatRaster class object of the local distributional moment \description{ Applies a Gaussian smoothing kernel to smooth raster. } -\note{ +\details{ This applies a Gaussian Kernel smoother. 
The convolution option performs a Gaussian decomposition whereas the other options use the kernel as weights for the given statistic. diff --git a/man/raster.invert.Rd index e3cf567..7da645f 100644 --- a/man/raster.invert.Rd +++ b/man/raster.invert.Rd @@ -7,15 +7,15 @@ raster.invert(x) } \arguments{ -\item{x}{raster object} +\item{x}{A terra SpatRaster object} } \value{ -raster class object with inverted (flipped) raster values +A terra SpatRaster object with inverted (flipped) raster values } \description{ Inverts (flip) the values of a raster } -\note{ +\details{ Inverts raster values using the formula: ((x - max(x)) * -1) + min(x) } \examples{ diff --git a/man/raster.mds.Rd index a28d9c1..453e430 100644 --- a/man/raster.mds.Rd +++ b/man/raster.mds.Rd @@ -21,9 +21,9 @@ matrix values.} A terra SpatRaster class object } \description{ -Multidimensional scaling of raster values within an - N x N focal window - +Multidimensional scaling of raster values within an N x N focal window +} +\details{ An MDS focal function. If only one value provided for s, then a square matrix (window) will be used. If window.median = FALSE then the center value of the matrix is returned and not the median of the matrix diff --git a/man/raster.modified.ttest.Rd index 4a16ffb..d9d7fae 100644 --- a/man/raster.modified.ttest.Rd +++ b/man/raster.modified.ttest.Rd @@ -22,38 +22,39 @@ raster.modified.ttest( \item{sample}{Apply sub-sampling options; c("none", "random", "hexagonal", "regular")} -\item{p}{If sample != "none", what proportion of population +\item{p}{If sample != "none", what proportion of the population should be sampled} \item{size}{Fixed sample size (default NULL)} } \value{ -A terra SpatRaster or sf POINT class object with the - following attributes: -\itemize{ -\item corr Correlation -\item Fstat The F-statistic calculated as [degrees of freedom * - unscaled F-statistic] -\item p.value p-value for the test -\item moran.x Moran's-I for x -\item moran.y Moran's-I for y - } +A terra SpatRaster or sf POINT class object with the following attributes: +\itemize{ +\item corr - Correlation +\item Fstat - The F-statistic calculated as [degrees of freedom * unscaled F-statistic] +\item p.value - p-value for the test +\item moran.x - Moran's-I for x +\item moran.y - Moran's-I for y +} } \description{ -A bivarate raster correlation using Dutilleul's - modified t-test +A bivariate raster correlation using Dutilleul's +modified t-test -This function provides a bivariate moving window correlation using the modified -t-test to account for spatial autocorrelation. Point based subsampling is provided -for computation tractability. The hexagon sampling is recommended as it it good +This function provides a bivariate moving window correlation using the modified +t-test to account for spatial autocorrelation. Point based subsampling is provided +for computational tractability. The hexagon sampling is recommended as it is good at capturing spatial processes that include nonstationarity and anisotropy.
} \examples{ -\dontrun{ -library(gstat) -library(sf) -library(terra) - +\donttest{ + p = c("sf", "sp", "terra", "gstat") + if(any(!unlist(lapply(p, requireNamespace, quietly=TRUE)))) { + m = which(!unlist(lapply(p, requireNamespace, quietly=TRUE))) + message("Can't run examples, please install ", paste(p[m], collapse = " ")) + } else { + invisible(lapply(p, require, character.only=TRUE)) + data(meuse, package = "sp") meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, agr = "constant") @@ -86,20 +87,19 @@ acor <- raster.modified.ttest(G1, G2) # Sample-based corrected correlation ( cor.hex <- raster.modified.ttest(G1, G2, sample = "hexagonal") ) plot(cor.hex["corr"], pch=20) - } - +} } \references{ -Clifford, P., S. Richardson, D. Hemon (1989), Assessing the significance of the - correlationbetween two spatial processes. Biometrics 45:123-134. +Clifford, P., S. Richardson, D. Hemon (1989), Assessing the significance of the +correlation between two spatial processes. Biometrics 45:123-134. -Dutilleul, P. (1993), Modifying the t test for assessing the correlation between - two spatial processes. Biometrics 49:305-314. +Dutilleul, P. (1993), Modifying the t test for assessing the correlation between +two spatial processes. Biometrics 49:305-314. } \seealso{ \code{\link[SpatialPack]{modified.ttest}} for test details } \author{ -Jeffrey S. Evans +Jeffrey S. Evans \href{mailto:jeffrey_evans@tnc.org}{jeffrey_evans@tnc.org} } diff --git a/man/raster.moments.Rd index f29b1ac..52bd557 100644 --- a/man/raster.moments.Rd +++ b/man/raster.moments.Rd @@ -26,9 +26,8 @@ A terra SpatRaster object representing the local distributional moment \description{ Calculates focal statistical moments of a raster } -\note{ -This is a simple wrapper for the focal function, returning local - statistical moments +\details{ +This is a simple wrapper for the terra focal function, returning local statistical moments } \examples{ \donttest{ diff --git a/man/raster.transformation.Rd index ea85d61..970db40 100644 --- a/man/raster.transformation.Rd +++ b/man/raster.transformation.Rd @@ -21,7 +21,8 @@ A terra SpatRaster class object of specified transformation } \description{ Transforms raster to a specified statistical transformation - +} +\details{ Transformation option details: \itemize{ \item norm - Normalization (0-1): if min(x) < 0 ( x - min(x) ) / ( max(x) - min(x) ) diff --git a/man/raster.vol.Rd index e688623..427f903 100644 --- a/man/raster.vol.Rd +++ b/man/raster.vol.Rd @@ -33,8 +33,7 @@ Calculates a percent volume on a raster or based on a systematic sample } \note{ -Since this model needs to operate on all of the raster values, -it is not memory safe +Since this model needs to operate on all of the raster values, it is not memory safe } \examples{ \donttest{ diff --git a/man/rasterCorrelation.Rd index 9603abe..f9254be 100644 --- a/man/rasterCorrelation.Rd +++ b/man/rasterCorrelation.Rd @@ -23,12 +23,11 @@ s=c(3,5) for 3 x 5 window)} A terra SpatRaster class object } \description{ -Performs a moving window correlation between - two rasters +Performs a moving window correlation between two rasters } \note{ The NA behavior is set to na.rm = TRUE to make default outputs - consistent between the terra and raster packages. +consistent between the terra and raster packages.
} \examples{ \donttest{ diff --git a/man/rasterDistance.Rd index 87d080b..9abf99b 100644 --- a/man/rasterDistance.Rd +++ b/man/rasterDistance.Rd @@ -14,13 +14,13 @@ rasterDistance(x, y, scale = FALSE) \item{scale}{(FALSE/TRUE) Perform a row standardization on results} } \value{ -a distance terra SpatRast raster +A terra SpatRaster representing distances } \description{ Calculates the Euclidean distance of a defined raster class and all the other cells in a raster } -\note{ +\details{ This replicates the terra distance function but uses the Arya & Mount Approximate Near Neighbor (ANN) C++ library for calculating distances. While this results in a notable increase in performance, it is not memory safe, needing to read @@ -28,6 +28,7 @@ in the entire raster and does not use the GeographicLib (Karney, 2013) spheroid distance method for geographic data. } \examples{ +\donttest{ library(sf) library(terra) @@ -49,15 +50,7 @@ rd <- rasterDistance(rnc, y=ids) plot(rd) plot( st_geometry(nc.sub), add=TRUE) -#### Benchmark rasterDistance and terra::distance -#### at res=90m the differences are quite notable -# ref <- rast(ext(nc), resolution=500) -# rnc <- mask(rasterize(vect(nc.sub), ref, background=2), -# vect(nc)) -# crs(rnc) <- "ESRI:102008" -# system.time({ rasterDistance(rnc, y=1) }) -# system.time({ distance(rnc, target=2) }) - +} } \references{ Arya S., Mount D. M., Netanyahu N. S., Silverman R. and Wu A. Y (1998), An diff --git a/man/remove.holes.Rd index a6047e8..4030211 100644 --- a/man/remove.holes.Rd +++ b/man/remove.holes.Rd @@ -18,7 +18,7 @@ sf POLYGON object Removes or returns all holes (null geometry) in sf polygon class objects } -\note{ +\details{ A hole is considered a polygon within a polygon (island) representing null geometry. If you want to return only holes, no longer NULL, use keep = TRUE. To delete holes use default only.holes = FALSE. Single part features will be diff --git a/man/remove_duplicates.Rd index d6af3c1..0b6977a 100644 --- a/man/remove_duplicates.Rd +++ b/man/remove_duplicates.Rd @@ -18,7 +18,7 @@ sf object, of same feature class as x, with duplicate geometries removed \description{ Removes duplicate geometries in a single-part feature class } -\note{ +\details{ This function removes duplicate geometries based on order and not "non null" attribution or other factors, the first feature gets to stay. If one needs to know which points were removed sf::st_difference can be used between original diff --git a/man/rotate.polygon.Rd index f01100b..a78a28b 100644 --- a/man/rotate.polygon.Rd +++ b/man/rotate.polygon.Rd @@ -27,7 +27,7 @@ an sp or sf polygon object with rotated polygon \description{ rotates polygon by specified angle } -\note{ +\details{ The anchor is the location that the rotation is anchored to. The center is the centroid where the lower.left and upper.right are based on the min or max of the coordinates respectively.
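The anchored rotation that rotate.polygon describes can be expressed compactly with sf affine operators. The following is a minimal sketch, not the package's implementation; the helper rotate_about(), its degree-based angle argument, and the centroid default anchor are illustrative assumptions (note that sf drops the CRS under affine operations).

library(sf)

# Illustrative helper (assumed, not spatialEco::rotate.polygon): rotate an sfc
# geometry about an anchor point using a 2D rotation matrix
rotate_about <- function(geom, angle, anchor = sf::st_centroid(geom)) {
  theta <- angle * pi / 180                      # degrees to radians
  R <- matrix(c(cos(theta), sin(theta),
                -sin(theta), cos(theta)), 2, 2)  # 2D rotation matrix
  (geom - anchor) * R + anchor                   # shift to anchor, rotate, shift back
}

p <- sf::st_sfc(sf::st_polygon(list(rbind(c(0,0), c(2,0), c(2,1), c(0,1), c(0,0)))))
p45 <- rotate_about(p, 45)  # rotate about the polygon centroid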
diff --git a/man/sa.trans.Rd index 72e2d5d..0574cea 100644 --- a/man/sa.trans.Rd +++ b/man/sa.trans.Rd @@ -31,7 +31,8 @@ A vector of the modeled value \description{ The Trigonometric Stage (1978) [slope * cos(aspect)] or [slope * sin(aspect)] - +} +\details{ An a priori assumption of a maximum in the NW quadrant (45 azimuth) and a minimum in the SW quadrant can be replaced by an empirically determined location of the optimum without repeated calculations of @@ -49,12 +50,11 @@ values. By default this model tests all values greater than 100\% to 101\% and flat areas (-1) to nodata. } \examples{ - sa.trans(slope = 48.146, aspect = 360.000) - library(terra) -elev <- rast(system.file("extdata/elev.tif", package="spatialEco")) +sa.trans(slope = 48.146, aspect = 360.000) -# Example of slope*cos(aspect) +# Example of creating slope*cos(aspect) raster +elev <- rast(system.file("extdata/elev.tif", package="spatialEco")) sa <- terra::terrain(elev, v=c("slope", "aspect"), unit="degrees") scosa <- terra::lapp(c(sa[[1]], sa[[2]]), fun = sa.trans) diff --git a/man/sample.annulus.Rd index e45adad..9eebb60 100644 --- a/man/sample.annulus.Rd +++ b/man/sample.annulus.Rd @@ -20,14 +20,14 @@ sample.annulus(x, r1, r2, size = 10, ...) \item{...}{Additional arguments passed to sf::st_sample} } \value{ -sp SpatialPointsataFrame OBJECT +An sf POINT object } \description{ Creates sample points based on annulus with defined - inner and outer radius +inner and outer radius } -\note{ -Function can be used for distance based sampling. This is a sampling method +\details{ +Function can be used for distance based sampling, a sampling method that can be used to capture spatially lagged variation. } \examples{ @@ -36,7 +36,6 @@ that can be used to capture spatially lagged variation. data(meuse, package = "sp") meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, agr = "constant") - } xy <- meuse[2,] rs100 <- sample.annulus(xy, r1=50, r2=100, size = 50) @@ -48,7 +47,7 @@ that can be used to capture spatially lagged variation. legend("topright", legend=c("50-100m", "100-200m", "source"), pch=c(20,20,20), col=c("blue","red","black")) - \dontrun{ +\donttest{ # Run on multiple points rs100 <- sample.annulus(meuse[1:3,], r1=50, r2=100, size = 50) @@ -60,6 +59,10 @@ plot(st_geometry(rs200), pch=20, col="red") legend("topright", legend=c("50-100m", "100-200m", "source"), pch=c(20,20,20), col=c("blue","red","black")) } +} else { + cat("Please install sp package to run example", "\n") +} + } \author{ Jeffrey S. Evans diff --git a/man/sample.line.Rd deleted file mode 100644 index 2508bbf..0000000 --- a/man/sample.line.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/sample.line.R -\name{sample.line} -\alias{sample.line} -\title{Sample Lines} -\usage{ -sample.line(...) -} -\arguments{ -\item{...}{arguments passed to sf::st_sample} -} -\value{ -NA -} -\description{ -Creates a sample for each line in a - sf LINESTRING class object -} -\examples{ - \dontrun{ - sf::sf_sample() -} - -} diff --git a/man/sample.poly.Rd deleted file mode 100644 index 6405825..0000000 --- a/man/sample.poly.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/sample.poly.R -\name{sample.poly} -\alias{sample.poly} -\title{Sample Polygons} -\usage{ -sample.poly(...)
-} -\arguments{ -\item{...}{arguments passed to sf::st_sample} -} -\value{ -NA -} -\description{ -Creates an equal sample of n for each polygon in an - sp Polygon class object -} -\examples{ - \dontrun{ - sf::sf_sample() -} - -} diff --git a/man/sampleTransect.Rd index 6ad6680..9b536bf 100644 --- a/man/sampleTransect.Rd +++ b/man/sampleTransect.Rd @@ -1,5 +1,5 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/sample.transect.R +% Please edit documentation in R/sampleTransect.R \name{sampleTransect} \alias{sampleTransect} \title{Sample transect} @@ -31,11 +31,15 @@ to define transect direction} \item{...}{Additional arguments passed to st_sample} } +\value{ +A list object containing sf LINES and POINTS objects representing random transects +and sample points along each transect. The "ID" column links the resulting data. +} \description{ Creates random transects from points and generates sample points along each transect } -\note{ +\details{ Function creates lines and samples using random or defined direction and length transects and then creates a point sample along each transect. The characteristics of the sample points are defined by arguments passed @@ -43,13 +47,11 @@ to the sf::st_sample function. The distance and azimuth arguments allow for specifying the exact length and direction for each point's transect. } \examples{ - -library(sf) if(require(sp, quietly = TRUE)) { +library(sf) data(meuse, package = "sp") meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, agr = "constant") -} meuse <- meuse[sample(1:nrow(meuse),10),] transects <- sampleTransect(meuse, min.dist=200, max.dist=500, @@ -59,6 +61,10 @@ transects <- sampleTransect(meuse, min.dist=200, max.dist=500, plot(st_geometry(transects$samples), col="red", pch=19, add=TRUE) +} else { + cat("Please install sp package to run example", "\n") +} + } \author{ Jeffrey S. Evans diff --git a/man/sar.Rd index a0c1ae4..56e2713 100644 --- a/man/sar.Rd +++ b/man/sar.Rd @@ -20,7 +20,7 @@ A terra SpatRaster class object of the Surface Area Ratio \description{ Calculates the Berry (2002) Surface Area Ratio based on slope } -\note{ +\details{ SAR is calculated as: resolution^2 * cos( (degrees(slope) * (pi / 180)) ) } \examples{ diff --git a/man/separability.Rd index b9c4e36..51f0ae9 100644 --- a/man/separability.Rd +++ b/man/separability.Rd @@ -38,7 +38,8 @@ A data.frame with the following separability metrics: } \description{ Calculates a variety of two-class sample separability metrics - +} +\details{ Available statistics: \itemize{ \item M-Statistic (Kaufman & Remer 1994) - This is a measure of the difference of the diff --git a/man/sf_dissolve.Rd index c9b2a2f..1a2e963 100644 --- a/man/sf_dissolve.Rd +++ b/man/sf_dissolve.Rd @@ -19,7 +19,7 @@ A dissolved POLYGON or MULTIPOLYGON object \description{ Dissolve polygon feature class } -\note{ +\details{ If a dissolve attribute is defined, the result will be a MULTIPOLYGON with the grouping attribute column. If y=NULL all polygons will be dissolved into a single attribute, unless there is spatial diff --git a/man/sg.smooth.Rd index f8a16a4..2d6eec9 100644 --- a/man/sg.smooth.Rd +++ b/man/sg.smooth.Rd @@ -21,43 +21,42 @@ Defines degree of smoothing} \item{...}{not used} } \value{ -A vector of the smoothed data equal to length of x. Please note; - NA values are retained +A vector of the smoothed data equal to length of x.
Please note: NA values are retained } \description{ Smoothing of time-series data using Savitzky-Golay convolution smoothing } \examples{ - y <- c(0.112220988, 0.055554941, 0.013333187, 0.055554941, 0.063332640, 0.014444285, - 0.015555384, 0.057777140, 0.059999339, 0.034444068, 0.058888242, 0.136665165, - 0.038888458, 0.096665606,0.141109571, 0.015555384, 0.012222088, 0.012222088, - 0.072221428, 0.052221648, 0.087776810,0.014444285, 0.033332966, 0.012222088, - 0.032221869, 0.059999339, 0.011110989, 0.011110989,0.042221759, 0.029999670, - 0.018888680, 0.098887801, 0.016666483, 0.031110767, 0.061110441,0.022221979, - 0.073332526, 0.012222088, 0.016666483, 0.012222088, 0.122220881, 0.134442955, - 0.094443403, 0.128887475, 0.045555055, 0.152220547, 0.071110331, 0.018888680, - 0.022221979, 0.029999670, 0.035555165, 0.014444285, 0.049999449, 0.074443623, - 0.068888135, 0.062221535, 0.032221869, 0.095554501, 0.143331751, 0.121109776, - 0.065554835, 0.074443623, 0.043332856, 0.017777583, 0.016666483, 0.036666263, - 0.152220547, 0.032221869, 0.009999890, 0.009999890, 0.021110879, 0.025555275, - 0.099998899, 0.015555384, 0.086665712, 0.008888791, 0.062221535, 0.044443958, - 0.081110224, 0.015555384, 0.089999005, 0.082221314, 0.056666043, 0.013333187, - 0.048888352, 0.075554721, 0.025555275, 0.056666043, 0.146665052, 0.118887581, - 0.125554174, 0.024444176, 0.124443069, 0.012222088, 0.126665279, 0.048888352, - 0.046666153, 0.141109571, 0.015555384, 0.114443190) - - plot(y, type="l", lty = 3, main="Savitzky-Golay with l = 51, 25, 10") - lines(sg.smooth(y),col="red", lwd=2) - lines(sg.smooth(y, l = 25),col="blue", lwd=2) - lines(sg.smooth(y, l = 10),col="green", lwd=2) +y <- c(0.112220988, 0.055554941, 0.013333187, 0.055554941, 0.063332640, 0.014444285, + 0.015555384, 0.057777140, 0.059999339, 0.034444068, 0.058888242, 0.136665165, + 0.038888458, 0.096665606,0.141109571, 0.015555384, 0.012222088, 0.012222088, + 0.072221428, 0.052221648, 0.087776810,0.014444285, 0.033332966, 0.012222088, + 0.032221869, 0.059999339, 0.011110989, 0.011110989,0.042221759, 0.029999670, + 0.018888680, 0.098887801, 0.016666483, 0.031110767, 0.061110441,0.022221979, + 0.073332526, 0.012222088, 0.016666483, 0.012222088, 0.122220881, 0.134442955, + 0.094443403, 0.128887475, 0.045555055, 0.152220547, 0.071110331, 0.018888680, + 0.022221979, 0.029999670, 0.035555165, 0.014444285, 0.049999449, 0.074443623, + 0.068888135, 0.062221535, 0.032221869, 0.095554501, 0.143331751, 0.121109776, + 0.065554835, 0.074443623, 0.043332856, 0.017777583, 0.016666483, 0.036666263, + 0.152220547, 0.032221869, 0.009999890, 0.009999890, 0.021110879, 0.025555275, + 0.099998899, 0.015555384, 0.086665712, 0.008888791, 0.062221535, 0.044443958, + 0.081110224, 0.015555384, 0.089999005, 0.082221314, 0.056666043, 0.013333187, + 0.048888352, 0.075554721, 0.025555275, 0.056666043, 0.146665052, 0.118887581, + 0.125554174, 0.024444176, 0.124443069, 0.012222088, 0.126665279, 0.048888352, + 0.046666153, 0.141109571, 0.015555384, 0.114443190) + +plot(y, type="l", lty = 3, main="Savitzky-Golay with l = 51, 25, 10") + lines(sg.smooth(y),col="red", lwd=2) + lines(sg.smooth(y, l = 25),col="blue", lwd=2) + lines(sg.smooth(y, l = 10),col="green", lwd=2) - #### function applied to a multi-band raster - library(terra) - ( r <- spatialEco::random.raster(n.layers=20) ) - - # raster stack example - ( r.sg <- app(r, sg.smooth) ) +#### function applied to a multi-band raster +library(terra) +( r <- spatialEco::random.raster(n.layers=20) ) + +# raster stack example +( r.sg <-
app(r, sg.smooth) ) } \references{ diff --git a/man/shannons.Rd index 74a60e1..0dda56c 100644 --- a/man/shannons.Rd +++ b/man/shannons.Rd @@ -22,7 +22,7 @@ evenness where H / max( sum(x) ) ) and ESN \description{ Calculates Shannon's Diversity Index and Shannon's Evenness Index } -\note{ +\details{ The expected range for H is 0-3+ where a value of 2 has been suggested as medium-high diversity, for evenness is 0-1 with 0 signifying no evenness and 1, complete evenness. } diff --git a/man/shift.Rd index b60f34c..0fa96b5 100644 --- a/man/shift.Rd +++ b/man/shift.Rd @@ -14,8 +14,7 @@ shift(x, lag = 1, pad = NA) \item{pad}{Value to fill the lagged offset with, default is NA} } \value{ -a vector, length equal to x, with offset length filled - with pad values +A vector, length equal to x, with offset length filled with pad values } \description{ Shift a vector by specified positive or negative lag diff --git a/man/sieve.Rd index 6e00bdc..a5d9825 100644 --- a/man/sieve.Rd +++ b/man/sieve.Rd @@ -14,14 +14,15 @@ sieve(x, a, unit = c("m", "km", "ha")) \item{unit}{The unit to use for area query options are c("m", "km", "ha")} } \value{ -A terra SpatRaster with < a set to NA +A terra SpatRaster with cells < a set to NA } \description{ Removes contiguous cells < specified query area } -\note{ +\details{ A sieve can be used to establish a minimal mapping unit where -contiguous cells < specified query area are set to NA +contiguous cells < specified query area are set to NA. These NA +values can then be filled using focal (majority, median, mean). } \examples{ \donttest{ @@ -36,3 +37,6 @@ sv <- spatialEco::sieve(x, a = 60, unit = "km") plot(c(x, sv)) } } +\author{ +Jeffrey S. Evans +} diff --git a/man/similarity.Rd index 3e2cbb0..95856d2 100644 --- a/man/similarity.Rd +++ b/man/similarity.Rd @@ -35,7 +35,7 @@ freq column represents the number of times a row (ID) was selected as a neighbor \description{ Uses row imputation to identify "k" ecologically similar observations } -\note{ +\details{ This function uses row-based imputation to identify k similar neighbors for each observation. Has been used to identify offsets based on ecological similarity. } diff --git a/man/smooth.time.series.Rd index 917c8ce..70e3404 100644 --- a/man/smooth.time.series.Rd +++ b/man/smooth.time.series.Rd @@ -33,7 +33,7 @@ The results can dramatically be affected by the choice of the smoothing parameter (f) so caution is warranted and the effect of this parameter tested. } \examples{ -\dontrun{ +\donttest{ library(terra) random.raster <- function(rows=50, cols=50, l=20, min=0, max=1){ do.call(c, replicate(l, rast(matrix(runif(rows * cols, min, max), diff --git a/man/sobal.Rd index bd933f8..264e904 100644 --- a/man/sobal.Rd +++ b/man/sobal.Rd @@ -20,7 +20,8 @@ A raster class object or raster written to disk } \description{ An isotropic image gradient operator using a 3x3 window - +} +\details{ The Sobel-Feldman operator is a discrete differentiation operator, deriving an approximation of the gradient of the intensity function.
An abrupt discontinuity in the gradient function represents edges, making this a common approach for edge diff --git a/man/sp.na.omit.Rd deleted file mode 100644 index 5527ce7..0000000 --- a/man/sp.na.omit.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/sp.na.omit.R -\name{sp.na.omit} -\alias{sp.na.omit} -\title{sp na.omit} -\usage{ -sp.na.omit(...) -} -\arguments{ -\item{...}{arguments passed to stats::na.omit} -} -\value{ -NA -} -\description{ -Removes row or column NA's in sp object -} diff --git a/man/spatial.select.Rd index cf65f6a..c49ff57 100644 --- a/man/spatial.select.Rd +++ b/man/spatial.select.Rd @@ -34,6 +34,15 @@ Performs a spatial select (feature subset) between a polygon(s) and other feature class } \details{ +Performs a spatial select of features based on an overlay of a polygon (x), +which can represent multiple features, and polygon, point or line feature +classes (y). User can specify a partial or complete intersection, using within +argument, or within a distance, using distance argument, predicated on the +query polygon. This function is similar to ArcGIS/Pro spatial select. Please note +that for point to point neighbor selections use the knn function. +Valid spatial predicates include: intersect, touches, covers, contains, proximity +and contingency. +See the DE-9IM topology model for detailed information on the following predicates. \itemize{ \item intersection Create a spatial intersection between two features \item intersect Boolean evaluation of features intersecting @@ -44,22 +53,12 @@ feature class \item contingency Evaluation of polygon contingency (eg., 1st, 2nd order) } } -\note{ -Performs a spatial select of features based on an overlay of a polygon (x), -which can represent multiple features, and a polygon, point or line feature -classes (y). User can specify a partial or complete intersection, using within -argument, or within a distance, using distance argument, predicated on the -query polygon. This function is similar to ArcGIS/Pro spatial select. Please note -that for point to point neighbor selections use the knn function. -Valid spatial predicates include: intersect, touches, covers, contains, proximity -and contingency. See DE-9IM topology model for detailed information on data predicates.
-} \examples{ +if(require(sp, quietly = TRUE)) { library(sf) - if(require(sp, quietly = TRUE)) { - data(meuse, package = "sp") - meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, - agr = "constant") + data(meuse, package = "sp") + meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, + agr = "constant") spolys <- hexagons(meuse, res=100) spolys$ID <- 1:nrow(spolys) @@ -85,7 +84,10 @@ plot( spolys <- sf::st_make_grid(sf::st_sfc(sf::st_point(c(0,0)), spatial.select(x=spolys, predicate = "contingency") spatial.select(spolys, predicate = "contingency", neighbors = "rook") +} else { + cat("Please install sp package to run example", "\n") } + } \seealso{ \code{\link[sf]{st_intersection}} for details on intersection predicate diff --git a/man/se.news.Rd b/man/spatialEcoNews.Rd similarity index 50% rename from man/se.news.Rd rename to man/spatialEcoNews.Rd index 5625fcf..a792c26 100644 --- a/man/se.news.Rd +++ b/man/spatialEcoNews.Rd @@ -1,14 +1,17 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/se.news.R -\name{se.news} -\alias{se.news} +% Please edit documentation in R/spatialEcoNews.R +\name{spatialEcoNews} +\alias{spatialEcoNews} \title{spatialEco news} \usage{ -se.news(...) +spatialEcoNews(...) } \arguments{ \item{...}{not used} } +\value{ +Shows the package NEWS file +} \description{ Displays release notes } diff --git a/man/spectral.separability.Rd index 00aec51..16da125 100644 --- a/man/spectral.separability.Rd +++ b/man/spectral.separability.Rd @@ -16,12 +16,12 @@ row wise values in x} \item{jeffries.matusita}{(TRUE/FALSE) Return J-M distance (default) else Bhattacharyya} } \value{ -A matrix of class-wise Jeffries-Matusita or Bhattacharyya distance -separability values +A matrix of class-wise Jeffries-Matusita or Bhattacharyya distance separability values } \description{ Calculates spectral separability by class - +} +\details{ Available statistics: \itemize{ \item Bhattacharyya distance (Bhattacharyya 1943; Harold 2003) measures the similarity @@ -32,8 +32,8 @@ where 2 suggests complete separability. } } \examples{ -#' # Create example data require(MASS) +# Create example data d <- 6 # Number of bands n.class <- 5 # Number of classes n <- rep(1000, 5) diff --git a/man/squareBuffer.Rd index f8c502f..6ce2ef6 100644 --- a/man/squareBuffer.Rd +++ b/man/squareBuffer.Rd @@ -19,7 +19,7 @@ A single feature sf class polygon object \description{ Creates a square buffer of a feature class } -\note{ +\details{ Function creates a square buffer of a feature class. } \examples{ diff --git a/man/srr.Rd index 8836bdd..282794e 100644 --- a/man/srr.Rd +++ b/man/srr.Rd @@ -19,14 +19,14 @@ A terra SpatRaster object of Pike's (1971) Surface Relief Ratio \description{ Calculates the Pike (1971) Surface Relief Ratio } -\note{ +\details{ Describes rugosity in a continuous raster surface within a specified window.
The implementation of SRR can be shown as: (mean(x) - min(x)) / (max(x) - min(x)) } \examples{ \donttest{ - library(terra) - elev <- rast(system.file("extdata/elev.tif", package="spatialEco")) +library(terra) +elev <- rast(system.file("extdata/elev.tif", package="spatialEco")) r.srr <- srr(elev, s=5) plot(r.srr, main="Surface Relief Ratio") } } diff --git a/man/stratified.random.Rd index 1383d02..9c35d92 100644 --- a/man/stratified.random.Rd +++ b/man/stratified.random.Rd @@ -23,16 +23,14 @@ An sf class object containing random samples \description{ Creates a stratified random sample of an sp class object } -\note{ +\details{ If replace=FALSE features are removed from consideration in subsequent replicates. Conversely, if replace=TRUE, a feature can be selected multiple times across replicates. Not applicable if rep=1. - -Depends: sf } \examples{ +if(require(sp, quietly = TRUE)) { library(sf) - if(require(sp, quietly = TRUE)) { data(meuse, package = "sp") meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, agr = "constant") @@ -66,7 +64,11 @@ ssample.ct <- stratified.random(meuse, strata='STRAT', n=1, reps=10, # Plot random samples colored by replacement ssample$REP <- factor(ssample$REP) plot(ssample['REP'], pch=20) -} + +} else { + cat("Please install sp package to run example", "\n") +} + } \references{ Hudak, A.T., N.L. Crookston, J.S. Evans, M.J. Falkowski, A.M.S. Smith, P. Gessler diff --git a/man/subsample.distance.Rd index 9c147e2..5bde040 100644 --- a/man/subsample.distance.Rd +++ b/man/subsample.distance.Rd @@ -31,8 +31,8 @@ This function provides a distance constrained subsample of existing point \examples{ \donttest{ +if(require(sp, quietly = TRUE)) { library(sf) - if(require(sp, quietly = TRUE)) { data(meuse, package = "sp") meuse <- st_as_sf(meuse, coords = c("x", "y"), crs = 28992, agr = "constant") @@ -47,7 +47,10 @@ dm <- st_distance(sub.meuse) diag(dm) <- NA cat("\n", "Min distance for subsample", min(dm, na.rm=TRUE), "\n") cat("Max distance for subsample", max(dm, na.rm=TRUE), "\n") - } + +} else { + cat("Please install sp package to run example", "\n") +} } } \author{ diff --git a/man/summary.cross.cor.Rd index 788411b..e815932 100644 --- a/man/summary.cross.cor.Rd +++ b/man/summary.cross.cor.Rd @@ -11,6 +11,26 @@ \item{...}{Ignored} } +\value{ +When not simulated (k=0), prints a list object containing: +\itemize{ +\item I - Global autocorrelation statistic +\item SCI - A data.frame with two columns representing the xy and yx autocorrelation +\item nsim - NULL, indicating p-values were derived from observed data (k=0) +\item p - Probability based on observations above/below the confidence interval +\item t.test - Probability based on t-test +} + +When simulated (k>0), prints a list object containing: +\itemize{ +\item I - Global autocorrelation statistic +\item SCI - A data.frame with two columns representing the xy and yx autocorrelation +\item nsim - value representing number of simulations +\item global.p - p-value of the global autocorrelation statistic +\item local.p - Probability based on simulated data using successful rejection of the t-test +\item range.p - Probability based on the range of probabilities resulting from the paired t-test +} +} \description{ summary method for class "cross.cor" } diff --git a/man/summary.effect.size.Rd index 2ff2db3..7f32103 100644 --- a/man/summary.effect.size.Rd +++ b/man/summary.effect.size.Rd @@ -11,6 +11,10 @@
\item{...}{Ignored} } +\value{ +Prints the output data.frame containing: effect size with upper and lower confidence +limits, and mean and sd by group +} \description{ Summary method for class "effect.size". } diff --git a/man/summary.loess.boot.Rd index 10199af..cc99442 100644 --- a/man/summary.loess.boot.Rd +++ b/man/summary.loess.boot.Rd @@ -11,6 +11,28 @@ \item{...}{Ignored} } +\value{ +Same as print.loess.boot, a data.frame including: +\itemize{ +\item nreps Number of bootstrap replicates +\item confidence Confidence interval (region) +\item span alpha (span) parameter used in loess fit +\item degree polynomial degree used in loess fit +\item normalize Normalized data (TRUE/FALSE) +\item family Family of statistic used in fit +\item parametric Parametric approximation (TRUE/FALSE) +\item surface Surface fit, see loess.control +\item data data.frame of x,y used in model +\item fit data.frame including: +\enumerate{ +\item x - Equally-spaced x index (see NOTES) +\item y.fit - loess fit +\item up.lim - Upper confidence interval +\item low.lim - Lower confidence interval +\item stddev - Standard deviation of loess fit at each x value +} +} +} \description{ Summary method for class "loess.boot". } diff --git a/man/swvi.Rd index 8bb3519..55f8c42 100644 --- a/man/swvi.Rd +++ b/man/swvi.Rd @@ -46,15 +46,14 @@ A terra SpatRaster class object of the weighted MSAVI metric Modified Soil-adjusted Vegetation Index (MSAVI) or Modified Triangular Vegetation Index 2 (MTVI) weighted by the Normalized difference senescent vegetation index (NDSVI) - +} +\details{ The intent of this index is to correct the MSAVI or MTVI index for bias associated with senescent vegetation. This is done by: -\itemize{ -\item deriving the NDSVI; -\item applying a threshold to limit NDSVI to values associated with senescent vegetation; -\item converting the index to inverted weights (-1*(NDSVI/sum(NDSVI))); -\item applying weights to MSAVI or MTVI -} +1. deriving the NDSVI +2. applying a threshold to limit NDSVI to values associated with senescent vegetation +3. converting the index to inverted weights (-1*(NDSVI/sum(NDSVI))) +4. applying weights to MSAVI or MTVI The MSAVI formula follows the modification proposed by Qi et al. (1994), often referred to as MSAVI2. MSAVI index reduces soil noise and increases @@ -77,27 +76,20 @@ but if specified all values (MSAVI <= threshold) will be NA. Applying a weight.f used to change the influence of the weights on MSAVI.
} \examples{ -\dontrun{ -# library(terra) -# if(!unlist(lapply("RStoolbox", requireNamespace, quietly=TRUE))) -# message("Can't run examples, please install RStoolbox") -# -# data(lsat) -# lsat <- radCor(lsat, metaData = readMeta(system.file( -# "external/landsat/LT52240631988227CUB02_MTL.txt", -# package="RStoolbox")), method = "apref") -# lsat <- rast(lsat) -# -# # Using Modified Soil-adjusted Vegetation Index (MSAVI) -# ( wmsavi <- swvi(red = lsat[[3]], nir = lsat[[4]], swir = lsat[[5]]) ) -# plotRGB(lsat, r=6,g=5,b=2, scale=1, stretch="lin") -# plot(wmsavi, legend=FALSE, col=rev(terrain.colors(100, alpha=0.35)), add=TRUE ) -# -# # Using Modified Triangular Vegetation Index 2 (MTVI) -# ( wmtvi <- swvi(red = lsat[[3]], nir = lsat[[4]], swir = lsat[[5]], -# green = lsat[[3]], mtvi = TRUE) ) -# plotRGB(lsat, r=6,g=5,b=2, scale=1, stretch="lin") -# plot(wmtvi, legend=FALSE, col=rev(terrain.colors(100, alpha=0.35)), add=TRUE ) +\donttest{ +library(terra) +lsat <- rast(system.file("/extdata/Landsat_TM5.tif", package="spatialEco")) + +# Using Modified Soil-adjusted Vegetation Index (MSAVI) +( wmsavi <- swvi(red = lsat[[3]], nir = lsat[[4]], swir = lsat[[5]]) ) + plotRGB(lsat, r=6,g=5,b=2, scale=1, stretch="lin") + plot(wmsavi, legend=FALSE, col=rev(terrain.colors(100, alpha=0.35)), add=TRUE ) + +# Using Modified Triangular Vegetation Index 2 (MTVI) +( wmtvi <- swvi(red = lsat[[3]], nir = lsat[[4]], swir = lsat[[5]], + green = lsat[[3]], mtvi = TRUE) ) + plotRGB(lsat, r=6,g=5,b=2, scale=1, stretch="lin") + plot(wmtvi, legend=FALSE, col=rev(terrain.colors(100, alpha=0.35)), add=TRUE ) } } diff --git a/man/time_to_event.Rd index e2b8f87..d23e656 100644 --- a/man/time_to_event.Rd +++ b/man/time_to_event.Rd @@ -32,7 +32,8 @@ A vector value representing the time to event } \description{ Returns the time (sum to position) to a specified value - +} +\details{ The time to event represents the sum of positions, in the vector, until the specified value is found ie., (0,0,1) would be 3, or 2 with up.to=TRUE. The int argument allows for rounding a continuous @@ -45,7 +46,6 @@ function behavior, causing it to fail or count NAs. Note that it makes no sense to actually remove NAs as it will make the run uninterpretable. } \examples{ - # Binomial instance time_to_event(c(0,0,0,0,1,0,0,0,1,0)) time_to_event(c(0,0,0,0,1,0,0,0,1,0), up.to = TRUE) diff --git a/man/topo.distance.Rd index a5952d9..02b04ae 100644 --- a/man/topo.distance.Rd +++ b/man/topo.distance.Rd @@ -9,7 +9,7 @@ topo.distance(x, r, echo = FALSE) \arguments{ \item{x}{sf LINESTRING object} -\item{r}{terra or raster class elevation raster} +\item{r}{terra SpatRaster class elevation raster} \item{echo}{(FALSE/TRUE) print progress to screen} } @@ -17,12 +17,10 @@ topo.distance(x, r, echo = FALSE) Vector of corrected topographic distances same length as nrow(x) } \description{ -Calculates topographic corrected distance for a - LINESTRING object +Calculates topographically corrected distance for a line object } -\note{ -This function corrects straight-line (euclidean) distances for -topographic-slope effect. +\details{ +This function corrects straight-line (Euclidean) distances for topographic-slope effect. } \examples{ library(sf) diff --git a/man/trasp.Rd index bdea94a..2f8f500 100644 --- a/man/trasp.Rd +++ b/man/trasp.Rd @@ -15,9 +15,9 @@ trasp(x, ...)
A terra SpatRaster object of Roberts and Cooper (1989) Solar-radiation Aspect Index } \description{ -Calculates the Roberts and Cooper (1989) Solar-radiation - Aspect Index - +Calculates the Roberts and Cooper (1989) Solar-radiation Aspect Index +} +\details{ Roberts and Cooper (1989) rotates (transforms) the circular aspect to assign a value of zero to land oriented in a north-northeast direction, (typically the coolest and wettest orientation), and a value of one on the hotter, drier @@ -34,8 +34,8 @@ elev <- rast(system.file("extdata/elev.tif", package="spatialEco")) } \references{ Roberts. D.W., and Cooper, S.V. (1989). Concepts and techniques of vegetation mapping. -In Land Classifications Based on Vegetation: Applications for Resource Management. -USDA Forest Service GTR INT-257, Ogden, UT, pp 90-96 + In Land Classifications Based on Vegetation: Applications for Resource Management. + USDA Forest Service GTR INT-257, Ogden, UT, pp 90-96 } \author{ Jeffrey S. Evans diff --git a/man/tri.Rd index 02e5921..928ad7a 100644 --- a/man/tri.Rd +++ b/man/tri.Rd @@ -22,12 +22,14 @@ A terra SpatRaster class object of the TRI } \description{ Implementation of the Riley et al (1999) Terrain Ruggedness Index - +} +\details{ The algebraic approximation is considerably faster. However, because of the inclusion of the center cell, the larger the scale the larger the divergence -of the minimum value. +of the minimum value. Results are driven by local variations, so fixed thresholds +are not very reliable. However, there are some recommended breaks (eg., Riley et al., 1999). -Recommended ranges for classifying Topographic Ruggedness Index: +Riley et al. (1999) ranges for classifying Topographic Ruggedness Index: \itemize{ \item 0-80 - level terrain surface. \item 81-116 - nearly level surface. diff --git a/man/vrm.Rd index 652a387..e932230 100644 --- a/man/vrm.Rd +++ b/man/vrm.Rd @@ -9,9 +9,8 @@ vrm(x, s = 3) } \arguments{ \item{x}{A terra SpatRaster class object} -\item{s}{Scale of window. Must be odd number, can -represent 2 dimensions (eg., s=c(3,5) would -represent a 3 x 5 window)} +\item{s}{Scale of window. Must be odd number, can represent 2 dimensions +(eg., s=c(3,5) would represent a 3 x 5 window)} } \value{ A terra SpatRaster class object of the VRI } \description{ Implementation of the Sappington et al., (2007) vector ruggedness measure } -\note{ +\details{ This function measures terrain ruggedness by calculating the vector ruggedness measure } diff --git a/man/winsorize.Rd index 018621a..d7dba46 100644 --- a/man/winsorize.Rd +++ b/man/winsorize.Rd @@ -33,7 +33,8 @@ minus number of NA's } \description{ Removes extreme outliers using a winsorization transformation - +} +\details{ Winsorization is the transformation of a distribution by limiting extreme values to reduce the effect of spurious outliers. This is done by shrinking outlying observations to the border of the main part of the distribution.
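Winsorization as described above amounts to clamping values at chosen quantiles. A minimal base R sketch follows, assuming 5th/95th percentile bounds; the helper winsor() and its arguments are illustrative and do not mirror the spatialEco::winsorize interface.

# Illustrative helper (assumed, not spatialEco::winsorize): clamp values outside
# the given quantile probabilities to the quantile bounds
winsor <- function(x, p.low = 0.05, p.high = 0.95) {
  q <- stats::quantile(x, probs = c(p.low, p.high), na.rm = TRUE)
  pmin(pmax(x, q[1]), q[2])  # shrink outliers to the border of the distribution
}

set.seed(42)
x <- c(rnorm(100), 25, -25)  # two spurious outliers
summary(winsor(x))           # extremes are pulled in to the percentile bounds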
diff --git a/man/wt.centroid.Rd index c009123..bb822ec 100644 --- a/man/wt.centroid.Rd +++ b/man/wt.centroid.Rd @@ -21,13 +21,11 @@ coordinate centroid Creates centroid of [x,y] coordinates with optional weights field } -\note{ +\details{ The weighted centroid is calculated as: [Xw]=[X]*[p], [Yw]=[Y]*[p], [sXw]=SUM[Xw], [sYw]=SUM[Yw], [sP]=SUM[p], wX=[sXw]/[sP], wY=[sYw]/[sP] where; X=X coordinate(S), Y=Y coordinate(S), p=WEIGHT (see the sketch below) - -Depends: sp } \examples{ p = c("sf", "sp") @@ -54,3 +52,6 @@ legend('topleft', legend=c('all','copper', 'zinc'), pch=c(20,19,19),col=c('black','red','blue')) } } +\author{ +Jeffrey S. Evans +} diff --git a/man/zonal.stats.Rd deleted file mode 100644 index a36d092..0000000 --- a/man/zonal.stats.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/zonal.stats.R -\name{zonal.stats} -\alias{zonal.stats} -\title{zonal.stats} -\usage{ -zonal.stats(...) -} -\arguments{ -\item{...}{arguments passed to terra::extract} -} -\value{ -NA -} -\description{ -Polygon zonal statistics of a raster -} -\examples{ - \dontrun{ - terra::extract() -} - -}
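The weighted centroid formula documented in wt.centroid above reduces to a pair of weighted means, wX = SUM(X*p)/SUM(p) and wY = SUM(Y*p)/SUM(p). A minimal base R sketch; the helper wt_centroid() is an illustrative assumption and, unlike the package function, operates on a plain coordinate matrix rather than an sf object.

# Illustrative helper (assumed, not spatialEco::wt.centroid)
wt_centroid <- function(xy, p) {
  c(x = sum(xy[, 1] * p) / sum(p),  # wX = SUM(X*p)/SUM(p)
    y = sum(xy[, 2] * p) / sum(p))  # wY = SUM(Y*p)/SUM(p)
}

set.seed(42)
xy <- cbind(runif(10), runif(10))  # point coordinates
p  <- runif(10)                    # weights (eg., abundance)
wt_centroid(xy, p)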