From ff37447369987a11ff95cbaaa3619e9bbfbf5c6c Mon Sep 17 00:00:00 2001 From: Jim Thorson Date: Mon, 17 Jul 2017 12:13:57 -0700 Subject: [PATCH 1/5] added v2_7_0... ... which allows calculation of the proportion `PropIndex_cyl` of total biomass in each time-interval, category, and stratum --- R/Data_Fn.R | 38 +- R/Param_Fn.R | 2 +- inst/executables/VAST_v2_7_0.cpp | 1005 ++++++++++++++++++++++++++++++ 3 files changed, 1030 insertions(+), 15 deletions(-) create mode 100644 inst/executables/VAST_v2_7_0.cpp diff --git a/R/Data_Fn.R b/R/Data_Fn.R index 4a64041..86dfa7d 100644 --- a/R/Data_Fn.R +++ b/R/Data_Fn.R @@ -39,7 +39,7 @@ #' @param Aniso OPTIONAL, whether to assume isotropy (Aniso=0) or geometric anisotropy (Aniso=1) #' @param RhoConfig OPTIONAL, vector of form c("Beta1"=0,"Beta2"=0,"Epsilon1"=0,"Epsilon2"=0) specifying whether either intercepts (Beta1 and Beta2) or spatio-temporal variation (Epsilon1 and Epsilon2) is structured among time intervals (0: each year as fixed effect; 1: each year as random following IID distribution; 2: each year as random following a random walk; 3: constant among years as fixed effect; 4: each year as random following AR1 process) #' @param t_yz OPTIONAL, matrix specifying combination of levels of \code{t_iz} to use when calculating different indices of abundance or range shifts -#' @param Options OPTIONAL, a vector of form c('SD_site_density'=0,'SD_site_logdensity'=0,'Calculate_Range'=0,'Calculate_evenness'=0,'Calculate_effective_area'=0,'Calculate_Cov_SE'=0,'Calculate_Synchrony'=0,'Calculate_Coherence'=0), where Calculate_Range=1 turns on calculation of center of gravity, and Calculate_effective_area=1 turns on calculation of effective area occupied +#' @param Options OPTIONAL, a vector of form c('SD_site_logdensity'=0,'Calculate_Range'=0,'Calculate_effective_area'=0,'Calculate_Cov_SE'=0,'Calculate_Synchrony'=0,'Calculate_proportion'=0), where Calculate_Range=1 turns on calculation of center of gravity, and Calculate_effective_area=1 turns on calculation of effective area occupied #' @param yearbounds_zz OPTIONAL, matrix with two columns, giving first and last years for defining one or more periods (rows) used to calculate changes in synchrony over time (only used if \code{Options['Calculate_Synchrony']=1}) #' @param CheckForErrors OPTIONAL, whether to check for errors in input (NOTE: when CheckForErrors=TRUE, the function will throw an error if it detects a problem with inputs. 
However, failing to throw an error is no guaruntee that the inputs are all correct) @@ -50,7 +50,18 @@ Data_Fn <- function( Version, FieldConfig, OverdispersionConfig=c("eta1"=0,"eta2"=0), ObsModel=c("PosDist"=1,"Link"=0), b_i, a_i, c_i, s_i, t_iz, a_xl, MeshList, GridList, Method, v_i=rep(0,length(b_i)), PredTF_i=rep(0,length(b_i)), X_xj=NULL, X_xtp=NULL, Q_ik=NULL, Aniso=1, RhoConfig=c("Beta1"=0,"Beta2"=0,"Epsilon1"=0,"Epsilon2"=0), t_yz=NULL, CheckForErrors=TRUE, yearbounds_zz=NULL, - Options=c('SD_site_density'=0,'SD_site_logdensity'=0,'Calculate_Range'=0,'Calculate_evenness'=0,'Calculate_effective_area'=0,'Calculate_Cov_SE'=0,'Calculate_Synchrony'=0,'Calculate_Coherence'=0) ){ + Options=c('SD_site_logdensity'=0,'Calculate_Range'=0,'Calculate_effective_area'=0,'Calculate_Cov_SE'=0,'Calculate_Synchrony'=0,'Calculate_proportion'=0) ){ + + # Specify default values for `Options` + Options2use = c('SD_site_density'=0,'SD_site_logdensity'=0,'Calculate_Range'=0,'Calculate_evenness'=0,'Calculate_effective_area'=0, + 'Calculate_Cov_SE'=0,'Calculate_Synchrony'=0,'Calculate_Coherence'=0,'Calculate_proportion'=0) + + # Replace defaults for `Options` with provided values (if any) + for( i in 1:length(Options)){ + if(tolower(names(Options)[i]) %in% tolower(names(Options2use))){ + Options2use[[match(tolower(names(Options)[i]),tolower(names(Options2use)))]] = Options[[i]] + } + } # Coerce t_iz to be a matrix if( !is.matrix(t_iz) ) t_iz = matrix(t_iz,ncol=1) @@ -105,10 +116,9 @@ function( Version, FieldConfig, OverdispersionConfig=c("eta1"=0,"eta2"=0), ObsMo print(OverdispersionConfig_input) # by default, add nothing as Z_xl - if( Options['Calculate_Range']==FALSE ){ + if( Options2use['Calculate_Range']==FALSE ){ Z_xm = matrix(0, nrow=nrow(a_xl), ncol=ncol(a_xl) ) # Size so that it works for Version 3g-3j - } - if(Options['Calculate_Range']==TRUE ){ + }else{ Z_xm = MeshList$loc_x message( "Calculating range shift for stratum #1:",colnames(a_xl[1])) } @@ -127,7 +137,7 @@ function( Version, FieldConfig, OverdispersionConfig=c("eta1"=0,"eta2"=0), ObsMo if( length(OverdispersionConfig)!=2 ) stop("length(OverdispersionConfig)!=2") if( length(ObsModel)!=2 ) stop("length(ObsModel)!=2") if( ncol(yearbounds_zz)!=2 ) stop("yearbounds_zz must have two columns") - if( Options['Calculate_Coherence']==1 & ObsModel[2]==0 ) stop("Calculating coherence only makes sense when 'ObsModel[2]=1'") + if( Options2use['Calculate_Coherence']==1 & ObsModel[2]==0 ) stop("Calculating coherence only makes sense when 'ObsModel[2]=1'") if( any(yearbounds_zz<0) | any(yearbounds_zz>=max(n_t)) ) stop("yearbounds_zz exceeds bounds for modeled years") if( ncol(t_yz)!=ncol(t_iz) ) stop("t_yz and t_iz must have same number of columns") if( n_c!=length(unique(c_i)) ) stop("n_c doesn't equal the number of levels in c_i") @@ -158,25 +168,25 @@ function( Version, FieldConfig, OverdispersionConfig=c("eta1"=0,"eta2"=0), ObsMo # CMP_xmax should be >100 and CMP_breakpoint should be 1 for Tweedie model Options_vec = c("Aniso"=Aniso, "R2_interpretation"=0, "Rho_betaTF"=ifelse(RhoConfig[["Beta1"]]|RhoConfig[["Beta2"]],1,0), "Alpha"=0, "AreaAbundanceCurveTF"=0, "CMP_xmax"=200, "CMP_breakpoint"=1, "Method"=switch(Method,"Mesh"=0,"Grid"=1,"Spherical_mesh"=0) ) if(Version%in%c("VAST_v1_1_0","VAST_v1_0_0")){ - Return = list( "n_i"=n_i, "n_s"=c(MeshList$anisotropic_spde$n.spde,n_x)[Options_vec['Method']+1], "n_x"=n_x, "n_t"=n_t, "n_c"=n_c, "n_j"=n_j, "n_p"=n_p, "n_k"=n_k, "n_l"=n_l, "n_m"=ncol(Z_xm), "Options_vec"=Options_vec, 
"FieldConfig"=FieldConfig_input, "ObsModel"=ObsModel, "Options"=Options, "b_i"=b_i, "a_i"=a_i, "c_i"=c_i, "s_i"=s_i, "t_i"=t_iz-min(t_iz[,1]), "a_xl"=a_xl, "X_xj"=X_xj, "X_xtp"=X_xtp, "Q_ik"=Q_ik, "Z_xm"=Z_xm, "spde"=list(), "spde_aniso"=list() ) + Return = list( "n_i"=n_i, "n_s"=c(MeshList$anisotropic_spde$n.spde,n_x)[Options_vec['Method']+1], "n_x"=n_x, "n_t"=n_t, "n_c"=n_c, "n_j"=n_j, "n_p"=n_p, "n_k"=n_k, "n_l"=n_l, "n_m"=ncol(Z_xm), "Options_vec"=Options_vec, "FieldConfig"=FieldConfig_input, "ObsModel"=ObsModel, "Options"=Options2use, "b_i"=b_i, "a_i"=a_i, "c_i"=c_i, "s_i"=s_i, "t_i"=t_iz-min(t_iz[,1]), "a_xl"=a_xl, "X_xj"=X_xj, "X_xtp"=X_xtp, "Q_ik"=Q_ik, "Z_xm"=Z_xm, "spde"=list(), "spde_aniso"=list() ) } if(Version%in%c("VAST_v1_4_0","VAST_v1_3_0","VAST_v1_2_0")){ - Return = list( "n_i"=n_i, "n_s"=c(MeshList$anisotropic_spde$n.spde,n_x)[Options_vec['Method']+1], "n_x"=n_x, "n_t"=n_t, "n_c"=n_c, "n_j"=n_j, "n_p"=n_p, "n_k"=n_k, "n_v"=n_v, "n_f_input"=OverdispersionConfig_input[1], "n_l"=n_l, "n_m"=ncol(Z_xm), "Options_vec"=Options_vec, "FieldConfig"=FieldConfig_input, "ObsModel"=ObsModel, "Options"=Options, "b_i"=b_i, "a_i"=a_i, "c_i"=c_i, "s_i"=s_i, "t_i"=t_iz-min(t_iz[,1]), "v_i"=match(v_i,sort(unique(v_i)))-1, "a_xl"=a_xl, "X_xj"=X_xj, "X_xtp"=X_xtp, "Q_ik"=Q_ik, "Z_xm"=Z_xm, "spde"=list(), "spde_aniso"=list() ) + Return = list( "n_i"=n_i, "n_s"=c(MeshList$anisotropic_spde$n.spde,n_x)[Options_vec['Method']+1], "n_x"=n_x, "n_t"=n_t, "n_c"=n_c, "n_j"=n_j, "n_p"=n_p, "n_k"=n_k, "n_v"=n_v, "n_f_input"=OverdispersionConfig_input[1], "n_l"=n_l, "n_m"=ncol(Z_xm), "Options_vec"=Options_vec, "FieldConfig"=FieldConfig_input, "ObsModel"=ObsModel, "Options"=Options2use, "b_i"=b_i, "a_i"=a_i, "c_i"=c_i, "s_i"=s_i, "t_i"=t_iz-min(t_iz[,1]), "v_i"=match(v_i,sort(unique(v_i)))-1, "a_xl"=a_xl, "X_xj"=X_xj, "X_xtp"=X_xtp, "Q_ik"=Q_ik, "Z_xm"=Z_xm, "spde"=list(), "spde_aniso"=list() ) } if(Version%in%c("VAST_v1_6_0","VAST_v1_5_0")){ - Return = list( "n_i"=n_i, "n_s"=c(MeshList$anisotropic_spde$n.spde,n_x)[Options_vec['Method']+1], "n_x"=n_x, "n_t"=n_t, "n_c"=n_c, "n_j"=n_j, "n_p"=n_p, "n_k"=n_k, "n_v"=n_v, "n_f_input"=OverdispersionConfig_input[1], "n_l"=n_l, "n_m"=ncol(Z_xm), "Options_vec"=Options_vec, "FieldConfig"=FieldConfig_input, "ObsModel"=ObsModel, "Options"=Options, "b_i"=b_i, "a_i"=a_i, "c_i"=c_i, "s_i"=s_i, "t_i"=t_iz-min(t_iz[,1]), "v_i"=match(v_i,sort(unique(v_i)))-1, "PredTF_i"=PredTF_i, "a_xl"=a_xl, "X_xj"=X_xj, "X_xtp"=X_xtp, "Q_ik"=Q_ik, "Z_xm"=Z_xm, "spde"=list(), "spde_aniso"=list() ) + Return = list( "n_i"=n_i, "n_s"=c(MeshList$anisotropic_spde$n.spde,n_x)[Options_vec['Method']+1], "n_x"=n_x, "n_t"=n_t, "n_c"=n_c, "n_j"=n_j, "n_p"=n_p, "n_k"=n_k, "n_v"=n_v, "n_f_input"=OverdispersionConfig_input[1], "n_l"=n_l, "n_m"=ncol(Z_xm), "Options_vec"=Options_vec, "FieldConfig"=FieldConfig_input, "ObsModel"=ObsModel, "Options"=Options2use, "b_i"=b_i, "a_i"=a_i, "c_i"=c_i, "s_i"=s_i, "t_i"=t_iz-min(t_iz[,1]), "v_i"=match(v_i,sort(unique(v_i)))-1, "PredTF_i"=PredTF_i, "a_xl"=a_xl, "X_xj"=X_xj, "X_xtp"=X_xtp, "Q_ik"=Q_ik, "Z_xm"=Z_xm, "spde"=list(), "spde_aniso"=list() ) } if(Version%in%c("VAST_v1_7_0")){ - Return = list( "n_i"=n_i, "n_s"=c(MeshList$anisotropic_spde$n.spde,n_x)[Options_vec['Method']+1], "n_x"=n_x, "n_t"=n_t, "n_c"=n_c, "n_j"=n_j, "n_p"=n_p, "n_k"=n_k, "n_v"=n_v, "n_l"=n_l, "n_m"=ncol(Z_xm), "Options_vec"=Options_vec, "FieldConfig"=FieldConfig_input, "OverdispersionConfig"=OverdispersionConfig_input, "ObsModel"=ObsModel, "Options"=Options, "b_i"=b_i, "a_i"=a_i, "c_i"=c_i, 
"s_i"=s_i, "t_i"=t_iz-min(t_iz[,1]), "v_i"=match(v_i,sort(unique(v_i)))-1, "PredTF_i"=PredTF_i, "a_xl"=a_xl, "X_xj"=X_xj, "X_xtp"=X_xtp, "Q_ik"=Q_ik, "Z_xm"=Z_xm, "spde"=list(), "spde_aniso"=list() ) + Return = list( "n_i"=n_i, "n_s"=c(MeshList$anisotropic_spde$n.spde,n_x)[Options_vec['Method']+1], "n_x"=n_x, "n_t"=n_t, "n_c"=n_c, "n_j"=n_j, "n_p"=n_p, "n_k"=n_k, "n_v"=n_v, "n_l"=n_l, "n_m"=ncol(Z_xm), "Options_vec"=Options_vec, "FieldConfig"=FieldConfig_input, "OverdispersionConfig"=OverdispersionConfig_input, "ObsModel"=ObsModel, "Options"=Options2use, "b_i"=b_i, "a_i"=a_i, "c_i"=c_i, "s_i"=s_i, "t_i"=t_iz-min(t_iz[,1]), "v_i"=match(v_i,sort(unique(v_i)))-1, "PredTF_i"=PredTF_i, "a_xl"=a_xl, "X_xj"=X_xj, "X_xtp"=X_xtp, "Q_ik"=Q_ik, "Z_xm"=Z_xm, "spde"=list(), "spde_aniso"=list() ) } if(Version%in%c("VAST_v1_8_0")){ - Return = list( "n_i"=n_i, "n_s"=c(MeshList$anisotropic_spde$n.spde,n_x)[Options_vec['Method']+1], "n_x"=n_x, "n_t"=n_t, "n_c"=n_c, "n_j"=n_j, "n_p"=n_p, "n_k"=n_k, "n_v"=n_v, "n_l"=n_l, "n_m"=ncol(Z_xm), "Options_vec"=Options_vec, "FieldConfig"=FieldConfig_input, "OverdispersionConfig"=OverdispersionConfig_input, "ObsModel"=ObsModel, "Options"=Options, "b_i"=b_i, "a_i"=a_i, "c_i"=c_i, "s_i"=s_i, "t_i"=t_iz-min(t_iz[,1]), "v_i"=match(v_i,sort(unique(v_i)))-1, "PredTF_i"=PredTF_i, "a_xl"=a_xl, "X_xj"=X_xj, "X_xtp"=X_xtp, "Q_ik"=Q_ik, "Z_xm"=Z_xm, "spde"=list(), "spde_aniso"=list(), "M0"=GridList$M0, "M1"=GridList$M1, "M2"=GridList$M2 ) + Return = list( "n_i"=n_i, "n_s"=c(MeshList$anisotropic_spde$n.spde,n_x)[Options_vec['Method']+1], "n_x"=n_x, "n_t"=n_t, "n_c"=n_c, "n_j"=n_j, "n_p"=n_p, "n_k"=n_k, "n_v"=n_v, "n_l"=n_l, "n_m"=ncol(Z_xm), "Options_vec"=Options_vec, "FieldConfig"=FieldConfig_input, "OverdispersionConfig"=OverdispersionConfig_input, "ObsModel"=ObsModel, "Options"=Options2use, "b_i"=b_i, "a_i"=a_i, "c_i"=c_i, "s_i"=s_i, "t_i"=t_iz-min(t_iz[,1]), "v_i"=match(v_i,sort(unique(v_i)))-1, "PredTF_i"=PredTF_i, "a_xl"=a_xl, "X_xj"=X_xj, "X_xtp"=X_xtp, "Q_ik"=Q_ik, "Z_xm"=Z_xm, "spde"=list(), "spde_aniso"=list(), "M0"=GridList$M0, "M1"=GridList$M1, "M2"=GridList$M2 ) } if(Version%in%c("VAST_v1_9_0")){ - Return = list( "n_i"=n_i, "n_s"=c(MeshList$anisotropic_spde$n.spde,n_x)[Options_vec['Method']+1], "n_x"=n_x, "n_t"=n_t, "n_c"=n_c, "n_j"=n_j, "n_p"=n_p, "n_k"=n_k, "n_v"=n_v, "n_l"=n_l, "n_m"=ncol(Z_xm), "Options_vec"=Options_vec, "FieldConfig"=FieldConfig_input, "OverdispersionConfig"=OverdispersionConfig_input, "ObsModel"=ObsModel, "Options"=Options, "yearbounds_zz"=yearbounds_zz, "b_i"=b_i, "a_i"=a_i, "c_i"=c_i, "s_i"=s_i, "t_i"=t_iz-min(t_iz[,1]), "v_i"=match(v_i,sort(unique(v_i)))-1, "PredTF_i"=PredTF_i, "a_xl"=a_xl, "X_xj"=X_xj, "X_xtp"=X_xtp, "Q_ik"=Q_ik, "Z_xm"=Z_xm, "spde"=list(), "spde_aniso"=list(), "M0"=GridList$M0, "M1"=GridList$M1, "M2"=GridList$M2 ) + Return = list( "n_i"=n_i, "n_s"=c(MeshList$anisotropic_spde$n.spde,n_x)[Options_vec['Method']+1], "n_x"=n_x, "n_t"=n_t, "n_c"=n_c, "n_j"=n_j, "n_p"=n_p, "n_k"=n_k, "n_v"=n_v, "n_l"=n_l, "n_m"=ncol(Z_xm), "Options_vec"=Options_vec, "FieldConfig"=FieldConfig_input, "OverdispersionConfig"=OverdispersionConfig_input, "ObsModel"=ObsModel, "Options"=Options2use, "yearbounds_zz"=yearbounds_zz, "b_i"=b_i, "a_i"=a_i, "c_i"=c_i, "s_i"=s_i, "t_i"=t_iz-min(t_iz[,1]), "v_i"=match(v_i,sort(unique(v_i)))-1, "PredTF_i"=PredTF_i, "a_xl"=a_xl, "X_xj"=X_xj, "X_xtp"=X_xtp, "Q_ik"=Q_ik, "Z_xm"=Z_xm, "spde"=list(), "spde_aniso"=list(), "M0"=GridList$M0, "M1"=GridList$M1, "M2"=GridList$M2 ) } - 
if(Version%in%c("VAST_v2_6_0","VAST_v2_5_0","VAST_v2_4_0","VAST_v2_3_0","VAST_v2_2_0","VAST_v2_1_0","VAST_v2_0_0")){ - Return = list( "n_i"=n_i, "n_s"=c(MeshList$anisotropic_spde$n.spde,n_x)[Options_vec['Method']+1], "n_x"=n_x, "n_t"=n_t, "n_c"=n_c, "n_j"=n_j, "n_p"=n_p, "n_k"=n_k, "n_v"=n_v, "n_l"=n_l, "n_m"=ncol(Z_xm), "Options_vec"=Options_vec, "FieldConfig"=FieldConfig_input, "OverdispersionConfig"=OverdispersionConfig_input, "ObsModel"=ObsModel, "Options"=Options, "yearbounds_zz"=yearbounds_zz, "b_i"=b_i, "a_i"=a_i, "c_i"=c_i, "s_i"=s_i, "t_iz"=t_iz-min(t_iz,na.rm=TRUE), "v_i"=match(v_i,sort(unique(v_i)))-1, "PredTF_i"=PredTF_i, "a_xl"=a_xl, "X_xj"=X_xj, "X_xtp"=X_xtp, "Q_ik"=Q_ik, "t_yz"=t_yz, "Z_xm"=Z_xm, "spde"=list(), "spde_aniso"=list(), "M0"=GridList$M0, "M1"=GridList$M1, "M2"=GridList$M2 ) + if(Version%in%c("VAST_v2_7_0","VAST_v2_6_0","VAST_v2_5_0","VAST_v2_4_0","VAST_v2_3_0","VAST_v2_2_0","VAST_v2_1_0","VAST_v2_0_0")){ + Return = list( "n_i"=n_i, "n_s"=c(MeshList$anisotropic_spde$n.spde,n_x)[Options_vec['Method']+1], "n_x"=n_x, "n_t"=n_t, "n_c"=n_c, "n_j"=n_j, "n_p"=n_p, "n_k"=n_k, "n_v"=n_v, "n_l"=n_l, "n_m"=ncol(Z_xm), "Options_vec"=Options_vec, "FieldConfig"=FieldConfig_input, "OverdispersionConfig"=OverdispersionConfig_input, "ObsModel"=ObsModel, "Options"=Options2use, "yearbounds_zz"=yearbounds_zz, "b_i"=b_i, "a_i"=a_i, "c_i"=c_i, "s_i"=s_i, "t_iz"=t_iz-min(t_iz,na.rm=TRUE), "v_i"=match(v_i,sort(unique(v_i)))-1, "PredTF_i"=PredTF_i, "a_xl"=a_xl, "X_xj"=X_xj, "X_xtp"=X_xtp, "Q_ik"=Q_ik, "t_yz"=t_yz, "Z_xm"=Z_xm, "spde"=list(), "spde_aniso"=list(), "M0"=GridList$M0, "M1"=GridList$M1, "M2"=GridList$M2 ) } if( "spde" %in% names(Return) ) Return[['spde']] = MeshList$isotropic_spde$param.inla[c("M0","M1","M2")] if( "spde_aniso" %in% names(Return) ) Return[['spde_aniso']] = list("n_s"=MeshList$anisotropic_spde$n.spde, "n_tri"=nrow(MeshList$anisotropic_mesh$graph$tv), "Tri_Area"=MeshList$Tri_Area, "E0"=MeshList$E0, "E1"=MeshList$E1, "E2"=MeshList$E2, "TV"=MeshList$TV-1, "G0"=MeshList$anisotropic_spde$param.inla$M0, "G0_inv"=INLA::inla.as.dgTMatrix(solve(MeshList$anisotropic_spde$param.inla$M0)) ) diff --git a/R/Param_Fn.R b/R/Param_Fn.R index a3f6d24..b4a460e 100644 --- a/R/Param_Fn.R +++ b/R/Param_Fn.R @@ -86,7 +86,7 @@ function( Version, DataList, RhoConfig=c("Beta1"=0,"Beta2"=0,"Epsilon1"=0,"Epsil if(Version%in%c("VAST_v1_9_0","VAST_v1_8_0","VAST_v1_7_0","VAST_v1_6_0")){ Return = list("ln_H_input"=c(0,0), "beta1_ct"=NA, "gamma1_j"=rep(0,DataList$n_j), "gamma1_ctp"=array(0,dim=c(DataList$n_c,DataList$n_t,DataList$n_p)), "lambda1_k"=rep(0,DataList$n_k), "L1_z"=NA, "L_omega1_z"=NA, "L_epsilon1_z"=NA, "logkappa1"=log(0.9), "Beta_mean1"=0, "logsigmaB1"=log(1), "Beta_rho1"=0, "Epsilon_rho1"=0, "eta1_vf"=NA, "Omegainput1_sf"=NA, "Epsiloninput1_sft"=NA, "beta2_ct"=NA, "gamma2_j"=rep(0,DataList$n_j), "gamma2_ctp"=array(0,dim=c(DataList$n_c,DataList$n_t,DataList$n_p)), "lambda2_k"=rep(0,DataList$n_k), "L2_z"=NA, "L_omega2_z"=NA, "L_epsilon2_z"=NA, "logkappa2"=log(0.9), "Beta_mean2"=0, "logsigmaB2"=log(1), "Beta_rho2"=0, "Epsilon_rho2"=0, "logSigmaM"=rep(1,DataList$n_c)%o%c(log(5),log(2),log(5)), "eta2_vf"=NA, "Omegainput2_sf"=NA, "Epsiloninput2_sft"=NA ) } - if(Version%in%c("VAST_v2_6_0","VAST_v2_5_0","VAST_v2_4_0","VAST_v2_3_0","VAST_v2_2_0","VAST_v2_1_0","VAST_v2_0_0")){ + if(Version%in%c("VAST_v2_7_0","VAST_v2_6_0","VAST_v2_5_0","VAST_v2_4_0","VAST_v2_3_0","VAST_v2_2_0","VAST_v2_1_0","VAST_v2_0_0")){ Return = list("ln_H_input"=c(0,0), "beta1_ct"=NA, "gamma1_j"=rep(0,DataList$n_j), 
"gamma1_ctp"=array(0,dim=c(DataList$n_c,DataList$n_t,DataList$n_p)), "lambda1_k"=rep(0,DataList$n_k), "L1_z"=NA, "L_omega1_z"=NA, "L_epsilon1_z"=NA, "logkappa1"=log(0.9), "Beta_mean1"=0, "logsigmaB1"=log(1), "Beta_rho1"=0, "Epsilon_rho1"=0, "log_sigmaratio1_z"=rep(0,ncol(DataList$t_iz)), "eta1_vf"=NA, "Omegainput1_sf"=NA, "Epsiloninput1_sft"=NA, "beta2_ct"=NA, "gamma2_j"=rep(0,DataList$n_j), "gamma2_ctp"=array(0,dim=c(DataList$n_c,DataList$n_t,DataList$n_p)), "lambda2_k"=rep(0,DataList$n_k), "L2_z"=NA, "L_omega2_z"=NA, "L_epsilon2_z"=NA, "logkappa2"=log(0.9), "Beta_mean2"=0, "logsigmaB2"=log(1), "Beta_rho2"=0, "Epsilon_rho2"=0, "log_sigmaratio2_z"=rep(0,ncol(DataList$t_iz)), "logSigmaM"=rep(1,DataList$n_c)%o%c(log(5),log(2),log(5)), "eta2_vf"=NA, "Omegainput2_sf"=NA, "Epsiloninput2_sft"=NA ) } # Overdispersion diff --git a/inst/executables/VAST_v2_7_0.cpp b/inst/executables/VAST_v2_7_0.cpp new file mode 100644 index 0000000..1654f35 --- /dev/null +++ b/inst/executables/VAST_v2_7_0.cpp @@ -0,0 +1,1005 @@ +#include +#include + +// Function for detecting NAs +template +bool isNA(Type x){ + return R_IsNA(asDouble(x)); +} + +// Posfun +template +Type posfun(Type x, Type lowerlimit, Type &pen){ + pen += CppAD::CondExpLt(x,lowerlimit,Type(0.01)*pow(x-lowerlimit,2),Type(0)); + return CppAD::CondExpGe(x,lowerlimit,x,lowerlimit/(Type(2)-x/lowerlimit)); +} + +// Variance +template +Type var( array vec ){ + Type vec_mod = vec - (vec.sum()/vec.size()); + Type res = pow(vec_mod, 2).sum() / vec.size(); + return res; +} + +// dlnorm +template +Type dlnorm(Type x, Type meanlog, Type sdlog, int give_log=0){ + //return 1/(sqrt(2*M_PI)*sd)*exp(-.5*pow((x-mean)/sd,2)); + Type logres = dnorm( log(x), meanlog, sdlog, true) - log(x); + if(give_log) return logres; else return exp(logres); +} + +// Generate loadings matrix +template +matrix loadings_matrix( vector L_val, int n_rows, int n_cols ){ + matrix L_rc(n_rows, n_cols); + int Count = 0; + for(int r=0; r=c){ + L_rc(r,c) = L_val(Count); + Count++; + }else{ + L_rc(r,c) = 0.0; + } + }} + return L_rc; +} + +// IN: eta1_vf; L1_z +// OUT: jnll_comp; eta1_vc +template +matrix overdispersion_by_category_nll( int n_f, int n_v, int n_c, matrix eta_vf, vector L_z, Type &jnll_pointer){ + using namespace density; + matrix eta_vc(n_v, n_c); + vector Tmp_c; + // Turn off + if(n_f<0){ + eta_vc.setZero(); + } + // AR1 structure + if( n_f==0 ){ + for(int v=0; v0 ){ + // Assemble the loadings matrix + matrix L_cf = loadings_matrix( L_z, n_c, n_f ); + // Multiply out overdispersion + eta_vc = eta_vf * L_cf.transpose(); + // Probability of overdispersion + for(int v=0; v // +matrix convert_upper_cov_to_cor( matrix cov ){ + int nrow = cov.row(0).size(); + for( int i=0; i // +matrix gmrf_by_category_nll( int n_f, int method, int n_s, int n_c, Type logkappa, array gmrf_input_sf, array gmrf_mean_sf, vector L_z, density::GMRF_t gmrf_Q, Type &jnll_pointer){ + using namespace density; + matrix gmrf_sc(n_s, n_c); + Type logtau; + // IID + if(n_f == -2){ + for( int c=0; c0){ + if(method==0) logtau = log( 1 / (exp(logkappa) * sqrt(4*M_PI)) ); + if(method==1) logtau = log( 1 / sqrt(1-exp(logkappa*2)) ); + for( int f=0; f L_cf = loadings_matrix( L_z, n_c, n_f ); + gmrf_sc = (gmrf_input_sf.matrix() * L_cf.transpose()) / exp(logtau); + } + return gmrf_sc; +} + +// CMP distribution +template +Type dCMP(Type x, Type mu, Type nu, int give_log=0, int iter_max=30, int break_point=10){ + // Explicit + Type ln_S_1 = nu*mu - ((nu-1)/2)*log(mu) - ((nu-1)/2)*log(2*M_PI) - 0.5*log(nu); + // Recursive + 
vector S_i(iter_max); + S_i(0) = 1; + for(int i=1; i +Type dPoisGam( Type x, Type shape, Type scale, Type intensity, vector &diag_z, int maxsum=50, int minsum=1, int give_log=0 ){ + // Maximum integration constant to prevent numerical overflow, but capped at value for maxsum to prevent numerical underflow when subtracting by a higher limit than is seen in the sequence + Type max_log_wJ, z1, maxJ_bounded; + if( x==0 ){ + diag_z(0) = 1; + max_log_wJ = 0; + diag_z(1) = 0; + }else{ + z1 = log(intensity) + shape*log(x/scale) - shape*log(shape) + 1; + diag_z(0) = exp( (z1 - 1) / (1 + shape) ); + maxJ_bounded = CppAD::CondExpGe(diag_z(0), Type(maxsum), Type(maxsum), diag_z(0)); + max_log_wJ = maxJ_bounded*log(intensity) + (maxJ_bounded*shape)*log(x/scale) - lgamma(maxJ_bounded+1) - lgamma(maxJ_bounded*shape); + diag_z(1) = diag_z(0)*log(intensity) + (diag_z(0)*shape)*log(x/scale) - lgamma(diag_z(0)+1) - lgamma(diag_z(0)*shape); + } + // Integration constant + Type W = 0; + Type log_w_j, pos_penalty; + for( int j=minsum; j<=maxsum; j++ ){ + Type j2 = j; + //W += pow(intensity,j) * pow(x/scale, j2*shape) / exp(lgamma(j2+1)) / exp(lgamma(j2*shape)) / exp(max_log_w_j); + log_w_j = j2*log(intensity) + (j2*shape)*log(x/scale) - lgamma(j2+1) - lgamma(j2*shape); + //W += exp( posfun(log_w_j, Type(-30), pos_penalty) ); + W += exp( log_w_j - max_log_wJ ); + if(j==minsum) diag_z(2) = log_w_j; + if(j==maxsum) diag_z(3) = log_w_j; + } + // Loglikelihood calculation + Type loglike = 0; + if( x==0 ){ + loglike = -intensity; + }else{ + loglike = -x/scale - intensity - log(x) + log(W) + max_log_wJ; + } + // Return + if(give_log) return loglike; else return exp(loglike); +} + + +// Space time +template +Type objective_function::operator() () +{ + using namespace R_inla; + using namespace Eigen; + using namespace density; + + // Dimensions + DATA_INTEGER(n_i); // Number of observations (stacked across all years) + DATA_INTEGER(n_s); // Number of "strata" (i.e., vectices in SPDE mesh) + DATA_INTEGER(n_x); // Number of real "strata" (i.e., k-means locations) + DATA_INTEGER(n_t); // Number of time-indices + DATA_INTEGER(n_c); // Number of categories (e.g., length bins) + DATA_INTEGER(n_j); // Number of static covariates + DATA_INTEGER(n_p); // Number of dynamic covariates + DATA_INTEGER(n_k); // Number of catchability variables + DATA_INTEGER(n_v); // Number of tows/vessels (i.e., levels for the factor explaining overdispersion) + DATA_INTEGER(n_l); // Number of indices to post-process + DATA_INTEGER(n_m); // Number of range metrics to use (probably 2 for Eastings-Northings) + + // Config + DATA_IVECTOR( Options_vec ); + // Slot 0 -- Aniso: 0=No, 1=Yes + // Slot 1 -- DEPRECATED + // Slot 2 -- AR1 on betas (year intercepts) to deal with missing years: 0=No, 1=Yes + // Slot 3 -- DEPRECATED + // Slot 4 -- DEPRECATED + // Slot 5 -- Upper limit constant of integration calculation for infinite-series density functions (Conway-Maxwell-Poisson and Tweedie) + // Slot 6 -- Breakpoint in CMP density function + // Slot 7 -- Whether to use SPDE or 2D-AR1 hyper-distribution for spatial process: 0=SPDE; 1=2D-AR1 + DATA_IVECTOR(FieldConfig); // Input settings (vector, length 4) + DATA_IVECTOR(OverdispersionConfig); // Input settings (vector, length 2) + DATA_IVECTOR(ObsModel); // Observation model + // Slot 0: Distribution for positive catches + // Slot 1: Link function for encounter probabilities + DATA_IVECTOR(Options); // Reporting options + // Slot 0: Calculate SD for Index_xctl + // Slot 1: Calculate SD for log(Index_xctl) + // 
Slot 2: Calculate mean_Z_ctm (i.e., center-of-gravity) + // Slot 3: DEPRECATED + // Slot 4: Calculate mean_D_tl and effective_area_tl + // Slot 5: Calculate standard errors for Covariance and Correlation among categories using factor-analysis parameterization + // Slot 6: Calculate synchrony for different periods specified via yearbounds_zz + // Slot 7: Calculate coherence and variance for Epsilon1_sct and Epsilon2_sct + DATA_IMATRIX(yearbounds_zz); + // Two columns, and 1+ rows, specifying first and last t for each period used in calculating synchrony + + // Data vectors + DATA_VECTOR(b_i); // Response (biomass) for each observation + DATA_VECTOR(a_i); // Area swept for each observation (km^2) + DATA_IVECTOR(c_i); // Category for each observation + DATA_IVECTOR(s_i); // Station for each observation + DATA_IMATRIX(t_iz); // Time-indices (year, season, etc.) for each observation + DATA_IVECTOR(v_i); // tows/vessels for each observation (level of factor representing overdispersion) + DATA_VECTOR(PredTF_i); // vector indicating whether an observatino is predictive (1=used for model evaluation) or fitted (0=used for parameter estimation) + DATA_MATRIX(a_xl); // Area for each "real" stratum(km^2) in each stratum + DATA_MATRIX(X_xj); // Covariate design matrix (strata x covariate) + DATA_ARRAY(X_xtp); // Covariate design matrix (strata x covariate) + DATA_MATRIX(Q_ik); // Catchability matrix (observations x variable) + DATA_IMATRIX(t_yz); // Matrix for time-indices of calculating outputs (abundance index and "derived-quantity") + DATA_MATRIX(Z_xm); // Derived quantity matrix + + // SPDE objects + DATA_STRUCT(spde,spde_t); + + // Aniso objects + DATA_STRUCT(spde_aniso,spde_aniso_t); + + // Sparse matrices for precision matrix of 2D AR1 process + // Q = M0*(1+rho^2)^2 + M1*(1+rho^2)*(-rho) + M2*rho^2 + DATA_SPARSE_MATRIX(M0); + DATA_SPARSE_MATRIX(M1); + DATA_SPARSE_MATRIX(M2); + + // Parameters + PARAMETER_VECTOR(ln_H_input); // Anisotropy parameters + // -- presence/absence + PARAMETER_MATRIX(beta1_ct); // Year effect + PARAMETER_VECTOR(gamma1_j); // Static covariate effect + PARAMETER_ARRAY(gamma1_ctp); // Dynamic covariate effect + PARAMETER_VECTOR(lambda1_k); // Catchability coefficients + PARAMETER_VECTOR(L1_z); // Overdispersion parameters + PARAMETER_VECTOR(L_omega1_z); + PARAMETER_VECTOR(L_epsilon1_z); + PARAMETER(logkappa1); + PARAMETER(Beta_mean1); // mean-reversion for beta1_t + PARAMETER(logsigmaB1); // SD of beta1_t (default: not included in objective function) + PARAMETER(Beta_rho1); // AR1 for positive catch Epsilon component, Default=0 + PARAMETER(Epsilon_rho1); // AR1 for presence/absence Epsilon component, Default=0 + PARAMETER_VECTOR(log_sigmaratio1_z); // Ratio of variance for columns of t_iz + // -- Gaussian random fields + PARAMETER_MATRIX(eta1_vf); + PARAMETER_ARRAY(Omegainput1_sf); // Expectation + PARAMETER_ARRAY(Epsiloninput1_sft); // Annual variation + // -- positive catch rates + PARAMETER_MATRIX(beta2_ct); // Year effect + PARAMETER_VECTOR(gamma2_j); // Covariate effect + PARAMETER_ARRAY(gamma2_ctp); // Dynamic covariate effect + PARAMETER_VECTOR(lambda2_k); // Catchability coefficients + PARAMETER_VECTOR(L2_z); // Overdispersion parameters + PARAMETER_VECTOR(L_omega2_z); + PARAMETER_VECTOR(L_epsilon2_z); + PARAMETER(logkappa2); + PARAMETER(Beta_mean2); // mean-reversion for beta2_t + PARAMETER(logsigmaB2); // SD of beta2_t (default: not included in objective function) + PARAMETER(Beta_rho2); // AR1 for positive catch Epsilon component, Default=0 + 
PARAMETER(Epsilon_rho2); // AR1 for positive catch Epsilon component, Default=0 + PARAMETER_VECTOR(log_sigmaratio2_z); // Ratio of variance for columns of t_iz + PARAMETER_ARRAY(logSigmaM); // Slots: 0=mix1 CV, 1=prob-of-mix1, 2= + // -- Gaussian random fields + PARAMETER_MATRIX(eta2_vf); + PARAMETER_ARRAY(Omegainput2_sf); // Expectation + PARAMETER_ARRAY(Epsiloninput2_sft); // Annual variation + + // Indices -- i=Observation; j=Covariate; v=Vessel; t=Year; s=Stratum + int i,j,v,t,s,c,y,z; + + // Objective function + vector jnll_comp(13); + // Slot 0 -- spatial, encounter + // Slot 1 -- spatio-temporal, encounter + // Slot 2 -- spatial, positive catch + // Slot 3 -- spatio-temporal, positive catch + // Slot 4 -- tow/vessel overdispersion, encounter + // Slot 5 -- tow/vessel overdispersion, positive catch + // Slot 8 -- penalty on beta, encounter + // Slot 9 -- penalty on beta, positive catch + // Slot 10 -- likelihood of data, encounter + // Slot 11 -- likelihood of data, positive catch + jnll_comp.setZero(); + Type jnll = 0; + + // Derived parameters + Type Range_raw1, Range_raw2; + if( Options_vec(7)==0 ){ + Range_raw1 = sqrt(8) / exp( logkappa1 ); // Range = approx. distance @ 10% correlation + Range_raw2 = sqrt(8) / exp( logkappa2 ); // Range = approx. distance @ 10% correlation + } + if( Options_vec(7)==1 ){ + Range_raw1 = log(0.1) / logkappa1; // Range = approx. distance @ 10% correlation + Range_raw2 = log(0.1) / logkappa2; // Range = approx. distance @ 10% correlation + } + array SigmaM( n_c, 3 ); + SigmaM = exp( logSigmaM ); + + // Anisotropy elements + matrix H(2,2); + H(0,0) = exp(ln_H_input(0)); + H(1,0) = ln_H_input(1); + H(0,1) = ln_H_input(1); + H(1,1) = (1+ln_H_input(1)*ln_H_input(1)) / exp(ln_H_input(0)); + + //////////////////////// + // Calculate joint likelihood + //////////////////////// + + // Random field probability + Eigen::SparseMatrix Q1; + Eigen::SparseMatrix Q2; + GMRF_t gmrf_Q; + if( Options_vec(7)==0 & Options_vec(0)==0 ){ + Q1 = Q_spde(spde, exp(logkappa1)); + Q2 = Q_spde(spde, exp(logkappa2)); + } + if( Options_vec(7)==0 & Options_vec(0)==1 ){ + Q1 = Q_spde(spde_aniso, exp(logkappa1), H); + Q2 = Q_spde(spde_aniso, exp(logkappa2), H); + } + if( Options_vec(7)==1 ){ + Q1 = M0*pow(1+exp(logkappa1*2),2) + M1*(1+exp(logkappa1*2))*(-exp(logkappa1)) + M2*exp(logkappa1*2); + Q2 = M0*pow(1+exp(logkappa2*2),2) + M1*(1+exp(logkappa2*2))*(-exp(logkappa2)) + M2*exp(logkappa2*2); + } + // Probability of encounter + gmrf_Q = GMRF(Q1); + // Omega1 + int n_f; + n_f = Omegainput1_sf.cols(); + array Omegamean1_sf(n_s, n_f); + Omegamean1_sf.setZero(); + // Epsilon1 + n_f = Epsiloninput1_sft.col(0).cols(); + array Epsilonmean1_sf(n_s, n_f); + array Omega1_sc(n_s, n_c); + Omega1_sc = gmrf_by_category_nll(FieldConfig(0), Options_vec(7), n_s, n_c, logkappa1, Omegainput1_sf, Omegamean1_sf, L_omega1_z, gmrf_Q, jnll_comp(0)); + array Epsilon1_sct(n_s, n_c, n_t); + for(t=0; t=1){ + Epsilonmean1_sf = Epsilon_rho1 * Epsiloninput1_sft.col(t-1); + Epsilon1_sct.col(t) = gmrf_by_category_nll(FieldConfig(1), Options_vec(7), n_s, n_c, logkappa1, Epsiloninput1_sft.col(t), Epsilonmean1_sf, L_epsilon1_z, gmrf_Q, jnll_comp(1)); + } + } + // Positive catch rate + gmrf_Q = GMRF(Q2); + // Omega2 + n_f = Omegainput2_sf.cols(); + array Omegamean2_sf(n_s, n_f); + Omegamean2_sf.setZero(); + // Epsilon2 + n_f = Epsiloninput2_sft.col(0).cols(); + array Epsilonmean2_sf(n_s, n_f); + array Omega2_sc(n_s, n_c); + Omega2_sc = gmrf_by_category_nll(FieldConfig(2), Options_vec(7), n_s, n_c, logkappa2, 
Omegainput2_sf, Omegamean2_sf, L_omega2_z, gmrf_Q, jnll_comp(2)); + array Epsilon2_sct(n_s, n_c, n_t); + for(t=0; t=1){ + Epsilonmean2_sf = Epsilon_rho2 * Epsiloninput2_sft.col(t-1); + Epsilon2_sct.col(t) = gmrf_by_category_nll(FieldConfig(3), Options_vec(7), n_s, n_c, logkappa2, Epsiloninput2_sft.col(t), Epsilonmean2_sf, L_epsilon2_z, gmrf_Q, jnll_comp(3)); + } + } + + ////// Probability of correlated overdispersion among bins + // IN: eta1_vf; n_f; L1_z + // OUT: jnll_comp; eta1_vc + matrix eta1_vc(n_v, n_c); + eta1_vc = overdispersion_by_category_nll( OverdispersionConfig(0), n_v, n_c, eta1_vf, L1_z, jnll_comp(4) ); + matrix eta2_vc(n_v, n_c); + eta2_vc = overdispersion_by_category_nll( OverdispersionConfig(1), n_v, n_c, eta2_vf, L2_z, jnll_comp(5) ); + + // Possible structure on betas + if( Options_vec(2)!=0 ){ + for(c=0; c eta1_x = X_xj * gamma1_j.matrix(); + vector zeta1_i = Q_ik * lambda1_k.matrix(); + vector eta2_x = X_xj * gamma2_j.matrix(); + vector zeta2_i = Q_ik * lambda2_k.matrix(); + array eta1_xct(n_x, n_c, n_t); + array eta2_xct(n_x, n_c, n_t); + eta1_xct.setZero(); + eta2_xct.setZero(); + for(int x=0; x var_i(n_i); + Type var_y; + // Linear predictor (pre-link) for presence/absence component + vector P1_i(n_i); + // Response predictor (post-link) + // ObsModel = 0:4 or 11:12: probability ("phi") that data is greater than zero + // ObsModel = 5 (ZINB): phi = 1-ZeroInflation_prob -> Pr[D=0] = NB(0|mu,var)*phi + (1-phi) -> Pr[D>0] = phi - NB(0|mu,var)*phi + vector R1_i(n_i); + vector LogProb1_i(n_i); + // Linear predictor (pre-link) for positive component + vector P2_i(n_i); + // Response predictor (post-link) + // ObsModel = 0:3, 11:12: expected value of data, given that data is greater than zero -> E[D] = mu*phi + // ObsModel = 4 (ZANB): expected value ("mu") of neg-bin PRIOR to truncating Pr[D=0] -> E[D] = mu/(1-NB(0|mu,var))*phi ALSO Pr[D] = NB(D|mu,var)/(1-NB(0|mu,var))*phi + // ObsModel = 5 (ZINB): expected value of data for non-zero-inflation component -> E[D] = mu*phi + vector R2_i(n_i); + vector LogProb2_i(n_i); + vector maxJ_i(n_i); + vector diag_z(4); + matrix diag_iz(n_i,4); + diag_iz.setZero(); // Used to track diagnostics for Tweedie distribution (columns: 0=maxJ; 1=maxW; 2=lowerW; 3=upperW) + + // Likelihood contribution from observations + for(int i=0; i=0 & t_iz(i,z) 0 ){ + LogProb1_i(i) = log( R1_i(i) ); + }else{ + LogProb1_i(i) = log( 1-R1_i(i) ); + } + // Positive density likelihood -- models with continuous positive support + if( b_i(i) > 0 ){ // 1e-500 causes overflow on laptop + if(ObsModel(0)==0) LogProb2_i(i) = dnorm(b_i(i), R2_i(i), SigmaM(c_i(i),0), true); + if(ObsModel(0)==1) LogProb2_i(i) = dlnorm(b_i(i), log(R2_i(i))-pow(SigmaM(c_i(i),0),2)/2, SigmaM(c_i(i),0), true); // log-space + if(ObsModel(0)==2) LogProb2_i(i) = dgamma(b_i(i), 1/pow(SigmaM(c_i(i),0),2), R2_i(i)*pow(SigmaM(c_i(i),0),2), true); // shape = 1/CV^2, scale = mean*CV^2 + }else{ + LogProb2_i(i) = 0; + } + } + // Likelihood for Tweedie model with continuous positive support + if(ObsModel(0)==8){ + LogProb1_i(i) = 0; + //dPoisGam( Type x, Type shape, Type scale, Type intensity, Type &max_log_w_j, int maxsum=50, int minsum=1, int give_log=0 ) + LogProb2_i(i) = dPoisGam( b_i(i), SigmaM(c_i(i),0), R2_i(i), R1_i(i), diag_z, Options_vec(5), Options_vec(6), true ); + diag_iz.row(i) = diag_z; + } + if(ObsModel(0)==10){ + // Packaged code + LogProb1_i(i) = 0; + // dtweedie( Type y, Type mu, Type phi, Type p, int give_log=0 ) + // R1*R2 = mean + LogProb2_i(i) = dtweedie( b_i(i), 
R1_i(i)*R2_i(i), R1_i(i), invlogit(SigmaM(c_i(i),0))+1.0, true ); + } + // Likelihood for models with discrete support + if(ObsModel(0)==4 | ObsModel(0)==5 | ObsModel(0)==6 | ObsModel(0)==7 | ObsModel(0)==9){ + if(ObsModel(0)==5){ + // Zero-inflated negative binomial (not numerically stable!) + var_i(i) = R2_i(i)*(1.0+SigmaM(c_i(i),0)) + pow(R2_i(i),2.0)*SigmaM(c_i(i),1); + if( b_i(i)==0 ){ + LogProb2_i(i) = log( (1-R1_i(i)) + dnbinom2(Type(0.0), R2_i(i), var_i(i), false)*R1_i(i) ); // Pr[X=0] = 1-phi + NB(X=0)*phi + }else{ + LogProb2_i(i) = dnbinom2(b_i(i), R2_i(i), var_i(i), true) + log(R1_i(i)); // Pr[X=x] = NB(X=x)*phi + } + } + if(ObsModel(0)==6){ + // Conway-Maxwell-Poisson + LogProb2_i(i) = dCMP(b_i(i), R2_i(i), exp(P1_i(i)), true, Options_vec(5)); + } + if(ObsModel(0)==7){ + // Zero-inflated Poisson + if( b_i(i)==0 ){ + LogProb2_i(i) = log( (1-R1_i(i)) + dpois(Type(0.0), R2_i(i), false)*R1_i(i) ); // Pr[X=0] = 1-phi + Pois(X=0)*phi + }else{ + LogProb2_i(i) = dpois(b_i(i), R2_i(i), true) + log(R1_i(i)); // Pr[X=x] = Pois(X=x)*phi + } + } + if(ObsModel(0)==9){ + // Binned Poisson (for REEF data: 0=none; 1=1; 2=2-10; 3=>11) + /// Doesn't appear stable given spatial or spatio-temporal variation + vector logdBinPois(4); + logdBinPois(0) = logspace_add( log(1-R1_i(i)), dpois(Type(0.0), R2_i(i), true) + log(R1_i(i)) ); // Pr[X=0] = 1-phi + Pois(X=0)*phi + logdBinPois(1) = dpois(Type(1.0), R2_i(i), true) + log(R1_i(i)); // Pr[X | X>0] = Pois(X)*phi + logdBinPois(2) = dpois(Type(2.0), R2_i(i), true) + log(R1_i(i)); // SUM_J( Pr[X|X>0] ) = phi * SUM_J( Pois(J) ) + for(int j=3; j<=10; j++){ + logdBinPois(2) += logspace_add( logdBinPois(2), dpois(Type(j), R2_i(i), true) + log(R1_i(i)) ); + } + logdBinPois(3) = logspace_sub( log(Type(1.0)), logdBinPois(0) ); + logdBinPois(3) = logspace_sub( logdBinPois(3), logdBinPois(1) ); + logdBinPois(3) = logspace_sub( logdBinPois(3), logdBinPois(2) ); + if( b_i(i)==0 ) LogProb2_i(i) = logdBinPois(0); + if( b_i(i)==1 ) LogProb2_i(i) = logdBinPois(1); + if( b_i(i)==2 ) LogProb2_i(i) = logdBinPois(2); + if( b_i(i)==3 ) LogProb2_i(i) = logdBinPois(3); + } + LogProb1_i(i) = 0; + } + } + REPORT( diag_iz ); + + // Joint likelihood + jnll_comp(10) = -1 * (LogProb1_i * (Type(1.0)-PredTF_i)).sum(); + jnll_comp(11) = -1 * (LogProb2_i * (Type(1.0)-PredTF_i)).sum(); + jnll = jnll_comp.sum(); + Type pred_jnll = -1 * ( LogProb1_i*PredTF_i + LogProb2_i*PredTF_i ).sum(); + REPORT( pred_jnll ); + + //////////////////////// + // Calculate outputs + //////////////////////// + + // Number of output-years + int n_y = t_yz.col(0).size(); + + // Predictive distribution -- ObsModel(4) isn't implemented (it had a bug previously) + Type a_average = a_i.sum()/a_i.size(); + array P1_xcy(n_x, n_c, n_y); + array R1_xcy(n_x, n_c, n_y); + array P2_xcy(n_x, n_c, n_y); + array R2_xcy(n_x, n_c, n_y); + array D_xcy(n_x, n_c, n_y); + for(int c=0; c=0 & t_yz(y,z) Index_xcyl(n_x, n_c, n_y, n_l); + array Index_cyl(n_c, n_y, n_l); + array ln_Index_cyl(n_c, n_y, n_l); + Index_cyl.setZero(); + for(int c=0; c mean_Z_cym(n_c, n_y, n_m); + if( Options(2)==1 ){ + mean_Z_cym.setZero(); + int report_summary_TF = false; + for(int c=0; c mean_D_cyl(n_c, n_y, n_l); + array log_mean_D_cyl(n_c, n_y, n_l); + mean_D_cyl.setZero(); + for(int c=0; c effective_area_cyl(n_c, n_y, n_l); + array log_effective_area_cyl(n_c, n_y, n_l); + effective_area_cyl = Index_cyl / (mean_D_cyl/1000); // Correct for different units of Index and density + log_effective_area_cyl = log( effective_area_cyl ); + REPORT( 
effective_area_cyl ); + ADREPORT( effective_area_cyl ); + ADREPORT( log_effective_area_cyl ); + } + + // Reporting and standard-errors for covariance and correlation matrices + if( Options(5)==1 ){ + if( FieldConfig(0)>0 ){ + matrix L1_omega_cf = loadings_matrix( L_omega1_z, n_c, FieldConfig(0) ); + matrix lowercov_uppercor_omega1 = L1_omega_cf * L1_omega_cf.transpose(); + lowercov_uppercor_omega1 = convert_upper_cov_to_cor( lowercov_uppercor_omega1 ); + REPORT( lowercov_uppercor_omega1 ); + ADREPORT( lowercov_uppercor_omega1 ); + } + if( FieldConfig(1)>0 ){ + matrix L1_epsilon_cf = loadings_matrix( L_epsilon1_z, n_c, FieldConfig(1) ); + matrix lowercov_uppercor_epsilon1 = L1_epsilon_cf * L1_epsilon_cf.transpose(); + lowercov_uppercor_epsilon1 = convert_upper_cov_to_cor( lowercov_uppercor_epsilon1 ); + REPORT( lowercov_uppercor_epsilon1 ); + ADREPORT( lowercov_uppercor_epsilon1 ); + } + if( FieldConfig(2)>0 ){ + matrix L2_omega_cf = loadings_matrix( L_omega2_z, n_c, FieldConfig(2) ); + matrix lowercov_uppercor_omega2 = L2_omega_cf * L2_omega_cf.transpose(); + lowercov_uppercor_omega2 = convert_upper_cov_to_cor( lowercov_uppercor_omega2 ); + REPORT( lowercov_uppercor_omega2 ); + ADREPORT( lowercov_uppercor_omega2 ); + } + if( FieldConfig(3)>0 ){ + matrix L2_epsilon_cf = loadings_matrix( L_epsilon2_z, n_c, FieldConfig(3) ); + matrix lowercov_uppercor_epsilon2 = L2_epsilon_cf * L2_epsilon_cf.transpose(); + lowercov_uppercor_epsilon2 = convert_upper_cov_to_cor( lowercov_uppercor_epsilon2 ); + REPORT( lowercov_uppercor_epsilon2 ); + ADREPORT( lowercov_uppercor_epsilon2 ); + } + } + + // Synchrony + if( Options(6)==1 ){ + int n_z = yearbounds_zz.col(0).size(); + // Density ("D") or area-expanded total biomass ("B") for each category (use B when summing across sites) + matrix D_xy( n_x, n_y ); + matrix B_cy( n_c, n_y ); + vector B_y( n_y ); + D_xy.setZero(); + B_cy.setZero(); + B_y.setZero(); + // Sample variance in category-specific density ("D") and biomass ("B") + array varD_xcz( n_x, n_c, n_z ); + array varD_xz( n_x, n_z ); + array varB_cz( n_c, n_z ); + vector varB_z( n_z ); + vector varB_xbar_z( n_z ); + vector varB_cbar_z( n_z ); + vector ln_varB_z( n_z ); + vector ln_varB_xbar_z( n_z ); + vector ln_varB_cbar_z( n_z ); + array maxsdD_xz( n_x, n_z ); + array maxsdB_cz( n_c, n_z ); + vector maxsdB_z( n_z ); + varD_xcz.setZero(); + varD_xz.setZero(); + varB_cz.setZero(); + varB_z.setZero(); + varB_xbar_z.setZero(); + varB_cbar_z.setZero(); + maxsdD_xz.setZero(); + maxsdB_cz.setZero(); + maxsdB_z.setZero(); + // Proportion of total biomass ("P") for each location or each category + matrix propB_xz( n_x, n_z ); + matrix propB_cz( n_c, n_z ); + propB_xz.setZero(); + propB_cz.setZero(); + // Synchrony indices + matrix phi_xz( n_x, n_z ); + matrix phi_cz( n_c, n_z ); + vector phi_xbar_z( n_z ); + vector phi_cbar_z( n_z ); + vector phi_z( n_z ); + phi_xbar_z.setZero(); + phi_cbar_z.setZero(); + phi_z.setZero(); + // Calculate total biomass for different categories + for( int y=0; y CovHat( n_c, n_c ); + matrix CovHat( n_c, n_c ); + CovHat.setIdentity(); + CovHat *= pow(0.0001, 2); + if( FieldConfig(1)>0 ) CovHat += loadings_matrix(L_epsilon1_z, n_c, FieldConfig(1)) * loadings_matrix(L_epsilon1_z, n_c, FieldConfig(1)).transpose(); + if( FieldConfig(3)>0 ) CovHat += loadings_matrix(L_epsilon2_z, n_c, FieldConfig(3)) * loadings_matrix(L_epsilon2_z, n_c, FieldConfig(3)).transpose(); + // Coherence ranges from 0 (all factors are equal) to 1 (first factor explains all variance) + SelfAdjointEigenSolver > 
es(CovHat); + vector eigenvalues_c = es.eigenvalues(); // Ranked from lowest to highest for some reason + Type psi = 0; + for(int c=0; c diag_CovHat( n_c ); + vector log_diag_CovHat( n_c ); + for(int c=0; c PropIndex_cyl(n_c, n_y, n_l); + array ln_PropIndex_cyl(n_c, n_y, n_l); + Type sumtemp; + for(int y=0; y Date: Tue, 18 Jul 2017 10:36:46 -0700 Subject: [PATCH 2/5] added `calculate_proportion.R`... ... which does the composition-calculation for age/size-structured models --- R/calculate_proportion.R | 56 ++++++++++++++++++++++++++++++++ inst/executables/VAST_v2_7_0.cpp | 2 +- 2 files changed, 57 insertions(+), 1 deletion(-) create mode 100644 R/calculate_proportion.R diff --git a/R/calculate_proportion.R b/R/calculate_proportion.R new file mode 100644 index 0000000..de8fb2f --- /dev/null +++ b/R/calculate_proportion.R @@ -0,0 +1,56 @@ + + +calculate_proportion = function( TmbData, Index, Year_Set=NULL, Years2Include=NULL, strata_names=NULL, category_names=NULL, plot_legend=TRUE, + DirName=paste0(getwd(),"/"), PlotName="Proportion.png", interval_width=1, width=4, height=4, ... ){ + + # Warnings and errors + if( !all(TmbData[['FieldConfig']] %in% c(-2,-1)) ){ + stop("Derivation only included for independent categories") + } + Index_ctl = array(Index$Index_ctl[,,,'Estimate'],dim=dim(Index$Index_ctl)[1:3]) + SE_Index_ctl = array(Index$Index_ctl[,,,'Std. Error'],dim=dim(Index$Index_ctl)[1:3]) + + # Calculate proportions, and total biomass + Prop_ctl = Index_ctl / outer(rep(1,TmbData$n_c),apply(Index_ctl,MARGIN=2:3,FUN=sum)) + Index_tl = apply(Index_ctl,MARGIN=2:3,FUN=sum) + SE_Index_tl = apply(SE_Index_ctl,MARGIN=2:3,FUN=sum) + + # Approximate variance for proportions, and effective sample size + Neff_ctl = var_Prop_ctl = array(NA,dim=dim(Prop_ctl)) + for( cI in 1:dim(var_Prop_ctl)[1]){ + for( tI in 1:dim(var_Prop_ctl)[2]){ + for( lI in 1:dim(var_Prop_ctl)[3]){ + var_Prop_ctl[cI,tI,lI] = Index_ctl[cI,tI,lI]^2/Index_tl[tI,lI]^2 * (SE_Index_ctl[cI,tI,lI]^2/Index_ctl[cI,tI,lI]^2 + SE_Index_tl[tI,lI]^2/Index_tl[tI,lI]^2 ) + Neff_ctl[cI,tI,lI] = Prop_ctl[cI,tI,lI] * (1-Prop_ctl[cI,tI,lI]) / var_Prop_ctl[cI,tI,lI] + }}} + + # Median effective sample size across categories + Neff_tl = apply(Neff_ctl, MARGIN=2:3, FUN=median) + + # Fill in missing + if( is.null(Year_Set) ) Year_Set = 1:TmbData$n_t + if( is.null(Years2Include) ) Years2Include = 1:TmbData$n_t + if( is.null(strata_names) ) strata_names = 1:TmbData$n_l + if( is.null(category_names) ) category_names = 1:TmbData$n_c + + # Plot + Par = list( mar=c(2,2,1,0), mgp=c(2,0.5,0), tck=-0.02, yaxs="i", oma=c(1,1,0,0), mfrow=c(ceiling(sqrt(TmbData$n_t)),ceiling(TmbData$n_t/ceiling(sqrt(TmbData$n_t)))), ... 
) + png( file=paste0(DirName,"/",PlotName), width=width, height=height, res=200, units="in") + par( Par ) + for( tI in 1:TmbData$n_t ){ + # Calculate y-axis limits + Ylim = c(0, max(Prop_ctl[,tI,]%o%c(1,1) + sqrt(var_Prop_ctl[,tI,])%o%c(-interval_width,interval_width))) + # Plot stuff + plot(1, type="n", xlim=range(category_names), ylim=1.05*Ylim, xlab="", ylab="", main=ifelse(TmbData$n_t>1,Year_Set[tI],"") ) + for(l in 1:TmbData$n_l){ + SpatialDeltaGLMM:::Plot_Points_and_Bounds_Fn( y=Prop_ctl[,tI,l], x=1:TmbData$n_c+seq(-0.1,0.1,length=TmbData$n_l)[l], ybounds=Prop_ctl[,tI,]%o%c(1,1) + sqrt(var_Prop_ctl[,tI,])%o%c(-interval_width,interval_width), type="b", col=rainbow(TmbData[['n_l']])[l], col_bounds=rainbow(TmbData[['n_l']])[l], ylim=Ylim) + } + if(plot_legend==TRUE) legend( "top", bty="n", fill=rainbow(TmbData[['n_l']]), legend=as.character(strata_names), ncol=2 ) + } + mtext( side=1:2, text=c("Year","Proportion of biomass"), outer=TRUE, line=c(0,0) ) + dev.off() + + # Return stuff + Return = list("Prop_ctl"=Prop_ctl, "Neff_tl"=Neff_tl, "var_Prop_ctl"=var_Prop_ctl, "Index_tl"=Index_tl, "Neff_ctl"=Neff_ctl) + return( Return ) +} diff --git a/inst/executables/VAST_v2_7_0.cpp b/inst/executables/VAST_v2_7_0.cpp index 1654f35..94306e3 100644 --- a/inst/executables/VAST_v2_7_0.cpp +++ b/inst/executables/VAST_v2_7_0.cpp @@ -933,7 +933,7 @@ Type objective_function::operator() () PropIndex_cyl(c,y,l) = Index_cyl(c,y,l) / sumtemp; } }} - ln_Index_cyl = log( Index_cyl ); + ln_PropIndex_cyl = log( PropIndex_cyl ); REPORT( PropIndex_cyl ); REPORT( ln_PropIndex_cyl ); ADREPORT( PropIndex_cyl ); From dc8f3c2cf2372514e7965254eda6eb0f1dce0ef6 Mon Sep 17 00:00:00 2001 From: Jim Thorson Date: Tue, 18 Jul 2017 11:01:03 -0700 Subject: [PATCH 3/5] added doxygen for `calculate_proportion` --- NAMESPACE | 3 ++- R/calculate_proportion.R | 17 ++++++++++++- man/Data_Fn.Rd | 8 +++--- man/Plot_factors.Rd | 7 +++--- man/calculate_proportion.Rd | 49 +++++++++++++++++++++++++++++++++++++ 5 files changed, 75 insertions(+), 9 deletions(-) create mode 100644 man/calculate_proportion.Rd diff --git a/NAMESPACE b/NAMESPACE index 85f5cef..e594e84 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -1,4 +1,4 @@ -# Generated by roxygen2 (4.1.1): do not edit by hand +# Generated by roxygen2: do not edit by hand export(Build_TMB_Fn) export(Coherence) @@ -14,6 +14,7 @@ export(Summarize_Covariance) export(calc_coherence) export(calc_cov) export(calc_synchrony) +export(calculate_proportion) export(plot_cov) export(plot_eigen) importFrom(grDevices,colorRampPalette) diff --git a/R/calculate_proportion.R b/R/calculate_proportion.R index de8fb2f..a5b525b 100644 --- a/R/calculate_proportion.R +++ b/R/calculate_proportion.R @@ -1,7 +1,22 @@ +#' Calculate the compositional-expansion +#' +#' \code{calculate_proportion} takes output from a VAST run and calculates the proportion of biomass in different categories +#' +#' @param Index output from \code{SpatialDeltaGLMM::PlotIndex_Fn} +#' @inheritParams SpatialDeltaGLMM::PlotIndex_Fn +#' @param ... 
list of settings to pass to \code{sdreport} +#' +#' @return Tagged list of output +#' \describe{ +#' \item{Prop_ctl}{Proportion of biomass for each category c, time t, and stratum l} +#' \item{Neff_tl}{Effective sample size (median across categories) for each time t and stratum l} +#' } +#' +#' @export calculate_proportion = function( TmbData, Index, Year_Set=NULL, Years2Include=NULL, strata_names=NULL, category_names=NULL, plot_legend=TRUE, - DirName=paste0(getwd(),"/"), PlotName="Proportion.png", interval_width=1, width=4, height=4, ... ){ + DirName=paste0(getwd(),"/"), PlotName="Proportion.png", interval_width=1, width=6, height=6, ... ){ # Warnings and errors if( !all(TmbData[['FieldConfig']] %in% c(-2,-1)) ){ diff --git a/man/Data_Fn.Rd b/man/Data_Fn.Rd index b15f54a..ffc72c0 100644 --- a/man/Data_Fn.Rd +++ b/man/Data_Fn.Rd @@ -10,9 +10,9 @@ Data_Fn(Version, FieldConfig, OverdispersionConfig = c(eta1 = 0, eta2 = 0), length(b_i)), X_xj = NULL, X_xtp = NULL, Q_ik = NULL, Aniso = 1, RhoConfig = c(Beta1 = 0, Beta2 = 0, Epsilon1 = 0, Epsilon2 = 0), t_yz = NULL, CheckForErrors = TRUE, yearbounds_zz = NULL, - Options = c(SD_site_density = 0, SD_site_logdensity = 0, Calculate_Range = - 0, Calculate_evenness = 0, Calculate_effective_area = 0, Calculate_Cov_SE = 0, - Calculate_Synchrony = 0, Calculate_Coherence = 0)) + Options = c(SD_site_logdensity = 0, Calculate_Range = 0, + Calculate_effective_area = 0, Calculate_Cov_SE = 0, Calculate_Synchrony = 0, + Calculate_proportion = 0)) } \arguments{ \item{Version}{a version number (see example for current default).} @@ -76,7 +76,7 @@ Data_Fn(Version, FieldConfig, OverdispersionConfig = c(eta1 = 0, eta2 = 0), \item{yearbounds_zz}{OPTIONAL, matrix with two columns, giving first and last years for defining one or more periods (rows) used to calculate changes in synchrony over time (only used if \code{Options['Calculate_Synchrony']=1})} -\item{Options}{OPTIONAL, a vector of form c('SD_site_density'=0,'SD_site_logdensity'=0,'Calculate_Range'=0,'Calculate_evenness'=0,'Calculate_effective_area'=0,'Calculate_Cov_SE'=0,'Calculate_Synchrony'=0,'Calculate_Coherence'=0), where Calculate_Range=1 turns on calculation of center of gravity, and Calculate_effective_area=1 turns on calculation of effective area occupied} +\item{Options}{OPTIONAL, a vector of form c('SD_site_logdensity'=0,'Calculate_Range'=0,'Calculate_effective_area'=0,'Calculate_Cov_SE'=0,'Calculate_Synchrony'=0,'Calculate_proportion'=0), where Calculate_Range=1 turns on calculation of center of gravity, and Calculate_effective_area=1 turns on calculation of effective area occupied} } \value{ Tagged list containing inputs to function \code{VAST::Build_TMB_Fn()} diff --git a/man/Plot_factors.Rd b/man/Plot_factors.Rd index c24d549..5bb75b2 100644 --- a/man/Plot_factors.Rd +++ b/man/Plot_factors.Rd @@ -1,12 +1,13 @@ -% Generated by roxygen2 (4.1.1): do not edit by hand +% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Plot_factors.R \name{Plot_factors} \alias{Plot_factors} \title{Plot factor-decomposition of covariance} \usage{ Plot_factors(Report, ParHat, Data, SD, Year_Set = NULL, - category_names = NULL, mapdetails_list = NULL, Dim_year = NULL, - Dim_species = NULL, plotdir = paste0(getwd(), "/"), land_color = "grey") + category_names = NULL, RotationMethod = "PCA", mapdetails_list = NULL, + Dim_year = NULL, Dim_species = NULL, plotdir = paste0(getwd(), "/"), + land_color = "grey") } \arguments{ \item{Report}{output report, e.g., from \code{Report <- obj$report()}} diff 
--git a/man/calculate_proportion.Rd b/man/calculate_proportion.Rd new file mode 100644 index 0000000..87112bc --- /dev/null +++ b/man/calculate_proportion.Rd @@ -0,0 +1,49 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/calculate_proportion.R +\name{calculate_proportion} +\alias{calculate_proportion} +\title{Calculate the compositional-expansion} +\usage{ +calculate_proportion(TmbData, Index, Year_Set = NULL, Years2Include = NULL, + strata_names = NULL, category_names = NULL, plot_legend = TRUE, + DirName = paste0(getwd(), "/"), PlotName = "Proportion.png", + interval_width = 1, width = 4, height = 4, ...) +} +\arguments{ +\item{TmbData}{Formatted data inputs, from `SpatialDeltaGLMM::Data_Fn(...)`} + +\item{Index}{output from \code{SpatialDeltaGLMM::PlotIndex_Fn}} + +\item{Year_Set}{Year names for labeling panels} + +\item{Years2Include}{integer vector, specifying positions of \code{Year_Set} for plotting (used to avoid plotting years with no data, etc.)} + +\item{strata_names}{names for spatial strata} + +\item{category_names}{names for categories (if using package `VAST`)} + +\item{plot_legend}{Add legend for labelling colors} + +\item{DirName}{Directory for saving plot and table} + +\item{PlotName}{Name for plot} + +\item{interval_width}{width for confidence intervals} + +\item{width}{plot width in inches} + +\item{height}{plot height in inches} + +\item{...}{list of settings to pass to \code{sdreport}} +} +\value{ +Tagged list of output +\describe{ + \item{Prop_ctl}{Proportion of biomass for each category c, time t, and stratum l} + \item{Neff_tl}{Effective sample size (median across categories) for each time t and stratum l} +} +} +\description{ +\code{calculate_proportion} takes output from a VAST run and calculates the proportion of biomass in different categories +} + From 3a0d8c8c4e62ff4b1c36d25dc591d18a7c7c0059 Mon Sep 17 00:00:00 2001 From: Jim Thorson Date: Tue, 18 Jul 2017 19:52:35 -0700 Subject: [PATCH 4/5] fix bug in `calculate_proportion` --- R/calculate_proportion.R | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/R/calculate_proportion.R b/R/calculate_proportion.R index a5b525b..9cae014 100644 --- a/R/calculate_proportion.R +++ b/R/calculate_proportion.R @@ -28,7 +28,7 @@ calculate_proportion = function( TmbData, Index, Year_Set=NULL, Years2Include=NU # Calculate proportions, and total biomass Prop_ctl = Index_ctl / outer(rep(1,TmbData$n_c),apply(Index_ctl,MARGIN=2:3,FUN=sum)) Index_tl = apply(Index_ctl,MARGIN=2:3,FUN=sum) - SE_Index_tl = apply(SE_Index_ctl,MARGIN=2:3,FUN=sum) + SE_Index_tl = sqrt(apply(SE_Index_ctl^2,MARGIN=2:3,FUN=sum)) # Approximate variance for proportions, and effective sample size Neff_ctl = var_Prop_ctl = array(NA,dim=dim(Prop_ctl)) @@ -49,20 +49,20 @@ calculate_proportion = function( TmbData, Index, Year_Set=NULL, Years2Include=NU if( is.null(category_names) ) category_names = 1:TmbData$n_c # Plot - Par = list( mar=c(2,2,1,0), mgp=c(2,0.5,0), tck=-0.02, yaxs="i", oma=c(1,1,0,0), mfrow=c(ceiling(sqrt(TmbData$n_t)),ceiling(TmbData$n_t/ceiling(sqrt(TmbData$n_t)))), ... ) + Par = list( mar=c(2,2,1,0), mgp=c(2,0.5,0), tck=-0.02, yaxs="i", oma=c(2,2,0,0), mfrow=c(ceiling(sqrt(TmbData$n_t)),ceiling(TmbData$n_t/ceiling(sqrt(TmbData$n_t)))), ... 
) png( file=paste0(DirName,"/",PlotName), width=width, height=height, res=200, units="in") par( Par ) for( tI in 1:TmbData$n_t ){ # Calculate y-axis limits Ylim = c(0, max(Prop_ctl[,tI,]%o%c(1,1) + sqrt(var_Prop_ctl[,tI,])%o%c(-interval_width,interval_width))) # Plot stuff - plot(1, type="n", xlim=range(category_names), ylim=1.05*Ylim, xlab="", ylab="", main=ifelse(TmbData$n_t>1,Year_Set[tI],"") ) + plot(1, type="n", xlim=range(category_names), ylim=1.05*Ylim, xlab="", ylab="", main=ifelse(TmbData$n_t>1,paste0("Year ",Year_Set[tI]),"") ) for(l in 1:TmbData$n_l){ SpatialDeltaGLMM:::Plot_Points_and_Bounds_Fn( y=Prop_ctl[,tI,l], x=1:TmbData$n_c+seq(-0.1,0.1,length=TmbData$n_l)[l], ybounds=Prop_ctl[,tI,]%o%c(1,1) + sqrt(var_Prop_ctl[,tI,])%o%c(-interval_width,interval_width), type="b", col=rainbow(TmbData[['n_l']])[l], col_bounds=rainbow(TmbData[['n_l']])[l], ylim=Ylim) } if(plot_legend==TRUE) legend( "top", bty="n", fill=rainbow(TmbData[['n_l']]), legend=as.character(strata_names), ncol=2 ) } - mtext( side=1:2, text=c("Year","Proportion of biomass"), outer=TRUE, line=c(0,0) ) + mtext( side=1:2, text=c("Age","Proportion of biomass"), outer=TRUE, line=c(0,0) ) dev.off() # Return stuff From ecfb4c1cdd43c22c088e605bb828e5f4e8a09ac0 Mon Sep 17 00:00:00 2001 From: Jim Thorson Date: Sun, 23 Jul 2017 08:24:07 -0700 Subject: [PATCH 5/5] now using `logspace_add` for discrete-PMFs... ... i.e., zero-inflated Poisson and zero-inflated NegBin --- inst/executables/VAST_v2_7_0.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/inst/executables/VAST_v2_7_0.cpp b/inst/executables/VAST_v2_7_0.cpp index 94306e3..2b92ad2 100644 --- a/inst/executables/VAST_v2_7_0.cpp +++ b/inst/executables/VAST_v2_7_0.cpp @@ -534,7 +534,8 @@ Type objective_function::operator() () // Zero-inflated negative binomial (not numerically stable!) var_i(i) = R2_i(i)*(1.0+SigmaM(c_i(i),0)) + pow(R2_i(i),2.0)*SigmaM(c_i(i),1); if( b_i(i)==0 ){ - LogProb2_i(i) = log( (1-R1_i(i)) + dnbinom2(Type(0.0), R2_i(i), var_i(i), false)*R1_i(i) ); // Pr[X=0] = 1-phi + NB(X=0)*phi + //LogProb2_i(i) = log( (1-R1_i(i)) + dnbinom2(Type(0.0), R2_i(i), var_i(i), false)*R1_i(i) ); // Pr[X=0] = 1-phi + NB(X=0)*phi + LogProb2_i(i) = logspace_add( log(1-R1_i(i)), dnbinom2(Type(0.0),R2_i(i),var_i(i),true)+log(R1_i(i)) ); // Pr[X=0] = 1-phi + NB(X=0)*phi }else{ LogProb2_i(i) = dnbinom2(b_i(i), R2_i(i), var_i(i), true) + log(R1_i(i)); // Pr[X=x] = NB(X=x)*phi } @@ -546,7 +547,8 @@ Type objective_function::operator() () if(ObsModel(0)==7){ // Zero-inflated Poisson if( b_i(i)==0 ){ - LogProb2_i(i) = log( (1-R1_i(i)) + dpois(Type(0.0), R2_i(i), false)*R1_i(i) ); // Pr[X=0] = 1-phi + Pois(X=0)*phi + //LogProb2_i(i) = log( (1-R1_i(i)) + dpois(Type(0.0), R2_i(i), false)*R1_i(i) ); // Pr[X=0] = 1-phi + Pois(X=0)*phi + LogProb2_i(i) = logspace_add( log(1-R1_i(i)), dpois(Type(0.0),R2_i(i),true)+log(R1_i(i)) ); // Pr[X=0] = 1-phi + Pois(X=0)*phi }else{ LogProb2_i(i) = dpois(b_i(i), R2_i(i), true) + log(R1_i(i)); // Pr[X=x] = Pois(X=x)*phi }
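Note on the composition-expansion added in PATCH 2/5 through PATCH 4/5: the calculation in `R/calculate_proportion.R` amounts to a delta-method approximation for the ratio of each category-specific index to the total index, followed by the binomial relation var(p) = p*(1-p)/Neff to obtain an effective sample size. The standalone R sketch below reproduces that arithmetic outside the package; the toy array values and dimensions (3 categories, 2 years, 1 stratum) are hypothetical, and this is an illustration of the formulas in the patch rather than a call to the exported `calculate_proportion` function.

    # Toy index estimates and standard errors, dimensioned category x year x stratum
    # (values are made up for illustration only)
    Index_ctl    <- array(c(10, 20, 30, 12, 18, 35), dim = c(3, 2, 1))
    SE_Index_ctl <- array(c( 1,  2,  3,  1,  2,  3), dim = c(3, 2, 1))

    # Total index per year and stratum; SEs combined in quadrature (as fixed in PATCH 4/5)
    Index_tl    <- apply(Index_ctl, MARGIN = 2:3, FUN = sum)
    SE_Index_tl <- sqrt(apply(SE_Index_ctl^2, MARGIN = 2:3, FUN = sum))

    # Proportion of total biomass in each category
    Prop_ctl <- Index_ctl / outer(rep(1, dim(Index_ctl)[1]), Index_tl)

    # Delta-method variance for each proportion, and implied effective sample size
    Neff_ctl <- var_Prop_ctl <- array(NA, dim = dim(Prop_ctl))
    for (cI in 1:dim(Prop_ctl)[1]) {
      for (tI in 1:dim(Prop_ctl)[2]) {
        for (lI in 1:dim(Prop_ctl)[3]) {
          var_Prop_ctl[cI, tI, lI] <- Index_ctl[cI, tI, lI]^2 / Index_tl[tI, lI]^2 *
            (SE_Index_ctl[cI, tI, lI]^2 / Index_ctl[cI, tI, lI]^2 +
             SE_Index_tl[tI, lI]^2 / Index_tl[tI, lI]^2)
          Neff_ctl[cI, tI, lI] <- Prop_ctl[cI, tI, lI] * (1 - Prop_ctl[cI, tI, lI]) /
            var_Prop_ctl[cI, tI, lI]
        }
      }
    }

    # Median effective sample size across categories, per year and stratum
    Neff_tl <- apply(Neff_ctl, MARGIN = 2:3, FUN = median)

In the TMB template (VAST_v2_7_0.cpp), the corresponding proportion is computed as `PropIndex_cyl` and its log (`ln_PropIndex_cyl`) is ADREPORT-ed, so standard errors come from TMB's own delta method in the same way as for `ln_Index_cyl`. PATCH 5/5 is a separate numerical-stability change: for zero observations of the zero-inflated Poisson and negative-binomial likelihoods it replaces log(p + q) with `logspace_add(log(p), log(q))`, which evaluates log(exp(a) + exp(b)) without underflow.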