diff --git a/+invert/tikhonov.m b/+invert/tikhonov.m
index 477f8a2..52ba916 100644
--- a/+invert/tikhonov.m
+++ b/+invert/tikhonov.m
@@ -1,29 +1,49 @@
-% TIKHONOV Performs inversion using various order Tikhonov regularization in 2D.
-% Author: Timothy Sipkens, 2018-11-21
-%
-% Inputs:
-% A Model matrix
-% b Data
-% lambda Regularization parameter
-% order_L Order of regularization -OR-
-% pre-computed Tikhonov matrix structure
-% (OPTIONAL, default is set by tikhonov_lpr)
-% n The length of first dimension of solution -OR-
-% a grid, so as to support partial grids
-% (ONLY REQUIRED when order is specified)
-% xi Initial guess for solver
-% (OPTIONAL, default is zeros)
-% solver Solver (OPTIONAL, default is interior-point)
-%
-% Outputs:
-% x Regularized estimate
-% D Inverse operator (x = D*[b;0])
-% Lpr0 Tikhonov matrix structure
-% Gpo_inv Inverse of posterior covariance
-%=========================================================================%
+% TIKHONOV Performs inversion using various order Tikhonov regularization.
+% Regularization takes place in 2D. The type of regularization or prior
+% information added depends on the order of Tikhonov applied.
+%
+% X = invert.tikhonov(A,B,LAMBDA) inverts the system A*X = B using
+% Tikhonov regularization, where A is a model matrix or kernel and B is a
+% data vector, applying a regularization parameter LAMBDA. By default,
+% the order of Tikhonov regularization, and thus the constructed Tikhonov
+% matrix, is given by the default of the invert.tikhonov_lpr(...) function.
+%
+% X = invert.tikhonov(A,B,LAMBDA,L) uses the pre-computed Tikhonov matrix
+% L during the inversion. This matrix can be computed using the
+% invert.tikhonov_lpr(...) function.
+%
+% X = invert.tikhonov(A,B,LAMBDA,ORDER,N) applies Tikhonov regularization
+% of the order specified by ORDER, where ORDER is an integer in the range
+% [0,2]. This requires the quantity N, an integer representing the length
+% of the first dimension of the solution, to build the Tikhonov matrix.
+% This is relevant for full (not partial) grids.
+%
+% X = invert.tikhonov(A,B,LAMBDA,ORDER,GRID) applies Tikhonov regularization
+% of the order specified by ORDER, as above. In the case of a partial
+% grid, an instance of the Grid class is required to build the Tikhonov
+% matrix in the place of an integer dimension.
+%
+% X = invert.tikhonov(...,XI) applies Tikhonov regularization using an
+% initial guess of XI. The default if not included is zeros.
+%
+% X = invert.tikhonov(...,XI,SOLVER) applies Tikhonov regularization
+% using the solver specified by the string in SOLVER.
+%
+% [X,D] = invert.tikhonov(...) applies Tikhonov regularization, also
+% outputting the explicit inverse operator implied by this procedure,
+% specifically such that X = D*([B;0]).
+%
+% [X,D,LPR0] = invert.tikhonov(...) applies Tikhonov regularization, also
+% outputting the unscaled Tikhonov matrix.
+%
+% [X,D,LPR0,GPO_INV] = invert.tikhonov(...) applies Tikhonov
+% regularization, also outputting the inverse posterior covariance.
+%
+% AUTHOR: Timothy Sipkens, 2018-11-21
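+%
+% EXAMPLE (an illustrative sketch; Lb, A, b, and grid_x are assumed to
+%  have been computed elsewhere, e.g., via the kernel package):
+%   lambda = 1;  % regularization parameter
+%   x_tk1 = invert.tikhonov(Lb*A, Lb*b, lambda, 1, grid_x);  % first-order Tikhonov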
-function [x,D,Lpr0,Gpo_inv] = tikhonov(A,b,lambda,order_L,n_grid,xi,solver)
+function [x, D, Lpr0, Gpo_inv] = ...
+ tikhonov(A, b, lambda, order_L, n_grid, xi, solver)
x_length = size(A,2);
diff --git a/+invert/twomey.m b/+invert/twomey.m
index 6772537..8d17b44 100644
--- a/+invert/twomey.m
+++ b/+invert/twomey.m
@@ -1,25 +1,27 @@
-% TWOMEY Performs inversion using the iterative Twomey approach.
-% Author: Timothy Sipkens, 2018-11-21
-%-------------------------------------------------------------------------%
-% Inputs:
-% A Model matrix
-% b Data
-% Lb Cholesky factorization of inverse covariance matrix
-% xi Initial guess
-% iter Max. number of iterations
-% sigma_fun Function of x to be evaluated to determine convergence
-% (Optional, default: ignore)
-% sigma Value to which sigma_fun is compared to determine convergence
-% (Optional, default: ignore)
-% f_bar Boolean to determine whether or not to show textbar
-% (Optional, default: 0)
-%
-% Outputs:
-% x Twomey estimate
-%=========================================================================%
+% TWOMEY Performs inversion using the iterative Twomey approach.
+%
+% X = invert.twomey(A,B) inverts the system A*X = B using Twomey
+% inversion, where A is a model matrix or kernel and B is a data vector.
+%
+% X = invert.twomey(A,B,XI) applies Twomey inversion using an initial
+% guess of XI. If excluded, a vector of ones is used.
+%
+% X = invert.twomey(A,B,XI,ITER) applies Twomey inversion using at most
+% ITER iterations. If excluded, 100 iterations are used.
+%
+% X = invert.twomey(A,B,XI,ITER,SIGMA_FUN,SIGMA) applies Twomey inversion
+% using a function of X, SIGMA_FUN, and threshold, SIGMA, to determine
+% convergence. If excluded, convergence is not checked and the full number
+% of iterations is used.
+%
+% X = invert.twomey(A,B,XI,ITER,SIGMA_FUN,SIGMA,F_BAR) applies Twomey
+% inversion, as above, with F_BAR flagging whether a progress bar is
+% output to the console.
+%
+% AUTHOR: Timothy Sipkens, 2018-11-21
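+%
+% EXAMPLE (an illustrative sketch; A and b are assumed to have been
+%  computed elsewhere):
+%   xi = ones(size(A,2), 1);           % initial guess of ones
+%   x = invert.twomey(A, b, xi, 100);  % run up to 100 Twomey iterations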
-function [x] = twomey(A,b,xi,iter,sigma_fun,sigma,f_bar)
+function [x] = twomey(A, b, xi, iter, sigma_fun, sigma, f_bar)
%-- Parse inputs ---------------------------------------------------------%
if ~exist('iter','var'); iter = []; end
diff --git a/+io/load_config.m b/+io/load_config.m
new file mode 100644
index 0000000..7113fde
--- /dev/null
+++ b/+io/load_config.m
@@ -0,0 +1,25 @@
+
+% LOAD_CONFIG Loads settings from configuration file (YML).
+%  Files are loaded in the order supplied, with later files overwriting
+%  properties of the same name from earlier files.
+%
+% AUTHOR: Timothy Sipkens, 2021-03-25
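+%
+% EXAMPLE (an illustrative sketch; the file names below are hypothetical):
+%   config = io.load_config({'config/default.yml', 'config/run01.yml'});
+%   % Fields in run01.yml overwrite those with the same name in default.yml.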
+
+function config = load_config(fnames)
+
+if ~iscell(fnames); fnames = {fnames}; end
+
+config = struct();
+for ii=1:length(fnames)
+
+ config0 = io.read_yml(fnames{ii}); % read new settings
+
+ % Copy (or overwrite) existing settings.
+ f = fieldnames(config0);
+ for jj = 1:length(f)
+ config.(f{jj}) = config0.(f{jj});
+ end
+
+end
+
+end
diff --git a/+io/read_yml.m b/+io/read_yml.m
new file mode 100644
index 0000000..b7b3c3c
--- /dev/null
+++ b/+io/read_yml.m
@@ -0,0 +1,53 @@
+
+% READ_YML Simple function to read simple YAML-format parameter files.
+%
+% AUTHOR: Lloyd Russell, 2017
+% REPO: https://github.com/llerussell/ReadYAML
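+%
+% EXAMPLE (an illustrative sketch; 'sample.yml' is a hypothetical file
+%  containing simple `key: value` pairs, e.g., a line reading `lambda: 0.1`):
+%   out = io.read_yml('sample.yml');
+%   out.lambda  % returns 0.1 as a numeric value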
+
+function results = read_yml(filePath)
+
+% read file line by line
+fid = fopen(filePath, 'r');
+data = textscan(fid, '%s', 'delimiter', '\n', 'whitespace', '');
+fclose(fid);
+
+% remove empty lines
+data = deblank(data{1});
+data(cellfun('isempty', data)) = [];
+
+% prepare final results structure
+results = [];
+
+% parse the contents (line by line)
+for i = 1:numel(data)
+
+ % extract this line
+ thisLine = data{i};
+
+ % ignore if this line is a comment
+ if strcmpi(thisLine(1), '#')
+ continue
+ end
+
+    % find the separator between key and value
+ sepIndex = find(thisLine==':', 1, 'first');
+
+ % get the key name (remove whitespace)
+ key = strtrim(thisLine(1:sepIndex-1));
+
+ % get the value, ignoring any comments (remove whitespace)
+ value = strsplit(thisLine(sepIndex+1:end), '#');
+ value = strtrim(value{1});
+
+ % attempt to convert value to numeric type
+ [convertedValue, success] = str2num(value);
+ if success
+ value = convertedValue;
+ end
+
+ % store the key and value in the results
+ results.(key) = value;
+end
+
+end
+
diff --git a/+kernel/gen.m b/+kernel/gen_pma_dma.m
similarity index 62%
rename from +kernel/gen.m
rename to +kernel/gen_pma_dma.m
index 0190696..6e79022 100644
--- a/+kernel/gen.m
+++ b/+kernel/gen_pma_dma.m
@@ -1,31 +1,43 @@
-% GEN Evaluate kernel/transfer functions for DMA-PMA to find A.
-% Author: Timothy Sipkens, 2020-02-04
+% GEN_PMA_DMA Evaluate kernel/transfer functions for PMA-DMA.
+% This function applies to miscellaneous PMA-DMA setpoints.
+%  The kernel.gen_pma_dma_grid function is preferred for computational
+%  reasons. As such, this function should only be used when the data does
+%  not adhere to a reasonable grid.
%
-% Notes:
-% 1. Cell arrays are used for Omega_mat and Lambda_mat in order to
-% allow for the use of sparse matrices, which is necessary to
-% store information on higher resolutions grids
-% (such as those used for phantoms).
-% 2. By default, this function uses the analytical PMA transfer function
-% corresponding to Case 1C from Sipkens et al. (Aerosol Sci. Technol. 2020b).
-% 3. Unlike gen_grid, requires a pre-computed setpoint structure (and,
-% as such, a varargin argument is not included).
+% A = kernel.gen_pma_dma(SP,D_STAR,GRID_I) evaluates the transfer function
+% for the PMA setpoints specified by SP and the DMA setpoints specified by
+% D_STAR. The kernel is evaluated by integrating the transfer function
+% over the elements in GRID_I. This requires the same number of entries
+% in SP and D_STAR.
+% Please refer to the get_setpoint(...) function in the tfer_pma folder
+% for more details on generating the SP structure.
%
-% Inputs:
-% sp PMA setpoint structure
-% d_star DMA setpoints
-% grid_i Grid on which to perform integration
-% prop_pma Structure defining the properties of the PMA
-%=========================================================================%
+% A = kernel.gen_pma_dma(SP,D_STAR,GRID_I,PROP_PMA) specifies a
+% pre-computed PMA property data structure. If not given, the function
+% uses the defaults of kernel.prop_pma(...).
+%
+% A = kernel.gen_pma_dma(SP,D_STAR,GRID_I,PROP_PMA,PROP_DMA) specifies a
+% pre-computed DMA property data structure.
+%
+% ------------------------------------------------------------------------
+%
+% NOTE: Cell arrays are used for Omega_mat and Lambda_mat in order to
+% allow for the use of sparse matrices, which is necessary to
+%  store information on higher resolution grids (such as those
+% used for phantoms).
+%
+% AUTHOR: Timothy Sipkens, 2020-02-04
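+%
+% EXAMPLE (an illustrative sketch; grid_i, m_star, and d_star are assumed
+%  to be defined elsewhere, with one entry of d_star per PMA setpoint):
+%   prop_pma = kernel.prop_pma;  % default PMA properties
+%   sp = get_setpoint(prop_pma, 'm_star', m_star.*1e-18, 'Rm', 10);
+%   A = kernel.gen_pma_dma(sp, d_star, grid_i);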
-function A = gen(sp,d_star,grid_i,prop_dma)
+function A = gen_pma_dma(sp, d_star, grid_i, prop_pma, prop_dma)
+% If not given, import default properties of PMA,
+% as selected by prop_pma function.
if ~exist('prop_pma','var'); prop_pma = []; end
if isempty(prop_pma); prop_pma = kernel.prop_pma; end
- % import properties of PMA
- % use default properties selected by prop_pma function
+
if length(sp)~=length(d_star); error('Setpoint / d_star mismatch.'); end
+
if ~exist('prop_dma','var'); prop_dma = []; end
@@ -55,7 +67,7 @@
% Note: The DMA transfer function is 1D (only a function of mobility),
% which is exploited to speed evaluation. The results is 1 by 3 cell,
% with one entry per charge state.
-disp('Computing DMA contribution...');
+disp(' Computing DMA contribution...');
Omega_mat = cell(1,n_z); % pre-allocate for speed, one cell entry per charge state
for kk=1:n_z
Omega_mat{kk} = sparse(N_b,n_i(2)); % pre-allocate for speed
@@ -75,12 +87,12 @@
Omega_mat{kk} = Omega_mat{kk}(:,jj);
% repeat transfer function for repeated mass setpoint
end
-disp('Completed DMA contribution.');
+disp(' Completed DMA contribution.');
disp(' ');
%== STEP 2: Evaluate PMA transfer function ===============================%
-disp('Computing PMA contribution:');
+disp(' Computing PMA contribution:');
tools.textbar(0); % initiate textbar
Lambda_mat = cell(1,n_z); % pre-allocate for speed
% one cell entry per charge state
@@ -96,18 +108,19 @@
tools.textbar((N_b*(kk-1)+ii)/(n_z*N_b));
end
end
+disp(' Complete.');
disp(' ');
%== SETP 3: Combine to compile kernel ====================================%
-disp('Compiling kernel...');
+disp(' Compiling kernel...');
K = sparse(N_b,N_i);
for kk=1:n_z
K = K+f_z(z_vec(kk),:).*... % charging contribution
Lambda_mat{kk}(:,:).*... % PMA contribution
Omega_mat{kk}(:,:); % DMA contribution
end
-disp('Kernel compiled.');
+disp(' Kernel compiled.');
dr_log = grid_i.dr; % area of integral elements in [logm,logd]T space
A = bsxfun(@times,K,dr_log'); % multiply kernel by element area
diff --git a/+kernel/gen_grid.m b/+kernel/gen_pma_dma_grid.m
similarity index 58%
rename from +kernel/gen_grid.m
rename to +kernel/gen_pma_dma_grid.m
index 5d00050..5c777ed 100644
--- a/+kernel/gen_grid.m
+++ b/+kernel/gen_pma_dma_grid.m
@@ -1,35 +1,57 @@
-% GEN_GRID Evaluate kernel/transfer functions for DMA-PMA to find A, exploiting grid structure.
-% Author: Timothy Sipkens, 2018-11-27
+% GEN_PMA_DMA_GRID Evaluate kernel/transfer functions for PMA-DMA.
+% Relative to kernel.gen_pma_dma, this function exploits the grid
+% structure of the data and reconstruction domain to speed computation.
%
-% Notes:
-% 1. This function exploits the grid structure to minimize the number of
-% transfer function evaluations.
-% 2. Cell arrays are used for Omega_mat and Lambda_mat in order to
-% allow for the use of sparse matrices, which is necessary to
-% store information on higher resolutions grids
-% (such as those used for phantoms).
-% 3. By default, this function uses the analytical PMA transfer function
-% corresponding to Case 1C from Sipkens et al. (Aerosol Sci. Technol. 2020b).
+% A = kernel.gen_pma_dma_grid(GRID_B,GRID_I) uses the data grid, GRID_B,
+% and integration grid, GRID_I, which is generally higher resolution.
%
-%-------------------------------------------------------------------------%
-% Inputs:
-% grid_b Grid on which the data exists
-% grid_i Grid on which to perform integration
-% prop_pma Structure defining the properties of the PMA
-% varargin Name-value pairs used in evaluating the PMA tfer. fun.
-%=========================================================================%
+% A = kernel.gen_pma_dma_grid(GRID_B,GRID_I,PROP_PMA) uses the
+% pre-computed particle mass analyzer properties specified by PROP_PMA.
+% This structure can be generated manually or with the help of the
+% kernel.prop_pma(...) function.
+%
+% A = kernel.gen_pma_dma_grid(GRID_B,GRID_I,PROP_PMA,PROP_DMA) uses the
+% pre-computed differential mobility analyzer properties specified by
+% PROP_DMA. This structure can be generated manually or with the help of
+% the kernel.prop_dma(...) function. PROP_PMA can be excluded by supplying
+% an empty input, i.e., PROP_PMA = [].
+%
+% A = kernel.gen_pma_dma_grid(...,'name',value) specifies name-value
+% pairs that are passed on as input to the get_setpoint(...) function
+% from the tfer_pma folder. This specifies one quantity other than the
+% setpoint mass that is used to constrain the PMA operating point. For
+% example, {'Rm',10} specifies a PMA resolution of 10, which is
+% used for all of the setpoints.
+% If not provided, the function uses the default in the get_setpoint(...)
+% function in the tfer_pma folder.
+%
+% [A,SP] = kernel.gen_pma_dma_grid(...) also outputs the PMA setpoint
+% structure (associated with the mat_tfer_pma submodule) for the given
+% grid.
+%
+% ------------------------------------------------------------------------
+%
+% NOTE: Cell arrays are used for Omega_mat and Lambda_mat in order to
+% allow for the use of sparse matrices, which is necessary to
+%  store information on higher resolution grids
+% (such as those used for phantoms).
+%
+% AUTHOR: Timothy Sipkens, 2018-11-27
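+%
+% EXAMPLE (an illustrative sketch; grid_b and grid_x are assumed to be
+%  pre-defined instances of the Grid class):
+%   [A, sp] = kernel.gen_pma_dma_grid(grid_b, grid_x, [], [], 'Rm', 10);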
-function [A,sp] = gen_grid(grid_b, grid_i, prop_pma, prop_dma, varargin)
+function [A, sp] = gen_pma_dma_grid(grid_b, grid_i, prop_pma, prop_dma, varargin)
-addpath tfer_pma; % add mat-tfer-pma package to MATLAB path
+% Add mat-tfer-pma package to MATLAB path.
+addpath tfer_pma;
+
+% If not given, import default properties of PMA,
+% as selected by prop_pma function.
if ~exist('prop_pma','var'); prop_pma = []; end
if isempty(prop_pma); prop_pma = kernel.prop_pma; end
- % import properties of PMA
- % use default properties selected by prop_pma function
+
if ~exist('prop_dma','var'); prop_dma = []; end
-
+
%-- Parse measurement set points (b) -------------------------------------%
r_star = grid_b.elements; % vector of all setpoint pairs
m_star = r_star(:,1); % mass setpoints
@@ -61,11 +83,11 @@
% Note: The DMA transfer function is 1D (only a function of mobility),
% which is exploited to speed evaluation. The results is 1 by 3 cell,
% with one entry per charge state.
-disp('Computing DMA contribution:');
+disp(' Computing DMA contribution:');
Omega_mat = cell(1,n_z); % pre-allocate for speed, one cell entry per charge state
tools.textbar([0, n_b(2), 0, n_z]);
for kk=1:n_z
- Omega_mat{kk} = sparse(n_b(2),n_i(2));% pre-allocate for speed
+ Omega_mat{kk} = sparse(n_b(2),n_i(2)); % pre-allocate for speed
for ii=1:n_b(2) % loop over d_star
Omega_mat{kk}(ii,:) = kernel.tfer_dma( ...
@@ -76,20 +98,20 @@
tools.textbar([ii, n_b(2), kk, n_z]);
end
- Omega_mat{kk}(Omega_mat{kk}<(1e-7.*max(max(Omega_mat{kk})))) = 0;
- % remove numerical noise in kernel
-
+ % Remove numerical noise in kernel.
+ Omega_mat{kk}(Omega_mat{kk} < (1e-7 .* max(max(Omega_mat{kk})))) = 0;
+
+ % Duplicate transfer function for repeated mass in grid_i.
[~,jj] = max(d==grid_i.edges{2},[],2);
Omega_mat{kk} = Omega_mat{kk}(:,jj);
- % duplicate transfer function for repeated mass in grid_i
end
-disp('Complete.');
+disp(' Complete.');
disp(' ');
%=========================================================================%
%== STEP 2: Evaluate PMA transfer function ===============================%
-disp('Computing PMA contribution:');
+disp(' Computing PMA contribution:');
tools.textbar([0, n_b(1), 0, n_z]); % initiate textbar
Lambda_mat = cell(1,n_z); % pre-allocate for speed, one cell entry per charge state
@@ -103,19 +125,20 @@
for ii=1:n_b(1) % loop over m_star
% Evaluate PMA transfer function.
- Lambda_mat{kk}(ii,:) = kernel.tfer_pma(...
+ Lambda_mat{kk}(ii,:) = kernel.tfer_pma( ...
sp(ii), m.*1e-18, ...
- d.*1e-9, z_vec(kk), prop_pma)';
+ d.*1e-9, z_vec(kk), prop_pma)';
tools.textbar([ii, n_b(1), kk, n_z]); % update text progress bar
end
end
+disp(' Complete.');
disp(' ');
%=========================================================================%
%== SETP 3: Combine to compile kernel ====================================%
-disp('Compiling kernel...');
+disp(' Compiling kernel...');
K = sparse(N_b,N_i);
for kk=1:n_z
[~,i1] = max(m_star==grid_b.edges{1},[],2); % index corresponding to PMA setpoint
@@ -125,8 +148,8 @@
Lambda_mat{kk}(i1,:).*... % PMA contribution
Omega_mat{kk}(i2,:); % DMA contribution
end
+disp(' Kernel compiled.');
%=========================================================================%
-disp('Kernel compiled.');
dr_log = grid_i.dr; % area of integral elements in [logm,logd]T space
A = bsxfun(@times,K,dr_log'); % multiply kernel by element area
diff --git a/+kernel/gen_pma_sp2.m b/+kernel/gen_pma_sp2.m
new file mode 100644
index 0000000..7149d93
--- /dev/null
+++ b/+kernel/gen_pma_sp2.m
@@ -0,0 +1,139 @@
+
+% GEN_PMA_SP2 Evaluate kernel/transfer functions for PMA-SP2 to find A.
+%
+% A = kernel.gen_pma_sp2(SP,MRBC_NODES,GRID_I) evaluates the transfer
+% function for the PMA setpoints specified by SP and the SP2 bins
+% specified by MRBC_NODES. The kernel is evaluated by integrating the
+% transfer function over the elements in GRID_I.
+% Please refer to the get_setpoint(...) function in the tfer_pma folder
+% for more details on generating the SP structure.
+%
+% A = kernel.gen_pma_sp2(...,PROP_PMA) specifies a pre-computed PMA
+% property data structure. If not given, the function uses the
+% defaults of kernel.prop_pma(...).
+%
+% INPUTS:
+% sp PMA setpoint structure
+% mrbc_nodes Nodes (bin edges) for binned SP2 data setpoints,
+% refractory black carbon mass
+%                  mrbc_nodes(:,1) = lower edge;
+%                  mrbc_nodes(:,2) = upper edge;
+% grid_i Integration grid, e.g. grid_x
+% prop_pma PMA properties structure, e.g. prop.r1 = 0.06
+% (Optional: default is the default properties from the
+% kernel.prop_pma function)
+%
+% OUTPUTS:
+% A Kernel matrix
+%
+% ------------------------------------------------------------------------
+%
+% NOTES:
+% 1. Cell arrays are used for Omega_mat and Lambda_mat in order to
+% allow for the use of sparse matrices, which is necessary to
+%     store information on higher resolution grids
+% (such as those used for phantoms).
+% 2. By default, this function uses the analytical PMA transfer function
+% corresponding to Case 1C from Sipkens et al. (Aerosol Sci. Technol. 2020b).
+%
+% AUTHOR: Timothy Sipkens, Arash Naseri, 2020-02-19
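+%
+% EXAMPLE (an illustrative sketch; grid_i, m_star, and mrbc_nodes are
+%  assumed to be defined elsewhere, with one PMA setpoint per row of
+%  mrbc_nodes):
+%   prop_pma = kernel.prop_pma;  % default PMA properties
+%   sp = get_setpoint(prop_pma, 'm_star', m_star.*1e-18, 'Rm', 10);
+%   A = kernel.gen_pma_sp2(sp, mrbc_nodes, grid_i, prop_pma);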
+
+function A = gen_pma_sp2(sp, mrbc_nodes, grid_i, prop_pma)
+
+% If not given, import default properties of PMA,
+% as selected by prop_pma function.
+if ~exist('prop_pma','var'); prop_pma = []; end
+if isempty(prop_pma); prop_pma = kernel.prop_pma; end
+
+if length(sp)~=length(mrbc_nodes); error('Setpoint / mrbc_nodes mismatch.'); end
+
+
+%-- Parse measurement set points (b) -------------------------------------%
+N_b = length(sp); % length of data vector
+
+
+%-- Generate grid for integration ----------------------------------------%
+n_i = grid_i.ne;
+N_i = grid_i.Ne; % length of integration vector
+
+r = grid_i.elements;
+m = r(:,2);
+mrbc = r(:,1);
+d = (m.*1e-18./prop_pma.rho0).^...
+ (1/prop_pma.Dm).*1e9;
+ % invoke mass-mobility relation
+
+
+%-- Start evaluate kernel ------------------------------------------------%
+tools.textheader('Computing PMA-SP2 kernel');
+
+%== Evaluate particle charging fractions =================================%
+z_vec = (1:3)';
+f_z = sparse(kernel.tfer_charge(d.*1e-9,z_vec)); % get fraction charged for d
+n_z = length(z_vec);
+
+
+
+%== STEP 1: Evaluate SP2 transfer function ===============================%
+% Note: The SP2 contribution is a boxcar function that takes into
+% account discretization only.
+disp(' Computing SP2 contribution:');
+tools.textbar([0, N_b]); % initiate textbar
+Omega_mat = sparse(N_b,n_i(1));% pre-allocate for speed
+for ii=1:N_b
+ Omega_mat(ii,:) = max(...
+        min(grid_i.nodes{1}(2:end),mrbc_nodes(ii,2))-... % upper bound of overlap
+        max(grid_i.nodes{1}(1:(end-1)),mrbc_nodes(ii,1))... % lower bound of overlap
+ ,0)./(mrbc_nodes(ii,2)-mrbc_nodes(ii,1)); % normalize by SP2 bin size
+ tools.textbar([ii, N_b]);
+end
+
+[~,jj] = max(mrbc==grid_i.edges{1},[],2);
+Omega_mat = Omega_mat(:,jj);
+ % repeat transfer function for repeated mass in grid_i
+
+disp(' Completed SP2 contribution.');
+disp(' ');
+
+
+
+%== STEP 2: Evaluate PMA transfer function ===============================%
+disp(' Computing PMA contribution:');
+tools.textbar(0); % initiate textbar
+Lambda_mat = cell(1,n_z); % pre-allocate for speed
+ % one cell entry per charge state
+for kk=1:n_z % loop over the charge state
+ Lambda_mat{kk} = sparse(N_b,N_i);% pre-allocate for speed
+
+ for ii=1:N_b % loop over m_star
+ Lambda_mat{kk}(ii,:) = kernel.tfer_pma(...
+ sp(ii),m.*1e-18,...
+ d.*1e-9,z_vec(kk),prop_pma)';
+ % PMA transfer function
+
+ tools.textbar((N_b*(kk-1)+ii)/(n_z*N_b));
+ end
+end
+disp(' ');
+
+
+%== STEP 3: Combine to compile kernel ====================================%
+disp(' Compiling kernel...');
+K = sparse(N_b,N_i);
+for kk=1:n_z
+ K = K+f_z(z_vec(kk),:).*... % charging contribution
+ Lambda_mat{kk}(:,:).*... % PMA contribution
+ Omega_mat; % SP2 contribution
+end
+disp(' Complete.');
+
+dr_log = grid_i.dr; % area of integral elements in [logm,logd]T space
+A = bsxfun(@times,K,dr_log'); % multiply kernel by element area
+A = sparse(A); % exploit sparse structure
+
+tools.textheader;
+
+end
+
+
+
diff --git a/+kernel/gen_grid_c2.m b/+kernel/gen_pma_sp2_grid.m
similarity index 59%
rename from +kernel/gen_grid_c2.m
rename to +kernel/gen_pma_sp2_grid.m
index d0fb8f2..8a54d18 100644
--- a/+kernel/gen_grid_c2.m
+++ b/+kernel/gen_pma_sp2_grid.m
@@ -1,33 +1,50 @@
-% GEN_GRID_C2 Evaluate kernel/transfer functions for PMA-SP2 to find A, exploiting grid structure.
-% Author: Timothy Sipkens, 2018-11-27
+% GEN_PMA_SP2_GRID Evaluate kernel/transfer functions for PMA-SP2 to find A, exploiting grid structure.
%
-% Notes:
-% 1. This function exploits the grid structure to minimize the number of
-% transfer function evaluations.
-% 2. Cell arrays are used for Omega_mat and Lambda_mat in order to
-% allow for the use of sparse matrices, which is necessary to
-% store information on higher resolutions grids
-% (such as those used for phantoms).
-% 3. By default, this function uses the analytical PMA transfer function
-% corresponding to Case 1C from Sipkens et al. (Aerosol Sci. Technol. 2020b).
+% A = kernel.gen_pma_sp2_grid(GRID_B,GRID_I) uses the data grid, GRID_B,
+% and integration grid, GRID_I, which is generally higher resolution.
%
-% Inputs:
-% grid_b Grid on which the data exists
-% grid_i Grid on which to perform integration
-% prop_pma Structure defining the properties of the PMA
-% varargin Name-value pairs used in evaluating the PMA tfer. fun.
-%=========================================================================%
+% A = kernel.gen_pma_sp2_grid(GRID_B,GRID_I,PROP_PMA) uses the
+% pre-computed particle mass analyzer properties specified by PROP_PMA.
+% This structure can be generated manually or with the help of the
+% kernel.prop_pma(...) function.
+%
+% A = kernel.gen_pma_sp2_grid(...,'name',value) specifies name-value
+% pairs that are passed on as input to the get_setpoint(...) function
+% from the tfer_pma folder. This specifies one quantity other than the
+% setpoint mass that is used to constrain the PMA operating point. For
+% example, {'Rm',10} specifies a PMA resolution of 10, which is
+% used for all of the setpoints.
+% If not provided, the function uses the default in the get_setpoint(...)
+% function in the tfer_pma folder.
+%
+% [A,SP] = kernel.gen_pma_sp2_grid(...) also outputs the PMA setpoint
+% structure (associated with the mat_tfer_pma or tfer_pma folder) for
+% the given grid.
+%
+% ------------------------------------------------------------------------
+%
+% NOTES:
+% 1. This function exploits the grid structure to minimize the number of
+% transfer function evaluations.
+% 2. Cell arrays are used for Omega_mat and Lambda_mat in order to
+% allow for the use of sparse matrices, which is necessary to
+%     store information on higher resolution grids
+% (such as those used for phantoms).
+% 3. By default, this function uses the analytical PMA transfer function
+% corresponding to Case 1C from Sipkens et al. (Aerosol Sci. Technol. 2020b).
+%
+% AUTHOR: Timothy Sipkens, 2018-11-27
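+%
+% EXAMPLE (an illustrative sketch; grid_b and grid_x are assumed to be
+%  pre-defined instances of the Grid class):
+%   [A, sp] = kernel.gen_pma_sp2_grid(grid_b, grid_x, [], 'Rm', 10);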
-function [A,sp] = gen_grid_c2(grid_b,grid_i,prop_pma,varargin)
+function [A,sp] = gen_pma_sp2_grid(grid_b, grid_i, prop_pma, varargin)
%-- Parse inputs ---------------------------------------------------------%
+% If not given, import default properties of PMA,
+% as selected by prop_pma function.
addpath tfer_pma; % add mat-tfer-pma package to MATLAB path
if ~exist('prop_pma','var'); prop_pma = []; end
if isempty(prop_pma); prop_pma = kernel.prop_pma; end
- % import properties of PMA
- % use default properties selected by prop_pma function
if or(isempty(varargin),length(varargin)~=2) % parse extra information for PMA
error('Invalid additional information for PMA setpoint.');
@@ -56,7 +73,7 @@
%-- Start evaluate kernel ------------------------------------------------%
-disp('Computing kernel...');
+tools.textheader('Computing PMA-SP2 kernel');
%== Evaluate particle charging fractions =================================%
z_vec = (1:3)';
@@ -68,28 +85,30 @@
% Note: The SP2 contribution is 1D and does not depend on the charge
% state. It is boxcar function that takes into account discretization
% only.
-disp('Computing SP2 contribution...');
+disp(' Computing SP2 contribution...');
+tools.textbar([0,n_b(1)]); % initiate textbar
Omega_mat = sparse(n_b(1),n_i(1));% pre-allocate for speed
for ii=1:n_b(1)
Omega_mat(ii,:) = max(...
min(grid_i.nodes{1}(2:end),grid_b.nodes{1}(ii+1))-... % lower bound
max(grid_i.nodes{1}(1:(end-1)),grid_b.nodes{1}(ii))... % upper bound
,0)./(grid_b.nodes{1}(ii+1)-grid_b.nodes{1}(ii)); % normalize by SP2 bin size
+ tools.textbar([ii,n_b(1)]);
end
[~,jj] = max(mrbc==grid_i.edges{1},[],2);
Omega_mat = Omega_mat(:,jj);
% repeat transfer function for repeated mass in grid_i
-disp('Completed SP2 contribution.');
+disp(' Completed SP2 contribution.');
disp(' ');
%=========================================================================%
%== STEP 2: Evaluate PMA transfer function ===============================%
-disp('Computing PMA contribution:');
+disp(' Computing PMA contribution:');
-tools.textbar(0); % initiate textbar
+tools.textbar([0, (n_z*n_b(2))]); % initiate textbar
Lambda_mat = cell(1,n_z); % pre-allocate for speed, one cell entry per charge state
sp = get_setpoint(prop_pma,...
'm_star',grid_b.edges{2}.*1e-18,varargin{:}); % get PMA setpoints
@@ -103,15 +122,16 @@
z_vec(kk),prop_pma)';
% PMA transfer function
- tools.textbar((n_b(2)*(kk-1)+ii)/(n_z*n_b(2)));
+ tools.textbar([(n_b(2)*(kk-1)+ii), (n_z*n_b(2))]);
end
end
+disp(' Complete.');
disp(' ');
%=========================================================================%
%== SETP 3: Combine to compile kernel ====================================%
-disp('Compiling kernel...');
+disp(' Compiling kernel...');
K = sparse(N_b,N_i);
for kk=1:n_z
[~,i1] = max(m_star==grid_b.edges{2},[],2); % index corresponding to PMA setpoint
@@ -121,7 +141,7 @@
Lambda_mat{kk}(i1,:).*... % PMA contribution
Omega_mat(i2,:); % SP2 contribution
end
-disp('Completed kernel.');
+disp(' Completed kernel.');
%=========================================================================%
@@ -129,8 +149,7 @@
A = bsxfun(@times,K,dr_log'); % multiply kernel by element area
A = sparse(A); % exploit sparse structure
-disp('Completed computing kernel matrix, A.');
-disp(' ');
+tools.textheader;
end
diff --git a/+kernel/prop_pma.m b/+kernel/prop_pma.m
index 4ad3d98..a7b42b8 100644
--- a/+kernel/prop_pma.m
+++ b/+kernel/prop_pma.m
@@ -1,14 +1,13 @@
-% PROP_PMA Generates the prop struct used to summarize PMA parameters.
-% Author: Timothy Sipkens, 2019-06-26
-%-------------------------------------------------------------------------%
-% Input:
-% opts Options string specifying parameter set
-% (Optional, default 'Olfert')
-%
-% Output:
-% prop Properties struct for use in evaluating transfer function
-%=========================================================================%
+% PROP_PMA Generates the prop struct used to summarize CPMA parameters.
+%
+% PROP = kernel.prop_pma() creates a default PMA properties structure for
+% use in evaluating the transfer function. This is equivalent to
+% kernel.prop_pma('olfert').
+%
+% PROP = kernel.prop_pma(SPEC) adds a string specifying the parameter set.
+%
+% AUTHOR: Timothy Sipkens, 2019-06-26
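+%
+% EXAMPLE (mirroring the default noted above):
+%   prop = kernel.prop_pma('olfert');  % equivalent to prop = kernel.prop_pma();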
function [prop] = prop_pma(opts)
@@ -129,5 +128,9 @@
prop.D = @(B) kB.*prop.T.*B; % diffusion coefficient
+% Fill mass-mobility relation equivalents.
+addpath tfer_pma;
+prop = prop_massmob(prop);
+
end
diff --git a/+kernel/tfer_dma.m b/+kernel/tfer_dma.m
index ebfe26d..8e13b1f 100644
--- a/+kernel/tfer_dma.m
+++ b/+kernel/tfer_dma.m
@@ -1,21 +1,31 @@
% TFER_DMA Evaluates the transfer function of a differential mobility analyzer.
-% Author: Timothy Sipkens, 2018-12-27
-% Adapted: Buckley et al. (2017) and Olfert group
%
-% Inputs:
-% d_star Particle diameter, measurement set point for DMA [m]
-% d Particle diameter, points in integral, can be vector [m]
-% z Integer charge state, scalar
-% prop DMA properties, struct, generated using prop_DMA function (optional)
-% opts.diffusion Indicates whether to include diffusion, boolean (optional)
+% OMEGA = kernel.tfer_dma(D_STAR,D,Z) uses the mobility diameter set points
+% specified by D_STAR [m] and evaluates the DMA transfer function at D
+% [m] for an integer charge state of Z (a scalar, integer). Uses default
+% properties specified by kernel.prop_dma. Explicitly stating PROP_DMA is
+% preferred. The output is the transfer function, OMEGA.
+%
+% OMEGA = kernel.tfer_dma(D_STAR,D,Z,PROP_DMA) explicitly specifies the
+% properties of the DMA (e.g., the radii) as a data structure. This is
+% the preferred usage relative to the previous call. For the structure of
+% PROP_DMA, see kernel.prop_dma(...).
+%
+% OMEGA = kernel.tfer_dma(...,OPTS) adds an options structure with the
+% fields specified below:
+% OPTS.diffusion Indicates whether to include diffusion, boolean (optional)
% .solver Indicates the method by which diffusion is calculated (optional)
% .param String indicated which parameter set to use (see prop_DMA.m)
-%
-% Outputs:
-% Omega Transfer function
-% Zp_tilde Non-dimensional electrical mobility, vector
-%=========================================================================%
+%
+% [OMEGA,ZP_TILDE] = kernel.tfer_dma(...) adds an output containing the
+% non-dimensional electrical mobility as a vector.
+%
+% ------------------------------------------------------------------------
+%
+% AUTHOR: Timothy Sipkens, 2018-12-27
+% ADAPTED FROM: Buckley et al. (2017) and Olfert group
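+%
+% EXAMPLE (an illustrative sketch, using the default DMA properties and
+%  singly charged particles):
+%   d = logspace(-8.5, -6.5, 200)';         % particle mobility diameters [m]
+%   Omega = kernel.tfer_dma(200e-9, d, 1);  % transfer function at a 200 nm setpoint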
+
function [Omega,Zp_tilde] = tfer_dma(d_star,d,z,prop,opts)
diff --git a/+tools/get_noise.m b/+tools/get_noise.m
index 6aac6ec..f3f8a1d 100644
--- a/+tools/get_noise.m
+++ b/+tools/get_noise.m
@@ -1,31 +1,39 @@
% GET_NOISE Simulates Poisson-Gaussian noise in signals.
-% Author: Timothy Sipkens, 2019-12-08
-%
-% Modeled after Sipkens et al., Appl. Opt. (2017).
-% DOI: https://doi.org/10.1364/AO.56.008436
-%
-% Inputs:
-% b0 Uncorrupted data vector
-%
-% n_tot Factor by which the data was normalized
-% Note that `get_noise(n_tot.*b0)./n_tot;` and
-% `get_noise(b0,n_tot);` should give identical results.
-% (Optional, dafault: n_tot = 1)
-%
-% gam0 Percentage of the maximum noise used for the background
-% Gaussian noise.
-% (Optional, default: gam0 = 1e-4,
-% i.e. 0.01% of the peak noise)
-%
-% f_apx Flag for whether to use a Gaussian approximation for Poisson
-% noise. Note that Lb is the Cholesky factorization of the
-% inverse of the covariance matrix for the Gaussian
-% approximation. Though, this covariance will
-% well-approximates the covariance information for the
-% true Poisson case.
-% (Optional, default: f_apx = 1;)
-%=========================================================================%
+% Modeled after Sipkens et al., Appl. Opt. (2017).
+% DOI: https://doi.org/10.1364/AO.56.008436
+%
+% B = tools.get_noise(B0) adds Poisson-Gaussian noise to the raw signal
+% B0 (representing raw counts) and outputs the noise-corrupted data as B.
+%
+% B = tools.get_noise(B0,N_TOT) adds the option to use scaled data as
+% input. By default, N_TOT = 1, which means that B0 is counts. For
+% example, this usage can corrupt a PDF specified by B0 with
+% Poisson-Gaussian noise if the total counts over the PDF was N_TOT.
+% Note that tools.get_noise(N_TOT.*B0)./N_TOT and
+% tools.get_noise(B0,N_TOT) will give the same result.
+%
+% B = tools.get_noise(B0,N_TOT,GAM0) adds an input to specify the level of
+% Gaussian background noise. The quantity is phrased relative to the
+% maximum value of N_TOT.*B0. By default, GAM0 = 1e-4, which corresponds
+% to a Gaussian noise level of 0.01% of the peak noise.
+%
+% B = tools.get_noise(...,F_APX) adds a flag for whether a Gaussian
+% approximation is used for the Poisson noise.
+%
+% [B,LB] = tools.get_noise(...) adds an output for the Cholesky
+% factorization of the inverse of the covariance matrix, assuming the
+% Poisson noise can be approximated as Gaussian.
+%
+% ------------------------------------------------------------------------
+%
+% NOTE: Lb is the Cholesky factorization of the
+%  inverse of the covariance matrix for the Gaussian
+%  approximation. However, this covariance
+%  well-approximates the covariance information for the
+%  true Poisson case.
+%
+% AUTHOR: Timothy Sipkens, 2019-12-08
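+%
+% EXAMPLE (an illustrative sketch; b0 is assumed to be a noiseless data
+%  vector, e.g., b0 = A*x0):
+%   Ntot = 1e5;  % total number of particle counts
+%   [b, Lb] = tools.get_noise(b0, Ntot);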
function [b,Lb] = get_noise(b0,n_tot,gam0,f_apx)
diff --git a/+tools/textbar.m b/+tools/textbar.m
index a6fc195..541dfba 100644
--- a/+tools/textbar.m
+++ b/+tools/textbar.m
@@ -1,33 +1,31 @@
-% TEXTBAR Print out a text-based progress bar.
-% Author: Timothy Sipkens, 2020-11-02
-% Based on: Samuel Grauer, 2017-11-16 + tqdm for Python
+% TEXTBAR Print out a text-based progress bar.
%
-% DESCRIPTION:
-% textbar() or textbar(0) initializes a textbar without
+% DESCRIPTION:
+% tools.textbar() or textbar(0) initializes a textbar without
% a trailing fraction:
% 0% | |
%
-% textbar(pct) displays a textbar with the provided percentage,
+% tools.textbar(pct) displays a textbar with the provided percentage,
% removing text corresponding to the previous textbar.
% For example, textbar(0.1) yields:
% 10% |██ |
%
-% textbar([i(1), i(2)]) displays a textbar with an appended fraction,
-% again removing text corresponding to the previous textbar.
+% tools.textbar([i(1), i(2)]) displays a textbar with an appended
+% fraction, again removing text corresponding to the previous textbar.
% In this case, i(1) corresponds to the current iterable, and
% i(2) to the total. For example, textbar([1, 10]) yields:
% 10% |██ | 1/10
%
-% textbar([i(1), i(2), i(3), i(4), ...]) displays a textbar considering
+% tools.textbar([i(1), i(2), i(3), i(4), ...]) displays a textbar considering
% multiple iterables. These are given as pairs of integers, starting
% from the most recent loop and moving outward. Integer pairs can be
% added for as many iterables are present. For example,
% textbar([1, 3, 2, 5]) yields:
% 27% |█████▌ | 4/15
%
-% textbar(..., f_back) displays a textbar with control over whether the
-% previous textbar is removed, where f_back=1 will remove the
+% tools.textbar(..., f_back) displays a textbar with control over whether
+% the previous textbar is removed, where f_back=1 will remove the
% previous text and f_back = 0 will not. By default, f_back=1, that
% is, textbar(0.4, 1) is the same as textbar(0.4). By contrast,
% the code `for ii=1:3; textbar([ii, 3], 0); end` will yield:
@@ -35,10 +33,12 @@
% 67% |█████████████▌ | 2/3
% 100% |████████████████████| 3/3
%
-% NOTE:
+% NOTE:
% If this function is in a tools package, append `tools.` to the
% function call. For example: `tools.textbar(0)` to initialize bar.
-%=========================================================================%
+%
+% AUTHOR: Timothy Sipkens, 2020-11-02
+% INSPIRED BY: Samuel Grauer, 2017-11-16 + tqdm for Python
function textbar(i, f_back)
@@ -97,8 +97,8 @@ function textbar(i, f_back)
% Format text for middle of the bar.
nc = ceil(pct * n_dot); % number of completed elements
-str_p01 = repmat(char(9608), [1, nc]); % completed portion of bar
-if ((nc - pct * n_dot) > 0.5); str_p01(end) = char(9612); end % allow for half blocks
+str_p01 = repmat('█', [1, nc]); % completed portion of bar
+if ((nc - pct * n_dot) > 0.5); str_p01(end) = '▌'; end % allow for half blocks
str_p02 = repmat(' ', [1, n_dot-nc]); % uncompleted portion of bar
@@ -115,9 +115,14 @@ function textbar(i, f_back)
% Compile formatted sting.
str_out = [' ', str_p00, '%%', ' |', str_p01, str_p02, '| ', str_p03];
+str_all = [str_out, repmat(' ', [1, n_str-length(str_out)]), '\n'];
-
-fprintf([str_back, str_out, repmat(' ', [1, n_str-length(str_out)]), '\n']);
+if pct<(1-eps)
+ fprintf([str_back, str_all]);
+else
+ fprintf([str_back, str_all, ...
+ char(8), ' [', 8, '< DONE]', 8, '\n']); % add orange, bold DONE
+end
%-------------------------------------------------------------------------%
end
diff --git a/+tools/textheader.m b/+tools/textheader.m
index 08730e7..03194ac 100644
--- a/+tools/textheader.m
+++ b/+tools/textheader.m
@@ -12,11 +12,13 @@
if ~exist('txt', 'var'); txt = []; end % if no text, instead format footer
if isempty(txt); f_before = 0; txt = 'Complete.'; end
+% Replacement allows for printing of '%' character.
+txt = strrep(txt, '%', '%%');
dots = ''; % will hold text to display "..." if before
if f_before
fprintf('\n');
- dots = ' ...';
+ dots = ''; % '...'
end
diff --git a/.gitignore b/.gitignore
index 7bef63e..4189305 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,13 +10,11 @@ results/
main_x_*.m
main_jas19_noise.m
-main_bayes_sp2.m
main_kernel_sp2.m
main_tfer.m
CPMASP2KernelV0.m
ImportSP2.m
-+kernel/gen_c2.m
cmap/correct.m
cmap/rgb2hsl.m
cmap/hsl2rgb.m
diff --git a/@Grid/Grid.m b/@Grid/Grid.m
index f53d932..4c1af74 100644
--- a/@Grid/Grid.m
+++ b/@Grid/Grid.m
@@ -1,19 +1,40 @@
% GRID Responsible for discretizing space as a grid and related operations.
-% Author: Timothy Sipkens, 2019-02-03
-%
-% Notes:
-% + The grid class is currently used when a simple discretization of
-% two-dimensional space is required. It then takes either the span
-% of spcae to be covered or pre-defined edge vectors to form a grid.
-%
-% + See constructor method for list of other variables required
-% for creation.
-%=========================================================================%
+%
+% Grid is a class developed to discretize a parameter space (e.g.,
+% mass-mobility space). This is done using a simple rectangular grid that
+% can have linear, logarithmic or custom spaced elements along the edges.
+% Methods are designed to make it easier to deal with gridded data,
+% allowing users to reshape vectorized data back to a 2D grid
+% (`Grid.reshape` method) or vice versa. Other methods allow for plotting
+% the 2D representation of vector data (`Grid.plot2d` method) or
+% calculating the gradient of vector data (`Grid.grad` method).
+%
+% G = Grid(SPAN,NE) creates a grid with the domain specified by SPAN, a
+% 2x2 array with [min(dim1),max(dim1); min(dim2),max(dim2)], and with the
+% number of elements in each dimension specified by NE, a 1x2 array.
+%
+% G = Grid(EDGES) creates a grid with edges specified by the entries of
+% EDGES, a 1x2 cell with the edges for dim1 and dim2, respectively.
+% Some functionality is limited if the edges are not uniform in log or
+% linear space.
+%
+% G = Grid(SPAN,NE,DISCRETE) adds an input to specify the spacing:
+% logarithmic (default), specified using DISCRETE = 'log', or linear,
+% specified using DISCRETE = 'linear'.
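+%
+% EXAMPLE (mirroring the README; a 10 x 12 logarithmically spaced grid
+%  covering 0.01-100 fg and 10-1000 nm):
+%   span = [0.01, 100; 10, 1000];  % [min(dim1), max(dim1); min(dim2), max(dim2)]
+%   grid = Grid(span, [10, 12], 'log');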
+%
+%
+% AUTHOR: Timothy Sipkens, 2019-02-03
+%
+% ------------------------------------------------------------------------
+%
+% We refer the reader to the
diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -126,7 +153,11 @@ grid_x.plot2d_marg(A(527,:)); % plot kernel for 527th data point
-One can see multiple peaks corresponding to the multiple charging contributions. Now we can generate a noiseless data set using the forward model: +One can see multiple peaks corresponding to the multiple charging contributions. + +### (3) Generate data + +Now we can generate a noiseless data set using the forward model: ```Matlab b0 = A * x0; % generate a set of data using the forward model @@ -150,8 +181,11 @@ ylabel('log_{10}(m_p)'); +Note that since we chose a very narrow phantom, multiple charging artifacts are visible in the data (in the form of multiple modes or a shoulder in the main peaks). + +### (4) Perform inversion -Note that since we chose a very narrow phantom, multiple charging artifacts are visible in the data (in the form of multiple modes or a shoulder in the main peaks). Next, we compute a Tikhonov-regularized solution using the corrupted data and data covariance information (`Lb`) and plot the result: +Next, we compute a Tikhonov-regularized solution using the corrupted data and data covariance information (`Lb`) and plot the result: ```Matlab lambda = 1; % regularization parameter @@ -169,6 +203,8 @@ x_ed = invert.exp_dist(Lb*A, Lb*b, ... lambda, Gd, grid_x); % exponential distance solution ``` +### (5) Post-processing and visualization + Finally, plot the solutions: ```Matlab @@ -182,19 +218,16 @@ grid_x.plot2d(x_ed); % plot exponential distance solution set(gcf, 'Position', [50 300 900 300]); % position and size plot ``` +This results in the following reconstruction: +- For this narrow phantom, the exponential distance approach appears to outperform Tikhonov. This example is provided in the `main_0` script in the upper directory of this program. Runtimes are typically on the order of a minute. - - ------ - - This program is organized into several: [classes](#3-classes) (folders starting with the `@` symbol), [packages](#4-packages) (folders starting with the `+` symbol), and scripts that form the base of the program. These will be described, along with the underlying physics, below. ## 1. UNDERLYING PHYSICS @@ -203,7 +236,7 @@ Size characterization is critical to understanding the role of aerosols in vario Mathematically, the problem to be solved here is of the form -![](https://latex.codecogs.com/svg.latex?{\epsilon}=\frac{1}{{\pi}}{\text{tot}}\int_0^{\infty}{\int_0^{\infty}\frac{{\delta}(r)r}{sqrt{y_0^2-r^2}}dy_0}) +![](https://latex.codecogs.com/svg.latex?N_i(a_i*,b_i*)=N_{\text{tot}}\int_0^{\infty}{\int_0^{\infty}{K(a_i*,b_i*,a,b)\cdot{p(a,b)}\cdot\text{d}a\cdot\text{d}b}}) where: @@ -214,11 +247,11 @@ where: Inversion refers to finding *p*(*a*,*b*) from some set of measurements, {*N*1,*N*2,...}. For computation, the two-dimensional size distribution is discretized, most simply by representing the quantity on a regular rectangular grid with *n*a discrete points for the first type of particle size (that is for *a*, e.g., particle mass) and *n*b for the second type of particle size (that is for *b*, e.g., particle mobility diameter). In this case, we define a global index for the grid, *j*, and vectorize the distribution, such that -![](https://latex.codecogs.com/svg.latex?x_j=p(a_j,b_j)) +![](https://latex.codecogs.com/svg.latex?{x_j=p(a_j,b_j)}) This results is a vector with *n*a x *n*b total entries. This vectorized form is chosen over a two-dimensional **x** so that the problem can be represented as a linear system of equations. 
Here, the solution is assumed to be uniform within each element, in which case -![](https://latex.codecogs.com/svg.latex?N_i(a_i*,b_i*){\approx}N_{\text{tot}}\sum_{j=1}^{n_a\cdot{n_b}}{p(a_j,b_j)\int_{a_j}{\int_{b_j}{K(a_i*,b_i*,a_j,b_j)\cdot\text{d}a\cdot\text{d}b}}}) +![](https://latex.codecogs.com/svg.latex?{N_i(a_i*,b_i*){\approx}N_{\text{tot}}\sum_{j=1}^{n_a\cdot{n_b}}{p(a_j,b_j)\int_{a_j}{\int_{b_j}{K(a_i*,b_i*,a_j,b_j)\cdot\text{d}a\cdot\text{d}b}}}}) (where the integrals are over the two-dimensional area of the *j*th element in [*a*,*b*]T space). This results is a linear system of equations of the form @@ -227,7 +260,7 @@ in [*a*,*b*]T space). This results is a linear system of equations of where **b** is the data vector (i.e., *bi* = *Ni*); **A** is a discrete form of the kernel, -![](https://latex.codecogs.com/svg.latex?A_{i,j}=\int_{a_j}{\int_{b_j}{K(a_i*,b_i*,a_j,b_j)\cdot\text{d}a\cdot\text{d}b}}) +![](https://latex.codecogs.com/svg.latex?{A_{i,j}=\int_{a_j}{\int_{b_j}{K(a_i*,b_i*,a_j,b_j){\cdot}\text{d}a\cdot\text{d}b}}}) and **e** is a vector of measurement errors that corrupt the results of **Ax**. This is the problem that the current code is designed to solve. @@ -248,35 +281,53 @@ to the corresponding phantom number (e.g., to `'3'` for the phantom from [Buckle ### 2.1 Main scripts (main*.m) -The `main*` scripts in the top directory of the program constitute the primary code that can be called to demonstrate use of the code. The are generally composed of four parts. +The `main*` scripts in the top directory of the program constitute the primary code that can be called to demonstrate use of the code. They are generally composed of fout parts (with an optional pre-step composed of generating a phantom distribtuion), similar to the steps noted in the example at the beginning of this README. + +As per the demonstration above, the `main*` scripts generally have five parts. + +### (1) Generate a reconstruction grid + +The first main step involves defining a reconstruction grid, which corresponds to the points at which `x`, the mass-mobility distribution, is to be reconstructed. This is typically done using an instance of the `Grid` class, which is described in Section [3.1](#31-grid-class). Optionally, one could first define a phantom, which will subsequently be used to generate synthetic data and a ground truth. The `Phantom` class, described in Section [3.2](#32-phantom-class), is designed to perform this task. The results is , and a vector, `x_t`, that contains a vectorized form of the phantom distribution, defined on the output grid. In all other cases, the grid for `x` (and possibly `b`) can be generated by calling the `Grid` class directly. -##### 2.1.1 General structure: Four parts +### (2) Compute the kernel / transfer functions -**STEP 1**: Optionally, one can define a phantom used to generate synthetic data and a ground truth. The `Phantom` class, described in Section [3.2](#32-phantom-class), is designed to perform this task. The results is an instance of the `Grid` class, which is described in Section [3.1](#31-grid-class), and a vector, `x_t`, that contains a vectorized form of the phantom distribution, defined on the output grid. +One must now generate a model matrix, `A`, which relates the distribution, `x`, to the data, `b`, such that **Ax** = **b**. This requires one to compute the transfer functions of all of the devices involved in the measurement for the points on which `x` and `b` are to be defined. 
This generally involves invoking the `kernel.gen_*(...)` methods. For example, -**STEP 2A**: One must now generate a model matrix, `A`, which relates the distribution, `x`, to the data, `b`, such that **Ax** = **b**. This requires one to compute the transfer functions of all of the devices involved in the measurement as well as the grids on which `x` and `b` are to be defined. For phantom distributions, the grid for `x` can generated using the `Phantom` class. In all other cases, the grid for `x` and `b` can be generated by creating an instance of the `Grid` class described below. +```Matlab +A = kernel.gen_pma_dma_grid(... + grid_b, grid_x, prop_pma, ... + [], 'omega', omega); +``` + +will generate a kernel for a reconstruction gird, `grid_x`, and gridded data, on `grid_b`. + +### (3) Generate data -**STEP 2b**: One must also define some set of data in an appropriate format. For simulated data, this is straightforward: `b = A*x;`. For experimental data, the data should be imported along with either (i) a grid on which the data is defined or (ii) using a series of setpoints for the DMA and PMA. For experimental data, one must have knowledge of the setpoint before computing `A`. Accordingly, the data may first be imported prior to Step 2A. Also in this step, one should include some definition of the expected uncertainties in each point in `b`, encoded in the matrix `Lb`. For those cases involving simple counting noise, this can be approximated as +One must also define some set of data in an appropriate format. For simulated data, this is straightforward: `b = A*x;`. For experimental data, the data should be imported along with either (*i*) a grid on which the data is defined or (*ii*) using a series of setpoints for the DMA and PMA. For experimental data, one must have knowledge of the setpoints before computing `A`, such that, the data must be imported prior to Step (**2**). Also in this step, one should include some definition of the expected uncertainties in each point in `b`, encoded in the matrix `Lb`. For those cases involving simple counting noise, this can be approximated as ```Matlab Lb = diag(1 ./ sqrt(1 / Ntot .* b)); ``` -where `Ntot` is the total number of particle counts as described in [Sipkens et al. (2020a)][1_JAS1]. The function `tools.get_noise` is included to help with noise creation and more information on the noise model is provided in [Sipkens et al. (2017)][6_AO17]. +where `Ntot` is the total number of particle counts as described in [Sipkens et al. (2020a)][1_JAS1]. The function `tools.get_noise(...)` is included to help with noise creation and more information on the noise model is provided in [Sipkens et al. (2017)][6_AO17]. -**STEP 3**: With this information, one can proceed to implement various inversion approaches, such as those available in the `invert` package described below. Preset groupings on inversion approaches are available in the `run_inversions*` scripts, also described below. +### (4) Perform inversion -**STEP 4**: Finally, one can post-process and visualize the results as desired. The `Grid` class allows for a simple visualization of the inferred distribution by calling the `Grid.plot2d_marg` method of this class. This plots both the retrieved distribution as well as the marginalized distribution on each of the axes, taking the reconstruction (e.g., `x_tk1`, `x_lsq`) as an input. +With this information, one can proceed to implement various inversion approaches, such as those available in the **invert** package described below. 
Preset groupings on inversion approaches are available in the `run_inversions*` scripts, also described below. + +### (5) Post-processing and visualization + +Finally, one can post-process and visualize the results as desired. The `Grid` class allows for a simple visualization of the inferred distribution by calling the `Grid.plot2d_marg(...)` method of this class. This plots both the retrieved distribution as well as the marginalized distribution on each of the axes, taking the reconstruction (e.g., `x_tk1`, `x_lsq`) as an input. ### 2.2 Scripts to run a series of inversion methods (run_inversions*.m) -As noted above, these scripts are intend to bundle a series of inversion methods into a single line of code in the `main*` scripts. This can include optimization routines, included in the `+optimize` package, which run through several values of the regularization parameters. The lettered scripts each denote different combinations of techniques. The `run_inversions_a` script, for example, attempts to optimize the regularization parameter in the Tikhonov, MART, Twomey, and Twomey-Markowski approaches. Other lettered scripts combine other sets of inversion techniques. +As noted above, these scripts are intend to bundle a series of inversion methods into a single line of code in the `main*` scripts. This can include optimization routines, included in the **optimize** package, which run through several values of the regularization parameters. The lettered scripts each denote different combinations of techniques. The `run_inversions_a` script, for example, attempts to optimize the regularization parameter in the Tikhonov, MART, Twomey, and Twomey-Markowski approaches. Other lettered scripts combine other sets of inversion techniques. ## 3. CLASSES ### 3.1 Grid class -Grid is a class developed to discretize a parameter space (e.g., mass-mobility space). This is done using a simple rectangular grid that can have linear, logarithmic or custom spaced elements along the edges. Methods are designed to make it easier to deal with gridded data, allowing users to reshape vectorized data back to a 2D grid (`Grid.reshape` method) or vice versa. Other methods allow for plotting the 2D representation of vector data (`Grid.plot2d` method) or calculate the gradient of vector data (`Grid.grad` method). +Grid is a class developed to discretize a parameter space (e.g., mass-mobility space). This is done using a simple rectangular grid that can have linear, logarithmic or custom spaced elements along the edges. Methods are designed to make it easier to deal with gridded data, allowing users to reshape vectorized data back to a 2D grid (`Grid.reshape(...)` method) or vice versa. Other methods allow for plotting the 2D representation of vector data (`Grid.plot2d(...)` method) or calculate the gradient of vector data (`Grid.grad(...)` method). Instances of the Grid class can primarily be constructed in two ways. First, one can specify a `Grid.span` for the grid to cover in the parameter space. The span is specified using a 2 x 2 matrix, where the first row corresponds to the span for the first dimension of the parameter space (e.g., mass) and the second row corresponds to the span for the second dimension of the parameter space (e.g., mobility diameter). 
For example, if one wanted to logarithmically discretize mass space between 0.01 and 100 fg and mobility space between 10 and 1000 nm, one could call: @@ -286,8 +337,7 @@ ne = [10,12]; % number of elements for each dimension grid = Grid(span, ne, 'log'); % create instance of Grid ``` -Second, one can supply a 1 x 2 cell array of edges, where the first entry is the center of the elements in the first dimension of parameter space and the second entry of the elements in the second dimension of parameter space. For example, to make -a simple grid with elements at 0.1 and 1 fg in mass space and 10, 200, and 1000 nm in mobility space, one would call: +Second, one can supply a 1 x 2 cell array of edges, where the first entry is the center of the elements in the first dimension of parameter space and the second entry of the elements in the second dimension of parameter space. For example, to make a simple grid with elements at 0.1 and 1 fg in mass space and 10, 200, and 1000 nm in mobility space, one would call: ```Matlab edges = {[0.1,1], [10,200,1000]}; % cell array of edge vectors @@ -306,8 +356,7 @@ For mass-mobility measurements, this can be used to block out particles with ext These partial grids are also useful for PMA-SP2 inversion, where part of the grid will be unphysical. -Partial grids can be created using the `Grid.partial`, which cuts the grid about a line specified by a y-intercept and slope. All of the grid points with a center above this line is removed from the grid. For example, if one wanted to create a grid where all of the points above the 1-1 line should be removed -(as is relevant for PMA-SP2 inversion), one can call +Partial grids can be created using the `Grid.partial(...)`, which cuts the grid about a line specified by a y-intercept and slope. All of the grid points with a center above this line is removed from the grid. For example, if one wanted to create a grid where all of the points above the 1-1 line should be removed (as is relevant for PMA-SP2 inversion), one can call ```Matlab grid = grid.partial(0, 1); @@ -329,7 +378,7 @@ Methods specific to partial grids include: 1. `Grid.partial2full(x)` will convert a partial x (resolved with only the remaining points on the grid) to one resolved on the original grid, filling the missing points with zeros. I use this to plot and marginalize the distributions (i.e., plots will have zeros in all of the missing elements/pixels) and 2. `Grid.full2partial(x)` will do the reverse of above, converting a full x (with all of the points from the original grid) to one resolved on the partial grid. -Most other Grid methods will operate on partial grids. For example, the `Grid.marginalization` method will only sum the pixels that are still on the grid (exploiting the fact that the other pixels are necessarily zero), including accounting for the fact that pixels on the diagonal are only half pixels (i.e., they are triangles rather than rectangular elements). The `Grid.plot2d` method, equally, will plot zeros in the upper left triangle. The `Grid.l1` method will generate the first-order Tikhonov L matrix for the modified grid (only considering pixels that are adjacent on the partial grid). The `Grid.ray_sum` method will sum the distribution along some ray (i.e., along some line), assuming missing pixels are zero and do not contribute to the ray-sum. The list goes on. +Most other Grid methods will operate on partial grids. 
For example, the `Grid.marginalization(...)` method will only sum the pixels that are still on the grid (exploiting the fact that the other pixels are necessarily zero), including accounting for the fact that pixels on the diagonal are only half pixels (i.e., they are triangles rather than rectangular elements). The `Grid.plot2d(...)` method, equally, will plot zeros in the upper left triangle. The `Grid.l1(...)` method will generate the first-order Tikhonov L matrix for the modified grid (only considering pixels that are adjacent on the partial grid). The `Grid.ray_sum(...)` method will sum the distribution along some ray (i.e., along some line), assuming missing pixels are zero and do not contribute to the ray-sum. The list goes on. ### 3.2 Phantom class @@ -396,7 +445,7 @@ The '**mass-mobility**' parameterization uses a `p` structured array, which is b `rhog` - Effective density of at the mean mobility diameter -For lognormal modes, means should be geometric means and standard deviations should be geometric standard deviations. Remaining entries of the `p` structure will be filled using the `Phantom.fill_p` method. (We note that `p = Phantom.fill_p(p);` can be used to fill out the `p` structure without the need to create an instance of the Phantom class.) +For lognormal modes, means should be geometric means and standard deviations should be geometric standard deviations. Remaining entries of the `p` structure will be filled using the `Phantom.fill_p(...)` method. (We note that `p = Phantom.fill_p(p);` can be used to fill out the `p` structure without the need to create an instance of the Phantom class.) For this scenario, instances of the class are generated by calling: @@ -449,7 +498,7 @@ where the distribution parameters match those from [Sipkens et al. (2020a)][1_JA ##### 3.2.3 Converting between the 'standard' and 'mass-mobility' parameterizations -In both cases, creating an instance of the class will also contain the information corresponding to the other creation method (e.g. using a `p` structure, the class constructor will determined the corresponding mean and covariance information and store this in `Phantom.mu` and `Phantom.Sigma`). This can be demonstrated by investigating the examples provided in the proceeding sections, which generate the same phantom, save for rounding errors. Conversion between the '**standard**' parameterization and the '**mass-mobility**' parameterizations can be accomplished using the `Phantom.cov2p` method of the Phantom class and vice versa using the `Phantom.p2cov` method of the Phantom class. +In both cases, creating an instance of the class will also contain the information corresponding to the other creation method (e.g. using a `p` structure, the class constructor will determined the corresponding mean and covariance information and store this in `Phantom.mu` and `Phantom.Sigma`). This can be demonstrated by investigating the examples provided in the proceeding sections, which generate the same phantom, save for rounding errors. Conversion between the '**standard**' parameterization and the '**mass-mobility**' parameterizations can be accomplished using the `Phantom.cov2p(...)` method of the Phantom class and vice versa using the `Phantom.p2cov(...)` method of the Phantom class. ##### 3.2.4 OPTION 3: Preset phantoms @@ -469,23 +518,23 @@ corresponds to the one used by [Buckley et al. (2017)][3_Buck] and demonstrates For experimental data, the Phantom class can also be used to derive morphological parameters from the reconstructions. 
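+As a rough sketch of that use (detailed below), assuming a reconstruction `x_rec` defined on a grid `grid_rec` and that the fitting routine is invoked as a static method:
+
+```Matlab
+phantom_fit = Phantom.fit(x_rec, grid_rec);  % fit a bivariate lognormal phantom to the reconstruction
+p_fit = phantom_fit.p;  % structure of morphological (mass-mobility) parameters
+```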
-Of particular note, the `Phantom.fit` method, which is defined external to the main definition of the Phantom class, takes a reconstruction, `x` and the grid on which it is defined and creates a bivariate lognormal phantom that most resembles the data. This done using least squares analysis. The `p` structure of the Phantom class then contains many of the morphological parameters of interest to practitioners measuring mass-mobility distributions. +Of particular note, the `Phantom.fit(...)` method, which is defined external to the main definition of the Phantom class, takes a reconstruction, `x`, and the grid on which it is defined and creates a bivariate lognormal phantom that most resembles the data. This is done using least-squares analysis. The `p` structure of the Phantom class then contains many of the morphological parameters of interest to practitioners measuring mass-mobility distributions. -The `Phantom.fit2` method can be used in an attempt to derive multimodal phantoms for the data. This task is often challenging, such that the method may need tuning in order to get distributions that appropriately resemble the data. +The `Phantom.fit2(...)` method can be used in an attempt to derive multimodal phantoms for the data. This task is often challenging, such that the method may need tuning in order to get distributions that appropriately resemble the data. ## 4. PACKAGES ### 4.1 +kernel -This package is used to evaluate the transfer function of the different instruments, such as the differential mobility analyzer (DMA), particle mass analyzer (such as the CPMA or APM), and charging fractions. Various functions within this package can generate the matrix `A` that acts as the forward model. +This package is used to evaluate the transfer function of the different instruments, such as the differential mobility analyzer (DMA), particle mass analyzer (such as the CPMA or APM), single particle soot photometer (SP2), and charging fractions, generating discrete kernels useful for computation. In general, functions starting with `gen` are upper-level functions that can generate the matrix `A` that acts as the forward model in subsequent steps. Other functions (e.g., `kernel.tfer_dma(...)`) act as supporting methods (e.g., in evaluating the transfer function for just the DMA). The transfer function for the DMA uses the analytical expressions of [Stolzenburg et al. (2018)][Stolz18]. -Transfer function evaluation for a PMA can proceed using one of two inputs either (i) a `sp` structure or (ii) an instance of the Grid class defined for the data setpoints. Evaluation proceeds using the analytical expressions of [Sipkens et al. (2020b)][2_AST] and the tfer_pma package provided with that work. The package uses a `sp` structure to define the PMA setpoints. +Transfer function evaluation for a PMA can proceed using one of two inputs: either (*i*) a `sp` structure or (*ii*) an instance of the Grid class defined for the data setpoints. Evaluation proceeds using the analytical expressions of [Sipkens et al. (2020b)][2_AST] and the **tfer_pma** package provided with that work. The package uses a `sp` structure to define the PMA setpoints. ##### 4.1.1 sp -The `sp` or setpoint structure is a structured array containing the information necessary to define the PMA setpoints, which is described in more detail in the [README](tfer_pma/README.md) for the tfer_pma package. Defining the quantity requires a pair of parameters and a property structure defining the physical dimensions of the PMA.
Pairings can be converted into a `sp` structured array using the `tfer_pma.get_setpoint` function described below and in the README for the tfer_pma package. Generally, this function can be placed inside a loop that generates an entry in `sp` for each available setpoint. The output structure will contain all of the relevant parameters that could be used to specify that setpoint, including mass setpoint (assuming a singly charged particle), `m_star`; the resolution, `Rm`; the voltage, `V`; and the electrode speeds, `omega*`. A sample `sp` is shown below. +The `sp` or setpoint structure is a structured array containing the information necessary to define the setpoints for particle mass analyzers, which is described in more detail in the [README](tfer_pma/README.md) for the **tfer_pma** package. Defining the quantity requires a pair of parameters and a property structure defining the physical dimensions of the PMA. Pairings can be converted into a `sp` structured array using the `get_setpoint(...)` function (in the **tfer_pma** folder). Generally, this function can be placed inside a loop that generates an entry in `sp` for each available setpoint. The output structure will contain all of the relevant parameters that could be used to specify that setpoint, including mass setpoint (assuming a singly charged particle), `m_star`; the resolution, `Rm`; the voltage, `V`; and the electrode speeds, `omega*`. A sample `sp` is shown below. | Fields | m_star | V | Rm | omega | omega1 | omega2 | alpha | beta | m_max | | ------- | :-------: | :----: | :-: | :---: | :----: | :----: | :---: | :---: | :------: | @@ -495,20 +544,18 @@ The `sp` or setpoint structure is a structured array containing the information | 4 | 2.22×10-18 | 198.02 | 3 | 486.1 | 493.7 | 478.7 | 33.63 | 1.656 | 2.96×10-18 | | ... |||||||||| -As an example, the array can be generated from a vector of mass setpoints assuming -a resolution of *R*m = 10 and PMA properties specified in `kernel.prop_pma` using: +As an example, the array can be generated from a vector of mass setpoints assuming a resolution of *R*m = 10 and PMA properties specified in `kernel.prop_pma` using: ```Matlab -m_star = 1e-18.*logspace(log10(0.1),log10(100),25); % mass setpoints -sp = tfer_pma.get_setpoint(prop_pma,... - 'm_star',m_star,'Rm',10); % get PMA setpoints +addpath('tfer_pma'); +m_star = 1e-18 .* logspace(log10(0.1), log10(100), 25); % mass setpoints +sp = get_setpoint(prop_pma,... + 'm_star', m_star, 'Rm', 10); % get PMA setpoints ``` ##### 4.1.2 Exploiting the gridded structure of that data -Alternatively, one can generate a grid corresponding to the data points. This can -speed transfer function evaluation be exploiting the structure of the setpoints -to minimize the number of function evaluations (using the `kernel.gen_grid` function). +We note that functions that end in `_grid` exploit the structure of gridded data to speed transfer function evaluation. This is particularly useful when the transfer function may not be a function of both aerosol size parameters (e.g., for SP2 data, the SP2 binning process does not depend on the total particle mass). ### 4.2 tfer_pma @@ -518,13 +565,13 @@ Unlike the other packages, tfer_pma corresponds to a submodule that is imported addpath('tfer_pma'); ``` -to be used explicitly in scripts. The package is automatically added to the Matlab path, whenever it is necessary in calling functions in the kernel package. +to be used explicitly in scripts. 
The package is automatically added to the Matlab path, whenever it is necessary in calling functions in the **kernel** package. The package is used in evaluating the transfer function of the particle mass analyzers (PMAs), such as the aerosol particle mass analyzer (APM) and centrifugal particle mass analyzer (CPMA). PMA transfer functions are evaluated using the analytical transfer functions derived by [Sipkens et al. (2020b)][2_AST], including different approximations for the particle migration velocity and options for transfer functions that include diffusion. For more details on the theory, one is referred to the referenced work. The package also contains some standard reference functions (e.g. `dm2zp`) used in evaluating the DMA transfer function when calling `kernel.tfer_dma(...)`. ### 4.3 +invert -The invert package contains various functions used to invert the measured data for the desired two-dimensional distribution. This includes implementations of least-squares, Tikhonov regularization, Twomey, Twomey-Markowski (including using +The **invert** package contains various functions used to invert the measured data for the desired two-dimensional distribution. This includes implementations of least-squares, Tikhonov regularization, Twomey, Twomey-Markowski (including using the method of [Buckley et al. (2017)][3_Buck] and [Rawat et al. (2016)][rawat]), and the multiplicative algebraic reconstruction technique (MART). An important note in connection with these methods is that they do not have the matrix `Lb` as an input. This is done for two reasons: @@ -541,20 +588,17 @@ Development is underway on the use of an exponential distance covariance functio ### 4.4 +optimize -This package mirrors the content of the +invert package but aims to determine the optimal number of -iterations for the Twomey and MART schemes or the optimal prior parameter set for the other methods. This includes some methods aimed to optimize the prior/regularization parameters used in the reconstructions, without knowledge of the data. +This package mirrors the content of the **invert** package but aims to determine the optimal number of iterations for the Twomey and MART schemes or the optimal prior parameter set for the other methods. This includes some methods aimed to optimize the prior/regularization parameters used in the reconstructions, without knowledge of the data. -Of particular note are a subset of the methods that implement evaluation of the Bayes factor for a range of methods, namely the `optimize.bayesf*.m` methods. The functions have inputs that mirror the functions in the `invert` package, this means that data uncertainties can be included -in the procedure by giving `Lb*A` as an input to the program in the place of `A`. The methods general take `lambda` as a separate parameter, to promote the stability of the algorithm. -More details on this method are found in [Sipkens et al. (2020c)][4]. Appendix C of that work includes a discussion of the special considerations required to compute the determinants of the large covariance matrices in this problem. +Of particular note are a subset of the methods that implement evaluation of the Bayes factor for a range of methods, namely the `optimize.bayesf*(...)` methods. The functions have inputs that mirror the functions in the **invert** package, this means that data uncertainties can be included in the procedure by giving `Lb*A` as an input to the program in the place of `A`. 
The methods generally take `lambda` as a separate parameter to promote the stability of the algorithm. More details on this method are found in [Sipkens et al. (2020c)][4]. Appendix C of that work includes a discussion of the special considerations required to compute the determinants of the large covariance matrices in this problem. ### 4.5 +tools A series of utility functions that serve various purposes, including printing a text-based progress bar (based on code from [Samuel Grauer](https://www.researchgate.net/profile/Samuel_Grauer)) and a function to convert mass-mobility distributions to effective density-mobility distributions. -The `tools.overlay*` functions produce overlay to be placed on top of plots in -mass-mobility space. For example, `tools.overlay_phantom` will plot the line +The `tools.overlay*(...)` functions produce overlays to be placed on top of plots in +mass-mobility space. For example, `tools.overlay_phantom(...)` will plot the line corresponding to the least-squares line representative of the phantom (equivalent to the mass-mobility relation for mass-mobility phantoms) and ellipses representing isolines. By default, the function plots one, two, and three standard deviations from the center of the distribution, accounting for the correlation encoded in the distribution. diff --git a/cmap b/cmap index 83f4d0e..5b94ca9 160000 --- a/cmap +++ b/cmap @@ -1 +1 @@ -Subproject commit 83f4d0e31da12f576797d2f696c21f19db6649a6 +Subproject commit 5b94ca979a6ecc36edbb0e4f94810c49c498cb12 diff --git a/main_0.m b/main_0.m index 960df04..038d72e 100644 --- a/main_0.m +++ b/main_0.m @@ -1,59 +1,74 @@ +% MAIN_0 A script demonstrating a simple inversion. + + clear; close all; clc; -%== STEP 1 ===============================================================% -span = [0.01,100; ... - 10,1000]; % span of grid [min(mass),max(mass); min(mobility),max(mobility)] + + +%== (1) ==================================================================% +span = [0.01, 100; ... + 10, 1000]; % span of grid ne = [100, 125]; % number of elements in grid for each dimension -grid_x = Grid(span, ne, 'logarithmic'); % create instance of Grid, with logarithmic spacing + +% Create an instance of Grid, with logarithmic spacing. +grid_x = Grid(span, ne, 'log'); -ut_r = [2,0.5]; % point in line to cut upper triangle +ut_r = [0.5,2]; % point in line to cut upper triangle ut_m = 3; % slope for line to cut upper triangle -lt_r = [2,-1.2]; % point in line to cut lower triangle +lt_r = [-1.2,2]; % point in line to cut lower triangle lt_m = 3; % slope for line to cut lower triangle grid_x = grid_x.partial(... - fliplr(ut_r),ut_m,... - fliplr(lt_r),lt_m); % convert to a partial grid + ut_r, ut_m,... + lt_r, lt_m); % convert to a partial grid -phantom = Phantom('4', grid_x); % get Phantom 4 from Sipkens et al. (2020a) -x0 = phantom.x; +phantom = Phantom('4'); % get Phantom 4 from Sipkens et al. (2020a) +x0 = phantom.eval(grid_x); % evaluate phantom on grid_x +% Plot the phantom.
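+% (Grid.plot2d shows the 2D distribution only; Grid.plot2d_marg could be
+% used instead to also show the marginal distributions on each axis.)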
figure(1); grid_x.plot2d(x0); -% define a new grid for the measurements + +%== (2) ==================================================================% +% Define a new grid for the measurements span_b = span; ne_b = [20, 65]; grid_b = Grid(span_b, ne_b, 'log'); -%== Step 2A ==============================================================% -prop_pma = kernel.prop_pma % use default CPMA properties (will display in command line) -A = kernel.gen_grid(grid_b, grid_x); % generate the kernel, use default CPMA properties +% Use default CPMA properties (will display in command line). +prop_pma = kernel.prop_pma; + +% Generate the kernel, use default CPMA properties. +A = kernel.gen_pma_dma_grid(grid_b, grid_x, prop_pma); figure(2); grid_x.plot2d_marg(A(527,:)); % plot kernel for 527th data point -%== STEP 2b ==============================================================% + +%== (3) ==================================================================% b0 = A * x0; % generate a set of data using the forward model [b, Lb] = tools.get_noise(b0, 1e5); % corrupt data, assume peak counts ~1e5 % plot resultant data as mobility scans at a range of mass setpoint figure(3); -tools.plot2d_slices(grid_b, b); +opts.f_lines = 1; +tools.plot2d_patch(grid_b, b0, [], [], opts); xlabel('log_{10}(d_m)'); ylabel('log_{10}(m_p)'); -%== STEP 3 ===============================================================% -disp('Performing Tikhonov regularization...'); + +%== (4) ==================================================================% +tools.textheader('Tikhonov inversion'); lambda = 1; % regularization parameter order = 1; % order of Tikhonov matrix to be used x_tk1 = invert.tikhonov(Lb*A, Lb*b, ... @@ -62,16 +77,18 @@ disp(' '); -disp('Performing exponential distance regularization...'); +tools.textheader('Exponential distance inversion'); lambda = 1; % regularization parameter Gd = phantom.Sigma; -x_ed = invert.exp_dist(Lb*A, Lb*b, ... +x_ed = invert.exp_dist( ... + Lb*A, Lb*b, ... lambda, Gd, grid_x); % exponential distance solution disp('Complete.'); disp(' '); -%== STEP 4 ===============================================================% + +%== (5) ==================================================================% figure(4); subplot(1,2,1); grid_x.plot2d(x_tk1); % plot Tikhonov solution diff --git a/main_bayes.m b/main_bayes.m index 704c77b..5a11abe 100644 --- a/main_bayes.m +++ b/main_bayes.m @@ -18,12 +18,15 @@ %% -%== STEP 1: Generate phantom (x_t) =======================================% +%== (1) ==================================================================% +% Generate phantom (x_t) and reconstruction grid. % High resolution version of the distribution to be projected to coarse % grid to generate x. -span_t = [10^-1.5,10^1.5;20,10^3]; % range of mobility and mass +span_t = [ ... + 10^-1.5, 10^1.5; ... % range of mobilities + 20, 10^3]; % range of masses -phantom = Phantom('1',span_t); +phantom = Phantom('1',span_t); % choose phantom x_t = phantom.x; grid_t = phantom.grid; nmax = max(x_t); @@ -54,7 +57,8 @@ %% -%== STEP 2A: Generate A matrix ===========================================% +%== (2) ==================================================================% +% Compute kernel. % Note that here a dense kernel is computed for % data synthesis in Step 3. 
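+% The dense kernel, A_t, is later rebased onto the coarser reconstruction
+% grid using B = grid_x.transform(grid_t) and A = A_t*B.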
n_b = [14,50]; %[14,50]; %[17,35]; @@ -63,7 +67,7 @@ n_b,'logarithmic'); % grid for data prop_pma = kernel.prop_pma; -[A_t,sp] = kernel.gen_grid(grid_b,grid_t,prop_pma,[],'Rm',3); +[A_t,sp] = kernel.gen_pma_dma_grid(grid_b,grid_t,prop_pma,[],'Rm',3); % generate A matrix based on grid for x_t and b disp('Transform to discretization in x...'); @@ -84,7 +88,8 @@ %% -%== STEP 2b: Generate data ===============================================% +%== (3) ==================================================================% +% Generate data using forward model. b0 = A_t*x_t; % forward evaluate kernel @@ -94,14 +99,14 @@ Ntot = 1e5; [b,Lb] = tools.get_noise(b0,Ntot); +% Plot the data three different ways. figure(5); tools.plot2d_scatter(... - grid_b.elements(:,1),grid_b.elements(:,2),b,cm_b); + grid_b.elements(:,1), grid_b.elements(:,2), b, cm_b); title('Data: 2D scatter'); figure(6); -% tools.plot2d_patch(grid_b,b,cm_b); -tools.plot2d_slices(grid_b,b,cm_b); +tools.plot2d_patch(grid_b, b, cm_b); title('Data: 2D slices'); figure(20); @@ -121,15 +126,56 @@ %% -%== STEP 3: Perform inversions ===========================================% -run_inversions_h; % simple, faster, stand-alone Tikhonov + ED +%== (4) ==================================================================% +% Invert. +%-{ +% (Previously run_inversions_h) + +%-- Tikhonov (1st order) -------------------------------------------------% +tools.textheader('Tikhonov (1st) regularization'); +lambda_tk1 = 1.1053; % found using other run_inversion* scripts +x_tk1 = invert.tikhonov(... + Lb*A,Lb*b,lambda_tk1,1,n_x(1)); +disp('Inversion complete.'); +disp(' '); + +eps.tk1_0 = norm(x0-x_tk1); + + +%-- Exponential distance approach ----------------------------------------% +Gd = phantom.Sigma(:,:,1); +if isempty(Gd) % for Phantom 3 + [~,Gd] = phantom.p2cov(phantom.p(2),phantom.modes(2)); +end + +%-- Gd properties -----------------% +l1 = sqrt(Gd(1,1)); +l2 = sqrt(Gd(2,2)); +R12 = Gd(1,2)/(l1*l2); +Dm = Gd(1,2)/Gd(2,2); % s1*R12/s2 +%----------------------------------% + +tools.textheader('Exponential distance regularization'); +lambda_ed = 1.0826; % found using other run_inversion* scripts +[x_ed] = ... + invert.exp_dist(... + Lb*A,Lb*b,lambda_ed,Gd,... + grid_x,[]); +disp('Inversion complete.'); +disp(' '); + +eps.ed_0 = norm(x_ed-x0); +%} + + + +% Alternative code to optimize different +% components of the regularization. % run_inversions_g; % run_inversions_i; % run_inversions_j; -% run_inversions_k; % time methods - %% diff --git a/main_bayes_sp2.m b/main_bayes_sp2.m new file mode 100644 index 0000000..ba4c8d1 --- /dev/null +++ b/main_bayes_sp2.m @@ -0,0 +1,154 @@ + +clear; +clc; +close all; + + +%-- Load colour maps -----------------------------------------------------% +addpath cmap; +cm_alt = bupu(255); +cm_b = inferno(255); +cm_b = cm_b(40:end,:); +cm_div = rdbu(200); +cm = magma; +cm = flipud(cm); +cm = [1,1,1;cm]; + + + +%% +%== (1) ==================================================================% +% Generate phantom (x_t). +% High resolution version of the distribution to be projected to coarse +% grid to generate x. +span_t = [ ... + 10^-2,10^2; ... % range of mobilities + 10^-2,10^2]; % range of masses + +grid_t = Grid(span_t,... 
+ [540,540],'logarithmic'); +grid_t = grid_t.partial(0,1); +phantom = Phantom('distr-sp2',grid_t); +x_t = phantom.x; +nmax = max(x_t); +cmax = nmax; + +%-- Generate x vector on coarser grid -----------------------------------% +n_x = [72,72]; % number of elements per dimension in x + +grid_x = Grid([grid_t.span],... + n_x,'logarithmic'); +grid_x = grid_x.partial(0,1); +x0 = grid_x.project(grid_t,x_t); % project into basis for x + +% phantom_a = Phantom('distr-sp2',grid_x); +% x_a = phantom_a.x; + +figure(1); +tools.plot2d_marg_b(phantom.grid,phantom.x) +colormap(gcf,cm); +caxis([0,cmax*(1+1/256)]); + +hold on; % plots mg ridges of phantom +plot(log10(grid_t.edges{2}),... + log10(phantom.mg_fun(grid_t.edges{2})),'w:'); +hold off; + + + +%% +%== (2) ==================================================================% +% Generate A matrix. Note that here a dense kernel is computed +% for data synthesis in Step 3. + +n_b = [60,15]; %[14,50]; %[17,35]; +span_b = grid_t.span; +grid_b = Grid(span_b,... + n_b,'logarithmic'); % grid for data +% grid_b = grid_b.partial(0,1); + +prop_pma = kernel.prop_pma; +[A_t,sp] = kernel.gen_pma_sp2_grid(grid_b, grid_t, prop_pma, 'Rm', 3); + % generate A matrix based on grid for x_t and b + +disp('Transform to discretization in x...'); +B = grid_x.transform(grid_t); % evaluate matrix modifier to transform kernel +A = A_t*B; % equivalent to integration, rebases kernel to grid for x (instead of x_t) +A = sparse(A); +disp('Complete.'); +disp(' '); + +figure(2); +colormap(gcf,cm); +d0 = grid_x.marginalize(x0); +tools.plot2d_marg_b(grid_x,x0,d0{1},grid_t,x_t); +caxis([0,cmax*(1+1/256)]); + + + +%% +%== (3) ==================================================================% +% Generate data. + +b0 = A_t*x_t; % forward evaluate kernel + +%-- Corrupt data with noise ----------------------------------------------% +b0(0<1e-10.*max(max(b0))) = 0; % zero very small values of b + +% Note: The total number of particles, Ntot, scales the pdf to equal the +% number distribution, d2N/dlogA*dlogB. It is also equal to the +% concentration of particles reduced by the product of the flow +% rate and overall collection time, that is Ntot = N*Q*t. +Ntot = 1e5; % converts pdf to counts +[b,Lb] = tools.get_noise(b0,Ntot); + +figure(5); +tools.plot2d_scatter(... + grid_b.elements(:,1),grid_b.elements(:,2),b,cm_b); + +figure(20); +grid_b.plot2d_sweep(Ntot.*b,cm_b,2); +xlabel('{{\itm}_{p} [fg]}'); +ylabel('{d{\itN}/dlog {\itm}_{p}}'); + + + +%% +%== (4) ==================================================================% +% Invert. + +s1 = 0.25; +s2 = s1; +R12 = 0.99; +Gd = [s1^2,R12*s1*s2; R12*s1*s2,s2^2]; +lambda_ed = 1.2e0; + +tools.textheader('Exponential distance'); +Lpr0 = invert.exp_dist_lpr(Gd,grid_x.elements(:,2),grid_x.elements(:,1)); +[x_ed,~,Lpr0] = invert.exp_dist(... + Lb*A,Lb*b,lambda_ed,Gd,... + grid_x.elements(:,2),grid_x.elements(:,1)); +% [x_ed,lambda_ed,out] = optimize.exp_dist_op(... +% Lb*A,Lb*b,[1e1,1e4],Gd,... +% grid_x.elements(:,2),grid_x.elements(:,1)); +disp('Inversion complete.'); +disp(' '); + + + +%% +%== (5) ==================================================================% +% Visualize the results. 
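+% Plot the exponential distance reconstruction, using tools.overlay_line(...)
+% to mark the one-to-one line used to truncate the partial grid.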
+ +x_plot = x_ed; + +%-- Plot retrieved solution --------------% +figure(10); +colormap(gcf,cm); +tools.plot2d_marg_b(grid_x,Ntot.*x_plot,[],grid_t,x_t); +tools.overlay_line(grid_x,[0,0],1,'k--'); +% colorbar; +caxis([0,Ntot*cmax*(1+1/256)]); +colormap(cm); + + diff --git a/main_jas20a.m b/main_jas20a.m index 5aca961..a1bad28 100644 --- a/main_jas20a.m +++ b/main_jas20a.m @@ -19,14 +19,17 @@ %% -%== STEP 1: Generate phantom (x_t) =======================================% +%== (1) ==================================================================% +% Phantom and reconstruction grid. % High resolution version of the distribution to be projected to coarse % grid to generate x. -span_t = [10^-1.5,10^1.5;10,10^3]; % range of mobility and mass +span_t = [... + 10^-1.5, 10^1.5; ... % range of masses + 10, 10^3]; % range of mobilities -phantom = Phantom('1',span_t); -x_t = phantom.x; -grid_t = phantom.grid; +phantom = Phantom('1', span_t); % get one of the preset phantoms +x_t = phantom.x; % evaluated phantom +grid_t = phantom.grid; % grid on which phantom is defined nmax = max(x_t); cmax = nmax; @@ -53,17 +56,21 @@ %% -%== STEP 2A: Generate A matrix ===========================================% -n_b = [14,50]; %[12,50]; %[17,35]; +%== (2) ==================================================================% +% Compute kernel. + + +n_b = [14,50]; %[12,50]; %[17,35]; % size of the data span_b = grid_t.span; grid_b = Grid(span_b,... - n_b,'logarithmic'); % should be uniform basis + n_b,'logarithmic'); % grid for data -prop_pma = kernel.prop_pma; -A_t = kernel.gen_grid(grid_b,grid_t,prop_pma,[],'Rm',3); - % generate A matrix based on grid for x_t and b +prop_pma = kernel.prop_pma; % get default CPMA properties -disp('Transform to discretization in x...'); +% Generate A matrix based on grid for x_t (fine resolution) and b. +A_t = kernel.gen_pma_dma_grid(grid_b, grid_t, prop_pma, [], 'Rm', 3); + +disp('Transform kernel to discretization in x...'); B = grid_x.transform(grid_t); % evaluate matrix modifier to transform kernel A = A_t*B; % equivalent to integration, rebases kernel to grid for x (instead of x_t) A = sparse(A); @@ -78,7 +85,8 @@ %% -%== STEP 2b: Generate data ==============================================% +%== (3) ==================================================================% +% Generate data. b0 = A_t*x_t; % forward evaluate kernel (high dimension) @@ -99,19 +107,22 @@ %% -%== STEP 3: Perform inversions ============================================% +%== (4) ==================================================================% +% Invert. + +% Run inversion for pre-selected regularization settings. run_inversions_c; -% % optimize the inversion schemes, incurs longer runtimes -% run_inversions_a; % optimize regularization parameter -% run_inversions_b; +% Optimize inversion schemes, incurs much longer runtimes. +% run_inversions_a; %% -%== STEP 4: Plot solution =================================================% -x_plot = x_tk1; +%== (5) ==================================================================% +% Post-process / plot. +x_plot = x_tk1; % select Tikhonov (or other) reconstructions for plotting figure(10); % plot reconstruction and marginal distributions colormap(gcf,[cm;1,1,1]); diff --git a/main_jas20a_nb.m b/main_jas20a_nb.m deleted file mode 100644 index 3f9c9b4..0000000 --- a/main_jas20a_nb.m +++ /dev/null @@ -1,202 +0,0 @@ - -% Evaluates the JAS19 cases for a varying number of data points. 
- -clear; -clc; -close all; - - -%-- Load colour schemes --------------------------------------------------% -addpath cmap; -cm = ylgnbu(255); -cm_alt = cm; -cm = inferno; -cm = cm(40:end,:); -cm_b = cm; -cm = viridis; - - -%% -%-- Generate phantom (x_t) -----------------------------------------------% -% High resolution version of the distribution to be projected to coarse -% grid to generate x. -span_t = [10^-1.5,10^1.5;10,10^3]; % range of mobility and mass - -phantom = Phantom('1',span_t); -x_t = phantom.x; -grid_t = phantom.grid; -nmax = max(x_t); -cmax = nmax; - -figure(1); -phantom.plot; -colormap(gcf,[cm;1,1,1]); -caxis([0,cmax*(1+1/256)]); - -hold on; % plots mg ridges of phantom -plot(log10(grid_t.edges{2}),... - log10(phantom.mg_fun(grid_t.edges{2})),'w:'); -hold off; - -n_b_vec = [25,35,42,50,60,75]; -for ii=1:length(n_b_vec) - %% - %-- Generate A matrix and b vector ---------------------------------------% - n_b = [14,n_b_vec(ii)]; %[12,50]; %[17,35]; - span_b = grid_t.span; - grid_b = Grid(span_b,... - n_b,'logarithmic'); % should be uniform basis - - A_t = kernel.gen_grid(grid_b,grid_t,prop_pma,[],'Rm',3); - % generate A matrix based on grid for x_t and b - - - %% - %-- Generate x vector on coarser grid -----------------------------------% - n_x = [50,64]; % number of elements per dimension in x - % [20,32]; % used for plotting projections of basis functions - % [40,64]; % used in evaluating previous versions of regularization - - grid_x = Grid([grid_t.span],... - n_x,'logarithmic'); - x0 = grid_x.project(grid_t,x_t); % project into basis for x - - disp('Transform to discretization in x...'); - B = grid_x.transform(grid_t); % evaluate matrix modifier to transform kernel - A = A_t*B; % equivalent to integration, rebases kernel to grid for x (instead of x_t) - A = sparse(A); - disp('Complete.'); - disp(' '); - - figure(2); - colormap(gcf,[cm;1,1,1]); - grid_x.plot2d_marg(x0,grid_t,x_t); - caxis([0,cmax*(1+1/256)]); - - - %% - %-- Generate data --------------------------------------------------------% - b0 = A_t*x_t; % forward evaluate kernel - - %-- Corrupt data with noise ----------------------------------------------% - b0(0<1e-10.*max(max(b0))) = 0; % zero very small values of b - - Ntot = 1e5; - [b,Lb] = tools.get_noise(b0,Ntot); - - - figure(5); - colormap(gcf,cm_b); - grid_b.plot2d_marg(b); - - figure(20); - n2 = floor(grid_b.ne(1)); - n3 = floor(length(cm_b(:,1))/n2); - cm_b_mod = cm_b(10:n3:end,:); - set(gca,'ColorOrder',cm_b_mod,'NextPlot','replacechildren'); - b_plot_rs = reshape(b,grid_b.ne); - semilogx(grid_b.edges{2},b_plot_rs.*Ntot); - - x_vec(ii).b = max(b); - - - %-- Perform inversions -----------------------------------------------% - run_inversions_a; % run regularization - - - %-- Store values -----------------------------------------------------% - x_vec(ii).tk0 = x_tk0; - x_vec(ii).tk1 = x_tk1; - x_vec(ii).tk2 = x_tk2; - x_vec(ii).out_tk0 = out_tk0; - x_vec(ii).out_tk1 = out_tk1; - x_vec(ii).out_tk2 = out_tk2; - x_vec(ii).out_mart = out_mart; - x_vec(ii).out_two = out_two; - x_vec(ii).out_two_mh = out_two_mh; - x_vec(ii).chi_tk0 = chi.tk0; - x_vec(ii).chi_tk1 = chi.tk1; - x_vec(ii).chi_tk2 = chi.tk2; - x_vec(ii).chi_mart = chi.mart; - x_vec(ii).chi_two = chi.two; - x_vec(ii).chi_two_mh = chi.two_mh; - x_vec(ii).chi_lsq = chi.lsq; -end - - -%% -%-- Plot solution --------------------------------------------------------% -x_plot = x_vec(1).x_two; - -figure(10); -colormap(gcf,[cm;1,1,1]); -grid_x.plot2d_marg(x_plot,grid_t,x_t); -caxis([0,cmax*(1+1/256)]); - 
-%{ -figure(13); -n1 = ceil(grid_x.ne(1)./20); -n2 = floor(grid_x.ne(1)/n1); -n3 = floor(240/n2); -cm_x = cm(10:n3:250,:); -set(gca,'ColorOrder',cm_x,'NextPlot','replacechildren'); -x_plot_rs = reshape(x_plot,grid_x.ne); -semilogx(grid_x.edges{2},x_plot_rs(1:n1:end,:)); - -figure(10); -%} - - -%% -%-- Bar plot of results --------------------------------------------------% -figure(30); -chi_names = fieldnames(chi); -chi_vals = zeros(length(chi_names),1); -for ii=1:length(chi_names) - chi_vals(ii) = chi.(chi_names{ii}); -end - -bar(chi_vals); -% ylim([0,20]); -% ylim([0,100]); -set(gca,'xticklabel',chi_names); - - -%% -%{ -%-- Bar plot of times ----------------------------------------------------% -figure(40); -t_names = fieldnames(t); -t_vals = zeros(length(t_names),1); -for ii=1:length(t_names) - t_vals(ii) = mean(t.(t_names{ii}),2); -end - -bar(t_vals); -set(gca,'xticklabel',t_names); -set(gca,'yscale','log'); - - - -%% -%-- Plot marginal distributions ------------------------------------------% -figure(31); -clf; -dim = 2; - -grid_t.plot_marginal(x_t,dim); -grid_x.plot_marginal(... - {x_Tk1,x_init,x_MART,x_Two,x_TwoMH},dim,x0); - - - -%% -%-- Plot conditional distributions ---------------------------------------% -figure(31); -clf; -dim = 2; -ind_plot = 25; - -grid_x.plot_conditional(... - {x0,x_Tk1,x_init,x_MART,x_Two,x_TwoMH},dim,ind_plot,x0); -%} diff --git a/main_jas20a_noise.m b/main_jas20a_noise.m deleted file mode 100644 index 96d64c0..0000000 --- a/main_jas20a_noise.m +++ /dev/null @@ -1,204 +0,0 @@ - -% Evaluates the JAS19 cases for a varying amount of noise. -%=========================================================================% - -clear; -clc; -close all; - -%-- Load colour schemes --------------------------------------------------% -addpath cmap; -cm = ylgnbu(255); -cm_alt = cm; -cm = inferno; -cm = cm(40:end,:); -cm_b = cm; -cm = viridis; - - -%% -%-- Generate phantom (x_t) -----------------------------------------------% -% High resolution version of the distribution to be projected to coarse -% grid to generate x. -span_t = [10^-1.5,10^1.5;10,10^3]; % range of mobility and mass - -phantom = Phantom('1',span_t); -x_t = phantom.x; -grid_t = phantom.grid; -nmax = max(x_t); -cmax = nmax; - -figure(1); -phantom.plot; -colormap(gcf,[cm;1,1,1]); -caxis([0,cmax*(1+1/256)]); - -hold on; % plots mg ridges of phantom -plot(log10(grid_t.edges{2}),... - log10(phantom.mg_fun(grid_t.edges{2})),'w:'); -hold off; - - -%% -%-- Generate A matrix and b vector ---------------------------------------% -n_b = [14,50]; %[12,50]; %[17,35]; -span_b = grid_t.span; -grid_b = Grid(span_b,... - n_b,'logarithmic'); % should be uniform basis - -A_t = kernel.gen_grid(grid_b,grid_t,prop_pma,[],'Rm',3); - % generate A matrix based on grid for x_t and b - - -%% -%-- Generate x vector on coarser grid -----------------------------------% -n_x = [50,64]; % number of elements per dimension in x - % [20,32]; % used for plotting projections of basis functions - % [40,64]; % used in evaluating previous versions of regularization - -grid_x = Grid([grid_t.span],... 
- n_x,'logarithmic'); -x0 = grid_x.project(grid_t,x_t); % project into basis for x - -disp('Transform to discretization in x...'); -B = grid_x.transform(grid_t); % evaluate matrix modifier to transform kernel -A = A_t*B; % equivalent to integration, rebases kernel to grid for x (instead of x_t) -A = sparse(A); -disp('Complete.'); -disp(' '); - -figure(2); -colormap(gcf,[cm;1,1,1]); -grid_x.plot2d_marg(x0,grid_t,x_t); -caxis([0,cmax*(1+1/256)]); - - -%% -%-- Generate data --------------------------------------------------------% -b0 = A_t*x_t; % forward evaluate kernel - -%-- Corrupt data with noise ----------------------------------------------% -b0(0<1e-10.*max(max(b0))) = 0; % zero very small values of b - -Ntot_vec = 10.^[3,4,5,6,7]; -x_vec = []; -for ii=1:length(Ntot_vec) - Ntot = Ntot_vec(ii); - [b,Lb] = tools.get_noise(b0,Ntot); - - - figure(5); - colormap(gcf,cm_b); - grid_b.plot2d_marg(b); - - figure(20); - n2 = floor(grid_b.ne(1)); - n3 = floor(length(cm_b(:,1))/n2); - cm_b_mod = cm_b(10:n3:end,:); - set(gca,'ColorOrder',cm_b_mod,'NextPlot','replacechildren'); - b_plot_rs = reshape(b,grid_b.ne); - semilogx(grid_b.edges{2},b_plot_rs.*Ntot); - - x_vec(ii).b = max(b); - - -% %-- Perform inversions -----------------------------------------------% -% run_inversions_a; % run regularization -% -% -% %-- Store values -----------------------------------------------------% -% x_vec(ii).tk0 = x_tk0; -% x_vec(ii).tk1 = x_tk1; -% x_vec(ii).tk2 = x_tk2; -% x_vec(ii).out_tk0 = out_tk0; -% x_vec(ii).out_tk1 = out_tk1; -% x_vec(ii).out_tk2 = out_tk2; -% x_vec(ii).out_mart = out_mart; -% x_vec(ii).out_two = out_two; -% x_vec(ii).out_two_mh = out_two_mh; -% x_vec(ii).chi_tk0 = chi.tk0; -% x_vec(ii).chi_tk1 = chi.tk1; -% x_vec(ii).chi_tk2 = chi.tk2; -% x_vec(ii).chi_mart = chi.mart; -% x_vec(ii).chi_two = chi.two; -% x_vec(ii).chi_two_mh = chi.two_mh; -% x_vec(ii).chi_lsq = chi.lsq; -end - - -%% -%-- Plot solution --------------------------------------------------------% -x_plot = x_tk1; - -figure(10); -colormap(gcf,[cm;1,1,1]); -grid_x.plot2d_marg(x_plot,grid_t,x_t); -caxis([0,cmax*(1+1/256)]); - -%{ -figure(13); -n1 = ceil(grid_x.ne(1)./20); -n2 = floor(grid_x.ne(1)/n1); -n3 = floor(240/n2); -cm_x = cm(10:n3:250,:); -set(gca,'ColorOrder',cm_x,'NextPlot','replacechildren'); -x_plot_rs = reshape(x_plot,grid_x.ne); -semilogx(grid_x.edges{2},x_plot_rs(1:n1:end,:)); - -figure(10); -%} - - -%% -%-- Bar plot of results --------------------------------------------------% -figure(30); -chi_names = fieldnames(chi); -chi_vals = zeros(length(chi_names),1); -for ii=1:length(chi_names) - chi_vals(ii) = chi.(chi_names{ii}); -end - -bar(chi_vals); -% ylim([0,20]); -% ylim([0,100]); -set(gca,'xticklabel',chi_names); - - -%% -%{ -%-- Bar plot of times ----------------------------------------------------% -figure(40); -t_names = fieldnames(t); -t_vals = zeros(length(t_names),1); -for ii=1:length(t_names) - t_vals(ii) = mean(t.(t_names{ii}),2); -end - -bar(t_vals); -set(gca,'xticklabel',t_names); -set(gca,'yscale','log'); - - - -%% -%-- Plot marginal distributions ------------------------------------------% -figure(31); -clf; -dim = 2; - -grid_t.plot_marginal(x_t,dim); -grid_x.plot_marginal(... - {x_Tk1,x_init,x_MART,x_Two,x_TwoMH},dim,x0); - - - -%% -%-- Plot conditional distributions ---------------------------------------% -figure(31); -clf; -dim = 2; -ind_plot = 25; - -grid_x.plot_conditional(... 
- {x0,x_Tk1,x_init,x_MART,x_Two,x_TwoMH},dim,ind_plot,x0); -%} diff --git a/run_inversions_a.m b/run_inversions_a.m index 0a1737a..41dae69 100644 --- a/run_inversions_a.m +++ b/run_inversions_a.m @@ -17,7 +17,7 @@ %% Least squares -disp('Performing LS inversion...'); +tools.textheader('LS inversion'); x_length = length(A(1,:)); x_lsq = invert.lsq(Lb*A,Lb*b); disp('Inversion complete.'); @@ -27,7 +27,7 @@ %% Tikhonov (0th) implementation -disp('Performing Tikhonov (0th) regularization...'); +tools.textheader('Tikhonov (0th) regularization'); tic; [x_tk0,lambda_tk0,out_tk0] = optimize.tikhonov_op(... Lb*A,Lb*b,[1e-2,1e2],0,grid_x,x0,[],'interior-point'); @@ -39,7 +39,7 @@ %% Tikhonov (1st) implementation -disp('Performing Tikhonov (1st) regularization...'); +tools.textheader('Tikhonov (1st) regularization'); tic; [x_tk1,lambda_tk1,out_tk1] = optimize.tikhonov_op(... Lb*A,Lb*b,[1e-2,1e2],1,grid_x,x0,[],'interior-point'); @@ -51,7 +51,7 @@ %% Tikhonov (2nd) implementation -disp('Performing Tikhonov (2nd) regularization...'); +tools.textheader('Performing Tikhonov (2nd) regularization'); tic; [x_tk2,lambda_tk2,out_tk2] = optimize.tikhonov_op(... Lb*A,Lb*b,[1e-2,1e2],2,grid_x,x0,[],'interior-point'); @@ -64,7 +64,7 @@ %% MART, Maximum entropy regularized solution -disp('Performing MART...'); +tools.textheader('MART'); tic; [x_mart,iter_mart,out_mart] = ... optimize.mart_op(A,b,x_init,1:300,x0); @@ -76,7 +76,7 @@ %% Twomey -disp('Performing Twomey...'); +tools.textheader('Twomey'); tic; [x_two,iter_two,out_two] = ... optimize.twomey_op(A,b,x_init,1:500,x0); @@ -89,7 +89,7 @@ %% Twomey-Markowski-Buckley -disp('Performing Twomey-Markowski-Buckley...'); +tools.textheader('Twomey-Markowski-Buckley'); tic; [x_two_mh,Sf_two_mh,out_two_mh] = ... optimize.twomark_op(A,b,Lb,grid_x,... diff --git a/run_inversions_b.m b/run_inversions_b.m deleted file mode 100644 index bf2ab81..0000000 --- a/run_inversions_b.m +++ /dev/null @@ -1,133 +0,0 @@ - -% RUN_INVERSIONS_B Single inversion of each technique using externally defined parameters. -% Author: Timothy Sipkens, 2019-05-28 -%=========================================================================% - -%% Initial guess for iterative schemes -b_init = b; -b_init(b_init<(1e-5*max(b_init))) = 0; -x_init = interp2(grid_b.edges{2}',grid_b.edges{1}',... - reshape(full(b_init)./(A*ones(size(x0))),grid_b.ne),... - grid_x.elements(:,2),grid_x.elements(:,1)); -x_init(isnan(x_init)) = 0; -x_init(isinf(x_init)) = 0; -x_init = sparse(max(0,x_init)); -eps.init = norm(x0-x_init); -x_init_m = grid_x.marginalize(x_init); - - -%% Least squares -disp('Performing LS inversion...'); -x_lsq = invert.lsq(Lb*A,Lb*b); -disp('Inversion complete.'); -disp(' '); - -disp('Performing LS inversion...'); -tic; -x_lsq_nn = invert.lsq(Lb*A,Lb*b,[],'non-neg'); -t.lsq = toc; -disp('Inversion complete.'); -disp(' '); - -% chi.lsq = norm(x0-x_lsq); -eps.lsq = norm(x0-x_lsq_nn); - - -%% Tikhonov (0th) implementation -% lambda_tk0_hr = tools.perform_hankeraus(out_tk0,A,b,0); -% disp('Performing Tikhonov (0th) regularization (Hanke-Raus)...'); -% x_tk0_hr = invert.tikhonov(Lb*A,Lb*b,n_x(1),lambda_tk0_hr,0); -% disp('Inversion complete.'); -% disp(' '); - -disp('Performing Tikhonov (0th) regularization (int.-p.)...'); -x_tk0 = invert.tikhonov(Lb*A,Lb*b,lambda_tk0,0,n_x(1)); -disp('Inversion complete.'); -disp(' '); - -disp('Performing Tikhonov (0th) regularization (non-neg)...'); -x_tk0_nn = invert.tikhonov(Lb*A,Lb*b,lambda_tk0,0,n_x(1),... 
- [],'non-neg'); -disp('Inversion complete.'); -disp(' '); - -% diff.tk0 = norm(x_tk0-x_tk0_nn); -eps.tk0 = norm(x0-x_tk0_nn); -% chi.tk0_hr = norm(x0-x_tk0_hr); - - -%% Tikhonov (1st) implementation -% lambda_tk1_hr = tools.perform_hankeraus(out_tk1,A,b,0); -% disp('Performing Tikhonov (1st) regularization (Hanke-Raus)...'); -% x_tk1_hr = invert.tikhonov(Lb*A,Lb*b,n_x(1),lambda_tk1_hr,1); -% disp('Inversion complete.'); -% disp(' '); - -disp('Performing Tikhonov (1st) regularization (int.-p.)...'); -x_tk1 = invert.tikhonov(Lb*A,Lb*b,lambda_tk1,1,n_x(1)); -disp('Inversion complete.'); -disp(' '); - -disp('Performing Tikhonov (1st) regularization (non-neg)...'); -x_tk1_nn = invert.tikhonov(Lb*A,Lb*b,lambda_tk1,1,n_x(1),... - [],'non-neg'); -disp('Inversion complete.'); -disp(' '); - -% diff.tk1 = norm(x_tk1-x_tk1_nn); -eps.tk1 = norm(x0-x_tk1_nn); -% chi.tk1_hr = norm(x0-x_tk1_hr); - - -%% Tikhonov (2nd) implementation -% lambda_tk2_hr = tools.perform_hankeraus(out_tk2,A,b,0); -% disp('Performing Tikhonov (2nd) regularization (Hanke-Raus)...'); -% x_tk2_hr = invert.tikhonov(Lb*A,Lb*b,n_x(1),lambda_tk2_hr,2); -% disp('Inversion complete.'); -% disp(' '); - -disp('Performing Tikhonov (2nd) regularization (int.-p.)...'); -x_tk2 = invert.tikhonov(Lb*A,Lb*b,lambda_tk2,2,n_x(1)); -disp('Inversion complete.'); -disp(' '); - -disp('Performing Tikhonov (2nd) regularization (non-neg)...'); -x_tk2_nn = invert.tikhonov(Lb*A,Lb*b,lambda_tk2,2,n_x(1),... - [],'non-neg'); -disp('Inversion complete.'); -disp(' '); - -% diff.tk2 = norm(x_tk2-x_tk2_nn); -eps.tk2 = norm(x0-x_tk2_nn); -% chi.tk2_hr = norm(x0-x_tk2_hr); - - -%% MART, Maximum entropy regularized solution - -disp('Performing MART...'); -x_MART = invert.mart(A,b,x_init,299); -disp('Inversion complete.'); -disp(' '); - -eps.mart = norm(x0-x_MART); - - -%% Twomey -disp('Performing Twomey...'); -x_Two = invert.twomey(A,b,x_init,500); -disp('Completed Twomey.'); -disp(' '); - -eps.two = norm(x0-x_Two); - - -%% Twomey-Markowski-Buckley -disp('Performing Twomey-Markowski...'); -x_two_mh = invert.twomark(A,b,Lb,grid_x,... - x_init,35,'Buckley',Sf_two_mh); -disp('Completed Twomey-Markowski.'); - -eps.two_mh = norm(x0-x_two_mh); - - - diff --git a/run_inversions_c.m b/run_inversions_c.m index 7c1841a..06e2dfc 100644 --- a/run_inversions_c.m +++ b/run_inversions_c.m @@ -14,7 +14,7 @@ %% Tikhonov (0th) implementation -disp('Performing Tikhonov (0th) regularization...'); +tools.textheader('Performing Tikhonov (0th) regularization'); lambda_tk0 = 0.419941123497942; [x_tk0,D_tk0,L_tk0,Gpo_tk0] = invert.tikhonov(... Lb*A,Lb*b,lambda_tk0,0,n_x(1),[],'non-neg'); @@ -25,7 +25,7 @@ %% Tikhonov (1st) implementation -disp('Performing Tikhonov (1st) regularization...'); +tools.textheader('Performing Tikhonov (1st) regularization'); lambda_tk1 = 0.935436889902617; [x_tk1,D_tk1,L_tk1,Gpo_tk1] = invert.tikhonov(... Lb*A,Lb*b,lambda_tk1,1,n_x(1),[],'non-neg'); @@ -36,7 +36,7 @@ %% Tikhonov (2nd) implementation -disp('Performing Tikhonov (2nd) regularization...'); +tools.textheader('Performing Tikhonov (2nd) regularization'); lambda_tk2 = 1.069019204603001; [x_tk2,D_tk2,L_tk2] = invert.tikhonov(... 
Lb*A,Lb*b,lambda_tk2,2,n_x(1),[],'non-neg'); @@ -48,7 +48,7 @@ %% Twomey %-- Perform Twomey algorithm ----------------------------% -disp('Performing Twomey...'); +tools.textheader('Performing Twomey'); x_two = invert.twomey(A,b,x_init,500,[],[],1); disp('Completed Twomey.'); diff --git a/run_inversions_d.m b/run_inversions_d.m deleted file mode 100644 index 1545ed8..0000000 --- a/run_inversions_d.m +++ /dev/null @@ -1,103 +0,0 @@ - -% RUN_INVERSIONS_D Invert multuple times to determine CPU time. -% Author: Timothy Sipkens, 2019-07-22 -%=========================================================================% - -%% Initial guess for iterative schemes -b_init = b; -b_init(b_init<(1e-5*max(b_init))) = 0; -x_init = interp2(grid_b.edges{2}',grid_b.edges{1}',... - reshape(full(b_init)./(A*ones(size(x0))),grid_b.ne),... - grid_x.elements(:,2),grid_x.elements(:,1)); -x_init(isnan(x_init)) = 0; -x_init(isinf(x_init)) = 0; -x_init = sparse(max(0,x_init)); -eps.init = norm(x0-x_init); -x_init_m = grid_x.marginalize(x_init); - - -for ii=1:20 - -%% Least squares -disp('Performing LS inversion...'); -tic; -x_LSQ = invert.lsq(A,b,'interior-point'); -t.LSQ(ii) = toc; -disp('Inversion complete.'); -disp(' '); - -eps.LSQ = norm(x0-x_LSQ); - - -%% Tikhonov (0th) implementation -disp('Performing Tikhonov (0th) regularization...'); -tic; -x_tk0 = invert.tikhonov(Lb*A,Lb*b,lambda_tk0,0,n_x(1)); -t.tk0(ii) = toc; -disp('Inversion complete.'); -disp(' '); - -eps.tk0(ii) = norm(x0-x_tk0); - - -%% Tikhonov (1st) implementation -disp('Performing Tikhonov (1st) regularization...'); -tic; -x_tk1 = invert.tikhonov(Lb*A,Lb*b,lambda_tk1,1,n_x(1)); -t.tk1(ii) = toc; -disp('Inversion complete.'); -disp(' '); - -eps.tk1(ii) = norm(x0-x_tk1); - - -%% Tikhonov (2nd) implementation -disp('Performing Tikhonov (2nd) regularization...'); -% lambda_tk2 = 8e1; -tic; -x_tk2 = invert.tikhonov(Lb*A,Lb*b,lambda_tk2,2,n_x(1)); -t.tk2(ii) = toc; -disp('Inversion complete.'); -disp(' '); - -eps.tk2(ii) = norm(x0-x_tk2); - - -%% MART, Maximum entropy regularized solution - -disp('Performing MART...'); -tic; -x_mart = invert.mart(A,b,x_init,299); -t.mart(ii) = toc; -disp('Inversion complete.'); -disp(' '); - -eps.mart = norm(x0-x_mart); - - -%% Twomey -disp('Performing Twomey...'); -tic; -x_two = invert.twomey(A,b,x_init,500); -t.two(ii) = toc; -disp('Completed Twomey.'); -disp(' '); - -eps.two = norm(x0-x_two); - - -%% Twomey-Markowski-Buckley -disp('Performing Twomey-Markowski...'); -tic; -x_two_mh = invert.twomark(A,b,Lb,grid_x,... - x_init,35,'Buckley',1/Sf_two_mh); -t.two_mh(ii) = toc; -disp('Completed Twomey-Markowski.'); - -eps.two_mh = norm(x0-x_two_mh); - - -end - - - diff --git a/run_inversions_e.m b/run_inversions_e.m deleted file mode 100644 index c2297f5..0000000 --- a/run_inversions_e.m +++ /dev/null @@ -1,154 +0,0 @@ - -% RUN_INVERSIONS_E Run full set of optimal inversions for CPMA-SP2 inversion. -% Author: Arash Naseri, Timothy Sipkens, 2020-02-06 -%=========================================================================% - -%% Initial guess for iterative schemes -b_init = b; -b_init(b_init<(1e-5*max(b_init))) = 0; -x_init = interp2(grid_b.edges{2}',grid_b.edges{1}',... - reshape(full(b_init)./(A*ones(size(x0))),grid_b.ne),... 
- grid_x.elements(:,2),grid_x.elements(:,1)); -x_init(isnan(x_init)) = 0; -x_init(isinf(x_init)) = 0; -x_init = sparse(max(0,x_init)); -eps.init = 100*norm(x0-x_init)/norm(x0); -x_init_m = grid_x.marginalize(x_init); - - -%% Least squares -disp('Performing LS inversion...'); -x_length = length(A(1,:)); -x_lsq = invert.lsq(Lb*A,Lb*b); -disp('Inversion complete.'); -disp(' '); - -eps.lsq = 100*norm(x0-x_lsq)/norm(x0); - - -%% Tikhonov (0th) implementation -disp('Performing Tikhonov (0th) regularization...'); -tic; -[x_tk0,lambda_tk0,out_tk0] = optimize.tikhonov_op(... - Lb*A,Lb*b,[1e-2,1e2],0,grid_x,... - x0,[],'interior-point'); -t.tk0 = toc; -disp('Inversion complete.'); -disp(' '); - -eps.tk0 = 100*norm(x0-x_tk0)/norm(x0); - - -%% Tikhonov (1st) implementation -disp('Performing Tikhonov (1st) regularization...'); -tic; -[x_tk1,lambda_tk1,out_tk1] = optimize.tikhonov_op(... - Lb*A,Lb*b,[1e-2,1e2],1,grid_x,... - x0,[],'interior-point'); -t.tk1 = toc; -disp('Inversion complete.'); -disp(' '); - -eps.tk1 = 100*norm(x0-x_tk1)/norm(x0); - - -%% Tikhonov (2nd) implementation -disp('Performing Tikhonov (2nd) regularization...'); -tic; -[x_tk2,lambda_tk2,out_tk2] = optimize.tikhonov_op(... - Lb*A,Lb*b,[1e-2,1e2],2,grid_x,... - x0,[],'interior-point'); -t.tk2 = toc; -disp('Inversion complete.'); -disp(' '); - -eps.tk2 = 100*norm(x0-x_tk2)/norm(x0); - - -%% Tikhonov (0th) implementation L-curve -disp('L-curve optimization of Tikhonov (0th) regularization...'); -tic; -Lpr0 = invert.tikhonov_lpr(0,grid_x); -[x_tk0_lc,lambda_tk0_lc] = optimize.tikhonov_lcurve(... - Lb*A,Lb*b,[10^-3,100],Lpr0); -t.tk0_lc = toc; -disp('Inversion complete.'); -disp(' '); -eps.tk0_lcurve = 100*norm(x0-x_tk0_lc)/norm(x0); - - -%% Tikhonov (1st) implementation L-curve -disp('L-curve optimization of Tikhonov (1st) regularization...'); -tic; -Lpr0 = invert.tikhonov_lpr(1,grid_x); -[x_tk1_lc,lambda_tk1_lc,res_norm,x_norm] = ... - optimize.tikhonov_lcurve(... - Lb*A,Lb*b,[10^-3,100],Lpr0); -t.tk1_lc = toc; -disp('Inversion complete.'); -disp(' '); -eps.tk1_lcurve = 100*norm(x0-x_tk1_lc)/norm(x0); - - -%% Tikhonov (2nd), L-curve -disp('L-curve optimization of Tikhonov (2nd) regularization...'); -tic; -Lpr0 = invert.tikhonov_lpr(2,grid_x); -[x_tk2_lc,lambda_tk2_lc] = optimize.tikhonov_lcurve(... - Lb*A,Lb*b,[10^-3,100],Lpr0); -t.tk2_lc = toc; -disp('Inversion complete.'); -disp(' '); -eps.tk2_lc = 100*norm(x0-x_tk2_lc)/norm(x0); - - -%% MART, Maximum entropy regularized solution - -disp('Performing MART...'); -tic; -[x_mart,iter_mart,out_mart] = ... - optimize.mart_op(A,b,x_init,1:300,x0); -t.MART = toc; -disp('Inversion complete.'); -disp(' '); - -eps.mart = 100*norm(x0-x_mart)/norm(x0); - - -%% MART with smoothing function -disp('Performing MART with smoothing function...'); -tic; -[x_mart_mh,Sf_mart_mh,out_mart_mh] = optimize.martmark_op(... - A,b,Lb,grid_x,x_init,35,[1e2,1e-5],x0,'Buckley'); -t.mart_mh = toc; - -eps.mart_mh = 100*norm(x0-x_mart_mh)/norm(x0); - -disp('Completed MART with smoothing function.'); -disp(' '); - - -%% Twomey -disp('Performing Twomey...'); -tic; -[x_two,iter_two,out_two] = ... - optimize.twomey_op(A,b,x_init,1:500,x0); -t.two = toc; - -disp('Completed Twomey.'); -disp(' '); - -eps.two = 100*norm(x0-x_two)/norm(x0); - - -%% Twomey-Markowski-Buckley -disp('Performing Twomey-Markowski-Buckley...'); -tic; -[x_two_mh,Sf_two_mh,out_two_mh] = optimize.twomark_op(... 
- A,b,Lb,grid_x,x_init,35,[1e2,1e-5],x0,'Buckley'); -t.two_mh = toc; - -eps.two_mh = 100*norm(x0-x_two_mh)/norm(x0); - - - diff --git a/run_inversions_h.m b/run_inversions_h.m deleted file mode 100644 index c7d7a4c..0000000 --- a/run_inversions_h.m +++ /dev/null @@ -1,46 +0,0 @@ - -% RUN_INVERSIONS_H Run exponential distance and 1st-order Tikhonov regularization. -% Author: Timothy Sipkens, 2020-04-06 -%=========================================================================% - - - - -%-- Tikhonov (1st order) -------------------------------------------------% -disp('Performing Tikhonov (1st) regularization...'); -lambda_tk1 = 1.1053; % found using other run_inversion* scripts -x_tk1 = invert.tikhonov(... - Lb*A,Lb*b,lambda_tk1,1,n_x(1)); -disp('Inversion complete.'); -disp(' '); - -eps.tk1_0 = norm(x0-x_tk1); - - - - -%-- Exponential distance approach ----------------------------------------% -Gd = phantom.Sigma(:,:,1); -if isempty(Gd) % for Phantom 3 - [~,Gd] = phantom.p2cov(phantom.p(2),phantom.modes(2)); -end - -%-- Gd properties -----------------% -l1 = sqrt(Gd(1,1)); -l2 = sqrt(Gd(2,2)); -R12 = Gd(1,2)/(l1*l2); -Dm = Gd(1,2)/Gd(2,2); % s1*R12/s2 -%----------------------------------% - -disp('Performing exponential distance regularization...'); -lambda_ed = 1.0826; % found using other run_inversion* scripts -[x_ed] = ... - invert.exp_dist(... - Lb*A,Lb*b,lambda_ed,Gd,... - grid_x,[]); -disp('Complete.'); -disp(' '); - -eps.ed_0 = norm(x_ed-x0); - - diff --git a/run_inversions_k.m b/run_inversions_k.m deleted file mode 100644 index e2a39e2..0000000 --- a/run_inversions_k.m +++ /dev/null @@ -1,55 +0,0 @@ - -% RUN_INVERSIONS_K Time Bayesian and Twomey-Markowski optimization routines. -% Author: Timothy Sipkens, 2020-02-22 -%=========================================================================% - -clear t; - -[x_two_mh,Sf_two_mh,out_two_mh] = ... - optimize.twomark_op(A,b,Lb,grid_x,... - x_init,35,[1e2,1e-5],x0,'Buckley'); -eps.two_mh = norm(x0-x_two_mh); - -%% -nt = 25; -disp('Performing time repeats...'); -tools.textbar(0); -for tt=1:nt - tic; - x_two_mh = invert.twomark(A,b,Lb,grid_x,... - x_init,35,'Buckley',1/Sf_two_mh); - t(tt).two_mh = toc; - disp('Completed Twomey-Markowski.'); - - eps.two_mh = norm(x0-x_two_mh); - - - tic; - invert.em(Lb*A,Lb*b,x_init,3,x0); - t(tt).em = toc; - - - tic; - invert.tikhonov(... - Lb*A,Lb*b,lambda_tk1,1,grid_x,[]); - t(tt).tk1 = toc; - - - - tic; - invert.exp_dist(... - Lb*A,Lb*b,lambda_ed_lam,Gd,... - grid_x); - t(tt).ed = toc; - - disp('Performing time repeats...'); - tools.textbar(0); - tools.textbar(tt/nt); -end - -tmean.two_mh = mean([t.two_mh]); -tmean.em = mean([t.em])./tmean.two_mh; -tmean.tk1 = mean([t.tk1])./tmean.two_mh; -tmean.ed = mean([t.ed])./tmean.two_mh; -tmean.two_mh = 1; - diff --git a/tfer_pma b/tfer_pma index afc3359..919d157 160000 --- a/tfer_pma +++ b/tfer_pma @@ -1 +1 @@ -Subproject commit afc3359816a54c8357b5f5bdff8e11861e5f4bdb +Subproject commit 919d157639574f064bddb5e1d49c4c404e38e691