#! /usr/bin/env python
"""
Module with routines allowing for the estimation of the uncertainty on the
parameters of an imaged companion associated to residual speckle noise.
"""

__author__ = 'O. Wertz, C. A. Gomez Gonzalez, V. Christiaens'
__all__ = ['speckle_noise_uncertainty']

from multiprocessing import cpu_count
import numpy as np
import matplotlib.pyplot as plt

from ..config.utils_conf import pool_map, iterable  # eval_func_tuple
from ..fm import cube_inject_companions, cube_planet_free
from .negfc_simplex import firstguess_simplex
from .negfc_fmerit import get_mu_and_sigma
from .negfc_mcmc import confidence


def speckle_noise_uncertainty(cube, p_true, angle_range, derot_angles, algo,
                              psfn, fwhm, aperture_radius, opp_ang=False,
                              indep_ap=False, cube_ref=None, fmerit='sum',
                              algo_options={}, transmission=None,
                              radial_gradient=False, mu_sigma=None, wedge=None,
                              weights=None, force_rPA=False, ndet=None,
                              nproc=None, simplex_options=None, bins=None,
                              save=False, output=None, verbose=True,
                              full_output=True, plot=False, sigma_trim=None,
                              inc=0, theta_a=0):
    """
    Step-by-step procedure used to determine the speckle noise uncertainty\
    associated to the parameters of a companion candidate.

    The steps 1 to 3 need to be performed for each angle.

    1) At the true planet radial distance and for a given angle, we \
    inject a fake companion in our planet-free cube.

    2) Then, using the negative fake companion method, we determine the \
    position and flux of the fake companion thanks to a Simplex \
    Nelder-Mead minimization.

    3) We calculate the offset between the true values of the position \
    and the flux of the fake companion, and those obtained from the \
    minimization. The results will be dependent on the angular \
    position of the fake companion.

    The resulting distribution of deviations is then used to infer the
    1-sigma uncertainty on each parameter by fitting a 1d-gaussian.

    Parameters
    ----------
    cube: 3d or 4d numpy array
        The original ADI or ADI+IFS cube.
    p_true: tuple or numpy array with 3 (or more) elements
        The radial separation, position angle (from x=0 axis) and flux
        associated to a given companion candidate for which the speckle
        uncertainty is to be evaluated. The planet will first be subtracted
        from the cube, then used for test injections. For a 4D input cube, the
        length of ``p_true`` should be equal to 2 (for r, theta) + the number
        of spectral channels (flux at each wavelength).
    angle_range: 1d numpy array
        Range of angles (counted from x=0 axis, counter-clockwise) at which the
        fake companions will be injected, in [0,360].
    derot_angles: 1d numpy array
        Derotation angles for ADI. Length should match input cube.
    algo: python routine
        Routine to be used to model and subtract the stellar PSF. From an input
        cube, derotation angles, and optional arguments, it should return a
        post-processed frame.
    psfn: 2d numpy array
        2d array with the normalized PSF template. The PSF image must be
        centered wrt to the array. Therefore, it is recommended to run the
        function ``metrics/normalize_psf()`` to generate a centered and
        flux-normalized PSF template.
    fwhm: float
        FWHM of the PSF in pixels.
    aperture_radius: float
        Radius of the apertures used for NEGFC, in terms of FWHM.
    opp_ang: bool, opt
        Whether to also use opposite derotation angles to double sample size.
        Uses the same angle range.
    indep_ap: bool, opt.
        Whether to only consider independent apertures. If yes, will supersede
        the range provided in angle_range, and only consider the first and last
        values, then fit as many non-overlapping apertures as possible.
        The empty cube will also be used with opposite derotation angles to
        double the number of independent apertures.
    cube_ref : 3d or 4d numpy ndarray, or list of 3d ndarray, optional
        Reference library cube for Reference Star Differential Imaging.
    fmerit : {'sum', 'stddev', 'hessian'}, string optional
        If mu_sigma is not provided nor set to True, this parameter determines
        which figure of merit to be used ('sum' of absolute residuals,
        'stddev' of residuals, or sum of absolute 'hessian' determinants).
    algo_options: dict, opt.
        Options for algo. To be provided as a dictionary. Can include ncomp
        (for PCA), svd_mode, collapse, imlib, interpolation, scaling, delta_rot
    transmission: numpy array, optional
        Array with 2 columns. First column is the radial separation in pixels.
        Second column is the off-axis transmission (between 0 and 1) at the
        radial separation given in column 1.
    radial_gradient: bool, optional
        Whether to apply a radial gradient to the psf image at the moment of
        injection.
    mu_sigma: tuple of 2 floats, bool or None, opt
        If None/False: not used, and falls back to original version of the
        algorithm, using fmerit. If a tuple of 2 elements: the mean and
        standard deviation of pixel intensities in an annulus centered on the
        location of the companion candidate. If set to anything else (e.g.
        True): will compute said mean and standard deviation automatically.
    wedge: tuple, opt
        [Only used if mu_sigma is not None] Range in theta where the mean
        and standard deviation are computed in an annulus defined in the PCA
        image. If None, it will be calculated automatically.
    weights : 1d array, optional
        If provided, the negative fake companion fluxes will be scaled
        according to these weights before injection in the cube.
    force_rPA: bool, optional
        Whether to only search for optimal flux, provided (r,PA).
    ndet: int or None, optional
        [only used if fmerit='hessian'] Number of pixel(s) along x and y
        around the first guess position for which the determinant of the
        Hessian matrix is calculated. If None, set to max(1, round(fwhm/2)).
    nproc: int or None, optional
        The number of processes to use for parallelization. If None, will be
        set automatically to half the number of CPUs available.
    simplex_options: dict
        All the required simplex parameters, for instance {'tol':1e-08,
        'max_iter':200}
    bins: int or None, opt
        Number of bins for histogram of parameter deviations. If None, will be
        determined automatically based on number of injected fake companions.
    save: bool, optional
        If True, the result are pickled.
    output: str, optional
        The name of the output file (if save is True)
    verbose: bool, optional
        If True, information is displayed in the shell.
    full_output: bool, optional
        Whether to return more outputs.
    plot: bool, optional
        Whether to plot the gaussian fit to the distributions of parameter
        deviations (between retrieved and injected).
    sigma_trim: float, opt
        If provided, sigma threshold used to trim out outliers before
        considering a Gaussian fit to the histogram of residual deviations.
    inc: float, opt
        If non-zero, the test aperture locations will be distributed along an
        ellipse, described by this inclination (in degrees).
    theta_a: float, opt
        If inc is non-zero, theta_a will be the position angle of the
        semi-major axis of the ellipse measured counter-clockwise from the
        positive x axis.

    Returns
    -------
    sp_unc: numpy ndarray of 3 elements
        Uncertainties on the radius, position angle and flux of the companion,
        respectively, associated to residual speckle noise. Only 1 element if
        force_rPA is set to True.
    mean_dev: numpy ndarray of 3 elements
        [full_output = True] Mean deviation for each of the 3 parameters
    p_simplex: numpy ndarray n_fc x 3
        [full_output = True] Parameters retrieved by the simplex for the
        injected fake companions; n_fc is the number of injected
    offset: numpy ndarray n_fc x 3
        [full_output = True] Deviations with respect to the values used for
        injection of the fake companions.
    chi2, nit, success: numpy ndarray of length n_fc
        [full_output = True] Outputs from the simplex function for the
        retrieval of the parameters of each injected companion: chi square
        value, number of iterations and whether the simplex converged,
        respectively.

    """
    if not nproc:   # Hyper-threading "duplicates" the cores -> cpu_count/2
        nproc = int(cpu_count()/2)

    if verbose:
        print('')
        print('#######################################################')
        print('###    SPECKLE NOISE DETERMINATION    ###')
        print('#######################################################')
        print('')

    # Unpack the true parameters; nch > 1 only for a 4D (ADI+IFS) cube
    if len(p_true) == 3:
        r_true, theta_true, f_true = p_true
        nch = 1
    elif len(p_true) > 3 and cube.ndim == 4 and cube.shape[0] == len(p_true)-2:
        r_true = p_true[0]
        theta_true = p_true[1]
        f_true = np.array(p_true[2:])
        nch = cube.shape[0]
    else:
        msg = "cube ndim ({}) and parameter length ({}) combo not accepted"
        raise TypeError(msg.format(cube.ndim, len(p_true)))

    if indep_ap:
        # Supersede angle_range: fit as many non-overlapping apertures as
        # possible between the first and last provided angles.
        angle_span = angle_range[-1]-angle_range[0]
        n_ap = int(np.deg2rad(angle_span)*r_true/fwhm)
        delta_theta = angle_span/n_ap
        angle_range = np.linspace(angle_range[0]+delta_theta/2,
                                  angle_range[-1]+delta_theta/2, n_ap,
                                  endpoint=False)

    # Avoid a duplicated aperture when the range wraps exactly around 360 deg
    if angle_range[0] % 360 == angle_range[-1] % 360:
        angle_range = angle_range[:-1]

    if verbose:
        print('Number of steps: {}'.format(angle_range.shape[0]))
        print('')

    imlib = algo_options.get('imlib', 'vip-fft')
    interpolation = algo_options.get('interpolation', 'lanczos4')

    # FIRST SUBTRACT THE TRUE COMPANION CANDIDATE
    if len(p_true) == 3:
        planet_parameter = np.array([[r_true, theta_true, f_true]])
    else:
        planet_parameter = np.zeros([1, 3, nch])
        planet_parameter[0, 0, :] = r_true
        planet_parameter[0, 1, :] = theta_true
        planet_parameter[0, 2] = f_true
    cube_pf = cube_planet_free(planet_parameter, cube, derot_angles, psfn,
                               imlib=imlib, interpolation=interpolation,
                               transmission=transmission,
                               radial_gradient=radial_gradient)

    # Measure mu and sigma once in the annulus (instead of each simplex step)
    if isinstance(mu_sigma, tuple):
        if len(mu_sigma) != 2:
            raise TypeError("If a tuple, mu_sigma must have 2 elements")
    elif mu_sigma:
        # Fix: 'elif mu_sigma is not None' used to trigger this computation
        # for mu_sigma=False, contradicting the documented behavior.
        ncomp = algo_options.get('ncomp', 1)
        annulus_width = algo_options.get('annulus_width', int(fwhm))
        if weights is not None:
            if not len(weights) == cube.shape[0]:
                raise TypeError(
                    "Weights should have same length as cube axis 0")
            norm_weights = weights/np.sum(weights)
        else:
            norm_weights = weights
        mu_sigma = get_mu_and_sigma(cube, derot_angles, ncomp, annulus_width,
                                    aperture_radius, fwhm, r_true, theta_true,
                                    f_true, psfn, cube_ref=cube_ref,
                                    wedge=wedge, algo=algo,
                                    weights=norm_weights,
                                    algo_options=algo_options)

    if inc != 0:
        # Distribute the test apertures along an ellipse (inclined disc case)
        # rather than a circle of radius r_true.
        # Determine semi-axes a, b from the test position and provided PA_a
        dth = theta_true-theta_a  # pa provided with astronomy conventions
        dth = dth % 360
        t1 = np.sin(np.deg2rad(dth))**2
        t2 = np.cos(np.deg2rad(dth))**2 * np.cos(np.deg2rad(inc))**2
        b = np.sqrt(r_true**2 * (t1 + t2))
        a = b/np.cos(np.deg2rad(inc))

        # Define the ellipse through r values (sized on the possibly trimmed
        # angle_range, so no stale zero-radius entry can be injected).
        r_trues = np.zeros(len(angle_range))
        for i, theta in enumerate(angle_range):
            # convert to angle from ellipse's semi-major axis
            th = (theta-theta_a) % 360
            th_rad = np.deg2rad(th)
            # use general ellipse equation to infer r values
            num = b**2
            denum = np.sin(th_rad)**2 + (b**2/a**2)*np.cos(th_rad)**2
            r_trues[i] = np.sqrt(num/denum)

        res = pool_map(nproc, _estimate_speckle_one_angle,
                       iterable(angle_range), cube_pf, psfn, derot_angles,
                       iterable(r_trues), f_true, fwhm, aperture_radius,
                       cube_ref, fmerit, algo, algo_options, transmission,
                       radial_gradient, mu_sigma, weights, force_rPA, ndet,
                       simplex_options, imlib, interpolation, verbose=verbose)
    else:
        res = pool_map(nproc, _estimate_speckle_one_angle,
                       iterable(angle_range), cube_pf, psfn, derot_angles,
                       r_true, f_true, fwhm, aperture_radius, cube_ref,
                       fmerit, algo, algo_options, transmission,
                       radial_gradient, mu_sigma, weights, force_rPA, ndet,
                       simplex_options, imlib, interpolation, verbose=verbose)
    residuals = np.array(res)

    if opp_ang:  # do opposite angles to double the sample size
        if inc != 0:
            rads = iterable(r_trues)  # keep elliptical radii consistent
        else:
            rads = r_true
        res = pool_map(nproc, _estimate_speckle_one_angle,
                       iterable(angle_range), cube_pf, psfn, -derot_angles,
                       rads, f_true, fwhm, aperture_radius, cube_ref, fmerit,
                       algo, algo_options, transmission, radial_gradient,
                       mu_sigma, weights, force_rPA, ndet, simplex_options,
                       imlib, interpolation, verbose=verbose)
        residuals2 = np.array(res)
        residuals = np.concatenate((residuals, residuals2))

    if verbose:
        print("residuals (offsets): ", residuals[:, nch+2],
              residuals[:, nch+3], residuals[:, nch+4])

    # Split each per-angle result row into retrieved parameters, deviations
    # from the injected values, and simplex diagnostics.
    p_simp_stack = [residuals[:, 0], residuals[:, 1]]
    for ch in range(nch):
        p_simp_stack.append(residuals[:, 2+ch])
    p_simplex = np.transpose(np.vstack(p_simp_stack))
    p_off_stack = [residuals[:, nch+2], residuals[:, nch+3]]
    for ch in range(nch):
        p_off_stack.append(residuals[:, nch+4+ch])
    offset = np.transpose(np.vstack(p_off_stack))
    chi2 = residuals[:, int(2*nch)+4]
    nit = residuals[:, int(2*nch)+5]
    success = residuals[:, int(2*nch)+6]

    if save:
        speckles = {'r_true': r_true,
                    'angle_range': angle_range,
                    'f_true': f_true,
                    'r_simplex': residuals[:, 0],
                    'theta_simplex': residuals[:, 1],
                    'f_simplex': residuals[:, 2:2+nch],
                    'offset': offset,
                    'chi2': chi2,
                    'nit': nit,
                    'success': success}

        if output is None:
            output = 'speckles_noise_result'

        from pickle import Pickler
        with open(output, 'wb') as fileSave:
            myPickler = Pickler(fileSave)
            myPickler.dump(speckles)

    # Calculate 1 sigma of distribution of deviations
    if force_rPA:
        # only the flux offset(s) are relevant when (r, PA) were fixed
        offset = offset[:, 2:]
    if sigma_trim:
        # Trim out outliers beyond sigma_trim x std before the Gaussian fit
        std = np.std(offset, axis=0)
        trim_offset = []
        for i in range(offset.shape[0]):
            if np.all(np.abs(offset[i]) < sigma_trim*std):
                trim_offset.append(offset[i])
        offset = np.array(trim_offset)

    if bins is None:
        bins = int(offset.shape[0]/6)

    if force_rPA:
        labels = []
    else:
        labels = ['r', 'theta']

    if cube.ndim == 3:
        labels.append('f')
    else:
        for ch in range(nch):
            labels.append('f{}'.format(ch))

    mean_dev, sp_unc = confidence(offset, cfd=68.27, bins=bins,
                                  gaussian_fit=True, verbose=verbose,
                                  save=False, output_dir='', labels=labels,
                                  force=True, plot=verbose)
    if plot:
        plt.show()

    if full_output:
        return sp_unc, mean_dev, p_simplex, offset, chi2, nit, success
    else:
        return sp_unc


def _estimate_speckle_one_angle(angle, cube_pf, psfn, angs, r_true, f_true,
                                fwhm, aperture_radius, cube_ref, fmerit, algo,
                                algo_options, transmission, radial_gradient,
                                mu_sigma, weights, force_rPA, ndet,
                                simplex_options, imlib, interpolation,
                                verbose=True):
    """
    Inject one fake companion at (r_true, angle) in the planet-free cube,
    retrieve its parameters with the NEGFC simplex, and return a flat tuple:
    retrieved parameters, offsets wrt injected values, and simplex
    diagnostics (chi2, nit, success).
    """
    if verbose:
        print('Process is running for angle: {:.2f}'.format(angle))

    cube_fc = cube_inject_companions(cube_pf, psfn, angs, flevel=f_true,
                                     rad_dists=[r_true], n_branches=1,
                                     theta=angle, transmission=transmission,
                                     radial_gradient=radial_gradient,
                                     imlib=imlib, interpolation=interpolation,
                                     verbose=False)

    # First guess for the simplex: the exact injected parameters
    if cube_pf.ndim == 4:
        p_ini = tuple([r_true, angle] + list(f_true))
    else:
        p_ini = (r_true, angle, f_true)

    ncomp = algo_options.get('ncomp', 1)
    annulus_width = algo_options.get('annulus_width', int(fwhm))
    delta_rot = algo_options.get('delta_rot', 1)

    res_simplex = firstguess_simplex(p_ini, cube_fc, angs,
                                     psfn, ncomp, fwhm, annulus_width,
                                     aperture_radius, cube_ref=cube_ref,
                                     fmerit=fmerit, algo=algo,
                                     delta_rot=delta_rot,
                                     algo_options=algo_options, imlib=imlib,
                                     interpolation=interpolation,
                                     transmission=transmission,
                                     radial_gradient=radial_gradient,
                                     mu_sigma=mu_sigma, weights=weights,
                                     force_rPA=force_rPA, ndet=ndet,
                                     options=simplex_options,
                                     verbose=False)
    res = []
    if cube_pf.ndim == 3:
        if force_rPA:
            simplex_res_f, = res_simplex.x
            simplex_res_r, simplex_res_PA = r_true, angle
        else:
            simplex_res_r, simplex_res_PA, simplex_res_f = res_simplex.x
        res.append(simplex_res_r)
        res.append(simplex_res_PA)
        res.append(simplex_res_f)
        res.append(simplex_res_r - r_true)
        res.append(simplex_res_PA - angle)
        res.append(simplex_res_f - f_true)
    else:
        if force_rPA:
            simplex_res_f = np.array(res_simplex.x)
            simplex_res_r, simplex_res_PA = r_true, angle
        else:
            simplex_res = res_simplex.x
            simplex_res_r = simplex_res[0]
            simplex_res_PA = simplex_res[1]
            simplex_res_f = np.array(simplex_res[2:])
        res.append(simplex_res_r)
        res.append(simplex_res_PA)
        for f in simplex_res_f:
            res.append(f)
        res.append(simplex_res_r - r_true)
        res.append(simplex_res_PA - angle)
        for f in simplex_res_f - f_true:
            res.append(f)

    res.append(res_simplex.fun)
    res.append(res_simplex.nit)
    res.append(res_simplex.success)

    return tuple(res)
[PAI21] + | Pairet et al. 2021 + | **MAYONNAISE: a morphological components analysis pipeline for + circumstellar discs and exoplanets imaging in the near-infrared** + | *MNRAS 503, 3724* + | `https://arxiv.org/abs/2008.05170 + `_ + +.. [JUI23] + | Juillard et al. 2023 + | **Inverse-problem versus principal component analysis methods for angular + differential imaging of circumstellar disks. The mustard algorithm** + | *A&A 679, 52* + | `https://arxiv.org/abs/2309.14827 + `_ + +.. [CHR24] + | Christiaens et al. 2024 + | **MINDS: JWST/NIRCam imaging of the protoplanetary disk PDS 70. A spiral + accretion stream and a potential third protoplanet** + | *A&A 685, 1* + | `https://arxiv.org/abs/2403.04855 + `_ + +.. [JUI24] + | Juillard et al. 2024 + | **Combining reference-star and angular differential imaging for + high-contrast imaging of extended sources** + | *A&A 688, 185* + | `https://arxiv.org/abs/2406.14444 + `_ +""" + +__author__ = 'Valentin Christiaens, Sandrine Juillard' +__all__ = ['ipca', + 'IPCA_Params'] + +from dataclasses import dataclass +import numpy as np +from typing import Union, List +from ..config.paramenum import ALGO_KEY +from ..config.utils_param import separate_kwargs_dict +from ..config import Progressbar, time_ini, time_fin +from ..psfsub import pca, PCA_Params +from ..preproc import cube_derotate, cube_collapse +from ..metrics import stim_map, inverse_stim_map +from ..var import prepare_matrix, mask_circle, frame_filter_lowpass + +try: + from GreeDS import GreeDS + no_greeds = False +except ImportError: + from warnings import warn + msg = "GreeDS python bindings are missing." + warn(msg, ImportWarning) + no_greeds = True + + +@dataclass +class IPCA_Params(PCA_Params): + """ + Set of parameters for the iterative PCA routine. + + Inherits from PCA_Params. + + See function `ipca` for documentation. + """ + + mode: str = None + strategy: str = "ADI" + ncomp_start: int = 1 + ncomp_step: int = 1 + nit: int = 1 + thr: Union[float, str] = 0. 
+ thr_mode: str = 'STIM' + r_out: float = None + r_max: float = None + smooth_ker: Union[float, List, np.ndarray] = None + rtol: float = 1e-2 + atol: float = 1e-2 + continue_without_smooth_after_conv: bool = False + add_nd_excess: bool = False + + +def ipca(*all_args: List, **all_kwargs: dict): + """ + Run iterative version of PCA (hereafter IPCA). + + The algorithm finds significant disc or planet signal in the PCA image at + each iteration, then subtracts it (after rotation) from the cube used to + build and project the principal components. This is repeated nit times, + which progressively reduces geometric biases in the image + (e.g. negative side lobes for ADI). + + The first reported usage of IPCA is in [PAI18], although the algorithm + implementation here was presented in [PAI21] for ADI and [JUI24] for ARDI. + + The same parameters as pca() can be provided, except 'batch'. There are two + additional parameters related to the iterative algorithm: the number of + iterations (nit) and the threshold (thr) used for the identification of + significant signals. + + Note: IPCA can only be used in ADI, RDI, ARDI, R+ADI or R+ARDI modes. + + References: + - IPCA concept: [Pai18] + - GreeDs implementation of IPCA: [Pai21] + - Torch implementation of GreeDs-IPCA (mode='Juillard23'): [JUI23] + - IPCA with extra options (mode='Christiaens24'): [CHR24] + - IPCA-ARDI: [JUI24] + + Parameters + ---------- + cube : str or numpy ndarray, 3d or 4d + Input cube (ADI). + angle_list : numpy ndarray, 1d + Corresponding parallactic angle for each frame. + cube_ref : numpy ndarray, 3d, optional + Reference library cube. For Reference Star Differential Imaging. + scale_list : numpy ndarray, 1d, optional + Scaling factors in case of IFS data (ADI+mSDI cube). Usually, the + scaling factors are the central channel wavelength divided by the + shortest wavelength in the cube (more thorough approaches can be used + to get the scaling factors). 
This scaling factors are used to re-scale + the spectral channels and align the speckles. + mode: str or None, opt {'Christiaens24','Juillard23'} + Whether to run IPCA with fixed (mode=None) or incremental number of PCs + (mode != None) as the iterations progress. + - If None: runs with provided value of ``ncomp`` for ``nit`` iterations, + and considering threshold ``thr``. + - If 'Juillard23': Exact implementation from Juillard et al. 2023 using + Torch. Runs incrementally with n_PCs=1,...,``ncomp``, and + ``nit`` times for each n_PCs value (i.e. outer loop on n_PCs, inner loop + on ``nit``). This method works exclusively with Torch (making it faster) + but has no additional options for the extraction of circumstellar signal + at each iteration (all signals > 0 are considered). Installation of the + GreeDS package is required for this option. + - If 'Christiaens24': Same as 'Juillard23' but not in Torch, and with + the extra parameters ``thr`` and ``ncomp_step`` available. + ncomp : int or tuple/list of 2 or 3 int, optional + How many PCs are used as a lower-dimensional subspace to project the + target frames. + - if mode is None: + * strategy in {'ADI', 'RDI', 'ARDI'} and no ``mask_rdi`` provided: + an int must be provided, ``ncomp`` is the fixed number of PCs to use + + * strategy in {'RADI' , 'RARDI'} and no ``mask_rdi`` is provided: + an int or a tuple/list of 2 int is accepted. In the latter case, the + first value is used for IPCA-RDI, and the second for IPCA-ADI for + the remaining iterations after the former converged. + + * strategy in {'ADI', 'RDI', 'ARDI} and ``mask_rdi`` is provided: an + int or a tuple/list of 2 int can be provided. In the latter case, + the first value is used for PCA-data imputation at the first + iteration, and the second for IPCA for the subsequent iterations. + + * strategy in {'RADI' , 'RARDI'} and ``mask_rdi`` is provided: an + int or a tuple/list of 3 int can be provided. 
In the latter case, + the first value is used for PCA-data imputation at the first + iteration, the second for IPCA-RDI,and the third for IPCA-ADI for + the remaining iterations after the former converged. + + - if mode is not None: + ncomp should correspond to the maximum number of principal + components to be tested. The first ncomp will be ``ncomp_start`` + and the increment will be ``ncomp_step``. + + ncomp_start: int, opt + For incremental versions of iterative PCA (i.e. if mode is 'Juillard23' + or 'Christiaens24'), this is the number of principal components at the + first iteration (by default 1). In some cases, it is better to increase + it to avoid propagating circular artefacts (see [JUI24]). + ncomp_step: int, opt + Incremental step for number of principal components - used if mode is + 'Christiaens24'. + nit: int, opt + Number of iterations for the iterative PCA. + - if mode is None: + total number of iterations + + - if mode is 'Juillard23', or 'Christiaens24': + iterations per tested ncomp. + + strategy: str {'ADI, 'RDI', 'ARDI', 'RADI', 'RARDI''}, opt + Whether to do iterative ADI only ('ADI'), iterative RDI only ('RDI'), + iterative ADI and RDI together ('ARDI', i.e. with a combined PCA + library), iterative RDI followed by iterative ADI consecutively + ('RADI'), or iterative RDI followed by iterative ARDI consecutively + ('RARDI'). A reference cube `cube_ref` must be provided for 'RDI', + 'ARDI', 'RADI' and 'RARDI'. + thr: float or 'auto', opt + Minimum threshold used to identify significant signals in the PCA image + obtained at each iteration, according to ``thr_mode``. + thr_mode: str {'STIM', 'abs'}, opt + How the threshold is expressed: whether based on the STIM map ('STIM') + or expressed as absolute pixel intensity threshold ('abs'). The optimal + choice may depend on whether you are speckle noise or sensitivity + limited in your region of interest. 
For the 'STIM' criterion, the + threshold corresponds to the minimum intensity in the STIM map computed + from PCA residuals (Pairet et al. 2019), as expressed in units of + maximum intensity obtained in the inverse STIM map (i.e. obtained from + using opposite derotation angles). + r_out: float or None, opt + Outermost radius in pixels of circumstellar signals (estimated). This + will be used if thr is set to 'auto'. The max STIM value beyond that + radius will be used as minimum threshold. If r_out is set to None, the + half-width of the frames will be used (assuming no circumstellar signal + is present in the corners of the field, where it'd be lost by rotation). + r_max: float or None, opt + Max radius in pixels where the STIM map will be considered. The max STIM + value beyond r_out but within r_max will be used as minimum threshold. + If r_max is set to None, the half-width of the frames will be used. + svd_mode : {'lapack', 'arpack', 'eigen', 'randsvd', 'cupy', 'eigencupy', + 'randcupy', 'pytorch', 'eigenpytorch', 'randpytorch'}, str optional + Switch for the SVD method/library to be used. + + ``lapack``: uses the LAPACK linear algebra library through Numpy + and it is the most conventional way of computing the SVD + (deterministic result computed on CPU). + + ``arpack``: uses the ARPACK Fortran libraries accessible through + Scipy (computation on CPU). + + ``eigen``: computes the singular vectors through the + eigendecomposition of the covariance M.M' (computation on CPU). + + ``randsvd``: uses the randomized_svd algorithm implemented in + Sklearn (computation on CPU). + + ``cupy``: uses the Cupy library for GPU computation of the SVD as in + the LAPACK version. ` + + `eigencupy``: offers the same method as with the ``eigen`` option + but on GPU (through Cupy). + + ``randcupy``: is an adaptation of the randomized_svd algorithm, + where all the computations are done on a GPU (through Cupy). 
` + + `pytorch``: uses the Pytorch library for GPU computation of the SVD. + + ``eigenpytorch``: offers the same method as with the ``eigen`` + option but on GPU (through Pytorch). + + ``randpytorch``: is an adaptation of the randomized_svd algorithm, + where all the linear algebra computations are done on a GPU + (through Pytorch). + + scaling : {None, "temp-mean", spat-mean", "temp-standard", + "spat-standard"}, None or str optional + Pixel-wise scaling mode using ``sklearn.preprocessing.scale`` + function. If set to None, the input matrix is left untouched. Otherwise: + + ``temp-mean``: temporal px-wise mean is subtracted. + + ``spat-mean``: spatial mean is subtracted. + + ``temp-standard``: temporal mean centering plus scaling pixel values + to unit variance. HIGHLY RECOMMENDED FOR ASDI AND RDI CASES! + + ``spat-standard``: spatial mean centering plus scaling pixel values + to unit variance. + + mask_center_px : None or int + If None, no masking is done. If an integer > 1 then this value is the + radius of the circular mask. + source_xy : tuple of int, optional + For ADI-PCA, this triggers a frame rejection in the PCA library, with + ``source_xy`` as the coordinates X,Y of the center of the annulus where + the PA criterion is estimated. When ``ncomp`` is a tuple, a PCA grid is + computed and the S/Ns (mean value in a 1xFWHM circular aperture) of the + given (X,Y) coordinates are computed. + delta_rot : int or float, optional + Factor for tunning the parallactic angle threshold, expressed in FWHM. + Default is 1 (excludes 1xFHWM on each side of the considered frame). + fwhm : float, optional + Known size of the FHWM in pixels to be used. Default value is 4. + imlib : str, optional + See the documentation of the ``vip_hci.preproc.frame_rotate`` function. + imlib2 : str, optional + See the documentation of the + ``vip_hci.preproc.cube_rescaling_wavelengths`` function. 
+ interpolation : str, optional + See the documentation of the ``vip_hci.preproc.frame_rotate`` function. + collapse : {'median', 'mean', 'sum', 'trimmean'}, str optional + Sets the way of collapsing the frames for producing a final image. + check_memory : bool, optional + If True, it checks that the input cube is smaller than the available + system memory. + nproc : None or int, optional + Number of processes for parallel computing. If None the number of + processes will be set to (cpu_count()/2). Defaults to ``nproc=1``. + full_output: bool, optional + Whether to return the final median combined image only or with other + intermediate arrays. + verbose : bool, optional + If True prints intermediate info and timing. + weights: 1d numpy array or list, optional + Weights to be applied for a weighted mean. Need to be provided if + collapse mode is 'wmean'. + rtol: float, optional + Relative tolerance threshold element-wise in the significant signal + image compared to the same image obtained either 1 or 2 iterations + before, to consider convergence [more details in np.allclose]. + atol: float, optional + Absolute tolerance threshold element-wise in the significant signal + image compared to the same image obtained either 1 or 2 iterations + before, to consider convergence [more details in np.allclose]. + smooth_ker: None or float or list/1darray of floats or 2darray, optional + If a float: size in pixels of the Gaussian kernel to use on + post-processed image obtained at each iteration to include the expected + spatial correlation of neighbouring pixels. If a 2D numpy array: the + normalized PSF to use for convolution. Default is to consider a + kernel size of 1 pixel. If a list/1d array, length should be equal to + the number of iterations. Depending on your data, starting with a + larger kernel (e.g. FWHM/2) and progressively decreasing it with + iterations can provide better results in terms of signal recovery. 
+ continue_without_smooth_after_conv: bool, opt + Whether to continue to iterate after convergence, but without applying + the smoothing criterion. At this stage, with a good first guess of the + circumstellar signals, this can lead to a very sharp final image. + add_nd_excess: bool, opt + Whether to continue to iterate after convergence, but adding the + positive residuals in the image obtained after PCA processing of the + cube where the estimated circumstellar signals are subtracted. + + Returns + ------- + frame : numpy ndarray + 2D array, median combination of the de-rotated/re-scaled residuals cube. + Always returned. This is the final image obtained at the last iteration. + it_cube: numpy ndarray + [full_output=True] 3D array with final image from each iteration. + sig_images: numpy ndarray + [full_output=True] 3D array similar to it_cube, but only containing + significant signals identified at each iteration. + residuals_cube : numpy ndarray + [full_output=True] Residuals cube from the last iteration. + residuals_cube_ : numpy ndarray + [full_output=True] Derotated residuals cube from the last iteration. + stim_cube: numpy ndarray + [full_output=True] 3D array with normalized STIM map from each iteration + used for the identification of significant signals. If thr_mode is set + to 'abs', this contains instead a binary mask of identified significant + signals at each iteration. + it_cube_nd: numpy ndarray + [full_output=True] 3D array with image obtained with the (disk-)empty + cube at each iteration, serves to show how much residual disk flux is + not yet captured at each iteration. 
+ """ + + def _find_significant_signals(residuals_cube, residuals_cube_, angle_list, + thr, mask=0, r_out=None, r_max=None): + # Identifies significant signals with STIM map (outside mask) + stim = stim_map(residuals_cube_) + inv_stim = inverse_stim_map(residuals_cube, angle_list) + if mask: + inv_stim = mask_circle(inv_stim, mask) + max_inv = np.amax(inv_stim) + if max_inv == 0: + max_inv = 1 # np.amin(stim[np.where(stim>0)]) + if thr == 'auto': + if r_out is None: + r_out = residuals_cube.shape[-1]//4 + if r_max is None: + r_max = residuals_cube.shape[-1]//2 + inv_stim_rout = mask_circle(inv_stim, r_out) + inv_stim_rmax = mask_circle(inv_stim_rout, r_max, mode='out') + thr = np.amax(inv_stim_rmax)/max_inv + norm_stim = stim/max_inv + good_mask = np.zeros_like(stim) + good_mask[np.where(norm_stim > thr)] = 1 + return good_mask, norm_stim + + def _blurring_2d(array, mask_center_sz, fwhm_sz=2): + if mask_center_sz: + frame_mask = mask_circle(array, radius=mask_center_sz+fwhm_sz, + fillwith=np.nan, mode='out') + frame_mask2 = mask_circle(array, radius=mask_center_sz, + fillwith=np.nan, mode='out') + if np.isscalar(fwhm_sz): + frame_filt = frame_filter_lowpass(frame_mask, mode='gauss', + fwhm_size=fwhm_sz, + iterate=False) + elif fwhm_sz.ndim == 2: # if a psf + frame_filt = frame_filter_lowpass(frame_mask, psf=fwhm_sz, + iterate=False) + nonan_loc = np.where(np.isfinite(frame_mask2)) + array[nonan_loc] = frame_filt[nonan_loc] + else: + array = frame_filter_lowpass(array, mode='gauss', + fwhm_size=fwhm_sz, iterate=False) + return array + + def _blurring_3d(array, mask_center_sz, fwhm_sz=2): + bl_array = np.zeros_like(array) + for i in range(array.shape[0]): + bl_array[i] = _blurring_2d(array[i], mask_center_sz, fwhm_sz) + return bl_array + + # 0. 
Identify parameters + # Separating the parameters of the ParamsObject from optional rot_options + class_params, rot_options = separate_kwargs_dict( + initial_kwargs=all_kwargs, parent_class=IPCA_Params + ) + # Do the same to separate IPCA and PCA params + pca_params, ipca_params = separate_kwargs_dict( + initial_kwargs=class_params, parent_class=PCA_Params + ) + + # Extracting the object of parameters (if any) + algo_params = None + if ALGO_KEY in rot_options.keys(): + algo_params = rot_options[ALGO_KEY] + del rot_options[ALGO_KEY] + + if algo_params is None: + algo_params = IPCA_Params(*all_args, **class_params) + + start_time = time_ini(algo_params.verbose) + + # force full_output but no verbose in PCA. + pca_params['full_output'] = True + pca_params['verbose'] = False # too verbose otherwise + + # 1. Identify exceptions + if algo_params.mask_rdi is not None and algo_params.mode is not None: + msg = "IPCA with first step initiated with data imputation is not " + msg += "compatible with incremental mode. Set 'mode' to None." + raise TypeError("") + + if algo_params.mode == "Juillard23": + if no_greeds: + msg = 'GreeDS Python bindings cannot be imported. Install GreeDS' + msg += ' (pip install GreeDS) or use a different method.' + raise RuntimeError(msg) + if algo_params.strategy not in ['ADI', "ARDI"]: + msg = 'Juillard23 not compatible with this mode.' 
+ raise RuntimeError(msg) + + if algo_params.strategy == 'ARDI': + ref = algo_params.cube_ref.copy() + else: + ref = None + + mask_center_px = algo_params.mask_center_px + pup = mask_center_px if mask_center_px is not None else 0 + + if algo_params.full_output is True: + it_cube, star_estim = GreeDS(algo_params.cube, + algo_params.angle_list, + refs=ref, r=algo_params.ncomp, + l=algo_params.nit, + r_start=algo_params.ncomp_start, + pup=pup, full_output=True, + returnL=True) + else: + it_cube = GreeDS(algo_params.cube, algo_params.angle_list, + refs=ref, r=algo_params.ncomp, l=algo_params.nit, + r_start=algo_params.ncomp_start, pup=pup, + full_output=True, returnL=False) + frame = it_cube[-1] + + # Set results matching full outputs + it = len(it_cube)-1 + algo_params.thr = 0 + + stim_cube = it_cube_nd = sig_images = nstim = np.zeros(it_cube.shape) + sig_mask = np.zeros(it_cube.shape) + + if algo_params.full_output is True: + print(algo_params.cube.shape) + print(star_estim.shape) + residuals_cube_ = cube_derotate(algo_params.cube - star_estim[-1], + algo_params.angle_list, + imlib="torch-fft", + nproc=algo_params.nproc) - frame + + residuals_cube = cube_derotate(residuals_cube_, + -algo_params.angle_list, + imlib="torch-fft", + nproc=algo_params.nproc) + + if algo_params.thr_mode == 'STIM': + for it_i in range(len(it_cube)): + cube_tmp = algo_params.cube - star_estim[it_i] + der_cube = cube_derotate(cube_tmp, algo_params.angle_list, + imlib="torch-fft", + nproc=algo_params.nproc) + residuals_cube__i = der_cube - it_cube[it_i] + + residuals_cube_i = cube_derotate(residuals_cube__i, + -algo_params.angle_list, + imlib="torch-fft", + nproc=algo_params.nproc) + + res = _find_significant_signals(residuals_cube_i, + residuals_cube__i, + algo_params.angle_list, + algo_params.thr, + mask=mask_center_px, + r_out=algo_params.r_out) + sig_mask_i, nstim_i = res + + sig_mask[it_i] = sig_mask_i.copy() + nstim[it_i] = nstim_i.copy() + + else: + sig_mask = np.ones_like(it_cube) + 
sig_mask[np.where(it_cube < algo_params.thr)] = 0 + nstim = sig_mask.copy() + + sig_images = it_cube.copy() + sig_images[np.where(1-sig_mask)] = 0 + sig_images[np.where(sig_images < 0)] = 0 + stim_cube = nstim.copy() + + else: + # 2. Prepare/format additional parameters depending on chosen options + mask_center_px = algo_params.mask_center_px # None or not? + mask_rdi_tmp = None + if algo_params.strategy == 'ADI' and algo_params.cube_ref is None: + ref_cube = None + mask_rdi_tmp = algo_params.mask_rdi + elif algo_params.cube_ref is not None: + if algo_params.strategy == 'ADI': + msg = "WARNING: requested strategy is 'ADI' but reference cube " + msg += "detected! Strategy automatically switched to 'ARDI'." + print(msg) + algo_params.strategy = 'ARDI' + if algo_params.mask_rdi is not None: + if isinstance(algo_params.mask_rdi, (list, tuple)): + mask_rdi_tmp = algo_params.mask_rdi + else: + mask_rdi_tmp = algo_params.mask_rdi.copy() + if algo_params.cube_ref is None: + raise ValueError("cube_ref should be provided for RDI or RADI") + if algo_params.strategy == 'ARDI' and algo_params.mask_rdi is None: + ref_cube = np.concatenate((algo_params.cube, + algo_params.cube_ref), axis=0) + else: + ref_cube = algo_params.cube_ref.copy() + else: + msg = "strategy not recognized: must be ADI, RDI, ARDI or RADI" + raise ValueError(msg) + + cond_di = algo_params.mask_rdi is not None + + if isinstance(algo_params.ncomp, (float, int)): + ncomp_list = [algo_params.ncomp] + if cond_di: + ncomp_list.append(algo_params.ncomp) + if algo_params.strategy == 'RADI': + ncomp_list.append(algo_params.ncomp) + elif isinstance(algo_params.ncomp, (tuple, list)): + ncomp_list = algo_params.ncomp + if len(algo_params.ncomp) == 1: + if algo_params.mask_rdi is not None: + ncomp_list.append(algo_params.ncomp) + if algo_params.strategy == 'RADI': + ncomp_list.append(algo_params.ncomp) + elif len(algo_params.ncomp) == 2: + if cond_di and algo_params.strategy == 'RADI': + msg = "npc list/tuple should have 
3 elements for " + raise ValueError(msg+"DI+IPCA-RADI") + elif not len(algo_params.ncomp) == 3: + raise ValueError("Length of npc list cannot be larger than 3") + else: + raise TypeError("ncomp should be float, int, tuple or list") + + ncomp_tmp = ncomp_list[0] + ncomp_start = algo_params.ncomp_start + ncomp_step = algo_params.ncomp_step + nframes = algo_params.cube.shape[0] + nit_ori = algo_params.nit + + if algo_params.mode is not None: + if algo_params.mode == 'Christiaens24': + final_ncomp = [] + for npc in range(ncomp_start, ncomp_tmp+1, ncomp_step): + for ii in range(algo_params.nit): + final_ncomp.append(npc) + algo_params.nit = len(final_ncomp) + else: + raise ValueError("mode is not recognized.") + else: + final_ncomp = [ncomp_tmp]*algo_params.nit + if cond_di: + final_ncomp = [ncomp_tmp]+[ncomp_list[1]]*(algo_params.nit-1) + + # Scale cube and cube_ref if necessary + cube_tmp = prepare_matrix(algo_params.cube, scaling=algo_params.scaling, + mask_center_px=mask_center_px, mode='fullfr', + verbose=False) + cube_tmp = np.reshape(cube_tmp, algo_params.cube.shape) + if ref_cube is not None: + cube_ref_tmp = prepare_matrix(ref_cube, scaling=algo_params.scaling, + mask_center_px=mask_center_px, + mode='fullfr', verbose=False) + cube_ref_tmp = np.reshape(cube_ref_tmp, ref_cube.shape) + else: + cube_ref_tmp = None + + # 3. 
Get a first disc estimate, using PCA + pca_params['ncomp'] = final_ncomp[0] + pca_params['cube'] = cube_tmp + pca_params['cube_ref'] = cube_ref_tmp + + res = pca(**pca_params, **rot_options) + + frame = res[0] + residuals_cube = res[-2] + residuals_cube_ = res[-1] + # smoothing and manual derotation if requested + smooth_ker = algo_params.smooth_ker + if smooth_ker is None or np.isscalar(smooth_ker): + smooth_ker = [smooth_ker]*algo_params.nit + elif len(smooth_ker) == algo_params.nit: + smooth_ker = np.array(smooth_ker, dtype=object) + elif smooth_ker.ndim == 2: + smooth_ker = [smooth_ker]*algo_params.nit + else: + if len(smooth_ker) != algo_params.nit: + msg = "If a 1d array or list, smooth_ker should have nit length" + raise TypeError(msg) + else: + msg = "Type not recognized for smooth_ker" + raise TypeError(msg) + # if smooth_ker[0] is not None: + # residuals_cube = _blurring_3d(residuals_cube, None, + # fwhm_sz=smooth_ker[0]) + # residuals_cube_ = cube_derotate(residuals_cube, + # algo_params.angle_list, + # imlib=algo_params.imlib, + # nproc=algo_params.nproc) + # frame = cube_collapse(residuals_cube_, algo_params.collapse) + if smooth_ker[0] is not None: + if np.isscalar(smooth_ker[0]): + frame = frame_filter_lowpass(frame, fwhm_size=smooth_ker[0]) + elif smooth_ker[0].ndim == 2: + frame = frame_filter_lowpass(frame, psf=smooth_ker[0]) + + # 4. 
Identify significant signals with STIM map + it_cube = np.zeros([algo_params.nit, frame.shape[0], frame.shape[1]]) + it_cube_nd = np.zeros_like(it_cube) + stim_cube = np.zeros_like(it_cube) + sig_images = np.zeros_like(it_cube) + it_cube[0] = frame.copy() + it_cube_nd[0] = frame.copy() + if algo_params.thr_mode == 'STIM': + sig_mask, nstim = _find_significant_signals(residuals_cube, + residuals_cube_, + algo_params.angle_list, + algo_params.thr, + mask=mask_center_px, + r_out=algo_params.r_out) + else: + sig_mask = np.ones_like(frame) + sig_mask[np.where(frame < algo_params.thr)] = 0 + nstim = sig_mask.copy() + sig_image = frame.copy() + sig_image[np.where(1-sig_mask)] = 0 + sig_image[np.where(sig_image < 0)] = 0 + sig_images[0] = sig_image.copy() + stim_cube[0] = nstim.copy() + mask_rdi_tmp = None # after first iteration do not use it any more + + # 5. Loop, updating the reference cube before projection by subtracting + # best disc estimate. This is done by providing sig_cube. + cond_skip = False # whether skip an iteration e.g. 
in incremental mode + cond_add_nd_excess = False + for it in Progressbar(range(1, algo_params.nit), desc="Iterating..."): + if not cond_skip: + # Uncomment here (and comment below) to do like IROLL + # if smooth_ker[it] is not None: + # frame = _blurring_2d(frame, None, fwhm_sz=smooth_ker[it]) + # create and rotate sig cube + sig_cube = np.repeat(frame[np.newaxis, :, :], nframes, axis=0) + sig_cube = cube_derotate(sig_cube, -algo_params.angle_list, + imlib=algo_params.imlib, + nproc=algo_params.nproc) + + if algo_params.thr_mode == 'STIM': + # create and rotate binary mask + mask_sig = np.zeros_like(sig_image) + mask_sig[np.where(sig_image > 0)] = 1 + sig_mcube = np.repeat(mask_sig[np.newaxis, :, :], nframes, + axis=0) + sig_mcube = cube_derotate(sig_mcube, + -algo_params.angle_list, + imlib='skimage', + interpolation='bilinear', + nproc=algo_params.nproc) + sig_cube[np.where(sig_mcube < 0.5)] = 0 + sig_cube[np.where(sig_cube < 0)] = 0 + else: + sig_cube[np.where(sig_cube < algo_params.thr)] = 0 + + if algo_params.strategy == 'ARDI': + ref_cube = np.concatenate((algo_params.cube-sig_cube, + algo_params.cube_ref), axis=0) + cube_ref_tmp = prepare_matrix(ref_cube, + scaling=algo_params.scaling, + mask_center_px=mask_center_px, + mode='fullfr', verbose=False) + cube_ref_tmp = np.reshape(cube_ref_tmp, ref_cube.shape) + + # Run PCA on original cube + # Update PCA PARAMS + pca_params['cube'] = algo_params.cube + if algo_params.strategy == 'ADI': + pca_params['cube_ref'] = None + else: + pca_params['cube_ref'] = ref_cube + pca_params['ncomp'] = final_ncomp[it] + pca_params['scaling'] = algo_params.scaling + pca_params['cube_sig'] = sig_cube + pca_params['mask_rdi'] = mask_rdi_tmp + + res = pca(**pca_params, **rot_options) + + frame = res[0] + residuals_cube = res[-2] + it_cube[it] = frame.copy() + + # Run PCA on disk-empty cube + # Update PCA PARAMS + pca_params['cube'] = cube_tmp-sig_cube + pca_params['cube_ref'] = cube_ref_tmp + pca_params['cube_sig'] = None + 
pca_params['scaling'] = None + + res_nd = pca(**pca_params, **rot_options) + + residuals_cube_nd = res_nd[-2] + frame_nd = res_nd[0] + + # smoothing and manual derotation if requested + if smooth_ker[it] is not None: + # original cube + residuals_cube = _blurring_3d(residuals_cube, None, + fwhm_sz=smooth_ker[it]) + residuals_cube_ = cube_derotate(residuals_cube, + algo_params.angle_list, + imlib=algo_params.imlib, + nproc=algo_params.nproc) + frame = cube_collapse(residuals_cube_, algo_params.collapse) + # cube with circumstellar signals subtracted + residuals_cube_nd = _blurring_3d(residuals_cube_nd, None, + fwhm_sz=smooth_ker[it]) + residuals_cube_nd_ = cube_derotate(residuals_cube_nd, + algo_params.angle_list, + imlib=algo_params.imlib, + nproc=algo_params.nproc) + frame_nd = cube_collapse(residuals_cube_nd_, + algo_params.collapse) + + # also add significant signals from frame_nd, if requested + if cond_add_nd_excess and algo_params.thr_mode != 'STIM': + sig_mask_nd = np.ones_like(frame_nd) + sig_mask_nd[np.where(frame_nd < algo_params.thr)] = 0 + frame += frame_nd*sig_mask_nd + + if algo_params.thr_mode == 'STIM': + res = _find_significant_signals(residuals_cube_nd, + residuals_cube_, + algo_params.angle_list, + algo_params.thr, + mask=mask_center_px, + r_out=algo_params.r_out) + sig_mask, nstim = res + else: + sig_mask = np.ones_like(frame) + sig_mask[np.where(frame < algo_params.thr)] = 0 + nstim = sig_mask.copy() + inv_sig_mask = np.ones_like(sig_mask) + inv_sig_mask[np.where(sig_mask)] = 0 + if mask_center_px: + inv_sig_mask = mask_circle(inv_sig_mask, mask_center_px, + fillwith=1) + sig_image = frame.copy() + sig_image[np.where(inv_sig_mask)] = 0 + sig_image[np.where(sig_image < 0)] = 0 + + # whether skipped or not: + it_cube[it] = frame.copy() + it_cube_nd[it] = frame_nd.copy() + sig_images[it] = sig_image.copy() + stim_cube[it] = nstim.copy() + + # check if improvement compared to last iteration + if it > 1: + cond1 = np.allclose(sig_image, 
sig_images[it-1], + rtol=algo_params.rtol, + atol=algo_params.atol) + cond2 = np.allclose(sig_image, sig_images[it-2], + rtol=algo_params.rtol, + atol=algo_params.atol) + if cond1 or cond2: + # if convergence in incremental mode: skip iterations until + # next increment in ncomp + cond_mod = algo_params.mode == 'Christiaens24' + cond_it = (it % nit_ori != nit_ori-1) + cond_st = algo_params.strategy in ['ADI', 'RDI', 'ARDI'] + cond_ad1 = cond_add_nd_excess is True + cond_ad2 = algo_params.add_nd_excess is False + if cond_mod and cond_it: + cond_skip = True + elif cond_mod: # in incremental mode don't skip if cond_it + cond_skip = False + else: # else in non-incremental mode: break or don't smooth + cond_skip = False + condc = algo_params.continue_without_smooth_after_conv + msg = "Convergence criterion met after {} iterations in" + msg += " {}." + msg2 = ".. Smoothing turned off and iterating more. " + if cond_st and (cond_ad1 or cond_ad2): + if smooth_ker[it] is not None and condc: + smooth_ker_N = [None]*(len(smooth_ker)-it-1) + smooth_ker[it+1:] = smooth_ker_N + if algo_params.verbose: + dt = time_fin(start_time) + print("\n"+msg.format(it, dt)+msg2) + else: + if algo_params.verbose: # and not cond_it: + dt = time_fin(start_time) + print("\n Final " + msg.format(it, dt)) + break + if not cond_st: + # continue to iterate with ADI + # ncomp_tmp = ncomp_list[-1] # BUG: not taking + final_ncomp[:] = [ncomp_list[-1] for _ in range(len(final_ncomp))] + algo_params.strategy = algo_params.strategy[1:] # -'R' + ref_cube = None + if algo_params.verbose: + msg = "After {:.0f} iterations, PCA-RDI -> PCA-{}." 
+ print("\n" + msg.format(it, algo_params.strategy)) + elif not (cond_ad1 or cond_ad2): + # continue to iterate with nd excess + # but increase smooth_ker up to 1.5 to avoid instability + if smooth_ker[it+1] < 2: + for nit_r in range(it+1, algo_params.nit): + smooth_ker[nit_r] = min(smooth_ker[nit_r-1]*1.1, + 1.5) + if algo_params.verbose: + msg = "After {:.0f} iterations, now iterating " + msg += "with nd excess..." + print("\n" + msg.format(it)) + cond_add_nd_excess = True + + # mask everything last + if mask_center_px is not None: + frame = mask_circle(frame, mask_center_px) + it_cube = mask_circle(it_cube, mask_center_px) + residuals_cube = mask_circle(residuals_cube, mask_center_px) + residuals_cube_ = mask_circle(residuals_cube_, mask_center_px) + it_cube_nd = mask_circle(it_cube_nd, mask_center_px) + + if algo_params.full_output: + return (frame, it_cube[:it+1], sig_images[:it+1], residuals_cube, + residuals_cube_, stim_cube[:it+1], it_cube_nd[:it+1]) + else: + return frame diff --git a/metrics/contrcurve.py b/metrics/contrcurve.py new file mode 100644 index 00000000..925e57de --- /dev/null +++ b/metrics/contrcurve.py @@ -0,0 +1,1269 @@ +#! /usr/bin/env python +""" +Module with contrast curve generation function. + +""" + +__author__ = "C. Gomez, O. 
Absil @ ULg" +__all__ = ["contrast_curve", "noise_per_annulus", "throughput", "aperture_flux"] + +import numpy as np +import pandas as pd +from inspect import getfullargspec +try: + from photutils.aperture import aperture_photometry, CircularAperture +except: + from photutils import aperture_photometry, CircularAperture +from scipy.interpolate import InterpolatedUnivariateSpline +from scipy import stats +from scipy.signal import savgol_filter +from skimage.draw import disk +from matplotlib import pyplot as plt +from ..fm import cube_inject_companions, frame_inject_companion, normalize_psf +from ..config import time_ini, timing +from ..config.utils_conf import vip_figsize, vip_figdpi +from ..var import frame_center, dist + + +# TODO: Include algo_class modifications in any tutorial using this function +def contrast_curve( + cube, + angle_list, + psf_template, + fwhm, + pxscale, + starphot, + algo, + sigma=5, + nbranch=1, + theta=None, + inner_rad=1, + fc_rad_sep=3, + noise_sep=1, + wedge=(0, 360), + fc_snr=100, + student=True, + transmission=None, + smooth=True, + interp_order=2, + plot=True, + dpi=vip_figdpi, + debug=False, + verbose=True, + full_output=False, + save_plot=None, + object_name=None, + frame_size=None, + fix_y_lim=(), + figsize=vip_figsize, + algo_class=None, + **algo_dict, +): + """Compute the contrast curve at a given confidence (``sigma``) level for\ + an ADI cube or ADI+IFS cube. The contrast is calculated as\ + sigma*noise/throughput. This implementation takes into account the small\ + sample statistics correction proposed in [MAW14]_. + + Parameters + ---------- + cube : 3d or 4d numpy ndarray + The input cube, 3d (ADI data) or 4d array (IFS data), without fake + companions. + angle_list : 1d numpy ndarray + Vector of derotation angles to align North up in your images. + psf_template : 2d or 3d numpy ndarray + Frame with the psf template for the fake companion(s). + PSF must be centered in array. Normalization is done internally. 
+    fwhm: int or float or 1d array, optional
+        The Full Width Half Maximum in pixels. It can handle a different
+        FWHM value for different wavelengths (IFS data).
+    pxscale : float
+        Plate scale or pixel scale of the instrument (only used for plots)
+    starphot : int or float or 1d array
+        If int or float it corresponds to the aperture photometry of the
+        non-coronagraphic PSF which we use to scale the contrast. If a vector
+        is given it must contain the photometry correction for each frame.
+    algo : callable or function
+        The post-processing algorithm, e.g. vip_hci.pca.pca.
+    sigma : int
+        Sigma level for contrast calculation. Note this is a "Gaussian sigma"
+        regardless of whether Student t correction is performed (set by the
+        'student' parameter). E.g. setting sigma to 5 will yield the contrast
+        curve corresponding to a false alarm probability of 3e-7.
+    nbranch : int, optional
+        Number of branches on which to inject fake companions. Each branch
+        is tested individually.
+    theta : float, optional
+        Angle in degrees for rotating the position of the first branch that by
+        default is located midway in the wedge range. Theta counts
+        counterclockwise from the positive x axis. When working on a wedge,
+        make sure that theta is located inside of it.
+    inner_rad : int, optional
+        Innermost radial distance to be considered in terms of FWHM. Should be
+        >= 1.
+    fc_rad_sep : int, optional
+        Radial separation between the injected companions (in each of the
+        patterns) in FWHM. Must be large enough to avoid overlapping. With the
+        maximum possible value, a single fake companion will be injected per
+        cube and algorithm post-processing (which greatly affects computation
+        time).
+    noise_sep: int or None, optional
+        Radial sampling of the noise level. By default it is performed with a
+        radial step of 1 pixel. If set to None, it will be automatically set to
+        be sampled every fwhm pixels radially.
+ wedge : tuple of floats, optional + Initial and Final angles for using a wedge. For example (-90,90) only + considers the right side of an image. + fc_snr: float optional + Signal to noise ratio of injected fake companions (w.r.t a Gaussian + distribution). + student : bool, optional + If True uses Student t correction to inject fake companion. + transmission: numpy array, optional + Radial transmission of the coronagraph, if any. Array with either + 2 x n_rad or 1+n_ch x n_rad columns. The first column should contain the + radial separation in pixels, while the other column(s) are the + corresponding off-axis transmission (between 0 and 1), for either all, + or each spectral channel (only relevant for a 4D input cube). + smooth : bool, optional + If True the radial noise curve is smoothed with a Savitzky-Golay filter + of order 2. + interp_order : int or None, optional + If an integer is provided, the throughput vector is interpolated with a + spline of order ``interp_order``. Takes values from 1 to 5. If None, + then the throughput is not interpolated. + plot : bool, optional + Whether to plot the final contrast curve or not. True by default. + dpi : int optional + Dots per inch for the plots. 100 by default. 300 for printing quality. + imlib : {'opencv', 'ndimage-fourier', 'ndimage-interp', 'vip-fft'}, str opt + Library or method used for image operations (rotations). Opencv is the + default for being the fastest. See description of + `vip_hci.preproc.frame_rotate`. + interpolation: str, opt + See description of ``vip_hci.preproc.frame_rotate`` function + debug : bool, optional + Whether to print and plot additional info such as the noise, throughput, + the contrast curve with different X axis and the delta magnitude instead + of contrast. + verbose : {True, False, 0, 1, 2}, optional + If True or 1 the function prints to stdout intermediate info and timing, + if set to 2 more output will be shown. 
+ full_output : bool, optional + If True returns intermediate arrays. + save_plot: string or None, optional + If provided, the contrast curve will be saved to this path. + object_name: string or None, optional + Target name, used in the plot title. + frame_size: int, optional + Frame size used for generating the contrast curve, used in the plot + title. + fix_y_lim: tuple, optional + If provided, the y axis limits will be fixed, for easier comparison + between plots. + fig_size: tuple, optional + Figure size + **algo_dict + Any other valid parameter of the post-processing algorithms can be + passed here, including e.g. imlib and interpolation. + + Returns + ------- + datafr : pandas dataframe + Dataframe containing the sensitivity (Gaussian and Student corrected if + Student parameter is True), the interpolated throughput, the distance in + pixels, the noise and the sigma corrected (if Student is True). + + If full_output is True then the function returns: + datafr, cube_fc_all, frame_fc_all, frame_nofc and fc_map_all. + + frame_fc_all : numpy ndarray + 3d array with the 3 frames of the 3 (patterns) processed cubes with + companions. + frame_nofc : numpy ndarray + 2d array, PCA processed frame without companions. + fc_map_all : numpy ndarray + 3d array with 3 frames containing the position of the companions in the + 3 patterns. 
+ """ + if cube.ndim != 3 and cube.ndim != 4: + raise TypeError("The input array is not a 3d or 4d cube") + if cube.ndim == 3 and (cube.shape[0] != angle_list.shape[0]): + raise TypeError("Input parallactic angles vector has wrong length") + if cube.ndim == 4 and (cube.shape[1] != angle_list.shape[0]): + raise TypeError("Input parallactic angles vector has wrong length") + if cube.ndim == 3 and psf_template.ndim != 2: + raise TypeError("Template PSF is not a frame (for ADI case)") + if cube.ndim == 4 and psf_template.ndim != 3: + raise TypeError("Template PSF is not a cube (for ADI+IFS case)") + if transmission is not None: + if len(transmission) != 2 and len(transmission) != cube.shape[0] + 1: + msg = "Wrong shape for transmission should be 2xn_rad or (nch+1) " + msg += "x n_rad, instead of {}".format(transmission.shape) + raise TypeError(msg) + + if isinstance(fwhm, (np.ndarray, list)): + fwhm_med = np.median(fwhm) + else: + fwhm_med = fwhm + + if verbose: + start_time = time_ini() + if isinstance(starphot, float) or isinstance(starphot, int): + msg0 = "ALGO : {}, FWHM = {}, # BRANCHES = {}, SIGMA = {}," + msg0 += " STARPHOT = {}" + print(msg0.format(algo.__name__, fwhm_med, nbranch, sigma, + starphot)) + else: + msg0 = "ALGO : {}, FWHM = {}, # BRANCHES = {}, SIGMA = {}" + print(msg0.format(algo.__name__, fwhm_med, nbranch, sigma)) + + # throughput + verbose_thru = False + if verbose == 2: + verbose_thru = True + res_throug = throughput( + cube, + angle_list, + psf_template, + fwhm, + algo=algo, + nbranch=nbranch, + theta=theta, + inner_rad=inner_rad, + fc_rad_sep=fc_rad_sep, + wedge=wedge, + fc_snr=fc_snr, + noise_sep=noise_sep, + full_output=True, + verbose=verbose_thru, + algo_class=algo_class, + **algo_dict, + ) + vector_radd = res_throug[3] + if res_throug[0].shape[0] > 1: + thruput_mean = np.nanmean(res_throug[0], axis=0) + else: + thruput_mean = res_throug[0][0] + frame_fc_all = res_throug[4] + frame_nofc = res_throug[5] + fc_map_all = res_throug[6] + + if 
verbose: + print("Finished the throughput calculation") + timing(start_time) + + if transmission is not None: + t_nz = transmission.shape[0] + if transmission.ndim != 2: + raise ValueError("transmission should be a 2D ndarray") + elif t_nz != 2 and t_nz != 1 + cube.shape[0]: + msg = "transmission dimensions should be (2,N) or (n_wave+1,N)" + raise ValueError(msg) + # if transmission doesn't have right format for interpolation, adapt it + diag = np.sqrt(2) * cube.shape[-1] + if transmission[0, 0] != 0 or transmission[0, -1] < diag: + trans_rad_list = transmission[0].tolist() + for j in range(t_nz - 1): + trans_list = transmission[j + 1].tolist() + # should have a zero point + if transmission[0, 0] != 0: + if j == 0: + trans_rad_list = [0] + trans_rad_list + trans_list = [0] + trans_list + # last point should be max possible distance between fc and star + if transmission[0, -1] < np.sqrt(2) * cube.shape[-1] / 2.0: + if j == 0: + trans_rad_list = trans_rad_list + [diag] + trans_list = trans_list + [1] + if j == 0: + ntransmission = np.zeros([t_nz, len(trans_rad_list)]) + ntransmission[0] = trans_rad_list + ntransmission[j + 1] = trans_list + transmission = ntransmission.copy() + if t_nz > 2: # take the mean transmission over all wavelengths + ntransmission = np.zeros([2, len(trans_rad_list)]) + ntransmission[0] = transmission[0] + ntransmission[1] = np.mean(transmission[1:], axis=0) + transmission = ntransmission.copy() + + if interp_order is not None or noise_sep is not None: + # noise measured in the empty frame with noise_sep sampling + if noise_sep is None: + rad_samp = vector_radd + noise_samp = res_throug[1] + res_lev_samp = res_throug[2] + else: + # starting from 1*FWHM + noise_samp, res_lev_samp, rad_samp = noise_per_annulus( + frame_nofc, + separation=noise_sep, + fwhm=fwhm_med, + init_rad=fwhm_med, + wedge=wedge, + ) + radmin = vector_radd.astype(int).min() + cutin1 = np.where(rad_samp.astype(int) == radmin)[0][0] + noise_samp = noise_samp[cutin1:] + 
res_lev_samp = res_lev_samp[cutin1:] + rad_samp = rad_samp[cutin1:] + # define max radius, ensuring it is > fwhm/2 from the outer image edge + radmax_fwhm = int(((cube.shape[-1]-1)//2)-fwhm_med/2) + radmax = min(vector_radd.astype(int).max(), radmax_fwhm) + radtmp = radmax + if len(np.where(rad_samp.astype(int) == radmax)[0]) == 0: + while len(np.where(rad_samp.astype(int) == radtmp)[0]) == 0: + radtmp -= 1 + cutin2 = np.where(rad_samp.astype(int) == radtmp)[0][0] + noise_samp = noise_samp[: cutin2 + 1] + res_lev_samp = res_lev_samp[: cutin2 + 1] + rad_samp = rad_samp[: cutin2 + 1] + + if interp_order is not None: + # interpolating the throughput vector, spline order 2 + f = InterpolatedUnivariateSpline( + vector_radd, thruput_mean, k=interp_order) + thruput_interp = f(rad_samp) + else: + thruput_interp = thruput_mean.copy() + + # interpolating the transmission vector, spline order 1 + if transmission is not None: + trans = transmission[1] + radvec_trans = transmission[0] + f2 = InterpolatedUnivariateSpline(radvec_trans, trans, k=1) + trans_interp = f2(rad_samp) + thruput_interp *= trans_interp + else: + rad_samp = vector_radd + noise_samp = res_throug[1] + res_lev_samp = res_throug[2] + thruput_interp = thruput_mean + if transmission is not None: + if not transmission[1].shape == thruput_interp.shape[0]: + msg = "Transmiss. and throughput vectors have different length" + raise ValueError(msg) + thruput_interp *= transmission[1] + + rad_samp_arcsec = rad_samp * pxscale + + # take abs value of the mean residual fluxes otherwise the more + # oversubtraction (negative res_lev_samp), the better the contrast!! 
+ # res_lev_samp = np.abs(res_lev_samp) + + # correction (noticed in cases of oversubtraction) - to be confirmed: + res_lev_samp = np.zeros_like(res_lev_samp) + + if smooth: + # smoothing the noise vector using a Savitzky-Golay filter + win = min(noise_samp.shape[0] - 2, int(2 * fwhm_med)) + if win % 2 == 0: + win += 1 + noise_samp_sm = savgol_filter( + noise_samp, polyorder=2, mode="nearest", window_length=win + ) + res_lev_samp_sm = savgol_filter( + res_lev_samp, polyorder=2, mode="nearest", window_length=win + ) + else: + noise_samp_sm = noise_samp + res_lev_samp_sm = res_lev_samp + + # calculating the contrast + if isinstance(starphot, float) or isinstance(starphot, int): + cont_curve_samp = ( + (sigma * noise_samp_sm + res_lev_samp_sm) / thruput_interp + ) / starphot + else: + cont_curve_samp = ( + (sigma * noise_samp_sm + res_lev_samp_sm) / thruput_interp + ) / np.median(starphot) + cont_curve_samp[np.where(cont_curve_samp < 0)] = 1 + cont_curve_samp[np.where(cont_curve_samp > 1)] = 1 + + # calculating the Student corrected contrast + if student: + n_res_els = np.floor(rad_samp / fwhm_med * 2 * np.pi) + ss_corr = np.sqrt(1 + 1 / n_res_els) + sigma_corr = stats.t.ppf(stats.norm.cdf(sigma), n_res_els - 1) * ss_corr + if isinstance(starphot, float) or isinstance(starphot, int): + cont_curve_samp_corr = ( + (sigma_corr * noise_samp_sm + res_lev_samp_sm) / thruput_interp + ) / starphot + else: + cont_curve_samp_corr = ( + (sigma_corr * noise_samp_sm + res_lev_samp_sm) / thruput_interp + ) / np.median(starphot) + cont_curve_samp_corr[np.where(cont_curve_samp_corr < 0)] = 1 + cont_curve_samp_corr[np.where(cont_curve_samp_corr > 1)] = 1 + + if debug: + plt.rc("savefig", dpi=dpi) + plt.figure(figsize=figsize, dpi=dpi) + # throughput + plt.plot(vector_radd * pxscale, thruput_mean, + ".", label="computed", alpha=0.6) + plt.plot( + rad_samp_arcsec, thruput_interp, ",-", label="interpolated", lw=2, + alpha=0.5 + ) + plt.grid("on", which="both", alpha=0.2, 
linestyle="solid") + plt.xlabel("Angular separation [arcsec]") + plt.ylabel("Throughput") + plt.legend(loc="best") + plt.xlim(0, np.max(rad_samp * pxscale)) + # noise + plt.figure(figsize=figsize, dpi=dpi) + plt.plot(rad_samp_arcsec, noise_samp, ".", label="computed", alpha=0.6) + if smooth: + plt.plot( + rad_samp_arcsec, + noise_samp_sm, + ",-", + label="noise smoothed", + lw=2, + alpha=0.5, + ) + plt.grid("on", alpha=0.2, linestyle="solid") + plt.xlabel("Angular separation [arcsec]") + plt.ylabel("Noise") + plt.legend(loc="best") + plt.xlim(0, np.max(rad_samp_arcsec)) + # mean residual level + plt.figure(figsize=figsize, dpi=dpi) + plt.plot( + rad_samp_arcsec, + res_lev_samp, + ".", + label="computed residual level", + alpha=0.6, + ) + if smooth: + plt.plot( + rad_samp_arcsec, + res_lev_samp_sm, + ",-", + label="smoothed residual level", + lw=2, + alpha=0.5, + ) + plt.grid("on", alpha=0.2, linestyle="solid") + plt.xlabel("Angular separation [arcsec]") + plt.ylabel("Mean residual level") + plt.legend(loc="best") + plt.xlim(0, np.max(rad_samp_arcsec)) + + # plotting + if plot or debug: + if student: + label = ["Sensitivity (Gaussian)", + "Sensitivity (Student-t correction)"] + else: + label = ["Sensitivity (Gaussian)"] + + plt.rc("savefig", dpi=dpi) + fig = plt.figure(figsize=figsize, dpi=dpi) + ax1 = fig.add_subplot(111) + (con1,) = ax1.plot( + rad_samp_arcsec, cont_curve_samp, "-", alpha=0.2, lw=2, + color="green" + ) + (con2,) = ax1.plot( + rad_samp_arcsec, cont_curve_samp, ".", alpha=0.2, color="green" + ) + if student: + (con3,) = ax1.plot( + rad_samp_arcsec, + cont_curve_samp_corr, + "-", + alpha=0.4, + lw=2, + color="blue", + ) + (con4,) = ax1.plot( + rad_samp_arcsec, cont_curve_samp_corr, ".", alpha=0.4, + color="blue" + ) + lege = [(con1, con2), (con3, con4)] + else: + lege = [(con1, con2)] + plt.legend(lege, label, fancybox=True, fontsize="medium") + plt.xlabel("Angular separation [arcsec]") + plt.ylabel(str(sigma) + " sigma contrast") + plt.grid("on", 
which="both", alpha=0.2, linestyle="solid") + ax1.set_yscale("log") + ax1.set_xlim(0, np.max(rad_samp_arcsec)) + + # Give a title to the contrast curve plot + if object_name is not None and frame_size is not None: + # Retrieve ncomp and pca_type info to use in title + ncomp = algo_dict["ncomp"] + if algo_dict["cube_ref"] is None: + pca_type = "ADI" + else: + pca_type = "RDI" + title = "{} {} {}pc {} + {}".format( + pca_type, object_name, ncomp, frame_size, inner_rad + ) + plt.title(title, fontsize=14) + + # Option to fix the y-limit + if len(fix_y_lim) == 2: + min_y_lim = min(fix_y_lim[0], fix_y_lim[1]) + max_y_lim = max(fix_y_lim[0], fix_y_lim[1]) + ax1.set_ylim(min_y_lim, max_y_lim) + + # Optionally, save the figure to a path + if save_plot is not None: + fig.savefig(save_plot, dpi=dpi) + + if debug: + fig2 = plt.figure(figsize=figsize, dpi=dpi) + ax3 = fig2.add_subplot(111) + cc_mags = -2.5 * np.log10(cont_curve_samp) + (con4,) = ax3.plot( + rad_samp_arcsec, cc_mags, "-", alpha=0.2, lw=2, color="green" + ) + (con5,) = ax3.plot(rad_samp_arcsec, + cc_mags, ".", alpha=0.2, color="green") + if student: + cc_mags_corr = -2.5 * np.log10(cont_curve_samp_corr) + (con6,) = ax3.plot( + rad_samp_arcsec, cc_mags_corr, "-", alpha=0.4, lw=2, + color="blue" + ) + (con7,) = ax3.plot( + rad_samp_arcsec, cc_mags_corr, ".", alpha=0.4, color="blue" + ) + lege = [(con4, con5), (con6, con7)] + else: + lege = [(con4, con5)] + plt.legend(lege, label, fancybox=True, fontsize="medium") + plt.xlabel("Angular separation [arcsec]") + plt.ylabel("Delta magnitude") + plt.gca().invert_yaxis() + plt.grid("on", which="both", alpha=0.2, linestyle="solid") + ax3.set_xlim(0, np.max(rad_samp * pxscale)) + ax4 = ax3.twiny() + ax4.set_xlabel("Distance [pixels]") + ax4.plot(rad_samp, cc_mags, "", alpha=0.0) + ax4.set_xlim(0, np.max(rad_samp)) + + if student: + datafr = pd.DataFrame( + { + "sensitivity_gaussian": cont_curve_samp, + "sensitivity_student": cont_curve_samp_corr, + "throughput": 
thruput_interp,
                "distance": rad_samp,
                "distance_arcsec": rad_samp_arcsec,
                "noise": noise_samp_sm,
                "residual_level": res_lev_samp_sm,
                "sigma corr": sigma_corr,
            }
        )
    else:
        # No Student-t correction requested: Gaussian sensitivity only.
        datafr = pd.DataFrame(
            {
                "sensitivity_gaussian": cont_curve_samp,
                "throughput": thruput_interp,
                "distance": rad_samp,
                "distance_arcsec": rad_samp_arcsec,
                "noise": noise_samp_sm,
                "residual_level": res_lev_samp_sm,
            }
        )

    if full_output:
        return datafr, frame_fc_all, frame_nofc, fc_map_all
    else:
        return datafr


# TODO: Include algo_class modifications in any tutorial using this function
def throughput(
    cube,
    angle_list,
    psf_template,
    fwhm,
    algo,
    nbranch=1,
    theta=None,
    inner_rad=1,
    fc_rad_sep=3,
    wedge=(0, 360),
    fc_snr=100,
    noise_sep=1,
    full_output=False,
    verbose=True,
    algo_class=None,
    **algo_dict,
):
    """Measure the throughput for chosen algorithm and input dataset (ADI or\
    ADI+mSDI).

    The final throughput is the average of the same procedure measured in
    ``nbranch`` azimutally equidistant branches.

    Parameters
    ----------
    cube : 3d or 4d numpy ndarray
        The input cube, 3d (ADI data) or 4d array (IFS data), without fake
        companions.
    angle_list : 1d numpy ndarray
        Vector with parallactic angles.
    psf_template : 2d or 3d numpy ndarray
        Frame with the psf template for the fake companion(s).
        PSF must be centered in array. Normalization is done internally.
    fwhm: int or float or 1d array, optional
        The Full Width Half Maximum in pixels. It can handle a different
        FWHM value for different wavelengths (IFS data).
    algo : callable or function
        The post-processing algorithm, e.g. vip_hci.pca.pca. Third party Python
        algorithms can be plugged here. They must have the parameters: 'cube',
        'angle_list' and 'verbose'. Optionally a wrapper function can be used.
    nbranch : int optional
        Number of branches on which to inject fakes companions. Each branch
        is tested individually.
    theta : float, optional
        Angle in degrees for rotating the position of the first branch that by
        default is located midway in the wedge range. Theta counts
        counterclockwise from the positive x axis.
    inner_rad : int, optional
        Innermost radial distance to be considered in terms of FWHM. Should be
        >= 1.
    fc_rad_sep : int optional
        Radial separation between the injected companions (in each of the
        patterns) in FWHM. Must be large enough to avoid overlapping. With the
        maximum possible value, a single fake companion will be injected per
        cube and algorithm post-processing (which greatly affects computation
        time).
    wedge : tuple of floats, optional
        Initial and Final angles for using a wedge. For example (-90,90) only
        considers the right side of an image.
    fc_snr: float optional
        Signal to noise ratio of injected fake companions (w.r.t a Gaussian
        distribution).
    noise_sep: int or None, optional
        Radial sampling of the noise level. By default it is performed with a
        radial step of 1 pixel. If set to None, it will be automatically set to
        be sampled every fwhm pixels radially.
    full_output : bool, optional
        If True returns intermediate arrays.
    verbose : bool, optional
        If True prints out timing and information.
    algo_class : object, optional
        Not used within this function body; kept in the signature for
        backward/API compatibility with callers that pass it.
    **algo_dict
        Parameters of the post-processing algorithms can be passed here,
        including e.g. ``imlib``, ``interpolation`` or ``nproc``.

    Returns
    -------
    thruput_arr : numpy ndarray
        2d array whose rows are the annulus-wise throughput values for each
        branch.
    vector_radd : numpy ndarray
        1d array with the distances in FWHM (the positions of the annuli).

    If full_output is True then the function returns: thruput_arr, noise,
    res_level, vector_radd, frame_fc_all, frame_nofc and fc_map_all.

    noise : numpy ndarray
        1d array with the noise per annulus.
    res_level : numpy ndarray
        1d array with the mean residual level per annulus.
    frame_fc_all : numpy ndarray
        3d array with the 3 frames of the 3 (patterns) processed cubes with
        companions.
    frame_nofc : numpy ndarray
        2d array, PCA processed frame without companions.
    fc_map_all : numpy ndarray
        3d array with 3 frames containing the position of the companions in the
        3 patterns.

    """
    array = cube
    parangles = angle_list
    # Pull algorithm keyword options (these stay inside algo_dict and are
    # forwarded to `algo`; they are read here only for the injection helpers).
    nproc = algo_dict.get("nproc", 1)
    imlib = algo_dict.get("imlib", "vip-fft")
    interpolation = algo_dict.get("interpolation", "lanczos4")
    scaling = algo_dict.get("scaling", None)

    # ---- input validation -------------------------------------------------
    if array.ndim != 3 and array.ndim != 4:
        raise TypeError("The input array is not a 3d or 4d cube")
    else:
        if array.ndim == 3:
            if array.shape[0] != parangles.shape[0]:
                msg = "Input parallactic angles vector has wrong length"
                raise TypeError(msg)
            if psf_template.ndim != 2:
                raise TypeError("Template PSF is not a frame or 2d array")
            # Largest allowed radial spacing so companions fit in the frame.
            maxfcsep = int((array.shape[1] / 2.0) / fwhm) - 1
            if fc_rad_sep < 3 or fc_rad_sep > maxfcsep:
                msg = "Too large separation between companions in the radial "
                msg += "patterns. Should lie between 3 and {}"
                raise ValueError(msg.format(maxfcsep))

        elif array.ndim == 4:
            if array.shape[1] != parangles.shape[0]:
                msg = "Input vector or parallactic angles has wrong length"
                raise TypeError(msg)
            if psf_template.ndim != 3:
                raise TypeError("Template PSF is not a frame, 3d array")
            if "scale_list" in algo_dict:
                # raise ValueError("Vector of wavelength not found")
                # else:
                if algo_dict["scale_list"].shape[0] != array.shape[0]:
                    raise TypeError("Input wavelength vector has wrong length")
            if np.isscalar(fwhm):
                maxfcsep = int((array.shape[2] / 2.0) / fwhm) - 1
            else:
                maxfcsep = int((array.shape[2] / 2.0) / np.amin(fwhm)) - 1
            if fc_rad_sep < 3 or fc_rad_sep > maxfcsep:
                msg = "Too large separation between companions in the "
                msg += "radial patterns. Should lie between 3 and {}"
                raise ValueError(msg.format(maxfcsep))

    if psf_template.shape[1] % 2 == 0:
        raise ValueError("Only odd-sized PSF is accepted")
    if not hasattr(algo, "__call__"):
        raise TypeError("Parameter `algo` must be a callable function")
    if not isinstance(inner_rad, int):
        raise TypeError("inner_rad must be an integer")

    # ---- default position angle of the first branch -----------------------
    angular_range = wedge[1] - wedge[0]
    if wedge != (0, 360) and (nbranch == 1):
        if not theta:  # no theta value provided
            theta = (wedge[0] + wedge[1]) / 2
    else:
        if not theta:
            theta = wedge[0]
    if not wedge[0] <= theta <= wedge[1]:
        raise TypeError("Theta value is out of the wedge range")

    if isinstance(fwhm, (np.ndarray, list)):
        fwhm_med = np.median(fwhm)
    else:
        fwhm_med = fwhm

    if verbose:
        start_time = time_ini()
    # ***********************************************************************
    # Compute noise in concentric annuli on the "empty frame"

    # TODO: Clean below?
    # Consider 3 cases depending on whether algo is (i) defined externally,
    # (ii) a VIP postproc algorithm; (iii) ineligible for contrast curves
    argl = getfullargspec(algo).args
    if "cube" in argl and "angle_list" in argl and "verbose" in argl:
        # (i) external algorithm with appropriate parameters [OK]
        pass
    else:
        # (ii) attempt to resolve the VIP <ALGO>_Params dataclass of the
        # algorithm's subpackage and use its attributes as the parameter list.
        algo_name = algo.__name__
        idx = algo.__module__.index('.', algo.__module__.index('.') + 1)
        mod = algo.__module__[:idx]
        tmp = __import__(mod, fromlist=[algo_name.upper()+'_Params'])
        algo_params = getattr(tmp, algo_name.upper()+'_Params')
        argl = [attr for attr in dir(algo_params)]
        if "cube" in argl and "angle_list" in argl and "verbose" in argl:
            # (ii) a VIP postproc algorithm [OK]
            pass
        else:
            # (iii) ineligible routine for contrast curves [Raise error]
            msg = "Ineligible algo for contrast curve function. algo should "
            msg += "have parameters 'cube', 'angle_list' and 'verbose'"
            raise TypeError(msg)

    # ---- process the companion-free cube ----------------------------------
    if "cube" in argl and "angle_list" in argl and "verbose" in argl:
        if "fwhm" in argl:
            frame_nofc = algo(cube=array, angle_list=parangles, fwhm=fwhm_med,
                              verbose=False, **algo_dict)
            # NOTE(review): .pop mutates algo_dict — every subsequent algo()
            # call in this function will no longer receive 'scaling'. Confirm
            # this is intended (the local `scaling` was captured above).
            if algo_dict.pop("scaling", None):
                # Re-run without scaling to measure noise in unscaled frame.
                new_algo_dict = algo_dict.copy()
                new_algo_dict["scaling"] = None
                frame_nofc_noscal = algo(cube=array, angle_list=parangles,
                                         fwhm=fwhm_med, verbose=False,
                                         **new_algo_dict)
            else:
                frame_nofc_noscal = frame_nofc
        else:
            frame_nofc = algo(cube=array, angle_list=parangles, verbose=False,
                              **algo_dict)
            if algo_dict.pop("scaling", None):
                new_algo_dict = algo_dict.copy()
                new_algo_dict["scaling"] = None
                frame_nofc_noscal = algo(cube=array, angle_list=parangles,
                                         verbose=False, **new_algo_dict)
            else:
                frame_nofc_noscal = frame_nofc
    else:
        msg = "The algorithm used should take input parameters named 'cube', "
        msg += "'angle_list' and 'verbose'."
        raise ValueError(msg)

    if verbose:
        msg1 = "Cube without fake companions processed with {}"
        print(msg1.format(algo.__name__))
        timing(start_time)

    # Radial sampling step for the noise estimation (defaults to 1 px).
    if noise_sep is None:
        sep = fwhm_med
    else:
        sep = noise_sep

    noise, res_level, vector_radd = noise_per_annulus(frame_nofc,
                                                      separation=sep,
                                                      fwhm=fwhm_med,
                                                      wedge=wedge)
    if scaling is not None:
        noise_noscal, _, _ = noise_per_annulus(frame_nofc_noscal,
                                               separation=sep,
                                               fwhm=fwhm_med, wedge=wedge)
    else:
        noise_noscal = noise.copy()

    # Discard annuli inside the requested inner radius (in FWHM units).
    vector_radd = vector_radd[inner_rad - 1:]
    noise = noise[inner_rad - 1:]
    res_level = res_level[inner_rad - 1:]
    noise_noscal = noise_noscal[inner_rad - 1:]
    if verbose:
        print("Measured annulus-wise noise in resulting frame")
        timing(start_time)

    # We crop the PSF and check if PSF has been normalized (so that flux in
    # 1*FWHM aperture = 1) and fix if needed
    new_psf_size = int(round(3 * fwhm_med))
    if new_psf_size % 2 == 0:
        new_psf_size += 1

    if cube.ndim == 3:
        n, y, x = array.shape
        psf_template = normalize_psf(
            psf_template,
            fwhm=fwhm,
            verbose=verbose,
            size=min(new_psf_size, psf_template.shape[1]),
        )

        # Initialize the fake companions
        angle_branch = angular_range / nbranch
        thruput_arr = np.zeros((nbranch, noise.shape[0]))
        fc_map_all = np.zeros((nbranch * fc_rad_sep, y, x))
        frame_fc_all = np.zeros((nbranch * fc_rad_sep, y, x))
        cy, cx = frame_center(array[0])

        # each branch is computed separately
        for br in range(nbranch):
            # each pattern is computed separately. For each one the companions
            # are separated by "fc_rad_sep * fwhm", interleaving the injections
            for irad in range(fc_rad_sep):
                radvec = vector_radd[irad::fc_rad_sep]
                cube_fc = array.copy()
                # filling map with small numbers
                fc_map = np.ones_like(array[0]) * 1e-6
                fcy = []
                fcx = []
                for i in range(radvec.shape[0]):
                    # Flux scaled so each companion has ~fc_snr vs local noise.
                    flux = fc_snr * noise_noscal[irad + i * fc_rad_sep]
                    cube_fc = cube_inject_companions(
                        cube_fc,
                        psf_template,
                        parangles,
                        flux,
                        rad_dists=[radvec[i]],
                        theta=br * angle_branch + theta,
                        nproc=nproc,
                        imlib=imlib,
                        interpolation=interpolation,
                        copy_array=False,
                        verbose=False,
                    )
                    # NOTE(review): y and x here shadow the image dimensions
                    # unpacked above; harmless after the arrays are allocated,
                    # but intentional shadowing — do not reorder.
                    y = cy + radvec[i] * \
                        np.sin(np.deg2rad(br * angle_branch + theta))
                    x = cx + radvec[i] * \
                        np.cos(np.deg2rad(br * angle_branch + theta))
                    fc_map = frame_inject_companion(
                        fc_map, psf_template, y, x, flux, imlib, interpolation
                    )
                    fcy.append(y)
                    fcx.append(x)

                if verbose:
                    msg2 = "Fake companions injected in branch {} "
                    msg2 += "(pattern {}/{})"
                    print(msg2.format(br + 1, irad + 1, fc_rad_sep))
                    timing(start_time)

                # ***************************************************************
                # TODO: Clean below?
                # Consider 3 cases depending on whether algo is
                # (i) defined externally,
                # (ii) a VIP postproc algorithm;
                # (iii) ineligible for contrast curves
                ar = getfullargspec(algo).args
                if "cube" in ar and "angle_list" in ar and "verbose" in ar:
                    # (i) external algorithm with appropriate parameters [OK]
                    pass
                else:
                    algo_name = algo.__name__
                    idx = algo.__module__.index(
                        '.', algo.__module__.index('.') + 1)
                    mod = algo.__module__[:idx]
                    tmp = __import__(
                        mod, fromlist=[algo_name.upper()+'_Params'])
                    algo_params = getattr(tmp, algo_name.upper()+'_Params')
                    ar = [attr for attr in dir(algo_params)]
                    if "cube" in ar and "angle_list" in ar and "verbose" in ar:
                        # (ii) a VIP postproc algorithm [OK]
                        pass
                    else:
                        # (iii) ineligible routine [Raise error]
                        msg = "Ineligible algo for contrast curve function. "
                        msg += "algo should have parameters 'cube', "
                        msg += "'angle_list' and 'verbose'"
                        raise TypeError(msg)
                if "fwhm" in ar:
                    frame_fc = algo(
                        cube=cube_fc,
                        angle_list=parangles,
                        fwhm=fwhm_med,
                        verbose=False,
                        **algo_dict,
                    )
                else:
                    frame_fc = algo(
                        cube=cube_fc,
                        angle_list=parangles,
                        verbose=False,
                        **algo_dict,
                    )

                if verbose:
                    msg3 = "Cube with fake companions processed with {}"
                    msg3 += "\nMeasuring its annulus-wise throughput"
                    print(msg3.format(algo.__name__))
                    timing(start_time)

                # **************************************************************
                # Throughput = recovered flux (frame with fc minus empty
                # frame) over injected flux, per companion aperture.
                injected_flux = aperture_flux(fc_map, fcy, fcx, fwhm_med)
                recovered_flux = aperture_flux(
                    (frame_fc - frame_nofc), fcy, fcx, fwhm_med
                )
                thruput = recovered_flux / injected_flux
                thruput[np.where(thruput < 0)] = 0

                thruput_arr[br, irad::fc_rad_sep] = thruput
                fc_map_all[br * fc_rad_sep + irad, :, :] = fc_map
                frame_fc_all[br * fc_rad_sep + irad, :, :] = frame_fc

    elif cube.ndim == 4:
        w, n, y, x = array.shape
        if isinstance(fwhm, (int, float)):
            fwhm = [fwhm] * w
        psf_template = normalize_psf(
            psf_template,
            fwhm=fwhm,
            verbose=verbose,
            size=min(new_psf_size, psf_template.shape[1]),
        )

        # Initialize the fake companions
        angle_branch = angular_range / nbranch
        thruput_arr = np.zeros((nbranch, noise.shape[0]))
        fc_map_all = np.zeros((nbranch * fc_rad_sep, w, y, x))
        frame_fc_all = np.zeros((nbranch * fc_rad_sep, y, x))
        cy, cx = frame_center(array[0, 0])

        # each branch is computed separately
        for br in range(nbranch):
            # each pattern is computed separately. For each pattern the
            # companions are separated by "fc_rad_sep * fwhm"
            # radius = vector_radd[irad::fc_rad_sep]
            for irad in range(fc_rad_sep):
                radvec = vector_radd[irad::fc_rad_sep]
                # Spread companions azimuthally in this pattern.
                thetavec = range(int(theta), int(theta) +
                                 360, 360 // len(radvec))
                cube_fc = array.copy()
                # filling map with small numbers
                fc_map = np.ones_like(array[:, 0]) * 1e-6
                fcy = []
                fcx = []
                for i in range(radvec.shape[0]):
                    flux = fc_snr * noise_noscal[irad + i * fc_rad_sep]
                    cube_fc = cube_inject_companions(
                        cube_fc,
                        psf_template,
                        parangles,
                        flux,
                        rad_dists=[radvec[i]],
                        theta=thetavec[i],
                        verbose=False,
                        imlib=imlib,
                        interpolation=interpolation,
                    )
                    # NOTE(review): the injection above uses theta=thetavec[i],
                    # while the (y, x) positions below add br * angle_branch.
                    # For br > 0 these disagree, so the measurement aperture
                    # may miss the injected companion — confirm intended.
                    y = cy + radvec[i] * np.sin(
                        np.deg2rad(br * angle_branch + thetavec[i])
                    )
                    x = cx + radvec[i] * np.cos(
                        np.deg2rad(br * angle_branch + thetavec[i])
                    )
                    fc_map = frame_inject_companion(
                        fc_map, psf_template, y, x, flux)
                    fcy.append(y)
                    fcx.append(x)

                if verbose:
                    msg2 = "Fake companions injected in branch {} "
                    msg2 += "(pattern {}/{})"
                    print(msg2.format(br + 1, irad + 1, fc_rad_sep))
                    timing(start_time)

                # **************************************************************
                # TODO: Clean below?
                # Consider 3 cases depending on whether algo is
                # (i) defined externally,
                # (ii) a VIP postproc algorithm;
                # (iii) ineligible for contrast curves
                ar = getfullargspec(algo).args
                if "cube" in ar and "angle_list" in ar and "verbose" in ar:
                    # (i) external algorithm with appropriate parameters [OK]
                    pass
                else:
                    algo_name = algo.__name__
                    idx = algo.__module__.index(
                        '.', algo.__module__.index('.') + 1)
                    mod = algo.__module__[:idx]
                    tmp = __import__(
                        mod, fromlist=[algo_name.upper()+'_Params'])
                    algo_params = getattr(tmp, algo_name.upper()+'_Params')
                    ar = [attr for attr in dir(algo_params)]
                    if "cube" in ar and "angle_list" in ar and "verbose" in ar:
                        # (ii) a VIP postproc algorithm [OK]
                        pass
                    else:
                        # (iii) ineligible routine [Raise error]
                        msg = "Ineligible algo for contrast curve function. "
                        msg += "algo should have parameters 'cube', "
                        msg += "'angle_list' and 'verbose'"
                        raise TypeError(msg)
                if "fwhm" in ar:
                    frame_fc = algo(
                        cube=cube_fc,
                        angle_list=parangles,
                        fwhm=fwhm_med,
                        verbose=False,
                        **algo_dict,
                    )
                else:
                    frame_fc = algo(
                        cube=cube_fc,
                        angle_list=parangles,
                        verbose=False,
                        **algo_dict,
                    )

                if verbose:
                    msg3 = "Cube with fake companions processed with {}"
                    msg3 += "\nMeasuring its annulus-wise throughput"
                    print(msg3.format(algo.__name__))
                    timing(start_time)

                # *************************************************************
                # Injected flux is averaged over the spectral channels.
                injected_flux = [
                    aperture_flux(fc_map[i], fcy, fcx, fwhm[i])
                    for i in range(array.shape[0])
                ]
                injected_flux = np.mean(injected_flux, axis=0)
                recovered_flux = aperture_flux(
                    (frame_fc - frame_nofc), fcy, fcx, fwhm_med
                )
                thruput = recovered_flux / injected_flux
                thruput[np.where(thruput < 0)] = 0

                thruput_arr[br, irad::fc_rad_sep] = thruput
                fc_map_all[br * fc_rad_sep + irad, :, :] = fc_map
                frame_fc_all[br * fc_rad_sep + irad, :, :] = frame_fc

    if verbose:
        msg = "Finished measuring the throughput in {} branches"
        print(msg.format(nbranch))
        timing(start_time)

    if full_output:
        return (
            thruput_arr,
            noise,
            res_level,
            vector_radd,
            frame_fc_all,
            frame_nofc,
            fc_map_all,
        )
    else:
        return thruput_arr, vector_radd


def noise_per_annulus(array, separation, fwhm, init_rad=None, wedge=(0, 360),
                      verbose=False, debug=False):
    """Measure the noise and mean residual level as the standard deviation\
    and mean, respectively, of apertures defined in each annulus with a given\
    separation.

    The annuli start at init_rad (= fwhm by default if not provided) and stop
    2*separation before the edge of the frame.

    Parameters
    ----------
    array : numpy ndarray
        Input frame.
    separation : float
        Separation in pixels of the centers of the annuli measured from the
        center of the frame.
    fwhm : float
        FWHM in pixels.
    init_rad : float
        Initial radial distance to be used. If None then the init_rad = FWHM.
    wedge : tuple of floats, optional
        Initial and Final angles for using a wedge. For example (-90,90) only
        considers the right side of an image. Be careful when using small
        wedges, this leads to computing a standard deviation of very small
        samples (<10 values).
    verbose : bool, optional
        If True prints information.
    debug : bool, optional
        If True plots the positioning of the apertures.

    Returns
    -------
    noise : numpy ndarray
        Vector with the noise value per annulus.
    res_level : numpy ndarray
        Vector with the mean residual level per annulus.
    vector_radd : numpy ndarray
        Vector with the radial distances values.

    """

    def find_coords(rad, sep, init_angle, fin_angle):
        # Evenly distribute non-overlapping aperture centers (spacing `sep`)
        # along the circle of radius `rad`, restricted to the wedge.
        angular_range = fin_angle - init_angle
        npoints = (np.deg2rad(angular_range) * rad) / sep  # (2*np.pi*rad)/sep
        ang_step = angular_range / npoints  # 360/npoints
        x = []
        y = []
        for i in range(int(npoints)):
            newx = rad * np.cos(np.deg2rad(ang_step * i + init_angle))
            newy = rad * np.sin(np.deg2rad(ang_step * i + init_angle))
            x.append(newx)
            y.append(newy)
        return np.array(y), np.array(x)

    ###

    if array.ndim != 2:
        raise TypeError("Input array is not a frame or 2d array")
    if not isinstance(wedge, tuple):
        raise TypeError(
            "Wedge must be a tuple with the initial and final " "angles")

    if init_rad is None:
        init_rad = fwhm

    init_angle, fin_angle = wedge
    centery, centerx = frame_center(array)
    # Number of annuli fitting between init_rad and the frame edge.
    n_annuli = int(np.floor((centery - init_rad) / separation)) - 1
    noise = []
    res_level = []
    vector_radd = []
    if verbose:
        print("{} annuli".format(n_annuli))

    if debug:
        _, ax = plt.subplots(figsize=(6, 6))
        ax.imshow(
            array, origin="lower", interpolation="nearest", alpha=0.5,
            cmap="gray"
        )

    for i in range(n_annuli):
        y = centery + init_rad + separation * i
        rad = dist(centery, centerx, y, centerx)
        yy, xx = find_coords(rad, fwhm, init_angle, fin_angle)
        yy += centery
        xx += centerx

        # Aperture photometry at every sampled position in the annulus;
        # std -> noise, mean -> residual level.
        apertures = CircularAperture(np.array((xx, yy)).T, fwhm / 2)
        fluxes = aperture_photometry(array, apertures)
        fluxes = np.array(fluxes["aperture_sum"])

        noise_ann = np.std(fluxes)
        mean_ann = np.mean(fluxes)
        noise.append(noise_ann)
        res_level.append(mean_ann)
        vector_radd.append(rad)

        if debug:
            for j in range(xx.shape[0]):
                # Circle takes coordinates as (X,Y)
                aper = plt.Circle(
                    (xx[j], yy[j]), radius=fwhm / 2, color="r", fill=False,
                    alpha=0.8
                )
                ax.add_patch(aper)
                cent = plt.Circle(
                    (xx[j], yy[j]), radius=0.8, color="r", fill=True, alpha=0.5
                )
                ax.add_patch(cent)

        if verbose:
            print("Radius(px) = {}, Noise = {:.3f} ".format(rad, noise_ann))

    return np.array(noise), np.array(res_level), np.array(vector_radd)


def aperture_flux(array, yc, xc, fwhm, ap_factor=1, mean=False, verbose=False):
    """Return the sum of pixel values in a circular aperture centered on the\
    input coordinates. The radius of the aperture is set as (ap_factor*fwhm)/2.

    Parameters
    ----------
    array : numpy ndarray
        Input frame.
    yc, xc : list or 1d arrays
        List of y and x coordinates of sources.
    fwhm : float
        FWHM in pixels.
    ap_factor : int, optional
        Diameter of aperture in terms of the FWHM.
    mean : bool, optional
        If True, returns the mean pixel value in a disk instead of the exact
        aperture sum.
    verbose : bool, optional
        If True prints the coordinates and flux of each object.

    Returns
    -------
    flux : list of floats
        List of fluxes.

    Note
    ----
    From Photutils documentation, the aperture photometry defines the aperture
    using one of 3 methods:

    'center': A pixel is considered to be entirely in or out of the aperture
    depending on whether its center is in or out of the aperture.
    'subpixel': A pixel is divided into subpixels and the center of each
    subpixel is tested (as above).
    'exact': (default) The exact overlap between the aperture and each pixel is
    calculated.

    """
    n_obj = len(yc)
    flux = np.zeros((n_obj))
    for i, (y, x) in enumerate(zip(yc, xc)):
        if mean:
            ind = disk((y, x), (ap_factor * fwhm) / 2)
            values = array[ind]
            obj_flux = np.mean(values)
        else:
            aper = CircularAperture((x, y), (ap_factor * fwhm) / 2)
            obj_flux = aperture_photometry(array, aper, method="exact")
            obj_flux = np.array(obj_flux["aperture_sum"])
        # NOTE(review): when mean=True, obj_flux is a scalar and obj_flux[0]
        # would raise IndexError — confirm whether the mean branch is ever
        # exercised by callers.
        flux[i] = float(obj_flux[0])

        if verbose:
            print("Coordinates of object {} : ({},{})".format(i, y, x))
            print("Object Flux = {:.2f}".format(flux[i]))

    return flux