diff --git a/python/lsst/summit/utils/guiders/detection.py b/python/lsst/summit/utils/guiders/detection.py index a2c95bdc..42b67ac8 100644 --- a/python/lsst/summit/utils/guiders/detection.py +++ b/python/lsst/summit/utils/guiders/detection.py @@ -43,6 +43,8 @@ from .reading import GuiderData +log = logging.getLogger(__name__) + _DEFAULT_COLUMNS: str = ( "trackid detector expid elapsed_time dalt daz dtheta dx dy " "fwhm xroi yroi xccd yccd xroi_ref yroi_ref xccd_ref yccd_ref " @@ -87,6 +89,11 @@ class GuiderStarTrackerConfig: Aperture size in arcseconds for star detection. gain : `float` Gain factor for the guider data, used in flux calculations. + nFallbackStamps : `int` + Number of individual stamps to sample when coadd detection fails. + peakSnrThreshold : `float` + Minimum peak pixel SNR to consider a stamp non-empty during + single-stamp fallback detection. """ minSnr: float = 10.0 @@ -96,6 +103,8 @@ class GuiderStarTrackerConfig: cutOutSize: int = 50 aperSizeArcsec: float = 5.0 gain: float = 1.0 + nFallbackStamps: int = 10 + peakSnrThreshold: float = 5.0 def trackStarAcrossStamp( @@ -330,6 +339,7 @@ def runSourceDetection( if not footprints: return pd.DataFrame(columns=DEFAULT_COLUMNS) + nFootprints = len(footprints.getFootprints()) results = [] for fp in footprints.getFootprints(): # Create a cutout of the image around the footprint @@ -338,6 +348,11 @@ def runSourceDetection( if not star.empty: results.append(star) if not results: + if nFootprints > 0: + log.warning( + f"FootprintSet found {nFootprints} sources but GalSim " + f"failed on all of them (cutOutSize={cutOutSize})." + ) return pd.DataFrame(columns=DEFAULT_COLUMNS) df = pd.concat([sf for sf in results], ignore_index=True) return df @@ -373,11 +388,17 @@ def measureStarOnStamp( StarMeasurement object with populated fields (may be empty on failure). 
""" cutout = getCutouts(stamp, refCenter, cutoutSize=cutOutSize) - data = cutout.data + data = cutout.data.copy() - if np.all(data == 0) | (not np.isfinite(data).all()): + if np.all(data == 0): return StarMeasurement() + # Replace NaN (from border-clipped cutouts) with the median so + # GalSim can still measure stars near the stamp edge. + nan_mask = ~np.isfinite(data) + if nan_mask.any(): + data[nan_mask] = np.nanmedian(data) + # 1) Subtract the background annulus = (apertureRadius * 1.0, apertureRadius * 2) dataBkgSub, bkgStd = annulusBackgroundSubtraction(data, annulus) @@ -439,7 +460,7 @@ def runGalSim( ellipticity = np.sqrt(e1**2 + e2**2) nEff = 2 * np.pi * sigma**2 * np.sqrt(1 - ellipticity**2) shotNoise = np.sqrt(nEff * bkgStd**2) - fluxErr = np.sqrt(flux / gain + shotNoise**2) + fluxErr = np.sqrt(max(0, flux / gain) + shotNoise**2) snr = flux / (shotNoise + 1e-9) if shotNoise > 0 else 0.0 # Calculate second moments @@ -597,6 +618,69 @@ def makeEllipticalGaussianStar( return image +def _detectOnSingleStamps( + guiderData: GuiderData, + guiderName: str, + config: GuiderStarTrackerConfig, + apertureRadius: int, + log: logging.Logger, +) -> pd.DataFrame: + """Try source detection on individual stamps when coadd detection fails. + + Samples up to ``config.nFallbackStamps`` stamps evenly spread across + the sequence. Returns the detection with the highest SNR, or an empty + DataFrame if no star is found on any stamp. + + A stamp is considered truly empty if its peak pixel SNR + (max - median) / std is below ``config.peakSnrThreshold``. 
+ """ + nStamps = len(guiderData) + if nStamps == 0: + return pd.DataFrame(columns=DEFAULT_COLUMNS) + + nSample = min(config.nFallbackStamps, nStamps) + indices = np.linspace(0, nStamps - 1, nSample, dtype=int) + + bestSources = pd.DataFrame(columns=DEFAULT_COLUMNS) + bestSnr = 0.0 + nTrulyEmpty = 0 + + for idx in indices: + stamp = guiderData[guiderName, idx].astype(np.float32) + arr = stamp - np.nanmin(stamp) + + # Quick peak-SNR check before running full detection + med = np.nanmedian(arr) + std = np.nanstd(arr) + if std > 0: + peakSnr = (np.nanmax(arr) - med) / std + else: + peakSnr = 0.0 + + if peakSnr < config.peakSnrThreshold: + nTrulyEmpty += 1 + continue + + sources = runSourceDetection( + arr, + threshold=config.minSnr, + apertureRadius=apertureRadius, + cutOutSize=config.cutOutSize, + gain=config.gain, + ) + if not sources.empty and sources["snr"].max() > bestSnr: + bestSnr = sources["snr"].max() + bestSources = sources + + if not bestSources.empty: + log.info( + f"Single-stamp fallback recovered source on {guiderName} " + f"(SNR={bestSnr:.1f}, {nTrulyEmpty}/{nSample} stamps empty)" + ) + + return bestSources + + def buildReferenceCatalog( guiderData: GuiderData, log: logging.Logger, @@ -638,10 +722,18 @@ def buildReferenceCatalog( cutOutSize=cutOutSize, gain=gain, ) + detectionMethod = "coadd" + if sources.empty: + # Fallback: try detection on individual stamps. The coadd can + # wash out stars that drift between frames or have artifacts. + sources = _detectOnSingleStamps(guiderData, guiderName, config, apertureRadius, log) + detectionMethod = "single_stamp" if sources.empty: log.warning(f"No sources detected in `buildReferenceCatalog`for {guiderName} in {expId}. 
") continue + sources["detection_method"] = detectionMethod + sources.sort_values(by=["snr"], ascending=False, inplace=True) sources.reset_index(drop=True, inplace=True) @@ -681,23 +773,39 @@ def getCutouts(imageArray: np.ndarray, refCenter: tuple[float, float], cutoutSiz return Cutout2D(imageArray, (refX, refY), size=cutoutSize, mode="partial", fill_value=np.nan) -def isBlankImage(image: np.ndarray, fluxMin: int = 300) -> bool: +def isBlankImage(image: np.ndarray, fluxMin: float = 300, peakSnrMin: float = 5.0) -> bool: """ Returns True if the image has no significant source (e.g., no star). + An image is considered non-blank if the peak flux above the median + exceeds ``fluxMin`` OR the peak pixel SNR (using MAD-based robust + std) exceeds ``peakSnrMin``. + Parameters ---------- - image : 2D array - Image data (float or int). - fluxMin : float - Minimum deviation from the background median to be considered a source. + image : `np.ndarray` + 2D image data. + fluxMin : `float` + Minimum peak flux above median (ADU) to consider non-blank. + peakSnrMin : `float` + Minimum peak pixel SNR to consider non-blank. Returns ------- bool True if the image is blank, False otherwise. 
- (no pixel deviates more than flux_min) """ med = np.nanmedian(image) - diff = np.abs(image - med) - return not np.any(diff > fluxMin) + peakFlux = np.nanmax(image) - med + + # Absolute flux check + if peakFlux > fluxMin: + return False + + # SNR check with MAD-based robust std + mad = np.nanmedian(np.abs(image - med)) + std = 1.4826 * mad + if std <= 0: + return True + peakSnr = peakFlux / std + return peakSnr < peakSnrMin diff --git a/python/lsst/summit/utils/guiders/reading.py b/python/lsst/summit/utils/guiders/reading.py index 2ca65206..c1799f9c 100644 --- a/python/lsst/summit/utils/guiders/reading.py +++ b/python/lsst/summit/utils/guiders/reading.py @@ -701,6 +701,7 @@ def get( seqNum: int, doSubtractMedian: bool = True, columnMaskK: float = 50.0, + biasPercentile: float = 10.0, scienceDetNum: int = 94, ) -> GuiderData: """ @@ -713,9 +714,12 @@ def get( seqNum : `int` Sequence number. doSubtractMedian : `bool`, optional - If True, subtract median row bias from each stamp. + If True, subtract column bias from each stamp. columnMaskK : `float`, optional Threshold factor for column mask detection. + biasPercentile : `float`, optional + Percentile (0-100) for column bias estimation. + Lower values avoid star/trail contamination. scienceDetNum : `int`, optional Science detector number for WCS reference. @@ -748,6 +752,7 @@ def get( rawStampsDict, doSubtractMedian, columnMaskK, + biasPercentile, ) guiderData = GuiderData( seqNum=seqNum, @@ -797,9 +802,10 @@ def processStamps( rawStampsDict: dict[str, Stamps], doSubtractMedian: bool, columnMaskK: float, + biasPercentile: float = 10.0, ) -> dict[str, Stamps]: """ - Apply median row bias subtraction and per-stamp column masking. + Apply column bias subtraction and per-stamp column masking. Parameters @@ -807,9 +813,11 @@ def processStamps( rawStampsDict : `dict[str, Stamps]` ROI view of stamps from butler `guider_raw`. doSubtractMedian : `bool`, optional - If True, subtract median row bias. 
+ If True, subtract column bias. columnMaskK : `float`, optional Threshold factor for column mask detection. + biasPercentile : `float`, optional + Percentile for column bias estimation. Returns ------- @@ -829,23 +837,28 @@ def processStamps( # Work on a copy - never modify original data = stamps[i].stamp_im.image.array.copy() - # Compute median row bias - medianRows = np.nanmedian(data, axis=0) + # Compute column bias using low percentile to avoid + # star contamination (median pulls dark dips at star cols). + percRows = np.nanpercentile(data, biasPercentile, axis=0) + rPerc = np.nanpercentile(data, biasPercentile) medianValue = np.nanmedian(data) - # Replace NaN medians (fully masked columns) with global median - medianRows = np.where(np.isnan(medianRows), medianValue, medianRows) + # Replace NaN values (fully masked columns) with global median + percRows = np.where(np.isnan(percRows), medianValue, percRows) # Compute column mask on bias-subtracted data - colMask = getColumnMask(data - medianRows[np.newaxis, :], k=columnMaskK) + colMask = getColumnMask( + data - percRows[np.newaxis, :] - (rPerc - medianValue), + k=columnMaskK, + ) # Create mask array for MaskedImageF (BAD=1 for masked cols) nRows, nCols = data.shape maskArray = np.zeros((nRows, nCols), dtype=np.uint32) maskArray[colMask] = 1 # Set BAD bit for masked columns - # Apply median row bias subtraction + # Apply column bias subtraction if doSubtractMedian: - data = data - medianRows[np.newaxis, :] + data = data - percRows[np.newaxis, :] - (rPerc - medianValue) # Fill masked columns with global median if colMask.any(): diff --git a/python/lsst/summit/utils/guiders/tracking.py b/python/lsst/summit/utils/guiders/tracking.py index 9b64ed18..738d111d 100644 --- a/python/lsst/summit/utils/guiders/tracking.py +++ b/python/lsst/summit/utils/guiders/tracking.py @@ -95,6 +95,7 @@ def trackGuiderStars(self, refCatalog: None | pd.DataFrame = None) -> pd.DataFra DataFrame with tracked stars and their properties, 
including positions, fluxes, and residual offsets. """ + _refCatalog = None if refCatalog is None: self.log.info("Using self-generated refcat") _refCatalog = buildReferenceCatalog(self.guiderData, self.log, self.config) @@ -108,7 +109,23 @@ def trackGuiderStars(self, refCatalog: None | pd.DataFrame = None) -> pd.DataFra for guiderName in self.guiderData.guiderNames: refDet = refCatalog.query(f"detector == '{guiderName}'") if refDet.empty: - self.log.warning(f"No stars found in reference catalog for {guiderName} in {self.expid}.") + # Diagnose: check what was in the unfiltered refcat + preCut = _refCatalog.query(f"detector == '{guiderName}'") if _refCatalog is not None else None + if preCut is not None and not preCut.empty: + best = preCut.sort_values("snr", ascending=False).iloc[0] + reason = _diagnoseQualityCutRejections( + preCut.sort_values("snr", ascending=False).iloc[:1], self.shape, self.config + )[0] + self.log.warning( + f"No stars in refcat for {guiderName} in {self.expid}. " + f"Best candidate: flux={best['flux']:.0f} snr={best['snr']:.1f} " + f"e={np.hypot(best['e1'], best['e2']):.2f} fwhm={best['fwhm']:.1f} " + f"pos=({best['xroi']:.0f},{best['yroi']:.0f}). Rejected: {reason}" + ) + else: + self.log.warning( + f"No stars detected for {guiderName} in {self.expid} " f"(empty before quality cuts)." + ) continue table = self._trackStarForOneGuider(refDet, guiderName) @@ -336,6 +353,60 @@ def applyQualityCuts( return stars[mask].copy() +def _diagnoseQualityCutRejections( + stars: pd.DataFrame, shape: tuple[float, float], config: GuiderStarTrackerConfig +) -> list[str]: + """Return per-row rejection reasons using the same masks as + applyQualityCuts. + + Parameters + ---------- + stars : `pd.DataFrame` + Pre-cut DataFrame (rows that were all rejected). + shape : `tuple[float, float]` + ROI shape (rows, cols). + config : `GuiderStarTrackerConfig` + Configuration with quality cut thresholds. 
+ + Returns + ------- + reasons : `list[str]` + One string per row describing which cuts failed. + """ + minSnr = config.minSnr + maxEllipticity = config.maxEllipticity + edgeMargin = config.edgeMargin + roiCols, roiRows = shape + + # Same masks as applyQualityCuts + snrOk = (stars["snr"] >= minSnr) & (stars["flux"] > 0) & (stars["flux_err"] > 0) + + eabs = np.hypot(stars["e1"], stars["e2"]) + ellipOk = (stars["e1"].abs() <= maxEllipticity) & (stars["e2"].abs() <= maxEllipticity) + ellipOk &= eabs <= maxEllipticity + + edgeOk = ( + (stars["xroi"] >= edgeMargin) + & (stars["xroi"] <= roiRows - edgeMargin) + & (stars["yroi"] >= edgeMargin) + & (stars["yroi"] <= roiCols - edgeMargin) + ) + + reasons = [] + for i, (_, row) in enumerate(stars.iterrows()): + r = [] + if not snrOk.iloc[i]: + snr = row["snr"] + r.append(f"snr={snr:.1f}" if np.isfinite(snr) else "snr=NaN") + if not ellipOk.iloc[i]: + e = np.hypot(row["e1"], row["e2"]) + r.append(f"e={e:.2f}" if np.isfinite(e) else "e=NaN") + if not edgeOk.iloc[i]: + r.append(f"edge(x={row['xroi']:.0f},y={row['yroi']:.0f})") + reasons.append("|".join(r) if r else "?") + return reasons + + def setUniqueId(guiderData: GuiderData, stars: pd.DataFrame) -> pd.DataFrame: """ Assign unique IDs to tracked stars.