From aed5fe14ea6d588540164ae64e0ad4680ed07002 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 9 Aug 2023 14:24:34 -0400 Subject: [PATCH 01/36] Add pre-commit file Include all the base changes --- .github/labeler.yml | 2 +- .github/pull_request_template.md | 2 +- .github/workflows/build.yml | 1 - .github/workflows/ci.yml | 2 +- .github/workflows/label_pull_request.yml | 2 +- .gitignore | 2 +- .pre-commit-config.yaml | 55 ++++++++++++++ CHANGES.rst | 2 +- README.md | 2 +- docs/Makefile | 1 - docs/stcal/alignment/description.rst | 2 +- docs/stcal/jump/description.rst | 2 +- docs/stcal/package_index.rst | 2 +- docs/stcal/ramp_fitting/description.rst | 88 +++++++++++----------- pyproject.toml | 2 +- src/stcal/ramp_fitting/ols_cas22/_fit.pyx | 2 +- src/stcal/ramp_fitting/ols_cas22/_jump.pyx | 12 +-- 17 files changed, 117 insertions(+), 64 deletions(-) create mode 100644 .pre-commit-config.yaml diff --git a/.github/labeler.yml b/.github/labeler.yml index 65a38200..32b3d93b 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -43,4 +43,4 @@ ramp_fitting: saturation: - '**/*saturation*' - - '**/*saturation*/**' \ No newline at end of file + - '**/*saturation*/**' diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 2752658c..8f551808 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,4 +1,4 @@ - Resolves [JP-nnnn](https://jira.stsci.edu/browse/JP-nnnn) Resolves [RCAL-nnnn](https://jira.stsci.edu/browse/RCAL-nnnn) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 718510c7..4bf8f0a0 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -25,4 +25,3 @@ jobs: sdist: true secrets: pypi_token: ${{ secrets.PYPI_PASSWORD_STSCI_MAINTAINER }} - diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b35acca3..9591f594 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -43,7 +43,7 @@ jobs: setenv: | CRDS_PATH: /tmp/crds_cache CRDS_CLIENT_RETRY_COUNT: 3 - CRDS_CLIENT_RETRY_DELAY_SECONDS: 20 + CRDS_CLIENT_RETRY_DELAY_SECONDS: 20 envs: | - linux: py311-jwst-cov-xdist - linux: py311-romancal-cov-xdist diff --git a/.github/workflows/label_pull_request.yml b/.github/workflows/label_pull_request.yml index aee1b221..4e8b3fb4 100644 --- a/.github/workflows/label_pull_request.yml +++ b/.github/workflows/label_pull_request.yml @@ -13,4 +13,4 @@ jobs: - uses: actions/labeler@v4 if: github.event_name == 'pull_request_target' || github.event_name == 'pull_request' with: - repo-token: "${{ secrets.GITHUB_TOKEN }}" \ No newline at end of file + repo-token: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.gitignore b/.gitignore index 2afbcecc..d35d7af2 100644 --- a/.gitignore +++ b/.gitignore @@ -151,4 +151,4 @@ docs/source/api .DS_Store # VSCode stuff -.vscode \ No newline at end of file +.vscode diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..02a27ca8 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,55 @@ +repos: + +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 + hooks: + - id: check-added-large-files + - id: check-ast + - id: check-case-conflict + - id: check-yaml + args: ["--unsafe"] + - id: check-toml + - id: check-merge-conflict + - id: check-symlinks + - id: debug-statements + - id: detect-private-key + - id: end-of-file-fixer + - id: trailing-whitespace + +# - repo: https://github.com/pre-commit/pygrep-hooks +# rev: v1.10.0 +# hooks: +# - id: 
python-check-blanket-noqa +# - id: python-check-mock-methods +# - id: rst-directive-colons +# - id: rst-inline-touching-normal +# - id: text-unicode-replacement-char + +# - repo: https://github.com/asottile/pyupgrade +# rev: 'v3.10.1' +# hooks: +# - id: pyupgrade +# args: ["--py39-plus"] + +# - repo: https://github.com/astral-sh/ruff-pre-commit +# rev: 'v0.0.282' +# hooks: +# - id: ruff +# args: ["--fix"] + +# - repo: https://github.com/pycqa/isort +# rev: 5.12.0 +# hooks: +# - id: isort + +# - repo: https://github.com/psf/black +# rev: 23.7.0 +# hooks: +# - id: black + +# - repo: https://github.com/PyCQA/bandit +# rev: 1.7.5 +# hooks: +# - id: bandit +# args: ["-c", "pyproject.toml"] +# additional_dependencies: ["bandit[toml]"] diff --git a/CHANGES.rst b/CHANGES.rst index 03572974..639206bf 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -30,7 +30,7 @@ Bug Fixes Other ----- -- +- 1.4.4 (2023-09-15) ================== diff --git a/README.md b/README.md index 305213bc..540bb821 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ STScI Calibration algorithms and tools. **Linux and MacOS platforms are tested and supported. Windows is not currently supported.** -**If installing on MacOS Mojave 10.14, you must install +**If installing on MacOS Mojave 10.14, you must install into an environment with python 3.9. Installation will fail on python 3.10 due to lack of a stable build for dependency ``opencv-python``.** diff --git a/docs/Makefile b/docs/Makefile index 1235f237..af9ba19d 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -76,4 +76,3 @@ linkcheck: livehtml: sphinx-autobuild -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - diff --git a/docs/stcal/alignment/description.rst b/docs/stcal/alignment/description.rst index a537e476..f08b86e5 100644 --- a/docs/stcal/alignment/description.rst +++ b/docs/stcal/alignment/description.rst @@ -1,4 +1,4 @@ Description ============ -This sub-package contains all the modules common to all missions. \ No newline at end of file +This sub-package contains all the modules common to all missions. diff --git a/docs/stcal/jump/description.rst b/docs/stcal/jump/description.rst index 81f67acf..c2d4298f 100644 --- a/docs/stcal/jump/description.rst +++ b/docs/stcal/jump/description.rst @@ -40,4 +40,4 @@ Note that any ramp values flagged as SATURATED in the input GROUPDQ array are not used in any of the above calculations and hence will never be marked as containing a jump. -.. _Anderson&Gordon2011: https://ui.adsabs.harvard.edu/abs/2011PASP..123.1237A \ No newline at end of file +.. _Anderson&Gordon2011: https://ui.adsabs.harvard.edu/abs/2011PASP..123.1237A diff --git a/docs/stcal/package_index.rst b/docs/stcal/package_index.rst index b68f11b5..e47e63c7 100644 --- a/docs/stcal/package_index.rst +++ b/docs/stcal/package_index.rst @@ -6,4 +6,4 @@ Package Index jump/index.rst ramp_fitting/index.rst - alignment/index.rst \ No newline at end of file + alignment/index.rst diff --git a/docs/stcal/ramp_fitting/description.rst b/docs/stcal/ramp_fitting/description.rst index 14556043..8664815f 100644 --- a/docs/stcal/ramp_fitting/description.rst +++ b/docs/stcal/ramp_fitting/description.rst @@ -15,12 +15,12 @@ more detail below. The count rate for each pixel is determined by a linear fit to the cosmic-ray-free and saturation-free ramp intervals for each pixel; hereafter -this interval will be referred to as a "segment." The fitting algorithm uses an +this interval will be referred to as a "segment." 
The fitting algorithm uses an 'optimal' weighting scheme, as described by Fixsen et al, PASP, 112, 1350. Segments are determined using the 4-D GROUPDQ array of the input data set, under the assumption that the jump step will have already flagged CR's. Segments are terminated where -saturation flags are found. Pixels are processed simultaneously in blocks +saturation flags are found. Pixels are processed simultaneously in blocks using the array-based functionality of numpy. The size of the block depends on the image size and the number of groups. @@ -46,19 +46,19 @@ If the input dataset has only a single group in each integration, the count rate for all unsaturated pixels in that integration will be calculated as the value of the science data in that group divided by the group time. If the input dataset has only two groups per integration, the count rate for all -unsaturated pixels in each integration will be calculated using the differences +unsaturated pixels in each integration will be calculated using the differences between the two valid groups of the science data. -For datasets having more than a single group in each integration, a ramp having -a segment with only a single group is processed differently depending on the +For datasets having more than a single group in each integration, a ramp having +a segment with only a single group is processed differently depending on the number and size of the other segments in the ramp. If a ramp has only one segment and that segment contains a single group, the count rate will be calculated to be the value of the science data in that group divided by the group time. If a ramp has a segment having a single group, and at least one other segment having more -than one good group, only data from the segment(s) having more than a single +than one good group, only data from the segment(s) having more than a single good group will be used to calculate the count rate. -The data are checked for ramps in which there is good data in the first group, +The data are checked for ramps in which there is good data in the first group, but all first differences for the ramp are undefined because the remainder of the groups are either saturated or affected by cosmic rays. For such ramps, the first differences will be set to equal the data in the first group. The @@ -67,14 +67,14 @@ first difference is used to estimate the slope of the ramp, as explained in the If any input dataset contains ramps saturated in their second group, the count rates for those pixels in that integration will be calculated as the value -of the science data in the first group divided by the group time. +of the science data in the first group divided by the group time. The MIRI first frame correction step flags all pixels in the first group of each integration, so that those data do not get used in either the jump detection -or ramp fitting steps. -Similarly, the MIRI last frame correction step flags all pixels in the last +or ramp fitting steps. +Similarly, the MIRI last frame correction step flags all pixels in the last group of each integration. -The ramp fitting will only fit data if there are at least 2 good groups +The ramp fitting will only fit data if there are at least 2 good groups of data and will log a warning otherwise. All Cases @@ -86,7 +86,7 @@ After computing the slopes for all segments for a given pixel, the final slope i determined as a weighted average from all segments in all integrations, and is written as the primary output product. 
In this output product, the 4-D GROUPDQ from all integrations is collapsed into 2-D, merged -(using a bitwise OR) with the input 2-D PIXELDQ, and stored as a 2-D DQ array. +(using a bitwise OR) with the input 2-D PIXELDQ, and stored as a 2-D DQ array. The 3-D VAR_POISSON and VAR_RNOISE arrays from all integrations are averaged into corresponding 2-D output arrays. There is a case where the median rate for a pixel can be computed as negative. This value is used in the numerator @@ -100,7 +100,7 @@ arrays in this product corresponds to the result for a given integration. In th product, the GROUPDQ data for a given integration is collapsed into 2-D, which is then merged with the input 2-D PIXELDQ to create the output DQ array for each integration. The 3-D VAR_POISSON and VAR_RNOISE arrays are -calculated by averaging over the fit segments in the corresponding 4-D +calculated by averaging over the fit segments in the corresponding 4-D variance arrays. A third, optional output product is also available and is produced only when @@ -113,7 +113,7 @@ due to read noise only for each segment of each pixel, respectively. The y-inter to the result of the fit at an effective exposure time of zero. This product also contains a 3-D array called PEDESTAL, which gives the signal at zero exposure time for each pixel, and the 4-D CRMAG array, which contains the magnitude of -each group that was flagged as having a CR hit. By default, the name of this +each group that was flagged as having a CR hit. By default, the name of this output file will have the suffix "_fitopt". In this optional output product, the pedestal array is calculated for each integration by extrapolating the final slope (the weighted @@ -130,9 +130,9 @@ Slope and Variance Calculations +++++++++++++++++++++++++++++++ Slopes and their variances are calculated for each segment, for each integration, and for the entire exposure. As defined above, a segment is a set of contiguous -groups where none of the groups are saturated or cosmic ray-affected. The -appropriate slopes and variances are output to the primary output product, the -integration-specific output product, and the optional output product. The +groups where none of the groups are saturated or cosmic ray-affected. The +appropriate slopes and variances are output to the primary output product, the +integration-specific output product, and the optional output product. The following is a description of these computations. The notation in the equations is the following: the type of noise (when appropriate) will appear as the superscript ‘R’, ‘P’, or ‘C’ for readnoise, Poisson noise, or combined, respectively; @@ -194,16 +194,16 @@ Segment-specific Computations: ------------------------------ The variance of the slope of a segment due to read noise is: -.. math:: +.. math:: var^R_{s} = \frac{12 \ R^2 }{ (ngroups_{s}^3 - ngroups_{s})(tgroup^2) } \,, -where :math:`R` is the noise in the difference between 2 frames, -:math:`ngroups_{s}` is the number of groups in the segment, and :math:`tgroup` is the group -time in seconds (from the keyword TGROUP). +where :math:`R` is the noise in the difference between 2 frames, +:math:`ngroups_{s}` is the number of groups in the segment, and :math:`tgroup` is the group +time in seconds (from the keyword TGROUP). -The variance of the slope in a segment due to Poisson noise is: +The variance of the slope in a segment due to Poisson noise is: -.. math:: +.. 
math:: var^P_{s} = \frac{ slope_{est} }{ tgroup \times gain\ (ngroups_{s} -1)} \,, where :math:`gain` is the gain for the pixel (from the GAIN reference file), @@ -211,55 +211,55 @@ in e/DN. The :math:`slope_{est}` is an overall estimated slope of the pixel, calculated by taking the median of the first differences of the groups that are unaffected by saturation and cosmic rays, in all integrations. This is a more robust estimate of the slope than the segment-specific slope, which may be noisy -for short segments. +for short segments. -The combined variance of the slope of a segment is the sum of the variances: +The combined variance of the slope of a segment is the sum of the variances: -.. math:: +.. math:: var^C_{s} = var^R_{s} + var^P_{s} Integration-specific computations: ----------------------------------- +---------------------------------- The variance of the slope for an integration due to read noise is: -.. math:: +.. math:: var^R_{i} = \frac{1}{ \sum_{s} \frac{1}{ var^R_{s} }} \,, where the sum is over all segments in the integration. -The variance of the slope for an integration due to Poisson noise is: +The variance of the slope for an integration due to Poisson noise is: -.. math:: - var^P_{i} = \frac{1}{ \sum_{s} \frac{1}{ var^P_{s}}} +.. math:: + var^P_{i} = \frac{1}{ \sum_{s} \frac{1}{ var^P_{s}}} The combined variance of the slope for an integration due to both Poisson and read -noise is: +noise is: -.. math:: +.. math:: var^C_{i} = \frac{1}{ \sum_{s} \frac{1}{ var^R_{s} + var^P_{s}}} The slope for an integration depends on the slope and the combined variance of each segment's slope: -.. math:: +.. math:: slope_{i} = \frac{ \sum_{s}{ \frac{slope_{s}} {var^C_{s}}}} { \sum_{s}{ \frac{1} {var^C_{s}}}} Exposure-level computations: ---------------------------- -The variance of the slope due to read noise depends on a sum over all integrations: +The variance of the slope due to read noise depends on a sum over all integrations: -.. math:: - var^R_{o} = \frac{1}{ \sum_{i} \frac{1}{ var^R_{i}}} +.. math:: + var^R_{o} = \frac{1}{ \sum_{i} \frac{1}{ var^R_{i}}} -The variance of the slope due to Poisson noise is: +The variance of the slope due to Poisson noise is: -.. math:: +.. math:: var^P_{o} = \frac{1}{ \sum_{i} \frac{1}{ var^P_{i}}} -The combined variance of the slope is the sum of the variances: +The combined variance of the slope is the sum of the variances: -.. math:: +.. math:: var^C_{o} = var^R_{o} + var^P_{o} The square root of the combined variance is stored in the ERR array of the primary output. @@ -267,7 +267,7 @@ The square root of the combined variance is stored in the ERR array of the prima The overall slope depends on the slope and the combined variance of the slope of each integration's segments, so is a sum over integrations and segments: -.. math:: +.. math:: slope_{o} = \frac{ \sum_{i,s}{ \frac{slope_{i,s}} {var^C_{i,s}}}} { \sum_{i,s}{ \frac{1} {var^C_{i,s}}}} @@ -280,7 +280,7 @@ Error Propagation Error propagation in the ramp fitting step is implemented by storing the square-root of the exposure-level combined variance in the ERR array of the primary output product. This combined variance of the exposure-level slope is the sum -of the variance of the slope due to the Poisson noise and the variance of the +of the variance of the slope due to the Poisson noise and the variance of the slope due to the read noise. These two variances are also separately written to the extensions VAR_POISSON and VAR_RNOISE in the primary output. 
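
The integration- and exposure-level expressions above are ordinary
inverse-variance weighting. A minimal NumPy sketch of the per-integration
combination (array values are illustrative only, not stcal code)::

    import numpy as np

    # Per-segment slopes and variances for one integration (made-up numbers).
    slope_s = np.array([1.02, 0.98, 1.05])  # segment slopes, DN/s
    var_r_s = np.array([0.04, 0.03, 0.05])  # read-noise variances
    var_p_s = np.array([0.02, 0.02, 0.03])  # Poisson variances

    var_c_s = var_r_s + var_p_s                                  # var^C_s
    var_r_i = 1.0 / np.sum(1.0 / var_r_s)                        # var^R_i
    var_p_i = 1.0 / np.sum(1.0 / var_p_s)                        # var^P_i
    var_c_i = 1.0 / np.sum(1.0 / var_c_s)                        # var^C_i
    slope_i = np.sum(slope_s / var_c_s) / np.sum(1.0 / var_c_s)  # weighted slope

The same pattern repeats at the exposure level, with the per-integration
variances taking the place of the per-segment ones.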
@@ -289,7 +289,7 @@ Poisson noise is written to the VAR_POISSON extension in the integration-specific product, and the variance of the per-integration slope due to read noise is written to the VAR_RNOISE extension. The square-root of the combined variance of the slope due to both Poisson and read noise -is written to the ERR extension. +is written to the ERR extension. For the optional output product, the variance of the slope due to the Poisson noise of the segment-specific slope is written to the VAR_POISSON extension. diff --git a/pyproject.toml b/pyproject.toml index d07ed759..16503c87 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -98,4 +98,4 @@ exclude = [ archs = ["x86_64", "arm64"] [tool.cibuildwheel.linux] -archs = ["auto", "aarch64"] \ No newline at end of file +archs = ["auto", "aarch64"] diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit.pyx index 87aaf02d..2bc2069f 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit.pyx @@ -230,6 +230,6 @@ def fit_ramps(float[:, :] resultants, # Cast memory views into numpy arrays for ease of use in python. return RampFitOutputs(np.array(parameters, dtype=np.float32), - np.array(variances, dtype=np.float32), + np.array(variances, dtype=np.float32), dq, ramp_fits if include_diagnostic else None) diff --git a/src/stcal/ramp_fitting/ols_cas22/_jump.pyx b/src/stcal/ramp_fitting/ols_cas22/_jump.pyx index 4ace423b..8b8969f7 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_jump.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_jump.pyx @@ -231,7 +231,7 @@ cdef inline float _correction(float[:] t_bar, RampIndex ramp, float slope): Compute the correction factor for the variance used by a statistic - slope / (t_bar[end] - t_bar[start]) - + Parameters ---------- t_bar : float[:] @@ -268,7 +268,7 @@ cdef inline float _statstic(float local_slope, var_read_noise = read_noise ** 2 * (1/n_reads[i + j] + 1/n_reads[i]) var_slope_coeff = tau[i + j] + tau[i] - 2 * min(t_bar[i + j], t_bar[i]) t_bar_diff_sqr = (t_bar[i + j] - t_bar[i]) ** 2 - + Parameters ---------- local_slope : float @@ -290,7 +290,7 @@ cdef inline float _statstic(float local_slope, """ cdef float delta = local_slope - slope - cdef float var = (var_read_noise + slope * var_slope_coeff) / t_bar_diff_sqr + cdef float var = (var_read_noise + slope * var_slope_coeff) / t_bar_diff_sqr return delta / sqrt(var + correct) @@ -385,7 +385,7 @@ cdef inline (int, float) _fit_statistic(float[:, :] pixel, argmax = stat_index return argmax, max_stat - + @boundscheck(False) @wraparound(False) @@ -429,7 +429,7 @@ cdef inline JumpFits fit_jumps(float[:] resultants, fixed : float[:, :] The jump detection pre-computed values for a given read_pattern pixel : float[:, :] - A pre-allocated array for the jump detection fixed values for the + A pre-allocated array for the jump detection fixed values for the given pixel. This will be modified in place, it is passed in to avoid re-allocating it for each pixel. 
thresh : Thresh @@ -592,4 +592,4 @@ cdef inline JumpFits fit_jumps(float[:] resultants, # Multiply poisson term by flux, (no negative fluxes) ramp_fits.average.poisson_var *= max(ramp_fits.average.slope, 0) - return ramp_fits \ No newline at end of file + return ramp_fits From a31d88f5db4efdca7aa428cb087e94ffad4f858a Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 9 Aug 2023 14:26:34 -0400 Subject: [PATCH 02/36] Fix pygrep --- .pre-commit-config.yaml | 16 ++++++++-------- CHANGES.rst | 2 +- src/stcal/alignment/util.py | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 02a27ca8..4ee68f60 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,14 +16,14 @@ repos: - id: end-of-file-fixer - id: trailing-whitespace -# - repo: https://github.com/pre-commit/pygrep-hooks -# rev: v1.10.0 -# hooks: -# - id: python-check-blanket-noqa -# - id: python-check-mock-methods -# - id: rst-directive-colons -# - id: rst-inline-touching-normal -# - id: text-unicode-replacement-char +- repo: https://github.com/pre-commit/pygrep-hooks + rev: v1.10.0 + hooks: + - id: python-check-blanket-noqa + - id: python-check-mock-methods + - id: rst-directive-colons + - id: rst-inline-touching-normal + - id: text-unicode-replacement-char # - repo: https://github.com/asottile/pyupgrade # rev: 'v3.10.1' diff --git a/CHANGES.rst b/CHANGES.rst index 639206bf..beb7aa78 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -218,7 +218,7 @@ Other ----- - Remove use of deprecated ``pytest-openfiles`` ``pytest`` plugin. This has been replaced by - catching ``ResourceWarning``s. [#159] + catching ``ResourceWarning``. [#159] 1.3.5 (2023-03-30) diff --git a/src/stcal/alignment/util.py b/src/stcal/alignment/util.py index 762be702..8be36d64 100644 --- a/src/stcal/alignment/util.py +++ b/src/stcal/alignment/util.py @@ -817,7 +817,7 @@ def _get_forward_transform_func(wcs1): y (str, ndarray), and origin (int). 
The origin should be between 0, and 1 https://docs.astropy.org/en/latest/wcs/index.html#loading-wcs-information-from-a-fits-file ) - """ # noqa : E501 + """ # noqa: E501 if isinstance(wcs1, fitswcs.WCS): forward_transform = wcs1.all_pix2world elif isinstance(wcs1, gwcs.WCS): From 4b07900e795e8d08a946a931608c9d0c281616d2 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 14:13:56 -0500 Subject: [PATCH 03/36] Add ruff to pre-commit --- .pre-commit-config.yaml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4ee68f60..3b71a35c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -31,11 +31,12 @@ repos: # - id: pyupgrade # args: ["--py39-plus"] -# - repo: https://github.com/astral-sh/ruff-pre-commit -# rev: 'v0.0.282' -# hooks: -# - id: ruff -# args: ["--fix"] +- repo: https://github.com/astral-sh/ruff-pre-commit + rev: 'v0.1.4' + hooks: + - id: ruff + args: ["--fix"] + # - repo: https://github.com/pycqa/isort # rev: 5.12.0 From 1c2ee2849edc9aecc676767ac3c41f12dea91837 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 14:15:52 -0500 Subject: [PATCH 04/36] Apply ruff formatter --- .pre-commit-config.yaml | 3 +- setup.py | 18 +- src/stcal/__init__.py | 2 +- src/stcal/alignment/util.py | 112 +-- src/stcal/dark_current/dark_class.py | 5 +- src/stcal/dark_current/dark_sub.py | 61 +- src/stcal/jump/jump.py | 552 ++++++----- src/stcal/jump/twopoint_difference.py | 214 +++-- src/stcal/linearity/linearity.py | 44 +- src/stcal/ramp_fitting/gls_fit.py | 335 ++++--- src/stcal/ramp_fitting/ols_cas22/__init__.py | 2 +- src/stcal/ramp_fitting/ols_cas22_fit.py | 9 +- src/stcal/ramp_fitting/ols_fit.py | 913 +++++++++++++------ src/stcal/ramp_fitting/ramp_fit.py | 51 +- src/stcal/ramp_fitting/ramp_fit_class.py | 4 +- src/stcal/ramp_fitting/utils.py | 224 ++--- src/stcal/saturation/saturation.py | 42 +- tests/test_alignment.py | 41 +- tests/test_dark_current.py | 42 +- tests/test_dq.py | 8 +- tests/test_jump.py | 380 ++++---- tests/test_jump_cas22.py | 174 ++-- tests/test_linearity.py | 56 +- tests/test_ramp_fitting.py | 585 +++++++----- tests/test_ramp_fitting_cas22.py | 49 +- tests/test_ramp_fitting_cases.py | 318 ++++--- tests/test_ramp_fitting_gls_fit.py | 341 +++---- tests/test_saturation.py | 142 +-- tests/test_twopoint_difference.py | 599 ++++++------ 29 files changed, 2996 insertions(+), 2330 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3b71a35c..b6839fa5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -35,7 +35,8 @@ repos: rev: 'v0.1.4' hooks: - id: ruff - args: ["--fix"] + args: ["--fix", "--show-fixes"] + - id: ruff-format # - repo: https://github.com/pycqa/isort diff --git a/setup.py b/setup.py index ffb85902..bd962aab 100644 --- a/setup.py +++ b/setup.py @@ -8,22 +8,22 @@ extensions = [ Extension( - 'stcal.ramp_fitting.ols_cas22._ramp', - ['src/stcal/ramp_fitting/ols_cas22/_ramp.pyx'], + "stcal.ramp_fitting.ols_cas22._ramp", + ["src/stcal/ramp_fitting/ols_cas22/_ramp.pyx"], include_dirs=[np.get_include()], - language='c++' + language="c++", ), Extension( - 'stcal.ramp_fitting.ols_cas22._jump', - ['src/stcal/ramp_fitting/ols_cas22/_jump.pyx'], + "stcal.ramp_fitting.ols_cas22._jump", + ["src/stcal/ramp_fitting/ols_cas22/_jump.pyx"], include_dirs=[np.get_include()], - language='c++' + language="c++", ), Extension( - 'stcal.ramp_fitting.ols_cas22._fit', - ['src/stcal/ramp_fitting/ols_cas22/_fit.pyx'], + 
"stcal.ramp_fitting.ols_cas22._fit", + ["src/stcal/ramp_fitting/ols_cas22/_fit.pyx"], include_dirs=[np.get_include()], - language='c++' + language="c++", ), ] diff --git a/src/stcal/__init__.py b/src/stcal/__init__.py index b869c093..495c9ffd 100644 --- a/src/stcal/__init__.py +++ b/src/stcal/__init__.py @@ -1,4 +1,4 @@ from ._version import version as __version__ -__all__ = ['__version__'] +__all__ = ["__version__"] diff --git a/src/stcal/alignment/util.py b/src/stcal/alignment/util.py index 8be36d64..66d4152f 100644 --- a/src/stcal/alignment/util.py +++ b/src/stcal/alignment/util.py @@ -66,9 +66,7 @@ def _calculate_fiducial_from_spatial_footprint( y_mid = (np.max(y) + np.min(y)) / 2.0 z_mid = (np.max(z) + np.min(z)) / 2.0 lon_fiducial = np.rad2deg(np.arctan2(y_mid, x_mid)) % 360.0 - lat_fiducial = np.rad2deg( - np.arctan2(z_mid, np.sqrt(x_mid**2 + y_mid**2)) - ) + lat_fiducial = np.rad2deg(np.arctan2(z_mid, np.sqrt(x_mid**2 + y_mid**2))) return lon_fiducial, lat_fiducial @@ -130,23 +128,14 @@ def _generate_tranform( # reshape the rotation matrix returned from calc_rotation_matrix # into the correct shape for constructing the transformation - pc = np.reshape( - calc_rotation_matrix(roll_ref, v3yangle, vparity=vparity), (2, 2) - ) + pc = np.reshape(calc_rotation_matrix(roll_ref, v3yangle, vparity=vparity), (2, 2)) - rotation = astmodels.AffineTransformation2D( - pc, name="pc_rotation_matrix" - ) + rotation = astmodels.AffineTransformation2D(pc, name="pc_rotation_matrix") transform = [rotation] if sky_axes: if not pscale: - pscale = compute_scale( - refmodel.meta.wcs, ref_fiducial, pscale_ratio=pscale_ratio - ) - transform.append( - astmodels.Scale(pscale, name="cdelt1") - & astmodels.Scale(pscale, name="cdelt2") - ) + pscale = compute_scale(refmodel.meta.wcs, ref_fiducial, pscale_ratio=pscale_ratio) + transform.append(astmodels.Scale(pscale, name="cdelt1") & astmodels.Scale(pscale, name="cdelt2")) if transform: transform = functools.reduce(lambda x, y: x | y, transform) @@ -179,9 +168,7 @@ def _get_axis_min_and_bounding_box(ref_model, wcs_list, ref_wcs): ((x0_lower, x0_upper), (x1_lower, x1_upper)). """ footprints = [w.footprint().T for w in wcs_list] - domain_bounds = np.hstack( - [ref_wcs.backward_transform(*f) for f in footprints] - ) + domain_bounds = np.hstack([ref_wcs.backward_transform(*f) for f in footprints]) axis_min_values = np.min(domain_bounds, axis=1) domain_bounds = (domain_bounds.T - axis_min_values).T @@ -266,26 +253,17 @@ def _calculate_offsets(fiducial, wcs, axis_min_values, crpix): find the pixel coordinates of the fiducial point and then correct it by the minimum pixel value for each axis. """ - if ( - crpix is None - and fiducial is not None - and wcs is not None - and axis_min_values is not None - ): + if crpix is None and fiducial is not None and wcs is not None and axis_min_values is not None: offset1, offset2 = wcs.backward_transform(*fiducial) offset1 -= axis_min_values[0] offset2 -= axis_min_values[1] else: offset1, offset2 = crpix - return astmodels.Shift(-offset1, name="crpix1") & astmodels.Shift( - -offset2, name="crpix2" - ) + return astmodels.Shift(-offset1, name="crpix1") & astmodels.Shift(-offset2, name="crpix2") -def _calculate_new_wcs( - ref_model, shape, wcs_list, fiducial, crpix=None, transform=None -): +def _calculate_new_wcs(ref_model, shape, wcs_list, fiducial, crpix=None, transform=None): """ Calculates a new WCS object based on the combined WCS objects provided. 
@@ -325,9 +303,7 @@ def _calculate_new_wcs( transform=transform, input_frame=ref_model.meta.wcs.input_frame, ) - axis_min_values, output_bounding_box = _get_axis_min_and_bounding_box( - ref_model, wcs_list, wcs_new - ) + axis_min_values, output_bounding_box = _get_axis_min_and_bounding_box(ref_model, wcs_list, wcs_new) offsets = _calculate_offsets( fiducial=fiducial, wcs=wcs_new, @@ -339,9 +315,7 @@ def _calculate_new_wcs( wcs_new.bounding_box = output_bounding_box if shape is None: - shape = [ - int(axs[1] - axs[0] + 0.5) for axs in output_bounding_box[::-1] - ] + shape = [int(axs[1] - axs[0] + 0.5) for axs in output_bounding_box[::-1]] wcs_new.pixel_shape = shape[::-1] wcs_new.array_shape = shape @@ -371,14 +345,10 @@ def _validate_wcs_list(wcs_list): instance of WCS. """ if not isiterable(wcs_list): - raise ValueError( - "Expected 'wcs_list' to be an iterable of WCS objects." - ) + raise ValueError("Expected 'wcs_list' to be an iterable of WCS objects.") elif len(wcs_list): if not all(isinstance(w, gwcs.WCS) for w in wcs_list): - raise TypeError( - "All items in 'wcs_list' are to be instances of gwcs.wcs.WCS." - ) + raise TypeError("All items in 'wcs_list' are to be instances of gwcs.wcs.WCS.") else: raise TypeError("'wcs_list' should not be empty.") @@ -420,9 +390,7 @@ def wcsinfo_from_model(input_model: SupportsDataWithWcs): pc = np.zeros((wcsaxes, wcsaxes), dtype=np.float32) for i in range(1, wcsaxes + 1): for j in range(1, wcsaxes + 1): - pc[i - 1, j - 1] = getattr( - input_model.meta.wcsinfo, "pc{0}_{1}".format(i, j), 1 - ) + pc[i - 1, j - 1] = getattr(input_model.meta.wcsinfo, "pc{0}_{1}".format(i, j), 1) wcsinfo["PC"] = pc wcsinfo["RADESYS"] = input_model.meta.coordinates.reference_frame wcsinfo["has_cd"] = False @@ -470,9 +438,7 @@ def compute_scale( spatial_idx = np.where(np.array(wcs.output_frame.axes_type) == "SPATIAL")[0] delta[spatial_idx[0]] = 1 - crpix_with_offsets = np.vstack( - (crpix, crpix + delta, crpix + np.roll(delta, 1)) - ).T + crpix_with_offsets = np.vstack((crpix, crpix + delta, crpix + np.roll(delta, 1))).T crval_with_offsets = wcs(*crpix_with_offsets, with_bounding_box=False) coords = SkyCoord( @@ -528,25 +494,19 @@ def compute_fiducial(wcslist: list, bounding_box=None) -> np.ndarray: axes_types = wcslist[0].output_frame.axes_type spatial_axes = np.array(axes_types) == "SPATIAL" spectral_axes = np.array(axes_types) == "SPECTRAL" - footprints = np.hstack( - [w.footprint(bounding_box=bounding_box).T for w in wcslist] - ) + footprints = np.hstack([w.footprint(bounding_box=bounding_box).T for w in wcslist]) spatial_footprint = footprints[spatial_axes] spectral_footprint = footprints[spectral_axes] fiducial = np.empty(len(axes_types)) if spatial_footprint.any(): - fiducial[spatial_axes] = _calculate_fiducial_from_spatial_footprint( - spatial_footprint - ) + fiducial[spatial_axes] = _calculate_fiducial_from_spatial_footprint(spatial_footprint) if spectral_footprint.any(): fiducial[spectral_axes] = spectral_footprint.min() return fiducial -def calc_rotation_matrix( - roll_ref: float, v3i_yangle: float, vparity: int = 1 -) -> List[float]: +def calc_rotation_matrix(roll_ref: float, v3i_yangle: float, vparity: int = 1) -> List[float]: """Calculate the rotation matrix. 
Parameters @@ -677,9 +637,7 @@ def wcs_from_footprints( _validate_wcs_list(wcs_list) - fiducial = _calculate_fiducial( - wcs_list=wcs_list, bounding_box=bounding_box, crval=crval - ) + fiducial = _calculate_fiducial(wcs_list=wcs_list, bounding_box=bounding_box, crval=crval) refmodel = dmodels[0] if refmodel is None else refmodel @@ -688,9 +646,7 @@ def wcs_from_footprints( pscale_ratio=pscale_ratio, pscale=pscale, rotation=rotation, - ref_fiducial=np.array( - [refmodel.meta.wcsinfo.ra_ref, refmodel.meta.wcsinfo.dec_ref] - ), + ref_fiducial=np.array([refmodel.meta.wcsinfo.ra_ref, refmodel.meta.wcsinfo.dec_ref]), transform=transform, ) @@ -729,9 +685,7 @@ def update_s_region_imaging(model, center=True): ### which means we are interested in each pixel's vertice, not its center. ### By using center=True, a difference of 0.5 pixel should be accounted for ### when comparing the world coordinates of the bounding box and the footprint. - footprint = model.meta.wcs.footprint( - bbox, center=center, axis_type="spatial" - ).T + footprint = model.meta.wcs.footprint(bbox, center=center, axis_type="spatial").T # take only imaging footprint footprint = footprint[:2, :] @@ -777,11 +731,9 @@ def update_s_region_keyword(model, footprint): String containing the S_REGION object. """ s_region = ( - "POLYGON ICRS " - " {0:.9f} {1:.9f}" - " {2:.9f} {3:.9f}" - " {4:.9f} {5:.9f}" - " {6:.9f} {7:.9f}".format(*footprint.flatten()) + "POLYGON ICRS " " {0:.9f} {1:.9f}" " {2:.9f} {3:.9f}" " {4:.9f} {5:.9f}" " {6:.9f} {7:.9f}".format( + *footprint.flatten() + ) ) if "nan" in s_region: # do not update s_region if there are NaNs. @@ -823,9 +775,7 @@ def _get_forward_transform_func(wcs1): elif isinstance(wcs1, gwcs.WCS): forward_transform = wcs1.forward_transform else: - raise TypeError( - "Expected input to be astropy.wcs.WCS or gwcs.WCS " "object" - ) + raise TypeError("Expected input to be astropy.wcs.WCS or gwcs.WCS " "object") return forward_transform def _get_backward_transform_func(wcs2): @@ -834,14 +784,10 @@ def _get_backward_transform_func(wcs2): elif isinstance(wcs2, gwcs.WCS): backward_transform = wcs2.backward_transform else: - raise TypeError( - "Expected input to be astropy.wcs.WCS or gwcs.WCS " "object" - ) + raise TypeError("Expected input to be astropy.wcs.WCS or gwcs.WCS " "object") return backward_transform - def _reproject( - x: Union[float, np.ndarray], y: Union[float, np.ndarray] - ) -> tuple: + def _reproject(x: Union[float, np.ndarray], y: Union[float, np.ndarray]) -> tuple: """ Reprojects the input coordinates from one WCS to another. @@ -871,9 +817,7 @@ def _reproject( flat_sky = [] for axis in sky: flat_sky.append(axis.flatten()) - det = np.array( - _get_backward_transform_func(wcs2)(flat_sky[0], flat_sky[1], 0) - ) + det = np.array(_get_backward_transform_func(wcs2)(flat_sky[0], flat_sky[1], 0)) det_reshaped = [] for axis in det: det_reshaped.append(axis.reshape(x.shape)) diff --git a/src/stcal/dark_current/dark_class.py b/src/stcal/dark_current/dark_class.py index bceef88f..d49ecb92 100644 --- a/src/stcal/dark_current/dark_class.py +++ b/src/stcal/dark_current/dark_class.py @@ -7,6 +7,7 @@ class DarkData: This class contains all data needed to perform the dark current subtraction step. """ + def __init__(self, dims=None, dark_model=None): """ Creates a class to remove data model dependencies in the internals of @@ -84,14 +85,14 @@ def __init__(self, science_model=None): populating the data. 
""" if science_model is not None: - if isinstance(science_model.data,u.Quantity): + if isinstance(science_model.data, u.Quantity): self.data = science_model.data.value else: self.data = science_model.data self.groupdq = science_model.groupdq self.pixeldq = science_model.pixeldq - if isinstance(science_model.err,u.Quantity): + if isinstance(science_model.err, u.Quantity): self.err = science_model.err.value else: self.err = science_model.err diff --git a/src/stcal/dark_current/dark_sub.py b/src/stcal/dark_current/dark_sub.py index 0ff3f953..ef92edd6 100644 --- a/src/stcal/dark_current/dark_sub.py +++ b/src/stcal/dark_current/dark_sub.py @@ -86,12 +86,18 @@ def do_correction_data(science_data, dark_data, dark_output=None): drk_groupgap = dark_data.exp_groupgap log.info( - 'Science data nints=%d, ngroups=%d, nframes=%d, groupgap=%d', - sci_nints, sci_ngroups, sci_nframes, sci_groupgap + "Science data nints=%d, ngroups=%d, nframes=%d, groupgap=%d", + sci_nints, + sci_ngroups, + sci_nframes, + sci_groupgap, ) log.info( - 'Dark data nints=%d, ngroups=%d, nframes=%d, groupgap=%d', - drk_nints, drk_ngroups, drk_nframes, drk_groupgap + "Dark data nints=%d, ngroups=%d, nframes=%d, groupgap=%d", + drk_nints, + drk_ngroups, + drk_nframes, + drk_groupgap, ) # Check that the number of groups in the science data does not exceed @@ -99,11 +105,9 @@ def do_correction_data(science_data, dark_data, dark_output=None): sci_total_frames = sci_ngroups * sci_nframes + (sci_ngroups - 1) * sci_groupgap drk_total_frames = drk_ngroups * drk_nframes + (drk_ngroups - 1) * drk_groupgap if sci_total_frames > drk_total_frames: - log.warning( - "Not enough data in dark reference file to match to science data." - ) + log.warning("Not enough data in dark reference file to match to science data.") log.warning("Input will be returned without subtracting dark current.") - science_data.cal_step = 'SKIPPED' + science_data.cal_step = "SKIPPED" out_data = copy.deepcopy(science_data) return out_data, None @@ -116,7 +120,7 @@ def do_correction_data(science_data, dark_data, dark_output=None): "greater than that of the science data." "Input will be returned without subtracting dark current." ) - science_data.cal_step = 'SKIPPED' + science_data.cal_step = "SKIPPED" out_data = copy.deepcopy(science_data) return out_data, None @@ -127,7 +131,6 @@ def do_correction_data(science_data, dark_data, dark_output=None): # nframes and groupgap settings. averaged_dark = None if sci_nframes == drk_nframes and sci_groupgap == drk_groupgap: - # They match, so we can subtract the dark ref file data directly output_data = subtract_dark(science_data, dark_data) @@ -140,7 +143,6 @@ def do_correction_data(science_data, dark_data, dark_output=None): averaged_dark.output_name = dark_output else: - # Create a frame-averaged version of the dark data to match # the nframes and groupgap settings of the science data. 
# If the data are from JWST/MIRI, the darks are integration-dependent @@ -151,9 +153,7 @@ def do_correction_data(science_data, dark_data, dark_output=None): dark_data, sci_nints, sci_ngroups, sci_nframes, sci_groupgap ) else: - averaged_dark = average_dark_frames_3d( - dark_data, sci_ngroups, sci_nframes, sci_groupgap - ) + averaged_dark = average_dark_frames_3d(dark_data, sci_ngroups, sci_nframes, sci_groupgap) # Save the frame-averaged dark data that was just created, # if requested by the user @@ -164,7 +164,7 @@ def do_correction_data(science_data, dark_data, dark_output=None): # Subtract the frame-averaged dark data from the science data output_data = subtract_dark(science_data, averaged_dark) - output_data.cal_step = 'COMPLETE' + output_data.cal_step = "COMPLETE" return output_data, averaged_dark @@ -213,17 +213,18 @@ def average_dark_frames_3d(dark_data, ngroups, nframes, groupgap): # If there's only 1 frame per group, just copy the dark frames if nframes == 1: - log.debug('copy dark frame %d', start) + log.debug("copy dark frame %d", start) avg_dark.data[group] = dark_data.data[start] avg_dark.err[group] = dark_data.err[start] # Otherwise average nframes into a new group: take the mean of # the SCI arrays and the quadratic sum of the ERR arrays. else: - log.debug('average dark frames %d to %d', start + 1, end) + log.debug("average dark frames %d to %d", start + 1, end) avg_dark.data[group] = dark_data.data[start:end].mean(axis=0) - avg_dark.err[group] = np.sqrt(np.add.reduce( - dark_data.err[start:end]**2, axis=0)) / (end - start) + avg_dark.err[group] = np.sqrt(np.add.reduce(dark_data.err[start:end] ** 2, axis=0)) / ( + end - start + ) # Skip over unused frames start = end + groupgap @@ -297,19 +298,20 @@ def average_dark_frames_4d(dark_data, nints, ngroups, nframes, groupgap): # If there's only 1 frame per group, just copy the dark frames if nframes == 1: - log.debug('copy dark frame %d', start) + log.debug("copy dark frame %d", start) avg_dark.data[it, group] = dark_data.data[it, start] avg_dark.err[it, group] = dark_data.err[it, start] # Otherwise average nframes into a new group: take the mean of # the SCI arrays and the quadratic sum of the ERR arrays. 
else: - log.debug('average dark frames %d to %d', start + 1, end) + log.debug("average dark frames %d to %d", start + 1, end) avg_dark.data[it, group] = dark_data.data[it, start:end].mean(axis=0) - avg_dark.err[it, group] = np.sqrt(np.add.reduce( - dark_data.err[it, start:end]**2, axis=0)) / (end - start) + avg_dark.err[it, group] = np.sqrt( + np.add.reduce(dark_data.err[it, start:end] ** 2, axis=0) + ) / (end - start) - # Skip over unused frames + # Skip over unused frames start = end + groupgap # Reset some metadata values for the averaged dark @@ -350,9 +352,13 @@ def subtract_dark(science_data, dark_data): else: dark_nints = 1 - log.debug("subtract_dark: nints=%d, ngroups=%d, size=%d,%d", - science_data.data.shape[0], science_data.data.shape[1], - science_data.data.shape[2], science_data.data.shape[3]) + log.debug( + "subtract_dark: nints=%d, ngroups=%d, size=%d,%d", + science_data.data.shape[0], + science_data.data.shape[1], + science_data.data.shape[2], + science_data.data.shape[3], + ) # Create output as a copy of the input science data model output = copy.deepcopy(science_data) @@ -372,7 +378,6 @@ def subtract_dark(science_data, dark_data): # Loop over all integrations in input science data for i in range(science_data.data.shape[0]): - if len(dark_data.data.shape) == 4: # MIRI data # Apply the first dark_nints-1 integrations from the dark ref file # to the first few science integrations. There's an additional diff --git a/src/stcal/jump/jump.py b/src/stcal/jump/jump.py index 7f97a23c..6b1b6dcf 100644 --- a/src/stcal/jump/jump.py +++ b/src/stcal/jump/jump.py @@ -15,28 +15,47 @@ log.setLevel(logging.DEBUG) -def detect_jumps(frames_per_group, data, gdq, pdq, err, - gain_2d, readnoise_2d, rejection_thresh, - three_grp_thresh, four_grp_thresh, max_cores, - max_jump_to_flag_neighbors, - min_jump_to_flag_neighbors, flag_4_neighbors, dqflags, - after_jump_flag_dn1=0.0, - after_jump_flag_n1=0, - after_jump_flag_dn2=0.0, - after_jump_flag_n2=0, - min_sat_area=1, - min_jump_area=5, - expand_factor=2.0, - use_ellipses=False, - sat_required_snowball=True, - expand_large_events=False, - sat_expand=2, min_sat_radius_extend=2.5, find_showers=False, - edge_size=25, extend_snr_threshold=1.2, extend_min_area=90, - extend_inner_radius=1, extend_outer_radius=2.6, - extend_ellipse_expand_ratio=1.2, grps_masked_after_shower=5, - max_extended_radius=200, minimum_groups=3, - minimum_sigclip_groups=100, only_use_ints=True): - +def detect_jumps( + frames_per_group, + data, + gdq, + pdq, + err, + gain_2d, + readnoise_2d, + rejection_thresh, + three_grp_thresh, + four_grp_thresh, + max_cores, + max_jump_to_flag_neighbors, + min_jump_to_flag_neighbors, + flag_4_neighbors, + dqflags, + after_jump_flag_dn1=0.0, + after_jump_flag_n1=0, + after_jump_flag_dn2=0.0, + after_jump_flag_n2=0, + min_sat_area=1, + min_jump_area=5, + expand_factor=2.0, + use_ellipses=False, + sat_required_snowball=True, + expand_large_events=False, + sat_expand=2, + min_sat_radius_extend=2.5, + find_showers=False, + edge_size=25, + extend_snr_threshold=1.2, + extend_min_area=90, + extend_inner_radius=1, + extend_outer_radius=2.6, + extend_ellipse_expand_ratio=1.2, + grps_masked_after_shower=5, + max_extended_radius=200, + minimum_groups=3, + minimum_sigclip_groups=100, + only_use_ints=True, +): """ This is the high-level controlling routine for the jump detection process. 
It loads and sets the various input data and parameters needed by each of @@ -201,7 +220,7 @@ def detect_jumps(frames_per_group, data, gdq, pdq, err, jump_flag = dqflags["JUMP_DET"] number_extended_events = 0 # Flag the pixeldq where the gain is <=0 or NaN so they will be ignored - wh_g = np.where(gain_2d <= 0.) + wh_g = np.where(gain_2d <= 0.0) if len(wh_g[0] > 0): pdq[wh_g] = np.bitwise_or(pdq[wh_g], dqflags["NO_GAIN_VALUE"]) pdq[wh_g] = np.bitwise_or(pdq[wh_g], dqflags["DO_NOT_USE"]) @@ -221,7 +240,7 @@ def detect_jumps(frames_per_group, data, gdq, pdq, err, after_jump_flag_e2 = after_jump_flag_dn2 * gain_2d # Apply the 2-point difference method as a first pass - log.info('Executing two-point difference method') + log.info("Executing two-point difference method") start = time.time() # Set parameters of input data shape @@ -231,48 +250,69 @@ def detect_jumps(frames_per_group, data, gdq, pdq, err, n_ints = data.shape[0] row_above_gdq = np.zeros((n_ints, n_groups, n_cols), dtype=np.uint8) - previous_row_above_gdq = np.zeros((n_ints, n_groups, n_cols), - dtype=np.uint8) + previous_row_above_gdq = np.zeros((n_ints, n_groups, n_cols), dtype=np.uint8) row_below_gdq = np.zeros((n_ints, n_groups, n_cols), dtype=np.uint8) # figure out how many slices to make based on 'max_cores' max_available = multiprocessing.cpu_count() n_slices = calc_num_slices(n_rows, max_cores, max_available) if n_slices == 1: - gdq, row_below_dq, row_above_dq, total_primary_crs, stddev = \ - twopt.find_crs(data, gdq, readnoise_2d, rejection_thresh, - three_grp_thresh, four_grp_thresh, frames_per_group, - flag_4_neighbors, max_jump_to_flag_neighbors, - min_jump_to_flag_neighbors, dqflags, - after_jump_flag_e1=after_jump_flag_e1, - after_jump_flag_n1=after_jump_flag_n1, - after_jump_flag_e2=after_jump_flag_e2, - after_jump_flag_n2=after_jump_flag_n2, copy_arrs=False, - minimum_groups=3, minimum_sigclip_groups=minimum_sigclip_groups, - only_use_ints=only_use_ints) + gdq, row_below_dq, row_above_dq, total_primary_crs, stddev = twopt.find_crs( + data, + gdq, + readnoise_2d, + rejection_thresh, + three_grp_thresh, + four_grp_thresh, + frames_per_group, + flag_4_neighbors, + max_jump_to_flag_neighbors, + min_jump_to_flag_neighbors, + dqflags, + after_jump_flag_e1=after_jump_flag_e1, + after_jump_flag_n1=after_jump_flag_n1, + after_jump_flag_e2=after_jump_flag_e2, + after_jump_flag_n2=after_jump_flag_n2, + copy_arrs=False, + minimum_groups=3, + minimum_sigclip_groups=minimum_sigclip_groups, + only_use_ints=only_use_ints, + ) # This is the flag that controls the flagging of snowballs. 
if expand_large_events: - total_snowballs = flag_large_events(gdq, jump_flag, sat_flag, min_sat_area=min_sat_area, - min_jump_area=min_jump_area, - expand_factor=expand_factor, - sat_required_snowball=sat_required_snowball, - min_sat_radius_extend=min_sat_radius_extend, - edge_size=edge_size, sat_expand=sat_expand, - max_extended_radius=max_extended_radius) - log.info('Total snowballs = %i' % total_snowballs) + total_snowballs = flag_large_events( + gdq, + jump_flag, + sat_flag, + min_sat_area=min_sat_area, + min_jump_area=min_jump_area, + expand_factor=expand_factor, + sat_required_snowball=sat_required_snowball, + min_sat_radius_extend=min_sat_radius_extend, + edge_size=edge_size, + sat_expand=sat_expand, + max_extended_radius=max_extended_radius, + ) + log.info("Total snowballs = %i" % total_snowballs) number_extended_events = total_snowballs if find_showers: - gdq, num_showers = find_faint_extended(data, gdq, readnoise_2d, - frames_per_group, minimum_sigclip_groups, - snr_threshold=extend_snr_threshold, - min_shower_area=extend_min_area, - inner=extend_inner_radius, - outer=extend_outer_radius, - sat_flag=sat_flag, jump_flag=jump_flag, - ellipse_expand=extend_ellipse_expand_ratio, - num_grps_masked=grps_masked_after_shower, - max_extended_radius=max_extended_radius) - log.info('Total showers= %i' % num_showers) + gdq, num_showers = find_faint_extended( + data, + gdq, + readnoise_2d, + frames_per_group, + minimum_sigclip_groups, + snr_threshold=extend_snr_threshold, + min_shower_area=extend_min_area, + inner=extend_inner_radius, + outer=extend_outer_radius, + sat_flag=sat_flag, + jump_flag=jump_flag, + ellipse_expand=extend_ellipse_expand_ratio, + num_grps_masked=grps_masked_after_shower, + max_extended_radius=max_extended_radius, + ) + log.info("Total showers= %i" % num_showers) number_extended_events = num_showers else: yinc = int(n_rows / n_slices) @@ -290,34 +330,56 @@ def detect_jumps(frames_per_group, data, gdq, pdq, err, copy_arrs = False # we don't need to copy arrays again in find_crs for i in range(n_slices - 1): - slices.insert(i, (data[:, :, i * yinc:(i + 1) * yinc, :], - gdq[:, :, i * yinc:(i + 1) * yinc, :], - readnoise_2d[i * yinc:(i + 1) * yinc, :], - rejection_thresh, three_grp_thresh, four_grp_thresh, - frames_per_group, flag_4_neighbors, - max_jump_to_flag_neighbors, - min_jump_to_flag_neighbors, dqflags, - after_jump_flag_e1, after_jump_flag_n1, - after_jump_flag_e2, after_jump_flag_n2, - copy_arrs, minimum_groups, minimum_sigclip_groups, - only_use_ints)) + slices.insert( + i, + ( + data[:, :, i * yinc : (i + 1) * yinc, :], + gdq[:, :, i * yinc : (i + 1) * yinc, :], + readnoise_2d[i * yinc : (i + 1) * yinc, :], + rejection_thresh, + three_grp_thresh, + four_grp_thresh, + frames_per_group, + flag_4_neighbors, + max_jump_to_flag_neighbors, + min_jump_to_flag_neighbors, + dqflags, + after_jump_flag_e1, + after_jump_flag_n1, + after_jump_flag_e2, + after_jump_flag_n2, + copy_arrs, + minimum_groups, + minimum_sigclip_groups, + only_use_ints, + ), + ) # last slice get the rest - slices.insert(n_slices - 1, (data[:, :, (n_slices - 1) * - yinc:n_rows, :], - gdq[:, :, (n_slices - 1) * - yinc:n_rows, :], - readnoise_2d[(n_slices - 1) * - yinc:n_rows, :], - rejection_thresh, three_grp_thresh, - four_grp_thresh, frames_per_group, - flag_4_neighbors, - max_jump_to_flag_neighbors, - min_jump_to_flag_neighbors, dqflags, - after_jump_flag_e1, after_jump_flag_n1, - after_jump_flag_e2, after_jump_flag_n2, - copy_arrs, minimum_groups, minimum_sigclip_groups, - only_use_ints)) + 
slices.insert( + n_slices - 1, + ( + data[:, :, (n_slices - 1) * yinc : n_rows, :], + gdq[:, :, (n_slices - 1) * yinc : n_rows, :], + readnoise_2d[(n_slices - 1) * yinc : n_rows, :], + rejection_thresh, + three_grp_thresh, + four_grp_thresh, + frames_per_group, + flag_4_neighbors, + max_jump_to_flag_neighbors, + min_jump_to_flag_neighbors, + dqflags, + after_jump_flag_e1, + after_jump_flag_n1, + after_jump_flag_e2, + after_jump_flag_n2, + copy_arrs, + minimum_groups, + minimum_sigclip_groups, + only_use_ints, + ), + ) log.info("Creating %d processes for jump detection " % n_slices) pool = multiprocessing.Pool(processes=n_slices) # Starts each slice in its own process. Starmap allows more than one @@ -334,24 +396,22 @@ def detect_jumps(frames_per_group, data, gdq, pdq, err, nrows = gdq.shape[2] ncols = gdq.shape[3] if only_use_ints: - stddev = np.zeros((ngrps - 1, nrows, ncols), - dtype=np.float32) + stddev = np.zeros((ngrps - 1, nrows, ncols), dtype=np.float32) else: - stddev = np.zeros((nrows, ncols), - dtype=np.float32) + stddev = np.zeros((nrows, ncols), dtype=np.float32) for resultslice in real_result: if len(real_result) == k + 1: # last result - gdq[:, :, k * yinc:n_rows, :] = resultslice[0] + gdq[:, :, k * yinc : n_rows, :] = resultslice[0] if only_use_ints: - stddev[:, k * yinc:n_rows, :] = resultslice[4] + stddev[:, k * yinc : n_rows, :] = resultslice[4] else: - stddev[k * yinc:n_rows, :] = resultslice[4] + stddev[k * yinc : n_rows, :] = resultslice[4] else: - gdq[:, :, k * yinc:(k + 1) * yinc, :] = resultslice[0] + gdq[:, :, k * yinc : (k + 1) * yinc, :] = resultslice[0] if only_use_ints: - stddev[:, k * yinc:(k + 1) * yinc, :] = resultslice[4] + stddev[:, k * yinc : (k + 1) * yinc, :] = resultslice[4] else: - stddev[k * yinc:(k + 1) * yinc, :] = resultslice[4] + stddev[k * yinc : (k + 1) * yinc, :] = resultslice[4] row_below_gdq[:, :, :] = resultslice[1] row_above_gdq[:, :, :] = resultslice[2] total_primary_crs += resultslice[3] @@ -360,45 +420,52 @@ def detect_jumps(frames_per_group, data, gdq, pdq, err, # row of the previous slice and flag any neighbors in the # bottom row of this slice saved from the top of the previous # slice - gdq[:, :, k * yinc - 1, :] = \ - np.bitwise_or(gdq[:, :, k * yinc - 1, :], - row_below_gdq[:, :, :]) - gdq[:, :, k * yinc, :] = \ - np.bitwise_or(gdq[:, :, k * yinc, :], - previous_row_above_gdq[:, :, :]) + gdq[:, :, k * yinc - 1, :] = np.bitwise_or(gdq[:, :, k * yinc - 1, :], row_below_gdq[:, :, :]) + gdq[:, :, k * yinc, :] = np.bitwise_or( + gdq[:, :, k * yinc, :], previous_row_above_gdq[:, :, :] + ) # save the neighbors to be flagged that will be in the next slice previous_row_above_gdq = row_above_gdq.copy() k += 1 # This is the flag that controls the flagging of snowballs. 
if expand_large_events: - total_snowballs = flag_large_events(gdq, jump_flag, sat_flag, - min_sat_area=min_sat_area, - min_jump_area=min_jump_area, - expand_factor=expand_factor, - sat_required_snowball=sat_required_snowball, - min_sat_radius_extend=min_sat_radius_extend, - edge_size=edge_size, sat_expand=sat_expand, - max_extended_radius=max_extended_radius) - log.info('Total snowballs = %i' % total_snowballs) + total_snowballs = flag_large_events( + gdq, + jump_flag, + sat_flag, + min_sat_area=min_sat_area, + min_jump_area=min_jump_area, + expand_factor=expand_factor, + sat_required_snowball=sat_required_snowball, + min_sat_radius_extend=min_sat_radius_extend, + edge_size=edge_size, + sat_expand=sat_expand, + max_extended_radius=max_extended_radius, + ) + log.info("Total snowballs = %i" % total_snowballs) number_extended_events = total_snowballs if find_showers: - gdq, num_showers = \ - find_faint_extended(data, gdq, readnoise_2d, - frames_per_group, minimum_sigclip_groups, - snr_threshold=extend_snr_threshold, - min_shower_area=extend_min_area, - inner=extend_inner_radius, - outer=extend_outer_radius, - sat_flag=sat_flag, - jump_flag=jump_flag, - ellipse_expand=extend_ellipse_expand_ratio, - num_grps_masked=grps_masked_after_shower, - max_extended_radius=max_extended_radius) - log.info('Total showers= %i' % num_showers) + gdq, num_showers = find_faint_extended( + data, + gdq, + readnoise_2d, + frames_per_group, + minimum_sigclip_groups, + snr_threshold=extend_snr_threshold, + min_shower_area=extend_min_area, + inner=extend_inner_radius, + outer=extend_outer_radius, + sat_flag=sat_flag, + jump_flag=jump_flag, + ellipse_expand=extend_ellipse_expand_ratio, + num_grps_masked=grps_masked_after_shower, + max_extended_radius=max_extended_radius, + ) + log.info("Total showers= %i" % num_showers) number_extended_events = num_showers elapsed = time.time() - start - log.info('Total elapsed time = %g sec' % elapsed) + log.info("Total elapsed time = %g sec" % elapsed) # Back out the applied gain to the SCI, ERR, and readnoise arrays so they're # back in units of DN @@ -410,11 +477,19 @@ def detect_jumps(frames_per_group, data, gdq, pdq, err, return gdq, pdq, total_primary_crs, number_extended_events, stddev -def flag_large_events(gdq, jump_flag, sat_flag, min_sat_area=1, - min_jump_area=6, - expand_factor=2.0, - sat_required_snowball=True, min_sat_radius_extend=2.5, - sat_expand=2, edge_size=25, max_extended_radius=200): +def flag_large_events( + gdq, + jump_flag, + sat_flag, + min_sat_area=1, + min_jump_area=6, + expand_factor=2.0, + sat_required_snowball=True, + min_sat_radius_extend=2.5, + sat_expand=2, + edge_size=25, + max_extended_radius=200, +): """ This routine controls the creation of expanded regions that are flagged as jumps. 
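
The reformatted signature above can be exercised as follows; the DQ cube is
synthetic and the flag values (``SATURATED=2``, ``JUMP_DET=4``) are an assumed
JWST-style convention, not something defined by this patch::

    import numpy as np

    from stcal.jump.jump import flag_large_events

    # Synthetic 4-D group DQ cube: (integrations, groups, rows, cols).
    # All zeros, so no snowballs are found and the call returns 0.
    gdq = np.zeros((1, 5, 64, 64), dtype=np.uint8)
    n_snowballs = flag_large_events(gdq, 4, 2, min_sat_area=1, min_jump_area=6, expand_factor=2.0)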
@@ -459,7 +534,7 @@ def flag_large_events(gdq, jump_flag, sat_flag, min_sat_area=1, """ - log.info('Flagging large Snowballs') + log.info("Flagging large Snowballs") n_showers_grp = [] total_snowballs = 0 @@ -475,33 +550,45 @@ def flag_large_events(gdq, jump_flag, sat_flag, min_sat_area=1, new_sat = current_sat * not_prev_sat sat_ellipses = find_ellipses(new_sat, sat_flag, min_sat_area) # find the ellipse parameters for jump regions - jump_ellipses = find_ellipses(gdq[integration, group, :, :], - jump_flag, min_jump_area) + jump_ellipses = find_ellipses(gdq[integration, group, :, :], jump_flag, min_jump_area) if sat_required_snowball: low_threshold = edge_size nrows = gdq.shape[2] high_threshold = max(0, nrows - edge_size) - gdq, snowballs = make_snowballs(gdq, integration, group, - jump_ellipses, sat_ellipses, - low_threshold, high_threshold, - min_sat_radius_extend, - sat_expand, sat_flag, - max_extended_radius) + gdq, snowballs = make_snowballs( + gdq, + integration, + group, + jump_ellipses, + sat_ellipses, + low_threshold, + high_threshold, + min_sat_radius_extend, + sat_expand, + sat_flag, + max_extended_radius, + ) else: snowballs = jump_ellipses n_showers_grp.append(len(snowballs)) total_snowballs += len(snowballs) - gdq, num_events = extend_ellipses(gdq, integration, group, - snowballs, - sat_flag, jump_flag, - expansion=expand_factor, - max_extended_radius=max_extended_radius) + gdq, num_events = extend_ellipses( + gdq, + integration, + group, + snowballs, + sat_flag, + jump_flag, + expansion=expand_factor, + max_extended_radius=max_extended_radius, + ) return total_snowballs -def extend_saturation(cube, grp, sat_ellipses, sat_flag, - min_sat_radius_extend, expansion=2, - max_extended_radius=200): + +def extend_saturation( + cube, grp, sat_ellipses, sat_flag, min_sat_radius_extend, expansion=2, max_extended_radius=200 +): ncols = cube.shape[2] nrows = cube.shape[1] image = np.zeros(shape=(nrows, ncols, 3), dtype=np.uint8) @@ -516,18 +603,34 @@ def extend_saturation(cube, grp, sat_ellipses, sat_flag, alpha = ellipse[2] axis1 = min(axis1, max_extended_radius) axis2 = min(axis2, max_extended_radius) - image = cv.ellipse(image, (round(ceny), round(cenx)), - (round(axis1/2), - round(axis2/2)), alpha, 0, 360, (0, 0, 22), -1) + image = cv.ellipse( + image, + (round(ceny), round(cenx)), + (round(axis1 / 2), round(axis2 / 2)), + alpha, + 0, + 360, + (0, 0, 22), + -1, + ) sat_ellipse = image[:, :, 2] saty, satx = np.where(sat_ellipse == 22) outcube[grp:, saty, satx] = sat_flag return outcube -def extend_ellipses(gdq_cube, intg, grp, ellipses, sat_flag, jump_flag, - expansion=1.9, expand_by_ratio=True, - num_grps_masked=1, max_extended_radius=200): +def extend_ellipses( + gdq_cube, + intg, + grp, + ellipses, + sat_flag, + jump_flag, + expansion=1.9, + expand_by_ratio=True, + num_grps_masked=1, + max_extended_radius=200, +): # For a given DQ plane it will use the list of ellipses to create # expanded ellipses of pixels with # the jump flag set. 
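
Both ``extend_saturation`` and ``extend_ellipses`` rasterize each ellipse with
OpenCV into one channel of a scratch image and then read the mask back out.
That pattern, reduced to a self-contained sketch with an arbitrary shape and
flag value::

    import numpy as np
    import cv2 as cv

    # Draw one filled ellipse (center, half-axes, angle) into the third
    # channel, then recover the coordinates of the rasterized pixels.
    image = np.zeros((50, 60, 3), dtype=np.uint8)
    image = cv.ellipse(image, (30, 25), (8, 4), 30.0, 0, 360, (0, 0, 22), -1)
    mask = image[:, :, 2]
    yy, xx = np.where(mask == 22)  # rows/cols that would be OR'd into the DQ plane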
@@ -559,9 +662,16 @@ def extend_ellipses(gdq_cube, intg, grp, ellipses, sat_flag, jump_flag, axis1 = min(axis1, max_extended_radius) axis2 = min(axis2, max_extended_radius) alpha = ellipse[2] - image = cv.ellipse(image, (round(ceny), round(cenx)), (round(axis1 / 2), - round(axis2 / 2)), alpha, 0, 360, - (0, 0, jump_flag), -1) + image = cv.ellipse( + image, + (round(ceny), round(cenx)), + (round(axis1 / 2), round(axis2 / 2)), + alpha, + 0, + 360, + (0, 0, jump_flag), + -1, + ) jump_ellipse = image[:, :, 2] ngrps = gdq_cube.shape[1] last_grp = min(grp + num_grps_masked, ngrps) @@ -570,8 +680,7 @@ def extend_ellipses(gdq_cube, intg, grp, ellipses, sat_flag, jump_flag, sat_pix = np.bitwise_and(gdq_cube[intg, flg_grp, :, :], sat_flag) saty, satx = np.where(sat_pix == sat_flag) jump_ellipse[saty, satx] = 0 - gdq_cube[intg, flg_grp, :, :] = \ - np.bitwise_or(gdq_cube[intg, flg_grp, :, :], jump_ellipse) + gdq_cube[intg, flg_grp, :, :] = np.bitwise_or(gdq_cube[intg, flg_grp, :, :], jump_ellipse) return gdq_cube, num_ellipses @@ -590,8 +699,7 @@ def find_ellipses(dqplane, bitmask, min_area): # at least the minimum # area and return a list of the minimum enclosing ellipse parameters. pixels = np.bitwise_and(dqplane, bitmask) - contours, hierarchy = cv.findContours(pixels, cv.RETR_EXTERNAL, - cv.CHAIN_APPROX_SIMPLE) + contours, hierarchy = cv.findContours(pixels, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) bigcontours = [con for con in contours if cv.contourArea(con) > min_area] # minAreaRect is used becuase fitEllipse requires 5 points and it is # possible to have a contour @@ -600,9 +708,19 @@ def find_ellipses(dqplane, bitmask, min_area): return ellipses -def make_snowballs(gdq, integration, group, jump_ellipses, sat_ellipses, - low_threshold, high_threshold, - min_sat_radius, expansion, sat_flag, max_extended_radius): +def make_snowballs( + gdq, + integration, + group, + jump_ellipses, + sat_ellipses, + low_threshold, + high_threshold, + min_sat_radius, + expansion, + sat_flag, + max_extended_radius, +): # Ths routine will create a list of snowballs (ellipses) that have the # center # of the saturation circle within the enclosing jump rectangle. 
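`find_ellipses`, shown above, reduces a DQ plane to ellipse parameters via contour detection; `minAreaRect` is used because `fitEllipse` requires at least five points. A standalone sketch of that pipeline (the flag value and blob are made up):

```python
# Mask a DQ plane on one bit, find external contours, keep those above a
# minimum area, and take each minimum-area rectangle as ellipse parameters.
import numpy as np
import cv2 as cv

JUMP_DET = 4                                  # assumed bit value
dqplane = np.zeros((30, 30), dtype=np.uint8)
dqplane[10:18, 12:20] = JUMP_DET              # a synthetic jump blob

pixels = np.bitwise_and(dqplane, JUMP_DET)
contours, _ = cv.findContours(pixels, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
bigcontours = [con for con in contours if cv.contourArea(con) > 6]
ellipses = [cv.minAreaRect(con) for con in bigcontours]
print(ellipses)  # [((cx, cy), (width, height), angle)]
```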
@@ -613,10 +731,12 @@ def make_snowballs(gdq, integration, group, jump_ellipses, sat_ellipses, jump_center = jump[0] # if center of the jump ellipse is not saturated in this group and is saturated in # the next group add the jump ellipse to the snowball list - if (group < (num_groups - 1) and - gdq[integration, group+1, round(jump_center[1]), round(jump_center[0])] == sat_flag and - gdq[integration, group, round(jump_center[1]), round(jump_center[0])] != sat_flag): - snowballs.append(jump) + if ( + group < (num_groups - 1) + and gdq[integration, group + 1, round(jump_center[1]), round(jump_center[0])] == sat_flag + and gdq[integration, group, round(jump_center[1]), round(jump_center[0])] != sat_flag + ): + snowballs.append(jump) # if the jump ellipse is near the edge, do not require saturation in the # center of the jump ellipse elif near_edge(jump, low_threshold, high_threshold): @@ -625,22 +745,25 @@ def make_snowballs(gdq, integration, group, jump_ellipses, sat_ellipses, for sat in sat_ellipses: # center of saturation is within the enclosing jump rectangle if point_inside_ellipse(sat[0], jump): - if gdq[integration, group, round(jump_center[1]), - round(jump_center[0])] == sat_flag: + if gdq[integration, group, round(jump_center[1]), round(jump_center[0])] == sat_flag: if jump not in snowballs: snowballs.append(jump) # extend the saturated ellipses that are larger than the min_sat_radius - gdq[integration, :, :, :] = \ - extend_saturation(gdq[integration, :, :, :], - group, sat_ellipses, sat_flag, min_sat_radius, - expansion=expansion, max_extended_radius=max_extended_radius) + gdq[integration, :, :, :] = extend_saturation( + gdq[integration, :, :, :], + group, + sat_ellipses, + sat_flag, + min_sat_radius, + expansion=expansion, + max_extended_radius=max_extended_radius, + ) return gdq, snowballs def point_inside_ellipse(point, ellipse): - delta_center = np.sqrt((point[0]-ellipse[0][0])**2 + - (point[1]-ellipse[0][1])**2) + delta_center = np.sqrt((point[0] - ellipse[0][0]) ** 2 + (point[1] - ellipse[0][1]) ** 2) minor_axis = min(ellipse[1][0], ellipse[1][1]) if delta_center < minor_axis: return True @@ -652,18 +775,33 @@ def near_edge(jump, low_threshold, high_threshold): # This routing tests whether the center of a jump is close to the edge of # the detector. 
Jumps that are within the threshold will not requre a # saturated core since this may be off the detector - if jump[0][0] < low_threshold or jump[0][1] < low_threshold\ - or jump[0][0] > high_threshold or jump[0][1] > high_threshold: + if ( + jump[0][0] < low_threshold + or jump[0][1] < low_threshold + or jump[0][0] > high_threshold + or jump[0][1] > high_threshold + ): return True else: return False -def find_faint_extended(indata, gdq, readnoise_2d, nframes, minimum_sigclip_groups, - snr_threshold=1.3, - min_shower_area=40, inner=1, outer=2, sat_flag=2, - jump_flag=4, ellipse_expand=1.1, num_grps_masked=25, - max_extended_radius=200): +def find_faint_extended( + indata, + gdq, + readnoise_2d, + nframes, + minimum_sigclip_groups, + snr_threshold=1.3, + min_shower_area=40, + inner=1, + outer=2, + sat_flag=2, + jump_flag=4, + ellipse_expand=1.1, + num_grps_masked=25, + max_extended_radius=200, +): """ Parameters ---------- @@ -713,8 +851,7 @@ def find_faint_extended(indata, gdq, readnoise_2d, nframes, minimum_sigclip_grou first_diffs_masked = np.ma.masked_array(first_diffs, mask=np.isnan(first_diffs)) nints = data.shape[0] if nints > minimum_sigclip_groups: - mean, median, stddev = stats.sigma_clipped_stats(first_diffs_masked, sigma=5, - axis=0) + mean, median, stddev = stats.sigma_clipped_stats(first_diffs_masked, sigma=5, axis=0) for intg in range(nints): # calculate sigma for each pixel if nints <= minimum_sigclip_groups: @@ -730,13 +867,13 @@ def find_faint_extended(indata, gdq, readnoise_2d, nframes, minimum_sigclip_grou ngrps = data.shape[1] for grp in range(1, ngrps): if nints > minimum_sigclip_groups: - median_diffs = median[grp-1] - sigma = stddev[grp-1] + median_diffs = median[grp - 1] + sigma = stddev[grp - 1] # The difference from the median difference for each group e_jump = first_diffs_masked[intg] - median_diffs[np.newaxis, :, :] # SNR ratio of each diff. ratio = np.abs(e_jump) / sigma[np.newaxis, :, :] - masked_ratio = ratio[grp-1].copy() + masked_ratio = ratio[grp - 1].copy() jumpy, jumpx = np.where(gdq[intg, grp, :, :] == jump_flag) # mask pix. 
that are already flagged as jump masked_ratio[jumpy, jumpx] = np.nan @@ -748,22 +885,17 @@ def find_faint_extended(indata, gdq, readnoise_2d, nframes, minimum_sigclip_grou masked_smoothed_ratio = convolve(masked_ratio, ring_2D_kernel) nrows = ratio.shape[1] ncols = ratio.shape[2] - extended_emission = np.zeros(shape=(nrows, - ncols), dtype=np.uint8) + extended_emission = np.zeros(shape=(nrows, ncols), dtype=np.uint8) exty, extx = np.where(masked_smoothed_ratio > snr_threshold) extended_emission[exty, extx] = 1 # find the contours of the extended emission - contours, hierarchy = cv.findContours(extended_emission, - cv.RETR_EXTERNAL, - cv.CHAIN_APPROX_SIMPLE) + contours, hierarchy = cv.findContours(extended_emission, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) # get the countours that are above the minimum size - bigcontours = [con for con in contours if cv.contourArea(con) > - min_shower_area] + bigcontours = [con for con in contours if cv.contourArea(con) > min_shower_area] # get the minimum enclosing rectangle which is the same as the # minimum enclosing ellipse ellipses = [cv.minAreaRect(con) for con in bigcontours] - expand_by_ratio = True expansion = 1.0 plane = gdq[intg, grp, :, :] @@ -795,9 +927,16 @@ def find_faint_extended(indata, gdq, readnoise_2d, nframes, minimum_sigclip_grou axis1 = min(axis1, max_extended_radius) axis2 = min(axis2, max_extended_radius) alpha = ellipse[2] - image = cv.ellipse(image, (round(ceny), round(cenx)), (round(axis1 / 2), - round(axis2 / 2)), alpha, 0, 360, - (0, 0, jump_flag), -1) + image = cv.ellipse( + image, + (round(ceny), round(cenx)), + (round(axis1 / 2), round(axis2 / 2)), + alpha, + 0, + 360, + (0, 0, jump_flag), + -1, + ) if len(ellipses) > 0: # add all the showers for this integration to the list all_ellipses.append([intg, grp, ellipses]) @@ -810,11 +949,18 @@ def find_faint_extended(indata, gdq, readnoise_2d, nframes, minimum_sigclip_grou intg = showers[0] grp = showers[1] ellipses = showers[2] - gdq, num = extend_ellipses(gdq, intg, grp, ellipses, sat_flag, - jump_flag, expansion=ellipse_expand, - expand_by_ratio=True, - num_grps_masked=num_grps_masked, - max_extended_radius=max_extended_radius) + gdq, num = extend_ellipses( + gdq, + intg, + grp, + ellipses, + sat_flag, + jump_flag, + expansion=ellipse_expand, + expand_by_ratio=True, + num_grps_masked=num_grps_masked, + max_extended_radius=max_extended_radius, + ) return gdq, len(all_ellipses) @@ -822,13 +968,13 @@ def calc_num_slices(n_rows, max_cores, max_available): n_slices = 1 if max_cores.isnumeric(): n_slices = int(max_cores) - elif max_cores.lower() == "none" or max_cores.lower() == 'one': + elif max_cores.lower() == "none" or max_cores.lower() == "one": n_slices = 1 - elif max_cores == 'quarter': + elif max_cores == "quarter": n_slices = max_available // 4 or 1 - elif max_cores == 'half': + elif max_cores == "half": n_slices = max_available // 2 or 1 - elif max_cores == 'all': + elif max_cores == "all": n_slices = max_available # Make sure we don't have more slices than rows or available cores. 
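The mapping above converts the user-facing `max_cores` string into a slice count; the function's final clamping line follows this note. Restated as a self-contained sketch:

```python
# Faithful restatement of calc_num_slices from the hunk above.
def calc_num_slices(n_rows: int, max_cores: str, max_available: int) -> int:
    n_slices = 1
    if max_cores.isnumeric():
        n_slices = int(max_cores)
    elif max_cores.lower() in ("none", "one"):
        n_slices = 1
    elif max_cores == "quarter":
        n_slices = max_available // 4 or 1
    elif max_cores == "half":
        n_slices = max_available // 2 or 1
    elif max_cores == "all":
        n_slices = max_available
    # Never use more slices than rows or available cores.
    return min([n_rows, n_slices, max_available])

print(calc_num_slices(2048, "half", 8))  # -> 4
```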
n_slices = min([n_rows, n_slices, max_available]) diff --git a/src/stcal/jump/twopoint_difference.py b/src/stcal/jump/twopoint_difference.py index 8420b307..13ff8669 100644 --- a/src/stcal/jump/twopoint_difference.py +++ b/src/stcal/jump/twopoint_difference.py @@ -2,21 +2,32 @@ import numpy as np import astropy.stats as stats import warnings + log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) -def find_crs(dataa, group_dq, read_noise, normal_rej_thresh, - two_diff_rej_thresh, three_diff_rej_thresh, nframes, - flag_4_neighbors, max_jump_to_flag_neighbors, - min_jump_to_flag_neighbors, dqflags, - after_jump_flag_e1=0.0, - after_jump_flag_n1=0, - after_jump_flag_e2=0.0, - after_jump_flag_n2=0, - copy_arrs=True, minimum_groups=3, minimum_sigclip_groups=100, - only_use_ints=True): - +def find_crs( + dataa, + group_dq, + read_noise, + normal_rej_thresh, + two_diff_rej_thresh, + three_diff_rej_thresh, + nframes, + flag_4_neighbors, + max_jump_to_flag_neighbors, + min_jump_to_flag_neighbors, + dqflags, + after_jump_flag_e1=0.0, + after_jump_flag_n1=0, + after_jump_flag_e2=0.0, + after_jump_flag_n2=0, + copy_arrs=True, + minimum_groups=3, + minimum_sigclip_groups=100, + only_use_ints=True, +): """ Find CRs/Jumps in each integration within the input data array. The input data array is assumed to be in units of electrons, i.e. already multiplied @@ -143,13 +154,12 @@ def find_crs(dataa, group_dq, read_noise, normal_rej_thresh, total_groups = nints else: total_groups = nints * ngrps - num_flagged_grps - if (ngrps < minimum_groups and only_use_ints and nints < minimum_sigclip_groups) or \ - (not only_use_ints and nints * ngrps < minimum_sigclip_groups and - ngrps < minimum_groups): + if (ngrps < minimum_groups and only_use_ints and nints < minimum_sigclip_groups) or ( + not only_use_ints and nints * ngrps < minimum_sigclip_groups and ngrps < minimum_groups + ): log.info("Jump Step was skipped because exposure has less than the minimum number of usable groups") log.info("Data shape {}".format(str(dat.shape))) - dummy = np.zeros((dataa.shape[1] - 1, dataa.shape[2], dataa.shape[3]), - dtype=np.float32) + dummy = np.zeros((dataa.shape[1] - 1, dataa.shape[2], dataa.shape[3]), dtype=np.float32) return gdq, row_below_gdq, row_above_gdq, 0, dummy else: # set 'saturated' or 'do not use' pixels to nan in data @@ -168,49 +178,61 @@ def find_crs(dataa, group_dq, read_noise, normal_rej_thresh, sigma = np.sqrt(np.abs(median_diffs) + read_noise_2 / nframes) # reset sigma so pxels with 0 readnoise are not flagged as jumps - sigma[np.where(sigma == 0.)] = np.nan + sigma[np.where(sigma == 0.0)] = np.nan # compute 'ratio' for each group. this is the value that will be # compared to 'threshold' to classify jumps. subtract the median of # first_diffs from first_diffs, take the abs. value and divide by sigma. 
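The comment above describes the two-point-difference statistic; the diff lines implementing it continue right after this sketch. A hedged, self-contained illustration, using a synthetic ramp, illustrative noise values, and a plain median where the pipeline applies extra clipping:

```python
# Jump statistic: |first_diffs - median_diffs| / sigma, with
# sigma = sqrt(|median_diffs| + read_noise**2 / nframes).
import numpy as np

ngroups, nrows, ncols = 6, 4, 4
rng = np.random.default_rng(0)
data = np.cumsum(rng.normal(10.0, 1.0, (ngroups, nrows, ncols)), axis=0)
data[3:, 2, 2] += 50.0                      # inject a jump at group 3

first_diffs = np.diff(data, axis=0)
median_diffs = np.nanmedian(first_diffs, axis=0)
read_noise_2, nframes = 5.0**2, 1
sigma = np.sqrt(np.abs(median_diffs) + read_noise_2 / nframes)
ratio = np.abs(first_diffs - median_diffs[np.newaxis, :, :]) / sigma[np.newaxis, :, :]
print(ratio[:, 2, 2].round(1))              # the jumped diff dominates
```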
e_jump_4d = first_diffs - median_diffs[np.newaxis, :, :] - ratio_all = np.abs(first_diffs - median_diffs[np.newaxis, np.newaxis, :, :]) / \ - sigma[np.newaxis, np.newaxis, :, :] - if (only_use_ints and nints >= minimum_sigclip_groups) or \ - (not only_use_ints and total_groups >= minimum_sigclip_groups): - log.info(" Jump Step using sigma clip {} greater than {}, rejection threshold {}".format( - str(total_groups), str(minimum_sigclip_groups), str(normal_rej_thresh))) + ratio_all = ( + np.abs(first_diffs - median_diffs[np.newaxis, np.newaxis, :, :]) + / sigma[np.newaxis, np.newaxis, :, :] + ) + if (only_use_ints and nints >= minimum_sigclip_groups) or ( + not only_use_ints and total_groups >= minimum_sigclip_groups + ): + log.info( + " Jump Step using sigma clip {} greater than {}, rejection threshold {}".format( + str(total_groups), str(minimum_sigclip_groups), str(normal_rej_thresh) + ) + ) warnings.filterwarnings("ignore", ".*All-NaN slice encountered.*", RuntimeWarning) warnings.filterwarnings("ignore", ".*Mean of empty slice.*", RuntimeWarning) warnings.filterwarnings("ignore", ".*Degrees of freedom <= 0.*", RuntimeWarning) if only_use_ints: - mean, median, stddev = stats.sigma_clipped_stats(first_diffs_masked, sigma=normal_rej_thresh, - axis=0) - clipped_diffs = stats.sigma_clip(first_diffs_masked, sigma=normal_rej_thresh, - axis=0, masked=True) + mean, median, stddev = stats.sigma_clipped_stats( + first_diffs_masked, sigma=normal_rej_thresh, axis=0 + ) + clipped_diffs = stats.sigma_clip( + first_diffs_masked, sigma=normal_rej_thresh, axis=0, masked=True + ) else: - mean, median, stddev = stats.sigma_clipped_stats(first_diffs_masked, sigma=normal_rej_thresh, - axis=(0, 1)) - clipped_diffs = stats.sigma_clip(first_diffs_masked, sigma=normal_rej_thresh, - axis=(0, 1), masked=True) + mean, median, stddev = stats.sigma_clipped_stats( + first_diffs_masked, sigma=normal_rej_thresh, axis=(0, 1) + ) + clipped_diffs = stats.sigma_clip( + first_diffs_masked, sigma=normal_rej_thresh, axis=(0, 1), masked=True + ) jump_mask = np.logical_and(clipped_diffs.mask, np.logical_not(first_diffs_masked.mask)) jump_mask[np.bitwise_and(jump_mask, gdq[:, 1:, :, :] == sat_flag)] = False jump_mask[np.bitwise_and(jump_mask, gdq[:, 1:, :, :] == dnu_flag)] = False jump_mask[np.bitwise_and(jump_mask, gdq[:, 1:, :, :] == (dnu_flag + sat_flag))] = False - gdq[:, 1:, :, :] = np.bitwise_or(gdq[:, 1:, :, :], jump_mask * - np.uint8(dqflags["JUMP_DET"])) + gdq[:, 1:, :, :] = np.bitwise_or(gdq[:, 1:, :, :], jump_mask * np.uint8(dqflags["JUMP_DET"])) # if grp is all jump set to do not use for integ in range(nints): for grp in range(ngrps): - if np.all(np.bitwise_or(np.bitwise_and(gdq[integ, grp, :, :], jump_flag), - np.bitwise_and(gdq[integ, grp, :, :], dnu_flag))): + if np.all( + np.bitwise_or( + np.bitwise_and(gdq[integ, grp, :, :], jump_flag), + np.bitwise_and(gdq[integ, grp, :, :], dnu_flag), + ) + ): jumpy, jumpx = np.where(gdq[integ, grp, :, :] == jump_flag) gdq[integ, grp, jumpy, jumpx] = 0 warnings.resetwarnings() else: for integ in range(nints): - # get data, gdq for this integration dat = dataa[integ] gdq_integ = gdq[integ] @@ -229,7 +251,7 @@ def find_crs(dataa, group_dq, read_noise, normal_rej_thresh, # calculate sigma for each pixel sigma = np.sqrt(np.abs(median_diffs) + read_noise_2 / nframes) # reset sigma so pxels with 0 readnoise are not flagged as jumps - sigma[np.where(sigma == 0.)] = np.nan + sigma[np.where(sigma == 0.0)] = np.nan # compute 'ratio' for each group. 
this is the value that will be # compared to 'threshold' to classify jumps. subtract the median of @@ -244,12 +266,15 @@ def find_crs(dataa, group_dq, read_noise, normal_rej_thresh, # now see if the largest ratio of all groups for each pixel exceeds the threshold. # there are different threshold for 4+, 3, and 2 usable groups num_unusable_groups = np.sum(np.isnan(first_diffs), axis=0) - row4cr, col4cr = np.where(np.logical_and(ndiffs - num_unusable_groups >= 4, - max_ratio > normal_rej_thresh)) - row3cr, col3cr = np.where(np.logical_and(ndiffs - num_unusable_groups == 3, - max_ratio > three_diff_rej_thresh)) - row2cr, col2cr = np.where(np.logical_and(ndiffs - num_unusable_groups == 2, - max_ratio > two_diff_rej_thresh)) + row4cr, col4cr = np.where( + np.logical_and(ndiffs - num_unusable_groups >= 4, max_ratio > normal_rej_thresh) + ) + row3cr, col3cr = np.where( + np.logical_and(ndiffs - num_unusable_groups == 3, max_ratio > three_diff_rej_thresh) + ) + row2cr, col2cr = np.where( + np.logical_and(ndiffs - num_unusable_groups == 2, max_ratio > two_diff_rej_thresh) + ) # get the rows, col pairs for all pixels with at least one CR all_crs_row = np.concatenate((row4cr, row3cr, row2cr)) @@ -279,7 +304,6 @@ def find_crs(dataa, group_dq, read_noise, normal_rej_thresh, # actually one left, since the next CR will be masked after # checking that condition) while new_CR_found and (ndiffs - np.sum(np.isnan(pix_first_diffs)) > 2): - new_CR_found = False # set CRs to nans in first diffs to clip them @@ -305,20 +329,21 @@ def find_crs(dataa, group_dq, read_noise, normal_rej_thresh, pix_cr_mask[new_pix_max_ratio_idx] = 0 unusable_diffs = np.sum(np.isnan(pix_first_diffs)) # Found all CRs for this pix - set flags in input DQ array - gdq[integ, 1:, all_crs_row[j], all_crs_col[j]] = \ - np.bitwise_or(gdq[integ, 1:, all_crs_row[j], all_crs_col[j]], - dqflags["JUMP_DET"] * np.invert(pix_cr_mask)) + gdq[integ, 1:, all_crs_row[j], all_crs_col[j]] = np.bitwise_or( + gdq[integ, 1:, all_crs_row[j], all_crs_col[j]], + dqflags["JUMP_DET"] * np.invert(pix_cr_mask), + ) cr_integ, cr_group, cr_row, cr_col = np.where(np.bitwise_and(gdq, jump_flag)) num_primary_crs = len(cr_group) if flag_4_neighbors: # iterate over each 'jump' pixel for j in range(len(cr_group)): - ratio_this_pix = ratio_all[cr_integ[j], cr_group[j] - 1, cr_row[j], cr_col[j]] # Jumps must be in a certain range to have neighbors flagged - if (ratio_this_pix < max_jump_to_flag_neighbors) and \ - (ratio_this_pix > min_jump_to_flag_neighbors): + if (ratio_this_pix < max_jump_to_flag_neighbors) and ( + ratio_this_pix > min_jump_to_flag_neighbors + ): integ = cr_integ[j] group = cr_group[j] row = cr_row[j] @@ -337,16 +362,18 @@ def find_crs(dataa, group_dq, read_noise, normal_rej_thresh, if row != 0: if (gdq[integ, group, row - 1, col] & sat_flag) == 0: if (gdq[integ, group, row - 1, col] & dnu_flag) == 0: - gdq[integ, group, row - 1, col] =\ - np.bitwise_or(gdq[integ, group, row - 1, col], jump_flag) + gdq[integ, group, row - 1, col] = np.bitwise_or( + gdq[integ, group, row - 1, col], jump_flag + ) else: row_below_gdq[integ, cr_group[j], cr_col[j]] = jump_flag if row != nrows - 1: if (gdq[integ, group, row + 1, col] & sat_flag) == 0: if (gdq[integ, group, row + 1, col] & dnu_flag) == 0: - gdq[integ, group, row + 1, col] = \ - np.bitwise_or(gdq[integ, group, row + 1, col], jump_flag) + gdq[integ, group, row + 1, col] = np.bitwise_or( + gdq[integ, group, row + 1, col], jump_flag + ) else: row_above_gdq[integ, cr_group[j], cr_col[j]] = jump_flag @@ -355,21 
+382,22 @@ def find_crs(dataa, group_dq, read_noise, normal_rej_thresh, if cr_col[j] != 0: if (gdq[integ, group, row, col - 1] & sat_flag) == 0: if (gdq[integ, group, row, col - 1] & dnu_flag) == 0: - gdq[integ, group, row, col - 1] =\ - np.bitwise_or(gdq[integ, group, row, col - 1], jump_flag) + gdq[integ, group, row, col - 1] = np.bitwise_or( + gdq[integ, group, row, col - 1], jump_flag + ) if cr_col[j] != ncols - 1: if (gdq[integ, group, row, col + 1] & sat_flag) == 0: if (gdq[integ, group, row, col + 1] & dnu_flag) == 0: - gdq[integ, group, row, col + 1] =\ - np.bitwise_or(gdq[integ, group, row, col + 1], jump_flag) + gdq[integ, group, row, col + 1] = np.bitwise_or( + gdq[integ, group, row, col + 1], jump_flag + ) # flag n groups after jumps above the specified thresholds to account for # the transient seen after ramp jumps flag_e_threshold = [after_jump_flag_e1, after_jump_flag_e2] flag_groups = [after_jump_flag_n1, after_jump_flag_n2] - for cthres, cgroup in zip(flag_e_threshold, flag_groups): if cgroup > 0: cr_intg, cr_group, cr_row, cr_col = np.where(np.bitwise_and(gdq, jump_flag)) @@ -382,48 +410,46 @@ def find_crs(dataa, group_dq, read_noise, normal_rej_thresh, for kk in range(group, min(group + cgroup + 1, ngroups)): if (gdq[intg, kk, row, col] & sat_flag) == 0: if (gdq[intg, kk, row, col] & dnu_flag) == 0: - gdq[intg, kk, row, col] = \ - np.bitwise_or(gdq[integ, kk, row, col], jump_flag) - if 'stddev' in locals(): + gdq[intg, kk, row, col] = np.bitwise_or( + gdq[integ, kk, row, col], jump_flag + ) + if "stddev" in locals(): return gdq, row_below_gdq, row_above_gdq, num_primary_crs, stddev else: if only_use_ints: - dummy = np.zeros((dataa.shape[1]-1, dataa.shape[2], dataa.shape[3]), - dtype=np.float32) + dummy = np.zeros((dataa.shape[1] - 1, dataa.shape[2], dataa.shape[3]), dtype=np.float32) else: dummy = np.zeros((dataa.shape[2], dataa.shape[3]), dtype=np.float32) return gdq, row_below_gdq, row_above_gdq, num_primary_crs, dummy def calc_med_first_diffs(first_diffs): + """Calculate the median of `first diffs` along the group axis. - """ Calculate the median of `first diffs` along the group axis. - - If there are 4+ usable groups (e.g not flagged as saturated, donotuse, - or a previously clipped CR), then the group with largest absoulte - first difference will be clipped and the median of the remianing groups - will be returned. If there are exactly 3 usable groups, the median of - those three groups will be returned without any clipping. Finally, if - there are two usable groups, the group with the smallest absolute - difference will be returned. - - Parameters - ----------- - first_diffs : array, float - array containing the first differences of adjacent groups - for a single integration. Can be 3d or 1d (for a single pix) - - Returns - ------- - median_diffs : float or array, float - If the input is a single pixel, a float containing the median for - the groups in that pixel will be returned. If the input is a 3d - array of several pixels, a 2d array with the median for each pixel - will be returned. - """ + If there are 4+ usable groups (e.g not flagged as saturated, donotuse, + or a previously clipped CR), then the group with largest absoulte + first difference will be clipped and the median of the remianing groups + will be returned. If there are exactly 3 usable groups, the median of + those three groups will be returned without any clipping. Finally, if + there are two usable groups, the group with the smallest absolute + difference will be returned. 
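The clipping rule just described can be checked for a single pixel with four or more usable groups (values made up):

```python
# 4+ usable groups: clip the largest |first difference|, median the rest.
import numpy as np

first_diffs = np.array([1.0, 1.2, 9.0, 0.9, 1.1])  # 9.0 is a likely CR
mask = np.ones_like(first_diffs, dtype=bool)
mask[np.nanargmax(np.abs(first_diffs))] = False    # drop the largest |diff|
print(np.nanmedian(first_diffs[mask]))             # -> 1.05
```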
- if first_diffs.ndim == 1: # in the case where input is a single pixel + Parameters + ----------- + first_diffs : array, float + array containing the first differences of adjacent groups + for a single integration. Can be 3d or 1d (for a single pix) + + Returns + ------- + median_diffs : float or array, float + If the input is a single pixel, a float containing the median for + the groups in that pixel will be returned. If the input is a 3d + array of several pixels, a 2d array with the median for each pixel + will be returned. + """ + if first_diffs.ndim == 1: # in the case where input is a single pixel num_usable_groups = len(first_diffs) - np.sum(np.isnan(first_diffs), axis=0) if num_usable_groups >= 4: # if 4+, clip largest and return median mask = np.ones_like(first_diffs).astype(bool) @@ -446,8 +472,9 @@ def calc_med_first_diffs(first_diffs): row4, col4 = np.where(num_usable_groups >= 4) # locations of >= 4 usable group pixels if len(row4) > 0: four_slice = first_diffs[:, row4, col4] - four_slice[np.nanargmax(np.abs(four_slice), axis=0), - np.arange(four_slice.shape[1])] = np.nan # mask largest group in slice + four_slice[ + np.nanargmax(np.abs(four_slice), axis=0), np.arange(four_slice.shape[1]) + ] = np.nan # mask largest group in slice median_diffs[row4, col4] = np.nanmedian(four_slice, axis=0) # add median to return arr for these pix # process groups with 3 usable groups @@ -460,8 +487,9 @@ def calc_med_first_diffs(first_diffs): row2, col2 = np.where(num_usable_groups == 2) # locations of >= 4 usable group pixels if len(row2) > 0: two_slice = first_diffs[:, row2, col2] - two_slice[np.nanargmax(np.abs(two_slice), axis=0), - np.arange(two_slice.shape[1])] = np.nan # mask larger abs. val + two_slice[ + np.nanargmax(np.abs(two_slice), axis=0), np.arange(two_slice.shape[1]) + ] = np.nan # mask larger abs. val median_diffs[row2, col2] = np.nanmin(two_slice, axis=0) # add med. to return arr # set the medians all groups with less than 2 usable groups to nan to skip further diff --git a/src/stcal/linearity/linearity.py b/src/stcal/linearity/linearity.py index 2516b5cb..752b22ef 100644 --- a/src/stcal/linearity/linearity.py +++ b/src/stcal/linearity/linearity.py @@ -1,8 +1,7 @@ import numpy as np -def linearity_correction( - data, gdq, pdq, lin_coeffs, lin_dq, dqflags, zframe=None): +def linearity_correction(data, gdq, pdq, lin_coeffs, lin_dq, dqflags, zframe=None): """ Apply linearity correction to individual groups in `data` to pixels that haven't already been flagged as saturated. @@ -62,8 +61,7 @@ def linearity_correction( zlin_dq = lin_dq.copy() # Do linear correction on SCI data - data, new_pdq = linearity_correction_branch( - data, gdq, pdq, lin_coeffs, lin_dq, dqflags, False) + data, new_pdq = linearity_correction_branch(data, gdq, pdq, lin_coeffs, lin_dq, dqflags, False) zdata = None # zframe needs to be returned, so initialize it to None. if zframe is not None: @@ -73,23 +71,21 @@ def linearity_correction( # set to ZERO. Since zero ZEROFRAME values indicates bad data, # remember where this happens. Make a dummy ZEROFRAME DQ array and # mark zeroed data as saturated. - wh_zero = np.where(zframe[:, :, :] == 0.) + wh_zero = np.where(zframe[:, :, :] == 0.0) zdq = np.zeros(zframe.shape, dtype=gdq.dtype) - zdq[zframe == 0.] 
= dqflags["SATURATED"] + zdq[zframe == 0.0] = dqflags["SATURATED"] zpdq = np.zeros(zframe.shape[-2:], dtype=pdq.dtype) # Linearly correct ZEROFRAME - zdata, _ = linearity_correction_branch( - zframe, zdq, zpdq, zlin_coeffs, zlin_dq, dqflags, True) + zdata, _ = linearity_correction_branch(zframe, zdq, zpdq, zlin_coeffs, zlin_dq, dqflags, True) # Ensure bad data remains bad. - zdata[wh_zero] = 0. + zdata[wh_zero] = 0.0 return data, new_pdq, zdata -def linearity_correction_branch( - data, gdq, pdq, lin_coeffs, lin_dq, dqflags, zframe): +def linearity_correction_branch(data, gdq, pdq, lin_coeffs, lin_dq, dqflags, zframe): """ Parameters ---------- @@ -149,15 +145,13 @@ def linearity_correction_branch( for plane in range(ngroups): dataplane = data[ints, plane] gdqplane = gdq[ints, plane] - linear_correct_plane( - dataplane, gdqplane, lin_coeffs, ncoeffs, dqflags) + linear_correct_plane(dataplane, gdqplane, lin_coeffs, ncoeffs, dqflags) else: # ZEROFRAME processing dataplane = data[ints] gdqplane = gdq[ints] - linear_correct_plane( - dataplane, gdqplane, lin_coeffs, ncoeffs, dqflags) + linear_correct_plane(dataplane, gdqplane, lin_coeffs, ncoeffs, dqflags) return data, new_pdq @@ -187,9 +181,7 @@ def linear_correct_plane(dataplane, gdqplane, lin_coeffs, ncoeffs, dqflags): # Only use the corrected signal where the original signal value # has not been flagged by the saturation step. # Otherwise use the original signal. - dataplane[:, :] = np.where(np.bitwise_and(gdqplane[:, :], dqflags['SATURATED']), - dataplane[:, :], - scorr) + dataplane[:, :] = np.where(np.bitwise_and(gdqplane[:, :], dqflags["SATURATED"]), dataplane[:, :], scorr) def correct_for_NaN(lin_coeffs, pixeldq, dqflags): @@ -220,8 +212,7 @@ def correct_for_NaN(lin_coeffs, pixeldq, dqflags): znan, ynan, xnan = wh_nan[0], wh_nan[1], wh_nan[2] num_nan = 0 - nan_array = np.zeros((lin_coeffs.shape[1], lin_coeffs.shape[2]), - dtype=np.uint32) + nan_array = np.zeros((lin_coeffs.shape[1], lin_coeffs.shape[2]), dtype=np.uint32) # If there are NaNs as the correction coefficients, update those # coefficients so that those SCI values will be unchanged. @@ -231,7 +222,7 @@ def correct_for_NaN(lin_coeffs, pixeldq, dqflags): for ii in range(num_nan): lin_coeffs[:, ynan[ii], xnan[ii]] = ben_cor - nan_array[ynan[ii], xnan[ii]] = dqflags['NO_LIN_CORR'] + nan_array[ynan[ii], xnan[ii]] = dqflags["NO_LIN_CORR"] # Include these pixels in the output pixeldq pixeldq = np.bitwise_or(pixeldq, nan_array) @@ -260,12 +251,11 @@ def correct_for_zero(lin_coeffs, pixeldq, dqflags): """ # The critcal coefficient that should not be zero is the linear term other terms are fine to be zero - linear_term = lin_coeffs[1,:,:] + linear_term = lin_coeffs[1, :, :] wh_zero = np.where(linear_term == 0) yzero, xzero = wh_zero[0], wh_zero[1] num_zero = 0 - lin_dq_array = np.zeros((lin_coeffs.shape[1], lin_coeffs.shape[2]), - dtype=np.uint32) + lin_dq_array = np.zeros((lin_coeffs.shape[1], lin_coeffs.shape[2]), dtype=np.uint32) # If there are linearity linear term equal to zero, # update the coefficients so the SCI values will be unchanged. 
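The fallback described above replaces the coefficients of affected pixels with `ben_cor`, which is defined outside the hunks shown; presumably it is the identity polynomial. A sketch under that assumption:

```python
# Assumed form of the benign correction: coefficients [0, 1, 0, ...] make
# the polynomial evaluate to the input signal, leaving SCI values unchanged.
import numpy as np

ncoeffs = 4
ben_cor = np.zeros(ncoeffs)
ben_cor[1] = 1.0  # scorr = 0 + 1*s + 0*s**2 + 0*s**3 = s

signal = 1234.5
scorr = sum(c * signal**k for k, c in enumerate(ben_cor))
assert scorr == signal
```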
@@ -275,7 +265,7 @@ def correct_for_zero(lin_coeffs, pixeldq, dqflags): for ii in range(num_zero): lin_coeffs[:, yzero[ii], xzero[ii]] = ben_cor - lin_dq_array[yzero[ii], xzero[ii]] = dqflags['NO_LIN_CORR'] + lin_dq_array[yzero[ii], xzero[ii]] = dqflags["NO_LIN_CORR"] # Include these pixels in the output pixeldq pixeldq = np.bitwise_or(pixeldq, lin_dq_array) @@ -307,10 +297,10 @@ def correct_for_flag(lin_coeffs, lin_dq, dqflags): updated array of correction coefficients in reference file """ - wh_flag = np.bitwise_and(lin_dq, dqflags['NO_LIN_CORR']) + wh_flag = np.bitwise_and(lin_dq, dqflags["NO_LIN_CORR"]) num_flag = len(np.where(wh_flag > 0)[0]) - wh_lin = np.where(wh_flag == dqflags['NO_LIN_CORR']) + wh_lin = np.where(wh_flag == dqflags["NO_LIN_CORR"]) yf, xf = wh_lin[0], wh_lin[1] # If there are pixels flagged as 'NO_LIN_CORR', update the corresponding diff --git a/src/stcal/ramp_fitting/gls_fit.py b/src/stcal/ramp_fitting/gls_fit.py index 4139f513..d950975f 100644 --- a/src/stcal/ramp_fitting/gls_fit.py +++ b/src/stcal/ramp_fitting/gls_fit.py @@ -44,12 +44,12 @@ MAX_ITER = 1 # This is a term to add for saturated pixels to give them low weight. -HUGE_FOR_LOW_WEIGHT = 1.e20 +HUGE_FOR_LOW_WEIGHT = 1.0e20 # This is a value to replace zero or negative values in a fit, to make # all values of the fit positive and to give low weight where the fit was # zero or negative. -FIT_MUST_BE_POSITIVE = 1.e10 +FIT_MUST_BE_POSITIVE = 1.0e10 def gls_ramp_fit(ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, max_cores): @@ -108,19 +108,28 @@ def gls_ramp_fit(ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, max_cores log.info(f"Number of data slices: {number_slices}") # Get needed sizes and shapes - (nreads, npix, imshape, cubeshape, n_int, instrume, frame_time, - ngroups, group_time) = utils.get_dataset_info(ramp_data) + ( + nreads, + npix, + imshape, + cubeshape, + n_int, + instrume, + frame_time, + ngroups, + group_time, + ) = utils.get_dataset_info(ramp_data) (group_time, frames_per_group, saturated_flag, jump_flag) = utils.get_more_info( - ramp_data, ramp_data.flags_saturated, ramp_data.flags_jump_det) + ramp_data, ramp_data.flags_saturated, ramp_data.flags_jump_det + ) tstart = time.time() # Determine the maximum number of cosmic ray hits for any pixel. 
- max_num_cr = -1 # invalid initial value + max_num_cr = -1 # invalid initial value for num_int in range(n_int): - i_max_num_cr = utils.get_max_num_cr( - ramp_data.groupdq[num_int, :, :, :], jump_flag) + i_max_num_cr = utils.get_max_num_cr(ramp_data.groupdq[num_int, :, :, :], jump_flag) max_num_cr = max(max_num_cr, i_max_num_cr) # Calculate effective integration time (once EFFINTIM has been populated @@ -133,24 +142,25 @@ def gls_ramp_fit(ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, max_cores if number_slices == 1: image_info, integ_info, gls_opt_info = gls_fit_single( - ramp_data, gain_2d, readnoise_2d, max_num_cr, save_opt) + ramp_data, gain_2d, readnoise_2d, max_num_cr, save_opt + ) else: image_info, integ_info, gls_opt_info = gls_fit_multi( - ramp_data, gain_2d, readnoise_2d, max_num_cr, save_opt, number_slices) + ramp_data, gain_2d, readnoise_2d, max_num_cr, save_opt, number_slices + ) tstop = time.time() - log.info('Number of groups per integration: %d' % nreads) - log.info('Number of integrations: %d' % n_int) + log.info("Number of groups per integration: %d" % nreads) + log.info("Number of integrations: %d" % n_int) log.debug(f"The execution time in seconds: {tstop - tstart:,}") return image_info, integ_info, gls_opt_info -def gls_fit_multi( - ramp_data, gain_2d, readnoise_2d, max_num_cr, save_opt, number_slices): +def gls_fit_multi(ramp_data, gain_2d, readnoise_2d, max_num_cr, save_opt, number_slices): """ ramp_data: RampClass The data needed to do ramp fitting. @@ -172,7 +182,8 @@ def gls_fit_multi( """ log.info(f"Number of processors used for multiprocessing: {number_slices}") slices, rows_per_slice = compute_slices_for_starmap( - ramp_data, save_opt, readnoise_2d, gain_2d, max_num_cr, number_slices) + ramp_data, save_opt, readnoise_2d, gain_2d, max_num_cr, number_slices + ) pool = Pool(processes=number_slices) pool_results = pool.starmap(gls_fit_single, slices) @@ -180,8 +191,7 @@ def gls_fit_multi( pool.join() # Reassemble results - image_info, integ_info, opt_res = assemble_pool_results( - ramp_data, save_opt, pool_results, rows_per_slice) + image_info, integ_info, opt_res = assemble_pool_results(ramp_data, save_opt, pool_results, rows_per_slice) return image_info, integ_info, opt_res @@ -348,17 +358,16 @@ def reassemble_opt(ramp_data, opt_res, opt_slice, crow, nrows): log.debug(f"ampl_err = {ampl_err.shape}") # TODO Dimension check - ''' + """ opt_res[0][:, srow:erow, :] = slope opt_res[1][:, srow:erow, :] = err opt_res[2][:, srow:erow, :] = pedestal opt_res[3][:, srow:erow, :] = ampl opt_res[4][:, srow:erow, :] = ampl_err - ''' + """ -def compute_slices_for_starmap( - ramp_data, save_opt, readnoise_2d, gain_2d, max_num_cr, number_slices): +def compute_slices_for_starmap(ramp_data, save_opt, readnoise_2d, gain_2d, max_num_cr, number_slices): """ Creates the slices needed for each process for multiprocessing. The slices for the arguments needed for ols_ramp_fit_single. 
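A minimal sketch of the row-slicing pattern used by these multiprocessing helpers. The even split of rows across slices is an assumption (the actual `rslices` computation is not in this patch); the `start_row` bookkeeping matches the hunk that follows:

```python
# Split nrows across slices, then cut 2-D reference arrays by row window.
import numpy as np

nrows, number_slices = 10, 3
base, extra = divmod(nrows, number_slices)
rslices = [base + (1 if k < extra else 0) for k in range(number_slices)]  # [4, 3, 3]

readnoise_2d = np.ones((nrows, 8), dtype=np.float32)
start_row = 0
for k in range(number_slices):
    rnoise_slice = readnoise_2d[start_row : start_row + rslices[k], :].copy()
    start_row += rslices[k]
```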
@@ -392,11 +401,9 @@ def compute_slices_for_starmap( start_row = 0 for k in range(len(rslices)): ramp_slice = slice_ramp_data(ramp_data, start_row, rslices[k]) - rnoise_slice = readnoise_2d[start_row:start_row + rslices[k], :].copy() - gain_slice = gain_2d[start_row:start_row + rslices[k], :].copy() - slices.insert( - k, - (ramp_slice, rnoise_slice, gain_slice, max_num_cr, save_opt)) + rnoise_slice = readnoise_2d[start_row : start_row + rslices[k], :].copy() + gain_slice = gain_2d[start_row : start_row + rslices[k], :].copy() + slices.insert(k, (ramp_slice, rnoise_slice, gain_slice, max_num_cr, save_opt)) start_row = start_row + rslices[k] return slices, rslices @@ -459,10 +466,10 @@ def slice_ramp_data(ramp_data, start_row, nrows): ramp_data_slice = ramp_fit_class.RampData() # Slice data by row - data = ramp_data.data[:, :, start_row:start_row + nrows, :].copy() - err = ramp_data.err[:, :, start_row:start_row + nrows, :].copy() - groupdq = ramp_data.groupdq[:, :, start_row:start_row + nrows, :].copy() - pixeldq = ramp_data.pixeldq[start_row:start_row + nrows, :].copy() + data = ramp_data.data[:, :, start_row : start_row + nrows, :].copy() + err = ramp_data.err[:, :, start_row : start_row + nrows, :].copy() + groupdq = ramp_data.groupdq[:, :, start_row : start_row + nrows, :].copy() + pixeldq = ramp_data.pixeldq[start_row : start_row + nrows, :].copy() ramp_data_slice.set_arrays(data, err, groupdq, pixeldq) @@ -473,7 +480,8 @@ def slice_ramp_data(ramp_data, start_row, nrows): group_time=ramp_data.group_time, groupgap=ramp_data.groupgap, nframes=ramp_data.nframes, - drop_frames1=ramp_data.drop_frames1) + drop_frames1=ramp_data.drop_frames1, + ) # Carry over DQ flags. ramp_data_slice.flags_do_not_use = ramp_data.flags_do_not_use @@ -539,12 +547,18 @@ def gls_fit_single(ramp_data, gain_2d, readnoise_2d, max_num_cr, save_opt): number_ints = data.shape[0] ngroups = data.shape[1] - slope_int, slope_err_int, dq_int, temp_dq, slopes, sum_weight = \ - create_integration_arrays(data.shape) + slope_int, slope_err_int, dq_int, temp_dq, slopes, sum_weight = create_integration_arrays(data.shape) # REFAC - (intercept_int, intercept_err_int, pedestal_int, first_group, shape_ampl, - ampl_int, ampl_err_int) = create_opt_res(save_opt, data.shape, max_num_cr) + ( + intercept_int, + intercept_err_int, + pedestal_int, + first_group, + shape_ampl, + ampl_int, + ampl_err_int, + ) = create_opt_res(save_opt, data.shape, max_num_cr) pixeldq = utils.reset_bad_gain(ramp_data, pixeldq, gain_2d) # Flag bad pixels in gain @@ -568,15 +582,31 @@ def gls_fit_single(ramp_data, gain_2d, readnoise_2d, max_num_cr, save_opt): if save_opt: first_group[:, :] = data[num_int, 0, :, :].copy() - (intercept_sect, intercept_var_sect, slope_sect, - slope_var_sect, cr_sect, cr_var_sect) = determine_slope( - ramp_data, - data_cube, input_var_sect, gdq_cube, - readnoise_2d, gain_2d, frame_time, group_time, - nframes_used, max_num_cr, saturated_flag, jump_flag, med_rates) + ( + intercept_sect, + intercept_var_sect, + slope_sect, + slope_var_sect, + cr_sect, + cr_var_sect, + ) = determine_slope( + ramp_data, + data_cube, + input_var_sect, + gdq_cube, + readnoise_2d, + gain_2d, + frame_time, + group_time, + nframes_used, + max_num_cr, + saturated_flag, + jump_flag, + med_rates, + ) slope_int[num_int, :, :] = slope_sect.copy() - v_mask = (slope_var_sect <= 0.) + v_mask = slope_var_sect <= 0.0 if v_mask.any(): # Replace negative or zero variances with a large value. 
slope_var_sect[v_mask] = utils.LARGE_VARIANCE @@ -588,7 +618,7 @@ def gls_fit_single(ramp_data, gain_2d, readnoise_2d, max_num_cr, save_opt): # If a pixel was flagged (by an earlier step) as saturated in # the first group, flag the pixel as bad. # Note: save s_mask until after the call to utils.gls_pedestal. - s_mask = (gdq_cube[0] == saturated_flag) + s_mask = gdq_cube[0] == saturated_flag if s_mask.any(): temp_dq[s_mask] = ramp_data.flags_do_not_use slope_err_int[num_int, :, :] = np.sqrt(slope_var_sect) @@ -596,8 +626,8 @@ def gls_fit_single(ramp_data, gain_2d, readnoise_2d, max_num_cr, save_opt): # We need to take a weighted average if (and only if) number_ints > 1. # Accumulate sum of slopes and sum of weights. if number_ints > 1: - weight = 1. / slope_var_sect - slopes[:, :] += (slope_sect * weight) + weight = 1.0 / slope_var_sect + slopes[:, :] += slope_sect * weight sum_weight[:, :] += weight if save_opt: @@ -606,8 +636,8 @@ def gls_fit_single(ramp_data, gain_2d, readnoise_2d, max_num_cr, save_opt): intercept_int[num_int, :, :] = intercept_sect.copy() intercept_err_int[num_int, :, :] = np.sqrt(np.abs(intercept_var_sect)) pedestal_int[num_int, :, :] = utils.gls_pedestal( - first_group[:, :], slope_int[num_int, :, :], - s_mask, frame_time, nframes_used) + first_group[:, :], slope_int[num_int, :, :], s_mask, frame_time, nframes_used + ) del s_mask ampl_int[num_int, :, :, :] = cr_sect.copy() @@ -616,16 +646,15 @@ def gls_fit_single(ramp_data, gain_2d, readnoise_2d, max_num_cr, save_opt): # Compress 4D->2D dq arrays for saturated and jump-detected # pixels pixeldq_sect = pixeldq[:, :].copy() - dq_int[num_int, :, :] = utils.dq_compress_sect( - ramp_data, num_int, gdq_cube, pixeldq_sect).copy() + dq_int[num_int, :, :] = utils.dq_compress_sect(ramp_data, num_int, gdq_cube, pixeldq_sect).copy() dq_int[num_int, :, :] |= temp_dq temp_dq[:, :] = 0 # initialize for next integration # Average the slopes over all integrations. if number_ints > 1: - sum_weight = np.where(sum_weight <= 0., 1., sum_weight) - recip_sum_weight = 1. / sum_weight + sum_weight = np.where(sum_weight <= 0.0, 1.0, sum_weight) + recip_sum_weight = 1.0 / sum_weight slopes *= recip_sum_weight gls_err = np.sqrt(recip_sum_weight) @@ -655,8 +684,7 @@ def gls_fit_single(ramp_data, gain_2d, readnoise_2d, max_num_cr, save_opt): if save_opt: # collect optional results for output # Get the zero-point intercepts and the cosmic-ray amplitudes for # each integration (even if there's only one integration). - gls_opt_info = (intercept_int, intercept_err_int, - pedestal_int, ampl_int, ampl_err_int) + gls_opt_info = (intercept_int, intercept_err_int, pedestal_int, ampl_int, ampl_err_int) else: gls_opt_info = None @@ -735,14 +763,24 @@ def create_opt_res(save_opt, dims, max_num_cr): ampl_int = None ampl_err_int = None - return (intercept_int, intercept_err_int, pedestal_int, first_group, - shape_ampl, ampl_int, ampl_err_int) + return (intercept_int, intercept_err_int, pedestal_int, first_group, shape_ampl, ampl_int, ampl_err_int) def determine_slope( - ramp_data, data_sect, input_var_sect, gdq_sect, readnoise_sect, gain_sect, - frame_time, group_time, nframes_used, max_num_cr, saturated_flag, - jump_flag, med_rates): + ramp_data, + data_sect, + input_var_sect, + gdq_sect, + readnoise_sect, + gain_sect, + frame_time, + group_time, + nframes_used, + max_num_cr, + saturated_flag, + jump_flag, + med_rates, +): """Iteratively fit a slope, intercept, and cosmic rays to a ramp. 
This function fits a ramp, possibly with discontinuities (cosmic-ray @@ -903,11 +941,22 @@ def determine_slope( ngroups, nrows, ncols = data_sect.shape if ngroups == 1: return determine_slope_one_group( - ramp_data, data_sect, input_var_sect, gdq_sect, readnoise_sect, - gain_sect, frame_time, group_time, nframes_used, max_num_cr, - saturated_flag, jump_flag, med_rates) - - slope_diff_cutoff = 1.e-5 + ramp_data, + data_sect, + input_var_sect, + gdq_sect, + readnoise_sect, + gain_sect, + frame_time, + group_time, + nframes_used, + max_num_cr, + saturated_flag, + jump_flag, + med_rates, + ) + + slope_diff_cutoff = 1.0e-5 # These will be updated in the loop. # TODO The next line assumes more than one group @@ -925,11 +974,22 @@ def determine_slope( temp_use_extra_terms = False while not done: - (intercept_sect, int_var_sect, slope_sect, - slope_var_sect, cr_sect, cr_var_sect) = compute_slope( - data_sect, input_var_sect, gdq_sect, readnoise_sect, gain_sect, - prev_fit, prev_slope_sect, frame_time, group_time, nframes_used, - max_num_cr, saturated_flag, jump_flag, temp_use_extra_terms) + (intercept_sect, int_var_sect, slope_sect, slope_var_sect, cr_sect, cr_var_sect) = compute_slope( + data_sect, + input_var_sect, + gdq_sect, + readnoise_sect, + gain_sect, + prev_fit, + prev_slope_sect, + frame_time, + group_time, + nframes_used, + max_num_cr, + saturated_flag, + jump_flag, + temp_use_extra_terms, + ) iter += 1 if iter == NUM_ITER_NO_EXTRA_TERMS: @@ -941,29 +1001,38 @@ def determine_slope( # If there are pixels with zero or negative variance, ignore # them when taking the difference between the slopes computed # in the current and previous iterations. - slope_diff = np.where( - slope_var_sect > 0., prev_slope_sect - slope_sect, 0.) + slope_diff = np.where(slope_var_sect > 0.0, prev_slope_sect - slope_sect, 0.0) max_slope_diff = np.abs(slope_diff).max() if iter >= MIN_ITER and max_slope_diff < slope_diff_cutoff: done = True current_fit = evaluate_fit( - intercept_sect, slope_sect, cr_sect, frame_time, - group_time, gdq_sect, jump_flag) + intercept_sect, slope_sect, cr_sect, frame_time, group_time, gdq_sect, jump_flag + ) - prev_fit = positive_fit(current_fit) # use for next iteration + prev_fit = positive_fit(current_fit) # use for next iteration del current_fit prev_slope_sect = slope_sect.copy() - return (intercept_sect, int_var_sect, slope_sect, - slope_var_sect, cr_sect, cr_var_sect) + return (intercept_sect, int_var_sect, slope_sect, slope_var_sect, cr_sect, cr_var_sect) def determine_slope_one_group( - ramp_data, data_sect, input_var_sect, gdq_sect, readnoise_sect, gain_sect, - frame_time, group_time, nframes_used, max_num_cr, saturated_flag, - jump_flag, med_rates): + ramp_data, + data_sect, + input_var_sect, + gdq_sect, + readnoise_sect, + gain_sect, + frame_time, + group_time, + nframes_used, + max_num_cr, + saturated_flag, + jump_flag, + med_rates, +): """ The special case where an integration has only one group. @@ -1053,7 +1122,7 @@ def determine_slope_one_group( slope_sect = data_sect[0, :, :] / group_time slope_var_sect = np.zeros(imshape, dtype=np.float32) - var_r = 12. * (readnoise_sect / group_time)**2 + var_r = 12.0 * (readnoise_sect / group_time) ** 2 var_p = med_rates / (group_time * gain_sect) # Handle ZEROFRAME @@ -1061,7 +1130,7 @@ def determine_slope_one_group( for pix in ramp_data.zframe_locs[ramp_data.current_integ]: row, col = pix slope_sect = data_sect[0, row, col] / frame_time - var_r[row, col] = 12. * (readnoise_sect[row, col] / frame_time)**2. 
+ var_r[row, col] = 12.0 * (readnoise_sect[row, col] / frame_time) ** 2.0 var_p[row, col] = med_rates[row, col] / (frame_time * gain_sect[row, col]) slope_var_sect = var_r + var_p @@ -1069,12 +1138,10 @@ def determine_slope_one_group( cr_sect = np.zeros(cubeshape, dtype=np.float32) # Not sure what this is cr_var_sect = np.zeros(cubeshape, dtype=np.float32) # Not sure what this is - return (intercept_sect, int_var_sect, slope_sect, - slope_var_sect, cr_sect, cr_var_sect) + return (intercept_sect, int_var_sect, slope_sect, slope_var_sect, cr_sect, cr_var_sect) -def evaluate_fit( - intercept_sect, slope_sect, cr_sect, frame_time, group_time, gdq_sect, jump_flag): +def evaluate_fit(intercept_sect, slope_sect, cr_sect, frame_time, group_time, gdq_sect, jump_flag): """Evaluate the fit (intercept, slope, cosmic-ray amplitudes). Parameters @@ -1109,7 +1176,7 @@ def evaluate_fit( fit_model and data_sect should not differ by much. """ - shape_3d = gdq_sect.shape # the ramp, (ngroups, ny, nx) + shape_3d = gdq_sect.shape # the ramp, (ngroups, ny, nx) ngroups = gdq_sect.shape[0] # This array is also created in function compute_slope. @@ -1127,8 +1194,7 @@ def evaluate_fit( # The independent variable, in seconds at each image pixel. ind_var = np.zeros(shape_3d, dtype=np.float64) M = round(group_time / frame_time) - iv = np.arange(ngroups, dtype=np.float64) * group_time + \ - frame_time * (M + 1.) / 2. + iv = np.arange(ngroups, dtype=np.float64) * group_time + frame_time * (M + 1.0) / 2.0 iv = iv.reshape((ngroups, 1, 1)) ind_var += iv @@ -1143,8 +1209,8 @@ def evaluate_fit( # Add an offset for each cosmic ray. for n in range(local_max_num_cr): - heaviside[:] = np.where(cr_cumsum > n, 1., 0.) - fit_model += (heaviside * cr_sect[:, :, n]) + heaviside[:] = np.where(cr_cumsum > n, 1.0, 0.0) + fit_model += heaviside * cr_sect[:, :, n] return fit_model @@ -1171,13 +1237,25 @@ def positive_fit(current_fit): negative values will have been replaced by a positive value. """ - return np.where(current_fit <= 0., FIT_MUST_BE_POSITIVE, current_fit) + return np.where(current_fit <= 0.0, FIT_MUST_BE_POSITIVE, current_fit) def compute_slope( - data_sect, input_var_sect, gdq_sect, readnoise_sect, gain_sect, - prev_fit, prev_slope_sect, frame_time, group_time, nframes_used, - max_num_cr, saturated_flag, jump_flag, use_extra_terms): + data_sect, + input_var_sect, + gdq_sect, + readnoise_sect, + gain_sect, + prev_fit, + prev_slope_sect, + frame_time, + group_time, + nframes_used, + max_num_cr, + saturated_flag, + jump_flag, + use_extra_terms, +): """Set up the call to fit a slope to ramp data. This loops over the number of cosmic rays (jumps). That is, all the @@ -1279,7 +1357,7 @@ def compute_slope( # a negative number. The test `ncr_mask = (sum_flagged == num_cr)` # will therefore never match, since num_cr is zero or larger, and the # pixel will not be included in any ncr_mask. - mask1 = (gdq_sect[0, :, :] == saturated_flag) + mask1 = gdq_sect[0, :, :] == saturated_flag sum_flagged[mask1] = -1 # one_group_mask flags pixels that are not saturated in the first @@ -1287,7 +1365,7 @@ def compute_slope( # group). For these pixels, we will assign a value to the slope # image by just dividing the value in the first group by group_time. if len(gdq_sect) > 1: - mask2 = (gdq_sect[1, :, :] == saturated_flag) + mask2 = gdq_sect[1, :, :] == saturated_flag sum_flagged[mask2] = -1 one_group_mask = np.bitwise_and(mask2, np.bitwise_not(mask1)) del mask2 @@ -1299,8 +1377,7 @@ def compute_slope( # pixels are saturated. 
This is not a flag, it's a value to be # added to the diagonal of the covariance matrix. saturated = np.empty(data_sect.shape, dtype=np.float64) - saturated[:] = np.where( - np.bitwise_and(gdq_sect, saturated_flag), HUGE_FOR_LOW_WEIGHT, 0.) + saturated[:] = np.where(np.bitwise_and(gdq_sect, saturated_flag), HUGE_FOR_LOW_WEIGHT, 0.0) # Create arrays to be populated and then returned. shape = data_sect.shape @@ -1326,7 +1403,7 @@ def compute_slope( # the ramp, then fit slopes with one CR hit, then with two, etc. for num_cr in range(max_num_cr + 1): ngroups = len(data_sect) - ncr_mask = (sum_flagged == num_cr) + ncr_mask = sum_flagged == num_cr # Number of detector pixels flagged with num_cr CRs within the ramp. nz = ncr_mask.sum(dtype=np.int32) @@ -1360,8 +1437,18 @@ def compute_slope( saturated_data[k] = saturated[k][ncr_mask] result, variances = gls_fit( - ramp_data, prev_fit_data, prev_slope_data, readnoise, gain, frame_time, - group_time, nframes_used, num_cr, cr_flagged_2d, saturated_data) + ramp_data, + prev_fit_data, + prev_slope_data, + readnoise, + gain, + frame_time, + group_time, + nframes_used, + num_cr, + cr_flagged_2d, + saturated_data, + ) # Copy the intercept, slope, and cosmic-ray amplitudes and their # variances to the arrays to be returned. @@ -1380,12 +1467,22 @@ def compute_slope( cr_sect[ncr_mask, i] = result[:, 2 + i].copy() cr_var_sect[ncr_mask, i] = variances[:, 2 + i].copy() - return (intercept_sect, int_var_sect, slope_sect, slope_var_sect, - cr_sect, cr_var_sect) - - -def gls_fit(ramp_data, prev_fit_data, prev_slope_data, readnoise, gain, frame_time, - group_time, nframes_used, num_cr, cr_flagged_2d, saturated_data): + return (intercept_sect, int_var_sect, slope_sect, slope_var_sect, cr_sect, cr_var_sect) + + +def gls_fit( + ramp_data, + prev_fit_data, + prev_slope_data, + readnoise, + gain, + frame_time, + group_time, + nframes_used, + num_cr, + cr_flagged_2d, + saturated_data, +): """Generalized least squares linear fit. It is assumed that every input pixel has num_cr cosmic-ray hits @@ -1479,17 +1576,15 @@ def gls_fit(ramp_data, prev_fit_data, prev_slope_data, readnoise, gain, frame_ti # 0 to 1 is the location of a cosmic ray hit; the first 1 in a column # corresponds to the value in cr_flagged_2d being 1. x = np.zeros((nz, ngroups, 2 + num_cr), dtype=np.float64) - x[:, :, 0] = 1. - x[:, :, 1] = np.arange(ngroups, dtype=np.float64) * group_time + \ - frame_time * (M + 1.) / 2. + x[:, :, 0] = 1.0 + x[:, :, 1] = np.arange(ngroups, dtype=np.float64) * group_time + frame_time * (M + 1.0) / 2.0 if num_cr > 0: sum_crs = cr_flagged_2d.cumsum(axis=0) for k in range(ngroups): s = slice(k, ngroups) for n in range(1, num_cr + 1): - temp = np.where(np.logical_and(cr_flagged_2d[k] == 1, - sum_crs[k] == n)) + temp = np.where(np.logical_and(cr_flagged_2d[k] == 1, sum_crs[k] == n)) if len(temp[0]) > 0: index = (temp[0], s, n + 1) x[index] = 1 @@ -1509,9 +1604,9 @@ def gls_fit(ramp_data, prev_fit_data, prev_slope_data, readnoise, gain, frame_ti prev_fit_T = np.transpose(prev_fit_data, (1, 0)) for k in range(ngroups): # Populate the upper right, row by row. - ramp_cov[:, k, k:ngroups] = prev_fit_T[:, k:k + 1] + ramp_cov[:, k, k:ngroups] = prev_fit_T[:, k : k + 1] # Populate the lower left, column by column. 
- ramp_cov[:, k:ngroups, k] = prev_fit_T[:, k:k + 1] + ramp_cov[:, k:ngroups, k] = prev_fit_T[:, k : k + 1] # Give saturated pixels a very high high variance (hence a low weight) ramp_cov[:, k, k] += saturated_data[k, :] del prev_fit_T @@ -1521,11 +1616,11 @@ def gls_fit(ramp_data, prev_fit_data, prev_slope_data, readnoise, gain, frame_ti iden = np.identity(ngroups) rn3d = readnoise.reshape((nz, 1, 1)) - ramp_cov += (iden * rn3d**2) + ramp_cov += iden * rn3d**2 # prev_slope_data must be non-negative. - flags = prev_slope_data < 0. - prev_slope_data[flags] = 1. + flags = prev_slope_data < 0.0 + prev_slope_data[flags] = 1.0 # The resulting fit parameters are # (xT @ ramp_cov^-1 @ x)^-1 @ [xT @ ramp_cov^-1 @ y] @@ -1544,11 +1639,11 @@ def gls_fit(ramp_data, prev_fit_data, prev_slope_data, readnoise, gain, frame_ti # temp1 = xT @ ramp_invcov # np.einsum use is equivalent to matrix multiplication # shape of temp1 is (nz, 2 + num_cr, ngroups) - temp1 = np.einsum('...ij,...jk->...ik', xT, ramp_invcov) + temp1 = np.einsum("...ij,...jk->...ik", xT, ramp_invcov) # temp_var = xT @ ramp_invcov @ x # shape of temp_var is (nz, 2 + num_cr, 2 + num_cr) - temp_var = np.einsum('...ij,...jk->...ik', temp1, x) + temp_var = np.einsum("...ij,...jk->...ik", temp1, x) # `fitparam_cov` is an array of nz covariance matrices. # fitparam_cov = (xT @ ramp_invcov @ x)^-1 @@ -1569,10 +1664,10 @@ def gls_fit(ramp_data, prev_fit_data, prev_slope_data, readnoise, gain, frame_ti # [xT @ ramp_invcov @ y] # shape of temp2 is (nz, 2 + num_cr, 1) - temp2 = np.einsum('...ij,...jk->...ik', temp1, y) + temp2 = np.einsum("...ij,...jk->...ik", temp1, y) # shape of fitparam is (nz, 2 + num_cr, 1) - fitparam = np.einsum('...ij,...jk->...ik', fitparam_cov, temp2) + fitparam = np.einsum("...ij,...jk->...ik", fitparam_cov, temp2) r_shape = fitparam.shape fitparam2d = fitparam.reshape((r_shape[0], r_shape[1])) del fitparam diff --git a/src/stcal/ramp_fitting/ols_cas22/__init__.py b/src/stcal/ramp_fitting/ols_cas22/__init__.py index 9d0e18e6..439e4f1b 100644 --- a/src/stcal/ramp_fitting/ols_cas22/__init__.py +++ b/src/stcal/ramp_fitting/ols_cas22/__init__.py @@ -1,4 +1,4 @@ from ._fit import fit_ramps, RampFitOutputs, Parameter, Variance from ._jump import JUMP_DET -__all__ = ['fit_ramps', 'RampFitOutputs', 'Parameter', 'Variance', 'Diff', 'JUMP_DET'] +__all__ = ["fit_ramps", "RampFitOutputs", "Parameter", "Variance", "Diff", "JUMP_DET"] diff --git a/src/stcal/ramp_fitting/ols_cas22_fit.py b/src/stcal/ramp_fitting/ols_cas22_fit.py index 663ede7d..ced7c0ec 100644 --- a/src/stcal/ramp_fitting/ols_cas22_fit.py +++ b/src/stcal/ramp_fitting/ols_cas22_fit.py @@ -97,11 +97,11 @@ def fit_ramps_casertano( # parameters outside the cython code. 
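Stepping back to the `gls_fit.py` hunk above: the chained `einsum` calls implement a batched generalized-least-squares solve, `(xT @ ramp_cov^-1 @ x)^-1 @ [xT @ ramp_cov^-1 @ y]`, for every pixel at once. A hedged sketch with identity matrices standing in for the inverse ramp covariance:

```python
# Batched GLS normal equations via einsum; C^-1 is faked as the identity.
import numpy as np

nz, ngroups, nparams = 5, 4, 2
rng = np.random.default_rng(1)
x = np.stack([np.vander(np.arange(ngroups, dtype=float), nparams, increasing=True)] * nz)
y = rng.normal(size=(nz, ngroups, 1))
ramp_invcov = np.stack([np.eye(ngroups)] * nz)

xT = np.transpose(x, (0, 2, 1))
temp1 = np.einsum("...ij,...jk->...ik", xT, ramp_invcov)
temp_var = np.einsum("...ij,...jk->...ik", temp1, x)
fitparam_cov = np.linalg.inv(temp_var)             # (xT C^-1 x)^-1
temp2 = np.einsum("...ij,...jk->...ik", temp1, y)  # xT C^-1 y
fitparam = np.einsum("...ij,...jk->...ik", fitparam_cov, temp2)
print(fitparam.shape)  # (nz, nparams, 1): intercept and slope per pixel
```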
kwargs = {} if threshold_intercept is not None: - kwargs['intercept'] = threshold_intercept + kwargs["intercept"] = threshold_intercept if threshold_constant is not None: - kwargs['constant'] = threshold_constant + kwargs["constant"] = threshold_constant - resultants_unit = getattr(resultants, 'unit', None) + resultants_unit = getattr(resultants, "unit", None) if resultants_unit is not None: resultants = resultants.to(u.electron).value @@ -126,7 +126,8 @@ def fit_ramps_casertano( read_time, read_pattern, use_jump, - **kwargs) + **kwargs, + ) parameters = output.parameters.reshape(orig_shape[1:] + (2,)) variances = output.variances.reshape(orig_shape[1:] + (3,)) diff --git a/src/stcal/ramp_fitting/ols_fit.py b/src/stcal/ramp_fitting/ols_fit.py index 06cd2606..3bf9106e 100644 --- a/src/stcal/ramp_fitting/ols_fit.py +++ b/src/stcal/ramp_fitting/ols_fit.py @@ -21,8 +21,7 @@ log.setLevel(logging.DEBUG) -def ols_ramp_fit_multi( - ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, weighting, max_cores): +def ols_ramp_fit_multi(ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, weighting, max_cores): """ Setup the inputs to ols_ramp_fit with and without multiprocessing. The inputs will be sliced into the number of cores that are being used for @@ -83,7 +82,7 @@ def ols_ramp_fit_multi( # flagged as DO_NOT_USE, those groups will be ignored by ramp fitting, and # the input model arrays will be resized appropriately. If all pixels in # all groups are flagged, return None for the models. - if ramp_data.instrument_name == 'MIRI' and ramp_data.data.shape[1] > 1: + if ramp_data.instrument_name == "MIRI" and ramp_data.data.shape[1] > 1: miri_ans = discard_miri_groups(ramp_data) # The function returns False if the removed groups leaves no data to be # processed. If this is the case, return None for all expected variables @@ -100,7 +99,8 @@ def ols_ramp_fit_multi( if number_slices == 1: # Single threaded computation image_info, integ_info, opt_info = ols_ramp_fit_single( - ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, weighting) + ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, weighting + ) if image_info is None or integ_info is None: return None, None, None @@ -109,15 +109,15 @@ def ols_ramp_fit_multi( # Call ramp fitting for multi-processor (multiple data slices) case else: image_info, integ_info, opt_info = ols_ramp_fit_multiprocessing( - ramp_data, buffsize, save_opt, - readnoise_2d, gain_2d, weighting, number_slices) + ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, weighting, number_slices + ) return image_info, integ_info, opt_info def ols_ramp_fit_multiprocessing( - ramp_data, buffsize, save_opt, - readnoise_2d, gain_2d, weighting, number_slices): + ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, weighting, number_slices +): """ Fit a ramp using ordinary least squares. 
Calculate the count rate for each pixel in all data cube sections and all integrations, equal to the weighted @@ -161,8 +161,8 @@ def ols_ramp_fit_multiprocessing( """ log.info(f"Number of processors used for multiprocessing: {number_slices}") slices, rows_per_slice = compute_slices_for_starmap( - ramp_data, buffsize, save_opt, - readnoise_2d, gain_2d, weighting, number_slices) + ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, weighting, number_slices + ) pool = Pool(processes=number_slices) pool_results = pool.starmap(ols_ramp_fit_single, slices) @@ -171,7 +171,8 @@ def ols_ramp_fit_multiprocessing( # Reassemble results image_info, integ_info, opt_info = assemble_pool_results( - ramp_data, save_opt, pool_results, rows_per_slice) + ramp_data, save_opt, pool_results, rows_per_slice + ) return image_info, integ_info, opt_info @@ -212,8 +213,7 @@ def assemble_pool_results(ramp_data, save_opt, pool_results, rows_per_slice): """ # Create output arrays for each output tuple. The input ramp data and # slices are needed for this. - image_info, integ_info, opt_info = create_output_info( - ramp_data, pool_results, save_opt) + image_info, integ_info, opt_info = create_output_info(ramp_data, pool_results, save_opt) # Loop over the slices and assemble each slice into the main return arrays. current_row_start = 0 @@ -303,10 +303,8 @@ def get_opt_slice(opt_info, opt_slice, row_start, nrows): nrows: int The number of rows int the current slice. """ - (slope, sigslope, var_poisson, var_rnoise, - yint, sigyint, pedestal, weights, crmag) = opt_info - (oslope, osigslope, ovar_poisson, ovar_rnoise, - oyint, osigyint, opedestal, oweights, ocrmag) = opt_slice + (slope, sigslope, var_poisson, var_rnoise, yint, sigyint, pedestal, weights, crmag) = opt_info + (oslope, osigslope, ovar_poisson, ovar_rnoise, oyint, osigyint, opedestal, oweights, ocrmag) = opt_slice srow, erow = row_start, row_start + nrows @@ -317,14 +315,14 @@ def get_opt_slice(opt_info, opt_slice, row_start, nrows): # using the max size for this dimension. To ensure correct assignment is # done during this step, the second dimension, as well as the row # dimension, must be specified. 
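
The comment above describes the idiom used in the assignments just below: each slice may have found fewer segments than the global maximum, so the slice result is copied into the full-size output with the segment axis limited to the slice's own extent. A toy version with invented shapes:

import numpy as np

# Slice computed with 2 segments, covering rows 2..3 of a 6-row frame;
# the full output is sized to the maximum segment count across slices.
nints, max_seg, nrows, ncols = 2, 4, 6, 3
oslope = np.ones((nints, 2, 2, ncols))            # per-slice optional result
slope = np.zeros((nints, max_seg, nrows, ncols))  # full-frame output

srow, erow = 2, 4
slope[:, : oslope.shape[1], srow:erow, :] = oslope  # same idiom as below
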
- slope[:, :oslope.shape[1], srow:erow, :] = oslope - sigslope[:, :osigslope.shape[1], srow:erow, :] = osigslope - var_poisson[:, :ovar_poisson.shape[1], srow:erow, :] = ovar_poisson - var_rnoise[:, :ovar_rnoise.shape[1], srow:erow, :] = ovar_rnoise - yint[:, :oyint.shape[1], srow:erow, :] = oyint - sigyint[:, :osigyint.shape[1], srow:erow, :] = osigyint - weights[:, :oweights.shape[1], srow:erow, :] = oweights - crmag[:, :ocrmag.shape[1], srow:erow, :] = ocrmag + slope[:, : oslope.shape[1], srow:erow, :] = oslope + sigslope[:, : osigslope.shape[1], srow:erow, :] = osigslope + var_poisson[:, : ovar_poisson.shape[1], srow:erow, :] = ovar_poisson + var_rnoise[:, : ovar_rnoise.shape[1], srow:erow, :] = ovar_rnoise + yint[:, : oyint.shape[1], srow:erow, :] = oyint + sigyint[:, : osigyint.shape[1], srow:erow, :] = osigyint + weights[:, : oweights.shape[1], srow:erow, :] = oweights + crmag[:, : ocrmag.shape[1], srow:erow, :] = ocrmag pedestal[:, srow:erow, :] = opedestal # Different shape (3-D, not 4-D) @@ -386,8 +384,17 @@ def create_output_info(ramp_data, pool_results, save_opt): opedestal = np.zeros(integ_shape, dtype=np.float32) ocrmag = np.zeros(crmag_shape, dtype=np.float32) - opt_info = (oslope, osigslope, ovar_poisson, ovar_rnoise, - oyint, osigyint, opedestal, oweights, ocrmag) + opt_info = ( + oslope, + osigslope, + ovar_poisson, + ovar_rnoise, + oyint, + osigyint, + opedestal, + oweights, + ocrmag, + ) else: opt_info = None @@ -429,8 +436,8 @@ def get_max_segs_crs(pool_results): def compute_slices_for_starmap( - ramp_data, buffsize, save_opt, - readnoise_2d, gain_2d, weighting, number_slices): + ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, weighting, number_slices +): """ Creates the slices needed for each process for multiprocessing. The slices for the arguments needed for ols_ramp_fit_single. 
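
For context on compute_slices_for_starmap and ols_ramp_fit_multiprocessing, here is a self-contained toy of the slice/starmap/reassemble pattern. fit_rows stands in for ols_ramp_fit_single, and every shape and value is invented:

import numpy as np
from multiprocessing import Pool

def fit_rows(data_slice, scale):
    # Stand-in for ols_ramp_fit_single: collapse the group axis per pixel.
    return data_slice.mean(axis=0) * scale

if __name__ == "__main__":
    ngroups, nrows, ncols = 5, 6, 3
    data = np.arange(ngroups * nrows * ncols, dtype=float).reshape(ngroups, nrows, ncols)

    # Build one argument tuple per worker, as compute_slices_for_starmap does.
    rows_per_slice = [3, 3]
    slices, start = [], 0
    for n in rows_per_slice:
        slices.append((data[:, start : start + n, :].copy(), 2.0))
        start += n

    with Pool(processes=2) as pool:
        results = pool.starmap(fit_rows, slices)

    # Reassemble per-slice results into the full frame by row offset,
    # mirroring assemble_pool_results.
    out = np.empty((nrows, ncols))
    start = 0
    for res, n in zip(results, rows_per_slice):
        out[start : start + n, :] = res
        start += n
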
@@ -470,12 +477,9 @@ def compute_slices_for_starmap( start_row = 0 for k in range(len(rslices)): ramp_slice = slice_ramp_data(ramp_data, start_row, rslices[k]) - rnoise_slice = readnoise_2d[start_row:start_row + rslices[k], :].copy() - gain_slice = gain_2d[start_row:start_row + rslices[k], :].copy() - slices.insert( - k, - (ramp_slice, buffsize, save_opt, - rnoise_slice, gain_slice, weighting)) + rnoise_slice = readnoise_2d[start_row : start_row + rslices[k], :].copy() + gain_slice = gain_2d[start_row : start_row + rslices[k], :].copy() + slices.insert(k, (ramp_slice, buffsize, save_opt, rnoise_slice, gain_slice, weighting)) start_row = start_row + rslices[k] return slices, rslices @@ -538,16 +542,15 @@ def slice_ramp_data(ramp_data, start_row, nrows): ramp_data_slice = ramp_fit_class.RampData() # Slice data by row - data = ramp_data.data[:, :, start_row:start_row + nrows, :].copy() - err = ramp_data.err[:, :, start_row:start_row + nrows, :].copy() - groupdq = ramp_data.groupdq[:, :, start_row:start_row + nrows, :].copy() - pixeldq = ramp_data.pixeldq[start_row:start_row + nrows, :].copy() + data = ramp_data.data[:, :, start_row : start_row + nrows, :].copy() + err = ramp_data.err[:, :, start_row : start_row + nrows, :].copy() + groupdq = ramp_data.groupdq[:, :, start_row : start_row + nrows, :].copy() + pixeldq = ramp_data.pixeldq[start_row : start_row + nrows, :].copy() - ramp_data_slice.set_arrays( - data, err, groupdq, pixeldq) + ramp_data_slice.set_arrays(data, err, groupdq, pixeldq) if ramp_data.zeroframe is not None: - ramp_data_slice.zeroframe = ramp_data.zeroframe[:, start_row:start_row + nrows, :].copy() + ramp_data_slice.zeroframe = ramp_data.zeroframe[:, start_row : start_row + nrows, :].copy() # Carry over meta data. ramp_data_slice.set_meta( @@ -556,7 +559,8 @@ def slice_ramp_data(ramp_data, start_row, nrows): group_time=ramp_data.group_time, groupgap=ramp_data.groupgap, nframes=ramp_data.nframes, - drop_frames1=ramp_data.drop_frames1) + drop_frames1=ramp_data.drop_frames1, + ) # Carry over DQ flags. ramp_data_slice.flags_do_not_use = ramp_data.flags_do_not_use @@ -588,16 +592,16 @@ def find_0th_one_good_group(ramp_data): # Find pixels with good group 0 good_0 = np.zeros((nrows, ncols), dtype=int) - cintegdq_0 = cintegdq[0, :, :] + cintegdq_0 = cintegdq[0, :, :] good_0[cintegdq_0 == 0] = 1 # Pixels with good 0 group # Find pixels with only one good group - cinteg_sm = np.zeros((ngroups-1, nrows, ncols), dtype=int) + cinteg_sm = np.zeros((ngroups - 1, nrows, ncols), dtype=int) cintegdq_1 = cintegdq[1:, :, :] # Current integration DQ array excluding 0th group cinteg_sm[cintegdq_1 != 0] = 1 # Mark flagged groups to use in sum gp_sum = cinteg_sm.sum(axis=0) # Find the number of flagged groups excluding 0th group bad_1_ = np.zeros((nrows, ncols), dtype=int) - bad_1_[gp_sum == ngroups-1] = 1 # Pixels with all groups flagged after the 0th group + bad_1_[gp_sum == ngroups - 1] = 1 # Pixels with all groups flagged after the 0th group # Get the locations of pixels that have good zeroeth group, with # all other groups bad. @@ -618,8 +622,7 @@ def find_0th_one_good_group(ramp_data): ramp_data.one_groups_time = (ramp_data.nframes + 1) * ramp_data.frame_time / 2 -def ols_ramp_fit_single( - ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, weighting): +def ols_ramp_fit_single(ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, weighting): """ Fit a ramp using ordinary least squares. 
Calculate the count rate for each pixel in all data cube sections and all integrations, equal to the weighted @@ -678,9 +681,9 @@ def ols_ramp_fit_single( orig_cubeshape = (ngroups, nrows, ncols) if ngroups == 1: - log.warning('Dataset has NGROUPS=1, so count rates for each integration ') - log.warning('will be calculated as the value of that 1 group divided by ') - log.warning('the group exposure time.') + log.warning("Dataset has NGROUPS=1, so count rates for each integration ") + log.warning("will be calculated as the value of that 1 group divided by ") + log.warning("the group exposure time.") # In this 'First Pass' over the data, loop over integrations and data # sections to calculate the estimated median slopes, which will be used @@ -688,8 +691,7 @@ def ols_ramp_fit_single( # as is done in the jump detection step, except here CR-affected and # saturated groups have already been flagged. The actual, fit, slopes for # each segment are also calculated here. - fit_slopes_ans = ramp_fit_slopes( - ramp_data, gain_2d, readnoise_2d, save_opt, weighting) + fit_slopes_ans = ramp_fit_slopes(ramp_data, gain_2d, readnoise_2d, save_opt, weighting) if fit_slopes_ans[0] == "saturated": return fit_slopes_ans[1:] @@ -699,8 +701,7 @@ def ols_ramp_fit_single( # noise only, read noise only, and the combination of Poisson noise and # read noise. The integration-specific variances are 3D arrays, and the # segment-specific variances are 4D arrays. - variances_ans = ramp_fit_compute_variances( - ramp_data, gain_2d, readnoise_2d, fit_slopes_ans) + variances_ans = ramp_fit_compute_variances(ramp_data, gain_2d, readnoise_2d, fit_slopes_ans) # Now that the segment-specific and integration-specific variances have # been calculated, the segment-specific, integration-specific, and @@ -712,8 +713,8 @@ def ols_ramp_fit_single( # slope = sum_over_integs_and_segs(slope_seg/var_seg)/ # sum_over_integs_and_segs(1/var_seg) image_info, integ_info, opt_info = ramp_fit_overall( - ramp_data, orig_cubeshape, orig_ngroups, buffsize, fit_slopes_ans, - variances_ans, save_opt, tstart) + ramp_data, orig_cubeshape, orig_ngroups, buffsize, fit_slopes_ans, variances_ans, save_opt, tstart + ) return image_info, integ_info, opt_info @@ -752,8 +753,8 @@ def discard_miri_groups(ramp_data): # Check if there are remaining groups before accessing data if ngroups < 1: # no usable data - log.error('1. All groups have all pixels flagged as DO_NOT_USE,') - log.error(' so will not process this dataset.') + log.error("1. All groups have all pixels flagged as DO_NOT_USE,") + log.error(" so will not process this dataset.") return False groupdq = groupdq[:, 1:, :, :] @@ -771,7 +772,7 @@ def discard_miri_groups(ramp_data): data = data[:, num_bad_slices:, :, :] err = err[:, num_bad_slices:, :, :] - log.info('Number of leading groups that are flagged as DO_NOT_USE: %s', num_bad_slices) + log.info("Number of leading groups that are flagged as DO_NOT_USE: %s", num_bad_slices) # If all groups were flagged, the final group would have been picked up # in the while loop above, ngroups would have been set to 0, and Nones @@ -782,21 +783,21 @@ def discard_miri_groups(ramp_data): # Check if there are remaining groups before accessing data if ngroups < 1: # no usable data - log.error('2. All groups have all pixels flagged as DO_NOT_USE,') - log.error(' so will not process this dataset.') + log.error("2. 
All groups have all pixels flagged as DO_NOT_USE,") + log.error(" so will not process this dataset.") return False data = data[:, :-1, :, :] err = err[:, :-1, :, :] groupdq = groupdq[:, :-1, :, :] - log.info('MIRI dataset has all pixels in the final group flagged as DO_NOT_USE.') + log.info("MIRI dataset has all pixels in the final group flagged as DO_NOT_USE.") # Next block is to satisfy github issue 1681: # "MIRI FirstFrame and LastFrame minimum number of groups" if ngroups < 2: - log.warning('MIRI datasets require at least 2 groups/integration') - log.warning('(NGROUPS), so will not process this dataset.') + log.warning("MIRI datasets require at least 2 groups/integration") + log.warning("(NGROUPS), so will not process this dataset.") return False ramp_data.data = data @@ -883,14 +884,12 @@ def ramp_fit_slopes(ramp_data, gain_2d, readnoise_2d, save_opt, weighting): gdq_cube_shape = gdq_cube.shape # Get max number of segments fit in all integrations - max_seg, num_CRs = calc_num_seg( - gdq_cube, n_int, ramp_data.flags_jump_det, ramp_data.flags_do_not_use) + max_seg, num_CRs = calc_num_seg(gdq_cube, n_int, ramp_data.flags_jump_det, ramp_data.flags_do_not_use) del gdq_cube f_max_seg = 0 # final number to use, usually overwritten by actual value - dq_int, num_seg_per_int, sat_0th_group_int =\ - utils.alloc_arrays_1(n_int, imshape) + dq_int, num_seg_per_int, sat_0th_group_int = utils.alloc_arrays_1(n_int, imshape) opt_res = utils.OptRes(n_int, imshape, max_seg, ngroups, save_opt) @@ -926,7 +925,7 @@ def ramp_fit_slopes(ramp_data, gain_2d, readnoise_2d, save_opt, weighting): # data_sect = np.float32(data[num_int, :, :, :]) data_sect = data[num_int, :, :, :] if np.all(np.isnan(data_sect)): - log.error('Current data section is all nans, so not processing the section.') + log.error("Current data section is all nans, so not processing the section.") continue # first frame section for 1st group of current integration @@ -945,9 +944,20 @@ def ramp_fit_slopes(ramp_data, gain_2d, readnoise_2d, save_opt, weighting): # is deceiving; this in fact contains all the per-integration and # per-segment results that will eventually be used to compute the # final slopes, sigmas, etc. 
for the main (non-optional) products - t_dq_cube, inv_var, opt_res, f_max_seg, num_seg = \ - calc_slope(data_sect, gdq_sect, frame_time, opt_res, save_opt, rn_sect, - gain_sect, max_seg, ngroups, weighting, f_max_seg, ramp_data) + t_dq_cube, inv_var, opt_res, f_max_seg, num_seg = calc_slope( + data_sect, + gdq_sect, + frame_time, + opt_res, + save_opt, + rn_sect, + gain_sect, + max_seg, + ngroups, + weighting, + f_max_seg, + ramp_data, + ) del gain_sect @@ -957,12 +967,14 @@ def ramp_fit_slopes(ramp_data, gain_2d, readnoise_2d, save_opt, weighting): num_seg_per_int[num_int, rlo:rhi, :] = num_seg.reshape(sect_shape) # Populate integ-spec slice which is set if 0th group has SAT - sat_0th_group_int[num_int, rlo:rhi, :][np.bitwise_and( - gdq_sect[0, :, :], ramp_data.flags_saturated).astype(bool)] = 1 + sat_0th_group_int[num_int, rlo:rhi, :][ + np.bitwise_and(gdq_sect[0, :, :], ramp_data.flags_saturated).astype(bool) + ] = 1 pixeldq_sect = pixeldq[rlo:rhi, :].copy() dq_int[num_int, rlo:rhi, :] = utils.dq_compress_sect( - ramp_data, num_int, t_dq_cube, pixeldq_sect).copy() + ramp_data, num_int, t_dq_cube, pixeldq_sect + ).copy() del t_dq_cube @@ -995,8 +1007,18 @@ def ramp_fit_slopes(ramp_data, gain_2d, readnoise_2d, save_opt, weighting): ramp_data.groupdq = groupdq ramp_data.pixeldq = inpixeldq - return max_seg, gdq_cube_shape, f_max_seg, dq_int, num_seg_per_int,\ - sat_0th_group_int, opt_res, pixeldq, inv_var, med_rates + return ( + max_seg, + gdq_cube_shape, + f_max_seg, + dq_int, + num_seg_per_int, + sat_0th_group_int, + opt_res, + pixeldq, + inv_var, + med_rates, + ) def ramp_fit_compute_variances(ramp_data, gain_2d, readnoise_2d, fit_slopes_ans): @@ -1082,9 +1104,19 @@ def ramp_fit_compute_variances(ramp_data, gain_2d, readnoise_2d, fit_slopes_ans) num_seg_per_int = fit_slopes_ans[5] med_rates = fit_slopes_ans[9] - var_p3, var_r3, var_p4, var_r4, var_both4, var_both3, \ - inv_var_both4, s_inv_var_p3, s_inv_var_r3, s_inv_var_both3, segs_4 = \ - utils.alloc_arrays_2(n_int, imshape, max_seg) + ( + var_p3, + var_r3, + var_p4, + var_r4, + var_both4, + var_both3, + inv_var_both4, + s_inv_var_p3, + s_inv_var_r3, + s_inv_var_both3, + segs_4, + ) = utils.alloc_arrays_2(n_int, imshape, max_seg) # Loop over data integrations for num_int in range(n_int): @@ -1103,7 +1135,8 @@ def ramp_fit_compute_variances(ramp_data, gain_2d, readnoise_2d, fit_slopes_ans) # Calculate results needed to compute the variance arrays den_r3, den_p3, num_r3, segs_beg_3 = utils.calc_slope_vars( - ramp_data, rn_sect, gain_sect, gdq_sect, group_time, max_seg) + ramp_data, rn_sect, gain_sect, gdq_sect, group_time, max_seg + ) segs_4[num_int, :, rlo:rhi, :] = segs_beg_3 @@ -1131,51 +1164,50 @@ def ramp_fit_compute_variances(ramp_data, gain_2d, readnoise_2d, fit_slopes_ans) # Suppress, then re-enable harmless arithmetic warnings warnings.filterwarnings("ignore", ".*invalid value.*", RuntimeWarning) warnings.filterwarnings("ignore", ".*divide by zero.*", RuntimeWarning) - var_p4[num_int, :, :, :] *= (segs_4[num_int, :, :, :] > 0) - var_p4[var_p4 <= 0.] = utils.LARGE_VARIANCE + var_p4[num_int, :, :, :] *= segs_4[num_int, :, :, :] > 0 + var_p4[var_p4 <= 0.0] = utils.LARGE_VARIANCE - var_r4[num_int, :, :, :] *= (segs_4[num_int, :, :, :] > 0) - var_r4[var_r4 <= 0.] = utils.LARGE_VARIANCE + var_r4[num_int, :, :, :] *= segs_4[num_int, :, :, :] > 0 + var_r4[var_r4 <= 0.0] = utils.LARGE_VARIANCE # The sums of inverses of the variances are needed for later # variance calculations. - s_inv_var_p3[num_int, :, :] = (1. 
/ var_p4[num_int, :, :, :]).sum(axis=0) - var_p3[num_int, :, :] = 1. / s_inv_var_p3[num_int, :, :] - s_inv_var_r3[num_int, :, :] = (1. / var_r4[num_int, :, :, :]).sum(axis=0) - var_r3[num_int, :, :] = 1. / s_inv_var_r3[num_int, :, :] + s_inv_var_p3[num_int, :, :] = (1.0 / var_p4[num_int, :, :, :]).sum(axis=0) + var_p3[num_int, :, :] = 1.0 / s_inv_var_p3[num_int, :, :] + s_inv_var_r3[num_int, :, :] = (1.0 / var_r4[num_int, :, :, :]).sum(axis=0) + var_r3[num_int, :, :] = 1.0 / s_inv_var_r3[num_int, :, :] # Huge variances correspond to non-existing segments, so are reset to 0 # to nullify their contribution. - var_p3[var_p3 > utils.LARGE_VARIANCE_THRESHOLD] = 0. - var_p3[:, med_rates <= 0.] = 0. + var_p3[var_p3 > utils.LARGE_VARIANCE_THRESHOLD] = 0.0 + var_p3[:, med_rates <= 0.0] = 0.0 warnings.resetwarnings() - var_p4[num_int, :, med_rates <= 0.] = 0. + var_p4[num_int, :, med_rates <= 0.0] = 0.0 var_both4[num_int, :, :, :] = var_r4[num_int, :, :, :] + var_p4[num_int, :, :, :] - inv_var_both4[num_int, :, :, :] = 1. / var_both4[num_int, :, :, :] + inv_var_both4[num_int, :, :, :] = 1.0 / var_both4[num_int, :, :, :] # Want to retain values in the 4D arrays only for the segments that each # pixel has, so will zero out values for the higher indices. Creating # and manipulating intermediate arrays (views, such as var_p4_int # will zero out the appropriate indices in var_p4 and var_r4.) # Extract the slice of 4D arrays for the current integration - var_p4_int = var_p4[num_int, :, :, :] # [ segment, y, x ] + var_p4_int = var_p4[num_int, :, :, :] # [ segment, y, x ] inv_var_both4_int = inv_var_both4[num_int, :, :, :] # Zero out non-existing segments - var_p4_int *= (segs_4[num_int, :, :, :] > 0) - inv_var_both4_int *= (segs_4[num_int, :, :, :] > 0) + var_p4_int *= segs_4[num_int, :, :, :] > 0 + inv_var_both4_int *= segs_4[num_int, :, :, :] > 0 # reshape these arrays to simplify masking [ segment, 1D pixel ] - var_p4_int2 = var_p4_int.reshape( - (var_p4_int.shape[0], var_p4_int.shape[1] * var_p4_int.shape[2])) + var_p4_int2 = var_p4_int.reshape((var_p4_int.shape[0], var_p4_int.shape[1] * var_p4_int.shape[2])) s_inv_var_both3[num_int, :, :] = (inv_var_both4[num_int, :, :, :]).sum(axis=0) # Suppress, then re-enable harmless arithmetic warnings warnings.filterwarnings("ignore", ".*invalid value.*", RuntimeWarning) warnings.filterwarnings("ignore", ".*divide by zero.*", RuntimeWarning) - var_both3[num_int, :, :] = 1. 
/ s_inv_var_both3[num_int, :, :] + var_both3[num_int, :, :] = 1.0 / s_inv_var_both3[num_int, :, :] warnings.resetwarnings() del var_p4_int @@ -1183,8 +1215,8 @@ def ramp_fit_compute_variances(ramp_data, gain_2d, readnoise_2d, fit_slopes_ans) del gain_2d - var_p4 *= (segs_4[:, :, :, :] > 0) # Zero out non-existing segments - var_r4 *= (segs_4[:, :, :, :] > 0) + var_p4 *= segs_4[:, :, :, :] > 0 # Zero out non-existing segments + var_r4 *= segs_4[:, :, :, :] > 0 # Delete lots of arrays no longer needed if inv_var_both4_int is not None: @@ -1210,13 +1242,23 @@ def ramp_fit_compute_variances(ramp_data, gain_2d, readnoise_2d, fit_slopes_ans) ramp_data.groupdq = groupdq ramp_data.pixeldq = inpixeldq - return var_p3, var_r3, var_p4, var_r4, var_both4, var_both3, inv_var_both4, \ - s_inv_var_p3, s_inv_var_r3, s_inv_var_both3 + return ( + var_p3, + var_r3, + var_p4, + var_r4, + var_both4, + var_both3, + inv_var_both4, + s_inv_var_p3, + s_inv_var_r3, + s_inv_var_both3, + ) def ramp_fit_overall( - ramp_data, orig_cubeshape, orig_ngroups, buffsize, fit_slopes_ans, - variances_ans, save_opt, tstart): + ramp_data, orig_cubeshape, orig_ngroups, buffsize, fit_slopes_ans, variances_ans, save_opt, tstart +): """ Computes the final/overall slope and variance values using the intermediate computations previously computed. When computing @@ -1271,9 +1313,9 @@ def ramp_fit_overall( nframes = ramp_data.nframes dropframes1 = ramp_data.drop_frames1 - if dropframes1 is None: # set to default if missing + if dropframes1 is None: # set to default if missing dropframes1 = 0 - log.debug('Missing keyword DRPFRMS1, so setting to default value of 0') + log.debug("Missing keyword DRPFRMS1, so setting to default value of 0") # Get needed sizes and shapes n_int, ngroups, nrows, ncols = data.shape @@ -1297,15 +1339,15 @@ def ramp_fit_overall( # for slope calculations invalid_data = ramp_data.flags_saturated | ramp_data.flags_do_not_use wh_invalid = np.where(np.bitwise_and(dq_int, invalid_data)) - s_inv_var_both3[wh_invalid] = 0. + s_inv_var_both3[wh_invalid] = 0.0 s_inv_var_both2 = s_inv_var_both3.sum(axis=0) - var_p3[wh_invalid] = 0. - var_r3[wh_invalid] = 0. - var_both3[wh_invalid] = 0. - s_inv_var_p3[wh_invalid] = 0. - s_inv_var_r3[wh_invalid] = 0. - s_inv_var_both3[wh_invalid] = 0. + var_p3[wh_invalid] = 0.0 + var_r3[wh_invalid] = 0.0 + var_both3[wh_invalid] = 0.0 + s_inv_var_p3[wh_invalid] = 0.0 + s_inv_var_r3[wh_invalid] = 0.0 + s_inv_var_both3[wh_invalid] = 0.0 # Compute the 'dataset-averaged' slope # Suppress, then re-enable harmless arithmetic warnings @@ -1318,7 +1360,7 @@ def ramp_fit_overall( del s_inv_var_both2, s_inv_var_both3 # Replace nans in slope_dataset2 with 0 (for non-existing segments) - slope_dataset2[np.isnan(slope_dataset2)] = 0. 
+ slope_dataset2[np.isnan(slope_dataset2)] = 0.0 # Compute the integration-specific slope the_num = (opt_res.slope_seg * inv_var_both4).sum(axis=1) @@ -1342,7 +1384,8 @@ def ramp_fit_overall( # for variances and slope so they will not contribute var_p3, var_both3, slope_int, dq_int = utils.fix_sat_ramps( - ramp_data, sat_0th_group_int, var_p3, var_both3, slope_int, dq_int) + ramp_data, sat_0th_group_int, var_p3, var_both3, slope_int, dq_int + ) if sat_0th_group_int is not None: del sat_0th_group_int @@ -1353,9 +1396,9 @@ def ramp_fit_overall( for num_int in range(0, n_int): dq_slice = groupdq[num_int, 0, :, :] - opt_res.ped_int[num_int, :, :] = \ - utils.calc_pedestal(ramp_data, num_int, slope_int, opt_res.firstf_int, - dq_slice, nframes, groupgap, dropframes1) + opt_res.ped_int[num_int, :, :] = utils.calc_pedestal( + ramp_data, num_int, slope_int, opt_res.firstf_int, dq_slice, nframes, groupgap, dropframes1 + ) del dq_slice @@ -1367,15 +1410,15 @@ def ramp_fit_overall( # Some contributions to these vars may be NaN as they are from ramps # having PIXELDQ=DO_NOT_USE - var_p4[np.isnan(var_p4)] = 0. - var_r4[np.isnan(var_r4)] = 0. + var_p4[np.isnan(var_p4)] = 0.0 + var_r4[np.isnan(var_r4)] = 0.0 # Truncate results at the maximum number of segments found opt_res.slope_seg = opt_res.slope_seg[:, :f_max_seg, :, :] opt_res.sigslope_seg = opt_res.sigslope_seg[:, :f_max_seg, :, :] opt_res.yint_seg = opt_res.yint_seg[:, :f_max_seg, :, :] opt_res.sigyint_seg = opt_res.sigyint_seg[:, :f_max_seg, :, :] - opt_res.weights = (inv_var_both4[:, :f_max_seg, :, :])**2. + opt_res.weights = (inv_var_both4[:, :f_max_seg, :, :]) ** 2.0 opt_res.var_p_seg = var_p4[:, :f_max_seg, :, :] opt_res.var_r_seg = var_r4[:, :f_max_seg, :, :] @@ -1399,8 +1442,7 @@ def ramp_fit_overall( del pixeldq # Output integration-specific results to separate file - integ_info = utils.output_integ( - ramp_data, slope_int, dq_int, var_p3, var_r3, var_both3) + integ_info = utils.output_integ(ramp_data, slope_int, dq_int, var_p3, var_r3, var_both3) if opt_res is not None: del opt_res @@ -1431,15 +1473,15 @@ def ramp_fit_overall( utils.log_stats(c_rates) - log.debug('Instrument: %s', instrume) - log.debug('Number of pixels in 2D array: %d', nrows * ncols) - log.debug('Shape of 2D image: (%d, %d)' % (imshape)) - log.debug('Shape of data cube: (%d, %d, %d)' % (orig_cubeshape)) - log.debug('Buffer size (bytes): %d', buffsize) - log.debug('Number of rows per buffer: %d', nrows) - log.info('Number of groups per integration: %d', orig_ngroups) - log.info('Number of integrations: %d', n_int) - log.debug('The execution time in seconds: %f', tstop - tstart) + log.debug("Instrument: %s", instrume) + log.debug("Number of pixels in 2D array: %d", nrows * ncols) + log.debug("Shape of 2D image: (%d, %d)" % (imshape)) + log.debug("Shape of data cube: (%d, %d, %d)" % (orig_cubeshape)) + log.debug("Buffer size (bytes): %d", buffsize) + log.debug("Number of rows per buffer: %d", nrows) + log.info("Number of groups per integration: %d", orig_ngroups) + log.info("Number of integrations: %d", n_int) + log.debug("The execution time in seconds: %f", tstop - tstart) # Compute the 2D variances due to Poisson and read noise with warnings.catch_warnings(): @@ -1451,14 +1493,14 @@ def ramp_fit_overall( # to nullify their contribution. with warnings.catch_warnings(): warnings.filterwarnings("ignore", "invalid value.*", RuntimeWarning) - var_p2[var_p2 > utils.LARGE_VARIANCE_THRESHOLD] = 0. - var_r2[var_r2 > utils.LARGE_VARIANCE_THRESHOLD] = 0. 
+ var_p2[var_p2 > utils.LARGE_VARIANCE_THRESHOLD] = 0.0 + var_r2[var_r2 > utils.LARGE_VARIANCE_THRESHOLD] = 0.0 # Some contributions to these vars may be NaN as they are from ramps # having PIXELDQ=DO_NOT_USE - var_p2[np.isnan(var_p2)] = 0. - var_p2[med_rates <= 0.0] = 0. - var_r2[np.isnan(var_r2)] = 0. + var_p2[np.isnan(var_p2)] = 0.0 + var_p2[med_rates <= 0.0] = 0.0 + var_r2[np.isnan(var_r2)] = 0.0 # Suppress, then re-enable, harmless arithmetic warning warnings.filterwarnings("ignore", ".*invalid value.*", RuntimeWarning) @@ -1496,11 +1538,11 @@ def calc_power(snr): weighting exponent, 1-D float """ pow_wt = snr.copy() * 0.0 - pow_wt[snr > 5.] = 0.4 - pow_wt[snr > 10.] = 1.0 - pow_wt[snr > 20.] = 3.0 - pow_wt[snr > 50.] = 6.0 - pow_wt[snr > 100.] = 10.0 + pow_wt[snr > 5.0] = 0.4 + pow_wt[snr > 10.0] = 1.0 + pow_wt[snr > 20.0] = 3.0 + pow_wt[snr > 50.0] = 6.0 + pow_wt[snr > 100.0] = 10.0 return pow_wt.ravel() @@ -1522,17 +1564,29 @@ def interpolate_power(snr): weighting exponent, 1-D float """ pow_wt = snr.copy() * 0.0 - pow_wt[np.where(snr > 5.)] = ((snr[snr > 5] - 5) / (10 - 5)) * 0.6 + 0.4 - pow_wt[np.where(snr > 10.)] = ((snr[snr > 10] - 10) / (20 - 10)) * 2.0 + 1.0 - pow_wt[np.where(snr > 20.)] = ((snr[snr > 20] - 20)) / (50 - 20) * 3.0 + 3.0 - pow_wt[np.where(snr > 50.)] = ((snr[snr > 50] - 50)) / (100 - 50) * 4.0 + 6.0 - pow_wt[np.where(snr > 100.)] = 10.0 + pow_wt[np.where(snr > 5.0)] = ((snr[snr > 5] - 5) / (10 - 5)) * 0.6 + 0.4 + pow_wt[np.where(snr > 10.0)] = ((snr[snr > 10] - 10) / (20 - 10)) * 2.0 + 1.0 + pow_wt[np.where(snr > 20.0)] = (snr[snr > 20] - 20) / (50 - 20) * 3.0 + 3.0 + pow_wt[np.where(snr > 50.0)] = (snr[snr > 50] - 50) / (100 - 50) * 4.0 + 6.0 + pow_wt[np.where(snr > 100.0)] = 10.0 return pow_wt.ravel() -def calc_slope(data_sect, gdq_sect, frame_time, opt_res, save_opt, rn_sect, - gain_sect, i_max_seg, ngroups, weighting, f_max_seg, ramp_data): +def calc_slope( + data_sect, + gdq_sect, + frame_time, + opt_res, + save_opt, + rn_sect, + gain_sect, + i_max_seg, + ngroups, + weighting, + f_max_seg, + ramp_data, +): """ Compute the slope of each segment for each pixel in the data cube section for the current integration. Each segment has its slope fit in fit_lines(); @@ -1615,7 +1669,7 @@ def calc_slope(data_sect, gdq_sect, frame_time, opt_res, save_opt, rn_sect, # Highest channel in fit initialized to last read end = np.zeros(npix, dtype=np.int32) + (ngroups - 1) - pixel_done = (end < 0) # False until processing is done + pixel_done = end < 0 # False until processing is done inv_var = np.zeros(npix, dtype=np.float32) # inverse of fit variance num_seg = np.zeros(npix, dtype=np.int32) # number of segments per pixel @@ -1637,18 +1691,17 @@ def calc_slope(data_sect, gdq_sect, frame_time, opt_res, save_opt, rn_sect, err_2d_array = data_sect[0, :, :] * frame_time # Suppress, then re-enable, harmless arithmetic warnings - ''' + """ warnings.filterwarnings("ignore", ".*invalid value.*", RuntimeWarning) warnings.filterwarnings("ignore", ".*divide by zero.*", RuntimeWarning) - ''' + """ err_2d_array[err_2d_array < 0] = 0 warnings.resetwarnings() # Frames >= start and <= end will be masked. 
However, the first channel # to be included in fit will be the read in which a cosmic ray has # been flagged - mask_2d = ((arange_ngroups_col >= start[np.newaxis, :]) & - (arange_ngroups_col <= end[np.newaxis, :])) + mask_2d = (arange_ngroups_col >= start[np.newaxis, :]) & (arange_ngroups_col <= end[np.newaxis, :]) end = 0 # array no longer needed @@ -1660,7 +1713,7 @@ def calc_slope(data_sect, gdq_sect, frame_time, opt_res, save_opt, rn_sect, # Reset the initial False groups to be True so that the first False is now either a jump or sat # Because start was set to be the first True, the initial False values will not be included for pixel in range(npix): - mask_2d[:start[pixel], pixel] = True + mask_2d[: start[pixel], pixel] = True wh_f = np.where(np.logical_not(mask_2d)) @@ -1699,9 +1752,9 @@ def calc_slope(data_sect, gdq_sect, frame_time, opt_res, save_opt, rn_sect, break # frames >= start and <= end_st will be included in fit - mask_2d = \ - ((arange_ngroups_col >= start) - & (arange_ngroups_col < (end_st[end_heads[all_pix] - 1, all_pix] + 1))) + mask_2d = (arange_ngroups_col >= start) & ( + arange_ngroups_col < (end_st[end_heads[all_pix] - 1, all_pix] + 1) + ) mask_2d[gdq_sect_r != 0] = False # RE-exclude bad group dq values @@ -1728,9 +1781,25 @@ def calc_slope(data_sect, gdq_sect, frame_time, opt_res, save_opt, rn_sect, # for all pixels, update arrays, summing slope and variance f_max_seg, num_seg = fit_next_segment( - start, end_st, end_heads, pixel_done, data_sect, mask_2d, mask_2d_init, - inv_var, num_seg, opt_res, save_opt, rn_sect, gain_sect, ngroups, weighting, - f_max_seg, gdq_sect_r, ramp_data) + start, + end_st, + end_heads, + pixel_done, + data_sect, + mask_2d, + mask_2d_init, + inv_var, + num_seg, + opt_res, + save_opt, + rn_sect, + gain_sect, + ngroups, + weighting, + f_max_seg, + gdq_sect_r, + ramp_data, + ) if f_max_seg is None: f_max_seg = 1 @@ -1741,9 +1810,26 @@ def calc_slope(data_sect, gdq_sect, frame_time, opt_res, save_opt, rn_sect, return gdq_sect, inv_var, opt_res, f_max_seg, num_seg -def fit_next_segment(start, end_st, end_heads, pixel_done, data_sect, mask_2d, - mask_2d_init, inv_var, num_seg, opt_res, save_opt, rn_sect, - gain_sect, ngroups, weighting, f_max_seg, gdq_sect_r, ramp_data): +def fit_next_segment( + start, + end_st, + end_heads, + pixel_done, + data_sect, + mask_2d, + mask_2d_init, + inv_var, + num_seg, + opt_res, + save_opt, + rn_sect, + gain_sect, + ngroups, + weighting, + f_max_seg, + gdq_sect_r, + ramp_data, +): """ Call routine to LS fit masked data for a single segment for all pixels in data section. 
Then categorize each pixel's fitting interval based on @@ -1833,7 +1919,8 @@ def fit_next_segment(start, end_st, end_heads, pixel_done, data_sect, mask_2d, # Compute fit quantities for the next segment of all pixels # Each returned array below is 1D, for all npix pixels for current segment slope, intercept, variance, sig_intercept, sig_slope = fit_lines( - data_sect, mask_2d, rn_sect, gain_sect, ngroups, weighting, gdq_sect_r, ramp_data) + data_sect, mask_2d, rn_sect, gain_sect, ngroups, weighting, gdq_sect_r, ramp_data + ) end_locs = end_st[end_heads[all_pix] - 1, all_pix] @@ -1841,7 +1928,7 @@ def fit_next_segment(start, end_st, end_heads, pixel_done, data_sect, mask_2d, # the number of groups-1 l_interval = end_locs - start - wh_done = (start == -1) # done pixels + wh_done = start == -1 # done pixels l_interval[wh_done] = 0 # set interval lengths for done pixels to 0 # Create array to set when each good pixel is classified for the current @@ -1851,67 +1938,157 @@ def fit_next_segment(start, end_st, end_heads, pixel_done, data_sect, mask_2d, # Special case fit with NGROUPS being 1 or 2. if ngroups == 1 or ngroups == 2: return fit_short_ngroups( - ngroups, start, end_st, end_heads, pixel_done, all_pix, - inv_var, num_seg, slope, intercept, variance, sig_intercept, sig_slope, - opt_res, save_opt, mask_2d_init, ramp_mask_sum) + ngroups, + start, + end_st, + end_heads, + pixel_done, + all_pix, + inv_var, + num_seg, + slope, + intercept, + variance, + sig_intercept, + sig_slope, + opt_res, + save_opt, + mask_2d_init, + ramp_mask_sum, + ) # CASE: Long enough (semiramp has >2 groups), at end of ramp wh_check = np.where((l_interval > 1) & (end_locs == ngroups - 1) & (~pixel_done)) if len(wh_check[0]) > 0: f_max_seg = fit_next_segment_long_end_of_ramp( - wh_check, start, end_st, end_heads, pixel_done, got_case, f_max_seg, - inv_var, num_seg, slope, intercept, variance, sig_intercept, sig_slope, - opt_res, save_opt) + wh_check, + start, + end_st, + end_heads, + pixel_done, + got_case, + f_max_seg, + inv_var, + num_seg, + slope, + intercept, + variance, + sig_intercept, + sig_slope, + opt_res, + save_opt, + ) # CASE: Long enough (semiramp has >2 groups ), not at array end (meaning # final group for this semiramp is not final group of the whole ramp) wh_check = np.where((l_interval > 2) & (end_locs != ngroups - 1) & ~pixel_done) if len(wh_check[0]) > 0: f_max_seg = fit_next_segment_long_not_end_of_ramp( - wh_check, start, end_st, end_heads, pixel_done, got_case, f_max_seg, - inv_var, num_seg, slope, intercept, variance, sig_intercept, sig_slope, - opt_res, save_opt, mask_2d_init, end_locs, ngroups) + wh_check, + start, + end_st, + end_heads, + pixel_done, + got_case, + f_max_seg, + inv_var, + num_seg, + slope, + intercept, + variance, + sig_intercept, + sig_slope, + opt_res, + save_opt, + mask_2d_init, + end_locs, + ngroups, + ) # CASE: interval too short to fit normally (only 2 good groups) # At end of array, NGROUPS>1, but exclude NGROUPS==2 datasets # as they are covered in `fit_short_ngroups`. 
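
The CASE blocks above and below all follow the same selection idiom: boolean masks over per-pixel interval lengths and end positions pick out the pixels belonging to each fitting case. A small self-contained sketch with invented values (an interval length of 1 means the segment has exactly 2 good groups):

import numpy as np

ngroups = 10
l_interval = np.array([5, 1, 2, 0])        # semiramp length per pixel
end_locs = np.array([9, 9, 4, 9])          # last group of each semiramp
pixel_done = np.array([False, False, False, True])

long_at_end = np.where((l_interval > 1) & (end_locs == ngroups - 1) & ~pixel_done)
short_at_end = np.where(
    (l_interval == 1) & (end_locs == ngroups - 1) & (ngroups > 2) & ~pixel_done
)
# pixel 0 -> long segment ending the ramp; pixel 1 -> 2-good-group segment
# at the ramp end; pixel 2 falls to a different case; pixel 3 is done
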
- wh_check = np.where((l_interval == 1) & (end_locs == ngroups - 1) - & (ngroups > 2) & (~pixel_done)) + wh_check = np.where((l_interval == 1) & (end_locs == ngroups - 1) & (ngroups > 2) & (~pixel_done)) if len(wh_check[0]) > 0: f_max_seg = fit_next_segment_short_seg_at_end( - wh_check, start, end_st, end_heads, pixel_done, got_case, f_max_seg, - inv_var, num_seg, slope, intercept, variance, sig_intercept, sig_slope, - opt_res, save_opt, mask_2d_init) + wh_check, + start, + end_st, + end_heads, + pixel_done, + got_case, + f_max_seg, + inv_var, + num_seg, + slope, + intercept, + variance, + sig_intercept, + sig_slope, + opt_res, + save_opt, + mask_2d_init, + ) # CASE: full-length ramp has 2 good groups not at array end - wh_check = np.where((l_interval == 2) & (ngroups > 2) - & (end_locs != ngroups - 1) & ~pixel_done) + wh_check = np.where((l_interval == 2) & (ngroups > 2) & (end_locs != ngroups - 1) & ~pixel_done) if len(wh_check[0]) > 0: f_max_seg = fit_next_segment_short_seg_not_at_end( - wh_check, start, end_st, end_heads, pixel_done, got_case, f_max_seg, - inv_var, num_seg, slope, intercept, variance, sig_intercept, sig_slope, - opt_res, save_opt, mask_2d_init, end_locs, ngroups) + wh_check, + start, + end_st, + end_heads, + pixel_done, + got_case, + f_max_seg, + inv_var, + num_seg, + slope, + intercept, + variance, + sig_intercept, + sig_slope, + opt_res, + save_opt, + mask_2d_init, + end_locs, + ngroups, + ) # CASE: full-length ramp has a good group on 0th group of the entire ramp, # and no later good groups. Will use single good group data as the slope. - wh_check = np.where( - mask_2d_init[0, :] & ~mask_2d_init[1, :] & (ramp_mask_sum == 1) & ~pixel_done) + wh_check = np.where(mask_2d_init[0, :] & ~mask_2d_init[1, :] & (ramp_mask_sum == 1) & ~pixel_done) if len(wh_check[0]) > 0: f_max_seg = fit_next_segment_only_good_0th_group( - wh_check, start, end_st, end_heads, pixel_done, got_case, f_max_seg, - inv_var, num_seg, slope, intercept, variance, sig_intercept, sig_slope, - opt_res, save_opt, mask_2d_init) + wh_check, + start, + end_st, + end_heads, + pixel_done, + got_case, + f_max_seg, + inv_var, + num_seg, + slope, + intercept, + variance, + sig_intercept, + sig_slope, + opt_res, + save_opt, + mask_2d_init, + ) # CASE: the segment has a good 0th group and a bad 1st group. - wh_check = np.where(mask_2d_init[0, :] & ~mask_2d_init[1, :] & ~pixel_done - & (end_locs == 1) & (start == 0)) + wh_check = np.where( + mask_2d_init[0, :] & ~mask_2d_init[1, :] & ~pixel_done & (end_locs == 1) & (start == 0) + ) if len(wh_check[0]) > 0: - fit_next_segment_good_0th_bad_1st( - wh_check, start, end_st, end_heads, got_case, ngroups) + fit_next_segment_good_0th_bad_1st(wh_check, start, end_st, end_heads, got_case, ngroups) # CASE OTHER: all other types of segments not covered earlier. No segments # handled here have adequate data, but the stack arrays are updated. @@ -1952,11 +2129,10 @@ def fit_next_segment_all_other(wh_check, start, end_st, end_heads, ngroups): start[start > ngroups - 1] = ngroups - 1 # to keep at max level end_st[end_heads[these_pix] - 1, these_pix] = 0 end_heads[these_pix] -= 1 - end_heads[end_heads < 0.] = 0. + end_heads[end_heads < 0.0] = 0.0 -def fit_next_segment_good_0th_bad_1st( - wh_check, start, end_st, end_heads, got_case, ngroups): +def fit_next_segment_good_0th_bad_1st(wh_check, start, end_st, end_heads, got_case, ngroups): """ The segment has a good 0th group and a bad 1st group. 
For the data from the 0th good group of this segment to possibly be used as a @@ -2003,13 +2179,28 @@ def fit_next_segment_good_0th_bad_1st( start[start > ngroups - 1] = ngroups - 1 # to keep at max level end_st[end_heads[these_pix] - 1, these_pix] = 0 end_heads[these_pix] -= 1 - end_heads[end_heads < 0.] = 0. + end_heads[end_heads < 0.0] = 0.0 def fit_next_segment_only_good_0th_group( - wh_check, start, end_st, end_heads, pixel_done, got_case, f_max_seg, - inv_var, num_seg, slope, intercept, variance, sig_intercept, sig_slope, - opt_res, save_opt, mask_2d_init): + wh_check, + start, + end_st, + end_heads, + pixel_done, + got_case, + f_max_seg, + inv_var, + num_seg, + slope, + intercept, + variance, + sig_intercept, + sig_slope, + opt_res, + save_opt, + mask_2d_init, +): """ Full-length ramp has a good group on 0th group of the entire ramp, and no later good groups. Will use single good group data as the slope. @@ -2094,8 +2285,7 @@ def fit_next_segment_only_good_0th_group( inv_var[these_pix] += 1.0 / variance[these_pix] # Append results to arrays - opt_res.append_arr(num_seg, these_pix, intercept, slope, - sig_intercept, sig_slope, inv_var, save_opt) + opt_res.append_arr(num_seg, these_pix, intercept, slope, sig_intercept, sig_slope, inv_var, save_opt) num_seg[these_pix] += 1 f_max_seg = max(f_max_seg, num_seg.max()) @@ -2104,9 +2294,26 @@ def fit_next_segment_only_good_0th_group( def fit_next_segment_short_seg_not_at_end( - wh_check, start, end_st, end_heads, pixel_done, got_case, f_max_seg, - inv_var, num_seg, slope, intercept, variance, sig_intercept, sig_slope, - opt_res, save_opt, mask_2d_init, end_locs, ngroups): + wh_check, + start, + end_st, + end_heads, + pixel_done, + got_case, + f_max_seg, + inv_var, + num_seg, + slope, + intercept, + variance, + sig_intercept, + sig_slope, + opt_res, + save_opt, + mask_2d_init, + end_locs, + ngroups, +): """ Special case Full-length ramp has 2 good groups not at array end @@ -2206,7 +2413,11 @@ def fit_next_segment_short_seg_not_at_end( # create array: 0...ngroups-1 in a column for each pixel arr_ind_all = np.array( - [np.arange(ngroups), ] * c_mask_2d_init.shape[1]).transpose() + [ + np.arange(ngroups), + ] + * c_mask_2d_init.shape[1] + ).transpose() wh_c_start_all = np.zeros(mask_2d_init.shape[1], dtype=np.uint8) wh_c_start_all[these_pix] = start[these_pix] @@ -2230,11 +2441,10 @@ def fit_next_segment_short_seg_not_at_end( end_st[end_heads[pix_only] - 1, pix_only] = 0 end_heads[pix_only] = 0 pixel_done[pix_only] = True # all processing for pixel is completed - end_heads[(end_heads < 0.)] = 0. 
+ end_heads[(end_heads < 0.0)] = 0.0 # Append results to arrays - opt_res.append_arr(num_seg, these_pix, intercept, slope, - sig_intercept, sig_slope, inv_var, save_opt) + opt_res.append_arr(num_seg, these_pix, intercept, slope, sig_intercept, sig_slope, inv_var, save_opt) num_seg[these_pix] += 1 f_max_seg = max(f_max_seg, num_seg.max()) @@ -2243,9 +2453,24 @@ def fit_next_segment_short_seg_not_at_end( def fit_next_segment_short_seg_at_end( - wh_check, start, end_st, end_heads, pixel_done, got_case, f_max_seg, - inv_var, num_seg, slope, intercept, variance, sig_intercept, sig_slope, - opt_res, save_opt, mask_2d_init): + wh_check, + start, + end_st, + end_heads, + pixel_done, + got_case, + f_max_seg, + inv_var, + num_seg, + slope, + intercept, + variance, + sig_intercept, + sig_slope, + opt_res, + save_opt, + mask_2d_init, +): """ Interval too short to fit normally (only 2 good groups) At end of array, NGROUPS>1, but exclude NGROUPS==2 datasets @@ -2332,7 +2557,7 @@ def fit_next_segment_short_seg_at_end( num_wh = len(wh_check[0]) for ii in range(num_wh): # locate pixels with at least 1 good group this_pix = wh_check[0][ii] - sum_final_2 = mask_2d_init[start[this_pix]:, this_pix].sum() + sum_final_2 = mask_2d_init[start[this_pix] :, this_pix].sum() if sum_final_2 > 0: wh_list.append(wh_check[0][ii]) # add to list to be fit @@ -2346,14 +2571,13 @@ def fit_next_segment_short_seg_at_end( end_heads[these_pix] = 0 pixel_done[these_pix] = True - g_pix = these_pix[variance[these_pix] > 0.] # good pixels + g_pix = these_pix[variance[these_pix] > 0.0] # good pixels if len(g_pix) > 0: inv_var[g_pix] += 1.0 / variance[g_pix] # Append results to arrays - opt_res.append_arr(num_seg, g_pix, intercept, slope, - sig_intercept, sig_slope, inv_var, save_opt) + opt_res.append_arr(num_seg, g_pix, intercept, slope, sig_intercept, sig_slope, inv_var, save_opt) num_seg[g_pix] += 1 f_max_seg = max(f_max_seg, num_seg.max()) @@ -2362,9 +2586,26 @@ def fit_next_segment_short_seg_at_end( def fit_next_segment_long_not_end_of_ramp( - wh_check, start, end_st, end_heads, pixel_done, got_case, f_max_seg, - inv_var, num_seg, slope, intercept, variance, sig_intercept, sig_slope, - opt_res, save_opt, mask_2d_init, end_locs, ngroups): + wh_check, + start, + end_st, + end_heads, + pixel_done, + got_case, + f_max_seg, + inv_var, + num_seg, + slope, + intercept, + variance, + sig_intercept, + sig_slope, + opt_res, + save_opt, + mask_2d_init, + end_locs, + ngroups, +): """ Special case fitting long segment at the end of ramp. Long enough (semiramp has >2 groups ), not at array end (meaning @@ -2453,16 +2694,15 @@ def fit_next_segment_long_not_end_of_ramp( start[these_pix] = end_locs[these_pix] end_st[end_heads[these_pix] - 1, these_pix] = 0 end_heads[these_pix] -= 1 - end_heads[end_heads < 0.] = 0. + end_heads[end_heads < 0.0] = 0.0 - g_pix = these_pix[variance[these_pix] > 0.] 
# good pixels + g_pix = these_pix[variance[these_pix] > 0.0] # good pixels if len(g_pix) > 0: inv_var[g_pix] += 1.0 / variance[g_pix] # Append results to arrays - opt_res.append_arr(num_seg, g_pix, intercept, slope, sig_intercept, - sig_slope, inv_var, save_opt) + opt_res.append_arr(num_seg, g_pix, intercept, slope, sig_intercept, sig_slope, inv_var, save_opt) num_seg[g_pix] += 1 f_max_seg = max(f_max_seg, num_seg.max()) @@ -2473,7 +2713,11 @@ def fit_next_segment_long_not_end_of_ramp( # create array: 0...ngroups-1 in a column for each pixel arr_ind_all = np.array( - [np.arange(ngroups), ] * c_mask_2d_init.shape[1]).transpose() + [ + np.arange(ngroups), + ] + * c_mask_2d_init.shape[1] + ).transpose() wh_c_start_all = np.zeros(c_mask_2d_init.shape[1], dtype=np.uint8) wh_c_start_all[g_pix] = start[g_pix] @@ -2494,9 +2738,23 @@ def fit_next_segment_long_not_end_of_ramp( def fit_next_segment_long_end_of_ramp( - wh_check, start, end_st, end_heads, pixel_done, got_case, f_max_seg, - inv_var, num_seg, slope, intercept, variance, sig_intercept, sig_slope, - opt_res, save_opt): + wh_check, + start, + end_st, + end_heads, + pixel_done, + got_case, + f_max_seg, + inv_var, + num_seg, + slope, + intercept, + variance, + sig_intercept, + sig_slope, + opt_res, + save_opt, +): """ Long enough (semiramp has >2 groups), at end of ramp - set start to -1 to designate all fitting done @@ -2570,7 +2828,7 @@ def fit_next_segment_long_end_of_ramp( arrays before output. """ these_pix = wh_check[0] - start[these_pix] = -1 # all processing for this pixel is completed + start[these_pix] = -1 # all processing for this pixel is completed end_st[end_heads[these_pix] - 1, these_pix] = 0 end_heads[these_pix] = 0 pixel_done[these_pix] = True # all processing for pixel is completed @@ -2578,13 +2836,12 @@ def fit_next_segment_long_end_of_ramp( with warnings.catch_warnings(): warnings.filterwarnings("ignore", "invalid value.*", RuntimeWarning) - g_pix = these_pix[variance[these_pix] > 0.] # good pixels + g_pix = these_pix[variance[these_pix] > 0.0] # good pixels if len(g_pix) > 0: inv_var[g_pix] += 1.0 / variance[g_pix] # Append results to arrays - opt_res.append_arr(num_seg, g_pix, intercept, slope, sig_intercept, - sig_slope, inv_var, save_opt) + opt_res.append_arr(num_seg, g_pix, intercept, slope, sig_intercept, sig_slope, inv_var, save_opt) num_seg[g_pix] += 1 f_max_seg = max(f_max_seg, num_seg.max()) @@ -2592,9 +2849,24 @@ def fit_next_segment_long_end_of_ramp( def fit_short_ngroups( - ngroups, start, end_st, end_heads, pixel_done, all_pix, - inv_var, num_seg, slope, intercept, variance, sig_intercept, sig_slope, - opt_res, save_opt, mask_2d_init, ramp_mask_sum): + ngroups, + start, + end_st, + end_heads, + pixel_done, + all_pix, + inv_var, + num_seg, + slope, + intercept, + variance, + sig_intercept, + sig_slope, + opt_res, + save_opt, + mask_2d_init, + ramp_mask_sum, +): """ Special case fitting for short ngroups fit. @@ -2678,12 +2950,11 @@ def fit_short_ngroups( end_heads[all_pix] = 0 pixel_done[all_pix] = True - g_pix = all_pix[variance[all_pix] > 0.] 
+ g_pix = all_pix[variance[all_pix] > 0.0] if len(g_pix) > 0: inv_var[g_pix] += 1.0 / variance[g_pix] - opt_res.append_arr(num_seg, g_pix, intercept, slope, sig_intercept, - sig_slope, inv_var, save_opt) + opt_res.append_arr(num_seg, g_pix, intercept, slope, sig_intercept, sig_slope, inv_var, save_opt) num_seg[g_pix] = 1 @@ -2710,8 +2981,7 @@ def fit_short_ngroups( inv_var[g_pix] += 1.0 / variance[g_pix] # Append results to arrays - opt_res.append_arr(num_seg, g_pix, intercept, slope, sig_intercept, - sig_slope, inv_var, save_opt) + opt_res.append_arr(num_seg, g_pix, intercept, slope, sig_intercept, sig_slope, inv_var, save_opt) num_seg[g_pix] = 1 @@ -2788,17 +3058,27 @@ def fit_lines(data, mask_2d, rn_sect, gain_sect, ngroups, weighting, gdq_sect_r, # Calculate slopes etc. for datasets having either 1 or 2 groups per # integration, and return if ngroups == 1: # process all pixels in 1 group/integration dataset - slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s = \ - fit_1_group(slope_s, intercept_s, variance_s, sig_intercept_s, - sig_slope_s, npix, data, c_mask_2d, ramp_data) + slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s = fit_1_group( + slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s, npix, data, c_mask_2d, ramp_data + ) return slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s if ngroups == 2: # process all pixels in 2 group/integration dataset rn_sect_1d = rn_sect.reshape(npix) slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s = fit_2_group( - slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s, npix, - data, c_mask_2d, rn_sect_1d, gdq_sect_r, ramp_data) + slope_s, + intercept_s, + variance_s, + sig_intercept_s, + sig_slope_s, + npix, + data, + c_mask_2d, + rn_sect_1d, + gdq_sect_r, + ramp_data, + ) return slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s @@ -2812,9 +3092,9 @@ def fit_lines(data, mask_2d, rn_sect, gain_sect, ngroups, weighting, gdq_sect_r, wh_pix_1r = np.where(c_mask_2d[0, :] & (np.logical_not(c_mask_2d[1, :]))) if len(wh_pix_1r[0]) > 0: - slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s = \ - fit_single_read(slope_s, intercept_s, variance_s, sig_intercept_s, - sig_slope_s, npix, data, wh_pix_1r, ramp_data) + slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s = fit_single_read( + slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s, npix, data, wh_pix_1r, ramp_data + ) del wh_pix_1r @@ -2822,9 +3102,18 @@ def fit_lines(data, mask_2d, rn_sect, gain_sect, ngroups, weighting, gdq_sect_r, # the 0th and 1st group are good, set slope, etc wh_pix_2r = np.where(c_mask_2d.sum(axis=0) == 2) # ramps with 2 good groups - slope_s, intercept_s, variance_s, sig_slope_s, sig_intercept_s = \ - fit_double_read(c_mask_2d, wh_pix_2r, data_masked, slope_s, intercept_s, - variance_s, sig_slope_s, sig_intercept_s, rn_sect, ramp_data) + slope_s, intercept_s, variance_s, sig_slope_s, sig_intercept_s = fit_double_read( + c_mask_2d, + wh_pix_2r, + data_masked, + slope_s, + intercept_s, + variance_s, + sig_slope_s, + sig_intercept_s, + rn_sect, + ramp_data, + ) del wh_pix_2r @@ -2842,24 +3131,25 @@ def fit_lines(data, mask_2d, rn_sect, gain_sect, ngroups, weighting, gdq_sect_r, c_mask_2d = c_mask_2d[:, good_pix] nreads_1d = nreads_1d[good_pix] - if weighting.lower() == 'optimal': # fit using optimal weighting + if weighting.lower() == "optimal": # fit using optimal weighting # get sums from optimal weighting sumx, sumxx, sumxy, sumy, nreads_wtd, xvalues = 
calc_opt_sums( - ramp_data, rn_sect, gain_sect, data_masked, c_mask_2d, xvalues, good_pix) + ramp_data, rn_sect, gain_sect, data_masked, c_mask_2d, xvalues, good_pix + ) - slope, intercept, sig_slope, sig_intercept = \ - calc_opt_fit(nreads_wtd, sumxx, sumx, sumxy, sumy) + slope, intercept, sig_slope, sig_intercept = calc_opt_fit(nreads_wtd, sumxx, sumx, sumxy, sumy) slope = slope / ramp_data.group_time - variance = sig_slope**2. # variance due to fit values + variance = sig_slope**2.0 # variance due to fit values - elif weighting.lower() == 'unweighted': # fit using unweighted weighting + elif weighting.lower() == "unweighted": # fit using unweighted weighting # get sums from unweighted weighting sumx, sumxx, sumxy, sumy = calc_unwtd_sums(data_masked, xvalues) - slope, intercept, sig_slope, sig_intercept, line_fit =\ - calc_unwtd_fit(xvalues, nreads_1d, sumxx, sumx, sumxy, sumy) + slope, intercept, sig_slope, sig_intercept, line_fit = calc_unwtd_fit( + xvalues, nreads_1d, sumxx, sumx, sumxy, sumy + ) denominator = nreads_1d * sumxx - sumx**2 @@ -2873,7 +3163,7 @@ def fit_lines(data, mask_2d, rn_sect, gain_sect, ngroups, weighting, gdq_sect_r, denominator = 0 else: # unsupported weighting type specified - log.error('FATAL ERROR: unsupported weighting type specified.') + log.error("FATAL ERROR: unsupported weighting type specified.") slope_s[good_pix] = slope variance_s[good_pix] = variance @@ -2884,8 +3174,9 @@ def fit_lines(data, mask_2d, rn_sect, gain_sect, ngroups, weighting, gdq_sect_r, return slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s -def fit_single_read(slope_s, intercept_s, variance_s, sig_intercept_s, - sig_slope_s, npix, data, wh_pix_1r, ramp_data): +def fit_single_read( + slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s, npix, data, wh_pix_1r, ramp_data +): """ For datasets having >2 groups/integrations, for any semiramp in which the 0th group is good and the 1st group is either SAT or CR, set slope, etc. @@ -2959,15 +3250,25 @@ def fit_single_read(slope_s, intercept_s, variance_s, sig_intercept_s, # The following arrays will have values correctly calculated later; for # now they are just place-holders variance_s[wh_pix_1r] = utils.LARGE_VARIANCE - sig_slope_s[wh_pix_1r] = 0. - intercept_s[wh_pix_1r] = 0. - sig_intercept_s[wh_pix_1r] = 0. + sig_slope_s[wh_pix_1r] = 0.0 + intercept_s[wh_pix_1r] = 0.0 + sig_intercept_s[wh_pix_1r] = 0.0 return slope_s, intercept_s, variance_s, sig_slope_s, sig_intercept_s -def fit_double_read(mask_2d, wh_pix_2r, data_masked, slope_s, intercept_s, - variance_s, sig_slope_s, sig_intercept_s, rn_sect, ramp_data): +def fit_double_read( + mask_2d, + wh_pix_2r, + data_masked, + slope_s, + intercept_s, + variance_s, + sig_slope_s, + sig_intercept_s, + rn_sect, + ramp_data, +): """ Process all semi-ramps having exactly 2 good groups. May need to optimize later to remove loop over pixels. @@ -3035,8 +3336,7 @@ def fit_double_read(mask_2d, wh_pix_2r, data_masked, slope_s, intercept_s, diff_data = data_semi[1] - data_semi[0] slope_s[pixel_ff] = diff_data / ramp_data.group_time - intercept_s[pixel_ff] = \ - data_semi[1] * (1. 
- second_read) + data_semi[0] * second_read # by geometry + intercept_s[pixel_ff] = data_semi[1] * (1.0 - second_read) + data_semi[0] * second_read # by geometry variance_s[pixel_ff] = 2.0 * rn * rn sig_slope_s[pixel_ff] = np.sqrt(2) * rn sig_intercept_s[pixel_ff] = np.sqrt(2) * rn @@ -3095,8 +3395,8 @@ def calc_unwtd_fit(xvalues, nreads_1d, sumxx, sumx, sumxy, sumy): warnings.filterwarnings("ignore", ".*divide by zero.*", RuntimeWarning) slope = (nreads_1d * sumxy - sumx * sumy) / denominator intercept = (sumxx * sumy - sumx * sumxy) / denominator - sig_intercept = (sumxx / denominator)**0.5 - sig_slope = (nreads_1d / denominator)**0.5 + sig_intercept = (sumxx / denominator) ** 0.5 + sig_slope = (nreads_1d / denominator) ** 0.5 warnings.resetwarnings() line_fit = (slope * xvalues) + intercept @@ -3152,16 +3452,17 @@ def calc_opt_fit(nreads_wtd, sumxx, sumx, sumxy, sumy): slope = (nreads_wtd * sumxy - sumx * sumy) / denominator intercept = (sumxx * sumy - sumx * sumxy) / denominator - sig_intercept = (sumxx / denominator)**0.5 - sig_slope = (nreads_wtd / denominator)**0.5 # STD of the slope's fit + sig_intercept = (sumxx / denominator) ** 0.5 + sig_slope = (nreads_wtd / denominator) ** 0.5 # STD of the slope's fit warnings.resetwarnings() return slope, intercept, sig_slope, sig_intercept -def fit_1_group(slope_s, intercept_s, variance_s, sig_intercept_s, - sig_slope_s, npix, data, mask_2d, ramp_data): +def fit_1_group( + slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s, npix, data, mask_2d, ramp_data +): """ This function sets the fitting arrays for datasets having only 1 group per integration. @@ -3222,12 +3523,12 @@ def fit_1_group(slope_s, intercept_s, variance_s, sig_intercept_s, # The following arrays will have values correctly calculated later; for # now they are just place-holders variance_s = np.zeros(npix, dtype=np.float32) + utils.LARGE_VARIANCE - sig_slope_s = slope_s * 0. - intercept_s = slope_s * 0. - sig_intercept_s = slope_s * 0. + sig_slope_s = slope_s * 0.0 + intercept_s = slope_s * 0.0 + sig_intercept_s = slope_s * 0.0 # For saturated pixels, overwrite slope with benign values. - slope_s[np.logical_not(mask_2d[0, :])] = 0. + slope_s[np.logical_not(mask_2d[0, :])] = 0.0 return slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s @@ -3346,8 +3647,19 @@ def check_bad_0_good_1(gdq, sat): return both -def fit_2_group(slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s, - npix, data, mask_2d, rn_sect_1d, gdq_sect_r, ramp_data): +def fit_2_group( + slope_s, + intercept_s, + variance_s, + sig_intercept_s, + sig_slope_s, + npix, + data, + mask_2d, + rn_sect_1d, + gdq_sect_r, + ramp_data, +): """ This function sets the fitting arrays for datasets having only 2 groups per integration. @@ -3431,7 +3743,7 @@ def fit_2_group(slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s, if len(one_group_locs[0]) > 0: data0 = data_r[0, :] slope_s[one_group_locs] = data0[one_group_locs] - variance_s[one_group_locs] = 1. + variance_s[one_group_locs] = 1.0 del one_group_locs # Special case 4. Bad 0th group, good 1st group. @@ -3440,7 +3752,7 @@ def fit_2_group(slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s, if len(one_group_locs[0]) > 0: data1 = data_r[1, :] slope_s[one_group_locs] = data1[one_group_locs] - variance_s[one_group_locs] = 1. 
+ variance_s[one_group_locs] = 1.0 del one_group_locs slope_s = slope_s / ramp_data.group_time @@ -3525,7 +3837,7 @@ def calc_unwtd_sums(data_masked, xvalues): """ sumx = xvalues.sum(axis=0) sumxx = (xvalues**2).sum(axis=0) - sumy = (np.reshape(data_masked.sum(axis=0), sumx.shape)) + sumy = np.reshape(data_masked.sum(axis=0), sumx.shape) sumxy = (xvalues * np.reshape(data_masked, xvalues.shape)).sum(axis=0) return sumx, sumxx, sumxy, sumy @@ -3586,8 +3898,7 @@ def calc_opt_sums(ramp_data, rn_sect, gain_sect, data_masked, mask_2d, xvalues, # Return 'empty' sums if there is no more data to fit if data_masked.size == 0: - return np.array([]), np.array([]), np.array([]), np.array([]),\ - np.array([]), np.array([]) + return np.array([]), np.array([]), np.array([]), np.array([]), np.array([]), np.array([]) # get initial group for each good pixel for this semiramp fnz = np.argmax(c_mask_2d, axis=0) @@ -3595,7 +3906,7 @@ def calc_opt_sums(ramp_data, rn_sect, gain_sect, data_masked, mask_2d, xvalues, # For those pixels that are all False, set to sentinel value of -1 fnz[c_mask_2d.sum(axis=0) == 0] = -1 - mask_2d_sum = c_mask_2d.sum(axis=0) # number of valid groups/pixel + mask_2d_sum = c_mask_2d.sum(axis=0) # number of valid groups/pixel # get final valid group for each pixel for this semiramp ind_lastnz = fnz + mask_2d_sum - 1 @@ -3627,14 +3938,13 @@ def calc_opt_sums(ramp_data, rn_sect, gain_sect, data_masked, mask_2d, xvalues, sqrt_arg = rn_2_r + data_diff * gain_sect_r with warnings.catch_warnings(): warnings.filterwarnings("ignore", "invalid value.*", RuntimeWarning) - wh_pos = np.where((sqrt_arg >= 0.) & (gain_sect_r != 0.)) - numer_ir[wh_pos] = \ - np.sqrt(rn_2_r[wh_pos] + data_diff[wh_pos] * gain_sect_r[wh_pos]) + wh_pos = np.where((sqrt_arg >= 0.0) & (gain_sect_r != 0.0)) + numer_ir[wh_pos] = np.sqrt(rn_2_r[wh_pos] + data_diff[wh_pos] * gain_sect_r[wh_pos]) sigma_ir[wh_pos] = numer_ir[wh_pos] / gain_sect_r[wh_pos] - snr = data_diff * 0. + snr = data_diff * 0.0 snr[wh_pos] = data_diff[wh_pos] / sigma_ir[wh_pos] snr[np.isnan(snr)] = 0.0 - snr[snr < 0.] = 0.0 + snr[snr < 0.0] = 0.0 del wh_pos @@ -3646,13 +3956,13 @@ def calc_opt_sums(ramp_data, rn_sect, gain_sect, data_masked, mask_2d, xvalues, # Make array of number of good groups, and exponents for each pixel power_wt_r = calc_power(snr) # Get the interpolated power for this SNR num_nz = c_mask_2d.sum(0) # number of groups in segment - nrd_prime = (num_nz - 1) / 2. + nrd_prime = (num_nz - 1) / 2.0 num_nz = 0 # Calculate inverse read noise^2 for use in weights # Suppress, then re-enable, harmless arithmetic warning warnings.filterwarnings("ignore", ".*divide by zero.*", RuntimeWarning) - invrdns2_r = 1. / rn_2_r + invrdns2_r = 1.0 / rn_2_r warnings.resetwarnings() # Set optimal weights for each group of each pixel; @@ -3660,11 +3970,10 @@ def calc_opt_sums(ramp_data, rn_sect, gain_sect, data_masked, mask_2d, xvalues, wt_h = np.zeros(data_masked.shape, dtype=np.float32) for jj_rd in range(data_masked.shape[0]): - wt_h[jj_rd, :] = \ - abs((abs(jj_rd - nrd_prime) / nrd_prime) ** power_wt_r) * invrdns2_r + wt_h[jj_rd, :] = abs((abs(jj_rd - nrd_prime) / nrd_prime) ** power_wt_r) * invrdns2_r - wt_h[np.isnan(wt_h)] = 0. - wt_h[np.isinf(wt_h)] = 0. 
+ wt_h[np.isnan(wt_h)] = 0.0 + wt_h[np.isinf(wt_h)] = 0.0 # For all pixels, 'roll' up the leading zeros such that the 0th group of # each pixel is the lowest nonzero group for that pixel @@ -3682,8 +3991,8 @@ def calc_opt_sums(ramp_data, rn_sect, gain_sect, data_masked, mask_2d, xvalues, sumxx = (xvalues**2 * wt_h).sum(axis=0) c_data_masked = data_masked.copy() - c_data_masked[np.isnan(c_data_masked)] = 0. - sumy = (np.reshape((c_data_masked * wt_h).sum(axis=0), sumx.shape)) + c_data_masked[np.isnan(c_data_masked)] = 0.0 + sumy = np.reshape((c_data_masked * wt_h).sum(axis=0), sumx.shape) sumxy = (xvalues * wt_h * np.reshape(c_data_masked, xvalues.shape)).sum(axis=0) return sumx, sumxx, sumxy, sumy, nreads_wtd, xvalues diff --git a/src/stcal/ramp_fitting/ramp_fit.py b/src/stcal/ramp_fitting/ramp_fit.py index 6a35a45d..e87954e4 100755 --- a/src/stcal/ramp_fitting/ramp_fit.py +++ b/src/stcal/ramp_fitting/ramp_fit.py @@ -17,8 +17,8 @@ from astropy import units as u import logging -from . import gls_fit # used only if algorithm is "GLS" -from . import ols_fit # used only if algorithm is "OLS" +from . import gls_fit # used only if algorithm is "GLS" +from . import ols_fit # used only if algorithm is "OLS" from . import ramp_fit_class log = logging.getLogger(__name__) @@ -50,15 +50,13 @@ def create_ramp_fit_class(model, dqflags=None, suppress_one_group=False): """ ramp_data = ramp_fit_class.RampData() - if isinstance(model.data,u.Quantity): - ramp_data.set_arrays(model.data.value, model.err.value, - model.groupdq, model.pixeldq) + if isinstance(model.data, u.Quantity): + ramp_data.set_arrays(model.data.value, model.err.value, model.groupdq, model.pixeldq) else: - ramp_data.set_arrays(model.data, model.err, - model.groupdq, model.pixeldq) + ramp_data.set_arrays(model.data, model.err, model.groupdq, model.pixeldq) # Attribute may not be supported by all pipelines. Default is NoneType. 
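One note on the Quantity branch above: some datamodels (e.g. Roman's) attach astropy units to their arrays while others hold bare ndarrays, and .value unwraps a Quantity to the underlying array. A throwaway illustration; the array shape and unit here are arbitrary::

    import numpy as np
    from astropy import units as u

    data = np.ones((2, 3), dtype=np.float32) * u.ct

    raw = data.value if isinstance(data, u.Quantity) else data

    assert isinstance(raw, np.ndarray)
    assert not isinstance(raw, u.Quantity)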
- if hasattr(model, 'drop_frames1'): + if hasattr(model, "drop_frames1"): drop_frames1 = model.meta.exposure.drop_frames1 else: drop_frames1 = None @@ -68,7 +66,8 @@ def create_ramp_fit_class(model, dqflags=None, suppress_one_group=False): group_time=model.meta.exposure.group_time, groupgap=model.meta.exposure.groupgap, nframes=model.meta.exposure.nframes, - drop_frames1=drop_frames1) + drop_frames1=drop_frames1, + ) if "zero_frame" in model.meta.exposure and model.meta.exposure.zero_frame: ramp_data.zeroframe = model.zeroframe @@ -82,8 +81,18 @@ def create_ramp_fit_class(model, dqflags=None, suppress_one_group=False): return ramp_data -def ramp_fit(model, buffsize, save_opt, readnoise_2d, gain_2d, algorithm, - weighting, max_cores, dqflags, suppress_one_group=False): +def ramp_fit( + model, + buffsize, + save_opt, + readnoise_2d, + gain_2d, + algorithm, + weighting, + max_cores, + dqflags, + suppress_one_group=False, +): """ Calculate the count rate for each pixel in all data cube sections and all integrations, equal to the slope for all sections (intervals between @@ -158,12 +167,13 @@ def ramp_fit(model, buffsize, save_opt, readnoise_2d, gain_2d, algorithm, ramp_data = create_ramp_fit_class(model, dqflags, suppress_one_group) return ramp_fit_data( - ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, - algorithm, weighting, max_cores, dqflags) + ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, algorithm, weighting, max_cores, dqflags + ) -def ramp_fit_data(ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, - algorithm, weighting, max_cores, dqflags): +def ramp_fit_data( + ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, algorithm, weighting, max_cores, dqflags +): """ This function begins the ramp fit computation after the creation of the RampData class. It determines the proper path for computation to take @@ -222,13 +232,14 @@ def ramp_fit_data(ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, """ if algorithm.upper() == "GLS": image_info, integ_info, gls_opt_info = gls_fit.gls_ramp_fit( - ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, max_cores) + ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, max_cores + ) opt_info = None else: # Get readnoise array for calculation of variance of noiseless ramps, and # gain array in case optimal weighting is to be done nframes = ramp_data.nframes - readnoise_2d *= gain_2d / np.sqrt(2. * nframes) + readnoise_2d *= gain_2d / np.sqrt(2.0 * nframes) # Suppress one group ramps, if desired. if ramp_data.suppress_one_group_ramps: @@ -236,7 +247,8 @@ def ramp_fit_data(ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, # Compute ramp fitting using ordinary least squares. 
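The in-place readnoise_2d rescaling above is the usual unit conversion for ramp variance work: multiplying by the gain moves the read noise from DN to electrons, and dividing by sqrt(2 * nframes) converts a correlated-double-sampling (CDS) noise figure to the noise of a single group averaged over nframes frames. A worked example with invented values::

    import numpy as np

    readnoise_cds_dn = 16.0   # CDS read noise, DN (made up)
    gain = 2.0                # e-/DN (made up)
    nframes = 4               # frames averaged per group

    readnoise_e = readnoise_cds_dn * gain / np.sqrt(2.0 * nframes)
    # 16 * 2 / sqrt(8), roughly 11.31 electrons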
image_info, integ_info, opt_info = ols_fit.ols_ramp_fit_multi( - ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, weighting, max_cores) + ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, weighting, max_cores + ) gls_opt_info = None return image_info, integ_info, opt_info, gls_opt_info @@ -276,4 +288,5 @@ def suppress_one_good_group_ramps(ramp_data): good_index = np.where(ramp_data.groupdq[integ, :, row, col] == 0) if ramp_data.groupdq[integ, good_index, row, col] == 0: ramp_data.groupdq[integ, :, row, col] = np.bitwise_or( - ramp_data.groupdq[integ, :, row, col], dnu_flag) + ramp_data.groupdq[integ, :, row, col], dnu_flag + ) diff --git a/src/stcal/ramp_fitting/ramp_fit_class.py b/src/stcal/ramp_fitting/ramp_fit_class.py index 1eafe1d0..a633bd35 100644 --- a/src/stcal/ramp_fitting/ramp_fit_class.py +++ b/src/stcal/ramp_fitting/ramp_fit_class.py @@ -71,8 +71,7 @@ def set_arrays(self, data, err, groupdq, pixeldq): self.groupdq = groupdq self.pixeldq = pixeldq - def set_meta(self, name, frame_time, group_time, groupgap, - nframes, drop_frames1=None): + def set_meta(self, name, frame_time, group_time, groupgap, nframes, drop_frames1=None): """ Set the metainformation needed for ramp fitting. @@ -182,7 +181,6 @@ def dbg_print_basic_info(self): print(f"pixeldq : \n{self.pixeldq}") print("-" * 80) - def dbg_print_pixel_info(self, row, col): print("-" * 80) print(f" data :\n{self.data[:, :, row, col]}") diff --git a/src/stcal/ramp_fitting/utils.py b/src/stcal/ramp_fitting/utils.py index c41f3532..b29476b4 100644 --- a/src/stcal/ramp_fitting/utils.py +++ b/src/stcal/ramp_fitting/utils.py @@ -9,7 +9,7 @@ log.setLevel(logging.DEBUG) # Replace zero or negative variances with this: -LARGE_VARIANCE = 1.e8 +LARGE_VARIANCE = 1.0e8 LARGE_VARIANCE_THRESHOLD = 0.01 * LARGE_VARIANCE @@ -118,24 +118,21 @@ def reshape_res(self, num_int, rlo, rhi, sect_shape, ff_sect, save_opt): ------- """ for ii_seg in range(0, self.slope_seg.shape[1]): - self.slope_seg[num_int, ii_seg, rlo:rhi, :] = \ - self.slope_2d[ii_seg, :].reshape(sect_shape) + self.slope_seg[num_int, ii_seg, rlo:rhi, :] = self.slope_2d[ii_seg, :].reshape(sect_shape) if save_opt: - self.yint_seg[num_int, ii_seg, rlo:rhi, :] = \ - self.interc_2d[ii_seg, :].reshape(sect_shape) - self.slope_seg[num_int, ii_seg, rlo:rhi, :] = \ - self.slope_2d[ii_seg, :].reshape(sect_shape) - self.sigyint_seg[num_int, ii_seg, rlo:rhi, :] = \ - self.siginterc_2d[ii_seg, :].reshape(sect_shape) - self.sigslope_seg[num_int, ii_seg, rlo:rhi, :] = \ - self.sigslope_2d[ii_seg, :].reshape(sect_shape) - self.inv_var_seg[num_int, ii_seg, rlo:rhi, :] = \ - self.inv_var_2d[ii_seg, :].reshape(sect_shape) + self.yint_seg[num_int, ii_seg, rlo:rhi, :] = self.interc_2d[ii_seg, :].reshape(sect_shape) + self.slope_seg[num_int, ii_seg, rlo:rhi, :] = self.slope_2d[ii_seg, :].reshape(sect_shape) + self.sigyint_seg[num_int, ii_seg, rlo:rhi, :] = self.siginterc_2d[ii_seg, :].reshape( + sect_shape + ) + self.sigslope_seg[num_int, ii_seg, rlo:rhi, :] = self.sigslope_2d[ii_seg, :].reshape( + sect_shape + ) + self.inv_var_seg[num_int, ii_seg, rlo:rhi, :] = self.inv_var_2d[ii_seg, :].reshape(sect_shape) self.firstf_int[num_int, rlo:rhi, :] = ff_sect - def append_arr(self, num_seg, g_pix, intercept, slope, sig_intercept, - sig_slope, inv_var, save_opt): + def append_arr(self, num_seg, g_pix, intercept, slope, sig_intercept, sig_slope, inv_var, save_opt): """ Add the fitting results for the current segment to the 2d arrays. 
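LARGE_VARIANCE, defined above, is a sentinel rather than a physical value: segments with no usable variance are set to 1e8 so that inverse-variance weighting effectively ignores them, and anything still above LARGE_VARIANCE_THRESHOLD on output is scrubbed to zero instead of being reported. A minimal sketch of the round trip, assuming a toy variance array::

    import numpy as np

    LARGE_VARIANCE = 1.0e8
    LARGE_VARIANCE_THRESHOLD = 0.01 * LARGE_VARIANCE

    var = np.array([0.5, -1.0, 2.0, 0.0], dtype=np.float32)

    # Invalid (zero/negative) variances get the sentinel...
    var[var <= 0.0] = LARGE_VARIANCE
    weights = 1.0 / var          # ...so their weight is ~1e-8

    # ...and sentinel values are zeroed before output
    var[var > LARGE_VARIANCE_THRESHOLD] = 0.0
    # var is now [0.5, 0.0, 2.0, 0.0]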
@@ -217,7 +214,7 @@ def shrink_crmag(self, n_int, dq_cube, imshape, nreads, jump_det): for ii_int in range(0, n_int): dq_int = dq_cube[ii_int, :, :, :] dq_cr = np.bitwise_and(jump_det, dq_int) - max_cr_int = (dq_cr > 0.).sum(axis=0).max() + max_cr_int = (dq_cr > 0.0).sum(axis=0).max() max_cr = max(max_cr, max_cr_int) # Allocate compressed array based on max number of crs @@ -237,7 +234,7 @@ def shrink_crmag(self, n_int, dq_cube, imshape, nreads, jump_det): for nn in range(len(cr_int_has_cr[0])): y, x = cr_int_has_cr[0][nn], cr_int_has_cr[1][nn] - if cr_mag_int[k_rd, y, x] > 0.: + if cr_mag_int[k_rd, y, x] > 0.0: cr_com[ii_int, end_cr[y, x], y, x] = cr_mag_int[k_rd, y, x] end_cr[y, x] += 1 @@ -271,20 +268,28 @@ def output_optional(self, group_time): opt_info : tuple The tuple of computed optional results arrays for fitting. """ - self.var_p_seg[self.var_p_seg > LARGE_VARIANCE_THRESHOLD] = 0. - self.var_r_seg[self.var_r_seg > LARGE_VARIANCE_THRESHOLD] = 0. + self.var_p_seg[self.var_p_seg > LARGE_VARIANCE_THRESHOLD] = 0.0 + self.var_r_seg[self.var_r_seg > LARGE_VARIANCE_THRESHOLD] = 0.0 # Suppress, then re-enable, arithmetic warnings warnings.filterwarnings("ignore", ".*invalid value.*", RuntimeWarning) warnings.filterwarnings("ignore", ".*divide by zero.*", RuntimeWarning) # Tiny 'weights' values correspond to non-existent segments, so set to 0. - self.weights[1. / self.weights > LARGE_VARIANCE_THRESHOLD] = 0. + self.weights[1.0 / self.weights > LARGE_VARIANCE_THRESHOLD] = 0.0 warnings.resetwarnings() - opt_info = (self.slope_seg, self.sigslope_seg, self.var_p_seg, - self.var_r_seg, self.yint_seg, self.sigyint_seg, - self.ped_int, self.weights, self.cr_mag_seg) + opt_info = ( + self.slope_seg, + self.sigslope_seg, + self.var_p_seg, + self.var_r_seg, + self.yint_seg, + self.sigyint_seg, + self.ped_int, + self.weights, + self.cr_mag_seg, + ) return opt_info @@ -301,29 +306,29 @@ def print_full(self): # pragma: no cover ------- None """ - print('Will now print all optional output arrays - ') - print(' yint_seg: ') + print("Will now print all optional output arrays - ") + print(" yint_seg: ") print((self.yint_seg)) - print(' ') - print(' slope_seg: ') + print(" ") + print(" slope_seg: ") print(self.slope_seg) - print(' ') - print(' sigyint_seg: ') + print(" ") + print(" sigyint_seg: ") print(self.sigyint_seg) - print(' ') - print(' sigslope_seg: ') + print(" ") + print(" sigslope_seg: ") print(self.sigslope_seg) - print(' ') - print(' inv_var_2d: ') + print(" ") + print(" inv_var_2d: ") print((self.inv_var_2d)) - print(' ') - print(' firstf_int: ') + print(" ") + print(" firstf_int: ") print((self.firstf_int)) - print(' ') - print(' ped_int: ') + print(" ") + print(" ped_int: ") print((self.ped_int)) - print(' ') - print(' cr_mag_seg: ') + print(" ") + print(" cr_mag_seg: ") print((self.cr_mag_seg)) @@ -446,9 +451,19 @@ def alloc_arrays_2(n_int, imshape, max_seg): # number of segments segs_4 = np.zeros((n_int,) + (max_seg,) + imshape, dtype=np.uint8) - return (var_p3, var_r3, var_p4, var_r4, var_both4, var_both3, - inv_var_both4, s_inv_var_p3, s_inv_var_r3, - s_inv_var_both3, segs_4) + return ( + var_p3, + var_r3, + var_p4, + var_r4, + var_both4, + var_both3, + inv_var_both4, + s_inv_var_p3, + s_inv_var_r3, + s_inv_var_both3, + segs_4, + ) def calc_slope_vars(ramp_data, rn_sect, gain_sect, gdq_sect, group_time, max_seg): @@ -506,8 +521,7 @@ def calc_slope_vars(ramp_data, rn_sect, gain_sect, gdq_sect, group_time, max_seg gdq_2d_nan = gdq_2d_nan.astype(np.float32) # set all SAT groups to nan - 
gdq_2d_nan[np.bitwise_and( - gdq_2d, ramp_data.flags_saturated).astype(bool)] = np.nan + gdq_2d_nan[np.bitwise_and(gdq_2d, ramp_data.flags_saturated).astype(bool)] = np.nan # Get lengths of semiramps for all pix [number_of_semiramps, number_of_pix] segs = np.zeros_like(gdq_2d) @@ -530,8 +544,7 @@ def calc_slope_vars(ramp_data, rn_sect, gain_sect, gdq_sect, group_time, max_seg # Locate any CRs that appear before the first SAT group... with warnings.catch_warnings(): warnings.filterwarnings("ignore", "invalid value.*", RuntimeWarning) - wh_cr = np.where( - gdq_2d_nan[i_read, :].astype(np.int32) & ramp_data.flags_jump_det > 0) + wh_cr = np.where(gdq_2d_nan[i_read, :].astype(np.int32) & ramp_data.flags_jump_det > 0) # ... but not on final read: if len(wh_cr[0]) > 0 and (i_read < nreads - 1): @@ -573,35 +586,35 @@ def calc_slope_vars(ramp_data, rn_sect, gain_sect, gdq_sect, group_time, max_seg # checked for and handled later warnings.filterwarnings("ignore", ".*invalid value.*", RuntimeWarning) warnings.filterwarnings("ignore", ".*divide by zero.*", RuntimeWarning) - den_p3 = 1. / (group_time * gain_1d.reshape(imshape) * segs_beg_3_m1) + den_p3 = 1.0 / (group_time * gain_1d.reshape(imshape) * segs_beg_3_m1) if ramp_data.zframe_locs: zinteg_locs = ramp_data.zframe_locs[ramp_data.current_integ] frame_time = ramp_data.frame_time tmp_den_p3 = den_p3[0, :, :] - tmp_den_p3[zinteg_locs] = 1. / (frame_time * gain_sect[zinteg_locs]) + tmp_den_p3[zinteg_locs] = 1.0 / (frame_time * gain_sect[zinteg_locs]) den_p3[0, :, :] = tmp_den_p3 if ramp_data.one_groups_time is not None: ginteg_locs = ramp_data.one_groups_locs[ramp_data.current_integ] tmp_den_p3 = den_p3[0, :, :] - tmp_den_p3[ginteg_locs] = 1. / (ramp_data.one_groups_time * gain_sect[ginteg_locs]) + tmp_den_p3[ginteg_locs] = 1.0 / (ramp_data.one_groups_time * gain_sect[ginteg_locs]) den_p3[0, :, :] = tmp_den_p3 warnings.resetwarnings() # For a segment, the variance due to readnoise noise # = 12 * readnoise**2 /(ngroups_seg**3. - ngroups_seg)/( tgroup **2.) - num_r3 = 12. * (rn_sect / group_time)**2. # always >0 + num_r3 = 12.0 * (rn_sect / group_time) ** 2.0 # always >0 if ramp_data.zframe_locs: zinteg_locs = ramp_data.zframe_locs[ramp_data.current_integ] frame_time = ramp_data.frame_time - num_r3[zinteg_locs] = 12. * (rn_sect[zinteg_locs] / frame_time)**2. + num_r3[zinteg_locs] = 12.0 * (rn_sect[zinteg_locs] / frame_time) ** 2.0 if ramp_data.one_groups_time is not None: ginteg_locs = ramp_data.one_groups_locs[ramp_data.current_integ] - num_r3[ginteg_locs] = 12. * (rn_sect[ginteg_locs] / ramp_data.one_groups_time)**2. + num_r3[ginteg_locs] = 12.0 * (rn_sect[ginteg_locs] / ramp_data.one_groups_time) ** 2.0 # Reshape for every group, every pixel in section num_r3 = np.dstack([num_r3] * max_seg) @@ -613,7 +626,7 @@ def calc_slope_vars(ramp_data, rn_sect, gain_sect, gdq_sect, group_time, max_seg # only one good group at the beginning of the integration, so it will be # be compared to the plane of (near) zeros resulting from the reset. For # longer segments, this value is overwritten below. - den_r3 = num_r3.copy() * 0. + 1. 
/ 6 + den_r3 = num_r3.copy() * 0.0 + 1.0 / 6 wh_seg_pos = np.where(segs_beg_3 > 1) # Suppress, then, re-enable harmless arithmetic warnings, as NaN will be @@ -621,14 +634,13 @@ def calc_slope_vars(ramp_data, rn_sect, gain_sect, gdq_sect, group_time, max_seg warnings.filterwarnings("ignore", ".*invalid value.*", RuntimeWarning) warnings.filterwarnings("ignore", ".*divide by zero.*", RuntimeWarning) # overwrite where segs>1 - den_r3[wh_seg_pos] = 1. / (segs_beg_3[wh_seg_pos] ** 3. - segs_beg_3[wh_seg_pos]) + den_r3[wh_seg_pos] = 1.0 / (segs_beg_3[wh_seg_pos] ** 3.0 - segs_beg_3[wh_seg_pos]) warnings.resetwarnings() return den_r3, den_p3, num_r3, segs_beg_3 -def calc_pedestal(ramp_data, num_int, slope_int, firstf_int, dq_first, - nframes, groupgap, dropframes1): +def calc_pedestal(ramp_data, num_int, slope_int, firstf_int, dq_first, nframes, groupgap, dropframes1): """ The pedestal is calculated by extrapolating the final slope for each pixel from its value at the first sample in the integration to an exposure time @@ -667,12 +679,12 @@ def calc_pedestal(ramp_data, num_int, slope_int, firstf_int, dq_first, pedestal image, 2-D float """ ff_all = firstf_int[num_int, :, :].astype(np.float32) - tmp = (((nframes + 1.) / 2. + dropframes1) / (nframes + groupgap)) + tmp = ((nframes + 1.0) / 2.0 + dropframes1) / (nframes + groupgap) ped = ff_all - slope_int[num_int, ::] * tmp sat_flag = ramp_data.flags_saturated ped[np.bitwise_and(dq_first, sat_flag) == sat_flag] = 0 - ped[np.isnan(ped)] = 0. + ped[np.isnan(ped)] = 0.0 return ped @@ -720,9 +732,9 @@ def output_integ(ramp_data, slope_int, dq_int, var_p3, var_r3, var_both3): warnings.filterwarnings("ignore", ".*invalid value.*", RuntimeWarning) warnings.filterwarnings("ignore", ".*divide by zero.*", RuntimeWarning) - var_p3[var_p3 > LARGE_VARIANCE_THRESHOLD] = 0. - var_r3[var_r3 > LARGE_VARIANCE_THRESHOLD] = 0. - var_both3[var_both3 > LARGE_VARIANCE_THRESHOLD] = 0. + var_p3[var_p3 > LARGE_VARIANCE_THRESHOLD] = 0.0 + var_r3[var_r3 > LARGE_VARIANCE_THRESHOLD] = 0.0 + var_both3[var_both3 > LARGE_VARIANCE_THRESHOLD] = 0.0 data = slope_int invalid_data = ramp_data.flags_saturated | ramp_data.flags_do_not_use @@ -741,8 +753,7 @@ def output_integ(ramp_data, slope_int, dq_int, var_p3, var_r3, var_both3): return integ_info -def gls_pedestal(first_group, slope_int, s_mask, - frame_time, nframes_used): # pragma: no cover +def gls_pedestal(first_group, slope_int, s_mask, frame_time, nframes_used): # pragma: no cover """ Calculate the pedestal for the GLS case. @@ -786,9 +797,9 @@ def gls_pedestal(first_group, slope_int, s_mask, current integration, 2-D float """ M = float(nframes_used) - pedestal = first_group - slope_int * frame_time * (M + 1.) / 2. + pedestal = first_group - slope_int * frame_time * (M + 1.0) / 2.0 if s_mask.any(): - pedestal[s_mask] = 0. + pedestal[s_mask] = 0.0 return pedestal @@ -858,21 +869,21 @@ def get_efftim_ped(ramp_data): frame_time = ramp_data.frame_time dropframes1 = ramp_data.drop_frames1 - if dropframes1 is None: # set to default if missing + if dropframes1 is None: # set to default if missing dropframes1 = 0 - log.debug('Missing keyword DRPFRMS1, so setting to default value of 0') + log.debug("Missing keyword DRPFRMS1, so setting to default value of 0") try: effintim = (nframes + groupgap) * frame_time except TypeError: - log.error('Can not retrieve values needed to calculate integ. time') + log.error("Can not retrieve values needed to calculate integ. 
time") - log.debug('Calculating effective integration time for a single group using:') - log.debug(' groupgap: %s' % (groupgap)) - log.debug(' nframes: %s' % (nframes)) - log.debug(' frame_time: %s' % (frame_time)) - log.debug(' dropframes1: %s' % (dropframes1)) - log.info('Effective integration time per group: %s' % (effintim)) + log.debug("Calculating effective integration time for a single group using:") + log.debug(" groupgap: %s" % (groupgap)) + log.debug(" nframes: %s" % (nframes)) + log.debug(" frame_time: %s" % (frame_time)) + log.debug(" dropframes1: %s" % (dropframes1)) + log.info("Effective integration time per group: %s" % (effintim)) return effintim, nframes, groupgap, dropframes1 @@ -931,8 +942,7 @@ def get_dataset_info(ramp_data): imshape = (asize2, asize1) cubeshape = (nreads,) + imshape - return (nreads, npix, imshape, cubeshape, n_int, instrume, - frame_time, ngroups, group_time) + return (nreads, npix, imshape, cubeshape, n_int, instrume, frame_time, ngroups, group_time) def get_more_info(ramp_data, saturated_flag, jump_flag): # pragma: no cover @@ -1014,12 +1024,12 @@ def reset_bad_gain(ramp_data, pdq, gain): for pixels in the gain array that are either non-positive or NaN., 2-D flag """ - ''' + """ with warnings.catch_warnings(): warnings.filterwarnings("ignore", "invalid value.*", RuntimeWarning) wh_g = np.where(gain <= 0.) - ''' - wh_g = np.where(gain <= 0.) + """ + wh_g = np.where(gain <= 0.0) if len(wh_g[0]) > 0: pdq[wh_g] = np.bitwise_or(pdq[wh_g], ramp_data.flags_no_gain_val) pdq[wh_g] = np.bitwise_or(pdq[wh_g], ramp_data.flags_do_not_use) @@ -1057,8 +1067,7 @@ def remove_bad_singles(segs_beg_3): max_seg = segs_beg_3.shape[0] # get initial number of ramps having single-group segments - tot_num_single_grp_ramps = len(np.where((segs_beg_3 == 1) & - (segs_beg_3.sum(axis=0) > 1))[0]) + tot_num_single_grp_ramps = len(np.where((segs_beg_3 == 1) & (segs_beg_3.sum(axis=0) > 1))[0]) while tot_num_single_grp_ramps > 0: # until there are no more single-group segments @@ -1080,7 +1089,7 @@ def remove_bad_singles(segs_beg_3): continue # Remove the 1-group segment - segs_beg_3[ii_0:-1, wh_y, wh_x] = segs_beg_3[ii_0 + 1:, wh_y, wh_x] + segs_beg_3[ii_0:-1, wh_y, wh_x] = segs_beg_3[ii_0 + 1 :, wh_y, wh_x] # Zero the last segment entry for the ramp, which would otherwise # remain non-zero due to the shift @@ -1088,8 +1097,7 @@ def remove_bad_singles(segs_beg_3): del wh_y, wh_x - tot_num_single_grp_ramps = len(np.where((segs_beg_3 == 1) & - (segs_beg_3.sum(axis=0) > 1))[0]) + tot_num_single_grp_ramps = len(np.where((segs_beg_3 == 1) & (segs_beg_3.sum(axis=0) > 1))[0]) return segs_beg_3 @@ -1149,8 +1157,7 @@ def fix_sat_ramps(ramp_data, sat_0th_group_int, var_p3, var_both3, slope_int, dq var_p3[sat_0th_group_int > 0] = LARGE_VARIANCE var_both3[sat_0th_group_int > 0] = LARGE_VARIANCE slope_int[sat_0th_group_int > 0] = np.nan - dq_int[sat_0th_group_int > 0] = np.bitwise_or( - dq_int[sat_0th_group_int > 0], ramp_data.flags_do_not_use) + dq_int[sat_0th_group_int > 0] = np.bitwise_or(dq_int[sat_0th_group_int > 0], ramp_data.flags_do_not_use) return var_p3, var_both3, slope_int, dq_int @@ -1207,8 +1214,7 @@ def do_all_sat(ramp_data, pixeldq, groupdq, imshape, n_int, save_opt): groupdq_3d = np.zeros((m_sh[0], m_sh[2], m_sh[3]), dtype=np.uint32) for ii in range(n_int): # add SAT flag to existing groupdq in each slice - groupdq_3d[ii, :, :] = np.bitwise_or.reduce(groupdq[ii, :, :, :], - axis=0) + groupdq_3d[ii, :, :] = np.bitwise_or.reduce(groupdq[ii, :, :, :], axis=0) groupdq_3d = 
np.bitwise_or(groupdq_3d, ramp_data.flags_do_not_use) @@ -1236,13 +1242,12 @@ def do_all_sat(ramp_data, pixeldq, groupdq, imshape, n_int, save_opt): weights = new_arr crmag = new_arr - opt_info = (slope, sigslope, var_poisson, var_rnoise, - yint, sigyint, pedestal, weights, crmag) + opt_info = (slope, sigslope, var_poisson, var_rnoise, yint, sigyint, pedestal, weights, crmag) else: opt_info = None - log.info('All groups of all integrations are saturated.') + log.info("All groups of all integrations are saturated.") return image_info, integ_info, opt_info @@ -1260,12 +1265,14 @@ def log_stats(c_rates): ------- None """ - wh_c_0 = np.where(c_rates == 0.) # insuff data or no signal + wh_c_0 = np.where(c_rates == 0.0) # insuff data or no signal - log.debug('The number of pixels having insufficient data') - log.debug('due to excessive CRs or saturation %d:', len(wh_c_0[0])) - log.debug('Count rates - min, mean, max, std: %f, %f, %f, %f' - % (c_rates.min(), c_rates.mean(), c_rates.max(), c_rates.std())) + log.debug("The number of pixels having insufficient data") + log.debug("due to excessive CRs or saturation %d:", len(wh_c_0[0])) + log.debug( + "Count rates - min, mean, max, std: %f, %f, %f, %f" + % (c_rates.min(), c_rates.mean(), c_rates.max(), c_rates.std()) + ) def compute_num_slices(max_cores, nrows, max_available): @@ -1295,13 +1302,13 @@ def compute_num_slices(max_cores, nrows, max_available): number_slices = 1 if max_cores.isnumeric(): number_slices = int(max_cores) - elif max_cores.lower() == "none" or max_cores.lower() == 'one': + elif max_cores.lower() == "none" or max_cores.lower() == "one": number_slices = 1 - elif max_cores == 'quarter': + elif max_cores == "quarter": number_slices = max_available // 4 or 1 - elif max_cores == 'half': + elif max_cores == "half": number_slices = max_available // 2 or 1 - elif max_cores == 'all': + elif max_cores == "all": number_slices = max_available # Make sure we don't have more slices than rows or available cores. number_slices = min([nrows, number_slices, max_available]) @@ -1419,8 +1426,7 @@ def dq_compress_sect(ramp_data, num_int, gdq_sect, pixeldq_sect): # Assume total saturation if group 0 is SATURATED. gdq0_sat = np.bitwise_and(gdq_sect[0], sat) - pixeldq_sect[gdq0_sat != 0] = np.bitwise_or( - pixeldq_sect[gdq0_sat != 0], sat | dnu) + pixeldq_sect[gdq0_sat != 0] = np.bitwise_or(pixeldq_sect[gdq0_sat != 0], sat | dnu) # If jump occurs mark the appropriate flag. jump_loc = np.bitwise_and(gdq_sect, jump) @@ -1526,8 +1532,11 @@ def compute_median_rates(ramp_data): # starting at group 1. The purpose of starting at index 1 is # to shift all the indices down by 1, so they line up with the # indices in first_diffs. - i_group, i_yy, i_xx, = np.where(np.bitwise_and( - gdq_sect[1:, :, :], ramp_data.flags_jump_det)) + ( + i_group, + i_yy, + i_xx, + ) = np.where(np.bitwise_and(gdq_sect[1:, :, :], ramp_data.flags_jump_det)) first_diffs_sect[i_group, i_yy, i_xx] = np.nan del i_group, i_yy, i_xx @@ -1537,8 +1546,9 @@ def compute_median_rates(ramp_data): # few good groups past the 0th. Due to the shortage of good # data, the first_diffs will be set here equal to the data in # the 0th group. 
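The estimator taking shape in compute_median_rates is deliberately robust: the ramp is collapsed to group-to-group first differences, differences landing on jump-flagged groups are blanked with NaN (the np.where/np.nan lines above), and the nanmedian taken just below then yields a per-pixel rate that ignores the corrupted intervals. A compressed single-pixel sketch with invented numbers::

    import numpy as np

    JUMP_DET = 4
    counts = np.array([10.0, 20.0, 95.0, 105.0, 115.0])  # CR before group 2
    gdq = np.array([0, 0, JUMP_DET, 0, 0])

    first_diffs = np.diff(counts)                    # [10, 75, 10, 10]
    first_diffs[(gdq[1:] & JUMP_DET) > 0] = np.nan   # blank the jump

    median_rate = np.nanmedian(first_diffs)          # 10.0, CR ignored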
- wh_min = np.where(np.logical_and( - np.isnan(first_diffs_sect).all(axis=0), np.isfinite(data_sect[0, :, :]))) + wh_min = np.where( + np.logical_and(np.isnan(first_diffs_sect).all(axis=0), np.isfinite(data_sect[0, :, :])) + ) if len(wh_min[0] > 0): first_diffs_sect[0, :, :][wh_min] = data_sect[0, :, :][wh_min] @@ -1550,7 +1560,7 @@ def compute_median_rates(ramp_data): warnings.filterwarnings("ignore", "All-NaN.*", RuntimeWarning) nan_med = np.nanmedian(first_diffs_sect, axis=0) - nan_med[np.isnan(nan_med)] = 0. # if all first_diffs_sect are nans + nan_med[np.isnan(nan_med)] = 0.0 # if all first_diffs_sect are nans median_diffs_2d[:, :] += nan_med # Compute the final 2D array of differences; create rate array diff --git a/src/stcal/saturation/saturation.py b/src/stcal/saturation/saturation.py index 1abf3566..b109a30a 100644 --- a/src/stcal/saturation/saturation.py +++ b/src/stcal/saturation/saturation.py @@ -9,8 +9,8 @@ def flag_saturated_pixels( - data, gdq, pdq, sat_thresh, sat_dq, atod_limit, dqflags, - n_pix_grow_sat=1, zframe=None, read_pattern=None): + data, gdq, pdq, sat_thresh, sat_dq, atod_limit, dqflags, n_pix_grow_sat=1, zframe=None, read_pattern=None +): """ Short Summary ------------- @@ -64,9 +64,9 @@ def flag_saturated_pixels( """ nints, ngroups, nrows, ncols = data.shape - saturated = dqflags['SATURATED'] - ad_floor = dqflags['AD_FLOOR'] - no_sat_check = dqflags['NO_SAT_CHECK'] + saturated = dqflags["SATURATED"] + ad_floor = dqflags["AD_FLOOR"] + no_sat_check = dqflags["NO_SAT_CHECK"] # Identify pixels flagged in reference file as NO_SAT_CHECK, no_sat_check_mask = np.bitwise_and(sat_dq, no_sat_check) == no_sat_check @@ -86,24 +86,19 @@ def flag_saturated_pixels( plane = data[ints, group, :, :] if read_pattern is not None: - dilution_factor = (np.mean(read_pattern[group]) - / read_pattern[group][-1]) + dilution_factor = np.mean(read_pattern[group]) / read_pattern[group][-1] dilution_factor = np.where(no_sat_check_mask, 1, dilution_factor) else: dilution_factor = 1 - - flagarray, flaglowarray = plane_saturation( - plane, sat_thresh * dilution_factor, dqflags) + flagarray, flaglowarray = plane_saturation(plane, sat_thresh * dilution_factor, dqflags) # for saturation, the flag is set in the current plane # and all following planes. - np.bitwise_or( - gdq[ints, group:, :, :], flagarray, gdq[ints, group:, :, :]) + np.bitwise_or(gdq[ints, group:, :, :], flagarray, gdq[ints, group:, :, :]) # for A/D floor, the flag is only set of the current plane - np.bitwise_or( - gdq[ints, group, :, :], flaglowarray, gdq[ints, group, :, :]) + np.bitwise_or(gdq[ints, group, :, :], flaglowarray, gdq[ints, group, :, :]) del flagarray del flaglowarray @@ -112,8 +107,7 @@ def flag_saturated_pixels( if n_pix_grow_sat > 0: gdq_slice = copy.copy(gdq[ints, group, :, :]).astype(int) - gdq[ints, group, :, :] = adjacent_pixels( - gdq_slice, saturated, n_pix_grow_sat) + gdq[ints, group, :, :] = adjacent_pixels(gdq_slice, saturated, n_pix_grow_sat) # Check ZEROFRAME. if zframe is not None: @@ -122,13 +116,13 @@ def flag_saturated_pixels( zdq = flagarray | flaglowarray if n_pix_grow_sat > 0: zdq = adjacent_pixels(zdq, saturated, n_pix_grow_sat) - plane[zdq != 0] = 0. 
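The dilution_factor above exists because a resultant can be the average of several reads: if only the final read saturates, averaging dilutes it, so the saturation threshold is scaled by mean(reads)/last_read before comparison. A worked example with a hypothetical read pattern::

    import numpy as np

    sat_thresh = 60000.0                       # DN, made-up threshold
    read_pattern = [[1], [2, 3, 4], [5, 6, 7, 8]]

    group = 2                                  # resultant averaging reads 5..8
    dilution_factor = np.mean(read_pattern[group]) / read_pattern[group][-1]
    # mean(5, 6, 7, 8) / 8 = 0.8125
    effective_thresh = sat_thresh * dilution_factor   # 48750.0 DN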
+ plane[zdq != 0] = 0.0 zframe[ints] = plane n_sat = np.any(np.any(np.bitwise_and(gdq, saturated), axis=0), axis=0).sum() - log.info(f'Detected {n_sat} saturated pixels') + log.info(f"Detected {n_sat} saturated pixels") n_floor = np.any(np.any(np.bitwise_and(gdq, ad_floor), axis=0), axis=0).sum() - log.info(f'Detected {n_floor} A/D floor pixels') + log.info(f"Detected {n_floor} A/D floor pixels") pdq = np.bitwise_or(pdq, sat_dq) @@ -156,8 +150,7 @@ def adjacent_pixels(plane_gdq, saturated, n_pix_grow_sat): only_sat = np.bitwise_and(plane_gdq, saturated).astype(np.uint8) box_dim = (n_pix_grow_sat * 2) + 1 struct = np.ones((box_dim, box_dim)).astype(bool) - dialated = ndimage.binary_dilation( - only_sat, structure=struct).astype(only_sat.dtype) + dialated = ndimage.binary_dilation(only_sat, structure=struct).astype(only_sat.dtype) sat_pix = np.bitwise_or(cgdq, (dialated * saturated)) return sat_pix @@ -177,10 +170,9 @@ def plane_saturation(plane, sat_thresh, dqflags): A dictionary with at least the following keywords: DO_NOT_USE, SATURATED, AD_FLOOR, NO_SAT_CHECK """ - donotuse = dqflags['DO_NOT_USE'] - saturated = dqflags['SATURATED'] - ad_floor = dqflags['AD_FLOOR'] - + donotuse = dqflags["DO_NOT_USE"] + saturated = dqflags["SATURATED"] + ad_floor = dqflags["AD_FLOOR"] flagarray = np.zeros(plane.shape, dtype=np.uint32) flaglowarray = np.zeros(plane.shape, dtype=np.uint32) diff --git a/tests/test_alignment.py b/tests/test_alignment.py index 80af4d7e..f2a8150e 100644 --- a/tests/test_alignment.py +++ b/tests/test_alignment.py @@ -43,12 +43,8 @@ def _create_wcs_object_without_distortion( det2sky = shift | scale | tan | celestial_rotation det2sky.name = "linear_transform" - detector_frame = cf.Frame2D( - name="detector", axes_names=("x", "y"), unit=(u.pix, u.pix) - ) - sky_frame = cf.CelestialFrame( - reference_frame=coord.FK5(), name="fk5", unit=(u.deg, u.deg) - ) + detector_frame = cf.Frame2D(name="detector", axes_names=("x", "y"), unit=(u.pix, u.pix)) + sky_frame = cf.CelestialFrame(reference_frame=coord.FK5(), name="fk5", unit=(u.deg, u.deg)) pipeline = [(detector_frame, det2sky), (sky_frame, None)] @@ -63,9 +59,7 @@ def _create_wcs_object_without_distortion( def _create_wcs_and_datamodel(fiducial_world, shape, pscale): - wcs = _create_wcs_object_without_distortion( - fiducial_world=fiducial_world, shape=shape, pscale=pscale - ) + wcs = _create_wcs_object_without_distortion(fiducial_world=fiducial_world, shape=shape, pscale=pscale) ra_ref, dec_ref = fiducial_world[0], fiducial_world[1] return DataModel( ra_ref=ra_ref, @@ -107,9 +101,7 @@ def __init__(self, ra_ref, dec_ref, roll_ref, v2_ref, v3_ref, v3yangle, wcs=None class DataModel: def __init__(self, ra_ref, dec_ref, roll_ref, v2_ref, v3_ref, v3yangle, wcs=None): - self.meta = MetaData( - ra_ref, dec_ref, roll_ref, v2_ref, v3_ref, v3yangle, wcs=wcs - ) + self.meta = MetaData(ra_ref, dec_ref, roll_ref, v2_ref, v3_ref, v3yangle, wcs=wcs) def test_compute_fiducial(): @@ -121,9 +113,7 @@ def test_compute_fiducial(): fiducial_world = (0, 0) # in deg pscale = (0.000014, 0.000014) # in deg/pixel - wcs = _create_wcs_object_without_distortion( - fiducial_world=fiducial_world, shape=shape, pscale=pscale - ) + wcs = _create_wcs_object_without_distortion(fiducial_world=fiducial_world, shape=shape, pscale=pscale) computed_fiducial = compute_fiducial([wcs]) @@ -139,9 +129,7 @@ def test_compute_scale(pscales): fiducial_world = (0, 0) # in deg pscale = (pscales[0], pscales[1]) # in deg/pixel - wcs = _create_wcs_object_without_distortion( - 
fiducial_world=fiducial_world, shape=shape, pscale=pscale - ) + wcs = _create_wcs_object_without_distortion(fiducial_world=fiducial_world, shape=shape, pscale=pscale) expected_scale = np.sqrt(pscale[0] * pscale[1]) computed_scale = compute_scale(wcs=wcs, fiducial=fiducial_world) @@ -288,7 +276,7 @@ def test_wcs_bbox_from_shape_2d(): @pytest.mark.parametrize( "shape, pixmap_expected_shape", [ - (None,(4, 4, 2)), + (None, (4, 4, 2)), ((100, 200), (100, 200, 2)), ], ) @@ -296,7 +284,7 @@ def test_calc_pixmap_shape(shape, pixmap_expected_shape): # TODO: add test for gwcs.WCS wcs1, wcs2 = get_fake_wcs() pixmap = resample_utils.calc_pixmap(wcs1, wcs2, shape=shape) - assert pixmap.shape==pixmap_expected_shape + assert pixmap.shape == pixmap_expected_shape @pytest.mark.parametrize( @@ -316,9 +304,7 @@ def test_calc_pixmap_shape(shape, pixmap_expected_shape): ), ], ) -def test_update_s_region_keyword( - model, footprint, expected_s_region, expected_log_info, caplog -): +def test_update_s_region_keyword(model, footprint, expected_s_region, expected_log_info, caplog): """ Test that S_REGION keyword is being properly populated with the coordinate values. """ @@ -372,10 +358,5 @@ def test_update_s_region_imaging(model, bounding_box, data): *model.meta.wcs(2.5, -0.5), ] update_s_region_imaging(model, center=False) - updated_s_region_coords = [ - float(x) for x in model.meta.wcsinfo.s_region.split(" ")[3:] - ] - assert all( - np.isclose(x, y) - for x, y in zip(updated_s_region_coords, expected_s_region_coords) - ) + updated_s_region_coords = [float(x) for x in model.meta.wcsinfo.s_region.split(" ")[3:]] + assert all(np.isclose(x, y) for x, y in zip(updated_s_region_coords, expected_s_region_coords)) diff --git a/tests/test_dark_current.py b/tests/test_dark_current.py index 5700d38f..d2356a90 100644 --- a/tests/test_dark_current.py +++ b/tests/test_dark_current.py @@ -12,11 +12,11 @@ from stcal.dark_current.dark_class import DarkData, ScienceData dqflags = { - 'DO_NOT_USE': 2**0, # Bad pixel. Do not use. - 'SATURATED': 2**1, # Pixel saturated during exposure - 'JUMP_DET': 2**2, # Jump detected during exposure - 'DROPOUT': 2**3, # Data lost in transmission - 'AD_FLOOR': 2**6, # Below A/D floor (0 DN, was RESERVED_3) + "DO_NOT_USE": 2**0, # Bad pixel. Do not use. 
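These dqflags values are deliberately distinct bit positions, which is what lets one integer DQ word carry several conditions at once: flags are combined with bitwise OR and tested with bitwise AND, as the assertions further below do. A quick standalone illustration::

    import numpy as np

    DO_NOT_USE = 2**0
    SATURATED = 2**1
    JUMP_DET = 2**2

    pixeldq = np.zeros(4, dtype=np.uint32)
    pixeldq[1] = np.bitwise_or(SATURATED, DO_NOT_USE)   # value 3
    pixeldq[2] = JUMP_DET

    is_saturated = np.bitwise_and(pixeldq, SATURATED) == SATURATED
    # [False, True, False, False]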
+ "SATURATED": 2**1, # Pixel saturated during exposure + "JUMP_DET": 2**2, # Jump detected during exposure + "DROPOUT": 2**3, # Data lost in transmission + "AD_FLOOR": 2**6, # Below A/D floor (0 DN, was RESERVED_3) } @@ -27,7 +27,7 @@ DELIM = "-" * 80 -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def make_rampmodel(): """Make MIRI Ramp model for testing""" @@ -51,7 +51,7 @@ def _ramp(nints, ngroups, nrows, ncols): return _ramp -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def make_darkmodel(): """Make MIRI dark model for testing""" @@ -74,12 +74,11 @@ def _dark(ngroups, nrows, ncols): return _dark -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def setup_nrc_cube(): """Set up fake NIRCam data to test.""" def _cube(readpatt, ngroups, nframes, groupgap, nrows, ncols): - nints = 1 dims = (nints, ngroups, nrows, ncols) ramp_data = ScienceData() @@ -133,16 +132,16 @@ def _params(): nrows = 20 ncols = 20 for readpatt, values in readpatterns.items(): - params.append((readpatt, ngroups, values['nframes'], values['nskip'], nrows, ncols)) + params.append((readpatt, ngroups, values["nframes"], values["nskip"], nrows, ncols)) return params -@pytest.mark.parametrize('readpatt, ngroups, nframes, groupgap, nrows, ncols', _params()) +@pytest.mark.parametrize("readpatt, ngroups, nframes, groupgap, nrows, ncols", _params()) def test_frame_averaging(setup_nrc_cube, readpatt, ngroups, nframes, groupgap, nrows, ncols): """Check that if nframes>1 or groupgap>0, then the pipeline reconstructs - the dark reference file to match the frame averaging and groupgap - settings of the exposure.""" + the dark reference file to match the frame averaging and groupgap + settings of the exposure.""" # Create data and dark model data, dark = setup_nrc_cube(readpatt, ngroups, nframes, groupgap, nrows, ncols) @@ -167,13 +166,12 @@ def test_frame_averaging(setup_nrc_cube, readpatt, ngroups, nframes, groupgap, n # Manually average the input data to compare with pipeline output for newgp, gstart, gend in zip(range(ngroups), gstrt_ind, gend_ind): - # Average the data frames newframe = np.mean(dark.data[gstart:gend, 10, 10]) manual_avg[newgp] = newframe # ERR arrays will be quadratic sum of error values - manual_errs[newgp] = np.sqrt(np.sum(dark.err[gstart:gend, 10, 10]**2)) / (gend - gstart) + manual_errs[newgp] = np.sqrt(np.sum(dark.err[gstart:gend, 10, 10] ** 2)) / (gend - gstart) # Check that pipeline output matches manual averaging results assert_allclose(manual_avg, avg_dark.data[:, 10, 10], rtol=1e-5) @@ -222,7 +220,7 @@ def test_more_sci_frames(make_rampmodel, make_darkmodel): # darkstatus = outfile.meta.cal_step.dark_sub darkstatus = out_data.cal_step - assert darkstatus == 'SKIPPED' + assert darkstatus == "SKIPPED" def test_sub_by_frame(make_rampmodel, make_darkmodel): @@ -254,7 +252,7 @@ def test_sub_by_frame(make_rampmodel, make_darkmodel): # apply correction outfile, avg_dark = darkcorr(dm_ramp, dark) - assert(outfile.cal_step == "COMPLETE") + assert outfile.cal_step == "COMPLETE" # remove the single dimension at start of file (1, 30, 1032, 1024) # so comparison in assert works @@ -266,7 +264,7 @@ def test_sub_by_frame(make_rampmodel, make_darkmodel): # test that the output data file is equal to the difference # found when subtracting ref file from sci file - tol = 1.e-6 + tol = 1.0e-6 np.testing.assert_allclose(outdata, diff, tol) @@ -340,11 +338,9 @@ def test_dq_combine(make_rampmodel, make_darkmodel): outfile, avg_dark = darkcorr(dm_ramp, dark) # 
check that dq flags were correctly added - assert (outfile.pixeldq[50, 50] - == np.bitwise_or(dqflags["JUMP_DET"], dqflags["DO_NOT_USE"])) + assert outfile.pixeldq[50, 50] == np.bitwise_or(dqflags["JUMP_DET"], dqflags["DO_NOT_USE"]) - assert (outfile.pixeldq[50, 51] - == np.bitwise_or(dqflags["SATURATED"], dqflags["DO_NOT_USE"])) + assert outfile.pixeldq[50, 51] == np.bitwise_or(dqflags["SATURATED"], dqflags["DO_NOT_USE"]) def test_frame_avg(make_rampmodel, make_darkmodel): @@ -389,5 +385,5 @@ def test_frame_avg(make_rampmodel, make_darkmodel): assert outfile.data[0, 3, 500, 500] == pytest.approx(2.65) # check that the error array is not modified. - tol = 1.e-6 + tol = 1.0e-6 np.testing.assert_allclose(outfile.err[:, :], 0, tol) diff --git a/tests/test_dq.py b/tests/test_dq.py index 675a3676..d7375e35 100644 --- a/tests/test_dq.py +++ b/tests/test_dq.py @@ -16,12 +16,8 @@ def test_deprecation(name): error = ( nullcontext() if HAS_STDATAMODELS - else pytest.raises( - ImportError, match=f"{name} has been moved to stdatamodels.{name},.*" - ) + else pytest.raises(ImportError, match=f"{name} has been moved to stdatamodels.{name},.*") ) - with pytest.warns( - DeprecationWarning, match=f"{name} has been moved to stdatamodels.{name},.*" - ), error: + with pytest.warns(DeprecationWarning, match=f"{name} has been moved to stdatamodels.{name},.*"), error: importlib.import_module(f"stcal.{name}") diff --git a/tests/test_jump.py b/tests/test_jump.py index 37758e76..e68684ef 100644 --- a/tests/test_jump.py +++ b/tests/test_jump.py @@ -1,13 +1,19 @@ import numpy as np import pytest -from stcal.jump.jump import flag_large_events, find_ellipses, extend_saturation, \ - point_inside_ellipse, find_faint_extended, calc_num_slices +from stcal.jump.jump import ( + flag_large_events, + find_ellipses, + extend_saturation, + point_inside_ellipse, + find_faint_extended, + calc_num_slices, +) -DQFLAGS = {'JUMP_DET': 4, 'SATURATED': 2, 'DO_NOT_USE': 1, 'GOOD': 0, 'NO_GAIN_VALUE': 8} +DQFLAGS = {"JUMP_DET": 4, "SATURATED": 2, "DO_NOT_USE": 1, "GOOD": 0, "NO_GAIN_VALUE": 8} -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def setup_cube(): def _cube(ngroups, readnoise=10): nints = 1 @@ -26,25 +32,25 @@ def _cube(ngroups, readnoise=10): def test_find_simple_ellipse(): plane = np.zeros(shape=(5, 5), dtype=np.uint8) - plane[2, 2] = DQFLAGS['JUMP_DET'] - plane[3, 2] = DQFLAGS['JUMP_DET'] - plane[1, 2] = DQFLAGS['JUMP_DET'] - plane[2, 3] = DQFLAGS['JUMP_DET'] - plane[2, 1] = DQFLAGS['JUMP_DET'] - plane[1, 3] = DQFLAGS['JUMP_DET'] - plane[2, 4] = DQFLAGS['JUMP_DET'] - plane[3, 3] = DQFLAGS['JUMP_DET'] - ellipse = find_ellipses(plane, DQFLAGS['JUMP_DET'], 1) + plane[2, 2] = DQFLAGS["JUMP_DET"] + plane[3, 2] = DQFLAGS["JUMP_DET"] + plane[1, 2] = DQFLAGS["JUMP_DET"] + plane[2, 3] = DQFLAGS["JUMP_DET"] + plane[2, 1] = DQFLAGS["JUMP_DET"] + plane[1, 3] = DQFLAGS["JUMP_DET"] + plane[2, 4] = DQFLAGS["JUMP_DET"] + plane[3, 3] = DQFLAGS["JUMP_DET"] + ellipse = find_ellipses(plane, DQFLAGS["JUMP_DET"], 1) assert ellipse[0][2] == pytest.approx(45.0, 1e-3) # 90 degree rotation assert ellipse[0][0] == pytest.approx((2.5, 2.0)) # center def test_find_ellipse2(): plane = np.zeros(shape=(5, 5), dtype=np.uint8) - plane[1, :] = [0, DQFLAGS['JUMP_DET'], DQFLAGS['JUMP_DET'], DQFLAGS['JUMP_DET'], 0] - plane[2, :] = [0, DQFLAGS['JUMP_DET'], DQFLAGS['JUMP_DET'], DQFLAGS['JUMP_DET'], 0] - plane[3, :] = [0, DQFLAGS['JUMP_DET'], DQFLAGS['JUMP_DET'], DQFLAGS['JUMP_DET'], 0] - ellipses = find_ellipses(plane, DQFLAGS['JUMP_DET'], 1) 
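For intuition about what these find_ellipses assertions check: the routine takes one 2-D DQ plane, groups contiguous jump-flagged pixels, and returns tuples whose first element is an (x, y) center and whose third is a rotation angle, as the indexing in the asserts shows. The sketch below is only a plain-NumPy centroid sanity check on a toy plane, not the pipeline's actual ellipse fit::

    import numpy as np

    JUMP_DET = 4
    plane = np.zeros((5, 5), dtype=np.uint8)
    plane[1:4, 1:4] = JUMP_DET                # 3x3 block of jump flags

    ys, xs = np.nonzero(plane & JUMP_DET)
    center = (xs.mean(), ys.mean())           # (2.0, 2.0) in (x, y) order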
+ plane[1, :] = [0, DQFLAGS["JUMP_DET"], DQFLAGS["JUMP_DET"], DQFLAGS["JUMP_DET"], 0] + plane[2, :] = [0, DQFLAGS["JUMP_DET"], DQFLAGS["JUMP_DET"], DQFLAGS["JUMP_DET"], 0] + plane[3, :] = [0, DQFLAGS["JUMP_DET"], DQFLAGS["JUMP_DET"], DQFLAGS["JUMP_DET"], 0] + ellipses = find_ellipses(plane, DQFLAGS["JUMP_DET"], 1) ellipse = ellipses[0] assert ellipse[0][0] == 2 assert ellipse[0][1] == 2 @@ -57,44 +63,53 @@ def test_extend_saturation_simple(): cube = np.zeros(shape=(5, 7, 7), dtype=np.uint8) grp = 1 min_sat_radius_extend = 1 - cube[1, 3, 3] = DQFLAGS['SATURATED'] - cube[1, 2, 3] = DQFLAGS['SATURATED'] - cube[1, 3, 4] = DQFLAGS['SATURATED'] - cube[1, 4, 3] = DQFLAGS['SATURATED'] - cube[1, 3, 2] = DQFLAGS['SATURATED'] - cube[1, 2, 2] = DQFLAGS['JUMP_DET'] - sat_circles = find_ellipses(cube[grp, :, :], DQFLAGS['SATURATED'], 1) - new_cube = extend_saturation(cube, grp, sat_circles, DQFLAGS['SATURATED'], - min_sat_radius_extend, expansion=1.1) - - assert new_cube[grp, 2, 2] == DQFLAGS['SATURATED'] - assert new_cube[grp, 4, 4] == DQFLAGS['SATURATED'] + cube[1, 3, 3] = DQFLAGS["SATURATED"] + cube[1, 2, 3] = DQFLAGS["SATURATED"] + cube[1, 3, 4] = DQFLAGS["SATURATED"] + cube[1, 4, 3] = DQFLAGS["SATURATED"] + cube[1, 3, 2] = DQFLAGS["SATURATED"] + cube[1, 2, 2] = DQFLAGS["JUMP_DET"] + sat_circles = find_ellipses(cube[grp, :, :], DQFLAGS["SATURATED"], 1) + new_cube = extend_saturation( + cube, grp, sat_circles, DQFLAGS["SATURATED"], min_sat_radius_extend, expansion=1.1 + ) + + assert new_cube[grp, 2, 2] == DQFLAGS["SATURATED"] + assert new_cube[grp, 4, 4] == DQFLAGS["SATURATED"] assert new_cube[grp, 4, 5] == 0 def test_flag_large_events_nosnowball(): cube = np.zeros(shape=(1, 5, 7, 7), dtype=np.uint8) # cross of saturation with no jump - cube[0, 0:2, 3, 3] = DQFLAGS['SATURATED'] - cube[0, 0:2, 2, 3] = DQFLAGS['SATURATED'] - cube[0, 0:2, 3, 4] = DQFLAGS['SATURATED'] - cube[0, 0:2, 4, 3] = DQFLAGS['SATURATED'] - cube[0, 0:2, 3, 2] = DQFLAGS['SATURATED'] + cube[0, 0:2, 3, 3] = DQFLAGS["SATURATED"] + cube[0, 0:2, 2, 3] = DQFLAGS["SATURATED"] + cube[0, 0:2, 3, 4] = DQFLAGS["SATURATED"] + cube[0, 0:2, 4, 3] = DQFLAGS["SATURATED"] + cube[0, 0:2, 3, 2] = DQFLAGS["SATURATED"] # cross of saturation surrounding by jump -> snowball but sat core is not new # should have no snowball trigger - cube[0, 2, 3, 3] = DQFLAGS['SATURATED'] - cube[0, 2, 2, 3] = DQFLAGS['SATURATED'] - cube[0, 2, 3, 4] = DQFLAGS['SATURATED'] - cube[0, 2, 4, 3] = DQFLAGS['SATURATED'] - cube[0, 2, 3, 2] = DQFLAGS['SATURATED'] - cube[0, 2, 1, 1:6] = DQFLAGS['JUMP_DET'] - cube[0, 2, 5, 1:6] = DQFLAGS['JUMP_DET'] - cube[0, 2, 1:6, 1] = DQFLAGS['JUMP_DET'] - cube[0, 2, 1:6, 5] = DQFLAGS['JUMP_DET'] - flag_large_events(cube, DQFLAGS['JUMP_DET'], DQFLAGS['SATURATED'], min_sat_area=1, - min_jump_area=6, - expand_factor=1.9, edge_size=1, - sat_required_snowball=True, min_sat_radius_extend=1, sat_expand=1.1) + cube[0, 2, 3, 3] = DQFLAGS["SATURATED"] + cube[0, 2, 2, 3] = DQFLAGS["SATURATED"] + cube[0, 2, 3, 4] = DQFLAGS["SATURATED"] + cube[0, 2, 4, 3] = DQFLAGS["SATURATED"] + cube[0, 2, 3, 2] = DQFLAGS["SATURATED"] + cube[0, 2, 1, 1:6] = DQFLAGS["JUMP_DET"] + cube[0, 2, 5, 1:6] = DQFLAGS["JUMP_DET"] + cube[0, 2, 1:6, 1] = DQFLAGS["JUMP_DET"] + cube[0, 2, 1:6, 5] = DQFLAGS["JUMP_DET"] + flag_large_events( + cube, + DQFLAGS["JUMP_DET"], + DQFLAGS["SATURATED"], + min_sat_area=1, + min_jump_area=6, + expand_factor=1.9, + edge_size=1, + sat_required_snowball=True, + min_sat_radius_extend=1, + sat_expand=1.1, + ) assert cube[0, 2, 2, 2] == 0 assert cube[0, 2, 
3, 6] == 0 @@ -102,67 +117,92 @@ def test_flag_large_events_nosnowball(): def test_flag_large_events_withsnowball(): cube = np.zeros(shape=(1, 5, 7, 7), dtype=np.uint8) # cross of saturation surrounding by jump -> snowball - cube[0, 2, 3, 3] = DQFLAGS['SATURATED'] - cube[0, 2, 2, 3] = DQFLAGS['SATURATED'] - cube[0, 2, 3, 4] = DQFLAGS['SATURATED'] - cube[0, 2, 4, 3] = DQFLAGS['SATURATED'] - cube[0, 2, 3, 2] = DQFLAGS['SATURATED'] - cube[0, 2, 1, 1:6] = DQFLAGS['JUMP_DET'] - cube[0, 2, 5, 1:6] = DQFLAGS['JUMP_DET'] - cube[0, 2, 1:6, 1] = DQFLAGS['JUMP_DET'] - cube[0, 2, 1:6, 5] = DQFLAGS['JUMP_DET'] - flag_large_events(cube, DQFLAGS['JUMP_DET'], DQFLAGS['SATURATED'], min_sat_area=1, - min_jump_area=6, - expand_factor=1.9, edge_size=0, - sat_required_snowball=True, min_sat_radius_extend=.5, sat_expand=1.1) + cube[0, 2, 3, 3] = DQFLAGS["SATURATED"] + cube[0, 2, 2, 3] = DQFLAGS["SATURATED"] + cube[0, 2, 3, 4] = DQFLAGS["SATURATED"] + cube[0, 2, 4, 3] = DQFLAGS["SATURATED"] + cube[0, 2, 3, 2] = DQFLAGS["SATURATED"] + cube[0, 2, 1, 1:6] = DQFLAGS["JUMP_DET"] + cube[0, 2, 5, 1:6] = DQFLAGS["JUMP_DET"] + cube[0, 2, 1:6, 1] = DQFLAGS["JUMP_DET"] + cube[0, 2, 1:6, 5] = DQFLAGS["JUMP_DET"] + flag_large_events( + cube, + DQFLAGS["JUMP_DET"], + DQFLAGS["SATURATED"], + min_sat_area=1, + min_jump_area=6, + expand_factor=1.9, + edge_size=0, + sat_required_snowball=True, + min_sat_radius_extend=0.5, + sat_expand=1.1, + ) assert cube[0, 1, 2, 2] == 0 assert cube[0, 1, 3, 5] == 0 assert cube[0, 2, 0, 0] == 0 - assert cube[0, 2, 1, 0] == DQFLAGS['JUMP_DET'] # Jump was extended - assert cube[0, 2, 2, 2] == DQFLAGS['SATURATED'] # Saturation was extended - assert cube[0, 2, 3, 6] == DQFLAGS['JUMP_DET'] + assert cube[0, 2, 1, 0] == DQFLAGS["JUMP_DET"] # Jump was extended + assert cube[0, 2, 2, 2] == DQFLAGS["SATURATED"] # Saturation was extended + assert cube[0, 2, 3, 6] == DQFLAGS["JUMP_DET"] def test_flag_large_events_groupedsnowball(): cube = np.zeros(shape=(1, 5, 7, 7), dtype=np.uint8) # cross of saturation surrounding by jump -> snowball - cube[0, 1, :, :] = DQFLAGS['JUMP_DET'] - cube[0, 2, 3, 3] = DQFLAGS['SATURATED'] - cube[0, 2, 2, 3] = DQFLAGS['SATURATED'] - cube[0, 2, 3, 4] = DQFLAGS['SATURATED'] - cube[0, 2, 4, 3] = DQFLAGS['SATURATED'] - cube[0, 2, 3, 2] = DQFLAGS['SATURATED'] - cube[0, 2, 1, 1:6] = DQFLAGS['JUMP_DET'] - cube[0, 2, 5, 1:6] = DQFLAGS['JUMP_DET'] - cube[0, 2, 1:6, 1] = DQFLAGS['JUMP_DET'] - cube[0, 2, 1:6, 5] = DQFLAGS['JUMP_DET'] - flag_large_events(cube, DQFLAGS['JUMP_DET'], DQFLAGS['SATURATED'], min_sat_area=1, - min_jump_area=6, - expand_factor=1.9, edge_size=0, - sat_required_snowball=True, min_sat_radius_extend=.5, sat_expand=1.1) -# assert cube[0, 1, 2, 2] == 0 -# assert cube[0, 1, 3, 5] == 0 + cube[0, 1, :, :] = DQFLAGS["JUMP_DET"] + cube[0, 2, 3, 3] = DQFLAGS["SATURATED"] + cube[0, 2, 2, 3] = DQFLAGS["SATURATED"] + cube[0, 2, 3, 4] = DQFLAGS["SATURATED"] + cube[0, 2, 4, 3] = DQFLAGS["SATURATED"] + cube[0, 2, 3, 2] = DQFLAGS["SATURATED"] + cube[0, 2, 1, 1:6] = DQFLAGS["JUMP_DET"] + cube[0, 2, 5, 1:6] = DQFLAGS["JUMP_DET"] + cube[0, 2, 1:6, 1] = DQFLAGS["JUMP_DET"] + cube[0, 2, 1:6, 5] = DQFLAGS["JUMP_DET"] + flag_large_events( + cube, + DQFLAGS["JUMP_DET"], + DQFLAGS["SATURATED"], + min_sat_area=1, + min_jump_area=6, + expand_factor=1.9, + edge_size=0, + sat_required_snowball=True, + min_sat_radius_extend=0.5, + sat_expand=1.1, + ) + # assert cube[0, 1, 2, 2] == 0 + # assert cube[0, 1, 3, 5] == 0 assert cube[0, 2, 0, 0] == 0 - assert cube[0, 2, 1, 0] == DQFLAGS['JUMP_DET'] # Jump 
was extended - assert cube[0, 2, 2, 2] == DQFLAGS['SATURATED'] # Saturation was extended + assert cube[0, 2, 1, 0] == DQFLAGS["JUMP_DET"] # Jump was extended + assert cube[0, 2, 2, 2] == DQFLAGS["SATURATED"] # Saturation was extended + def test_flag_large_events_withsnowball_noextension(): cube = np.zeros(shape=(1, 5, 7, 7), dtype=np.uint8) # cross of saturation surrounding by jump -> snowball - cube[0, 2, 3, 3] = DQFLAGS['SATURATED'] - cube[0, 2, 2, 3] = DQFLAGS['SATURATED'] - cube[0, 2, 3, 4] = DQFLAGS['SATURATED'] - cube[0, 2, 4, 3] = DQFLAGS['SATURATED'] - cube[0, 2, 3, 2] = DQFLAGS['SATURATED'] - cube[0, 2, 1, 1:6] = DQFLAGS['JUMP_DET'] - cube[0, 2, 5, 1:6] = DQFLAGS['JUMP_DET'] - cube[0, 2, 1:6, 1] = DQFLAGS['JUMP_DET'] - cube[0, 2, 1:6, 5] = DQFLAGS['JUMP_DET'] - flag_large_events(cube, DQFLAGS['JUMP_DET'], DQFLAGS['SATURATED'], min_sat_area=1, - min_jump_area=6, - expand_factor=1.9, edge_size=0, - sat_required_snowball=True, min_sat_radius_extend=.5, - sat_expand=1.1, max_extended_radius=1) + cube[0, 2, 3, 3] = DQFLAGS["SATURATED"] + cube[0, 2, 2, 3] = DQFLAGS["SATURATED"] + cube[0, 2, 3, 4] = DQFLAGS["SATURATED"] + cube[0, 2, 4, 3] = DQFLAGS["SATURATED"] + cube[0, 2, 3, 2] = DQFLAGS["SATURATED"] + cube[0, 2, 1, 1:6] = DQFLAGS["JUMP_DET"] + cube[0, 2, 5, 1:6] = DQFLAGS["JUMP_DET"] + cube[0, 2, 1:6, 1] = DQFLAGS["JUMP_DET"] + cube[0, 2, 1:6, 5] = DQFLAGS["JUMP_DET"] + flag_large_events( + cube, + DQFLAGS["JUMP_DET"], + DQFLAGS["SATURATED"], + min_sat_area=1, + min_jump_area=6, + expand_factor=1.9, + edge_size=0, + sat_required_snowball=True, + min_sat_radius_extend=0.5, + sat_expand=1.1, + max_extended_radius=1, + ) assert cube[0, 1, 2, 2] == 0 assert cube[0, 1, 3, 5] == 0 assert cube[0, 2, 0, 0] == 0 @@ -179,44 +219,54 @@ def test_find_faint_extended(): rng = np.random.default_rng(12345) data[0, 1:, 14:20, 15:20] = 6 * gain * 1.7 data = data + rng.normal(size=(nint, ngrps, nrows, ncols)) * readnoise - gdq, num_showers = find_faint_extended(data, gdq, readnoise, 1, 100, - snr_threshold=1.3, - min_shower_area=20, inner=1, - outer=2, sat_flag=2, jump_flag=4, - ellipse_expand=1.1, num_grps_masked=3) + gdq, num_showers = find_faint_extended( + data, + gdq, + readnoise, + 1, + 100, + snr_threshold=1.3, + min_shower_area=20, + inner=1, + outer=2, + sat_flag=2, + jump_flag=4, + ellipse_expand=1.1, + num_grps_masked=3, + ) # Check that all the expected samples in group 2 are flagged as jump and # that they are not flagged outside - assert (num_showers == 3) - assert (np.all(gdq[0, 1, 22, 14:23] == 0)) - assert (np.all(gdq[0, 1, 21, 16:20] == DQFLAGS['JUMP_DET'])) - assert (np.all(gdq[0, 1, 20, 15:22] == DQFLAGS['JUMP_DET'])) - assert (np.all(gdq[0, 1, 19, 15:23] == DQFLAGS['JUMP_DET'])) - assert (np.all(gdq[0, 1, 18, 14:23] == DQFLAGS['JUMP_DET'])) - assert (np.all(gdq[0, 1, 17, 14:23] == DQFLAGS['JUMP_DET'])) - assert (np.all(gdq[0, 1, 16, 14:23] == DQFLAGS['JUMP_DET'])) - assert (np.all(gdq[0, 1, 15, 14:22] == DQFLAGS['JUMP_DET'])) - assert (np.all(gdq[0, 1, 14, 16:22] == DQFLAGS['JUMP_DET'])) - assert (np.all(gdq[0, 1, 13, 17:21] == DQFLAGS['JUMP_DET'])) - assert (np.all(gdq[0, 1, 12, 14:23] == 0)) - assert (np.all(gdq[0, 1, 12:23, 24] == 0)) - assert (np.all(gdq[0, 1, 12:23, 13] == 0)) + assert num_showers == 3 + assert np.all(gdq[0, 1, 22, 14:23] == 0) + assert np.all(gdq[0, 1, 21, 16:20] == DQFLAGS["JUMP_DET"]) + assert np.all(gdq[0, 1, 20, 15:22] == DQFLAGS["JUMP_DET"]) + assert np.all(gdq[0, 1, 19, 15:23] == DQFLAGS["JUMP_DET"]) + assert np.all(gdq[0, 1, 18, 14:23] == 
DQFLAGS["JUMP_DET"]) + assert np.all(gdq[0, 1, 17, 14:23] == DQFLAGS["JUMP_DET"]) + assert np.all(gdq[0, 1, 16, 14:23] == DQFLAGS["JUMP_DET"]) + assert np.all(gdq[0, 1, 15, 14:22] == DQFLAGS["JUMP_DET"]) + assert np.all(gdq[0, 1, 14, 16:22] == DQFLAGS["JUMP_DET"]) + assert np.all(gdq[0, 1, 13, 17:21] == DQFLAGS["JUMP_DET"]) + assert np.all(gdq[0, 1, 12, 14:23] == 0) + assert np.all(gdq[0, 1, 12:23, 24] == 0) + assert np.all(gdq[0, 1, 12:23, 13] == 0) # Check that the same area is flagged in the first group after the event - assert (np.all(gdq[0, 2, 22, 14:23] == 0)) - assert (np.all(gdq[0, 2, 21, 16:20] == DQFLAGS['JUMP_DET'])) - assert (np.all(gdq[0, 2, 20, 15:22] == DQFLAGS['JUMP_DET'])) - assert (np.all(gdq[0, 2, 19, 15:23] == DQFLAGS['JUMP_DET'])) - assert (np.all(gdq[0, 2, 18, 14:23] == DQFLAGS['JUMP_DET'])) - assert (np.all(gdq[0, 2, 17, 14:23] == DQFLAGS['JUMP_DET'])) - assert (np.all(gdq[0, 2, 16, 14:23] == DQFLAGS['JUMP_DET'])) - assert (np.all(gdq[0, 2, 15, 14:22] == DQFLAGS['JUMP_DET'])) - assert (np.all(gdq[0, 2, 14, 16:22] == DQFLAGS['JUMP_DET'])) - assert (np.all(gdq[0, 2, 13, 17:21] == DQFLAGS['JUMP_DET'])) - assert (np.all(gdq[0, 2, 12, 14:23] == 0)) - assert (np.all(gdq[0, 2, 12:22, 24] == 0)) - assert (np.all(gdq[0, 2, 12:22, 13] == 0)) + assert np.all(gdq[0, 2, 22, 14:23] == 0) + assert np.all(gdq[0, 2, 21, 16:20] == DQFLAGS["JUMP_DET"]) + assert np.all(gdq[0, 2, 20, 15:22] == DQFLAGS["JUMP_DET"]) + assert np.all(gdq[0, 2, 19, 15:23] == DQFLAGS["JUMP_DET"]) + assert np.all(gdq[0, 2, 18, 14:23] == DQFLAGS["JUMP_DET"]) + assert np.all(gdq[0, 2, 17, 14:23] == DQFLAGS["JUMP_DET"]) + assert np.all(gdq[0, 2, 16, 14:23] == DQFLAGS["JUMP_DET"]) + assert np.all(gdq[0, 2, 15, 14:22] == DQFLAGS["JUMP_DET"]) + assert np.all(gdq[0, 2, 14, 16:22] == DQFLAGS["JUMP_DET"]) + assert np.all(gdq[0, 2, 13, 17:21] == DQFLAGS["JUMP_DET"]) + assert np.all(gdq[0, 2, 12, 14:23] == 0) + assert np.all(gdq[0, 2, 12:22, 24] == 0) + assert np.all(gdq[0, 2, 12:22, 13] == 0) # Check that the flags are not applied in the 3rd group after the event - assert (np.all(gdq[0, 4, 12:22, 14:23]) == 0) + assert np.all(gdq[0, 4, 12:22, 14:23]) == 0 # No shower is found because the event is identical in all ints @@ -229,30 +279,40 @@ def test_find_faint_extended_sigclip(): rng = np.random.default_rng(12345) data[0, 1:, 14:20, 15:20] = 6 * gain * 1.7 data = data + rng.normal(size=(nint, ngrps, nrows, ncols)) * readnoise - gdq, num_showers = find_faint_extended(data, gdq, readnoise, 1, 100, - snr_threshold=1.3, - min_shower_area=20, inner=1, - outer=2, sat_flag=2, jump_flag=4, - ellipse_expand=1.1, num_grps_masked=3) + gdq, num_showers = find_faint_extended( + data, + gdq, + readnoise, + 1, + 100, + snr_threshold=1.3, + min_shower_area=20, + inner=1, + outer=2, + sat_flag=2, + jump_flag=4, + ellipse_expand=1.1, + num_grps_masked=3, + ) # Check that all the expected samples in group 2 are flagged as jump and # that they are not flagged outside - assert(num_showers == 0) - assert (np.all(gdq[0, 1, 22, 14:23] == 0)) - assert (np.all(gdq[0, 1, 21, 16:20] == 0)) - assert (np.all(gdq[0, 1, 20, 15:22] == 0)) - assert (np.all(gdq[0, 1, 19, 15:23] == 0)) - assert (np.all(gdq[0, 1, 18, 14:23] == 0)) - assert (np.all(gdq[0, 1, 17, 14:23] == 0)) - assert (np.all(gdq[0, 1, 16, 14:23] == 0)) - assert (np.all(gdq[0, 1, 15, 14:22] == 0)) - assert (np.all(gdq[0, 1, 14, 16:22] == 0)) - assert (np.all(gdq[0, 1, 13, 17:21] == 0)) - assert (np.all(gdq[0, 1, 12, 14:23] == 0)) - assert (np.all(gdq[0, 1, 12:23, 24] == 0)) - assert 
(np.all(gdq[0, 1, 12:23, 13] == 0)) + assert num_showers == 0 + assert np.all(gdq[0, 1, 22, 14:23] == 0) + assert np.all(gdq[0, 1, 21, 16:20] == 0) + assert np.all(gdq[0, 1, 20, 15:22] == 0) + assert np.all(gdq[0, 1, 19, 15:23] == 0) + assert np.all(gdq[0, 1, 18, 14:23] == 0) + assert np.all(gdq[0, 1, 17, 14:23] == 0) + assert np.all(gdq[0, 1, 16, 14:23] == 0) + assert np.all(gdq[0, 1, 15, 14:22] == 0) + assert np.all(gdq[0, 1, 14, 16:22] == 0) + assert np.all(gdq[0, 1, 13, 17:21] == 0) + assert np.all(gdq[0, 1, 12, 14:23] == 0) + assert np.all(gdq[0, 1, 12:23, 24] == 0) + assert np.all(gdq[0, 1, 12:23, 13] == 0) # Check that the flags are not applied in the 3rd group after the event - assert (np.all(gdq[0, 4, 12:22, 14:23]) == 0) + assert np.all(gdq[0, 4, 12:22, 14:23]) == 0 def test_inside_ellipse5(): @@ -279,16 +339,16 @@ def test_inside_ellipes5(): def test_calc_num_slices(): n_rows = 20 max_available_cores = 10 - assert(calc_num_slices(n_rows, 'none', max_available_cores) == 1) - assert (calc_num_slices(n_rows, 'half', max_available_cores) == 5) - assert (calc_num_slices(n_rows, '3', max_available_cores) == 3) - assert (calc_num_slices(n_rows, '7', max_available_cores) == 7) - assert (calc_num_slices(n_rows, '21', max_available_cores) == 10) - assert (calc_num_slices(n_rows, 'quarter', max_available_cores) == 2) - assert (calc_num_slices(n_rows, '7.5', max_available_cores) == 1) - assert (calc_num_slices(n_rows, 'one', max_available_cores) == 1) - assert (calc_num_slices(n_rows, '-5', max_available_cores) == 1) - assert (calc_num_slices(n_rows, 'all', max_available_cores) == 10) - assert (calc_num_slices(n_rows, '3/4', max_available_cores) == 1) + assert calc_num_slices(n_rows, "none", max_available_cores) == 1 + assert calc_num_slices(n_rows, "half", max_available_cores) == 5 + assert calc_num_slices(n_rows, "3", max_available_cores) == 3 + assert calc_num_slices(n_rows, "7", max_available_cores) == 7 + assert calc_num_slices(n_rows, "21", max_available_cores) == 10 + assert calc_num_slices(n_rows, "quarter", max_available_cores) == 2 + assert calc_num_slices(n_rows, "7.5", max_available_cores) == 1 + assert calc_num_slices(n_rows, "one", max_available_cores) == 1 + assert calc_num_slices(n_rows, "-5", max_available_cores) == 1 + assert calc_num_slices(n_rows, "all", max_available_cores) == 10 + assert calc_num_slices(n_rows, "3/4", max_available_cores) == 1 n_rows = 9 - assert (calc_num_slices(n_rows, '21', max_available_cores) == 9) + assert calc_num_slices(n_rows, "21", max_available_cores) == 9 diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index a6966a02..28b7a810 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -2,10 +2,12 @@ import pytest from numpy.testing import assert_allclose -from stcal.ramp_fitting.ols_cas22._jump import (fill_fixed_values, - _fill_pixel_values, - FixedOffsets, - PixelOffsets) +from stcal.ramp_fitting.ols_cas22._jump import ( + fill_fixed_values, + _fill_pixel_values, + FixedOffsets, + PixelOffsets, +) from stcal.ramp_fitting.ols_cas22._ramp import from_read_pattern, init_ramps from stcal.ramp_fitting.ols_cas22 import fit_ramps, Parameter, Variance, JUMP_DET @@ -45,10 +47,15 @@ def test_init_ramps(): """ # from stcal.ramp_fitting.ols_cas22._core import _init_ramps_list - dq = np.array([[0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1], - [0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1], - [0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1], - [0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1]], dtype=np.int32) + dq = 
np.array( + [ + [0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1], + [0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1], + [0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1], + [0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1], + ], + dtype=np.int32, + ) n_resultants, n_pixels = dq.shape ramps = [init_ramps(dq[:, index], n_resultants) for index in range(n_pixels)] @@ -58,27 +65,27 @@ def test_init_ramps(): # Check that the ramps are correct # No DQ - assert ramps[0] == [{'start': 0, 'end': 3}] + assert ramps[0] == [{"start": 0, "end": 3}] # 1 DQ - assert ramps[1] == [{'start': 1, 'end': 3}] - assert ramps[2] == [{'start': 0, 'end': 0}, {'start': 2, 'end': 3}] - assert ramps[3] == [{'start': 0, 'end': 1}, {'start': 3, 'end': 3}] - assert ramps[4] == [{'start': 0, 'end': 2}] + assert ramps[1] == [{"start": 1, "end": 3}] + assert ramps[2] == [{"start": 0, "end": 0}, {"start": 2, "end": 3}] + assert ramps[3] == [{"start": 0, "end": 1}, {"start": 3, "end": 3}] + assert ramps[4] == [{"start": 0, "end": 2}] # 2 DQ - assert ramps[5] == [{'start': 2, 'end': 3}] - assert ramps[6] == [{'start': 1, 'end': 1}, {'start': 3, 'end': 3}] - assert ramps[7] == [{'start': 1, 'end': 2}] - assert ramps[8] == [{'start': 0, 'end': 0}, {'start': 3, 'end': 3}] - assert ramps[9] == [{'start': 0, 'end': 0}, {'start': 2, 'end': 2}] - assert ramps[10] == [{'start': 0, 'end': 1}] + assert ramps[5] == [{"start": 2, "end": 3}] + assert ramps[6] == [{"start": 1, "end": 1}, {"start": 3, "end": 3}] + assert ramps[7] == [{"start": 1, "end": 2}] + assert ramps[8] == [{"start": 0, "end": 0}, {"start": 3, "end": 3}] + assert ramps[9] == [{"start": 0, "end": 0}, {"start": 2, "end": 2}] + assert ramps[10] == [{"start": 0, "end": 1}] # 3 DQ - assert ramps[11] == [{'start': 3, 'end': 3}] - assert ramps[12] == [{'start': 2, 'end': 2}] - assert ramps[13] == [{'start': 1, 'end': 1}] - assert ramps[14] == [{'start': 0, 'end': 0}] + assert ramps[11] == [{"start": 3, "end": 3}] + assert ramps[12] == [{"start": 2, "end": 2}] + assert ramps[13] == [{"start": 1, "end": 1}] + assert ramps[14] == [{"start": 0, "end": 0}] # 4 DQ assert ramps[15] == [] @@ -102,7 +109,7 @@ def read_pattern(): [6, 7, 8], [9, 10, 11, 12, 13, 14, 15, 16, 17, 18], [19, 20, 21], - [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36] + [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36], ] @@ -110,9 +117,9 @@ def test_from_read_pattern(read_pattern): """Test turning read_pattern into the time data""" metadata = from_read_pattern(read_pattern, READ_TIME, len(read_pattern))._to_dict() - t_bar = metadata['t_bar'] - tau = metadata['tau'] - n_reads = metadata['n_reads'] + t_bar = metadata["t_bar"] + tau = metadata["tau"] + n_reads = metadata["n_reads"] # Check that the data is correct assert_allclose(t_bar, [7.6, 15.2, 21.279999, 41.040001, 60.799999, 88.159996]) @@ -139,7 +146,7 @@ def ramp_data(read_pattern): """ data = from_read_pattern(read_pattern, READ_TIME, len(read_pattern))._to_dict() - yield data['t_bar'], data['tau'], data['n_reads'], read_pattern + yield data["t_bar"], data["tau"], data["n_reads"], read_pattern def test_fill_fixed_values(ramp_data): @@ -154,10 +161,10 @@ def test_fill_fixed_values(ramp_data): assert fixed.shape == (2 * 4, n_resultants - 1) # Split into the different types of data - t_bar_diffs = fixed[FixedOffsets.single_t_bar_diff:FixedOffsets.double_t_bar_diff + 1, :] - t_bar_diff_sqrs = fixed[FixedOffsets.single_t_bar_diff_sqr:FixedOffsets.double_t_bar_diff_sqr + 1, :] - read_recip = 
fixed[FixedOffsets.single_read_recip:FixedOffsets.double_read_recip + 1, :] - var_slope_vals = fixed[FixedOffsets.single_var_slope_val:FixedOffsets.double_var_slope_val + 1, :] + t_bar_diffs = fixed[FixedOffsets.single_t_bar_diff : FixedOffsets.double_t_bar_diff + 1, :] + t_bar_diff_sqrs = fixed[FixedOffsets.single_t_bar_diff_sqr : FixedOffsets.double_t_bar_diff_sqr + 1, :] + read_recip = fixed[FixedOffsets.single_read_recip : FixedOffsets.double_read_recip + 1, :] + var_slope_vals = fixed[FixedOffsets.single_var_slope_val : FixedOffsets.double_var_slope_val + 1, :] # Sanity check that these are all the right shape assert t_bar_diffs.shape == (2, n_resultants - 1) @@ -169,13 +176,13 @@ def test_fill_fixed_values(ramp_data): # These are computed using loop in cython, here we check against numpy # Single diffs assert np.all(t_bar_diffs[0] == t_bar[1:] - t_bar[:-1]) - assert np.all(t_bar_diff_sqrs[0] == (t_bar[1:] - t_bar[:-1])**2) + assert np.all(t_bar_diff_sqrs[0] == (t_bar[1:] - t_bar[:-1]) ** 2) assert np.all(read_recip[0] == np.float32(1 / n_reads[1:]) + np.float32(1 / n_reads[:-1])) assert np.all(var_slope_vals[0] == (tau[1:] + tau[:-1] - 2 * np.minimum(t_bar[1:], t_bar[:-1]))) # Double diffs assert np.all(t_bar_diffs[1, :-1] == t_bar[2:] - t_bar[:-2]) - assert np.all(t_bar_diff_sqrs[1, :-1] == (t_bar[2:] - t_bar[:-2])**2) + assert np.all(t_bar_diff_sqrs[1, :-1] == (t_bar[2:] - t_bar[:-2]) ** 2) assert np.all(read_recip[1, :-1] == np.float32(1 / n_reads[2:]) + np.float32(1 / n_reads[:-2])) assert np.all(var_slope_vals[1, :-1] == (tau[2:] + tau[:-2] - 2 * np.minimum(t_bar[2:], t_bar[:-2]))) @@ -266,8 +273,8 @@ def test__fill_pixel_values(pixel_data): assert pixel.shape == (2 * 2, n_resultants - 1) # Split into the different types of data - local_slopes = pixel[PixelOffsets.single_local_slope:PixelOffsets.double_local_slope + 1, :] - var_read_noise = pixel[PixelOffsets.single_var_read_noise:PixelOffsets.double_var_read_noise + 1, :] + local_slopes = pixel[PixelOffsets.single_local_slope : PixelOffsets.double_local_slope + 1, :] + var_read_noise = pixel[PixelOffsets.single_var_read_noise : PixelOffsets.double_var_read_noise + 1, :] # Sanity check that these are all the right shape assert local_slopes.shape == (2, n_resultants - 1) @@ -277,14 +284,16 @@ def test__fill_pixel_values(pixel_data): # These are computed using loop in cython, here we check against numpy # Single diffs assert np.all(local_slopes[0] == (resultants[1:] - resultants[:-1]) / (t_bar[1:] - t_bar[:-1])) - assert np.all(var_read_noise[0] == np.float32(READ_NOISE ** 2) * ( - np.float32(1 / n_reads[1:]) + np.float32(1 / n_reads[:-1])) + assert np.all( + var_read_noise[0] + == np.float32(READ_NOISE**2) * (np.float32(1 / n_reads[1:]) + np.float32(1 / n_reads[:-1])) ) # Double diffs assert np.all(local_slopes[1, :-1] == (resultants[2:] - resultants[:-2]) / (t_bar[2:] - t_bar[:-2])) - assert np.all(var_read_noise[1, :-1] == np.float32(READ_NOISE ** 2) * ( - np.float32(1 / n_reads[2:]) + np.float32(1 / n_reads[:-2])) + assert np.all( + var_read_noise[1, :-1] + == np.float32(READ_NOISE**2) * (np.float32(1 / n_reads[2:]) + np.float32(1 / n_reads[:-2])) ) # Last double diff should be NaN @@ -323,8 +332,9 @@ def test_fit_ramps(detector_data, use_jump, use_dq): """ resultants, read_noise, read_pattern = detector_data dq = ( - (RNG.uniform(size=resultants.shape) > GOOD_PROB).astype(np.int32) if use_dq else - np.zeros(resultants.shape, dtype=np.int32) + (RNG.uniform(size=resultants.shape) > GOOD_PROB).astype(np.int32) + if use_dq + else 
np.zeros(resultants.shape, dtype=np.int32) ) # only use okay ramps @@ -338,8 +348,9 @@ def test_fit_ramps(detector_data, use_jump, use_dq): if not use_dq: assert okay.all() - output = fit_ramps(resultants, dq, read_noise, READ_TIME, read_pattern, use_jump=use_jump, - include_diagnostic=True) + output = fit_ramps( + resultants, dq, read_noise, READ_TIME, read_pattern, use_jump=use_jump, include_diagnostic=True + ) assert len(output.fits) == N_PIXELS # sanity check that a fit is output for each pixel chi2 = 0 @@ -352,20 +363,20 @@ def test_fit_ramps(detector_data, use_jump, use_dq): # purposefully placed ones which we know about. So the `test_find_jumps` # can focus on checking that the jumps found are the correct ones, # and that all jumps introduced are detected properly. - assert len(fit['fits']) == 1 + assert len(fit["fits"]) == 1 if use: # Add okay ramps to chi2 - total_var = fit['average']['read_var'] + fit['average']['poisson_var'] + total_var = fit["average"]["read_var"] + fit["average"]["poisson_var"] if total_var != 0: - chi2 += (fit['average']['slope'] - FLUX)**2 / total_var + chi2 += (fit["average"]["slope"] - FLUX) ** 2 / total_var else: # Check no slope fit for bad ramps - assert fit['average']['slope'] == 0 - assert fit['average']['read_var'] == 0 - assert fit['average']['poisson_var'] == 0 + assert fit["average"]["slope"] == 0 + assert fit["average"]["read_var"] == 0 + assert fit["average"]["poisson_var"] == 0 - assert use_dq # sanity check that this branch is only encountered when use_dq = True + assert use_dq # sanity check that this branch is only encountered when use_dq = True chi2 /= np.sum(okay) assert np.abs(chi2 - 1) < CHI2_TOL @@ -379,17 +390,18 @@ def test_fit_ramps_array_outputs(detector_data, use_jump): resultants, read_noise, read_pattern = detector_data dq = np.zeros(resultants.shape, dtype=np.int32) - output = fit_ramps(resultants, dq, read_noise, READ_TIME, read_pattern, use_jump=use_jump, - include_diagnostic=True) + output = fit_ramps( + resultants, dq, read_noise, READ_TIME, read_pattern, use_jump=use_jump, include_diagnostic=True + ) for fit, par, var in zip(output.fits, output.parameters, output.variances): assert par[Parameter.intercept] == 0 - assert par[Parameter.slope] == fit['average']['slope'] + assert par[Parameter.slope] == fit["average"]["slope"] - assert var[Variance.read_var] == fit['average']['read_var'] - assert var[Variance.poisson_var] == fit['average']['poisson_var'] + assert var[Variance.read_var] == fit["average"]["read_var"] + assert var[Variance.poisson_var] == fit["average"]["poisson_var"] assert var[Variance.total_var] == np.float32( - fit['average']['read_var'] + fit['average']['poisson_var'] + fit["average"]["read_var"] + fit["average"]["poisson_var"] ) @@ -452,8 +464,9 @@ def test_find_jumps(jump_data): resultants, read_noise, read_pattern, jump_reads, jump_resultants = jump_data dq = np.zeros(resultants.shape, dtype=np.int32) - output = fit_ramps(resultants, dq, read_noise, READ_TIME, read_pattern, use_jump=True, - include_diagnostic=True) + output = fit_ramps( + resultants, dq, read_noise, READ_TIME, read_pattern, use_jump=True, include_diagnostic=True + ) assert len(output.fits) == len(jump_reads) # sanity check that a fit/jump is set for every pixel chi2 = 0 @@ -462,37 +475,36 @@ def test_find_jumps(jump_data): incorrect_does_not_capture = 0 incorrect_other = 0 for fit, jump_index, resultant_index in zip(output.fits, jump_reads, jump_resultants): - # Check that the jumps are detected correctly if jump_index == 0: # There is no 
way to detect a jump if it is in the very first read # The very first pixel in this case has a jump in the first read - assert len(fit['jumps']) == 0 + assert len(fit["jumps"]) == 0 assert resultant_index == 0 # sanity check that the jump is indeed in the first resultant # Test the correct ramp_index was recorded: - assert len(fit['index']) == 1 - assert fit['index'][0]['start'] == 0 - assert fit['index'][0]['end'] == len(read_pattern) - 1 + assert len(fit["index"]) == 1 + assert fit["index"][0]["start"] == 0 + assert fit["index"][0]["end"] == len(read_pattern) - 1 else: # There should be a single jump detected; however, this results in # two resultants being excluded. - if resultant_index not in fit['jumps']: + if resultant_index not in fit["jumps"]: incorrect_does_not_capture += 1 continue - if len(fit['jumps']) < 2: + if len(fit["jumps"]) < 2: incorrect_too_few += 1 continue - if len(fit['jumps']) > 2: + if len(fit["jumps"]) > 2: incorrect_too_many += 1 continue # The two resultants excluded should be adjacent jump_correct = [] - for jump in fit['jumps']: - jump_correct.append(jump == resultant_index or - jump == resultant_index - 1 or - jump == resultant_index + 1) + for jump in fit["jumps"]: + jump_correct.append( + jump == resultant_index or jump == resultant_index - 1 or jump == resultant_index + 1 + ) if not all(jump_correct): incorrect_other += 1 continue @@ -517,11 +529,11 @@ def test_find_jumps(jump_data): # assert set(ramp_indices).union(fit['jumps']) == set(range(len(read_pattern))) # Compute the chi2 for the fit and add it to a running "total chi2" - total_var = fit['average']['read_var'] + fit['average']['poisson_var'] - chi2 += (fit['average']['slope'] - FLUX)**2 / total_var + total_var = fit["average"]["read_var"] + fit["average"]["poisson_var"] + chi2 += (fit["average"]["slope"] - FLUX) ** 2 / total_var # Check that the average chi2 is ~1. - chi2 /= (N_PIXELS - incorrect_too_few - incorrect_too_many - incorrect_does_not_capture - incorrect_other) + chi2 /= N_PIXELS - incorrect_too_few - incorrect_too_many - incorrect_does_not_capture - incorrect_other assert np.abs(chi2 - 1) < CHI2_TOL @@ -531,8 +543,9 @@ def test_override_default_threshold(jump_data): dq = np.zeros(resultants.shape, dtype=np.int32) standard = fit_ramps(resultants, dq, read_noise, READ_TIME, read_pattern, use_jump=True) - override = fit_ramps(resultants, dq, read_noise, READ_TIME, read_pattern, use_jump=True, - intercept=0, constant=0) + override = fit_ramps( + resultants, dq, read_noise, READ_TIME, read_pattern, use_jump=True, intercept=0, constant=0 + ) # All this is intended to do is show that with all other things being equal passing non-default # threshold parameters changes the results. 
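
(For context on the `intercept` and `constant` keywords exercised in the hunk above: a minimal sketch of the slope-dependent jump threshold they are assumed to control. The log10 form and the default values here are illustrative assumptions, not values quoted from the library.)

import numpy as np

def cas22_style_threshold(slope, intercept=5.5, constant=1.0 / 3.0):
    # Hedged sketch: the threshold shrinks as the fitted slope (and hence
    # the Poisson noise) grows; the keyword names mirror fit_ramps above.
    slope = np.clip(slope, 1.0, 1.0e4)  # keep log10 well-defined
    return intercept - constant * np.log10(slope)

# Zeroing both parameters collapses the threshold, which is why the
# "override" fit in test_override_default_threshold differs from "standard".
assert cas22_style_threshold(100.0, intercept=0.0, constant=0.0) == 0.0
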
@@ -546,12 +559,13 @@ def test_jump_dq_set(jump_data): resultants, read_noise, read_pattern, jump_reads, jump_resultants = jump_data dq = np.zeros(resultants.shape, dtype=np.int32) - output = fit_ramps(resultants, dq, read_noise, READ_TIME, read_pattern, use_jump=True, - include_diagnostic=True) + output = fit_ramps( + resultants, dq, read_noise, READ_TIME, read_pattern, use_jump=True, include_diagnostic=True + ) for fit, pixel_dq in zip(output.fits, output.dq.transpose()): # Check that all jumps found get marked - assert (pixel_dq[fit['jumps']] == JUMP_DET).all() + assert (pixel_dq[fit["jumps"]] == JUMP_DET).all() # Check that dq flags for jumps are only set if the jump is marked - assert set(np.where(pixel_dq == JUMP_DET)[0]) == set(fit['jumps']) + assert set(np.where(pixel_dq == JUMP_DET)[0]) == set(fit["jumps"]) diff --git a/tests/test_linearity.py b/tests/test_linearity.py index c8e1ca94..bdc2f12e 100644 --- a/tests/test_linearity.py +++ b/tests/test_linearity.py @@ -8,13 +8,7 @@ from stcal.linearity.linearity import linearity_correction -DQFLAGS = { - 'GOOD': 0, - 'DO_NOT_USE': 1, - 'SATURATED': 2, - 'DEAD': 1024, - 'HOT': 2048, - 'NO_LIN_CORR': 1048576} +DQFLAGS = {"GOOD": 0, "DO_NOT_USE": 1, "SATURATED": 2, "DEAD": 1024, "HOT": 2048, "NO_LIN_CORR": 1048576} DELIM = "-" * 80 @@ -45,13 +39,13 @@ def test_coeff_dq(): # Equation is DNcorr = L0 + L1*DN(i) + L2*DN(i)^2 + L3*DN(i)^3 + L4*DN(i)^4 # DN(i) = signal in pixel, Ln = coefficient from ref file # L0 = 0 for all pixels for CDP6 - L0 = 0.0e+00 + L0 = 0.0e00 L1 = 0.85 - L2 = 4.62E-6 - L3 = -6.16E-11 - L4 = 7.23E-16 + L2 = 4.62e-6 + L3 = -6.16e-11 + L4 = 7.23e-16 - coeffs = np.asarray([L0, L1, L2, L3, L4], dtype='float') + coeffs = np.asarray([L0, L1, L2, L3, L4], dtype="float") # pixels we are testing using above coefficients lin_coeffs[:, 30, 50] = coeffs @@ -61,7 +55,7 @@ def test_coeff_dq(): lin_dq = np.zeros((ysize, xsize), dtype=np.uint32) # check behavior with NaN coefficients: should not alter pixel values - coeffs2 = np.asarray([L0, np.nan, L2, L3, L4], dtype='float') + coeffs2 = np.asarray([L0, np.nan, L2, L3, L4], dtype="float") lin_coeffs[:, 20, 50] = coeffs2 data[0, 50, 20, 50] = 500.0 @@ -80,36 +74,35 @@ def test_coeff_dq(): data[0, 30, 35, 36] = 35 # pixel to check that dq=2 meant no correction was applied # check if dq flags in pixeldq are correctly populated in output - pdq[50, 40] = DQFLAGS['DO_NOT_USE'] - pdq[50, 41] = DQFLAGS['SATURATED'] - pdq[50, 42] = DQFLAGS['DEAD'] - pdq[50, 43] = DQFLAGS['HOT'] + pdq[50, 40] = DQFLAGS["DO_NOT_USE"] + pdq[50, 41] = DQFLAGS["SATURATED"] + pdq[50, 42] = DQFLAGS["DEAD"] + pdq[50, 43] = DQFLAGS["HOT"] # set dq flags in DQ of reference file - lin_dq[35, 35] = DQFLAGS['DO_NOT_USE'] - lin_dq[35, 36] = DQFLAGS['NO_LIN_CORR'] - lin_dq[30, 50] = DQFLAGS['GOOD'] + lin_dq[35, 35] = DQFLAGS["DO_NOT_USE"] + lin_dq[35, 36] = DQFLAGS["NO_LIN_CORR"] + lin_dq[30, 50] = DQFLAGS["GOOD"] np.bitwise_or(pdq, lin_dq) # run linearity correction - output_data, output_pdq, _ = linearity_correction( - data, gdq, pdq, lin_coeffs, lin_dq, DQFLAGS) + output_data, output_pdq, _ = linearity_correction(data, gdq, pdq, lin_coeffs, lin_dq, DQFLAGS) # check that multiplication of polynomial was done correctly for specified pixel outval = L0 + (L1 * scival) + (L2 * scival**2) + (L3 * scival**3) + (L4 * scival**4) - assert(np.isclose(output_data[0, 45, 30, 50], outval, rtol=0.00001)) + assert np.isclose(output_data[0, 45, 30, 50], outval, rtol=0.00001) # check that dq value was handled correctly - assert 
output_pdq[35, 35] == DQFLAGS['DO_NOT_USE'] - assert output_pdq[35, 36] == DQFLAGS['NO_LIN_CORR'] + assert output_pdq[35, 35] == DQFLAGS["DO_NOT_USE"] + assert output_pdq[35, 36] == DQFLAGS["NO_LIN_CORR"] # NO_LIN_CORR, sci value should not change assert output_data[0, 30, 35, 36] == 35 # NaN coefficient should not change data value assert output_data[0, 50, 20, 50] == 500.0 # dq for pixel with all zero lin coeffs should be NO_LIN_CORR - assert output_pdq[25, 25] == DQFLAGS['NO_LIN_CORR'] + assert output_pdq[25, 25] == DQFLAGS["NO_LIN_CORR"] def create_science_data(dims, ncoeffs): @@ -151,15 +144,16 @@ def test_zero_frame(): base = 31.459 data[0, :, 0, 0] = np.array([(k + 1) * base for k in range(ngroups)], dtype=float) - zframe[0, 0, :] = np.array([data[0, 0, 0, 0] * 0.666666, 0.]) + zframe[0, 0, :] = np.array([data[0, 0, 0, 0] * 0.666666, 0.0]) - lin_base = 2.718 / (base * 10.) - coeffs = np.array([lin_base**(k) for k in range(ncoeffs)], dtype=float) + lin_base = 2.718 / (base * 10.0) + coeffs = np.array([lin_base ** (k) for k in range(ncoeffs)], dtype=float) lin_coeffs[:, 0, 0] = coeffs output_data, output_pdq, new_zframe = linearity_correction( - data, gdq, pdq, lin_coeffs, lin_dq, DQFLAGS, zframe) + data, gdq, pdq, lin_coeffs, lin_dq, DQFLAGS, zframe + ) zcheck = np.zeros((nints, nrows, ncols), dtype=float) - zcheck[0, 0, :] = np.array([1.22106063, 0.]) + zcheck[0, 0, :] = np.array([1.22106063, 0.0]) np.testing.assert_almost_equal(new_zframe, zcheck, decimal=5) diff --git a/tests/test_ramp_fitting.py b/tests/test_ramp_fitting.py index 6d25588c..08ebe03f 100644 --- a/tests/test_ramp_fitting.py +++ b/tests/test_ramp_fitting.py @@ -12,12 +12,12 @@ # to me. [KDG - 19 Dec 2018] dqflags = { - 'GOOD': 0, # Good pixel. - 'DO_NOT_USE': 2**0, # Bad pixel. Do not use. - 'SATURATED': 2**1, # Pixel saturated during exposure. - 'JUMP_DET': 2**2, # Jump detected during exposure. - 'NO_GAIN_VALUE': 2**19, # Gain cannot be measured. - 'UNRELIABLE_SLOPE': 2**24, # Slope variance large (i.e., noisy pixel). + "GOOD": 0, # Good pixel. + "DO_NOT_USE": 2**0, # Bad pixel. Do not use. + "SATURATED": 2**1, # Pixel saturated during exposure. + "JUMP_DET": 2**2, # Jump detected during exposure. + "NO_GAIN_VALUE": 2**19, # Gain cannot be measured. + "UNRELIABLE_SLOPE": 2**24, # Slope variance large (i.e., noisy pixel). } GOOD = dqflags["GOOD"] @@ -29,13 +29,14 @@ # ----------------------------------------------------------------------------- # Test Suite + def base_neg_med_rates_single_integration(): """ Creates single integration data for testing ensuring negative median rates. """ - nints, ngroups, nrows, ncols = 1, 10, 1,1 - rnoise_val, gain_val = 10., 1. - nframes, gtime, dtime = 1, 1., 1 + nints, ngroups, nrows, ncols = 1, 10, 1, 1 + rnoise_val, gain_val = 10.0, 1.0 + nframes, gtime, dtime = 1, 1.0, 1 dims = (nints, ngroups, nrows, ncols) var = (rnoise_val, gain_val) tm = (nframes, gtime, dtime) @@ -50,7 +51,8 @@ def base_neg_med_rates_single_integration(): # Run ramp fit on RampData buffsize, save_opt, algo, wt, ncores = 512, True, "OLS", "optimal", "none" slopes, cube, optional, gls_dummy = ramp_fit_data( - ramp_data, buffsize, save_opt, rnoise, gain, algo, wt, ncores, dqflags) + ramp_data, buffsize, save_opt, rnoise, gain, algo, wt, ncores, dqflags + ) return slopes, cube, optional, gls_dummy @@ -61,14 +63,13 @@ def test_neg_med_rates_single_integration_slope(): is zero, readnoise is non-zero and the ERR array is a function of only RNOISE. 
""" - slopes, cube, optional, gls_dummy = \ - base_neg_med_rates_single_integration() + slopes, cube, optional, gls_dummy = base_neg_med_rates_single_integration() sdata, sdq, svp, svr, serr = slopes - assert(sdata[0, 0] < 0.) - assert(svp[0, 0] == 0.) - assert(svr[0, 0] != 0.) - assert(np.sqrt(svr[0, 0]) == serr[0, 0]) + assert sdata[0, 0] < 0.0 + assert svp[0, 0] == 0.0 + assert svr[0, 0] != 0.0 + assert np.sqrt(svr[0, 0]) == serr[0, 0] def test_neg_med_rates_single_integration_integ(): @@ -76,8 +77,7 @@ def test_neg_med_rates_single_integration_integ(): Make sure that for the single integration data the single integration is the same as the slope data. """ - slopes, cube, optional, gls_dummy = \ - base_neg_med_rates_single_integration() + slopes, cube, optional, gls_dummy = base_neg_med_rates_single_integration() sdata, sdq, svp, svr, serr = slopes idata, idq, ivp, ivr, ierr = cube @@ -94,15 +94,13 @@ def test_neg_med_rates_single_integration_optional(): Make sure that for the single integration data the optional results is the same as the slope data. """ - slopes, cube, optional, gls_dummy = \ - base_neg_med_rates_single_integration() + slopes, cube, optional, gls_dummy = base_neg_med_rates_single_integration() sdata, sdq, svp, svr, serr = slopes - oslope, osigslope, ovp, ovr, \ - oyint, osigyint, opedestal, oweights, ocrmag = optional + oslope, osigslope, ovp, ovr, oyint, osigyint, opedestal, oweights, ocrmag = optional tol = 1e-6 - assert(oslope.shape[1] == 1) # Max segments is 1 because clean ramp + assert oslope.shape[1] == 1 # Max segments is 1 because clean ramp np.testing.assert_allclose(oslope[0, 0, :, :], sdata, tol) np.testing.assert_allclose(ovp[0, 0, :, :], svp, tol) np.testing.assert_allclose(ovr[0, 0, :, :], svr, tol) @@ -113,8 +111,8 @@ def base_neg_med_rates_multi_integrations(): Creates multi-integration data for testing ensuring negative median rates. """ nints, ngroups, nrows, ncols = 3, 10, 1, 1 - rnoise_val, gain_val = 10., 1. - nframes, gtime, dtime = 1, 1., 1 + rnoise_val, gain_val = 10.0, 1.0 + nframes, gtime, dtime = 1, 1.0, 1 dims = (nints, ngroups, nrows, ncols) var = (rnoise_val, gain_val) tm = (nframes, gtime, dtime) @@ -132,7 +130,8 @@ def base_neg_med_rates_multi_integrations(): # Run ramp fit on RampData buffsize, save_opt, algo, wt, ncores = 512, True, "OLS", "optimal", "none" slopes, cube, optional, gls_dummy = ramp_fit_data( - ramp_data, buffsize, save_opt, rnoise, gain, algo, wt, ncores, dqflags) + ramp_data, buffsize, save_opt, rnoise, gain, algo, wt, ncores, dqflags + ) return slopes, cube, optional, gls_dummy, dims @@ -141,16 +140,15 @@ def test_neg_med_rates_multi_integrations_slopes(): """ Test computing median rates of a ramp with multiple integrations. """ - slopes, cube, optional, gls_dummy, dims = \ - base_neg_med_rates_multi_integrations() + slopes, cube, optional, gls_dummy, dims = base_neg_med_rates_multi_integrations() nints, ngroups, nrows, ncols = dims sdata, sdq, svp, svr, serr = slopes - assert(sdata[0, 0] < 0.) - assert(svp[0, 0] == 0.) - assert(svr[0, 0] != 0.) - assert(np.sqrt(svr[0, 0]) == serr[0, 0]) + assert sdata[0, 0] < 0.0 + assert svp[0, 0] == 0.0 + assert svr[0, 0] != 0.0 + assert np.sqrt(svr[0, 0]) == serr[0, 0] def test_neg_med_rates_multi_integration_integ(): @@ -159,8 +157,7 @@ def test_neg_med_rates_multi_integration_integ(): results in zero Poisson info and the ERR array a function of only RNOISE. 
""" - slopes, cube, optional, gls_dummy, dims = \ - base_neg_med_rates_multi_integrations() + slopes, cube, optional, gls_dummy, dims = base_neg_med_rates_multi_integrations() sdata, sdq, svp, svr, serr = slopes idata, idq, ivp, ivr, ierr = cube @@ -169,7 +166,7 @@ def test_neg_med_rates_multi_integration_integ(): sdata, sdq, svp, svr, serr = slopes idata, idq, ivp, ivr, ierr = cube - np.testing.assert_allclose(ivp[:, 0, 0], np.array([0., 0., 0.]), tol) + np.testing.assert_allclose(ivp[:, 0, 0], np.array([0.0, 0.0, 0.0]), tol) np.testing.assert_allclose(ierr, np.sqrt(ivr), tol) @@ -179,15 +176,13 @@ def test_neg_med_rates_multi_integration_optional(): one segment has only one segment in the optional results product as well as zero Poisson variance. """ - slopes, cube, optional, gls_dummy, dims = \ - base_neg_med_rates_multi_integrations() + slopes, cube, optional, gls_dummy, dims = base_neg_med_rates_multi_integrations() sdata, sdq, svp, svr, serr = slopes - oslope, osigslope, ovp, ovr, \ - oyint, osigyint, opedestal, oweights, ocrmag = optional + oslope, osigslope, ovp, ovr, oyint, osigyint, opedestal, oweights, ocrmag = optional tol = 1e-6 - assert(oslope.shape[1] == 1) # Max segments is 1 because clean ramp + assert oslope.shape[1] == 1 # Max segments is 1 because clean ramp np.testing.assert_allclose(ovp[:, 0, 0, 0], np.zeros(3), tol) @@ -197,8 +192,8 @@ def base_neg_med_rates_single_integration_multi_segment(): negative median rates. """ nints, ngroups, nrows, ncols = 1, 15, 2, 1 - rnoise_val, gain_val = 10., 1. - nframes, gtime, dtime = 1, 1., 1 + rnoise_val, gain_val = 10.0, 1.0 + nframes, gtime, dtime = 1, 1.0, 1 dims = (nints, ngroups, nrows, ncols) var = (rnoise_val, gain_val) tm = (nframes, gtime, dtime) @@ -218,7 +213,8 @@ def base_neg_med_rates_single_integration_multi_segment(): # Run ramp fit on RampData buffsize, save_opt, algo, wt, ncores = 512, True, "OLS", "optimal", "none" slopes, cube, optional, gls_dummy = ramp_fit_data( - ramp_data, buffsize, save_opt, rnoise, gain, algo, wt, ncores, dqflags) + ramp_data, buffsize, save_opt, rnoise, gain, algo, wt, ncores, dqflags + ) return slopes, cube, optional, gls_dummy, dims @@ -229,16 +225,14 @@ def test_neg_med_rates_single_integration_multi_segment_optional(): segments are created and to make sure all Poisson segements are set to zero. """ - slopes, cube, optional, gls_dummy, dims = \ - base_neg_med_rates_single_integration_multi_segment() + slopes, cube, optional, gls_dummy, dims = base_neg_med_rates_single_integration_multi_segment() - oslope, osigslope, ovp, ovr, \ - oyint, osigyint, opedestal, oweights, ocrmag = optional + oslope, osigslope, ovp, ovr, oyint, osigyint, opedestal, oweights, ocrmag = optional neg_ramp_poisson = ovp[0, :, 0, 0] tol = 1e-6 - assert(ovp.shape[1] == 3) + assert ovp.shape[1] == 3 np.testing.assert_allclose(neg_ramp_poisson, np.zeros(3), tol) @@ -264,8 +258,8 @@ def test_utils_dq_compress_final(): set, nor should it be set in the final DQ. """ nints, ngroups, nrows, ncols = 2, 5, 1, 3 - rnoise_val, gain_val = 10., 1. 
- nframes, gtime, dtime = 1, 1., 1 + rnoise_val, gain_val = 10.0, 1.0 + nframes, gtime, dtime = 1, 1.0, 1 dims = (nints, ngroups, nrows, ncols) var = (rnoise_val, gain_val) tm = (nframes, gtime, dtime) @@ -279,25 +273,26 @@ def test_utils_dq_compress_final(): # Run ramp fit on RampData buffsize, save_opt, algo, wt, ncores = 512, True, "OLS", "optimal", "none" slopes, cube, optional, gls_dummy = ramp_fit_data( - ramp_data, buffsize, save_opt, rnoise, gain, algo, wt, ncores, dqflags) + ramp_data, buffsize, save_opt, rnoise, gain, algo, wt, ncores, dqflags + ) dq = slopes[1] idq = cube[1] # Make sure DO_NOT_USE is set in the expected integrations. - assert(idq[0, 0, 0] & dqflags["DO_NOT_USE"]) - assert(idq[1, 0, 0] & dqflags["DO_NOT_USE"]) + assert idq[0, 0, 0] & dqflags["DO_NOT_USE"] + assert idq[1, 0, 0] & dqflags["DO_NOT_USE"] - assert(idq[0, 0, 1] & dqflags["DO_NOT_USE"]) - assert(not (idq[1, 0, 1] & dqflags["DO_NOT_USE"])) + assert idq[0, 0, 1] & dqflags["DO_NOT_USE"] + assert not (idq[1, 0, 1] & dqflags["DO_NOT_USE"]) - assert(not (idq[0, 0, 2] & dqflags["DO_NOT_USE"])) - assert(not (idq[1, 0, 2] & dqflags["DO_NOT_USE"])) + assert not (idq[0, 0, 2] & dqflags["DO_NOT_USE"]) + assert not (idq[1, 0, 2] & dqflags["DO_NOT_USE"]) # Make sure DO_NOT_USE is set in the expected final DQ. - assert(dq[0, 0] & dqflags["DO_NOT_USE"]) - assert(not(dq[0, 1] & dqflags["DO_NOT_USE"])) - assert(not(dq[0, 2] & dqflags["DO_NOT_USE"])) + assert dq[0, 0] & dqflags["DO_NOT_USE"] + assert not (dq[0, 1] & dqflags["DO_NOT_USE"]) + assert not (dq[0, 2] & dqflags["DO_NOT_USE"]) def jp_2326_test_setup(): @@ -305,8 +300,20 @@ def jp_2326_test_setup(): Sets up data for MIRI testing DO_NOT_USE flags at the beginning of ramps. """ # Set up ramp data - ramp = np.array([120.133545, 117.85222, 87.38832, 66.90588, 51.392555, - 41.65941, 32.15081, 24.25277, 15.955284, 9.500946]) + ramp = np.array( + [ + 120.133545, + 117.85222, + 87.38832, + 66.90588, + 51.392555, + 41.65941, + 32.15081, + 24.25277, + 15.955284, + 9.500946, + ] + ) dnu = dqflags["DO_NOT_USE"] dq = np.array([dnu, 0, 0, 0, 0, 0, 0, 0, 0, dnu]) @@ -320,16 +327,15 @@ def jp_2326_test_setup(): gdq[0, :, 0, 0] = dq.copy() ramp_data = RampData() - ramp_data.set_arrays( - data=data, err=err, groupdq=gdq, pixeldq=pdq) + ramp_data.set_arrays(data=data, err=err, groupdq=gdq, pixeldq=pdq) ramp_data.set_meta( - name="MIRI", frame_time=2.77504, group_time=2.77504, groupgap=0, - nframes=1, drop_frames1=None) + name="MIRI", frame_time=2.77504, group_time=2.77504, groupgap=0, nframes=1, drop_frames1=None + ) ramp_data.set_dqflags(dqflags) # Set up gain and read noise gain = np.ones(shape=(nrows, ncols), dtype=np.float32) * 5.5 - rnoise = np.ones(shape=(nrows, ncols), dtype=np.float32) * 1000. 
+ rnoise = np.ones(shape=(nrows, ncols), dtype=np.float32) * 1000.0 return ramp_data, gain, rnoise @@ -345,7 +351,8 @@ def test_miri_ramp_dnu_at_ramp_beginning(): # Run ramp fit on RampData buffsize, save_opt, algo, wt, ncores = 512, True, "OLS", "optimal", "none" slopes1, cube, optional, gls_dummy = ramp_fit_data( - ramp_data, buffsize, save_opt, rnoise, gain, algo, wt, ncores, dqflags) + ramp_data, buffsize, save_opt, rnoise, gain, algo, wt, ncores, dqflags + ) s1 = slopes1[0] tol = 1e-6 @@ -366,7 +373,8 @@ def test_miri_ramp_dnu_and_jump_at_ramp_beginning(): # Run ramp fit on RampData buffsize, save_opt, algo, wt, ncores = 512, True, "OLS", "optimal", "none" slopes2, cube, optional, gls_dummy = ramp_fit_data( - ramp_data, buffsize, save_opt, rnoise, gain, algo, wt, ncores, dqflags) + ramp_data, buffsize, save_opt, rnoise, gain, algo, wt, ncores, dqflags + ) s2 = slopes2[0] tol = 1e-6 @@ -381,19 +389,17 @@ def test_2_group_cases(): with two groups to test the various DQ cases. """ base_group = [-12328.601, -4289.051] - base_err = [0., 0.] + base_err = [0.0, 0.0] gain_val = 0.9699 rnoise_val = 9.4552 possibilities = [ # Both groups are good [GOOD, GOOD], - # Both groups are bad. Note saturated 0th group kills group 1. [SAT, GOOD], [DNU | SAT, GOOD], [DNU, SAT], - # One group is bad, while the other group is good. [DNU, GOOD], [GOOD, DNU], @@ -430,37 +436,34 @@ def test_2_group_cases(): ramp_data.set_arrays(data, err, groupdq, pixeldq) ramp_data.set_meta( - name="NIRSPEC", - frame_time=14.58889, - group_time=14.58889, - groupgap=0, - nframes=1, - drop_frames1=None) + name="NIRSPEC", frame_time=14.58889, group_time=14.58889, groupgap=0, nframes=1, drop_frames1=None + ) ramp_data.set_dqflags(dqflags) # Run ramp fit on RampData buffsize, save_opt, algo, wt, ncores = 512, True, "OLS", "optimal", "none" slopes, cube, optional, gls_dummy = ramp_fit_data( - ramp_data, buffsize, save_opt, rnoise, gain, algo, wt, ncores, dqflags) + ramp_data, buffsize, save_opt, rnoise, gain, algo, wt, ncores, dqflags + ) # Check the outputs data, dq, var_poisson, var_rnoise, err = slopes - tol = 1.e-6 + tol = 1.0e-6 check = np.array([[551.0735, np.nan, np.nan, np.nan, -293.9943, -845.0678, -845.0677]]) np.testing.assert_allclose(data, check, tol) check = np.array([[GOOD, DNU | SAT, DNU | SAT, DNU, GOOD, GOOD, GOOD]]) np.testing.assert_allclose(dq, check, tol) - check = np.array([[38.945766, 0., 0., 0., 0., 0., 0.]]) + check = np.array([[38.945766, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]) np.testing.assert_allclose(var_poisson, check, tol) - check = np.array([[0.420046, 0., 0., 0., 0.420046, 0.420046, 0.420046]]) + check = np.array([[0.420046, 0.0, 0.0, 0.0, 0.420046, 0.420046, 0.420046]]) np.testing.assert_allclose(var_rnoise, check, tol) - check = np.array([[6.274218 , 0., 0., 0., 0.6481096, 0.6481096, 0.6481096]]) + check = np.array([[6.274218, 0.0, 0.0, 0.0, 0.6481096, 0.6481096, 0.6481096]]) np.testing.assert_allclose(err, check, tol) @@ -499,7 +502,7 @@ def run_one_group_ramp_suppression(nints, suppress): ramp_data.groupdq[0, :, 0, 0] = sat_dq # All groups sat ramp_data.groupdq[0, :, 0, 1] = sat_dq # 0th good, all others sat ramp_data.groupdq[0, 0, 0, 1] = 0 - ramp_data.groupdq[0, :, 0, 2] = zdq # All groups good + ramp_data.groupdq[0, :, 0, 2] = zdq # All groups good if nints > 1: ramp_data.data[1, :, 0, 0] = arr @@ -516,8 +519,8 @@ def run_one_group_ramp_suppression(nints, suppress): algo = "OLS" save_opt, ncores, bufsize = False, "none", 1024 * 30000 slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, 
bufsize, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, dqflags) + ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, dqflags + ) return slopes, cube, dims @@ -539,13 +542,13 @@ def test_one_group_ramp_suppressed_one_integration(): check = np.array([[DNU | SAT, DNU, GOOD]]) np.testing.assert_allclose(sdq, check, tol) - check = np.array([[0., 0., 0.25]]) + check = np.array([[0.0, 0.0, 0.25]]) np.testing.assert_allclose(svp, check, tol) - check = np.array([[0., 0., 4.999999]]) + check = np.array([[0.0, 0.0, 4.999999]]) np.testing.assert_allclose(svr, check, tol) - check = np.array([[0., 0., 2.2912877]]) + check = np.array([[0.0, 0.0, 2.2912877]]) np.testing.assert_allclose(serr, check, tol) # Check slopes information @@ -557,13 +560,13 @@ def test_one_group_ramp_suppressed_one_integration(): check = np.array([[[DNU | SAT, DNU, GOOD]]]) np.testing.assert_allclose(cdq, check, tol) - check = np.array([[[0., 0., 0.25]]]) + check = np.array([[[0.0, 0.0, 0.25]]]) np.testing.assert_allclose(cvp, check, tol) - check = np.array([[[0., 0., 4.999999]]]) + check = np.array([[[0.0, 0.0, 4.999999]]]) np.testing.assert_allclose(cvr, check, tol) - check = np.array([[[0., 0., 2.291288]]]) + check = np.array([[[0.0, 0.0, 2.291288]]]) np.testing.assert_allclose(cerr, check, tol) @@ -578,37 +581,37 @@ def test_one_group_ramp_not_suppressed_one_integration(): # Check slopes information sdata, sdq, svp, svr, serr = slopes - check = np.array([[np.nan, 1., 1.0000001]]) + check = np.array([[np.nan, 1.0, 1.0000001]]) np.testing.assert_allclose(sdata, check, tol) check = np.array([[DNU | SAT, GOOD, GOOD]]) np.testing.assert_allclose(sdq, check, tol) - check = np.array([[0., 1., 0.25]]) + check = np.array([[0.0, 1.0, 0.25]]) np.testing.assert_allclose(svp, check, tol) - check = np.array([[0., 100., 5.0000005]]) + check = np.array([[0.0, 100.0, 5.0000005]]) np.testing.assert_allclose(svr, check, tol) - check = np.array([[0., 10.049875, 2.291288]]) + check = np.array([[0.0, 10.049875, 2.291288]]) np.testing.assert_allclose(serr, check, tol) # Check slopes information cdata, cdq, cvp, cvr, cerr = cube - check = np.array([[[np.nan, 1., 1.0000001]]]) + check = np.array([[[np.nan, 1.0, 1.0000001]]]) np.testing.assert_allclose(cdata, check, tol) check = np.array([[[DNU | SAT, GOOD, GOOD]]]) np.testing.assert_allclose(cdq, check, tol) - check = np.array([[[0., 1, 0.25]]]) + check = np.array([[[0.0, 1, 0.25]]]) np.testing.assert_allclose(cvp, check, tol) - check = np.array([[[0., 100., 5.0000005]]]) + check = np.array([[[0.0, 100.0, 5.0000005]]]) np.testing.assert_allclose(cvr, check, tol) - check = np.array([[[0., 10.049875, 2.291288]]]) + check = np.array([[[0.0, 10.049875, 2.291288]]]) np.testing.assert_allclose(cerr, check, tol) @@ -633,33 +636,28 @@ def test_one_group_ramp_suppressed_two_integrations(): check = np.array([[0.125, 0.125, 0.125]]) np.testing.assert_allclose(svp, check, tol) - check = np.array([[4.999998 , 4.999998 , 2.4999995]]) + check = np.array([[4.999998, 4.999998, 2.4999995]]) np.testing.assert_allclose(svr, check, tol) - check = np.array([[2.263846 , 2.263846 , 1.620185]]) + check = np.array([[2.263846, 2.263846, 1.620185]]) np.testing.assert_allclose(serr, check, tol) # Check slopes information cdata, cdq, cvp, cvr, cerr = cube - check = np.array([[[np.nan, np.nan, 1.0000001]], - [[1.0000001, 1.0000001, 1.0000001]]]) + check = np.array([[[np.nan, np.nan, 1.0000001]], [[1.0000001, 1.0000001, 1.0000001]]]) np.testing.assert_allclose(cdata, check, tol) - check = 
np.array([[[DNU | SAT, DNU, GOOD]], - [[GOOD, GOOD, GOOD]]]) + check = np.array([[[DNU | SAT, DNU, GOOD]], [[GOOD, GOOD, GOOD]]]) np.testing.assert_allclose(cdq, check, tol) - check = np.array([[[0., 0., 0.25]], - [[0.125, 0.125, 0.25]]]) + check = np.array([[[0.0, 0.0, 0.25]], [[0.125, 0.125, 0.25]]]) np.testing.assert_allclose(cvp, check, tol) - check = np.array([[[0., 0., 4.999999]], - [[4.999999, 4.999999, 4.999999]]]) + check = np.array([[[0.0, 0.0, 4.999999]], [[4.999999, 4.999999, 4.999999]]]) np.testing.assert_allclose(cvr, check, tol) - check = np.array([[[0., 0., 2.291288]], - [[2.2638464, 2.2638464, 2.291288]]]) + check = np.array([[[0.0, 0.0, 2.291288]], [[2.2638464, 2.2638464, 2.291288]]]) np.testing.assert_allclose(cerr, check, tol) @@ -684,7 +682,7 @@ def test_one_group_ramp_not_suppressed_two_integrations(): check = np.array([[0.125, 0.2, 0.125]]) np.testing.assert_allclose(svp, check, tol) - check = np.array([[5. , 4.7619047, 2.5000002]]) + check = np.array([[5.0, 4.7619047, 2.5000002]]) np.testing.assert_allclose(svr, check, tol) check = np.array([[2.2638464, 2.2275333, 1.6201853]]) @@ -693,24 +691,19 @@ def test_one_group_ramp_not_suppressed_two_integrations(): # Check slopes information cdata, cdq, cvp, cvr, cerr = cube - check = np.array([[[np.nan, 1., 1.0000001]], - [[1.0000001, 1.0000001, 1.0000001]]]) + check = np.array([[[np.nan, 1.0, 1.0000001]], [[1.0000001, 1.0000001, 1.0000001]]]) np.testing.assert_allclose(cdata, check, tol) - check = np.array([[[DNU | SAT, GOOD, GOOD]], - [[GOOD, GOOD, GOOD]]]) + check = np.array([[[DNU | SAT, GOOD, GOOD]], [[GOOD, GOOD, GOOD]]]) np.testing.assert_allclose(cdq, check, tol) - check = np.array([[[0., 1., 0.25]], - [[0.125, 0.25, 0.25]]]) + check = np.array([[[0.0, 1.0, 0.25]], [[0.125, 0.25, 0.25]]]) np.testing.assert_allclose(cvp, check, tol) - check = np.array([[[0., 100., 5.0000005]], - [[5.0000005, 5.0000005, 5.0000005]]]) + check = np.array([[[0.0, 100.0, 5.0000005]], [[5.0000005, 5.0000005, 5.0000005]]]) np.testing.assert_allclose(cvr, check, tol) - check = np.array([[[0., 10.049875 , 2.291288]], - [[2.2638464, 2.291288 , 2.291288]]]) + check = np.array([[[0.0, 10.049875, 2.291288]], [[2.2638464, 2.291288, 2.291288]]]) np.testing.assert_allclose(cerr, check, tol) @@ -729,7 +722,7 @@ def create_zero_frame_data(): frame_time, nframes, groupgap = 10.736, 4, 1 group_time = (nframes + groupgap) * frame_time nints, ngroups, nrows, ncols = 2, 5, 1, 3 - rnval, gval = 10., 5. + rnval, gval = 10.0, 5.0 # Create arrays for RampData. data = np.zeros(shape=(nints, ngroups, nrows, ncols), dtype=np.float32) @@ -740,19 +733,19 @@ def create_zero_frame_data(): # Create base ramps for each pixel in each integration. base_slope = 2000.0 - base_arr = [8000. + k * base_slope for k in range(ngroups)] + base_arr = [8000.0 + k * base_slope for k in range(ngroups)] base_ramp = np.array(base_arr, dtype=np.float32) data[0, :, 0, 0] = base_ramp data[0, :, 0, 1] = base_ramp data[0, :, 0, 2] = base_ramp - data[1, :, :, :] = data[0, :, :, :] / 2. + data[1, :, :, :] = data[0, :, :, :] / 2.0 # ZEROFRAME data. fdn = (data[0, 1, 0, 0] - data[0, 0, 0, 0]) / (nframes + groupgap) dummy = data[0, 0, 0, 2] - (fdn * 2.5) zframe[0, 0, :] *= dummy - zframe[0, 0, 1] = 0. # ZEROFRAME is saturated too. + zframe[0, 0, 1] = 0.0 # ZEROFRAME is saturated too. fdn = (data[1, 1, 0, 0] - data[1, 0, 0, 0]) / (nframes + groupgap) dummy = data[1, 0, 0, 2] - (fdn * 2.5) zframe[1, 0, :] *= dummy @@ -763,11 +756,15 @@ def create_zero_frame_data(): # Create RampData for testing. 
ramp_data = RampData() - ramp_data.set_arrays( - data=data, err=err, groupdq=gdq, pixeldq=pixdq) + ramp_data.set_arrays(data=data, err=err, groupdq=gdq, pixeldq=pixdq) ramp_data.set_meta( - name="NIRCam", frame_time=frame_time, group_time=group_time, - groupgap=groupgap, nframes=nframes, drop_frames1=None) + name="NIRCam", + frame_time=frame_time, + group_time=group_time, + groupgap=groupgap, + nframes=nframes, + drop_frames1=None, + ) ramp_data.set_dqflags(dqflags) ramp_data.suppress_one_group_ramps = False @@ -795,10 +792,10 @@ def test_zeroframe(): algo, save_opt, ncores, bufsize = "OLS", False, "none", 1024 * 30000 slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise, gain, algo, - "optimal", ncores, dqflags) + ramp_data, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) - tol = 1.e-5 + tol = 1.0e-5 # Check slopes information sdata, sdq, svp, svr, serr = slopes @@ -824,24 +821,19 @@ def test_zeroframe(): # The third pixel in integration zero has good data # because the zeroframe has good data, so the ramp # is not fully saturated. - check = np.array([[[298.0626, np.nan, 652.01196]], - [[18.62891, 18.62891, 18.62891]]]) + check = np.array([[[298.0626, np.nan, 652.01196]], [[18.62891, 18.62891, 18.62891]]]) np.testing.assert_allclose(cdata, check, tol, tol) - check = np.array([[[GOOD, DNU | SAT, GOOD]], - [[GOOD, GOOD, GOOD]]]) + check = np.array([[[GOOD, DNU | SAT, GOOD]], [[GOOD, GOOD, GOOD]]]) np.testing.assert_allclose(cdq, check, tol, tol) - check = np.array([[[1.1799237 , 0. , 6.246655 ]], - [[0.14749046, 0.00867591, 0.31233275]]]) + check = np.array([[[1.1799237, 0.0, 6.246655]], [[0.14749046, 0.00867591, 0.31233275]]]) np.testing.assert_allclose(cvp, check, tol, tol) - check = np.array([[[0.03470363, 0., 0.21689774]], - [[0.0004338, 0.0004338, 0.0004338]]]) + check = np.array([[[0.03470363, 0.0, 0.21689774]], [[0.0004338, 0.0004338, 0.0004338]]]) np.testing.assert_allclose(cvr, check, tol, tol) - check = np.array([[[1.1021013, 0., 2.542352]], - [[0.38460922, 0.09544477, 0.55925536]]]) + check = np.array([[[1.1021013, 0.0, 2.542352]], [[0.38460922, 0.09544477, 0.55925536]]]) np.testing.assert_allclose(cerr, check, tol, tol) @@ -856,7 +848,7 @@ def create_only_good_0th_group_data(): frame_time, nframes, groupgap = 10.736, 2, 3 group_time = (nframes + groupgap) * frame_time nints, ngroups, nrows, ncols = 1, 5, 1, 3 - rnval, gval = 10., 5. + rnval, gval = 10.0, 5.0 # Create arrays for RampData. data = np.zeros(shape=(nints, ngroups, nrows, ncols), dtype=np.float32) @@ -866,7 +858,7 @@ def create_only_good_0th_group_data(): # Create base ramps for each pixel in each integration. base_slope = 2000.0 - base_arr = [8000. + k * base_slope for k in range(ngroups)] + base_arr = [8000.0 + k * base_slope for k in range(ngroups)] base_ramp = np.array(base_arr, dtype=np.float32) data[0, :, 0, 0] = base_ramp @@ -885,11 +877,15 @@ def create_only_good_0th_group_data(): # Create RampData for testing. 
ramp_data = RampData() - ramp_data.set_arrays( - data=data, err=err, groupdq=gdq, pixeldq=pixdq) + ramp_data.set_arrays(data=data, err=err, groupdq=gdq, pixeldq=pixdq) ramp_data.set_meta( - name="NIRCam", frame_time=frame_time, group_time=group_time, - groupgap=groupgap, nframes=nframes, drop_frames1=None) + name="NIRCam", + frame_time=frame_time, + group_time=group_time, + groupgap=groupgap, + nframes=nframes, + drop_frames1=None, + ) ramp_data.set_dqflags(dqflags) ramp_data.suppress_one_group_ramps = False @@ -914,10 +910,10 @@ def test_only_good_0th_group(): algo, save_opt, ncores, bufsize = "OLS", False, "none", 1024 * 30000 slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise, gain, algo, - "optimal", ncores, dqflags) + ramp_data, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) - tol = 1.e-5 + tol = 1.0e-5 # Check slopes information sdata, sdq, svp, svr, serr = slopes @@ -928,7 +924,7 @@ def test_only_good_0th_group(): # Because the number of groups used in the first two ramps are different # the variances are expected to be different, even though the slopes # should be the same. - check = np.array([[37.257824, 37.257824, 496.77103]]) + check = np.array([[37.257824, 37.257824, 496.77103]]) np.testing.assert_allclose(sdata, check, tol, tol) check = np.array([[GOOD, GOOD, GOOD]]) @@ -951,7 +947,7 @@ def test_all_sat(): Test all ramps in all integrations saturated. """ nints, ngroups, nrows, ncols = 2, 5, 1, 3 - rnval, gval = 10., 5. + rnval, gval = 10.0, 5.0 frame_time, nframes, groupgap = 10.736, 4, 1 dims = nints, ngroups, nrows, ncols @@ -963,8 +959,8 @@ def test_all_sat(): algo, save_opt, ncores, bufsize = "OLS", False, "none", 1024 * 30000 slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp, bufsize, save_opt, rnoise, gain, algo, - "optimal", ncores, dqflags) + ramp, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) assert slopes is None assert cube is None @@ -976,7 +972,7 @@ def test_dq_multi_int_dnu(): in an integration are set to DO_NOT_USE. """ nints, ngroups, nrows, ncols = 2, 5, 1, 1 - rnval, gval = 10., 5. 
+ rnval, gval = 10.0, 5.0 frame_time, nframes, groupgap = 10.736, 4, 1 dims = nints, ngroups, nrows, ncols @@ -993,10 +989,10 @@ def test_dq_multi_int_dnu(): algo, save_opt, ncores, bufsize = "OLS", False, "none", 1024 * 30000 slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp, bufsize, save_opt, rnoise, gain, algo, - "optimal", ncores, dqflags) + ramp, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) - tol = 1.e-5 + tol = 1.0e-5 # Check slopes information sdata, sdq, svp, svr, serr = slopes @@ -1019,24 +1015,19 @@ def test_dq_multi_int_dnu(): # Check slopes information cdata, cdq, cvp, cvr, cerr = cube - check = np.array([[[np.nan]], - [[1.8628913]]]) + check = np.array([[[np.nan]], [[1.8628913]]]) np.testing.assert_allclose(cdata, check, tol, tol) - check = np.array([[[dqflags["DO_NOT_USE"]]], - [[0]]]) + check = np.array([[[dqflags["DO_NOT_USE"]]], [[0]]]) np.testing.assert_allclose(cdq, check, tol, tol) - check = np.array([[[0.]], - [[0.00086759]]]) + check = np.array([[[0.0]], [[0.00086759]]]) np.testing.assert_allclose(cvp, check, tol, tol) - check = np.array([[[0.]], - [[4.3379547e-04]]]) + check = np.array([[[0.0]], [[4.3379547e-04]]]) np.testing.assert_allclose(cvr, check, tol, tol) - check = np.array([[[0.]], - [[0.03607474]]]) + check = np.array([[[0.0]], [[0.03607474]]]) np.testing.assert_allclose(cerr, check, tol, tol) @@ -1050,7 +1041,7 @@ def test_multi_more_cores_than_rows(): slice of the data would be sent through ramp fitting. """ nints, ngroups, nrows, ncols = 2, 10, 1, 2 - rnval, gval = 10., 5. + rnval, gval = 10.0, 5.0 frame_time, nframes, groupgap = 10.736, 5, 0 dims = nints, ngroups, nrows, ncols @@ -1058,7 +1049,8 @@ def test_multi_more_cores_than_rows(): tm = frame_time, nframes, groupgap from stcal.ramp_fitting.utils import compute_num_slices - requested_slices = '8' + + requested_slices = "8" max_available_cores = 10 requested_slices = compute_num_slices(requested_slices, nrows, max_available_cores) assert requested_slices == 1 @@ -1071,8 +1063,20 @@ def test_multi_more_cores_than_rows(): being no more than the number of processors requested. """ ramp, gain, rnoise = create_blank_ramp_data(dims, var, tm) - bramp = np.array([ 150.4896, 299.7697, 449.0971, 600.6752, 749.6968, - 900.9771, 1050.1395, 1199.9658, 1349.9163, 1499.8358]) + bramp = np.array( + [ + 150.4896, + 299.7697, + 449.0971, + 600.6752, + 749.6968, + 900.9771, + 1050.1395, + 1199.9658, + 1349.9163, + 1499.8358, + ] + ) factor = 1.05 for integ in range(nints): for row in range(nrows): @@ -1082,12 +1086,12 @@ def test_multi_more_cores_than_rows(): bufsize, algo, save_opt, ncores = 512, "OLS", False, "all" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags) + ramp, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) # This part of the test is simply to make sure ramp fitting # doesn't crash. No asserts are necessary here. - def get_new_saturation(): """ Three columns (pixels) with two integrations each. @@ -1096,7 +1100,7 @@ def get_new_saturation(): 2. Both integrations fully saturated. """ nints, ngroups, nrows, ncols = 2, 20, 1, 3 - rnval, gval = 10., 5. 
+ rnval, gval = 10.0, 5.0 frame_time, nframes, groupgap = 10.736, 4, 1 dims = nints, ngroups, nrows, ncols @@ -1105,10 +1109,28 @@ def get_new_saturation(): ramp, gain, rnoise = create_blank_ramp_data(dims, var, tm) - bramp = [ 149.3061, 299.0544, 449.9949, 599.7617, 749.7327, - 900.117 , 1049.314 , 1200.6003, 1350.0906, 1500.7772, - 1649.3098, 1799.8952, 1949.1304, 2100.1875, 2249.85 , - 2399.1154, 2550.537 , 2699.915 , 2850.0734, 2999.7891] + bramp = [ + 149.3061, + 299.0544, + 449.9949, + 599.7617, + 749.7327, + 900.117, + 1049.314, + 1200.6003, + 1350.0906, + 1500.7772, + 1649.3098, + 1799.8952, + 1949.1304, + 2100.1875, + 2249.85, + 2399.1154, + 2550.537, + 2699.915, + 2850.0734, + 2999.7891, + ] # Set up ramp data. for integ in range(nints): @@ -1118,20 +1140,79 @@ def get_new_saturation(): # Set up DQ's. # Set up col 0 # One integ no sat, one with jump and saturated - dq = [GOOD, GOOD, GOOD, GOOD, GOOD, GOOD, GOOD, GOOD, GOOD, GOOD, - GOOD, GOOD, GOOD, GOOD, GOOD, GOOD, GOOD, GOOD, GOOD, GOOD] + dq = [ + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + ] ramp.groupdq[0, :, 0, 0] = np.array(dq) - dq = [GOOD, GOOD, GOOD, GOOD, GOOD, JUMP, JUMP, GOOD, GOOD, GOOD, - GOOD, GOOD, GOOD, GOOD, GOOD, SAT, SAT, SAT, SAT, SAT] + dq = [ + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + JUMP, + JUMP, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + SAT, + SAT, + SAT, + SAT, + SAT, + ] ramp.groupdq[1, :, 0, 0] = np.array(dq) # Set up col 1 # One integ with jump and saturated, one fully saturated - dq = [GOOD, GOOD, GOOD, GOOD, GOOD, JUMP, JUMP, GOOD, GOOD, GOOD, - GOOD, GOOD, GOOD, GOOD, GOOD, SAT, SAT, SAT, SAT, SAT] + dq = [ + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + JUMP, + JUMP, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + GOOD, + SAT, + SAT, + SAT, + SAT, + SAT, + ] ramp.groupdq[0, :, 0, 1] = np.array(dq) - dq = [SAT, SAT, SAT, SAT, SAT, SAT, SAT, SAT, SAT, SAT, - SAT, SAT, SAT, SAT, SAT, SAT, SAT, SAT, SAT, SAT] + dq = [SAT, SAT, SAT, SAT, SAT, SAT, SAT, SAT, SAT, SAT, SAT, SAT, SAT, SAT, SAT, SAT, SAT, SAT, SAT, SAT] ramp.groupdq[1, :, 0, 1] = np.array(dq) # Set up col 2 @@ -1156,49 +1237,45 @@ def test_new_saturation(): save_opt, ncores, bufsize, algo = False, "none", 1024 * 30000, "OLS" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp, bufsize, save_opt, rnoise, gain, algo,"optimal", ncores, dqflags) + ramp, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) - tol = 1.e-5 + tol = 1.0e-5 # Check slopes information sdata, sdq, svp, svr, serr = slopes - check = np.array([[2.795187 , 2.795632, np.nan]]) + check = np.array([[2.795187, 2.795632, np.nan]]) np.testing.assert_allclose(sdata, check, tol, tol) check = np.array([[JUMP, JUMP, DNU | SAT]]) np.testing.assert_allclose(sdq, check, tol, tol) - check = np.array([[0.00033543, 0.00043342, 0.]]) + check = np.array([[0.00033543, 0.00043342, 0.0]]) np.testing.assert_allclose(svp, check, tol, tol) - check = np.array([[5.9019785e-06, 6.1970772e-05, 0.0000000e+00]]) + check = np.array([[5.9019785e-06, 6.1970772e-05, 0.0000000e00]]) np.testing.assert_allclose(svr, check, tol, tol) - check = np.array([[0.01847528, 0.02225729, 0.]]) + check = np.array([[0.01847528, 0.02225729, 0.0]]) np.testing.assert_allclose(serr, check, tol, tol) # Check slopes information cdata, cdq, cvp, cvr, cerr = cube - check = np.array([[[2.7949152, 2.7956316, np.nan]], - [[2.7956493, np.nan, np.nan]]]) + check = 
np.array([[[2.7949152, 2.7956316, np.nan]], [[2.7956493, np.nan, np.nan]]]) np.testing.assert_allclose(cdata, check, tol, tol) - check = np.array([[[GOOD, JUMP, DNU | SAT]], - [[JUMP, DNU | SAT, DNU | SAT]]]) + check = np.array([[[GOOD, JUMP, DNU | SAT]], [[JUMP, DNU | SAT, DNU | SAT]]]) np.testing.assert_allclose(cdq, check, tol, tol) - check = np.array([[[0.00054729, 0.00043342, 0.]], - [[0.00086654, 0. , 0.]]]) + check = np.array([[[0.00054729, 0.00043342, 0.0]], [[0.00086654, 0.0, 0.0]]]) np.testing.assert_allclose(cvp, check, tol, tol) - check = np.array([[[6.5232398e-06, 6.1970772e-05, 0.]], - [[6.1970772e-05, 0., 0.]]]) + check = np.array([[[6.5232398e-06, 6.1970772e-05, 0.0]], [[6.1970772e-05, 0.0, 0.0]]]) np.testing.assert_allclose(cvr, check, tol, tol) - check = np.array([[[0.02353317, 0.02258242, 0.]], - [[0.03073696, 0. , 0.]]]) + check = np.array([[[0.02353317, 0.02258242, 0.0]], [[0.03073696, 0.0, 0.0]]]) np.testing.assert_allclose(cerr, check, tol, tol) @@ -1222,14 +1299,14 @@ def test_invalid_integrations(): ramp, gain, rnoise = create_blank_ramp_data(dims, var, tm) int_data = [ - [17343.719, 32944.32 , 48382.062, 63066.062, 58844.7 ], - [19139.965, 34863.45 , 50415.816, 52806.453, 59525.01 ], + [17343.719, 32944.32, 48382.062, 63066.062, 58844.7], + [19139.965, 34863.45, 50415.816, 52806.453, 59525.01], [19020.926, 34759.785, 50351.984, 52774.695, 59533.586], - [19060.592, 34772.496, 50247.75 , 52781.04 , 59509.086], - [19011.01 , 34768.832, 50247.547, 52829.46 , 59557.85 ], - [18939.426, 34680.39 , 50175.406, 52685.527, 59486.184], - [19009.908, 34748.207, 50274.14 , 52723.406, 59523.812], - [19072.715, 34844.24 , 50421.906, 52781.83 , 59527.06 ] + [19060.592, 34772.496, 50247.75, 52781.04, 59509.086], + [19011.01, 34768.832, 50247.547, 52829.46, 59557.85], + [18939.426, 34680.39, 50175.406, 52685.527, 59486.184], + [19009.908, 34748.207, 50274.14, 52723.406, 59523.812], + [19072.715, 34844.24, 50421.906, 52781.83, 59527.06], ] int_dq = [ [DNU, GOOD, JUMP, GOOD, DNU | SAT], @@ -1239,7 +1316,7 @@ def test_invalid_integrations(): [DNU, GOOD, JUMP, SAT, DNU | SAT], [DNU, GOOD, JUMP, SAT, DNU | SAT], [DNU, GOOD, JUMP, SAT, DNU | SAT], - [DNU, GOOD, JUMP, SAT, DNU | SAT] + [DNU, GOOD, JUMP, SAT, DNU | SAT], ] for integ in range(nints): @@ -1250,9 +1327,10 @@ def test_invalid_integrations(): save_opt, ncores, bufsize, algo = False, "none", 1024 * 30000, "OLS" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp, bufsize, save_opt, rnoise, gain, algo,"optimal", ncores, dqflags) + ramp, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) - tol = 1.e-5 + tol = 1.0e-5 # Check slopes information sdata, sdq, svp, svr, serr = slopes @@ -1275,21 +1353,21 @@ def test_invalid_integrations(): # Check slopes information cdata, cdq, cvp, cvr, cerr = cube - check = np.array([5291.4556, np.nan, np.nan, 5576.588, - np.nan, np.nan, np.nan, np.nan], dtype=np.float32) + check = np.array([5291.4556, np.nan, np.nan, 5576.588, np.nan, np.nan, np.nan, np.nan], dtype=np.float32) np.testing.assert_allclose(cdata[:, 0, 0], check, tol, tol) - check = np.array([JUMP, JUMP | DNU, JUMP | DNU, GOOD, - JUMP | DNU, JUMP | DNU, JUMP | DNU, JUMP | DNU], dtype=np.uint8) + check = np.array( + [JUMP, JUMP | DNU, JUMP | DNU, GOOD, JUMP | DNU, JUMP | DNU, JUMP | DNU, JUMP | DNU], dtype=np.uint8 + ) np.testing.assert_allclose(cdq[:, 0, 0], check, tol, tol) - check = np.array([89.007835, 0., 0., 89.007835, 0., 0., 0., 0.], dtype=np.float32) + check = np.array([89.007835, 0.0, 0.0, 89.007835, 0.0, 
0.0, 0.0, 0.0], dtype=np.float32) np.testing.assert_allclose(cvp[:, 0, 0], check, tol, tol) - check = np.array([4.8278294, 0., 0., 4.8278294, 0., 0., 0., 0.], dtype=np.float32) + check = np.array([4.8278294, 0.0, 0.0, 4.8278294, 0.0, 0.0, 0.0, 0.0], dtype=np.float32) np.testing.assert_allclose(cvr[:, 0, 0], check, tol, tol) - check = np.array([9.686893, 0., 0., 9.686893, 0., 0., 0., 0.0], dtype=np.float32) + check = np.array([9.686893, 0.0, 0.0, 9.686893, 0.0, 0.0, 0.0, 0.0], dtype=np.float32) np.testing.assert_allclose(cerr[:, 0, 0], check, tol, tol) @@ -1298,7 +1376,7 @@ def test_one_group(): Test ngroups = 1 """ nints, ngroups, nrows, ncols = 1, 1, 1, 1 - rnval, gval = 10., 5. + rnval, gval = 10.0, 5.0 frame_time, nframes, groupgap = 10.736, 4, 1 dims = nints, ngroups, nrows, ncols @@ -1311,7 +1389,8 @@ def test_one_group(): save_opt, ncores, bufsize, algo = False, "none", 1024 * 30000, "OLS" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp, bufsize, save_opt, rnoise, gain, algo,"optimal", ncores, dqflags) + ramp, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) tol = 1e-5 sdata, sdq, svp, svr, serr = slopes @@ -1345,11 +1424,15 @@ def create_blank_ramp_data(dims, var, tm): gdq = np.zeros(shape=(nints, ngroups, nrows, ncols), dtype=np.uint8) ramp_data = RampData() - ramp_data.set_arrays( - data=data, err=err, groupdq=gdq, pixeldq=pixdq) + ramp_data.set_arrays(data=data, err=err, groupdq=gdq, pixeldq=pixdq) ramp_data.set_meta( - name="NIRSpec", frame_time=frame_time, group_time=group_time, - groupgap=groupgap, nframes=nframes, drop_frames1=None) + name="NIRSpec", + frame_time=frame_time, + group_time=group_time, + groupgap=groupgap, + nframes=nframes, + drop_frames1=None, + ) ramp_data.set_dqflags(dqflags) gain = np.ones(shape=(nrows, ncols), dtype=np.float64) * gval @@ -1361,24 +1444,25 @@ def create_blank_ramp_data(dims, var, tm): def test_compute_num_slices(): n_rows = 20 max_available_cores = 10 - assert(compute_num_slices('none', n_rows, max_available_cores) == 1) - assert (compute_num_slices('half', n_rows, max_available_cores) == 5) - assert (compute_num_slices('3', n_rows, max_available_cores) == 3) - assert (compute_num_slices('7', n_rows, max_available_cores) == 7) - assert (compute_num_slices('21', n_rows, max_available_cores) == 10) - assert (compute_num_slices('quarter', n_rows,max_available_cores) == 2) - assert (compute_num_slices('7.5', n_rows, max_available_cores) == 1) - assert (compute_num_slices('one', n_rows, max_available_cores) == 1) - assert (compute_num_slices('-5', n_rows, max_available_cores) == 1) - assert (compute_num_slices('all', n_rows, max_available_cores) == 10) - assert (compute_num_slices('3/4', n_rows, max_available_cores) == 1) + assert compute_num_slices("none", n_rows, max_available_cores) == 1 + assert compute_num_slices("half", n_rows, max_available_cores) == 5 + assert compute_num_slices("3", n_rows, max_available_cores) == 3 + assert compute_num_slices("7", n_rows, max_available_cores) == 7 + assert compute_num_slices("21", n_rows, max_available_cores) == 10 + assert compute_num_slices("quarter", n_rows, max_available_cores) == 2 + assert compute_num_slices("7.5", n_rows, max_available_cores) == 1 + assert compute_num_slices("one", n_rows, max_available_cores) == 1 + assert compute_num_slices("-5", n_rows, max_available_cores) == 1 + assert compute_num_slices("all", n_rows, max_available_cores) == 10 + assert compute_num_slices("3/4", n_rows, max_available_cores) == 1 n_rows = 9 - assert (compute_num_slices('21', 
n_rows, max_available_cores) == 9)
+    assert compute_num_slices("21", n_rows, max_available_cores) == 9
 
 
 # -----------------------------------------------------------------------------
 # Set up functions
 
+
 def setup_inputs(dims, var, tm):
     """
     Given dimensions, variances, and timing data, this creates test data to
@@ -1404,11 +1488,10 @@ def setup_inputs(dims, var, tm):
         data[c_int, :, :, :] = data[0, :, :, :].copy()
 
     ramp_data = RampData()
-    ramp_data.set_arrays(
-        data=data, err=err, groupdq=gdq, pixeldq=pixdq)
+    ramp_data.set_arrays(data=data, err=err, groupdq=gdq, pixeldq=pixdq)
     ramp_data.set_meta(
-        name="MIRI", frame_time=dtime, group_time=gtime, groupgap=0,
-        nframes=nframes, drop_frames1=None)
+        name="MIRI", frame_time=dtime, group_time=gtime, groupgap=0, nframes=nframes, drop_frames1=None
+    )
     ramp_data.set_dqflags(dqflags)
 
     gain = np.ones(shape=(nrows, ncols), dtype=np.float64) * gain
@@ -1416,14 +1499,17 @@ def setup_inputs(dims, var, tm):
 
     return ramp_data, rnoise, gain
 
+
 # -----------------------------------------------------------------------------
 
 ###############################################################################
 # The functions below are only used for DEBUGGING tests and developing tests. #
 ###############################################################################
+
+
 def print_real_check(real, check, label=None):
     import inspect
+
     cf = inspect.currentframe()
     line_number = cf.f_back.f_lineno
     print("=" * 80)
@@ -1540,24 +1626,21 @@ def print_integ(integ_info):
 
 
 def print_optional_data(optional):
-    oslope, osigslope, ovar_poisson, ovar_rnoise, \
-        oyint, osigyint, opedestal, oweights, ocrmag = optional
+    oslope, osigslope, ovar_poisson, ovar_rnoise, oyint, osigyint, opedestal, oweights, ocrmag = optional
     print("Optional results slopes:")
     print(f"Dimensions: {oslope.shape}")
     print(oslope)
 
 
 def print_optional_poisson(optional):
-    oslope, osigslope, ovar_poisson, ovar_rnoise, \
-        oyint, osigyint, opedestal, oweights, ocrmag = optional
+    oslope, osigslope, ovar_poisson, ovar_rnoise, oyint, osigyint, opedestal, oweights, ocrmag = optional
     print("Optional results Poisson:")
     print(f"Dimensions: {ovar_poisson.shape}")
     print(ovar_poisson)
 
 
 def print_optional_rnoise(optional):
-    oslope, osigslope, ovar_poisson, ovar_rnoise, \
-        oyint, osigyint, opedestal, oweights, ocrmag = optional
+    oslope, osigslope, ovar_poisson, ovar_rnoise, oyint, osigyint, opedestal, oweights, ocrmag = optional
     print("Optional results read noise:")
     print(f"Dimensions: {ovar_rnoise.shape}")
     print(ovar_rnoise)
diff --git a/tests/test_ramp_fitting_cas22.py b/tests/test_ramp_fitting_cas22.py
index 72a7b6d1..d3b46589 100644
--- a/tests/test_ramp_fitting_cas22.py
+++ b/tests/test_ramp_fitting_cas22.py
@@ -1,4 +1,3 @@
-
 """
 Unit tests for ramp-fitting functions.
 """
@@ -39,14 +38,14 @@ def test_simulated_ramps(use_unit, use_dq):
             dq |= bad
 
     output = ramp.fit_ramps_casertano(
-        resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern,
-        threshold_constant=0, threshold_intercept=0)  # set the threshold parameters
-                                                      # to demo the interface. This
-                                                      # will raise an error if
-                                                      # the interface changes, but
-                                                      # does not effect the computation
-                                                      # since jump detection is off in
-                                                      # this case.
+        resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, threshold_constant=0, threshold_intercept=0
+    )  # set the threshold parameters
+    # to demo the interface. This
+    # will raise an error if
+    # the interface changes, but
+    # does not affect the computation
+    # since jump detection is off in
+    # this case.
# Check that the output shapes are correct assert output.parameters.shape == (320, 320, 2) == resultants.shape[1:] + (2,) @@ -74,7 +73,7 @@ def test_simulated_ramps(use_unit, use_dq): if not use_dq: assert np.all(okay) - chi2dof_slope = np.sum((parameters[okay, 1] - flux)**2 / variances[okay, 2]) / np.sum(okay) + chi2dof_slope = np.sum((parameters[okay, 1] - flux) ** 2 / variances[okay, 2]) / np.sum(okay) assert np.abs(chi2dof_slope - 1) < 0.03 assert np.all(parameters[~okay, 1] == 0) assert np.all(variances[~okay, 1] == 0) @@ -109,24 +108,26 @@ def simulate_many_ramps(ntrial=100, flux=100, readnoise=5, read_pattern=None): readnoise : float read noise used resultants : np.ndarray[n_resultant, ntrial] (float) - simulated resultants -""" + simulated resultants""" if read_pattern is None: - read_pattern = [[1, 2, 3, 4], - [5], - [6, 7, 8], - [9, 10, 11, 12, 13, 14, 15, 16, 17, 18], - [19, 20, 21], - [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36]] + read_pattern = [ + [1, 2, 3, 4], + [5], + [6, 7, 8], + [9, 10, 11, 12, 13, 14, 15, 16, 17, 18], + [19, 20, 21], + [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36], + ] nread = np.array([len(x) for x in read_pattern]) - resultants = np.zeros((len(read_pattern), ntrial), dtype='f4') - buf = np.zeros(ntrial, dtype='i4') + resultants = np.zeros((len(read_pattern), ntrial), dtype="f4") + buf = np.zeros(ntrial, dtype="i4") for i, reads in enumerate(read_pattern): - subbuf = np.zeros(ntrial, dtype='i4') + subbuf = np.zeros(ntrial, dtype="i4") for _ in reads: buf += np.random.poisson(ROMAN_READ_TIME * flux, ntrial) subbuf += buf - resultants[i] = (subbuf / len(reads)).astype('f4') - resultants += np.random.randn(len(read_pattern), ntrial) * ( - readnoise / np.sqrt(nread)).reshape(len(read_pattern), 1) + resultants[i] = (subbuf / len(reads)).astype("f4") + resultants += np.random.randn(len(read_pattern), ntrial) * (readnoise / np.sqrt(nread)).reshape( + len(read_pattern), 1 + ) return (read_pattern, flux, readnoise, resultants) diff --git a/tests/test_ramp_fitting_cases.py b/tests/test_ramp_fitting_cases.py index d6a93b32..df302817 100644 --- a/tests/test_ramp_fitting_cases.py +++ b/tests/test_ramp_fitting_cases.py @@ -27,12 +27,12 @@ # to me. [KDG - 19 Dec 2018] dqflags = { - 'GOOD': 0, # Good pixel. - 'DO_NOT_USE': 2**0, # Bad pixel. Do not use. - 'SATURATED': 2**1, # Pixel saturated during exposure. - 'JUMP_DET': 2**2, # Jump detected during exposure. - 'NO_GAIN_VALUE': 2**19, # Gain cannot be measured. - 'UNRELIABLE_SLOPE': 2**24, # Slope variance large (i.e., noisy pixel). + "GOOD": 0, # Good pixel. + "DO_NOT_USE": 2**0, # Bad pixel. Do not use. + "SATURATED": 2**1, # Pixel saturated during exposure. + "JUMP_DET": 2**2, # Jump detected during exposure. + "NO_GAIN_VALUE": 2**19, # Gain cannot be measured. + "UNRELIABLE_SLOPE": 2**24, # Slope variance large (i.e., noisy pixel). 
} GOOD = dqflags["GOOD"] @@ -53,13 +53,15 @@ def test_pix_0(): # Populate pixel-specific SCI and GROUPDQ arrays ramp_data.data[0, :, 0, 0] = np.array( - [15., 25., 35., 54., 55., 65., 75., 94., 95., 105.], dtype=np.float32) + [15.0, 25.0, 35.0, 54.0, 55.0, 65.0, 75.0, 94.0, 95.0, 105.0], dtype=np.float32 + ) dq = [GOOD] * ngroups ramp_data.groupdq[0, :, 0, 0] = np.array(dq) save_opt, ncores, bufsize, algo = True, "none", 1024 * 30000, "OLS" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise, gain, algo,"optimal", ncores, dqflags) + ramp_data, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) # Set truth values for PRIMARY results: # [data, dq, err, var_p, var_r] @@ -67,8 +69,7 @@ def test_pix_0(): # Set truth values for OPTIONAL results: # [slope, sigslope, var_poisson, var_rnoise, yint, sigyint, ped, weights] - o_true = [1.0117551, 4.874572, 0.0020202, 0.00647973, - 15.911023, 27.789335, 13.988245, 13841.038] + o_true = [1.0117551, 4.874572, 0.0020202, 0.00647973, 15.911023, 27.789335, 13.988245, 13841.038] assert_pri(p_true, slopes, 0) assert_opt(o_true, ols_opt, 0) @@ -90,7 +91,8 @@ def test_pix_1(): # Populate pixel-specific SCI and GROUPDQ arrays ramp_data.data[0, :, 0, 0] = np.array( - [15., 25., 35., 54., 55., 65., 75., 94., 95., 105.], dtype=np.float32) + [15.0, 25.0, 35.0, 54.0, 55.0, 65.0, 75.0, 94.0, 95.0, 105.0], dtype=np.float32 + ) dq = [GOOD] * ngroups dq[1] = JUMP dq[2] = JUMP @@ -99,14 +101,14 @@ def test_pix_1(): save_opt, ncores, bufsize, algo = True, "none", 1024 * 30000, "OLS" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise, gain, algo,"optimal", ncores, dqflags) + ramp_data, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) # Set truth values for PRIMARY results: p_true = [1.8999999, JUMP, 1.05057204, 0.03454545, 1.0691562] # Set truth values for OPTIONAL results: - o_true = [1.9, 56.870003, 0.03454545, 1.0691562, -3., 56.870003, - 13.1, 0.82091206] + o_true = [1.9, 56.870003, 0.03454545, 1.0691562, -3.0, 56.870003, 13.1, 0.82091206] assert_pri(p_true, slopes, 0) assert_opt(o_true, ols_opt, 0) @@ -125,27 +127,30 @@ def test_pix_2(): # Populate pixel-specific SCI and GROUPDQ arrays ramp_data.data[0, :, 0, 0] = np.array( - [15., 25., 35., 54., 55., 65., 75., 94., 95., 105.], dtype=np.float32) + [15.0, 25.0, 35.0, 54.0, 55.0, 65.0, 75.0, 94.0, 95.0, 105.0], dtype=np.float32 + ) dq = [GOOD, GOOD, GOOD, JUMP, GOOD, JUMP, GOOD, JUMP, SAT, SAT] ramp_data.groupdq[0, :, 0, 0] = np.array(dq) save_opt, ncores, bufsize, algo = True, "none", 1024 * 30000, "OLS" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise, gain, algo,"optimal", ncores, dqflags) + ramp_data, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) # Set truth values for PRIMARY results: p_true = [0.84833729, JUMP, 0.42747884, 0.00454545, 0.1781927] # Set truth values for OPTIONAL results for all segments - o_true = [[1.0000001, 0.1, 1.], # slopes for 3 segments - [28.435, 56.870003, 56.870003], # sigslope - [0.00909091, 0.01818182, 0.01818182], # var_poisson - [0.26728904, 1.0691562, 1.0691562], # var_rnoise - [14.999998, 51., 15.], # yint - [36.709427, 56.870003, 56.870003], # sigyint - [14.151663], # pedestal - [13.091425, 0.84580624, 0.84580624], # weights - ] + o_true = [ + [1.0000001, 0.1, 1.0], # slopes for 3 segments + [28.435, 56.870003, 56.870003], # sigslope + [0.00909091, 0.01818182, 0.01818182], # var_poisson + [0.26728904, 1.0691562, 
1.0691562], # var_rnoise + [14.999998, 51.0, 15.0], # yint + [36.709427, 56.870003, 56.870003], # sigyint + [14.151663], # pedestal + [13.091425, 0.84580624, 0.84580624], # weights + ] assert_pri(p_true, slopes, 0) assert_opt(o_true, ols_opt, 0) @@ -164,28 +169,31 @@ def test_pix_3(): # Populate pixel-specific SCI and GROUPDQ arrays ramp_data.data[0, :, 0, 0] = np.array( - [15., 25., 35., 54., 55., 65., 75., 94., 95., 105.], dtype=np.float32) + [15.0, 25.0, 35.0, 54.0, 55.0, 65.0, 75.0, 94.0, 95.0, 105.0], dtype=np.float32 + ) dq = [GOOD] * ngroups dq[-2] = JUMP ramp_data.groupdq[0, :, 0, 0] = np.array(dq) save_opt, ncores, bufsize, algo = True, "none", 1024 * 30000, "OLS" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise, gain, algo,"optimal", ncores, dqflags) + ramp_data, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) # Set truth values for PRIMARY results: p_true = [1.0746869, JUMP, 0.12186482, 0.00227273, 0.01257831] # Set truth values for OPTIONAL results: - o_true = [[1.0757396, 1.], - [6.450687, 56.870003], - [0.0025974, 0.01818182], - [0.01272805, 1.0691562], - [14.504965, 15.], - [27.842508, 56.870003], - [13.925313], - [4.2576841e+03, 8.458062e-01], - ] + o_true = [ + [1.0757396, 1.0], + [6.450687, 56.870003], + [0.0025974, 0.01818182], + [0.01272805, 1.0691562], + [14.504965, 15.0], + [27.842508, 56.870003], + [13.925313], + [4.2576841e03, 8.458062e-01], + ] assert_pri(p_true, slopes, 0) assert_opt(o_true, ols_opt, 0) @@ -204,25 +212,27 @@ def test_pix_4(): # Populate pixel-specific SCI and GROUPDQ arrays ramp_data.data[0, :, 0, 0] = np.array( - [15., 25., 35., 54., 1055., 1065., 1075., 2594., 2595., 2605.], dtype=np.float32) + [15.0, 25.0, 35.0, 54.0, 1055.0, 1065.0, 1075.0, 2594.0, 2595.0, 2605.0], dtype=np.float32 + ) dq = [GOOD] + [SAT] * (ngroups - 1) ramp_data.groupdq[0, :, 0, 0] = np.array(dq) save_opt, ncores, bufsize, algo = True, "none", 1024 * 30000, "OLS" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise, gain, algo,"optimal", ncores, dqflags) + ramp_data, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) # Set truth values for PRIMARY results: p_true = [1.5, GOOD, 1.047105, 0.02727273, 1.0691562] # Set truth values for OPTIONAL results: - o_true = [1.5, 0., 0.02727273, 1.0691562, 0., 0., 13.5, 0.8318386] + o_true = [1.5, 0.0, 0.02727273, 1.0691562, 0.0, 0.0, 13.5, 0.8318386] assert_pri(p_true, slopes, 0) assert_opt(o_true, ols_opt, 0) -''' +""" NOTE: There are small differences in the slope computation due to architectural differences of C and python. 
@@ -266,7 +276,9 @@ def test_pix_4(): Debug - [slope_fitter.c:2632] gtime = 10.000000000000 Debug - [slope_fitter.c:2633] seg->slope = 1.014447927475 -''' +""" + + # @pytest.mark.skip(reason="C architecture gives small differences for slope.") def test_pix_5(): """ @@ -282,14 +294,16 @@ def test_pix_5(): # Populate pixel-specific SCI and GROUPDQ arrays ramp_data.data[0, :, 0, 0] = np.array( - [15., 25., 35., 54., 2055., 2065., 2075., 2094., 2095., 2105.], dtype=np.float32) + [15.0, 25.0, 35.0, 54.0, 2055.0, 2065.0, 2075.0, 2094.0, 2095.0, 2105.0], dtype=np.float32 + ) dq = [GOOD] * ngroups dq[4] = JUMP ramp_data.groupdq[0, :, 0, 0] = np.array(dq) save_opt, ncores, bufsize, algo = True, "none", 1024 * 30000, "OLS" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise, gain, algo,"optimal", ncores, dqflags) + ramp_data, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) # XXX see the note above for the differences in C and python testing values. # Set truth values for PRIMARY results: @@ -300,15 +314,16 @@ def test_pix_5(): # Set truth values for OPTIONAL results: oslope_p = [1.2799551, 1.0144024] # oslope_c = [1.2799551, 1.0144479] # To be used with C - o_true = [oslope_p, - [18.312422, 9.920552], - [0.00606061, 0.00363636], - [0.10691562, 0.03054732], - [13.537246, 2015.0737], - [35.301933, 67.10882], - [13.923912], - [78.34764, 855.78046] - ] + o_true = [ + oslope_p, + [18.312422, 9.920552], + [0.00606061, 0.00363636], + [0.10691562, 0.03054732], + [13.537246, 2015.0737], + [35.301933, 67.10882], + [13.923912], + [78.34764, 855.78046], + ] assert_pri(p_true, slopes, 0) assert_opt(o_true, ols_opt, 0) @@ -327,7 +342,8 @@ def test_pix_6(): # Populate pixel-specific SCI and GROUPDQ arrays ramp_data.data[0, :, 0, 0] = np.array( - [15., 25., 35., 54., 55., 65., 375., 394., 395., 405.], dtype=np.float32) + [15.0, 25.0, 35.0, 54.0, 55.0, 65.0, 375.0, 394.0, 395.0, 405.0], dtype=np.float32 + ) dq = [GOOD] * ngroups dq[2] = JUMP dq[3] = JUMP @@ -335,21 +351,23 @@ def test_pix_6(): save_opt, ncores, bufsize, algo = True, "none", 1024 * 30000, "OLS" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise, gain, algo,"optimal", ncores, dqflags) + ramp_data, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) # Set truth values for PRIMARY results: p_true = [6.092052, JUMP, 0.14613187, 0.0025974, 0.01875712] # Set truth values for OPTIONAL results: - o_true = [[1., 6.195652], - [56.870003, 8.8936615], - [0.01818182, 0.0030303], - [1.0691562, 0.01909207], - [15., -143.2391], - [56.870003, 58.76999], - [8.907948], - [8.4580624e-01, 2.0433204e+03] - ] + o_true = [ + [1.0, 6.195652], + [56.870003, 8.8936615], + [0.01818182, 0.0030303], + [1.0691562, 0.01909207], + [15.0, -143.2391], + [56.870003, 58.76999], + [8.907948], + [8.4580624e-01, 2.0433204e03], + ] assert_pri(p_true, slopes, 0) assert_opt(o_true, ols_opt, 0) @@ -367,20 +385,21 @@ def test_pix_7(): # Populate pixel-specific SCI and GROUPDQ arrays ramp_data.data[0, :, 0, 0] = np.array( - [15., 25., 35., 54., 55., 65., 75., 94., 195., 205.], dtype=np.float32) + [15.0, 25.0, 35.0, 54.0, 55.0, 65.0, 75.0, 94.0, 195.0, 205.0], dtype=np.float32 + ) dq = [GOOD] * (ngroups - 2) + [JUMP, JUMP] ramp_data.groupdq[0, :, 0, 0] = np.array(dq) save_opt, ncores, bufsize, algo = True, "none", 1024 * 30000, "OLS" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise, gain, algo,"optimal", ncores, dqflags) + ramp_data, bufsize, 
save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) # Set truth values for PRIMARY results: p_true = [1.0757396, JUMP, 0.12379601, 0.0025974, 0.01272805] # Set truth values for OPTIONAL results: - o_true = [1.0757396, 6.450687, 0.0025974, 0.01272805, 14.504951, - 27.842508, 13.92426, 4257.684] + o_true = [1.0757396, 6.450687, 0.0025974, 0.01272805, 14.504951, 27.842508, 13.92426, 4257.684] assert_pri(p_true, slopes, 0) assert_opt(o_true, ols_opt, 0) @@ -399,20 +418,21 @@ def test_pix_8(): # Populate pixel-specific SCI and GROUPDQ arrays ramp_data.data[0, :, 0, 0] = np.array( - [15., 25., 35., 54., 55., 65., 75., 94., 95., 105.], dtype=np.float32) + [15.0, 25.0, 35.0, 54.0, 55.0, 65.0, 75.0, 94.0, 95.0, 105.0], dtype=np.float32 + ) dq = [GOOD, JUMP, GOOD, GOOD, GOOD, GOOD, GOOD, SAT, SAT, SAT] ramp_data.groupdq[0, :, 0, 0] = np.array(dq) save_opt, ncores, bufsize, algo = True, "none", 1024 * 30000, "OLS" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise, gain, algo,"optimal", ncores, dqflags) + ramp_data, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) # Set truth values for PRIMARY results: p_true = [0.98561335, JUMP, 0.1848883, 0.00363636, 0.03054732] # Set truth values for OPTIONAL results: - o_true = [0.98561335, 9.920554, 0.00363636, 0.03054732, 16.508228, - 39.383667, 14.014386, 855.78046] + o_true = [0.98561335, 9.920554, 0.00363636, 0.03054732, 16.508228, 39.383667, 14.014386, 855.78046] assert_pri(p_true, slopes, 0) assert_opt(o_true, ols_opt, 0) @@ -432,27 +452,30 @@ def test_pix_9(): # Populate pixel-specific SCI and GROUPDQ arrays ramp_data.data[0, :, 0, 0] = np.array( - [15., 25., 35., 54., 55., 65., 75., 94., 95., 105.], dtype=np.float32) + [15.0, 25.0, 35.0, 54.0, 55.0, 65.0, 75.0, 94.0, 95.0, 105.0], dtype=np.float32 + ) dq = [GOOD, GOOD, JUMP, JUMP, GOOD, GOOD, GOOD, GOOD, JUMP, GOOD] ramp_data.groupdq[0, :, 0, 0] = np.array(dq) save_opt, ncores, bufsize, algo = True, "none", 1024 * 30000, "OLS" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise, gain, algo,"optimal", ncores, dqflags) + ramp_data, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) # Set truth values for PRIMARY results: p_true = [0.9999994, JUMP, 0.22721863, 0.0030303, 0.048598] # Set truth values for OPTIONAL results: - o_true = [[1., 0.9999994, 1.], - [56.870003, 13.036095, 56.870003], - [0.01818182, 0.00454545, 0.01818182], - [1.0691562, 0.05345781, 1.0691562], - [15., 20.119896, 15.], - [56.870003, 68.618195, 56.870003], - [14.], - [0.84580624, 297.23172, 0.84580624] - ] + o_true = [ + [1.0, 0.9999994, 1.0], + [56.870003, 13.036095, 56.870003], + [0.01818182, 0.00454545, 0.01818182], + [1.0691562, 0.05345781, 1.0691562], + [15.0, 20.119896, 15.0], + [56.870003, 68.618195, 56.870003], + [14.0], + [0.84580624, 297.23172, 0.84580624], + ] assert_pri(p_true, slopes, 0) assert_opt(o_true, ols_opt, 0) @@ -472,27 +495,30 @@ def test_pix_10(): # Populate pixel-specific SCI and GROUPDQ arrays ramp_data.data[0, :, 0, 0] = np.array( - [15., 25., 35., 54., 55., 65., 75., 94., 95., 105.], dtype=np.float32) + [15.0, 25.0, 35.0, 54.0, 55.0, 65.0, 75.0, 94.0, 95.0, 105.0], dtype=np.float32 + ) dq = [GOOD, GOOD, JUMP, GOOD, GOOD, JUMP, GOOD, GOOD, GOOD, GOOD] ramp_data.groupdq[0, :, 0, 0] = np.array(dq) save_opt, ncores, bufsize, algo = True, "none", 1024 * 30000, "OLS" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise, gain, algo,"optimal", ncores, dqflags) + 
ramp_data, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) # Set truth values for PRIMARY results: - p_true = [1., JUMP, 0.21298744, 0.0025974, 0.04276625] + p_true = [1.0, JUMP, 0.21298744, 0.0025974, 0.04276625] # Set truth values for OPTIONAL results: - o_true = [[1., 1.0000014, 0.99999964], - [56.870003, 28.434996, 13.036095], - [0.01818182, 0.00909091, 0.00454545], - [1.0691562, 0.26728904, 0.05345781], - [15., 17.999956, 15.000029], - [56.870003, 88.40799, 93.73906], - [14.], - [0.84580624, 13.091425, 297.23172] - ] + o_true = [ + [1.0, 1.0000014, 0.99999964], + [56.870003, 28.434996, 13.036095], + [0.01818182, 0.00909091, 0.00454545], + [1.0691562, 0.26728904, 0.05345781], + [15.0, 17.999956, 15.000029], + [56.870003, 88.40799, 93.73906], + [14.0], + [0.84580624, 13.091425, 297.23172], + ] assert_pri(p_true, slopes, 0) assert_opt(o_true, ols_opt, 0) @@ -510,20 +536,21 @@ def test_pix_11(): # Populate pixel-specific SCI and GROUPDQ arrays ramp_data.data[0, :, 0, 0] = np.array( - [15., 25., 35., 54., 55., 65., 75., 94., 95., 105.], dtype=np.float32) + [15.0, 25.0, 35.0, 54.0, 55.0, 65.0, 75.0, 94.0, 95.0, 105.0], dtype=np.float32 + ) dq = [GOOD, GOOD] + [SAT] * (ngroups - 2) ramp_data.groupdq[0, :, 0, 0] = np.array(dq) save_opt, ncores, bufsize, algo = True, "none", 1024 * 30000, "OLS" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise, gain, algo,"optimal", ncores, dqflags) + ramp_data, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) # Set truth values for PRIMARY results: - p_true = [1., GOOD, 1.042755, 0.01818182, 1.0691562] + p_true = [1.0, GOOD, 1.042755, 0.01818182, 1.0691562] # Set truth values for OPTIONAL results: - o_true = [1., 56.870003, 0.01818182, 1.0691562, 15., 56.870003, 14., - 0.84580624] + o_true = [1.0, 56.870003, 0.01818182, 1.0691562, 15.0, 56.870003, 14.0, 0.84580624] assert_pri(p_true, slopes, 0) assert_opt(o_true, ols_opt, 0) @@ -545,14 +572,15 @@ def test_pix_12(): ramp_data, gain, rnoise = create_blank_ramp_data(dims, var, timing) # Populate pixel-specific SCI and GROUPDQ arrays - ramp_data.data[0, :, 0, 0] = np.array([15., 59025.], dtype=np.float32) + ramp_data.data[0, :, 0, 0] = np.array([15.0, 59025.0], dtype=np.float32) ramp_data.groupdq[0, :, 0, 0] = np.array([0, SAT]) - ramp_data.data[0, :, 0, 1] = np.array([61000., 61000.], dtype=np.float32) + ramp_data.data[0, :, 0, 1] = np.array([61000.0, 61000.0], dtype=np.float32) ramp_data.groupdq[0, :, 0, 1] = np.array([SAT, SAT]) save_opt, ncores, bufsize, algo = True, "none", 1024 * 30000, "OLS" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise, gain, algo,"optimal", ncores, dqflags) + ramp_data, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) # Set truth values for PRIMARY results for pixel 1: # slope, dq, err, var_p, var_r @@ -564,7 +592,7 @@ def test_pix_12(): # slope, sig_slope, var_p, var_r, yint, sig_yint, pedestal, weights # slope = group1 / deltatime = 15 / 10 = 1.5 # sig_slope, yint, sig_yint, and pedestal are all zero, because only 1 good group - o_true = [1.5, 0., 0.027273, 1.069156, 0., 0., 13.5, 0.831839] + o_true = [1.5, 0.0, 0.027273, 1.069156, 0.0, 0.0, 13.5, 0.831839] assert_pri(p_true, slopes, 0) assert_opt(o_true, ols_opt, 0) @@ -573,12 +601,12 @@ def test_pix_12(): # slope, dq, err, var_p, var_r # slope = zero, because no good data # dq = 3 (saturation + do_not_use) because both groups are saturated - p_true = [np.nan, 3, 0., 0., 0.] 
+ p_true = [np.nan, 3, 0.0, 0.0, 0.0] # Set truth values for OPTIONAL results: # slope, sig_slope, var_p, var_r, yint, sig_yint, pedestal, weights # all values zero, because no good data - o_true = [0., 0., 0., 0., 0., 0., 0., 0.] + o_true = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] assert_pri(p_true, slopes, 1) assert_opt(o_true, ols_opt, 1) @@ -598,20 +626,21 @@ def test_miri_0(): # Populate pixel-specific SCI and GROUPDQ arrays ramp_data.data[0, :, 0, 0] = np.array( - [8888., 25., 35., 54., 55., 65., 75., 94., 95., 888.], dtype=np.float32) + [8888.0, 25.0, 35.0, 54.0, 55.0, 65.0, 75.0, 94.0, 95.0, 888.0], dtype=np.float32 + ) dq = [DNU] + [GOOD] * (ngroups - 2) + [DNU] ramp_data.groupdq[0, :, 0, 0] = np.array(dq) save_opt, ncores, bufsize, algo = True, "none", 1024 * 30000, "OLS" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise, gain, algo,"optimal", ncores, dqflags) + ramp_data, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) # Set truth values for PRIMARY results: p_true = [1.025854, GOOD, 0.12379601, 0.0025974, 0.01272805] # Set truth values for OPTIONAL results: - o_true = [1.025854, 6.450687, 0.0025974, 0.01272805, 26.439266, 27.842508, - 23.974146, 4257.684] + o_true = [1.025854, 6.450687, 0.0025974, 0.01272805, 26.439266, 27.842508, 23.974146, 4257.684] assert_pri(p_true, slopes, 0) assert_opt(o_true, ols_opt, 0) @@ -631,20 +660,21 @@ def test_miri_1(): # Populate pixel-specific SCI and GROUPDQ arrays ramp_data.data[0, :, 0, 0] = np.array( - [7777., 125., 135., 154., 165., 175., 185., 204., 205., 777.], dtype=np.float32) + [7777.0, 125.0, 135.0, 154.0, 165.0, 175.0, 185.0, 204.0, 205.0, 777.0], dtype=np.float32 + ) dq = [DNU | JUMP] + [GOOD] * (ngroups - 2) + [DNU] ramp_data.groupdq[0, :, 0, 0] = np.array(dq) save_opt, ncores, bufsize, algo = True, "none", 1024 * 30000, "OLS" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise, gain, algo,"optimal", ncores, dqflags) + ramp_data, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) # Set truth values for PRIMARY results: p_true = [1.1996487, GOOD, 0.12379601, 0.0025974, 0.01272805] # Set truth values for OPTIONAL results: - o_true = [1.1996487, 6.450687, 0.0025974, 0.01272805, 126.110214, - 27.842508, 123.800354, 4257.684] + o_true = [1.1996487, 6.450687, 0.0025974, 0.01272805, 126.110214, 27.842508, 123.800354, 4257.684] assert_pri(p_true, slopes, 0) assert_opt(o_true, ols_opt, 0) @@ -664,20 +694,21 @@ def test_miri_2(): # Populate pixel-specific SCI and GROUPDQ arrays ramp_data.data[0, :, 0, 0] = np.array( - [4444., 25., 35., 54., 55., 65., 75., 94., 95., 444.], dtype=np.float32) + [4444.0, 25.0, 35.0, 54.0, 55.0, 65.0, 75.0, 94.0, 95.0, 444.0], dtype=np.float32 + ) dq = [DNU | JUMP] + [GOOD] * (ngroups - 2) + [DNU | JUMP] ramp_data.groupdq[0, :, 0, 0] = np.array(dq) save_opt, ncores, bufsize, algo = True, "none", 1024 * 30000, "OLS" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise, gain, algo,"optimal", ncores, dqflags) + ramp_data, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) # Set truth values for PRIMARY results: p_true = [1.025854, GOOD, 0.12379601, 0.0025974, 0.01272805] # Set truth values for OPTIONAL results: - o_true = [1.025854, 6.450687, 0.0025974, 0.01272805, 26.439266, 27.842508, - 23.974146, 4257.684] + o_true = [1.025854, 6.450687, 0.0025974, 0.01272805, 26.439266, 27.842508, 23.974146, 4257.684] assert_pri(p_true, slopes, 0) 
assert_opt(o_true, ols_opt, 0) @@ -697,20 +728,21 @@ def test_miri_3(): # Populate pixel-specific SCI and GROUPDQ arrays ramp_data.data[0, :, 0, 0] = np.array( - [6666., 25., 35., 54., 55., 65., 75., 94., 95., 666.], dtype=np.float32) + [6666.0, 25.0, 35.0, 54.0, 55.0, 65.0, 75.0, 94.0, 95.0, 666.0], dtype=np.float32 + ) dq = [DNU] + [GOOD] * (ngroups - 2) + [DNU | JUMP] ramp_data.groupdq[0, :, 0, 0] = np.array(dq) save_opt, ncores, bufsize, algo = True, "none", 1024 * 30000, "OLS" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise, gain, algo,"optimal", ncores, dqflags) + ramp_data, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags + ) # Set truth values for PRIMARY results: p_true = [1.025854, GOOD, 0.12379601, 0.0025974, 0.01272805] # Set truth values for OPTIONAL results: - o_true = [1.025854, 6.450687, 0.0025974, 0.01272805, 26.439266, - 27.842508, 23.974146, 4257.684] + o_true = [1.025854, 6.450687, 0.0025974, 0.01272805, 26.439266, 27.842508, 23.974146, 4257.684] assert_pri(p_true, slopes, 0) assert_opt(o_true, ols_opt, 0) @@ -756,11 +788,15 @@ def create_blank_ramp_data(dims, var, timing, ts_name="NIRSpec"): gdq = np.zeros(shape=(nints, ngroups, nrows, ncols), dtype=np.uint8) ramp_data = RampData() - ramp_data.set_arrays( - data=data, err=err, groupdq=gdq, pixeldq=pixdq) + ramp_data.set_arrays(data=data, err=err, groupdq=gdq, pixeldq=pixdq) ramp_data.set_meta( - name=ts_name, frame_time=frame_time, group_time=group_time, - groupgap=groupgap, nframes=nframes, drop_frames1=None) + name=ts_name, + frame_time=frame_time, + group_time=group_time, + groupgap=groupgap, + nframes=nframes, + drop_frames1=None, + ) ramp_data.set_dqflags(dqflags) gain = np.ones(shape=(nrows, ncols), dtype=np.float32) * gval @@ -798,18 +834,17 @@ def assert_pri(p_true, new_info, pix): data, dq, var_poisson, var_rnoise, err = new_info - npt.assert_allclose(data[0, pix], p_true[0], atol=2E-5, rtol=2e-5) - npt.assert_allclose(dq[0, pix], p_true[1], atol=1E-1) - npt.assert_allclose(err[0, pix], p_true[2], atol=2E-5, rtol=2e-5) - npt.assert_allclose(var_poisson[0, pix], p_true[3], atol=2E-5, rtol=2e-5) - npt.assert_allclose(var_rnoise[0, pix], p_true[4], atol=2E-5, rtol=2e-5) + npt.assert_allclose(data[0, pix], p_true[0], atol=2e-5, rtol=2e-5) + npt.assert_allclose(dq[0, pix], p_true[1], atol=1e-1) + npt.assert_allclose(err[0, pix], p_true[2], atol=2e-5, rtol=2e-5) + npt.assert_allclose(var_poisson[0, pix], p_true[3], atol=2e-5, rtol=2e-5) + npt.assert_allclose(var_rnoise[0, pix], p_true[4], atol=2e-5, rtol=2e-5) return None def debug_opt(o_true, opt_info, pix): - (slope, sigslope, var_poisson, var_rnoise, - yint, sigyint, pedestal, weights, crmag) = opt_info + (slope, sigslope, var_poisson, var_rnoise, yint, sigyint, pedestal, weights, crmag) = opt_info opt_slope = slope[0, :, 0, pix] opt_sigslope = sigslope[0, :, 0, pix] @@ -854,8 +889,7 @@ def assert_opt(o_true, opt_info, pix): Selecting the particular (and only) ramp in the optional output, which is [0,:,0,0] """ - (slope, sigslope, var_poisson, var_rnoise, - yint, sigyint, pedestal, weights, crmag) = opt_info + (slope, sigslope, var_poisson, var_rnoise, yint, sigyint, pedestal, weights, crmag) = opt_info opt_slope = slope[0, :, 0, pix] opt_sigslope = sigslope[0, :, 0, pix] @@ -866,14 +900,14 @@ def assert_opt(o_true, opt_info, pix): opt_pedestal = pedestal[:, 0, pix] opt_weights = weights[0, :, 0, pix] - npt.assert_allclose(opt_slope, o_true[0], atol=2E-5, rtol=2e-5) - npt.assert_allclose(opt_sigslope, 
o_true[1], atol=2E-5, rtol=2e-5) - npt.assert_allclose(opt_var_poisson, o_true[2], atol=2E-5, rtol=2e-5) - npt.assert_allclose(opt_var_rnoise, o_true[3], atol=2E-5, rtol=2e-5) - npt.assert_allclose(opt_yint, o_true[4], atol=2E-2) - npt.assert_allclose(opt_sigyint, o_true[5], atol=2E-5, rtol=2e-5) - npt.assert_allclose(opt_pedestal, o_true[6], atol=2E-5, rtol=3e-5) - npt.assert_allclose(opt_weights, o_true[7], atol=2E-5, rtol=2e-5) + npt.assert_allclose(opt_slope, o_true[0], atol=2e-5, rtol=2e-5) + npt.assert_allclose(opt_sigslope, o_true[1], atol=2e-5, rtol=2e-5) + npt.assert_allclose(opt_var_poisson, o_true[2], atol=2e-5, rtol=2e-5) + npt.assert_allclose(opt_var_rnoise, o_true[3], atol=2e-5, rtol=2e-5) + npt.assert_allclose(opt_yint, o_true[4], atol=2e-2) + npt.assert_allclose(opt_sigyint, o_true[5], atol=2e-5, rtol=2e-5) + npt.assert_allclose(opt_pedestal, o_true[6], atol=2e-5, rtol=3e-5) + npt.assert_allclose(opt_weights, o_true[7], atol=2e-5, rtol=2e-5) return None diff --git a/tests/test_ramp_fitting_gls_fit.py b/tests/test_ramp_fitting_gls_fit.py index c31dfe64..73aa2155 100644 --- a/tests/test_ramp_fitting_gls_fit.py +++ b/tests/test_ramp_fitting_gls_fit.py @@ -101,16 +101,14 @@ def test_one_group_small_buffer(): rnoise, gain = 10, 1 group_time, frame_time = 1.0, 1 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) ramp_data.data[0, 0, 50, 50] = 10.0 save_opt, algo, ncores = False, "GLS", "none" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, 512, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, 512, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) data = slopes[0] tol = 1.0e-6 @@ -126,9 +124,7 @@ def test_two_integrations(): rnoise, gain = 1, 5 group_time, frame_time = 1.0, 1 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) row, col = 0, 0 @@ -138,8 +134,8 @@ def test_two_integrations(): save_opt, algo, ncores = False, "GLS", "none" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, 512, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, 512, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) np.testing.assert_allclose(slopes[0][row, col], 133.3377685, 1e-6) @@ -153,17 +149,15 @@ def test_one_group_two_integrations(): rnoise, gain = 10, 1 group_time, frame_time = 1.0, 1 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) - ramp_data.data[0, 0, 0, 0] = 10. - ramp_data.data[1, 0, 0, 0] = 11. 
+ ramp_data.data[0, 0, 0, 0] = 10.0 + ramp_data.data[1, 0, 0, 0] = 11.0 save_opt, algo, ncores = False, "GLS", "none" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, 512, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, 512, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) data = slopes[0] @@ -180,14 +174,12 @@ def test_nocrs_noflux(): rnoise, gain = 10, 1 group_time, frame_time = 1.0, 1 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) save_opt, algo, ncores = False, "GLS", "none" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, 512, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, 512, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) assert 0 == np.max(slopes[0]) assert 0 == np.min(slopes[0]) @@ -204,15 +196,13 @@ def test_nocrs_noflux_firstrows_are_nan(): rnoise, gain = 10, 1 group_time, frame_time = 1.0, 1 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) ramp_data.data[0:, 0:12, :] = np.nan save_opt, algo, ncores = False, "GLS", "none" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, 512, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, 512, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) assert 0 == np.max(slopes[0]) assert 0 == np.min(slopes[0]) @@ -227,9 +217,7 @@ def test_error_when_frame_time_not_set(): rnoise, gain = 10, 1 group_time, frame_time = 1.0, 1 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) ramp_data.data[0:, 0:12, :] = np.nan ramp_data.frame_time = None # Must be set @@ -237,8 +225,8 @@ def test_error_when_frame_time_not_set(): save_opt, algo, ncores = False, "GLS", "none" with pytest.raises(UnboundLocalError): slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, 512, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, 512, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) def test_five_groups_two_integrations_Poisson_noise_only(): @@ -250,9 +238,7 @@ def test_five_groups_two_integrations_Poisson_noise_only(): rnoise, gain = 7, 2000 group_time, frame_time = 3.0, 1 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) row, col = 0, 0 ramp_data.data[0, 0, row, col] = 10.0 @@ -268,8 +254,8 @@ def test_five_groups_two_integrations_Poisson_noise_only(): save_opt, algo, ncores = False, "GLS", "none" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, 512, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, 512, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) out_slope = slopes[0][row, col] deltaDN1 = 50 @@ -290,17 +276,15 @@ def test_bad_gain_values(): rnoise, gain = 7, 2000 group_time, frame_time = 3.0, 1 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) gain2d[r1, c1] = -10 gain2d[r2, c2] = 
np.nan # save_opt, algo, ncores = False, "OLS", "none" save_opt, algo, ncores = False, "GLS", "none" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, 512, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, 512, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) # data, dq, var_poisson, var_rnoise, err = slopes data, dq, err = slopes @@ -310,8 +294,8 @@ def test_bad_gain_values(): assert dq[r2, c2] == flag_check # These asserts are wrong for some reason - assert(0 == np.max(data)) - assert(0 == np.min(data)) + assert 0 == np.max(data) + assert 0 == np.min(data) def test_simple_ramp(): @@ -324,21 +308,19 @@ def test_simple_ramp(): rnoise, gain = 7, 2000 group_time, frame_time = 3.0, 3 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) ramp = np.array(list(range(ngroups))) * 20 + 10 ramp_data.data[0, :, 50, 50] = ramp save_opt, algo, ncores = False, "GLS", "none" slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, 512, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, 512, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) ans = slopes[0][50, 50] - check = 20. / 3 - tol = 1.e-5 + check = 20.0 / 3 + tol = 1.0e-5 np.testing.assert_allclose(ans, check, tol) @@ -351,24 +333,22 @@ def test_read_noise_only_fit(): rnoise, gain = 50, 1 group_time, frame_time = 1.0, 1 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) - ramp_arr = [10., 15., 25., 33., 60.] + ramp_arr = [10.0, 15.0, 25.0, 33.0, 60.0] ramp_data.data[0, :, 50, 50] = np.array(ramp_arr) save_opt, algo, ncores, bufsize = False, "GLS", "none", 1024 * 30000 slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) xvalues = np.arange(5) * 1.0 yvalues = np.array(ramp_arr) coeff = np.polyfit(xvalues, yvalues, 1) ans = slopes[0][50, 50] check = coeff[0] - tol = 1.e-2 + tol = 1.0e-2 # print(f"ans = {ans}") # 11.78866004 # print(f"check = {check}") # 11.79999999 np.testing.assert_allclose(ans, check, tol) @@ -376,30 +356,26 @@ def test_read_noise_only_fit(): @pytest.mark.skip(reason="GLS not sure what expected value is.") def test_photon_noise_only_fit(): - """ - - """ + """ """ nints, ngroups, nrows, ncols = 1, 5, 103, 102 dims = (nints, ngroups, nrows, ncols) rnoise, gain = 1, 1000 group_time, frame_time = 1.0, 1 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) - ramp_arr = [10., 15., 25., 33., 60.] 
+ ramp_arr = [10.0, 15.0, 25.0, 33.0, 60.0] ramp_data.data[0, :, 50, 50] = np.array(ramp_arr) - check = (ramp_data.data[0,4,50,50] - ramp_data.data[0,0,50,50]) / 4.0 + check = (ramp_data.data[0, 4, 50, 50] - ramp_data.data[0, 0, 50, 50]) / 4.0 save_opt, algo, ncores, bufsize = False, "GLS", "none", 1024 * 30000 slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) ans = slopes[0][50, 50] - tol = 1.e-2 + tol = 1.0e-2 # print(f"ans = {ans}") # 8.6579208 # print(f"check = {check}") # 12.5 np.testing.assert_allclose(ans, check, tol) @@ -407,34 +383,30 @@ def test_photon_noise_only_fit(): @pytest.mark.skip(reason="GLS not sure what expected value is.") def test_photon_noise_only_bad_last_group(): - """ - - """ + """ """ nints, ngroups, nrows, ncols = 1, 5, 103, 102 dims = (nints, ngroups, nrows, ncols) rnoise, gain = 1, 1000 group_time, frame_time = 1.0, 1 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) ramp_data.data[0, 0, 50, 50] = 10.0 ramp_data.data[0, 1, 50, 50] = 15.0 ramp_data.data[0, 2, 50, 50] = 25.0 ramp_data.data[0, 3, 50, 50] = 33.0 ramp_data.data[0, 4, 50, 50] = 60.0 - check = (ramp_data.data[0,3,50,50] - ramp_data.data[0,0,50,50]) / 3.0 + check = (ramp_data.data[0, 3, 50, 50] - ramp_data.data[0, 0, 50, 50]) / 3.0 - ramp_data.groupdq[0,4,:,:] = DO_NOT_USE + ramp_data.groupdq[0, 4, :, :] = DO_NOT_USE save_opt, algo, ncores, bufsize = False, "GLS", "none", 1024 * 30000 slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) ans = slopes[0][50, 50] - tol = 1.e-2 + tol = 1.0e-2 # print(f"ans = {ans}") # 8.6579208 # print(f"check = {check}") # 7.6666666 np.testing.assert_allclose(ans, check, tol) @@ -442,18 +414,14 @@ def test_photon_noise_only_bad_last_group(): @pytest.mark.skip(reason="GLS not sure what expected value is.") def test_photon_noise_with_unweighted_fit(): - """ - - """ + """ """ nints, ngroups, nrows, ncols = 1, 5, 103, 102 dims = (nints, ngroups, nrows, ncols) rnoise, gain = 1, 1000 group_time, frame_time = 1.0, 1 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) ramp_data.data[0, 0, 50, 50] = 10.0 ramp_data.data[0, 1, 50, 50] = 15.0 ramp_data.data[0, 2, 50, 50] = 25.0 @@ -462,15 +430,15 @@ def test_photon_noise_with_unweighted_fit(): save_opt, algo, ncores, bufsize = False, "GLS", "none", 1024 * 30000 slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, - "unweighted", ncores, test_dq_flags) + ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "unweighted", ncores, test_dq_flags + ) xvalues = np.arange(5) * 1.0 - yvalues = np.array([10,15,25,33,60]) + yvalues = np.array([10, 15, 25, 33, 60]) coeff = np.polyfit(xvalues, yvalues, 1) check = coeff[0] ans = slopes[0][50, 50] - tol = 1.e-5 + tol = 1.0e-5 # print(f"ans = {ans}") # 8.6579208 # print(f"check = {check}") # 11.7999999 np.testing.assert_allclose(ans, check, tol) @@ -486,15 +454,13 @@ def test_two_groups_fit(): rnoise, gain = 10, 1 group_time, 
frame_time = 1.0, 1 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) ramp_data.data[0, 0, 0, 0] = 10.0 ramp_data.data[0, 1, 0, 0] = 15.0 ramp_data.data[0, 0, 0, 1] = 20.0 ramp_data.data[0, 0, 0, 2] = 200.0 ramp_data.data[0, 1, 0, 2] = 600.0 - check = (ramp_data.data[0, 1, 0, 0] - ramp_data.data[0, 0, 0, 0]) + check = ramp_data.data[0, 1, 0, 0] - ramp_data.data[0, 0, 0, 0] ramp_data.drop_frames1 = 0 # 2nd group is saturated @@ -506,12 +472,12 @@ def test_two_groups_fit(): save_opt, algo, ncores, bufsize = False, "GLS", "none", 1024 * 30000 slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) ans_data = slopes[0][0, 0] ans_dq = slopes[1] - tol = 1.e-5 + tol = 1.0e-5 np.testing.assert_allclose(ans_data, check, tol) assert ans_dq[0, 0] == GOOD @@ -520,100 +486,88 @@ def test_two_groups_fit(): def test_four_groups_oneCR_orphangroupatend_fit(): - """ - - """ + """ """ nints, ngroups, nrows, ncols = 1, 4, 103, 102 dims = (nints, ngroups, nrows, ncols) rnoise, gain = 10, 1 group_time, frame_time = 1.0, 1 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) ramp_data.data[0, 0, 50, 50] = 10.0 ramp_data.data[0, 1, 50, 50] = 15.0 ramp_data.data[0, 2, 50, 50] = 20.0 ramp_data.data[0, 3, 50, 50] = 145.0 - ramp_data.groupdq[0,3,50,50] = JUMP_DET + ramp_data.groupdq[0, 3, 50, 50] = JUMP_DET - check = (ramp_data.data[0,1,50,50] - ramp_data.data[0,0,50,50]) + check = ramp_data.data[0, 1, 50, 50] - ramp_data.data[0, 0, 50, 50] save_opt, algo, ncores, bufsize = False, "GLS", "none", 1024 * 30000 slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) ans = slopes[0][50, 50] - tol = 1.e-6 + tol = 1.0e-6 np.testing.assert_allclose(ans, check, tol) def test_four_groups_two_CRs_at_end(): - """ - - """ + """ """ nints, ngroups, nrows, ncols = 1, 4, 103, 102 dims = (nints, ngroups, nrows, ncols) rnoise, gain = 10, 1 group_time, frame_time = 1.0, 1 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) ramp_data.data[0, 0, 50, 50] = 10.0 ramp_data.data[0, 1, 50, 50] = 15.0 ramp_data.data[0, 2, 50, 50] = 25.0 ramp_data.data[0, 3, 50, 50] = 145.0 - check = (ramp_data.data[0,1,50,50] - ramp_data.data[0,0,50,50]) + check = ramp_data.data[0, 1, 50, 50] - ramp_data.data[0, 0, 50, 50] - ramp_data.groupdq[0,2,50,50] = JUMP_DET - ramp_data.groupdq[0,3,50,50] = JUMP_DET + ramp_data.groupdq[0, 2, 50, 50] = JUMP_DET + ramp_data.groupdq[0, 3, 50, 50] = JUMP_DET save_opt, algo, ncores, bufsize = False, "GLS", "none", 1024 * 30000 slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) ans = slopes[0][50, 50] - tol = 1.e-6 + tol = 1.0e-6 np.testing.assert_allclose(ans, check, tol) @pytest.mark.skip(reason="GLS code 
does not [yet] handle all groups as jump.") def test_four_groups_four_CRs(): - """ - - """ + """ """ nints, ngroups, nrows, ncols = 1, 10, 103, 102 dims = (nints, ngroups, nrows, ncols) rnoise, gain = 10, 1 group_time, frame_time = 1.0, 1 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) ramp_data.data[0, 0, 50, 50] = 10.0 ramp_data.data[0, 1, 50, 50] = 15.0 ramp_data.data[0, 2, 50, 50] = 25.0 ramp_data.data[0, 3, 50, 50] = 145.0 - ramp_data.groupdq[0,0,50,50] = JUMP_DET - ramp_data.groupdq[0,1,50,50] = JUMP_DET - ramp_data.groupdq[0,2,50,50] = JUMP_DET - ramp_data.groupdq[0,3,50,50] = JUMP_DET + ramp_data.groupdq[0, 0, 50, 50] = JUMP_DET + ramp_data.groupdq[0, 1, 50, 50] = JUMP_DET + ramp_data.groupdq[0, 2, 50, 50] = JUMP_DET + ramp_data.groupdq[0, 3, 50, 50] = JUMP_DET save_opt, algo, ncores, bufsize = False, "GLS", "none", 1024 * 30000 slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) ans = slopes[0][50, 50] check = 0 - tol = 1.e-6 + tol = 1.0e-6 # print(f"ans = {ans}") # print(f"check = {check}") np.testing.assert_allclose(ans, check, tol) @@ -621,123 +575,106 @@ def test_four_groups_four_CRs(): @pytest.mark.skip(reason="GLS code does not [yet] handle only one good group.") def test_four_groups_three_CRs_at_end(): - """ - - """ + """ """ nints, ngroups, nrows, ncols = 1, 4, 103, 102 dims = (nints, ngroups, nrows, ncols) rnoise, gain = 10, 1 group_time, frame_time = 1.0, 1 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) ramp_data.data[0, 0, 50, 50] = 10.0 ramp_data.data[0, 1, 50, 50] = 15.0 ramp_data.data[0, 2, 50, 50] = 25.0 ramp_data.data[0, 3, 50, 50] = 145.0 - ramp_data.groupdq[0,1,50,50] = JUMP_DET - ramp_data.groupdq[0,2,50,50] = JUMP_DET - ramp_data.groupdq[0,3,50,50] = JUMP_DET + ramp_data.groupdq[0, 1, 50, 50] = JUMP_DET + ramp_data.groupdq[0, 2, 50, 50] = JUMP_DET + ramp_data.groupdq[0, 3, 50, 50] = JUMP_DET save_opt, algo, ncores, bufsize = False, "GLS", "none", 1024 * 30000 slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) ans = slopes[0][50, 50] check = 10.0 - tol = 1.e-6 + tol = 1.0e-6 # print(f"ans = {ans}") # print(f"check = {check}") np.testing.assert_allclose(ans, check, tol) def test_four_groups_CR_causes_orphan_1st_group(): - """ - - """ + """ """ nints, ngroups, nrows, ncols = 1, 4, 103, 102 dims = (nints, ngroups, nrows, ncols) rnoise, gain = 10000, 0.01 group_time, frame_time = 1.0, 1 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) ramp_data.data[0, 0, 50, 50] = 10.0 ramp_data.data[0, 1, 50, 50] = 125.0 ramp_data.data[0, 2, 50, 50] = 145.0 ramp_data.data[0, 3, 50, 50] = 165.0 - ramp_data.groupdq[0,1,50,50] = JUMP_DET + ramp_data.groupdq[0, 1, 50, 50] = JUMP_DET save_opt, algo, ncores, bufsize = False, "GLS", "none", 1024 * 30000 slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, 
rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) ans = slopes[0][50, 50] check = 20.0 - tol = 1.e-6 + tol = 1.0e-6 np.testing.assert_allclose(ans, check, tol) def test_one_group_fit(): - """ - - """ + """ """ nints, ngroups, nrows, ncols = 1, 1, 103, 102 dims = (nints, ngroups, nrows, ncols) rnoise, gain = 10, 1 group_time, frame_time = 1.0, 1 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) ramp_data.data[0, 0, 50, 50] = 10.0 save_opt, algo, ncores, bufsize = False, "GLS", "none", 1024 * 30000 slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) ans = slopes[0][50, 50] check = 10.0 - tol = 1.e-6 + tol = 1.0e-6 np.testing.assert_allclose(ans, check, tol) @pytest.mark.skip(reason="GLS not sure what expected value is.") def test_two_groups_unc(): - """ - - """ + """ """ deltaDN = 5 # TODO: Not sure wha this is supposed to be. nints, ngroups, nrows, ncols = 1, 2, 103, 102 dims = (nints, ngroups, nrows, ncols) rnoise, gain = 10, 2 group_time, frame_time = 3.0, 3.0 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) ramp_data.data[0, 0, 50, 50] = 10.0 ramp_data.data[0, 1, 50, 50] = 10.0 + deltaDN save_opt, algo, ncores, bufsize = False, "GLS", "none", 1024 * 30000 slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) ans = slopes[2][50, 50] - check = np.sqrt( - (deltaDN / gain) / group_time**2 + (rnoise**2 / group_time**2)) - tol = 1.e-6 + check = np.sqrt((deltaDN / gain) / group_time**2 + (rnoise**2 / group_time**2)) + tol = 1.0e-6 # print(f"ans = {ans}") # print(f"check = {check}") np.testing.assert_allclose(ans, check, tol) @@ -745,10 +682,8 @@ def test_two_groups_unc(): @pytest.mark.skip(reason="GLS does not comopute VAR_XXX arrays.") def test_five_groups_unc(): + """ """ """ - - """ - ''' grouptime=3.0 # deltaDN = 5 ingain = 2 @@ -756,15 +691,13 @@ def test_five_groups_unc(): ngroups=5 ramp_data, gdq, rnModel, pixdq, err, gain = setup_inputs(ngroups=ngroups, gain=ingain, readnoise=inreadnoise, deltatime=grouptime) - ''' + """ nints, ngroups, nrows, ncols = 1, 5, 103, 102 dims = (nints, ngroups, nrows, ncols) rnoise, gain = 7, 2 group_time, frame_time = 3.0, 3 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) ramp_data.data[0, 0, 50, 50] = 10.0 ramp_data.data[0, 1, 50, 50] = 15.0 @@ -776,10 +709,10 @@ def test_five_groups_unc(): save_opt, algo, ncores, bufsize = False, "GLS", "none", 1024 * 30000 slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) - ''' + """ # Not sure what to do with this test. The VAR_XXX arrays don't get # computed in GLS. 
@@ -794,22 +727,18 @@ def test_five_groups_unc(): (12 * single_sample_readnoise**2/(ngroups * (ngroups**2 - 1) * grouptime**2)), 1e-6) np.testing.assert_allclose(slopes[0].err[50,50], np.sqrt(slopes[0].var_poisson[50,50] + slopes[0].var_rnoise[50,50] ), 1e-6) - ''' + """ @pytest.mark.skip(reason="GLS doesn't produce the optional results product, yet.") def test_oneCR_10_groups_combination(): - """ - - """ + """ """ nints, ngroups, nrows, ncols = 1, 10, 103, 102 dims = (nints, ngroups, nrows, ncols) rnoise, gain = 7, 200 group_time, frame_time = 3.0, 3 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) # two segments perfect fit, second segment has twice the slope ramp_data.data[0, 0, 50, 50] = 15.0 @@ -823,19 +752,19 @@ def test_oneCR_10_groups_combination(): ramp_data.data[0, 8, 50, 50] = 170.0 ramp_data.data[0, 9, 50, 50] = 180.0 - ramp_data.groupdq[0,5,50,50] = JUMP_DET + ramp_data.groupdq[0, 5, 50, 50] = JUMP_DET save_opt, algo, ncores, bufsize = False, "GLS", "none", 1024 * 30000 slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) ans = slopes[0][50, 50] print(f"ans = {ans}") # TODO Need to add the optional results product to GLS - ''' + """ segment_groups = 5 single_sample_readnoise = np.float64(inreadnoise / np.sqrt(2)) @@ -851,23 +780,19 @@ def test_oneCR_10_groups_combination(): #check that the slopes of the two segments are correct np.testing.assert_allclose(opt_model.slope[0,0,50, 50], 5/3.0,rtol=1e-5) np.testing.assert_allclose(opt_model.slope[0,1,50, 50], 10/3.0,rtol=1e-5) - ''' + """ @pytest.mark.skip(reason="GLS doesn't produce the optional results product, yet.") def test_oneCR_10_groups_combination_noisy2ndSegment(): - """ - - """ + """ """ nints, ngroups, nrows, ncols = 1, 10, 103, 102 dims = (nints, ngroups, nrows, ncols) # use large gain to show that Poisson noise doesn't affect the recombination rnoise, gain = 7, 200 group_time, frame_time = 3.0, 3 - ramp_data, gain2d, rnoise2d = setup_inputs( - dims, gain, rnoise, group_time, frame_time - ) + ramp_data, gain2d, rnoise2d = setup_inputs(dims, gain, rnoise, group_time, frame_time) # two segments perfect fit, second segment has twice the slope ramp_data.data[0, 0, 50, 50] = 15.0 @@ -881,16 +806,16 @@ def test_oneCR_10_groups_combination_noisy2ndSegment(): ramp_data.data[0, 8, 50, 50] = 168.0 ramp_data.data[0, 9, 50, 50] = 180.0 - ramp_data.groupdq[0,5,50,50] = JUMP_DET + ramp_data.groupdq[0, 5, 50, 50] = JUMP_DET save_opt, algo, ncores, bufsize = False, "GLS", "none", 1024 * 30000 slopes, cube, ols_opt, gls_opt = ramp_fit_data( - ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, - "optimal", ncores, test_dq_flags) + ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags + ) - ''' + """ avg_slope = (opt_model.slope[0,0,50,50] + opt_model.slope[0,1,50,50])/2.0 # even with noiser second segment, final slope should be just the average # since they have the same number of groups np.testing.assert_allclose(slopes.data[50, 50], avg_slope,rtol=1e-5) - ''' + """ diff --git a/tests/test_saturation.py b/tests/test_saturation.py index 19b91959..fb207f77 100644 --- a/tests/test_saturation.py +++ b/tests/test_saturation.py @@ -9,37 +9,34 @@ from stcal.saturation.saturation import 
flag_saturated_pixels # dictionary with required DQ flags -DQFLAGS = {'DO_NOT_USE': 1, 'SATURATED': 2, 'AD_FLOOR': 64, - 'NO_SAT_CHECK': 2097152} -ATOD_LIMIT = 65535. # Hard DN limit of 16-bit A-to-D converter +DQFLAGS = {"DO_NOT_USE": 1, "SATURATED": 2, "AD_FLOOR": 64, "NO_SAT_CHECK": 2097152} +ATOD_LIMIT = 65535.0 # Hard DN limit of 16-bit A-to-D converter def test_basic_saturation_flagging(): - # Create inputs, data, and saturation maps - data = np.zeros((1, 5, 20, 20)).astype('float32') - gdq = np.zeros((1, 5, 20, 20)).astype('uint32') - pdq = np.zeros((20, 20)).astype('uint32') - sat_thresh = np.ones((20, 20)) * 100000. - sat_dq = np.zeros((20, 20)).astype('uint32') + data = np.zeros((1, 5, 20, 20)).astype("float32") + gdq = np.zeros((1, 5, 20, 20)).astype("uint32") + pdq = np.zeros((20, 20)).astype("uint32") + sat_thresh = np.ones((20, 20)) * 100000.0 + sat_dq = np.zeros((20, 20)).astype("uint32") # Add ramp values up to the saturation limit data[0, 0, 5, 5] = 0 data[0, 1, 5, 5] = 20000 data[0, 2, 5, 5] = 40000 - data[0, 3, 5, 5] = 60000 # Signal reaches saturation limit + data[0, 3, 5, 5] = 60000 # Signal reaches saturation limit data[0, 4, 5, 5] = 62000 # Set saturation value in the saturation model satvalue = 60000 sat_thresh[5, 5] = satvalue - gdq, pdq, _ = flag_saturated_pixels( - data, gdq, pdq, sat_thresh, sat_dq, ATOD_LIMIT, DQFLAGS) + gdq, pdq, _ = flag_saturated_pixels(data, gdq, pdq, sat_thresh, sat_dq, ATOD_LIMIT, DQFLAGS) # Make sure that groups with signal > saturation limit get flagged satindex = np.argmax(data[0, :, 5, 5] == satvalue) - assert np.all(gdq[0, satindex:, 5, 5] == DQFLAGS['SATURATED']) + assert np.all(gdq[0, satindex:, 5, 5] == DQFLAGS["SATURATED"]) def test_read_pattern_saturation_flagging(): @@ -47,17 +44,17 @@ def test_read_pattern_saturation_flagging(): are allocated into resultants.""" # Create inputs, data, and saturation maps - data = np.zeros((1, 5, 20, 20)).astype('float32') - gdq = np.zeros((1, 5, 20, 20)).astype('uint32') - pdq = np.zeros((20, 20)).astype('uint32') - sat_thresh = np.ones((20, 20)) * 100000. - sat_dq = np.zeros((20, 20)).astype('uint32') + data = np.zeros((1, 5, 20, 20)).astype("float32") + gdq = np.zeros((1, 5, 20, 20)).astype("uint32") + pdq = np.zeros((20, 20)).astype("uint32") + sat_thresh = np.ones((20, 20)) * 100000.0 + sat_dq = np.zeros((20, 20)).astype("uint32") # Add ramp values up to the saturation limit data[0, 0, 5, 5] = 0 data[0, 1, 5, 5] = 20000 data[0, 2, 5, 5] = 40000 - data[0, 3, 5, 5] = 60000 # Signal reaches saturation limit + data[0, 3, 5, 5] = 60000 # Signal reaches saturation limit data[0, 4, 5, 5] = 62000 # Set saturation value in the saturation model @@ -70,28 +67,27 @@ def test_read_pattern_saturation_flagging(): # This means that the effective saturation for the third resultant # is 60000 * 13 / 20 = 39000 and the third resultant should be marked # saturated. 
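# Illustrative sketch (not part of the patch): one reading of the effective
# per-resultant saturation threshold described in the comment above. Assuming
# a linearly accumulating signal, a resultant whose final read just reaches
# the saturation threshold only averages mean(reads) / max(reads) of that
# threshold, so the flagging threshold for the resultant scales accordingly.
def effective_resultant_threshold(threshold, reads):
    mean_read = sum(reads) / len(reads)  # average read index within the resultant
    return threshold * mean_read / max(reads)


# For the third resultant above (reads 3 through 10 of the read pattern):
# effective_resultant_threshold(60000, range(3, 11)) == 39000.0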
-    read_pattern = [
-        [1], [2], [3, 4, 5, 6, 7, 8, 9, 10], [11], [12], [13]]
+    read_pattern = [[1], [2], [3, 4, 5, 6, 7, 8, 9, 10], [11], [12], [13]]

     gdq, pdq, _ = flag_saturated_pixels(
-        data, gdq, pdq, sat_thresh, sat_dq, ATOD_LIMIT, DQFLAGS,
-        read_pattern=read_pattern)
+        data, gdq, pdq, sat_thresh, sat_dq, ATOD_LIMIT, DQFLAGS, read_pattern=read_pattern
+    )

     # Make sure that groups after the third get flagged
     assert np.all(gdq[0, 2:, 5, 5] == DQFLAGS["SATURATED"])


 def test_no_sat_check_at_limit():
-    """ Test to verify that pixels at the A-to-D limit (65535), but flagged with
-    NO_SAT_CHECK do NOT get flagged as saturated, and that their neighbors
-    also do NOT get flagged. """
+    """Test to verify that pixels at the A-to-D limit (65535), but flagged with
+    NO_SAT_CHECK, do NOT get flagged as saturated, and that their neighbors
+    also do NOT get flagged."""

     # Create inputs, data, and saturation maps
-    data = np.zeros((1, 5, 10, 10)).astype('float32')
-    gdq = np.zeros((1, 5, 10, 10)).astype('uint32')
-    pdq = np.zeros((10, 10)).astype('uint32')
-    sat_thresh = np.ones((10, 10)) * 50000.
-    sat_dq = np.zeros((10, 10)).astype('uint32')
+    data = np.zeros((1, 5, 10, 10)).astype("float32")
+    gdq = np.zeros((1, 5, 10, 10)).astype("uint32")
+    pdq = np.zeros((10, 10)).astype("uint32")
+    sat_thresh = np.ones((10, 10)) * 50000.0
+    sat_dq = np.zeros((10, 10)).astype("uint32")

     # Add ramp values that are flat-lined at the A-to-D limit,
     # which is well above the sat_thresh of 50,000.
@@ -102,31 +98,30 @@ def test_no_sat_check_at_limit():
     data[0, 4, 5, 5] = ATOD_LIMIT

     # Set a DQ value of NO_SAT_CHECK
-    sat_dq[5, 5] = DQFLAGS['NO_SAT_CHECK']
+    sat_dq[5, 5] = DQFLAGS["NO_SAT_CHECK"]

     # Run the saturation flagging
-    gdq, pdq, _ = flag_saturated_pixels(
-        data, gdq, pdq, sat_thresh, sat_dq, ATOD_LIMIT, DQFLAGS, 1)
+    gdq, pdq, _ = flag_saturated_pixels(data, gdq, pdq, sat_thresh, sat_dq, ATOD_LIMIT, DQFLAGS, 1)

     # Make sure that no groups for the flat-lined pixel and all
     # of its neighbors are flagged as saturated.
     # Also make sure that NO_SAT_CHECK has been propagated to the
     # pixeldq array.
-    assert np.all(gdq[0, :, 4:6, 4:6] != DQFLAGS['SATURATED'])
-    assert pdq[5, 5] == DQFLAGS['NO_SAT_CHECK']
+    assert np.all(gdq[0, :, 4:6, 4:6] != DQFLAGS["SATURATED"])
+    assert pdq[5, 5] == DQFLAGS["NO_SAT_CHECK"]


 def test_adjacent_pixel_flagging():
-    """ Test to see if specified number of adjacent pixels next to a saturated
-    pixel are also flagged, and that the edges of the dq array are treated
-    correctly when this is done. """
+    """Test to see if the specified number of adjacent pixels next to a saturated
+    pixel are also flagged, and that the edges of the dq array are treated
+    correctly when this is done."""

     # Create inputs, data, and saturation maps
-    data = np.ones((1, 2, 5, 5)).astype('float32')
-    gdq = np.zeros((1, 2, 5, 5)).astype('uint32')
-    pdq = np.zeros((5, 5)).astype('uint32')
-    sat_thresh = np.ones((5, 5)) * 60000  # sat. thresh is 60000
-    sat_dq = np.zeros((5, 5)).astype('uint32')
+    data = np.ones((1, 2, 5, 5)).astype("float32")
+    gdq = np.zeros((1, 2, 5, 5)).astype("uint32")
+    pdq = np.zeros((5, 5)).astype("uint32")
+    sat_thresh = np.ones((5, 5)) * 60000  # sat. thresh is 60000
+    sat_dq = np.zeros((5, 5)).astype("uint32")

     nints, ngroups, nrows, ncols = data.shape

@@ -136,31 +131,38 @@ def test_adjacent_pixel_flagging():
     data[0, 0, 0, 1] = 62000
     data[0, 0, 3, 3] = 62000

-    gdq, pdq, _ = flag_saturated_pixels(
-        data, gdq, pdq, sat_thresh, sat_dq, ATOD_LIMIT, DQFLAGS)
+    gdq, pdq, _ = flag_saturated_pixels(data, gdq, pdq, sat_thresh, sat_dq, ATOD_LIMIT, DQFLAGS)

-    sat_locs = np.where(np.bitwise_and(gdq, DQFLAGS['SATURATED']) ==
-                        DQFLAGS['SATURATED'])
+    sat_locs = np.where(np.bitwise_and(gdq, DQFLAGS["SATURATED"]) == DQFLAGS["SATURATED"])

-    '''
+    """
    print(f"dims = {dims}")
    print(f"len(sat_locs) = {len(sat_locs)}")
    for k in range(len(sat_locs)):
        ostr = np.array2string(sat_locs[k], separator=", ")
        print(f"sat_locs[{k}] = {ostr}")
-    '''
+    """

     # return

     assert sat_locs[0].all() == 0
-    assert np.all(sat_locs[1] == np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                                           0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                           1, 1, 1, 1, 1, 1]))
-    assert np.all(sat_locs[2] == np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3,
-                                           4, 4, 4, 0, 0, 0, 1, 1, 1, 2, 2, 2,
-                                           3, 3, 3, 4, 4, 4]))
-    assert np.all(sat_locs[3] == np.array([0, 1, 2, 0, 1, 2, 2, 3, 4, 2, 3, 4,
-                                           2, 3, 4, 0, 1, 2, 0, 1, 2, 2, 3, 4,
-                                           2, 3, 4, 2, 3, 4]))
+    assert np.all(
+        sat_locs[1]
+        == np.array(
+            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+        )
+    )
+    assert np.all(
+        sat_locs[2]
+        == np.array(
+            [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4]
+        )
+    )
+    assert np.all(
+        sat_locs[3]
+        == np.array(
+            [0, 1, 2, 0, 1, 2, 2, 3, 4, 2, 3, 4, 2, 3, 4, 0, 1, 2, 0, 1, 2, 2, 3, 4, 2, 3, 4, 2, 3, 4]
+        )
+    )


 def test_zero_frame():
@@ -173,11 +175,11 @@
     0 and 1, so the resulting zeroed-out ZEROFRAME pixels are swapped.
     """
-    darr1 = [11800., 11793., 11823., 11789., 11857.]
-    darr2 = [11800., 11793., 11823., 11789., 11857.]
-    darr3 = [10579., 10594., 10620., 10583., 10621.]
-    zarr = [11800., 10500., 10579.]
-    rarr = [11795., 11795., 60501.]
+    darr1 = [11800.0, 11793.0, 11823.0, 11789.0, 11857.0]
+    darr2 = [11800.0, 11793.0, 11823.0, 11789.0, 11857.0]
+    darr3 = [10579.0, 10594.0, 10620.0, 10583.0, 10621.0]
+    zarr = [11800.0, 10500.0, 10579.0]
+    rarr = [11795.0, 11795.0, 60501.0]

     nints, ngroups, nrows, ncols = 2, len(darr1), 1, len(zarr)
     dims = nints, ngroups, nrows, ncols
@@ -203,13 +205,11 @@ def test_zero_frame():
     ref[0, :] = np.array(rarr)

     # dictionary with required DQ flags
-    dqflags = {'DO_NOT_USE': 1, 'SATURATED': 2, 'AD_FLOOR': 64,
-               'NO_SAT_CHECK': 2097152}
+    dqflags = {"DO_NOT_USE": 1, "SATURATED": 2, "AD_FLOOR": 64, "NO_SAT_CHECK": 2097152}

-    atod_limit = 65535.  # Hard DN limit of 16-bit A-to-D converter
+    atod_limit = 65535.0  # Hard DN limit of 16-bit A-to-D converter

-    gdq, pdq, zframe = flag_saturated_pixels(
-        data, gdq, pdq, ref, rdq, atod_limit, dqflags, 0, zfrm)
+    gdq, pdq, zframe = flag_saturated_pixels(data, gdq, pdq, ref, rdq, atod_limit, dqflags, 0, zfrm)

     # Check DQ flags
     cdq = np.array([dqflags["SATURATED"]] * ngroups)
@@ -225,5 +225,5 @@
     np.testing.assert_array_equal(check, gdq)

     # Check ZEROFRAME flagged elements are zeroed out.
-    assert(zframe[0, 0, 0] == 0.)
-    assert(zframe[1, 0, 1] == 0.)
+ assert zframe[0, 0, 0] == 0.0 + assert zframe[1, 0, 1] == 0.0 diff --git a/tests/test_twopoint_difference.py b/tests/test_twopoint_difference.py index cb08ac6a..024c4bb7 100644 --- a/tests/test_twopoint_difference.py +++ b/tests/test_twopoint_difference.py @@ -4,12 +4,11 @@ from stcal.jump.twopoint_difference import find_crs, calc_med_first_diffs -DQFLAGS = {'JUMP_DET': 4, 'SATURATED': 2, 'DO_NOT_USE': 1} +DQFLAGS = {"JUMP_DET": 4, "SATURATED": 2, "DO_NOT_USE": 1} -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def setup_cube(): - def _cube(ngroups, readnoise=10): nints = 1 nrows = 204 @@ -28,11 +27,11 @@ def _cube(ngroups, readnoise=10): def test_nocrs_noflux(setup_cube): ngroups = 5 data, gdq, nframes, read_noise, rej_threshold = setup_cube(ngroups) - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) - assert(0 == np.max(out_gdq)) # no CR found + assert 0 == np.max(out_gdq) # no CR found def test_5grps_cr3_noflux(setup_cube): @@ -41,11 +40,11 @@ def test_5grps_cr3_noflux(setup_cube): data[0, 0:2, 100, 100] = 10.0 data[0, 2:5, 100, 100] = 1000 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(4 == np.max(out_gdq)) # a CR was found - assert(2 == np.argmax(out_gdq[0, :, 100, 100])) # find the CR in the expected group + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 4 == np.max(out_gdq) # a CR was found + assert 2 == np.argmax(out_gdq[0, :, 100, 100]) # find the CR in the expected group def test_5grps_cr2_noflux(setup_cube): @@ -54,11 +53,11 @@ def test_5grps_cr2_noflux(setup_cube): data[0, 0, 100, 100] = 10.0 data[0, 1:6, 100, 100] = 1000 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(4 == np.max(out_gdq)) # a CR was found - assert(1 == np.argmax(out_gdq[0, :, 100, 100])) # find the CR in the expected group + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 4 == np.max(out_gdq) # a CR was found + assert 1 == np.argmax(out_gdq[0, :, 100, 100]) # find the CR in the expected group def test_6grps_negative_differences_zeromedian(setup_cube): @@ -71,10 +70,10 @@ def test_6grps_negative_differences_zeromedian(setup_cube): data[0, 3, 100, 100] = 105 data[0, 4, 100, 100] = 100 data[0, 5, 100, 100] = 100 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(0 == np.max(out_gdq)) # no CR was found + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 0 == np.max(out_gdq) # no CR was found def test_5grps_cr2_negjumpflux(setup_cube): @@ -83,11 +82,11 @@ def 
test_5grps_cr2_negjumpflux(setup_cube): data[0, 0, 100, 100] = 1000.0 data[0, 1:6, 100, 100] = 10 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(4 == np.max(out_gdq)) # a CR was found - assert(1 == np.argmax(out_gdq[0, :, 100, 100])) # find the CR in the expected group + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 4 == np.max(out_gdq) # a CR was found + assert 1 == np.argmax(out_gdq[0, :, 100, 100]) # find the CR in the expected group def test_3grps_cr2_noflux(setup_cube): @@ -95,12 +94,12 @@ def test_3grps_cr2_noflux(setup_cube): data, gdq, nframes, read_noise, rej_threshold = setup_cube(ngroups) data[0, 0, 100, 100] = 10.0 data[0, 1:4, 100, 100] = 1000 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(4 == np.max(out_gdq)) # a CR was found + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 4 == np.max(out_gdq) # a CR was found # assert(1,np.argmax(out_gdq[0, :, 100, 100])) # find the CR in the expected group - assert(np.array_equal([0, 4, 0], out_gdq[0, :, 100, 100])) + assert np.array_equal([0, 4, 0], out_gdq[0, :, 100, 100]) def test_4grps_cr2_noflux(setup_cube): @@ -108,11 +107,11 @@ def test_4grps_cr2_noflux(setup_cube): data, gdq, nframes, read_noise, rej_threshold = setup_cube(ngroups) data[0, 0, 100, 100] = 10.0 data[0, 1:4, 100, 100] = 1000 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(4 == np.max(out_gdq)) # a CR was found - assert(1 == np.argmax(out_gdq[0, :, 100, 100])) # find the CR in the expected group + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 4 == np.max(out_gdq) # a CR was found + assert 1 == np.argmax(out_gdq[0, :, 100, 100]) # find the CR in the expected group def test_5grps_cr2_nframe2(setup_cube): @@ -124,11 +123,11 @@ def test_5grps_cr2_nframe2(setup_cube): data[0, 2, 100, 100] = 1002 data[0, 3, 100, 100] = 1001 data[0, 4, 100, 100] = 1005 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(4 == np.max(out_gdq)) # a CR was found - assert(np.array_equal([0, 4, 4, 0, 0], out_gdq[0, :, 100, 100])) + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 4 == np.max(out_gdq) # a CR was found + assert np.array_equal([0, 4, 4, 0, 0], out_gdq[0, :, 100, 100]) @pytest.mark.xfail @@ -140,10 +139,10 @@ def test_4grps_twocrs_2nd_4th(setup_cube): data[0, 1, 100, 100] = 60 data[0, 2, 100, 100] = 60 data[0, 3, 100, 100] = 115 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, 
DQFLAGS) - assert(np.max(out_gdq) == 4) # a CR was found + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert np.max(out_gdq) == 4 # a CR was found def test_5grps_twocrs_2nd_5th(setup_cube): @@ -155,11 +154,11 @@ def test_5grps_twocrs_2nd_5th(setup_cube): data[0, 2, 100, 100] = 60 data[0, 3, 100, 100] = 60 data[0, 4, 100, 100] = 115 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(4 == np.max(out_gdq)) # a CR was found - assert(np.array_equal([0, 4, 0, 0, 4], out_gdq[0, :, 100, 100])) + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 4 == np.max(out_gdq) # a CR was found + assert np.array_equal([0, 4, 0, 0, 4], out_gdq[0, :, 100, 100]) def test_5grps_twocrs_2nd_5thbig(setup_cube): @@ -171,11 +170,11 @@ def test_5grps_twocrs_2nd_5thbig(setup_cube): data[0, 2, 100, 100] = 60 data[0, 3, 100, 100] = 60 data[0, 4, 100, 100] = 2115 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(4 == np.max(out_gdq)) # a CR was found - assert(np.array_equal([0, 4, 0, 0, 4], out_gdq[0, :, 100, 100])) + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 4 == np.max(out_gdq) # a CR was found + assert np.array_equal([0, 4, 0, 0, 4], out_gdq[0, :, 100, 100]) def test_10grps_twocrs_2nd_8th_big(setup_cube): @@ -192,11 +191,11 @@ def test_10grps_twocrs_2nd_8th_big(setup_cube): data[0, 7, 100, 100] = 2115 data[0, 8, 100, 100] = 2115 data[0, 9, 100, 100] = 2115 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(4 == np.max(out_gdq)) # a CR was found - assert(np.array_equal([0, 4, 0, 0, 0, 0, 0, 4, 0, 0], out_gdq[0, :, 100, 100])) + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 4 == np.max(out_gdq) # a CR was found + assert np.array_equal([0, 4, 0, 0, 0, 0, 0, 4, 0, 0], out_gdq[0, :, 100, 100]) def test_10grps_twocrs_10percenthit(setup_cube): @@ -213,11 +212,11 @@ def test_10grps_twocrs_10percenthit(setup_cube): data[0:200, 7, 100, 100] = 2115 data[0:200, 8, 100, 100] = 2115 data[0:200, 9, 100, 100] = 2115 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(4 == np.max(out_gdq)) # a CR was found - assert(np.array_equal([0, 4, 0, 0, 0, 0, 0, 4, 0, 0], out_gdq[0, :, 100, 100])) + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 4 == np.max(out_gdq) # a CR was found + assert np.array_equal([0, 4, 0, 0, 0, 0, 0, 4, 0, 0], out_gdq[0, :, 100, 100]) def test_5grps_twocrs_2nd_5thbig_nframes2(setup_cube): @@ -229,11 +228,11 @@ 
def test_5grps_twocrs_2nd_5thbig_nframes2(setup_cube): data[0, 2, 100, 100] = 60 data[0, 3, 100, 100] = 60 data[0, 4, 100, 100] = 2115 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(4 == np.max(out_gdq)) # a CR was found - assert(np.array_equal([0, 4, 0, 0, 4], out_gdq[0, :, 100, 100])) + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 4 == np.max(out_gdq) # a CR was found + assert np.array_equal([0, 4, 0, 0, 4], out_gdq[0, :, 100, 100]) def test_6grps_twocrs_2nd_5th(setup_cube): @@ -246,10 +245,10 @@ def test_6grps_twocrs_2nd_5th(setup_cube): data[0, 3, 100, 100] = 60 data[0, 4, 100, 100] = 115 data[0, 5, 100, 100] = 115 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(4 == np.max(out_gdq)) # a CR was found + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 4 == np.max(out_gdq) # a CR was found assert np.array_equal([0, 4, 0, 0, 4, 0], out_gdq[0, :, 100, 100]) @@ -263,11 +262,11 @@ def test_6grps_twocrs_2nd_5th_nframes2(setup_cube): data[0, 3, 100, 100] = 60 data[0, 4, 100, 100] = 115 data[0, 5, 100, 100] = 115 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(4 == np.max(out_gdq)) # a CR was found - assert(np.array_equal([0, 4, 0, 0, 4, 0], out_gdq[0, :, 100, 100])) + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 4 == np.max(out_gdq) # a CR was found + assert np.array_equal([0, 4, 0, 0, 4, 0], out_gdq[0, :, 100, 100]) def test_6grps_twocrs_twopixels_nframes2(setup_cube): @@ -286,12 +285,12 @@ def test_6grps_twocrs_twopixels_nframes2(setup_cube): data[0, 3, 200, 100] = 60 data[0, 4, 200, 100] = 115 data[0, 5, 200, 100] = 115 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(4 == np.max(out_gdq)) # a CR was found - assert(np.array_equal([0, 4, 0, 0, 4, 0], out_gdq[0, :, 100, 100])) - assert(np.array_equal([0, 0, 4, 0, 4, 0], out_gdq[0, :, 200, 100])) + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 4 == np.max(out_gdq) # a CR was found + assert np.array_equal([0, 4, 0, 0, 4, 0], out_gdq[0, :, 100, 100]) + assert np.array_equal([0, 0, 4, 0, 4, 0], out_gdq[0, :, 200, 100]) def test_5grps_cr2_negslope(setup_cube): @@ -303,11 +302,11 @@ def test_5grps_cr2_negslope(setup_cube): data[0, 2, 100, 100] = -200 data[0, 3, 100, 100] = -260 data[0, 4, 100, 100] = -360 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(4 == np.max(out_gdq)) # a CR was found - assert(np.array_equal([0, 0, 4, 
0, 0], out_gdq[0, :, 100, 100])) + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 4 == np.max(out_gdq) # a CR was found + assert np.array_equal([0, 0, 4, 0, 0], out_gdq[0, :, 100, 100]) def test_6grps_1cr(setup_cube): @@ -320,10 +319,10 @@ def test_6grps_1cr(setup_cube): data[0, 3, 100, 100] = 33 data[0, 4, 100, 100] = 46 data[0, 5, 100, 100] = 1146 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert (4 == out_gdq[0, 5, 100, 100]) + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 4 == out_gdq[0, 5, 100, 100] def test_7grps_1cr(setup_cube): @@ -337,10 +336,10 @@ def test_7grps_1cr(setup_cube): data[0, 4, 100, 100] = 46 data[0, 5, 100, 100] = 60 data[0, 6, 100, 100] = 1160 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(4 == out_gdq[0, 6, 100, 100]) + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 4 == out_gdq[0, 6, 100, 100] def test_8grps_1cr(setup_cube): @@ -355,10 +354,10 @@ def test_8grps_1cr(setup_cube): data[0, 5, 100, 100] = 60 data[0, 6, 100, 100] = 1160 data[0, 7, 100, 100] = 1175 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(4 == out_gdq[0, 6, 100, 100]) + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 4 == out_gdq[0, 6, 100, 100] def test_9grps_1cr_1sat(setup_cube): @@ -374,11 +373,11 @@ def test_9grps_1cr_1sat(setup_cube): data[0, 6, 100, 100] = 1160 data[0, 7, 100, 100] = 1175 data[0, 8, 100, 100] = 6175 - gdq[0, 8, 100, 100] = DQFLAGS['SATURATED'] - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(4 == out_gdq[0, 6, 100, 100]) + gdq[0, 8, 100, 100] = DQFLAGS["SATURATED"] + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 4 == out_gdq[0, 6, 100, 100] def test_10grps_1cr_2sat(setup_cube): @@ -395,12 +394,12 @@ def test_10grps_1cr_2sat(setup_cube): data[0, 7, 100, 100] = 1175 data[0, 8, 100, 100] = 6175 data[0, 9, 100, 100] = 6175 - gdq[0, 8, 100, 100] = DQFLAGS['SATURATED'] - gdq[0, 9, 100, 100] = DQFLAGS['SATURATED'] - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(4 == out_gdq[0, 6, 100, 100]) + gdq[0, 8, 100, 100] = DQFLAGS["SATURATED"] + gdq[0, 9, 100, 100] = DQFLAGS["SATURATED"] + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, 
nframes, False, 200, 10, DQFLAGS + ) + assert 4 == out_gdq[0, 6, 100, 100] def test_11grps_1cr_3sat(setup_cube): @@ -418,13 +417,13 @@ def test_11grps_1cr_3sat(setup_cube): data[0, 8, 100, 100] = 6175 data[0, 9, 100, 100] = 6175 data[0, 10, 100, 100] = 6175 - gdq[0, 8, 100, 100] = DQFLAGS['SATURATED'] - gdq[0, 9, 100, 100] = DQFLAGS['SATURATED'] - gdq[0, 10, 100, 100] = DQFLAGS['SATURATED'] - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(4 == out_gdq[0, 6, 100, 100]) + gdq[0, 8, 100, 100] = DQFLAGS["SATURATED"] + gdq[0, 9, 100, 100] = DQFLAGS["SATURATED"] + gdq[0, 10, 100, 100] = DQFLAGS["SATURATED"] + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 4 == out_gdq[0, 6, 100, 100] def test_11grps_0cr_3donotuse(setup_cube): @@ -442,13 +441,13 @@ def test_11grps_0cr_3donotuse(setup_cube): data[0, 8, 100, 100] = 150 data[0, 9, 100, 100] = 6175 data[0, 10, 100, 100] = 6175 - gdq[0, 0, 100, 100] = DQFLAGS['DO_NOT_USE'] - gdq[0, 9, 100, 100] = DQFLAGS['DO_NOT_USE'] - gdq[0, 10, 100, 100] = DQFLAGS['DO_NOT_USE'] - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert (np.array_equal([0, 0, 0, 0, 0, 0, 0, 0], out_gdq[0, 1:-2, 100, 100])) + gdq[0, 0, 100, 100] = DQFLAGS["DO_NOT_USE"] + gdq[0, 9, 100, 100] = DQFLAGS["DO_NOT_USE"] + gdq[0, 10, 100, 100] = DQFLAGS["DO_NOT_USE"] + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert np.array_equal([0, 0, 0, 0, 0, 0, 0, 0], out_gdq[0, 1:-2, 100, 100]) def test_5grps_nocr(setup_cube): @@ -460,9 +459,9 @@ def test_5grps_nocr(setup_cube): data[0, 2, 100, 100] = 21 data[0, 3, 100, 100] = 33 data[0, 4, 100, 100] = 46 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) def test_6grps_nocr(setup_cube): @@ -475,9 +474,9 @@ def test_6grps_nocr(setup_cube): data[0, 3, 100, 100] = 33 data[0, 4, 100, 100] = 46 data[0, 5, 100, 100] = 60 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) def test_10grps_cr2_gt3sigma(setup_cube): @@ -487,11 +486,11 @@ def test_10grps_cr2_gt3sigma(setup_cube): nframes = 1 data[0, 0, 100, 100] = 0 data[0, 1:11, 100, 100] = crmag - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(4 == np.max(out_gdq)) # a CR was found - assert(np.array_equal([0, 4, 0, 0, 0, 0, 0, 0, 0, 0], out_gdq[0, :, 100, 100])) + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, 
read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS
+    )
+    assert 4 == np.max(out_gdq)  # a CR was found
+    assert np.array_equal([0, 4, 0, 0, 0, 0, 0, 0, 0, 0], out_gdq[0, :, 100, 100])


 def test_10grps_cr2_3sigma_nocr(setup_cube):
@@ -501,11 +500,11 @@ def test_10grps_cr2_3sigma_nocr(setup_cube):
     nframes = 1
     data[0, 0, 100, 100] = 0
     data[0, 1:11, 100, 100] = crmag
-    out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold,
-                                                                         rej_threshold, rej_threshold, nframes,
-                                                                         False, 200, 10, DQFLAGS)
-    assert(0 == np.max(out_gdq))  # a CR was found
-    assert(np.array_equal([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], out_gdq[0, :, 100, 100]))
+    out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(
+        data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS
+    )
+    assert 0 == np.max(out_gdq)  # no CR was found
+    assert np.array_equal([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], out_gdq[0, :, 100, 100])


 def test_10grps_cr2_gt3sigma_2frames(setup_cube):
@@ -515,11 +514,11 @@ def test_10grps_cr2_gt3sigma_2frames(setup_cube):
     nframes = 2
     data[0, 0, 100, 100] = 0
     data[0, 1:11, 100, 100] = crmag
-    out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold,
-                                                                         rej_threshold, rej_threshold, nframes,
-                                                                         False, 200, 10, DQFLAGS)
-    assert(4 == np.max(out_gdq))  # a CR was found
-    assert(np.array_equal([0, 4, 0, 0, 0, 0, 0, 0, 0, 0], out_gdq[0, :, 100, 100]))
+    out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(
+        data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS
+    )
+    assert 4 == np.max(out_gdq)  # a CR was found
+    assert np.array_equal([0, 4, 0, 0, 0, 0, 0, 0, 0, 0], out_gdq[0, :, 100, 100])


 def test_10grps_cr2_gt3sigma_2frames_offdiag(setup_cube):
@@ -529,11 +528,11 @@ def test_10grps_cr2_gt3sigma_2frames_offdiag(setup_cube):
     nframes = 2
     data[0, 0, 100, 110] = 0
     data[0, 1:11, 100, 110] = crmag
-    out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold,
-                                                                         rej_threshold, rej_threshold, nframes,
-                                                                         False, 200, 10, DQFLAGS)
-    assert(4 == np.max(out_gdq))  # a CR was found
-    assert(np.array_equal([0, 4, 0, 0, 0, 0, 0, 0, 0, 0], out_gdq[0, :, 100, 110]))
+    out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(
+        data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS
+    )
+    assert 4 == np.max(out_gdq)  # a CR was found
+    assert np.array_equal([0, 4, 0, 0, 0, 0, 0, 0, 0, 0], out_gdq[0, :, 100, 110])


 def test_10grps_cr2_3sigma_2frames_nocr(setup_cube):
@@ -543,11 +542,11 @@ def test_10grps_cr2_3sigma_2frames_nocr(setup_cube):
     nframes = 2
     data[0, 0, 100, 100] = 0
     data[0, 1:11, 100, 100] = crmag
-    out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold,
-                                                                         rej_threshold, rej_threshold, nframes,
-                                                                         False, 200, 10, DQFLAGS)
-    assert(0 == np.max(out_gdq))  # a CR was found
-    assert(np.array_equal([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], out_gdq[0, :, 100, 100]))
+    out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(
+        data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS
+    )
+    assert 0 == np.max(out_gdq)  # no CR was found
+    assert np.array_equal([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], out_gdq[0, :, 100, 100])


 def test_10grps_nocr_2pixels_sigma0(setup_cube):
@@ -559,10 +558,10 @@ def 
test_10grps_nocr_2pixels_sigma0(setup_cube): data[0, 1:11, 100, 100] = crmag read_noise[50, 50] = 0.0 read_noise[60, 60] = 0.0 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert(0 == np.max(out_gdq)) # no CR was found + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert 0 == np.max(out_gdq) # no CR was found def test_5grps_satat4_crat3(setup_cube): @@ -574,15 +573,15 @@ def test_5grps_satat4_crat3(setup_cube): data[0, 2, 100, 100] = 60000 data[0, 3, 100, 100] = 61000 data[0, 4, 100, 100] = 61000 - gdq[0, 3, 100, 100] = DQFLAGS['SATURATED'] - gdq[0, 4, 100, 100] = DQFLAGS['SATURATED'] - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) + gdq[0, 3, 100, 100] = DQFLAGS["SATURATED"] + gdq[0, 4, 100, 100] = DQFLAGS["SATURATED"] + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) # assert(4 == np.max(out_gdq)) # no CR was found assert np.array_equal( - [0, 0, DQFLAGS['JUMP_DET'], DQFLAGS['SATURATED'], DQFLAGS['SATURATED']], - out_gdq[0, :, 100, 100]) + [0, 0, DQFLAGS["JUMP_DET"], DQFLAGS["SATURATED"], DQFLAGS["SATURATED"]], out_gdq[0, :, 100, 100] + ) def test_6grps_satat6_crat1(setup_cube): @@ -602,12 +601,12 @@ def test_6grps_satat6_crat1(setup_cube): data[0, 3, 100, 101] = 25006 data[0, 4, 100, 101] = 30010 data[0, 5, 100, 101] = 35015 - gdq[0, 5, 100, 100] = DQFLAGS['SATURATED'] - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) + gdq[0, 5, 100, 100] = DQFLAGS["SATURATED"] + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) # assert(4 == np.max(out_gdq)) # no CR was found - assert (np.array_equal([0, DQFLAGS['JUMP_DET'], 0, 0, 0, DQFLAGS['SATURATED']], out_gdq[0, :, 100, 100])) + assert np.array_equal([0, DQFLAGS["JUMP_DET"], 0, 0, 0, DQFLAGS["SATURATED"]], out_gdq[0, :, 100, 100]) @pytest.mark.xfail @@ -628,13 +627,13 @@ def test_6grps_satat6_crat1_flagadjpixels(setup_cube): data[0, 3, 100, 101] = 25006 data[0, 4, 100, 101] = 30010 data[0, 5, 100, 101] = 35015 - gdq[0, 5, 100, 100] = DQFLAGS['SATURATED'] - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) + gdq[0, 5, 100, 100] = DQFLAGS["SATURATED"] + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) # assert(4 == np.max(out_gdq)) # no CR was found - assert (np.array_equal([0, DQFLAGS['JUMP_DET'], 0, 0, 0, DQFLAGS['SATURATED']], out_gdq[0, :, 100, 100])) - assert (np.array_equal([0, DQFLAGS['JUMP_DET'], 0, 0, 0, DQFLAGS['SATURATED']], out_gdq[0, :, 99, 100])) + assert np.array_equal([0, DQFLAGS["JUMP_DET"], 0, 0, 0, DQFLAGS["SATURATED"]], out_gdq[0, :, 100, 100]) + assert np.array_equal([0, DQFLAGS["JUMP_DET"], 0, 0, 0, 
DQFLAGS["SATURATED"]], out_gdq[0, :, 99, 100]) def test_10grps_satat8_crsat3and6(setup_cube): @@ -650,15 +649,26 @@ def test_10grps_satat8_crsat3and6(setup_cube): data[0, 5, 100, 100] = 40000 # CR data[0, 6, 100, 100] = 45000 data[0, 7:11, 100, 100] = 61000 - gdq[0, 7:11, 100, 100] = DQFLAGS['SATURATED'] - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) + gdq[0, 7:11, 100, 100] = DQFLAGS["SATURATED"] + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) # assert(4 == np.max(out_gdq)) # no CR was found assert np.array_equal( - [0, 0, DQFLAGS['JUMP_DET'], 0, 0, DQFLAGS['JUMP_DET'], 0, - DQFLAGS['SATURATED'], DQFLAGS['SATURATED'], DQFLAGS['SATURATED']], - out_gdq[0, :, 100, 100]) + [ + 0, + 0, + DQFLAGS["JUMP_DET"], + 0, + 0, + DQFLAGS["JUMP_DET"], + 0, + DQFLAGS["SATURATED"], + DQFLAGS["SATURATED"], + DQFLAGS["SATURATED"], + ], + out_gdq[0, :, 100, 100], + ) def test_median_with_saturation(setup_cube): @@ -675,11 +685,11 @@ def test_median_with_saturation(setup_cube): data[0, 6, 100, 100] = 44850 data[0, 7, 100, 100] = 49900 data[0, 8:10, 100, 100] = 60000 - gdq[0, 7:10, 100, 100] = DQFLAGS['SATURATED'] - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert (np.array_equal([0, 0, 0, 0, 0, 4, 0, 2, 2, 2], out_gdq[0, :, 100, 100])) + gdq[0, 7:10, 100, 100] = DQFLAGS["SATURATED"] + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert np.array_equal([0, 0, 0, 0, 0, 4, 0, 2, 2, 2], out_gdq[0, :, 100, 100]) def test_median_with_saturation_even_num_sat_frames(setup_cube): @@ -696,11 +706,11 @@ def test_median_with_saturation_even_num_sat_frames(setup_cube): data[0, 6, 100, 100] = 44850 data[0, 7, 100, 100] = 49900 data[0, 8:10, 100, 100] = 60000 - gdq[0, 6:10, 100, 100] = DQFLAGS['SATURATED'] - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert (np.array_equal([0, 0, 0, 0, 0, 4, 2, 2, 2, 2], out_gdq[0, :, 100, 100])) + gdq[0, 6:10, 100, 100] = DQFLAGS["SATURATED"] + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + assert np.array_equal([0, 0, 0, 0, 0, 4, 2, 2, 2, 2], out_gdq[0, :, 100, 100]) def test_median_with_saturation_odd_number_final_difference(setup_cube): @@ -717,11 +727,11 @@ def test_median_with_saturation_odd_number_final_difference(setup_cube): data[0, 6, 100, 100] = 44850 data[0, 7, 100, 100] = 49900 data[0, 8:9, 100, 100] = 60000 - gdq[0, 6:9, 100, 100] = DQFLAGS['SATURATED'] - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - assert (np.array_equal([0, 0, 0, 0, 0, 4, 2, 2, 2], out_gdq[0, :, 100, 100])) + gdq[0, 6:9, 100, 100] = DQFLAGS["SATURATED"] + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, 
nframes, False, 200, 10, DQFLAGS + ) + assert np.array_equal([0, 0, 0, 0, 0, 4, 2, 2, 2], out_gdq[0, :, 100, 100]) def test_first_last_group(setup_cube): @@ -744,15 +754,15 @@ def test_first_last_group(setup_cube): # set group 6 to be 50,000 data[0, 6, 100, 100] = 50000.0 - gdq[0, 0, 100, 100] = DQFLAGS['DO_NOT_USE'] - gdq[0, 6, 100, 100] = DQFLAGS['DO_NOT_USE'] - outgdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) + gdq[0, 0, 100, 100] = DQFLAGS["DO_NOT_USE"] + gdq[0, 6, 100, 100] = DQFLAGS["DO_NOT_USE"] + outgdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) - assert outgdq[0, 0, 100, 100] == DQFLAGS['DO_NOT_USE'] - assert outgdq[0, 6, 100, 100] == DQFLAGS['DO_NOT_USE'] - assert outgdq[0, 3, 100, 100] == DQFLAGS['JUMP_DET'] + assert outgdq[0, 0, 100, 100] == DQFLAGS["DO_NOT_USE"] + assert outgdq[0, 6, 100, 100] == DQFLAGS["DO_NOT_USE"] + assert outgdq[0, 3, 100, 100] == DQFLAGS["JUMP_DET"] def test_2group(setup_cube): @@ -764,9 +774,9 @@ def test_2group(setup_cube): # set groups 1,2 - to be around 30,000 data[0, 1, 0, 0] = 30000.0 - outgdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) + outgdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) assert outgdq[0, 1, 0, 0] == 0 assert outgdq[0, 0, 0, 0] == 0 @@ -781,9 +791,9 @@ def test_4group(setup_cube): data[0, 2, 0, 0] = 30020.0 data[0, 3, 0, 0] = 30000.0 - outgdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) + outgdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) assert outgdq[0, 1, 0, 0] == 4 @@ -799,14 +809,14 @@ def test_first_last_4group(setup_cube): data[0, 2, 0, 0] = 30020.0 data[0, 3, 0, 0] = 30000.0 # treat as MIRI data with first and last flagged - gdq[0, 0, :, :] = DQFLAGS['DO_NOT_USE'] - gdq[0, 3, :, :] = DQFLAGS['DO_NOT_USE'] - outgdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) - - assert outgdq[0, 0, 0, 0] == DQFLAGS['DO_NOT_USE'] - assert outgdq[0, 3, 0, 0] == DQFLAGS['DO_NOT_USE'] + gdq[0, 0, :, :] = DQFLAGS["DO_NOT_USE"] + gdq[0, 3, :, :] = DQFLAGS["DO_NOT_USE"] + outgdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) + + assert outgdq[0, 0, 0, 0] == DQFLAGS["DO_NOT_USE"] + assert outgdq[0, 3, 0, 0] == DQFLAGS["DO_NOT_USE"] assert outgdq[0, 1, 0, 0] == 0 @@ -822,13 +832,13 @@ def test_first_last_3group(setup_cube): data[0, 1, 0, 0] = 10100.0 data[0, 2, 0, 0] = 30020.0 - gdq[0, 2, 0, 0] = DQFLAGS['DO_NOT_USE'] # only flag the last group - outgdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS) + gdq[0, 2, 0, 0] = DQFLAGS["DO_NOT_USE"] # only flag the last 
group + outgdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS + ) assert outgdq[0, 0, 0, 0] == 0 - assert outgdq[0, 2, 0, 0] == DQFLAGS['DO_NOT_USE'] + assert outgdq[0, 2, 0, 0] == DQFLAGS["DO_NOT_USE"] assert outgdq[0, 1, 0, 0] == 0 @@ -848,11 +858,21 @@ def test_10grps_1cr_afterjump(setup_cube): data[0, 9, 100, 100] = 1209 after_jump_flag_e1 = np.full(data.shape[2:4], 1.0) * 0.0 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS, - after_jump_flag_e1=after_jump_flag_e1, - after_jump_flag_n1=10) + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, + gdq, + read_noise, + rej_threshold, + rej_threshold, + rej_threshold, + nframes, + False, + 200, + 10, + DQFLAGS, + after_jump_flag_e1=after_jump_flag_e1, + after_jump_flag_n1=10, + ) # all groups after CR should be flagged for k in range(6, 10): assert 4 == out_gdq[0, k, 100, 100], f"after jump flagging failed in group {k}" @@ -874,11 +894,21 @@ def test_10grps_1cr_afterjump_2group(setup_cube): data[0, 9, 100, 100] = 1209 after_jump_flag_e1 = np.full(data.shape[2:4], 1.0) * 0.0 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS, - after_jump_flag_e1=after_jump_flag_e1, - after_jump_flag_n1=2) + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, + gdq, + read_noise, + rej_threshold, + rej_threshold, + rej_threshold, + nframes, + False, + 200, + 10, + DQFLAGS, + after_jump_flag_e1=after_jump_flag_e1, + after_jump_flag_n1=2, + ) # 2 groups after CR should be flagged for k in range(6, 9): @@ -905,11 +935,21 @@ def test_10grps_1cr_afterjump_toosmall(setup_cube): data[0, 9, 100, 100] = 1209 after_jump_flag_e1 = np.full(data.shape[2:4], 1.0) * 10000.0 - out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS, - after_jump_flag_e1=after_jump_flag_e1, - after_jump_flag_n1=10) + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, + gdq, + read_noise, + rej_threshold, + rej_threshold, + rej_threshold, + nframes, + False, + 200, + 10, + DQFLAGS, + after_jump_flag_e1=after_jump_flag_e1, + after_jump_flag_n1=10, + ) # all groups after CR should be flagged for k in range(7, 10): assert 0 == out_gdq[0, k, 100, 100], f"after jump flagging incorrect in group {k}" @@ -930,15 +970,25 @@ def test_10grps_1cr_afterjump_twothresholds(setup_cube): data[0, 8, 100, 100] = 1190 data[0, 9, 100, 100] = 1209 - after_jump_flag_e1 = np.full(data.shape[2:4], 1.0) * 500. - after_jump_flag_e2 = np.full(data.shape[2:4], 1.0) * 10. 
- out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs(data, gdq, read_noise, rej_threshold, - rej_threshold, rej_threshold, nframes, - False, 200, 10, DQFLAGS, - after_jump_flag_e1=after_jump_flag_e1, - after_jump_flag_n1=10, - after_jump_flag_e2=after_jump_flag_e2, - after_jump_flag_n2=2) + after_jump_flag_e1 = np.full(data.shape[2:4], 1.0) * 500.0 + after_jump_flag_e2 = np.full(data.shape[2:4], 1.0) * 10.0 + out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( + data, + gdq, + read_noise, + rej_threshold, + rej_threshold, + rej_threshold, + nframes, + False, + 200, + 10, + DQFLAGS, + after_jump_flag_e1=after_jump_flag_e1, + after_jump_flag_n1=10, + after_jump_flag_e2=after_jump_flag_e2, + after_jump_flag_n2=2, + ) # 2 groups after CR should be flagged for k in range(2, 5): assert 4 == out_gdq[0, k, 100, 100], f"after jump flagging incorrect in group {k}" @@ -949,72 +999,71 @@ def test_10grps_1cr_afterjump_twothresholds(setup_cube): def test_median_func(): - """ - Test the function `calc_med_first_diffs` that computes median of pixels. - Ensure that the correct treatment based on number of non-nan diffs - is being done, and that it works for individual pixels as well as - pixels embedded in 3d arrays, and that it works for arrays with or - without nans (which represent masked pixels).""" + Test the function `calc_med_first_diffs` that computes median of pixels. + Ensure that the correct treatment based on number of non-nan diffs + is being done, and that it works for individual pixels as well as + pixels embedded in 3d arrays, and that it works for arrays with or + without nans (which represent masked pixels).""" # single pix with 5 good diffs, should clip 1 pix and return median # 1d, no nans - arr = np.array([1., 2., 3., 4., 5]) + arr = np.array([1.0, 2.0, 3.0, 4.0, 5]) assert calc_med_first_diffs(arr) == 2.5 # 3d array, no nans arr = np.zeros(5 * 2 * 2).reshape(5, 2, 2) - arr[:, 0, 0] = np.array([1., 2., 3., 4., 5]) + arr[:, 0, 0] = np.array([1.0, 2.0, 3.0, 4.0, 5]) assert calc_med_first_diffs(arr)[0, 0] == 2.5 # 1d, with nans - arr = np.array([1., 2., 3., np.nan, 4., 5, np.nan]) + arr = np.array([1.0, 2.0, 3.0, np.nan, 4.0, 5, np.nan]) assert calc_med_first_diffs(arr) == 2.5 # 3d, with nans arr = np.zeros(7 * 2 * 2).reshape(7, 2, 2) - arr[:, 0, 0] = np.array([1., 2., 3., np.nan, 4., 5, np.nan]) + arr[:, 0, 0] = np.array([1.0, 2.0, 3.0, np.nan, 4.0, 5, np.nan]) assert calc_med_first_diffs(arr)[0, 0] == 2.5 # single pix with exactly 4 good diffs, should also clip 1 pix and return median # 1d, no nans - arr = np.array([1., 2., 3., 4.]) + arr = np.array([1.0, 2.0, 3.0, 4.0]) assert calc_med_first_diffs(arr) == 2 # 3d array, no nans arr = np.zeros(4 * 2 * 2).reshape(4, 2, 2) - arr[:, 0, 0] = np.array([1., 2., 3., 4.]) + arr[:, 0, 0] = np.array([1.0, 2.0, 3.0, 4.0]) assert calc_med_first_diffs(arr)[0, 0] == 2 # 1d, with nans - arr = np.array([1., 2., 3., np.nan, 4., np.nan]) + arr = np.array([1.0, 2.0, 3.0, np.nan, 4.0, np.nan]) assert calc_med_first_diffs(arr) == 2 # 3d, with nans arr = np.zeros(6 * 2 * 2).reshape(6, 2, 2) - arr[:, 0, 0] = np.array([1., 2., 3., np.nan, 4., np.nan]) + arr[:, 0, 0] = np.array([1.0, 2.0, 3.0, np.nan, 4.0, np.nan]) assert calc_med_first_diffs(arr)[0, 0] == 2 # single pix with exactly 3 good diffs, should compute median without clipping - arr = np.array([1., 2., 3.]) + arr = np.array([1.0, 2.0, 3.0]) assert calc_med_first_diffs(arr) == 2 # 3d array, no nans arr = np.zeros(3 * 2 * 2).reshape(3, 2, 2) - arr[:, 0, 0] = 
np.array([1., 2., 3.]) + arr[:, 0, 0] = np.array([1.0, 2.0, 3.0]) assert calc_med_first_diffs(arr)[0, 0] == 2 # 1d, with nans - arr = np.array([1., 2., 3., np.nan, np.nan]) + arr = np.array([1.0, 2.0, 3.0, np.nan, np.nan]) assert calc_med_first_diffs(arr) == 2 # 3d, with nans arr = np.zeros(5 * 2 * 2).reshape(5, 2, 2) - arr[:, 0, 0] = np.array([1., 2., 3., np.nan, np.nan]) + arr[:, 0, 0] = np.array([1.0, 2.0, 3.0, np.nan, np.nan]) assert calc_med_first_diffs(arr)[0, 0] == 2 # # single pix with exactly 2 good diffs, should return the element with the minimum abs val - arr = np.array([-1., -2.]) + arr = np.array([-1.0, -2.0]) assert calc_med_first_diffs(arr) == -1 # 3d array, no nans arr = np.zeros(2 * 2 * 2).reshape(2, 2, 2) - arr[:, 0, 0] = np.array([-1., -2.]) + arr[:, 0, 0] = np.array([-1.0, -2.0]) assert calc_med_first_diffs(arr)[0, 0] == -1 # 1d, with nans - arr = np.array([-1., -2., np.nan, np.nan]) + arr = np.array([-1.0, -2.0, np.nan, np.nan]) assert calc_med_first_diffs(arr) == -1 # 3d, with nans arr = np.zeros(4 * 2 * 2).reshape(4, 2, 2) - arr[:, 0, 0] = np.array([-1., -2., np.nan, np.nan]) + arr[:, 0, 0] = np.array([-1.0, -2.0, np.nan, np.nan]) assert calc_med_first_diffs(arr)[0, 0] == -1 From 11578a91c680d92752264f495627b41bf492ae38 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 14:50:33 -0500 Subject: [PATCH 05/36] Enable import sort linting --- pyproject.toml | 7 ++++++- setup.py | 4 ++-- src/stcal/__init__.py | 1 - src/stcal/alignment/resample_utils.py | 4 +++- src/stcal/alignment/util.py | 15 ++++++--------- src/stcal/dark_current/dark_sub.py | 3 ++- src/stcal/dqflags.py | 4 ++-- src/stcal/jump/jump.py | 9 ++++----- src/stcal/jump/twopoint_difference.py | 5 +++-- src/stcal/ramp_fitting/gls_fit.py | 8 ++++---- src/stcal/ramp_fitting/ols_cas22/__init__.py | 2 +- src/stcal/ramp_fitting/ols_cas22_fit.py | 2 +- src/stcal/ramp_fitting/ols_fit.py | 10 ++++------ src/stcal/ramp_fitting/ramp_fit.py | 11 +++++++---- src/stcal/ramp_fitting/utils.py | 3 ++- src/stcal/saturation/saturation.py | 4 ++-- tests/test_alignment.py | 19 ++++++++----------- tests/test_dark_current.py | 6 ++---- tests/test_dq.py | 4 ++-- tests/test_jump.py | 9 ++++----- tests/test_jump_cas22.py | 9 +++------ tests/test_linearity.py | 1 - tests/test_ramp_fitting.py | 1 - tests/test_ramp_fitting_cas22.py | 1 - tests/test_ramp_fitting_cases.py | 5 ++--- tests/test_ramp_fitting_gls_fit.py | 6 ++---- tests/test_saturation.py | 1 - tests/test_twopoint_difference.py | 6 ++---- 28 files changed, 74 insertions(+), 86 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 16503c87..edb03e50 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -82,7 +82,12 @@ filterwarnings = [ [tool.ruff] line-length = 110 -select = ['F', 'W', 'E', 'C'] +select = [ + 'F', # Pyflakes + 'W', 'E', # pycodestyle + 'C', + 'I', # isort +] ignore = [ 'C901', # variable is too complex ] diff --git a/setup.py b/setup.py index bd962aab..e176149e 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,7 @@ -from setuptools import setup, Extension +import numpy as np from Cython.Build import cythonize from Cython.Compiler import Options -import numpy as np +from setuptools import Extension, setup Options.docstrings = True Options.annotate = False diff --git a/src/stcal/__init__.py b/src/stcal/__init__.py index 495c9ffd..544125be 100644 --- a/src/stcal/__init__.py +++ b/src/stcal/__init__.py @@ -1,4 +1,3 @@ from ._version import version as __version__ - __all__ = ["__version__"] diff --git a/src/stcal/alignment/resample_utils.py 
b/src/stcal/alignment/resample_utils.py index 7a0c04d3..3a2b06be 100644 --- a/src/stcal/alignment/resample_utils.py +++ b/src/stcal/alignment/resample_utils.py @@ -1,8 +1,10 @@ import logging + import numpy as np -from stcal.alignment import util from gwcs.wcstools import grid_from_bounding_box +from stcal.alignment import util + log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) diff --git a/src/stcal/alignment/util.py b/src/stcal/alignment/util.py index 66d4152f..b2a65fa8 100644 --- a/src/stcal/alignment/util.py +++ b/src/stcal/alignment/util.py @@ -2,23 +2,20 @@ Common utility functions for datamodel alignment. """ -import logging import functools +import logging from typing import List, Protocol, Union +import gwcs import numpy as np - -from astropy.coordinates import SkyCoord -from astropy.utils.misc import isiterable +from asdf import AsdfFile from astropy import units as u -from astropy.modeling import models as astmodels from astropy import wcs as fitswcs - -from asdf import AsdfFile -import gwcs +from astropy.coordinates import SkyCoord +from astropy.modeling import models as astmodels +from astropy.utils.misc import isiterable from gwcs.wcstools import wcs_from_fiducial - log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) diff --git a/src/stcal/dark_current/dark_sub.py b/src/stcal/dark_current/dark_sub.py index ef92edd6..080a59a0 100644 --- a/src/stcal/dark_current/dark_sub.py +++ b/src/stcal/dark_current/dark_sub.py @@ -3,9 +3,10 @@ # import copy -import numpy as np import logging +import numpy as np + from . import dark_class log = logging.getLogger(__name__) diff --git a/src/stcal/dqflags.py b/src/stcal/dqflags.py index aea9bd76..e71b6b28 100644 --- a/src/stcal/dqflags.py +++ b/src/stcal/dqflags.py @@ -8,9 +8,9 @@ try: from stdatamodels.dqflags import ( ap_interpret_bit_flags, - multiple_replace, - interpret_bit_flags, dqflags_to_mnemonics, + interpret_bit_flags, + multiple_replace, ) except ImportError: raise ImportError("dqflags has been moved to stdatamodels.dqflags, please install stdatamodels") diff --git a/src/stcal/jump/jump.py b/src/stcal/jump/jump.py index 6b1b6dcf..55b596b3 100644 --- a/src/stcal/jump/jump.py +++ b/src/stcal/jump/jump.py @@ -1,12 +1,11 @@ import logging import multiprocessing import time -import numpy as np -import cv2 as cv -import astropy.stats as stats -from astropy.convolution import Ring2DKernel -from astropy.convolution import convolve +import astropy.stats as stats +import cv2 as cv +import numpy as np +from astropy.convolution import Ring2DKernel, convolve from . import constants from . import twopoint_difference as twopt diff --git a/src/stcal/jump/twopoint_difference.py b/src/stcal/jump/twopoint_difference.py index 13ff8669..00d22458 100644 --- a/src/stcal/jump/twopoint_difference.py +++ b/src/stcal/jump/twopoint_difference.py @@ -1,8 +1,9 @@ import logging -import numpy as np -import astropy.stats as stats import warnings +import astropy.stats as stats +import numpy as np + log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) diff --git a/src/stcal/ramp_fitting/gls_fit.py b/src/stcal/ramp_fitting/gls_fit.py index d950975f..5bb83352 100644 --- a/src/stcal/ramp_fitting/gls_fit.py +++ b/src/stcal/ramp_fitting/gls_fit.py @@ -11,14 +11,14 @@ import logging -from multiprocessing.pool import Pool as Pool +import time from multiprocessing import cpu_count as cpu_count +from multiprocessing.pool import Pool as Pool + import numpy as np import numpy.linalg as la -import time -from . import ramp_fit_class -from . 
import utils +from . import ramp_fit_class, utils log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) diff --git a/src/stcal/ramp_fitting/ols_cas22/__init__.py b/src/stcal/ramp_fitting/ols_cas22/__init__.py index 439e4f1b..3d30b0ad 100644 --- a/src/stcal/ramp_fitting/ols_cas22/__init__.py +++ b/src/stcal/ramp_fitting/ols_cas22/__init__.py @@ -1,4 +1,4 @@ -from ._fit import fit_ramps, RampFitOutputs, Parameter, Variance +from ._fit import Parameter, RampFitOutputs, Variance, fit_ramps from ._jump import JUMP_DET __all__ = ["fit_ramps", "RampFitOutputs", "Parameter", "Variance", "Diff", "JUMP_DET"] diff --git a/src/stcal/ramp_fitting/ols_cas22_fit.py b/src/stcal/ramp_fitting/ols_cas22_fit.py index ced7c0ec..e90e3aaf 100644 --- a/src/stcal/ramp_fitting/ols_cas22_fit.py +++ b/src/stcal/ramp_fitting/ols_cas22_fit.py @@ -29,8 +29,8 @@ So the routines in these packages construct these different matrices, store them, and interpolate between them for different different fluxes and ratios. """ -from astropy import units as u import numpy as np +from astropy import units as u from . import ols_cas22 diff --git a/src/stcal/ramp_fitting/ols_fit.py b/src/stcal/ramp_fitting/ols_fit.py index 3bf9106e..ea6f759e 100644 --- a/src/stcal/ramp_fitting/ols_fit.py +++ b/src/stcal/ramp_fitting/ols_fit.py @@ -1,16 +1,14 @@ #! /usr/bin/env python import logging -from multiprocessing.pool import Pool as Pool -from multiprocessing import cpu_count as cpu_count -import numpy as np import time - import warnings +from multiprocessing import cpu_count as cpu_count +from multiprocessing.pool import Pool as Pool -from . import ramp_fit_class -from . import utils +import numpy as np +from . import ramp_fit_class, utils log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) diff --git a/src/stcal/ramp_fitting/ramp_fit.py b/src/stcal/ramp_fitting/ramp_fit.py index e87954e4..2c20b4ed 100755 --- a/src/stcal/ramp_fitting/ramp_fit.py +++ b/src/stcal/ramp_fitting/ramp_fit.py @@ -13,13 +13,16 @@ # In this module, comments on the 'first group','second group', etc are # 1-based, unless noted otherwise. +import logging + import numpy as np from astropy import units as u -import logging -from . import gls_fit # used only if algorithm is "GLS" -from . import ols_fit # used only if algorithm is "OLS" -from . import ramp_fit_class +from . 
import ( + gls_fit, # used only if algorithm is "GLS" + ols_fit, # used only if algorithm is "OLS" + ramp_fit_class, +) log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) diff --git a/src/stcal/ramp_fitting/utils.py b/src/stcal/ramp_fitting/utils.py index b29476b4..a0eadd94 100644 --- a/src/stcal/ramp_fitting/utils.py +++ b/src/stcal/ramp_fitting/utils.py @@ -2,9 +2,10 @@ # # utils.py: utility functions import logging -import numpy as np import warnings +import numpy as np + log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) diff --git a/src/stcal/saturation/saturation.py b/src/stcal/saturation/saturation.py index b109a30a..57402c3d 100644 --- a/src/stcal/saturation/saturation.py +++ b/src/stcal/saturation/saturation.py @@ -1,7 +1,7 @@ -import numpy as np +import copy import logging -import copy +import numpy as np from scipy import ndimage log = logging.getLogger(__name__) diff --git a/tests/test_alignment.py b/tests/test_alignment.py index f2a8150e..ecc3ea4e 100644 --- a/tests/test_alignment.py +++ b/tests/test_alignment.py @@ -1,25 +1,22 @@ +import gwcs import numpy as np - -from astropy.modeling import models +import pytest from astropy import coordinates as coord from astropy import units as u -from astropy.io import fits - from astropy import wcs as fitswcs -import gwcs +from astropy.io import fits +from astropy.modeling import models from gwcs import coordinate_frames as cf - -import pytest from stcal.alignment import resample_utils from stcal.alignment.util import ( + _validate_wcs_list, compute_fiducial, compute_scale, - wcs_from_footprints, - _validate_wcs_list, + reproject, + update_s_region_imaging, update_s_region_keyword, wcs_bbox_from_shape, - update_s_region_imaging, - reproject, + wcs_from_footprints, ) diff --git a/tests/test_dark_current.py b/tests/test_dark_current.py index d2356a90..5129edfe 100644 --- a/tests/test_dark_current.py +++ b/tests/test_dark_current.py @@ -2,15 +2,13 @@ Unit tests for dark current correction """ -import pytest import numpy as np +import pytest from numpy.testing import assert_allclose - +from stcal.dark_current.dark_class import DarkData, ScienceData from stcal.dark_current.dark_sub import average_dark_frames_3d as average_dark_frames from stcal.dark_current.dark_sub import do_correction_data as darkcorr -from stcal.dark_current.dark_class import DarkData, ScienceData - dqflags = { "DO_NOT_USE": 2**0, # Bad pixel. Do not use. 
"SATURATED": 2**1, # Pixel saturated during exposure diff --git a/tests/test_dq.py b/tests/test_dq.py index d7375e35..37b1494a 100644 --- a/tests/test_dq.py +++ b/tests/test_dq.py @@ -1,8 +1,8 @@ -import pytest import importlib - from contextlib import nullcontext +import pytest + try: import stdatamodels # noqa: F401 except ImportError: diff --git a/tests/test_jump.py b/tests/test_jump.py index e68684ef..024028f6 100644 --- a/tests/test_jump.py +++ b/tests/test_jump.py @@ -1,13 +1,12 @@ import numpy as np import pytest - from stcal.jump.jump import ( - flag_large_events, - find_ellipses, + calc_num_slices, extend_saturation, - point_inside_ellipse, + find_ellipses, find_faint_extended, - calc_num_slices, + flag_large_events, + point_inside_ellipse, ) DQFLAGS = {"JUMP_DET": 4, "SATURATED": 2, "DO_NOT_USE": 1, "GOOD": 0, "NO_GAIN_VALUE": 8} diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 28b7a810..fc802b51 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -1,18 +1,15 @@ import numpy as np import pytest from numpy.testing import assert_allclose - +from stcal.ramp_fitting.ols_cas22 import JUMP_DET, Parameter, Variance, fit_ramps from stcal.ramp_fitting.ols_cas22._jump import ( - fill_fixed_values, - _fill_pixel_values, FixedOffsets, PixelOffsets, + _fill_pixel_values, + fill_fixed_values, ) from stcal.ramp_fitting.ols_cas22._ramp import from_read_pattern, init_ramps -from stcal.ramp_fitting.ols_cas22 import fit_ramps, Parameter, Variance, JUMP_DET - - # Purposefully set a fixed seed so that the tests in this module are deterministic RNG = np.random.default_rng(619) diff --git a/tests/test_linearity.py b/tests/test_linearity.py index bdc2f12e..51ffee74 100644 --- a/tests/test_linearity.py +++ b/tests/test_linearity.py @@ -5,7 +5,6 @@ """ import numpy as np - from stcal.linearity.linearity import linearity_correction DQFLAGS = {"GOOD": 0, "DO_NOT_USE": 1, "SATURATED": 2, "DEAD": 1024, "HOT": 2048, "NO_LIN_CORR": 1048576} diff --git a/tests/test_ramp_fitting.py b/tests/test_ramp_fitting.py index 08ebe03f..1e0c5d7f 100644 --- a/tests/test_ramp_fitting.py +++ b/tests/test_ramp_fitting.py @@ -3,7 +3,6 @@ from stcal.ramp_fitting.ramp_fit_class import RampData from stcal.ramp_fitting.utils import compute_num_slices - DELIM = "=" * 70 # single group intergrations fail in the GLS fitting diff --git a/tests/test_ramp_fitting_cas22.py b/tests/test_ramp_fitting_cas22.py index d3b46589..02002f8f 100644 --- a/tests/test_ramp_fitting_cas22.py +++ b/tests/test_ramp_fitting_cas22.py @@ -4,7 +4,6 @@ import astropy.units as u import numpy as np import pytest - from stcal.ramp_fitting import ols_cas22_fit as ramp # Read Time in seconds diff --git a/tests/test_ramp_fitting_cases.py b/tests/test_ramp_fitting_cases.py index df302817..a3d27e0b 100644 --- a/tests/test_ramp_fitting_cases.py +++ b/tests/test_ramp_fitting_cases.py @@ -1,12 +1,11 @@ import inspect -import numpy as np -import numpy.testing as npt import os +import numpy as np +import numpy.testing as npt from stcal.ramp_fitting.ramp_fit import ramp_fit_data from stcal.ramp_fitting.ramp_fit_class import RampData - # # The first 12 tests are for a single ramp in a single integration. The ramps # have a variety of GROUPDQ vectors, with 1 or more segments in each ramp. 
The diff --git a/tests/test_ramp_fitting_gls_fit.py b/tests/test_ramp_fitting_gls_fit.py index 73aa2155..3bcd084f 100644 --- a/tests/test_ramp_fitting_gls_fit.py +++ b/tests/test_ramp_fitting_gls_fit.py @@ -1,8 +1,6 @@ -import pytest import numpy as np - -from stcal.ramp_fitting.ramp_fit import ramp_fit_data -from stcal.ramp_fitting.ramp_fit import ramp_fit_class +import pytest +from stcal.ramp_fitting.ramp_fit import ramp_fit_class, ramp_fit_data test_dq_flags = { "GOOD": 0, diff --git a/tests/test_saturation.py b/tests/test_saturation.py index fb207f77..189ae521 100644 --- a/tests/test_saturation.py +++ b/tests/test_saturation.py @@ -5,7 +5,6 @@ """ import numpy as np - from stcal.saturation.saturation import flag_saturated_pixels # dictionary with required DQ flags diff --git a/tests/test_twopoint_difference.py b/tests/test_twopoint_difference.py index 024c4bb7..78100525 100644 --- a/tests/test_twopoint_difference.py +++ b/tests/test_twopoint_difference.py @@ -1,8 +1,6 @@ -import pytest import numpy as np - -from stcal.jump.twopoint_difference import find_crs, calc_med_first_diffs - +import pytest +from stcal.jump.twopoint_difference import calc_med_first_diffs, find_crs DQFLAGS = {"JUMP_DET": 4, "SATURATED": 2, "DO_NOT_USE": 1} From 4910bc8692cd87d61c53ee4d161ffd546b7de6e8 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 14:57:25 -0500 Subject: [PATCH 06/36] Use the Ruff rules --- .pre-commit-config.yaml | 2 +- pyproject.toml | 30 ++++++++++++++++ src/stcal/alignment/util.py | 20 ++++++----- src/stcal/ramp_fitting/gls_fit.py | 6 ++-- src/stcal/ramp_fitting/ols_cas22_fit.py | 4 +-- src/stcal/ramp_fitting/ols_fit.py | 4 +-- src/stcal/ramp_fitting/utils.py | 48 ++++++++++++------------- 7 files changed, 73 insertions(+), 41 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b6839fa5..29169435 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -32,7 +32,7 @@ repos: # args: ["--py39-plus"] - repo: https://github.com/astral-sh/ruff-pre-commit - rev: 'v0.1.4' + rev: 'v0.1.5' hooks: - id: ruff args: ["--fix", "--show-fixes"] diff --git a/pyproject.toml b/pyproject.toml index edb03e50..7d79f380 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -87,6 +87,36 @@ select = [ 'W', 'E', # pycodestyle 'C', 'I', # isort + # 'N', # pep8-naming + # 'UP', # pyupgrade + # 'S', # flake8-bandit + # 'BLE', # flake8-blind-except + # 'B', # flake8-bugbear + # 'A', # flake8-builtins (prevent shadowing of builtins) + # 'C4', # flake8-comprehensions (best practices for comprehensions) + # 'T10', # flake8-debugger (prevent debugger statements in code) + # 'ISC', # flake8-implicit-str-concat (prevent implicit string concat) + # 'ICN', # flake8-import-conventions (enforce import conventions) + # 'INP', # flake8-no-pep420 (prevent use of PEP420, i.e. 
implicit name spaces) + # 'G', # flake8-logging-format (best practices for logging) + # 'PIE', # flake8-pie (misc suggested improvement linting) + # 'T20', # flake8-print (prevent print statements in code) + # 'PT', # flake8-pytest-style (best practices for pytest) + # 'Q', # flake8-quotes (best practices for quotes) + # 'RSE', # flake8-raise (best practices for raising exceptions) + # 'RET', # flake8-return (best practices for return statements) + # 'SLF', # flake8-self (prevent private member access) + # 'TID', # flake8-tidy-imports (prevent banned api and best import practices) + # 'INT', # flake8-gettext (when to use printf style strings) + # 'ARG', # flake8-unused-arguments (prevent unused arguments) + # 'PTH', # flake8-use-pathlib (prefer pathlib over os.path) + # 'ERA', # eradicate (remove commented out code) + # 'PGH', # pygrep (simple grep checks) + # 'PL', # pylint (general linting, flake8 alternative) + # 'FLY', # flynt (f-string conversion where possible) + # 'NPY', # NumPy-specific checks (recommendations from NumPy) + # 'PERF', # Perflint (performance linting) + 'RUF', # ruff specific checks ] ignore = [ 'C901', # variable is too complex diff --git a/src/stcal/alignment/util.py b/src/stcal/alignment/util.py index b2a65fa8..12fce324 100644 --- a/src/stcal/alignment/util.py +++ b/src/stcal/alignment/util.py @@ -2,9 +2,11 @@ Common utility functions for datamodel alignment. """ +from __future__ import annotations + import functools import logging -from typing import List, Protocol, Union +from typing import List, Protocol import gwcs import numpy as np @@ -70,9 +72,9 @@ def _calculate_fiducial_from_spatial_footprint( def _generate_tranform( refmodel: SupportsDataWithWcs, ref_fiducial: np.array, - pscale_ratio: int = None, - pscale: float = None, - rotation: float = None, + pscale_ratio: int | None = None, + pscale: float | None = None, + rotation: float | None = None, transform=None, ): """ @@ -396,9 +398,9 @@ def wcsinfo_from_model(input_model: SupportsDataWithWcs): def compute_scale( wcs: gwcs.WCS, - fiducial: Union[tuple, np.ndarray], - disp_axis: int = None, - pscale_ratio: float = None, + fiducial: tuple | np.ndarray, + disp_axis: int | None = None, + pscale_ratio: float | None = None, ) -> float: """Compute the scale at the fiducial point on the detector.. @@ -766,7 +768,7 @@ def _get_forward_transform_func(wcs1): y (str, ndarray), and origin (int). The origin should be between 0, and 1 https://docs.astropy.org/en/latest/wcs/index.html#loading-wcs-information-from-a-fits-file ) - """ # noqa: E501 + """ if isinstance(wcs1, fitswcs.WCS): forward_transform = wcs1.all_pix2world elif isinstance(wcs1, gwcs.WCS): @@ -784,7 +786,7 @@ def _get_backward_transform_func(wcs2): raise TypeError("Expected input to be astropy.wcs.WCS or gwcs.WCS " "object") return backward_transform - def _reproject(x: Union[float, np.ndarray], y: Union[float, np.ndarray]) -> tuple: + def _reproject(x: float | np.ndarray, y: float | np.ndarray) -> tuple: """ Reprojects the input coordinates from one WCS to another. diff --git a/src/stcal/ramp_fitting/gls_fit.py b/src/stcal/ramp_fitting/gls_fit.py index 5bb83352..acad8393 100644 --- a/src/stcal/ramp_fitting/gls_fit.py +++ b/src/stcal/ramp_fitting/gls_fit.py @@ -739,12 +739,12 @@ def create_opt_res(save_opt, dims, max_num_cr): if save_opt: # Create arrays for the fitted values of zero-point intercept and # cosmic-ray amplitudes, and their errors. 
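
The `RUF` (ruff-specific) checks drive the shape rewrites in this hunk and in the ols_cas22_fit.py, ols_fit.py, and utils.py hunks that follow: tuple concatenation used to build an array shape becomes a single tuple literal with in-place unpacking. A minimal sketch of the two equivalent spellings, using made-up sizes:

    import numpy as np

    number_ints, imshape = 3, (2048, 2048)

    # old spelling: concatenate tuples to build the shape
    a = np.zeros((number_ints,) + imshape, dtype=np.float32)
    # new spelling (flagged by RUF005): unpack into one tuple literal
    b = np.zeros((number_ints, *imshape), dtype=np.float32)

    assert a.shape == b.shape == (3, 2048, 2048)

Both build the same shape; the starred form just skips the intermediate tuple concatenation.
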
- intercept_int = np.zeros((number_ints,) + imshape, dtype=np.float32) - intercept_err_int = np.zeros((number_ints,) + imshape, dtype=np.float32) + intercept_int = np.zeros((number_ints, *imshape), dtype=np.float32) + intercept_err_int = np.zeros((number_ints, *imshape), dtype=np.float32) # The pedestal is the extrapolation of the first group back to zero # time, for each integration. - pedestal_int = np.zeros((number_ints,) + imshape, dtype=np.float32) + pedestal_int = np.zeros((number_ints, *imshape), dtype=np.float32) # The first group, for calculating the pedestal. (This only needs # to be nrows high, but we don't have nrows yet. xxx) diff --git a/src/stcal/ramp_fitting/ols_cas22_fit.py b/src/stcal/ramp_fitting/ols_cas22_fit.py index e90e3aaf..343bf4db 100644 --- a/src/stcal/ramp_fitting/ols_cas22_fit.py +++ b/src/stcal/ramp_fitting/ols_cas22_fit.py @@ -115,8 +115,8 @@ def fit_ramps_casertano( orig_shape = resultants.shape if len(resultants.shape) == 1: # single ramp. - resultants = resultants.reshape(orig_shape + (1,)) - dq = dq.reshape(orig_shape + (1,)) + resultants = resultants.reshape((*orig_shape, 1)) + dq = dq.reshape((*orig_shape, 1)) read_noise = read_noise.reshape(orig_shape[1:] + (1,)) output = ols_cas22.fit_ramps( diff --git a/src/stcal/ramp_fitting/ols_fit.py b/src/stcal/ramp_fitting/ols_fit.py index ea6f759e..693073fa 100644 --- a/src/stcal/ramp_fitting/ols_fit.py +++ b/src/stcal/ramp_fitting/ols_fit.py @@ -875,7 +875,7 @@ def ramp_fit_slopes(ramp_data, gain_2d, readnoise_2d, save_opt, weighting): # Get needed sizes and shapes n_int, ngroups, nrows, ncols = data.shape imshape = (nrows, ncols) - cubeshape = (ngroups,) + imshape + cubeshape = (ngroups, *imshape) # Get GROUP DQ and ERR arrays from input file gdq_cube = groupdq @@ -1096,7 +1096,7 @@ def ramp_fit_compute_variances(ramp_data, gain_2d, readnoise_2d, fit_slopes_ans) # Get needed sizes and shapes n_int, ngroups, nrows, ncols = data.shape imshape = (nrows, ncols) - cubeshape = (ngroups,) + imshape + cubeshape = (ngroups, *imshape) max_seg = fit_slopes_ans[0] num_seg_per_int = fit_slopes_ans[5] diff --git a/src/stcal/ramp_fitting/utils.py b/src/stcal/ramp_fitting/utils.py index a0eadd94..53837917 100644 --- a/src/stcal/ramp_fitting/utils.py +++ b/src/stcal/ramp_fitting/utils.py @@ -47,17 +47,17 @@ def __init__(self, n_int, imshape, max_seg, nreads, save_opt): save_opt : bool save optional fitting results """ - self.slope_seg = np.zeros((n_int,) + (max_seg,) + imshape, dtype=np.float32) + self.slope_seg = np.zeros((n_int, max_seg, *imshape), dtype=np.float32) if save_opt: - self.yint_seg = np.zeros((n_int,) + (max_seg,) + imshape, dtype=np.float32) - self.sigyint_seg = np.zeros((n_int,) + (max_seg,) + imshape, dtype=np.float32) - self.sigslope_seg = np.zeros((n_int,) + (max_seg,) + imshape, dtype=np.float32) - self.inv_var_seg = np.zeros((n_int,) + (max_seg,) + imshape, dtype=np.float32) - self.firstf_int = np.zeros((n_int,) + imshape, dtype=np.float32) - self.ped_int = np.zeros((n_int,) + imshape, dtype=np.float32) - self.cr_mag_seg = np.zeros((n_int,) + (nreads,) + imshape, dtype=np.float32) - self.var_p_seg = np.zeros((n_int,) + (max_seg,) + imshape, dtype=np.float32) - self.var_r_seg = np.zeros((n_int,) + (max_seg,) + imshape, dtype=np.float32) + self.yint_seg = np.zeros((n_int, max_seg, *imshape), dtype=np.float32) + self.sigyint_seg = np.zeros((n_int, max_seg, *imshape), dtype=np.float32) + self.sigslope_seg = np.zeros((n_int, max_seg, *imshape), dtype=np.float32) + self.inv_var_seg = np.zeros((n_int, max_seg, 
*imshape), dtype=np.float32) + self.firstf_int = np.zeros((n_int, *imshape), dtype=np.float32) + self.ped_int = np.zeros((n_int, *imshape), dtype=np.float32) + self.cr_mag_seg = np.zeros((n_int, nreads, *imshape), dtype=np.float32) + self.var_p_seg = np.zeros((n_int, max_seg, *imshape), dtype=np.float32) + self.var_r_seg = np.zeros((n_int, max_seg, *imshape), dtype=np.float32) def init_2d(self, npix, max_seg, save_opt): """ @@ -219,7 +219,7 @@ def shrink_crmag(self, n_int, dq_cube, imshape, nreads, jump_det): max_cr = max(max_cr, max_cr_int) # Allocate compressed array based on max number of crs - cr_com = np.zeros((n_int,) + (max_cr,) + imshape, dtype=np.float32) + cr_com = np.zeros((n_int, max_cr, *imshape), dtype=np.float32) # Loop over integrations and groups: for those pix having a cr, add # the magnitude to the compressed array @@ -361,11 +361,11 @@ def alloc_arrays_1(n_int, imshape): Integration-specific slice whose value for a pixel is 1 if the initial group of the ramp is saturated, 3-D uint8 """ - dq_int = np.zeros((n_int,) + imshape, dtype=np.uint32) - num_seg_per_int = np.zeros((n_int,) + imshape, dtype=np.uint8) + dq_int = np.zeros((n_int, *imshape), dtype=np.uint32) + num_seg_per_int = np.zeros((n_int, *imshape), dtype=np.uint8) # for estimated median slopes - sat_0th_group_int = np.zeros((n_int,) + imshape, dtype=np.uint8) + sat_0th_group_int = np.zeros((n_int, *imshape), dtype=np.uint8) return dq_int, num_seg_per_int, sat_0th_group_int @@ -436,7 +436,7 @@ def alloc_arrays_2(n_int, imshape, max_seg): # Initialize variances so that non-existing ramps and segments will have # negligible contributions # Integration-specific: - var_p3 = np.zeros((n_int,) + imshape, dtype=np.float32) + LARGE_VARIANCE + var_p3 = np.zeros((n_int, *imshape), dtype=np.float32) + LARGE_VARIANCE var_r3 = var_p3.copy() var_both3 = var_p3.copy() s_inv_var_p3 = np.zeros_like(var_p3) @@ -444,13 +444,13 @@ def alloc_arrays_2(n_int, imshape, max_seg): s_inv_var_both3 = np.zeros_like(var_p3) # Segment-specific: - var_p4 = np.zeros((n_int,) + (max_seg,) + imshape, dtype=np.float32) + LARGE_VARIANCE + var_p4 = np.zeros((n_int, max_seg, *imshape), dtype=np.float32) + LARGE_VARIANCE var_r4 = var_p4.copy() var_both4 = var_p4.copy() inv_var_both4 = np.zeros_like(var_p4) # number of segments - segs_4 = np.zeros((n_int,) + (max_seg,) + imshape, dtype=np.uint8) + segs_4 = np.zeros((n_int, max_seg, *imshape), dtype=np.uint8) return ( var_p3, @@ -941,7 +941,7 @@ def get_dataset_info(ramp_data): npix = asize2 * asize1 # number of pixels in 2D array imshape = (asize2, asize1) - cubeshape = (nreads,) + imshape + cubeshape = (nreads, *imshape) return (nreads, npix, imshape, cubeshape, n_int, instrume, frame_time, ngroups, group_time) @@ -1219,11 +1219,11 @@ def do_all_sat(ramp_data, pixeldq, groupdq, imshape, n_int, save_opt): groupdq_3d = np.bitwise_or(groupdq_3d, ramp_data.flags_do_not_use) - data = np.zeros((n_int,) + imshape, dtype=np.float32) + data = np.zeros((n_int, *imshape), dtype=np.float32) dq = groupdq_3d - var_poisson = np.zeros((n_int,) + imshape, dtype=np.float32) - var_rnoise = np.zeros((n_int,) + imshape, dtype=np.float32) - err = np.zeros((n_int,) + imshape, dtype=np.float32) + var_poisson = np.zeros((n_int, *imshape), dtype=np.float32) + var_rnoise = np.zeros((n_int, *imshape), dtype=np.float32) + err = np.zeros((n_int, *imshape), dtype=np.float32) integ_info = (data, dq, var_poisson, var_rnoise, err) else: @@ -1231,7 +1231,7 @@ def do_all_sat(ramp_data, pixeldq, groupdq, imshape, n_int, save_opt): # 
Create model for the optional output if save_opt: - new_arr = np.zeros((n_int,) + (1,) + imshape, dtype=np.float32) + new_arr = np.zeros((n_int, 1, *imshape), dtype=np.float32) slope = new_arr sigslope = new_arr @@ -1239,7 +1239,7 @@ def do_all_sat(ramp_data, pixeldq, groupdq, imshape, n_int, save_opt): var_rnoise = new_arr yint = new_arr sigyint = new_arr - pedestal = np.zeros((n_int,) + imshape, dtype=np.float32) + pedestal = np.zeros((n_int, *imshape), dtype=np.float32) weights = new_arr crmag = new_arr From dbee3867f7210812ce7c2e4052e35684972e858b Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 15:14:52 -0500 Subject: [PATCH 07/36] Add logging string linting --- pyproject.toml | 2 +- src/stcal/alignment/resample_utils.py | 4 ++-- src/stcal/alignment/util.py | 2 +- src/stcal/jump/jump.py | 12 ++++++------ src/stcal/jump/twopoint_difference.py | 9 +++++---- src/stcal/ramp_fitting/gls_fit.py | 24 ++++++++++++------------ src/stcal/ramp_fitting/ols_fit.py | 6 +++--- src/stcal/ramp_fitting/utils.py | 17 ++++++++++------- src/stcal/saturation/saturation.py | 4 ++-- 9 files changed, 42 insertions(+), 38 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 7d79f380..0a041550 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -98,7 +98,7 @@ select = [ # 'ISC', # flake8-implicit-str-concat (prevent implicit string concat) # 'ICN', # flake8-import-conventions (enforce import conventions) # 'INP', # flake8-no-pep420 (prevent use of PEP420, i.e. implicit name spaces) - # 'G', # flake8-logging-format (best practices for logging) + 'G', # flake8-logging-format (best practices for logging) # 'PIE', # flake8-pie (misc suggested improvement linting) # 'T20', # flake8-print (prevent print statements in code) # 'PT', # flake8-pytest-style (best practices for pytest) diff --git a/src/stcal/alignment/resample_utils.py b/src/stcal/alignment/resample_utils.py index 3a2b06be..fa4d3ad6 100644 --- a/src/stcal/alignment/resample_utils.py +++ b/src/stcal/alignment/resample_utils.py @@ -29,10 +29,10 @@ def calc_pixmap(in_wcs, out_wcs, shape=None): """ if shape: bb = util.wcs_bbox_from_shape(shape) - log.debug("Bounding box from data shape: {}".format(bb)) + log.debug("Bounding box from data shape: %s", bb) else: bb = util.wcs_bbox_from_shape(in_wcs.pixel_shape) - log.debug("Bounding box from WCS: {}".format(bb)) + log.debug("Bounding box from WCS: %s", bb) # creates 2 grids, one with rows of all x values * len(y) rows, # and the reverse for all y columns diff --git a/src/stcal/alignment/util.py b/src/stcal/alignment/util.py index 12fce324..a833b4da 100644 --- a/src/stcal/alignment/util.py +++ b/src/stcal/alignment/util.py @@ -739,7 +739,7 @@ def update_s_region_keyword(model, footprint): log.info("There are NaNs in s_region, S_REGION not updated.") else: model.meta.wcsinfo.s_region = s_region - log.info(f"Update S_REGION to {model.meta.wcsinfo.s_region}") + log.info("Update S_REGION to %s", model.meta.wcsinfo.s_region) def reproject(wcs1, wcs2): diff --git a/src/stcal/jump/jump.py b/src/stcal/jump/jump.py index 55b596b3..4552f092 100644 --- a/src/stcal/jump/jump.py +++ b/src/stcal/jump/jump.py @@ -292,7 +292,7 @@ def detect_jumps( sat_expand=sat_expand, max_extended_radius=max_extended_radius, ) - log.info("Total snowballs = %i" % total_snowballs) + log.info("Total snowballs = %i", total_snowballs) number_extended_events = total_snowballs if find_showers: gdq, num_showers = find_faint_extended( @@ -311,7 +311,7 @@ def detect_jumps( num_grps_masked=grps_masked_after_shower, 
max_extended_radius=max_extended_radius, ) - log.info("Total showers= %i" % num_showers) + log.info("Total showers= %i", num_showers) number_extended_events = num_showers else: yinc = int(n_rows / n_slices) @@ -379,7 +379,7 @@ def detect_jumps( only_use_ints, ), ) - log.info("Creating %d processes for jump detection " % n_slices) + log.info("Creating %d processes for jump detection ", n_slices) pool = multiprocessing.Pool(processes=n_slices) # Starts each slice in its own process. Starmap allows more than one # parameter to be passed. @@ -442,7 +442,7 @@ def detect_jumps( sat_expand=sat_expand, max_extended_radius=max_extended_radius, ) - log.info("Total snowballs = %i" % total_snowballs) + log.info("Total snowballs = %i", total_snowballs) number_extended_events = total_snowballs if find_showers: gdq, num_showers = find_faint_extended( @@ -461,10 +461,10 @@ def detect_jumps( num_grps_masked=grps_masked_after_shower, max_extended_radius=max_extended_radius, ) - log.info("Total showers= %i" % num_showers) + log.info("Total showers= %i", num_showers) number_extended_events = num_showers elapsed = time.time() - start - log.info("Total elapsed time = %g sec" % elapsed) + log.info("Total elapsed time = %g sec", elapsed) # Back out the applied gain to the SCI, ERR, and readnoise arrays so they're # back in units of DN diff --git a/src/stcal/jump/twopoint_difference.py b/src/stcal/jump/twopoint_difference.py index 00d22458..bb2dfe83 100644 --- a/src/stcal/jump/twopoint_difference.py +++ b/src/stcal/jump/twopoint_difference.py @@ -159,7 +159,7 @@ def find_crs( not only_use_ints and nints * ngrps < minimum_sigclip_groups and ngrps < minimum_groups ): log.info("Jump Step was skipped because exposure has less than the minimum number of usable groups") - log.info("Data shape {}".format(str(dat.shape))) + log.info("Data shape %s", dat.shape) dummy = np.zeros((dataa.shape[1] - 1, dataa.shape[2], dataa.shape[3]), dtype=np.float32) return gdq, row_below_gdq, row_above_gdq, 0, dummy else: @@ -193,9 +193,10 @@ def find_crs( not only_use_ints and total_groups >= minimum_sigclip_groups ): log.info( - " Jump Step using sigma clip {} greater than {}, rejection threshold {}".format( - str(total_groups), str(minimum_sigclip_groups), str(normal_rej_thresh) - ) + " Jump Step using sigma clip %s greater than %s, rejection threshold %s", + total_groups, + minimum_sigclip_groups, + normal_rej_thresh, ) warnings.filterwarnings("ignore", ".*All-NaN slice encountered.*", RuntimeWarning) warnings.filterwarnings("ignore", ".*Mean of empty slice.*", RuntimeWarning) diff --git a/src/stcal/ramp_fitting/gls_fit.py b/src/stcal/ramp_fitting/gls_fit.py index acad8393..bcf5e177 100644 --- a/src/stcal/ramp_fitting/gls_fit.py +++ b/src/stcal/ramp_fitting/gls_fit.py @@ -105,7 +105,7 @@ def gls_ramp_fit(ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, max_cores num_available_cores = cpu_count() number_slices = utils.compute_num_slices(max_cores, nrows, num_available_cores) - log.info(f"Number of data slices: {number_slices}") + log.info("Number of data slices: %s", number_slices) # Get needed sizes and shapes ( @@ -152,10 +152,10 @@ def gls_ramp_fit(ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, max_cores tstop = time.time() - log.info("Number of groups per integration: %d" % nreads) - log.info("Number of integrations: %d" % n_int) + log.info("Number of groups per integration: %d", nreads) + log.info("Number of integrations: %d", n_int) - log.debug(f"The execution time in seconds: {tstop - tstart:,}") + log.debug("The 
execution time in seconds: %d", tstop - tstart) return image_info, integ_info, gls_opt_info @@ -180,7 +180,7 @@ def gls_fit_multi(ramp_data, gain_2d, readnoise_2d, max_num_cr, save_opt, number number_slices: int The number of slices/cores to use for multiprocessing. """ - log.info(f"Number of processors used for multiprocessing: {number_slices}") + log.info("Number of processors used for multiprocessing: %s", number_slices) slices, rows_per_slice = compute_slices_for_starmap( ramp_data, save_opt, readnoise_2d, gain_2d, max_num_cr, number_slices ) @@ -350,12 +350,12 @@ def reassemble_opt(ramp_data, opt_res, opt_slice, crow, nrows): inter, err, pedestal, ampl, ampl_err = opt_slice # srow, erow = crow, crow + nrows - log.debug(f" ---> ({crow}, {crow + nrows})") - log.debug(f"inter = {inter.shape}") - log.debug(f"err = {err.shape}") - log.debug(f"pedestal = {pedestal.shape}") - log.debug(f"ampl = {ampl.shape}") - log.debug(f"ampl_err = {ampl_err.shape}") + log.debug(" ---> (%i, %i)", crow, crow + nrows) + log.debug("inter = %s", inter.shape) + log.debug("err = %s", err.shape) + log.debug("pedestal = %s", pedestal.shape) + log.debug("ampl = %s", ampl.shape) + log.debug("ampl_err = %s", ampl_err.shape) # TODO Dimension check """ @@ -1658,7 +1658,7 @@ def gls_fit( try: la.solve(temp_var[z], I_2) except la.LinAlgError as msg2: - log.warning("singular matrix, z = %d" % z) + log.warning("singular matrix, z = %d", z) raise la.LinAlgError(msg2) del I_2 diff --git a/src/stcal/ramp_fitting/ols_fit.py b/src/stcal/ramp_fitting/ols_fit.py index 693073fa..09c49ae4 100644 --- a/src/stcal/ramp_fitting/ols_fit.py +++ b/src/stcal/ramp_fitting/ols_fit.py @@ -157,7 +157,7 @@ def ols_ramp_fit_multiprocessing( opt_info: tuple The tuple of computed optional results arrays for fitting. """ - log.info(f"Number of processors used for multiprocessing: {number_slices}") + log.info("Number of processors used for multiprocessing: %s", number_slices) slices, rows_per_slice = compute_slices_for_starmap( ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, weighting, number_slices ) @@ -1473,8 +1473,8 @@ def ramp_fit_overall( log.debug("Instrument: %s", instrume) log.debug("Number of pixels in 2D array: %d", nrows * ncols) - log.debug("Shape of 2D image: (%d, %d)" % (imshape)) - log.debug("Shape of data cube: (%d, %d, %d)" % (orig_cubeshape)) + log.debug("Shape of 2D image: (%d, %d)", *imshape) + log.debug("Shape of data cube: (%d, %d, %d)", *orig_cubeshape) log.debug("Buffer size (bytes): %d", buffsize) log.debug("Number of rows per buffer: %d", nrows) log.info("Number of groups per integration: %d", orig_ngroups) diff --git a/src/stcal/ramp_fitting/utils.py b/src/stcal/ramp_fitting/utils.py index 53837917..0a8c1cba 100644 --- a/src/stcal/ramp_fitting/utils.py +++ b/src/stcal/ramp_fitting/utils.py @@ -880,11 +880,11 @@ def get_efftim_ped(ramp_data): log.error("Can not retrieve values needed to calculate integ. 
time") log.debug("Calculating effective integration time for a single group using:") - log.debug(" groupgap: %s" % (groupgap)) - log.debug(" nframes: %s" % (nframes)) - log.debug(" frame_time: %s" % (frame_time)) - log.debug(" dropframes1: %s" % (dropframes1)) - log.info("Effective integration time per group: %s" % (effintim)) + log.debug(" groupgap: %s", groupgap) + log.debug(" nframes: %s", nframes) + log.debug(" frame_time: %s", frame_time) + log.debug(" dropframes1: %s", dropframes1) + log.info("Effective integration time per group: %s", effintim) return effintim, nframes, groupgap, dropframes1 @@ -1271,8 +1271,11 @@ def log_stats(c_rates): log.debug("The number of pixels having insufficient data") log.debug("due to excessive CRs or saturation %d:", len(wh_c_0[0])) log.debug( - "Count rates - min, mean, max, std: %f, %f, %f, %f" - % (c_rates.min(), c_rates.mean(), c_rates.max(), c_rates.std()) + "Count rates - min, mean, max, std: %f, %f, %f, %f", + c_rates.min(), + c_rates.mean(), + c_rates.max(), + c_rates.std(), ) diff --git a/src/stcal/saturation/saturation.py b/src/stcal/saturation/saturation.py index 57402c3d..056deac6 100644 --- a/src/stcal/saturation/saturation.py +++ b/src/stcal/saturation/saturation.py @@ -120,9 +120,9 @@ def flag_saturated_pixels( zframe[ints] = plane n_sat = np.any(np.any(np.bitwise_and(gdq, saturated), axis=0), axis=0).sum() - log.info(f"Detected {n_sat} saturated pixels") + log.info("Detected %i saturated pixels", n_sat) n_floor = np.any(np.any(np.bitwise_and(gdq, ad_floor), axis=0), axis=0).sum() - log.info(f"Detected {n_floor} A/D floor pixels") + log.info("Detected %i A/D floor pixels", n_floor) pdq = np.bitwise_or(pdq, sat_dq) From 57fc7a1cca1d4cb3e23b358498685ef91a0a95dd Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 15:16:54 -0500 Subject: [PATCH 08/36] Use pyupgrade linting --- pyproject.toml | 4 ++-- src/stcal/alignment/util.py | 14 ++++++-------- src/stcal/ramp_fitting/utils.py | 10 +++++----- tests/test_ramp_fitting_cas22.py | 2 +- 4 files changed, 14 insertions(+), 16 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 0a041550..1fcf1343 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -88,7 +88,7 @@ select = [ 'C', 'I', # isort # 'N', # pep8-naming - # 'UP', # pyupgrade + 'UP', # pyupgrade # 'S', # flake8-bandit # 'BLE', # flake8-blind-except # 'B', # flake8-bugbear @@ -113,7 +113,7 @@ select = [ # 'ERA', # eradicate (remove commented out code) # 'PGH', # pygrep (simple grep checks) # 'PL', # pylint (general linting, flake8 alternative) - # 'FLY', # flynt (f-string conversion where possible) + 'FLY', # flynt (f-string conversion where possible) # 'NPY', # NumPy-specific checks (recommendations from NumPy) # 'PERF', # Perflint (performance linting) 'RUF', # ruff specific checks diff --git a/src/stcal/alignment/util.py b/src/stcal/alignment/util.py index a833b4da..b326374a 100644 --- a/src/stcal/alignment/util.py +++ b/src/stcal/alignment/util.py @@ -6,7 +6,7 @@ import functools import logging -from typing import List, Protocol +from typing import Protocol import gwcs import numpy as np @@ -381,7 +381,7 @@ def wcsinfo_from_model(input_model: SupportsDataWithWcs): for key in ["CRPIX", "CRVAL", "CDELT", "CTYPE", "CUNIT"]: val = [] for ax in range(1, wcsaxes + 1): - k = (key + "{0}".format(ax)).lower() + k = (key + f"{ax}").lower() v = getattr(input_model.meta.wcsinfo, k, defaults[key]) val.append(v) wcsinfo[key] = np.array(val) @@ -389,7 +389,7 @@ def wcsinfo_from_model(input_model: SupportsDataWithWcs): pc = 
np.zeros((wcsaxes, wcsaxes), dtype=np.float32) for i in range(1, wcsaxes + 1): for j in range(1, wcsaxes + 1): - pc[i - 1, j - 1] = getattr(input_model.meta.wcsinfo, "pc{0}_{1}".format(i, j), 1) + pc[i - 1, j - 1] = getattr(input_model.meta.wcsinfo, f"pc{i}_{j}", 1) wcsinfo["PC"] = pc wcsinfo["RADESYS"] = input_model.meta.coordinates.reference_frame wcsinfo["has_cd"] = False @@ -505,7 +505,7 @@ def compute_fiducial(wcslist: list, bounding_box=None) -> np.ndarray: return fiducial -def calc_rotation_matrix(roll_ref: float, v3i_yangle: float, vparity: int = 1) -> List[float]: +def calc_rotation_matrix(roll_ref: float, v3i_yangle: float, vparity: int = 1) -> list[float]: """Calculate the rotation matrix. Parameters @@ -729,10 +729,8 @@ def update_s_region_keyword(model, footprint): s_region : str String containing the S_REGION object. """ - s_region = ( - "POLYGON ICRS " " {0:.9f} {1:.9f}" " {2:.9f} {3:.9f}" " {4:.9f} {5:.9f}" " {6:.9f} {7:.9f}".format( - *footprint.flatten() - ) + s_region = "POLYGON ICRS " " {:.9f} {:.9f}" " {:.9f} {:.9f}" " {:.9f} {:.9f}" " {:.9f} {:.9f}".format( + *footprint.flatten() ) if "nan" in s_region: # do not update s_region if there are NaNs. diff --git a/src/stcal/ramp_fitting/utils.py b/src/stcal/ramp_fitting/utils.py index 0a8c1cba..af6e89c6 100644 --- a/src/stcal/ramp_fitting/utils.py +++ b/src/stcal/ramp_fitting/utils.py @@ -309,7 +309,7 @@ def print_full(self): # pragma: no cover """ print("Will now print all optional output arrays - ") print(" yint_seg: ") - print((self.yint_seg)) + print(self.yint_seg) print(" ") print(" slope_seg: ") print(self.slope_seg) @@ -321,16 +321,16 @@ def print_full(self): # pragma: no cover print(self.sigslope_seg) print(" ") print(" inv_var_2d: ") - print((self.inv_var_2d)) + print(self.inv_var_2d) print(" ") print(" firstf_int: ") - print((self.firstf_int)) + print(self.firstf_int) print(" ") print(" ped_int: ") - print((self.ped_int)) + print(self.ped_int) print(" ") print(" cr_mag_seg: ") - print((self.cr_mag_seg)) + print(self.cr_mag_seg) def alloc_arrays_1(n_int, imshape): diff --git a/tests/test_ramp_fitting_cas22.py b/tests/test_ramp_fitting_cas22.py index 02002f8f..148337cc 100644 --- a/tests/test_ramp_fitting_cas22.py +++ b/tests/test_ramp_fitting_cas22.py @@ -66,7 +66,7 @@ def test_simulated_ramps(use_unit, use_dq): # ramps passing the below criterion have at least two adjacent valid reads # i.e., we can make a measurement from them. 
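
The `UP` (pyupgrade) fixes above modernize syntax without changing behavior: `"pc{0}_{1}".format(i, j)` collapses to the f-string `f"pc{i}_{j}"`, and `List[float]` becomes the builtin generic `list[float]`, which is safe here because util.py already carries `from __future__ import annotations` and so does not evaluate its annotations at runtime. A quick sketch of the f-string equivalence:

    i, j = 1, 2
    assert "pc{0}_{1}".format(i, j) == f"pc{i}_{j}" == "pc1_2"

The reshape change just below is the same kind of cleanup: the extra parentheses in `okay.reshape((320 * 320))` never made a tuple, so dropping them leaves the call unchanged.
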
okay = np.sum((dq[1:, :] == 0) & (dq[:-1, :] == 0), axis=0) != 0 - okay = okay.reshape((320 * 320)) + okay = okay.reshape(320 * 320) # Sanity check that when no dq is used, all ramps are used if not use_dq: From 13589e81a688c5cab8e99c5a3e7d66105da67bb3 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 15:23:19 -0500 Subject: [PATCH 09/36] Add numpy linting --- pyproject.toml | 2 +- tests/test_ramp_fitting_cas22.py | 13 ++++++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 1fcf1343..4647b3b5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -114,7 +114,7 @@ select = [ # 'PGH', # pygrep (simple grep checks) # 'PL', # pylint (general linting, flake8 alternative) 'FLY', # flynt (f-string conversion where possible) - # 'NPY', # NumPy-specific checks (recommendations from NumPy) + 'NPY', # NumPy-specific checks (recommendations from NumPy) # 'PERF', # Perflint (performance linting) 'RUF', # ruff specific checks ] diff --git a/tests/test_ramp_fitting_cas22.py b/tests/test_ramp_fitting_cas22.py index 148337cc..6eaeb523 100644 --- a/tests/test_ramp_fitting_cas22.py +++ b/tests/test_ramp_fitting_cas22.py @@ -6,6 +6,9 @@ import pytest from stcal.ramp_fitting import ols_cas22_fit as ramp +# Purposefully set a fixed seed so that the tests in this module are deterministic +RNG = np.random.default_rng(42) + # Read Time in seconds # For Roman, the read time of the detectors is a fixed value and is currently # backed into code. Will need to refactor to consider the more general case. @@ -33,7 +36,7 @@ def test_simulated_ramps(use_unit, use_dq): # now let's mark a bunch of the ramps as compromised. When using dq flags if use_dq: - bad = np.random.uniform(size=resultants.shape) > 0.7 + bad = RNG.uniform(size=resultants.shape) > 0.7 dq |= bad output = ramp.fit_ramps_casertano( @@ -123,10 +126,10 @@ def simulate_many_ramps(ntrial=100, flux=100, readnoise=5, read_pattern=None): for i, reads in enumerate(read_pattern): subbuf = np.zeros(ntrial, dtype="i4") for _ in reads: - buf += np.random.poisson(ROMAN_READ_TIME * flux, ntrial) + buf += RNG.poisson(ROMAN_READ_TIME * flux, ntrial) subbuf += buf resultants[i] = (subbuf / len(reads)).astype("f4") - resultants += np.random.randn(len(read_pattern), ntrial) * (readnoise / np.sqrt(nread)).reshape( - len(read_pattern), 1 - ) + resultants += RNG.standard_normal(size=(len(read_pattern), ntrial)) * ( + readnoise / np.sqrt(nread) + ).reshape(len(read_pattern), 1) return (read_pattern, flux, readnoise, resultants) From c977670c4f82077796a2bfc0239a3c2271c975cd Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 15:26:09 -0500 Subject: [PATCH 10/36] Enable bandit linting --- pyproject.toml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 4647b3b5..33185103 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ select = [ 'I', # isort # 'N', # pep8-naming 'UP', # pyupgrade - # 'S', # flake8-bandit + 'S', # flake8-bandit # 'BLE', # flake8-blind-except # 'B', # flake8-bugbear # 'A', # flake8-builtins (prevent shadowing of builtins) @@ -129,6 +129,11 @@ exclude = [ '.eggs', ] +[tool.ruff.lint.extend-per-file-ignores] +"tests/*.py" = [ + "S101" +] + [tool.cibuildwheel.macos] archs = ["x86_64", "arm64"] From 4d816abcaea4a1860e8e82a83281f02665b301cc Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 15:31:40 -0500 Subject: [PATCH 11/36] Add bugbear linting --- pyproject.toml | 2 +- 
src/stcal/basic_utils.py | 7 +++++-- src/stcal/dqflags.py | 5 +++-- src/stcal/dynamicdq.py | 7 +++++-- src/stcal/ramp_fitting/gls_fit.py | 4 ++-- src/stcal/ramp_fitting/ols_fit.py | 6 +++--- 6 files changed, 19 insertions(+), 12 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 33185103..8c4e635e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -91,7 +91,7 @@ select = [ 'UP', # pyupgrade 'S', # flake8-bandit # 'BLE', # flake8-blind-except - # 'B', # flake8-bugbear + 'B', # flake8-bugbear # 'A', # flake8-builtins (prevent shadowing of builtins) # 'C4', # flake8-comprehensions (best practices for comprehensions) # 'T10', # flake8-debugger (prevent debugger statements in code) diff --git a/src/stcal/basic_utils.py b/src/stcal/basic_utils.py index 6f65408a..b9f60c8c 100644 --- a/src/stcal/basic_utils.py +++ b/src/stcal/basic_utils.py @@ -3,12 +3,15 @@ warnings.warn( "basic_utils has been moved to stdatamodels.basic_utils, please use that instead", DeprecationWarning, + stacklevel=2, ) try: from stdatamodels.basic_utils import multiple_replace -except ImportError: - raise ImportError("basic_utils has been moved to stdatamodels.basic_utils, please install stdatamodels") +except ImportError as err: + raise ImportError( + "basic_utils has been moved to stdatamodels.basic_utils, please install stdatamodels" + ) from err __all__ = [multiple_replace] diff --git a/src/stcal/dqflags.py b/src/stcal/dqflags.py index e71b6b28..f89fb566 100644 --- a/src/stcal/dqflags.py +++ b/src/stcal/dqflags.py @@ -3,6 +3,7 @@ warnings.warn( "dqflags has been moved to stdatamodels.dqflags, please use that instead", DeprecationWarning, + stacklevel=2, ) try: @@ -12,8 +13,8 @@ interpret_bit_flags, multiple_replace, ) -except ImportError: - raise ImportError("dqflags has been moved to stdatamodels.dqflags, please install stdatamodels") +except ImportError as err: + raise ImportError("dqflags has been moved to stdatamodels.dqflags, please install stdatamodels") from err __all__ = [ap_interpret_bit_flags, multiple_replace, interpret_bit_flags, dqflags_to_mnemonics] diff --git a/src/stcal/dynamicdq.py b/src/stcal/dynamicdq.py index 519fe43d..09145bc3 100644 --- a/src/stcal/dynamicdq.py +++ b/src/stcal/dynamicdq.py @@ -3,12 +3,15 @@ warnings.warn( "dynamicdq has been moved to stdatamodels.dynamicdq, please use that instead", DeprecationWarning, + stacklevel=2, ) try: from stdatamodels.dynamicdq import dynamic_mask -except ImportError: - raise ImportError("dynamicdq has been moved to stdatamodels.dynamicdq, please install stdatamodels") +except ImportError as err: + raise ImportError( + "dynamicdq has been moved to stdatamodels.dynamicdq, please install stdatamodels" + ) from err __all__ = [dynamic_mask] diff --git a/src/stcal/ramp_fitting/gls_fit.py b/src/stcal/ramp_fitting/gls_fit.py index bcf5e177..8440304d 100644 --- a/src/stcal/ramp_fitting/gls_fit.py +++ b/src/stcal/ramp_fitting/gls_fit.py @@ -1652,14 +1652,14 @@ def gls_fit( try: # inverse of temp_var fitparam_cov = la.solve(temp_var, I_2) - except la.LinAlgError: + except la.LinAlgError as err: # find the pixel with the singular matrix for z in range(nz): try: la.solve(temp_var[z], I_2) except la.LinAlgError as msg2: log.warning("singular matrix, z = %d", z) - raise la.LinAlgError(msg2) + raise la.LinAlgError(msg2) from err del I_2 # [xT @ ramp_invcov @ y] diff --git a/src/stcal/ramp_fitting/ols_fit.py b/src/stcal/ramp_fitting/ols_fit.py index 09c49ae4..746ef025 100644 --- a/src/stcal/ramp_fitting/ols_fit.py +++ b/src/stcal/ramp_fitting/ols_fit.py @@ 
-1728,8 +1728,8 @@ def calc_slope( # to fit well. for ii, val in enumerate(these_p): if these_r[ii] != (ngroups - 1): - end_st[end_heads[these_p[ii]], these_p[ii]] = these_r[ii] - end_heads[these_p[ii]] += 1 + end_st[end_heads[val], val] = these_r[ii] + end_heads[val] += 1 # Sort and reverse array to handle the order that saturated pixels # were added @@ -1745,7 +1745,7 @@ def calc_slope( # LS fit until 'ngroups' iterations or all pixels in # section have been processed - for iter_num in range(ngroups): + for _ in range(ngroups): if pixel_done.all(): break From 7b55b92df3755fb280e4db8b755aaab0ea458fc8 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 15:36:18 -0500 Subject: [PATCH 12/36] Add builtin name shadowing linting --- pyproject.toml | 2 +- src/stcal/ramp_fitting/gls_fit.py | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 8c4e635e..fcd80936 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -92,7 +92,7 @@ select = [ 'S', # flake8-bandit # 'BLE', # flake8-blind-except 'B', # flake8-bugbear - # 'A', # flake8-builtins (prevent shadowing of builtins) + 'A', # flake8-builtins (prevent shadowing of builtins) # 'C4', # flake8-comprehensions (best practices for comprehensions) # 'T10', # flake8-debugger (prevent debugger statements in code) # 'ISC', # flake8-implicit-str-concat (prevent implicit string concat) diff --git a/src/stcal/ramp_fitting/gls_fit.py b/src/stcal/ramp_fitting/gls_fit.py index 8440304d..ef37d93d 100644 --- a/src/stcal/ramp_fitting/gls_fit.py +++ b/src/stcal/ramp_fitting/gls_fit.py @@ -965,7 +965,7 @@ def determine_slope( use_extra_terms = True - iter = 0 + iter_ = 0 done = False if NUM_ITER_NO_EXTRA_TERMS <= 0: # Even the first iteration uses the extra terms. 
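
The `A` (flake8-builtins) rules flag local names that shadow Python builtins. In `determine_slope` the iteration counter was named `iter`, hiding the builtin `iter()` for the rest of the function; the hunks here rename it to `iter_`, the conventional trailing-underscore spelling for a taken name. A minimal sketch of the hazard:

    iter = 0             # shadows the builtin for this scope
    # iter([1, 2, 3])    # would now raise TypeError: 'int' object is not callable
    iter_ = 0            # trailing-underscore spelling leaves iter() usable
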
@@ -991,11 +991,11 @@ def determine_slope( temp_use_extra_terms, ) - iter += 1 - if iter == NUM_ITER_NO_EXTRA_TERMS: + iter_ += 1 + if iter_ == NUM_ITER_NO_EXTRA_TERMS: temp_use_extra_terms = use_extra_terms - if iter >= MAX_ITER: + if iter_ >= MAX_ITER: done = True else: # If there are pixels with zero or negative variance, ignore @@ -1004,7 +1004,7 @@ def determine_slope( slope_diff = np.where(slope_var_sect > 0.0, prev_slope_sect - slope_sect, 0.0) max_slope_diff = np.abs(slope_diff).max() - if iter >= MIN_ITER and max_slope_diff < slope_diff_cutoff: + if iter_ >= MIN_ITER and max_slope_diff < slope_diff_cutoff: done = True current_fit = evaluate_fit( From 317496d0dda4e9ebd11b54d50523983e8acd68e1 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 15:37:16 -0500 Subject: [PATCH 13/36] Add implicit string concat linting --- pyproject.toml | 6 +++--- src/stcal/alignment/util.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index fcd80936..73ee0dc2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -93,9 +93,9 @@ select = [ # 'BLE', # flake8-blind-except 'B', # flake8-bugbear 'A', # flake8-builtins (prevent shadowing of builtins) - # 'C4', # flake8-comprehensions (best practices for comprehensions) - # 'T10', # flake8-debugger (prevent debugger statements in code) - # 'ISC', # flake8-implicit-str-concat (prevent implicit string concat) + 'C4', # flake8-comprehensions (best practices for comprehensions) + 'T10', # flake8-debugger (prevent debugger statements in code) + 'ISC', # flake8-implicit-str-concat (prevent implicit string concat) # 'ICN', # flake8-import-conventions (enforce import conventions) # 'INP', # flake8-no-pep420 (prevent use of PEP420, i.e. implicit name spaces) 'G', # flake8-logging-format (best practices for logging) diff --git a/src/stcal/alignment/util.py b/src/stcal/alignment/util.py index b326374a..bc585dc6 100644 --- a/src/stcal/alignment/util.py +++ b/src/stcal/alignment/util.py @@ -729,7 +729,7 @@ def update_s_region_keyword(model, footprint): s_region : str String containing the S_REGION object. 
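
The `ISC` (implicit-str-concat) rules enabled here target adjacent string literals that fuse silently, which is more often a missing comma or a stray quote than intent. The change just below merges the accidentally split `s_region` format string into one literal (also dropping the doubled space the old fusion produced), and the same patch joins the two-piece `TypeError` messages in `reproject`. A minimal sketch of the pitfall:

    s = "POLYGON ICRS " " {:.9f}"
    assert s == "POLYGON ICRS  {:.9f}"      # fused, with a doubled space

    flags = ["JUMP_DET" "SATURATED"]
    assert flags == ["JUMP_DETSATURATED"]   # one element, not two
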
""" - s_region = "POLYGON ICRS " " {:.9f} {:.9f}" " {:.9f} {:.9f}" " {:.9f} {:.9f}" " {:.9f} {:.9f}".format( + s_region = "POLYGON ICRS {:.9f} {:.9f} {:.9f} {:.9f} {:.9f} {:.9f} {:.9f} {:.9f}".format( *footprint.flatten() ) if "nan" in s_region: @@ -772,7 +772,7 @@ def _get_forward_transform_func(wcs1): elif isinstance(wcs1, gwcs.WCS): forward_transform = wcs1.forward_transform else: - raise TypeError("Expected input to be astropy.wcs.WCS or gwcs.WCS " "object") + raise TypeError("Expected input to be astropy.wcs.WCS or gwcs.WCS object") return forward_transform def _get_backward_transform_func(wcs2): @@ -781,7 +781,7 @@ def _get_backward_transform_func(wcs2): elif isinstance(wcs2, gwcs.WCS): backward_transform = wcs2.backward_transform else: - raise TypeError("Expected input to be astropy.wcs.WCS or gwcs.WCS " "object") + raise TypeError("Expected input to be astropy.wcs.WCS or gwcs.WCS object") return backward_transform def _reproject(x: float | np.ndarray, y: float | np.ndarray) -> tuple: From a73d7942127c07311da8e192424184338a852d1e Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 15:38:56 -0500 Subject: [PATCH 14/36] Add misc improvement linting --- pyproject.toml | 6 +++--- src/stcal/ramp_fitting/ols_fit.py | 4 ++-- src/stcal/ramp_fitting/utils.py | 6 +++--- tests/test_dark_current.py | 16 ++++++++-------- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 73ee0dc2..a614fab0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -96,10 +96,10 @@ select = [ 'C4', # flake8-comprehensions (best practices for comprehensions) 'T10', # flake8-debugger (prevent debugger statements in code) 'ISC', # flake8-implicit-str-concat (prevent implicit string concat) - # 'ICN', # flake8-import-conventions (enforce import conventions) - # 'INP', # flake8-no-pep420 (prevent use of PEP420, i.e. implicit name spaces) + 'ICN', # flake8-import-conventions (enforce import conventions) + 'INP', # flake8-no-pep420 (prevent use of PEP420, i.e. 
implicit name spaces) 'G', # flake8-logging-format (best practices for logging) - # 'PIE', # flake8-pie (misc suggested improvement linting) + 'PIE', # flake8-pie (misc suggested improvement linting) # 'T20', # flake8-print (prevent print statements in code) # 'PT', # flake8-pytest-style (best practices for pytest) # 'Q', # flake8-quotes (best practices for quotes) diff --git a/src/stcal/ramp_fitting/ols_fit.py b/src/stcal/ramp_fitting/ols_fit.py index 746ef025..e2b5c6bf 100644 --- a/src/stcal/ramp_fitting/ols_fit.py +++ b/src/stcal/ramp_fitting/ols_fit.py @@ -910,7 +910,7 @@ def ramp_fit_slopes(ramp_data, gain_2d, readnoise_2d, save_opt, weighting): med_rates = utils.compute_median_rates(ramp_data) # Loop over data integrations: - for num_int in range(0, n_int): + for num_int in range(n_int): # Loop over data sections ramp_data.current_integ = num_int for rlo in range(0, cubeshape[1], nrows): @@ -1392,7 +1392,7 @@ def ramp_fit_overall( if save_opt: dq_slice = np.zeros((gdq_cube_shape[2], gdq_cube_shape[3]), dtype=np.uint32) - for num_int in range(0, n_int): + for num_int in range(n_int): dq_slice = groupdq[num_int, 0, :, :] opt_res.ped_int[num_int, :, :] = utils.calc_pedestal( ramp_data, num_int, slope_int, opt_res.firstf_int, dq_slice, nframes, groupgap, dropframes1 diff --git a/src/stcal/ramp_fitting/utils.py b/src/stcal/ramp_fitting/utils.py index af6e89c6..cde8c0fc 100644 --- a/src/stcal/ramp_fitting/utils.py +++ b/src/stcal/ramp_fitting/utils.py @@ -118,7 +118,7 @@ def reshape_res(self, num_int, rlo, rhi, sect_shape, ff_sect, save_opt): Returns ------- """ - for ii_seg in range(0, self.slope_seg.shape[1]): + for ii_seg in range(self.slope_seg.shape[1]): self.slope_seg[num_int, ii_seg, rlo:rhi, :] = self.slope_2d[ii_seg, :].reshape(sect_shape) if save_opt: @@ -212,7 +212,7 @@ def shrink_crmag(self, n_int, dq_cube, imshape, nreads, jump_det): # Loop over data integrations to find max num of crs flagged per pixel # (this could exceed the maximum number of segments fit) max_cr = 0 - for ii_int in range(0, n_int): + for ii_int in range(n_int): dq_int = dq_cube[ii_int, :, :, :] dq_cr = np.bitwise_and(jump_det, dq_int) max_cr_int = (dq_cr > 0.0).sum(axis=0).max() @@ -223,7 +223,7 @@ def shrink_crmag(self, n_int, dq_cube, imshape, nreads, jump_det): # Loop over integrations and groups: for those pix having a cr, add # the magnitude to the compressed array - for ii_int in range(0, n_int): + for ii_int in range(n_int): cr_mag_int = self.cr_mag_seg[ii_int, :, :, :] cr_int_has_cr = np.where(cr_mag_int.sum(axis=0) != 0) diff --git a/tests/test_dark_current.py b/tests/test_dark_current.py index 5129edfe..95ce2535 100644 --- a/tests/test_dark_current.py +++ b/tests/test_dark_current.py @@ -197,7 +197,7 @@ def test_more_sci_frames(make_rampmodel, make_darkmodel): dm_ramp.exp_groupgap = 0 # populate data array of science cube - for i in range(0, ngroups - 1): + for i in range(ngroups - 1): dm_ramp.data[0, i] = i refgroups = 5 @@ -205,7 +205,7 @@ def test_more_sci_frames(make_rampmodel, make_darkmodel): dark = make_darkmodel(refgroups, nrows, ncols) # populate data array of reference file - for i in range(0, refgroups - 1): + for i in range(refgroups - 1): dark.data[0, i] = i * 0.1 # apply correction @@ -236,7 +236,7 @@ def test_sub_by_frame(make_rampmodel, make_darkmodel): dm_ramp.exp_groupgap = 0 # populate data array of science cube - for i in range(0, ngroups - 1): + for i in range(ngroups - 1): dm_ramp.data[0, i] = i # create dark reference file model with more frames than science data @@ -244,7 
+244,7 @@ def test_sub_by_frame(make_rampmodel, make_darkmodel):
     dark = make_darkmodel(refgroups, nrows, ncols)
 
     # populate data array of reference file
-    for i in range(0, refgroups - 1):
+    for i in range(refgroups - 1):
         dark.data[0, i] = i * 0.1
 
     # apply correction
@@ -281,7 +281,7 @@ def test_nan(make_rampmodel, make_darkmodel):
     dm_ramp.exp_groupgap = 0
 
     # populate data array of science cube
-    for i in range(0, ngroups - 1):
+    for i in range(ngroups - 1):
         dm_ramp.data[0, i, :, :] = i
 
     # create dark reference file model with more frames than science data
@@ -289,7 +289,7 @@ def test_nan(make_rampmodel, make_darkmodel):
     dark = make_darkmodel(refgroups, nrows, ncols)
 
     # populate data array of reference file
-    for i in range(0, refgroups - 1):
+    for i in range(refgroups - 1):
         dark.data[0, i] = i * 0.1
 
     # set NaN in dark file
@@ -357,7 +357,7 @@ def test_frame_avg(make_rampmodel, make_darkmodel):
     dm_ramp.exp_groupgap = 0
 
     # populate data array of science cube
-    for i in range(0, ngroups - 1):
+    for i in range(ngroups - 1):
         dm_ramp.data[:, i] = i + 1
 
     # create dark reference file model
@@ -366,7 +366,7 @@ def test_frame_avg(make_rampmodel, make_darkmodel):
     dark = make_darkmodel(refgroups, nrows, ncols)
 
     # populate data array of reference file
-    for i in range(0, refgroups - 1):
+    for i in range(refgroups - 1):
         dark.data[0, i] = i * 0.1
 
     # apply correction

From 3e1f3feb7c981cce4162aa3fb795775d6bb8daf1 Mon Sep 17 00:00:00 2001
From: William Jamieson
Date: Fri, 10 Nov 2023 15:49:37 -0500
Subject: [PATCH 15/36] Add pytest linting

---
 pyproject.toml                    |  7 ++++---
 tests/test_alignment.py           | 16 +++++++---------
 tests/test_dark_current.py        |  8 ++++----
 tests/test_dq.py                  |  2 +-
 tests/test_jump.py                |  2 +-
 tests/test_jump_cas22.py          |  6 +++---
 tests/test_twopoint_difference.py |  6 +++---
 7 files changed, 23 insertions(+), 24 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index a614fab0..b2710356 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -101,9 +101,9 @@ select = [
     'G', # flake8-logging-format (best practices for logging)
     'PIE', # flake8-pie (misc suggested improvement linting)
     # 'T20', # flake8-print (prevent print statements in code)
-    # 'PT', # flake8-pytest-style (best practices for pytest)
-    # 'Q', # flake8-quotes (best practices for quotes)
-    # 'RSE', # flake8-raise (best practices for raising exceptions)
+    'PT', # flake8-pytest-style (best practices for pytest)
+    'Q', # flake8-quotes (best practices for quotes)
+    'RSE', # flake8-raise (best practices for raising exceptions)
     # 'RET', # flake8-return (best practices for return statements)
 ]
 ignore = [
     'C901', # variable is too complex
+    'ISC001', # interferes with the formatter
 ]
diff --git a/tests/test_alignment.py b/tests/test_alignment.py
index ecc3ea4e..caf503af 100644
--- a/tests/test_alignment.py
+++ b/tests/test_alignment.py
@@ -193,7 +193,7 @@ def test_validate_wcs_list():
 
 
 @pytest.mark.parametrize(
-    "wcs_list, expected_error",
+    ("wcs_list", "expected_error"),
     [
         ([], TypeError),
         ([1, 2, 3], TypeError),
@@ -205,11 +205,9 @@ def test_validate_wcs_list_invalid(wcs_list, expected_error):
-    with pytest.raises(Exception) as exec_info:
+    with pytest.raises(expected_error, match=r".*"):
         _validate_wcs_list(wcs_list)
 
-    assert type(exec_info.value) == expected_error
-
 
 def 
get_fake_wcs(): @pytest.mark.parametrize( - "x_inp, y_inp, x_expected, y_expected", + ("x_inp", "y_inp", "x_expected", "y_expected"), [ (1000, 2000, np.array(2000), np.array(4000)), # string input test ([1000], [2000], np.array(2000), np.array(4000)), # array input test @@ -271,7 +269,7 @@ def test_wcs_bbox_from_shape_2d(): @pytest.mark.parametrize( - "shape, pixmap_expected_shape", + ("shape", "pixmap_expected_shape"), [ (None, (4, 4, 2)), ((100, 200), (100, 200, 2)), @@ -285,7 +283,7 @@ def test_calc_pixmap_shape(shape, pixmap_expected_shape): @pytest.mark.parametrize( - "model, footprint, expected_s_region, expected_log_info", + ("model", "footprint", "expected_s_region", "expected_log_info"), [ ( _create_wcs_and_datamodel((10, 0), (3, 3), (0.000028, 0.000028)), @@ -311,7 +309,7 @@ def test_update_s_region_keyword(model, footprint, expected_s_region, expected_l @pytest.mark.parametrize( - "shape, expected_bbox", + ("shape", "expected_bbox"), [ ((100, 200), ((-0.5, 199.5), (-0.5, 99.5))), ((1, 1), ((-0.5, 0.5), (-0.5, 0.5))), @@ -327,7 +325,7 @@ def test_wcs_bbox_from_shape(shape, expected_bbox): @pytest.mark.parametrize( - "model, bounding_box, data", + ("model", "bounding_box", "data"), [ ( _create_wcs_and_datamodel((10, 0), (3, 3), (0.000028, 0.000028)), diff --git a/tests/test_dark_current.py b/tests/test_dark_current.py index 95ce2535..7470a505 100644 --- a/tests/test_dark_current.py +++ b/tests/test_dark_current.py @@ -25,7 +25,7 @@ DELIM = "-" * 80 -@pytest.fixture(scope="function") +@pytest.fixture() def make_rampmodel(): """Make MIRI Ramp model for testing""" @@ -49,7 +49,7 @@ def _ramp(nints, ngroups, nrows, ncols): return _ramp -@pytest.fixture(scope="function") +@pytest.fixture() def make_darkmodel(): """Make MIRI dark model for testing""" @@ -72,7 +72,7 @@ def _dark(ngroups, nrows, ncols): return _dark -@pytest.fixture(scope="function") +@pytest.fixture() def setup_nrc_cube(): """Set up fake NIRCam data to test.""" @@ -135,7 +135,7 @@ def _params(): return params -@pytest.mark.parametrize("readpatt, ngroups, nframes, groupgap, nrows, ncols", _params()) +@pytest.mark.parametrize(("readpatt", "ngroups", "nframes", "groupgap", "nrows", "ncols"), _params()) def test_frame_averaging(setup_nrc_cube, readpatt, ngroups, nframes, groupgap, nrows, ncols): """Check that if nframes>1 or groupgap>0, then the pipeline reconstructs the dark reference file to match the frame averaging and groupgap diff --git a/tests/test_dq.py b/tests/test_dq.py index 37b1494a..b5d8b514 100644 --- a/tests/test_dq.py +++ b/tests/test_dq.py @@ -11,7 +11,7 @@ HAS_STDATAMODELS = True -@pytest.mark.parametrize("name", ("dqflags", "dynamicdq", "basic_utils")) +@pytest.mark.parametrize("name", ["dqflags", "dynamicdq", "basic_utils"]) def test_deprecation(name): error = ( nullcontext() diff --git a/tests/test_jump.py b/tests/test_jump.py index 024028f6..04255a46 100644 --- a/tests/test_jump.py +++ b/tests/test_jump.py @@ -12,7 +12,7 @@ DQFLAGS = {"JUMP_DET": 4, "SATURATED": 2, "DO_NOT_USE": 1, "GOOD": 0, "NO_GAIN_VALUE": 8} -@pytest.fixture(scope="function") +@pytest.fixture() def setup_cube(): def _cube(ngroups, readnoise=10): nints = 1 diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index fc802b51..b193e613 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -100,7 +100,7 @@ def read_pattern(): metadata : dict The metadata computed from the read pattern """ - yield [ + return [ [1, 2, 3, 4], [5], [6, 7, 8], @@ -143,7 +143,7 @@ def ramp_data(read_pattern): """ data = 
from_read_pattern(read_pattern, READ_TIME, len(read_pattern))._to_dict() - yield data["t_bar"], data["tau"], data["n_reads"], read_pattern + return data["t_bar"], data["tau"], data["n_reads"], read_pattern def test_fill_fixed_values(ramp_data): @@ -255,7 +255,7 @@ def pixel_data(ramp_data): resultants = _generate_resultants(read_pattern) - yield resultants, t_bar, tau, n_reads, fixed + return resultants, t_bar, tau, n_reads, fixed def test__fill_pixel_values(pixel_data): diff --git a/tests/test_twopoint_difference.py b/tests/test_twopoint_difference.py index 78100525..30a3c84c 100644 --- a/tests/test_twopoint_difference.py +++ b/tests/test_twopoint_difference.py @@ -5,7 +5,7 @@ DQFLAGS = {"JUMP_DET": 4, "SATURATED": 2, "DO_NOT_USE": 1} -@pytest.fixture(scope="function") +@pytest.fixture() def setup_cube(): def _cube(ngroups, readnoise=10): nints = 1 @@ -128,7 +128,7 @@ def test_5grps_cr2_nframe2(setup_cube): assert np.array_equal([0, 4, 4, 0, 0], out_gdq[0, :, 100, 100]) -@pytest.mark.xfail +@pytest.mark.xfail() def test_4grps_twocrs_2nd_4th(setup_cube): ngroups = 4 data, gdq, nframes, read_noise, rej_threshold = setup_cube(ngroups) @@ -607,7 +607,7 @@ def test_6grps_satat6_crat1(setup_cube): assert np.array_equal([0, DQFLAGS["JUMP_DET"], 0, 0, 0, DQFLAGS["SATURATED"]], out_gdq[0, :, 100, 100]) -@pytest.mark.xfail +@pytest.mark.xfail() def test_6grps_satat6_crat1_flagadjpixels(setup_cube): ngroups = 6 # crmag = 1000 From df579796125c6f1c1dc50b13a6b2883e413bab8e Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 15:57:46 -0500 Subject: [PATCH 16/36] Add return statement linting --- pyproject.toml | 2 +- src/stcal/alignment/resample_utils.py | 3 +- src/stcal/alignment/util.py | 3 +- src/stcal/jump/jump.py | 22 +- src/stcal/jump/twopoint_difference.py | 509 +++++++++++++------------- src/stcal/ramp_fitting/gls_fit.py | 2 +- src/stcal/ramp_fitting/ols_fit.py | 29 +- src/stcal/ramp_fitting/utils.py | 11 +- src/stcal/saturation/saturation.py | 3 +- tests/test_ramp_fitting_cases.py | 4 +- 10 files changed, 281 insertions(+), 307 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index b2710356..c180c691 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -104,7 +104,7 @@ select = [ 'PT', # flake8-pytest-style (best practices for pytest) 'Q', # flake8-quotes (best practices for quotes) 'RSE', # flake8-raise (best practices for raising exceptions) - # 'RET', # flake8-return (best practices for return statements) + 'RET', # flake8-return (best practices for return statements) # 'SLF', # flake8-self (prevent private member access) # 'TID', # flake8-tidy-imports (prevent banned api and best import practices) # 'INT', # flake8-gettext (when to use printf style strings) diff --git a/src/stcal/alignment/resample_utils.py b/src/stcal/alignment/resample_utils.py index fa4d3ad6..b3de4d27 100644 --- a/src/stcal/alignment/resample_utils.py +++ b/src/stcal/alignment/resample_utils.py @@ -38,5 +38,4 @@ def calc_pixmap(in_wcs, out_wcs, shape=None): # and the reverse for all y columns grid = grid_from_bounding_box(bb) transform_function = util.reproject(in_wcs, out_wcs) - pixmap = np.dstack(transform_function(grid[0], grid[1])) - return pixmap + return np.dstack(transform_function(grid[0], grid[1])) diff --git a/src/stcal/alignment/util.py b/src/stcal/alignment/util.py index bc585dc6..e0202deb 100644 --- a/src/stcal/alignment/util.py +++ b/src/stcal/alignment/util.py @@ -345,7 +345,8 @@ def _validate_wcs_list(wcs_list): """ if not isiterable(wcs_list): raise ValueError("Expected 
'wcs_list' to be an iterable of WCS objects.") - elif len(wcs_list): + + if len(wcs_list): if not all(isinstance(w, gwcs.WCS) for w in wcs_list): raise TypeError("All items in 'wcs_list' are to be instances of gwcs.wcs.WCS.") else: diff --git a/src/stcal/jump/jump.py b/src/stcal/jump/jump.py index 4552f092..a42f6aef 100644 --- a/src/stcal/jump/jump.py +++ b/src/stcal/jump/jump.py @@ -689,8 +689,7 @@ def find_circles(dqplane, bitmask, min_area): pixels = np.bitwise_and(dqplane, bitmask) contours, hierarchy = cv.findContours(pixels, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) bigcontours = [con for con in contours if cv.contourArea(con) >= min_area] - circles = [cv.minEnclosingCircle(con) for con in bigcontours] - return circles + return [cv.minEnclosingCircle(con) for con in bigcontours] def find_ellipses(dqplane, bitmask, min_area): @@ -703,8 +702,7 @@ def find_ellipses(dqplane, bitmask, min_area): # minAreaRect is used becuase fitEllipse requires 5 points and it is # possible to have a contour # with just 4 points. - ellipses = [cv.minAreaRect(con) for con in bigcontours] - return ellipses + return [cv.minAreaRect(con) for con in bigcontours] def make_snowballs( @@ -764,25 +762,20 @@ def make_snowballs( def point_inside_ellipse(point, ellipse): delta_center = np.sqrt((point[0] - ellipse[0][0]) ** 2 + (point[1] - ellipse[0][1]) ** 2) minor_axis = min(ellipse[1][0], ellipse[1][1]) - if delta_center < minor_axis: - return True - else: - return False + + return delta_center < minor_axis def near_edge(jump, low_threshold, high_threshold): # This routing tests whether the center of a jump is close to the edge of # the detector. Jumps that are within the threshold will not requre a # saturated core since this may be off the detector - if ( + return ( jump[0][0] < low_threshold or jump[0][1] < low_threshold or jump[0][0] > high_threshold or jump[0][1] > high_threshold - ): - return True - else: - return False + ) def find_faint_extended( @@ -976,5 +969,4 @@ def calc_num_slices(n_rows, max_cores, max_available): elif max_cores == "all": n_slices = max_available # Make sure we don't have more slices than rows or available cores. - n_slices = min([n_rows, n_slices, max_available]) - return n_slices + return min([n_rows, n_slices, max_available]) diff --git a/src/stcal/jump/twopoint_difference.py b/src/stcal/jump/twopoint_difference.py index bb2dfe83..283c2471 100644 --- a/src/stcal/jump/twopoint_difference.py +++ b/src/stcal/jump/twopoint_difference.py @@ -161,268 +161,265 @@ def find_crs( log.info("Jump Step was skipped because exposure has less than the minimum number of usable groups") log.info("Data shape %s", dat.shape) dummy = np.zeros((dataa.shape[1] - 1, dataa.shape[2], dataa.shape[3]), dtype=np.float32) + return gdq, row_below_gdq, row_above_gdq, 0, dummy - else: - # set 'saturated' or 'do not use' pixels to nan in data - dat[np.where(np.bitwise_and(gdq, sat_flag))] = np.nan - dat[np.where(np.bitwise_and(gdq, dnu_flag))] = np.nan - dat[np.where(np.bitwise_and(gdq, dnu_flag + sat_flag))] = np.nan - - # calculate the differences between adjacent groups (first diffs) - # use mask on data, so the results will have sat/donotuse groups masked - first_diffs = np.diff(dat, axis=1) - - # calc. 
the median of first_diffs for each pixel along the group axis - first_diffs_masked = np.ma.masked_array(first_diffs, mask=np.isnan(first_diffs)) - median_diffs = np.ma.median(first_diffs_masked, axis=(0, 1)) - # calculate sigma for each pixel - sigma = np.sqrt(np.abs(median_diffs) + read_noise_2 / nframes) - - # reset sigma so pxels with 0 readnoise are not flagged as jumps - sigma[np.where(sigma == 0.0)] = np.nan - - # compute 'ratio' for each group. this is the value that will be - # compared to 'threshold' to classify jumps. subtract the median of - # first_diffs from first_diffs, take the abs. value and divide by sigma. - e_jump_4d = first_diffs - median_diffs[np.newaxis, :, :] - ratio_all = ( - np.abs(first_diffs - median_diffs[np.newaxis, np.newaxis, :, :]) - / sigma[np.newaxis, np.newaxis, :, :] + + # set 'saturated' or 'do not use' pixels to nan in data + dat[np.where(np.bitwise_and(gdq, sat_flag))] = np.nan + dat[np.where(np.bitwise_and(gdq, dnu_flag))] = np.nan + dat[np.where(np.bitwise_and(gdq, dnu_flag + sat_flag))] = np.nan + + # calculate the differences between adjacent groups (first diffs) + # use mask on data, so the results will have sat/donotuse groups masked + first_diffs = np.diff(dat, axis=1) + + # calc. the median of first_diffs for each pixel along the group axis + first_diffs_masked = np.ma.masked_array(first_diffs, mask=np.isnan(first_diffs)) + median_diffs = np.ma.median(first_diffs_masked, axis=(0, 1)) + # calculate sigma for each pixel + sigma = np.sqrt(np.abs(median_diffs) + read_noise_2 / nframes) + + # reset sigma so pxels with 0 readnoise are not flagged as jumps + sigma[np.where(sigma == 0.0)] = np.nan + + # compute 'ratio' for each group. this is the value that will be + # compared to 'threshold' to classify jumps. subtract the median of + # first_diffs from first_diffs, take the abs. value and divide by sigma. 
+ e_jump_4d = first_diffs - median_diffs[np.newaxis, :, :] + ratio_all = ( + np.abs(first_diffs - median_diffs[np.newaxis, np.newaxis, :, :]) / sigma[np.newaxis, np.newaxis, :, :] + ) + if (only_use_ints and nints >= minimum_sigclip_groups) or ( + not only_use_ints and total_groups >= minimum_sigclip_groups + ): + log.info( + " Jump Step using sigma clip %s greater than %s, rejection threshold %s", + total_groups, + minimum_sigclip_groups, + normal_rej_thresh, ) - if (only_use_ints and nints >= minimum_sigclip_groups) or ( - not only_use_ints and total_groups >= minimum_sigclip_groups - ): - log.info( - " Jump Step using sigma clip %s greater than %s, rejection threshold %s", - total_groups, - minimum_sigclip_groups, - normal_rej_thresh, + warnings.filterwarnings("ignore", ".*All-NaN slice encountered.*", RuntimeWarning) + warnings.filterwarnings("ignore", ".*Mean of empty slice.*", RuntimeWarning) + warnings.filterwarnings("ignore", ".*Degrees of freedom <= 0.*", RuntimeWarning) + + if only_use_ints: + mean, median, stddev = stats.sigma_clipped_stats( + first_diffs_masked, sigma=normal_rej_thresh, axis=0 + ) + clipped_diffs = stats.sigma_clip(first_diffs_masked, sigma=normal_rej_thresh, axis=0, masked=True) + else: + mean, median, stddev = stats.sigma_clipped_stats( + first_diffs_masked, sigma=normal_rej_thresh, axis=(0, 1) ) + clipped_diffs = stats.sigma_clip( + first_diffs_masked, sigma=normal_rej_thresh, axis=(0, 1), masked=True + ) + jump_mask = np.logical_and(clipped_diffs.mask, np.logical_not(first_diffs_masked.mask)) + jump_mask[np.bitwise_and(jump_mask, gdq[:, 1:, :, :] == sat_flag)] = False + jump_mask[np.bitwise_and(jump_mask, gdq[:, 1:, :, :] == dnu_flag)] = False + jump_mask[np.bitwise_and(jump_mask, gdq[:, 1:, :, :] == (dnu_flag + sat_flag))] = False + gdq[:, 1:, :, :] = np.bitwise_or(gdq[:, 1:, :, :], jump_mask * np.uint8(dqflags["JUMP_DET"])) + # if grp is all jump set to do not use + for integ in range(nints): + for grp in range(ngrps): + if np.all( + np.bitwise_or( + np.bitwise_and(gdq[integ, grp, :, :], jump_flag), + np.bitwise_and(gdq[integ, grp, :, :], dnu_flag), + ) + ): + jumpy, jumpx = np.where(gdq[integ, grp, :, :] == jump_flag) + gdq[integ, grp, jumpy, jumpx] = 0 + warnings.resetwarnings() + else: + for integ in range(nints): + # get data, gdq for this integration + dat = dataa[integ] + gdq_integ = gdq[integ] + + # set 'saturated' or 'do not use' pixels to nan in data + dat[np.where(np.bitwise_and(gdq_integ, sat_flag))] = np.nan + dat[np.where(np.bitwise_and(gdq_integ, dnu_flag))] = np.nan + + # calculate the differences between adjacent groups (first diffs) + # use mask on data, so the results will have sat/donotuse groups masked + first_diffs = np.diff(dat, axis=0) + + # calc. the median of first_diffs for each pixel along the group axis + median_diffs = calc_med_first_diffs(first_diffs) + + # calculate sigma for each pixel + sigma = np.sqrt(np.abs(median_diffs) + read_noise_2 / nframes) + # reset sigma so pxels with 0 readnoise are not flagged as jumps + sigma[np.where(sigma == 0.0)] = np.nan + + # compute 'ratio' for each group. this is the value that will be + # compared to 'threshold' to classify jumps. subtract the median of + # first_diffs from first_diffs, take the abs. value and divide by sigma. 
+ e_jump = first_diffs - median_diffs[np.newaxis, :, :] + ratio = np.abs(e_jump) / sigma[np.newaxis, :, :] + + # create a 2d array containing the value of the largest 'ratio' for each group warnings.filterwarnings("ignore", ".*All-NaN slice encountered.*", RuntimeWarning) - warnings.filterwarnings("ignore", ".*Mean of empty slice.*", RuntimeWarning) - warnings.filterwarnings("ignore", ".*Degrees of freedom <= 0.*", RuntimeWarning) - - if only_use_ints: - mean, median, stddev = stats.sigma_clipped_stats( - first_diffs_masked, sigma=normal_rej_thresh, axis=0 - ) - clipped_diffs = stats.sigma_clip( - first_diffs_masked, sigma=normal_rej_thresh, axis=0, masked=True - ) - else: - mean, median, stddev = stats.sigma_clipped_stats( - first_diffs_masked, sigma=normal_rej_thresh, axis=(0, 1) - ) - clipped_diffs = stats.sigma_clip( - first_diffs_masked, sigma=normal_rej_thresh, axis=(0, 1), masked=True - ) - jump_mask = np.logical_and(clipped_diffs.mask, np.logical_not(first_diffs_masked.mask)) - jump_mask[np.bitwise_and(jump_mask, gdq[:, 1:, :, :] == sat_flag)] = False - jump_mask[np.bitwise_and(jump_mask, gdq[:, 1:, :, :] == dnu_flag)] = False - jump_mask[np.bitwise_and(jump_mask, gdq[:, 1:, :, :] == (dnu_flag + sat_flag))] = False - gdq[:, 1:, :, :] = np.bitwise_or(gdq[:, 1:, :, :], jump_mask * np.uint8(dqflags["JUMP_DET"])) - # if grp is all jump set to do not use - for integ in range(nints): - for grp in range(ngrps): - if np.all( - np.bitwise_or( - np.bitwise_and(gdq[integ, grp, :, :], jump_flag), - np.bitwise_and(gdq[integ, grp, :, :], dnu_flag), - ) - ): - jumpy, jumpx = np.where(gdq[integ, grp, :, :] == jump_flag) - gdq[integ, grp, jumpy, jumpx] = 0 + max_ratio = np.nanmax(ratio, axis=0) warnings.resetwarnings() - else: - for integ in range(nints): - # get data, gdq for this integration - dat = dataa[integ] - gdq_integ = gdq[integ] - - # set 'saturated' or 'do not use' pixels to nan in data - dat[np.where(np.bitwise_and(gdq_integ, sat_flag))] = np.nan - dat[np.where(np.bitwise_and(gdq_integ, dnu_flag))] = np.nan - - # calculate the differences between adjacent groups (first diffs) - # use mask on data, so the results will have sat/donotuse groups masked - first_diffs = np.diff(dat, axis=0) - - # calc. the median of first_diffs for each pixel along the group axis - median_diffs = calc_med_first_diffs(first_diffs) - - # calculate sigma for each pixel - sigma = np.sqrt(np.abs(median_diffs) + read_noise_2 / nframes) - # reset sigma so pxels with 0 readnoise are not flagged as jumps - sigma[np.where(sigma == 0.0)] = np.nan - - # compute 'ratio' for each group. this is the value that will be - # compared to 'threshold' to classify jumps. subtract the median of - # first_diffs from first_diffs, take the abs. value and divide by sigma. - e_jump = first_diffs - median_diffs[np.newaxis, :, :] - ratio = np.abs(e_jump) / sigma[np.newaxis, :, :] - - # create a 2d array containing the value of the largest 'ratio' for each group - warnings.filterwarnings("ignore", ".*All-NaN slice encountered.*", RuntimeWarning) - max_ratio = np.nanmax(ratio, axis=0) - warnings.resetwarnings() - # now see if the largest ratio of all groups for each pixel exceeds the threshold. 
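The rewritten block above is the heart of the two-point difference jump test: each adjacent-group difference is compared, in sigma units, against a rejection threshold. A minimal single-pixel sketch of the same test (illustrative values; a plain median stands in for calc_med_first_diffs, and read_noise**2/nframes for the precomputed read_noise_2):

    import numpy as np

    counts = np.array([10.0, 20.0, 31.0, 95.0, 105.0])  # ramp with one jump
    read_noise = 5.0  # electrons
    nframes = 1
    rej_thresh = 4.0

    first_diffs = np.diff(counts)          # adjacent group differences
    median_diff = np.median(first_diffs)   # robust estimate of the count rate
    sigma = np.sqrt(np.abs(median_diff) + read_noise**2 / nframes)
    ratio = np.abs(first_diffs - median_diff) / sigma

    print(ratio > rej_thresh)  # [False False  True False]: the 31 -> 95 step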
- # there are different threshold for 4+, 3, and 2 usable groups - num_unusable_groups = np.sum(np.isnan(first_diffs), axis=0) - row4cr, col4cr = np.where( - np.logical_and(ndiffs - num_unusable_groups >= 4, max_ratio > normal_rej_thresh) - ) - row3cr, col3cr = np.where( - np.logical_and(ndiffs - num_unusable_groups == 3, max_ratio > three_diff_rej_thresh) - ) - row2cr, col2cr = np.where( - np.logical_and(ndiffs - num_unusable_groups == 2, max_ratio > two_diff_rej_thresh) - ) + # now see if the largest ratio of all groups for each pixel exceeds the threshold. + # there are different threshold for 4+, 3, and 2 usable groups + num_unusable_groups = np.sum(np.isnan(first_diffs), axis=0) + row4cr, col4cr = np.where( + np.logical_and(ndiffs - num_unusable_groups >= 4, max_ratio > normal_rej_thresh) + ) + row3cr, col3cr = np.where( + np.logical_and(ndiffs - num_unusable_groups == 3, max_ratio > three_diff_rej_thresh) + ) + row2cr, col2cr = np.where( + np.logical_and(ndiffs - num_unusable_groups == 2, max_ratio > two_diff_rej_thresh) + ) - # get the rows, col pairs for all pixels with at least one CR - all_crs_row = np.concatenate((row4cr, row3cr, row2cr)) - all_crs_col = np.concatenate((col4cr, col3cr, col2cr)) - - # iterate over all groups of the pix w/ an inital CR to look for subsequent CRs - # flag and clip the first CR found. recompute median/sigma/ratio - # and repeat the above steps of comparing the max 'ratio' for each pixel - # to the threshold to determine if another CR can be flagged and clipped. - # repeat this process until no more CRs are found. - for j in range(len(all_crs_row)): - # get arrays of abs(diffs), ratio, readnoise for this pixel - pix_first_diffs = first_diffs[:, all_crs_row[j], all_crs_col[j]] - pix_ratio = ratio[:, all_crs_row[j], all_crs_col[j]] - pix_rn2 = read_noise_2[all_crs_row[j], all_crs_col[j]] - - # Create a mask to flag CRs. pix_cr_mask = 0 denotes a CR - pix_cr_mask = np.ones(pix_first_diffs.shape, dtype=bool) - - # set the largest ratio as a CR - pix_cr_mask[np.nanargmax(pix_ratio)] = 0 - new_CR_found = True - - # loop and check for more CRs, setting the mask as you go and - # clipping the group with the CR. stop when no more CRs are found - # or there is only one two diffs left (which means there is - # actually one left, since the next CR will be masked after - # checking that condition) - while new_CR_found and (ndiffs - np.sum(np.isnan(pix_first_diffs)) > 2): - new_CR_found = False - - # set CRs to nans in first diffs to clip them - pix_first_diffs[~pix_cr_mask] = np.nan - - # recalculate median, sigma, and ratio - new_pix_median_diffs = calc_med_first_diffs(pix_first_diffs) - - new_pix_sigma = np.sqrt(np.abs(new_pix_median_diffs) + pix_rn2 / nframes) - new_pix_ratio = np.abs(pix_first_diffs - new_pix_median_diffs) / new_pix_sigma - - # check if largest ratio exceeds threhold appropriate for num remaining groups - - # select appropriate thresh. 
based on number of remaining groups - rej_thresh = normal_rej_thresh - if ndiffs - np.sum(np.isnan(pix_first_diffs)) == 3: - rej_thresh = three_diff_rej_thresh - if ndiffs - np.sum(np.isnan(pix_first_diffs)) == 2: - rej_thresh = two_diff_rej_thresh - new_pix_max_ratio_idx = np.nanargmax(new_pix_ratio) # index of largest ratio - if new_pix_ratio[new_pix_max_ratio_idx] > rej_thresh: - new_CR_found = True - pix_cr_mask[new_pix_max_ratio_idx] = 0 - unusable_diffs = np.sum(np.isnan(pix_first_diffs)) - # Found all CRs for this pix - set flags in input DQ array - gdq[integ, 1:, all_crs_row[j], all_crs_col[j]] = np.bitwise_or( - gdq[integ, 1:, all_crs_row[j], all_crs_col[j]], - dqflags["JUMP_DET"] * np.invert(pix_cr_mask), - ) + # get the rows, col pairs for all pixels with at least one CR + all_crs_row = np.concatenate((row4cr, row3cr, row2cr)) + all_crs_col = np.concatenate((col4cr, col3cr, col2cr)) + + # iterate over all groups of the pix w/ an inital CR to look for subsequent CRs + # flag and clip the first CR found. recompute median/sigma/ratio + # and repeat the above steps of comparing the max 'ratio' for each pixel + # to the threshold to determine if another CR can be flagged and clipped. + # repeat this process until no more CRs are found. + for j in range(len(all_crs_row)): + # get arrays of abs(diffs), ratio, readnoise for this pixel + pix_first_diffs = first_diffs[:, all_crs_row[j], all_crs_col[j]] + pix_ratio = ratio[:, all_crs_row[j], all_crs_col[j]] + pix_rn2 = read_noise_2[all_crs_row[j], all_crs_col[j]] + + # Create a mask to flag CRs. pix_cr_mask = 0 denotes a CR + pix_cr_mask = np.ones(pix_first_diffs.shape, dtype=bool) + + # set the largest ratio as a CR + pix_cr_mask[np.nanargmax(pix_ratio)] = 0 + new_CR_found = True + + # loop and check for more CRs, setting the mask as you go and + # clipping the group with the CR. stop when no more CRs are found + # or there is only one two diffs left (which means there is + # actually one left, since the next CR will be masked after + # checking that condition) + while new_CR_found and (ndiffs - np.sum(np.isnan(pix_first_diffs)) > 2): + new_CR_found = False + + # set CRs to nans in first diffs to clip them + pix_first_diffs[~pix_cr_mask] = np.nan + + # recalculate median, sigma, and ratio + new_pix_median_diffs = calc_med_first_diffs(pix_first_diffs) + + new_pix_sigma = np.sqrt(np.abs(new_pix_median_diffs) + pix_rn2 / nframes) + new_pix_ratio = np.abs(pix_first_diffs - new_pix_median_diffs) / new_pix_sigma + + # check if largest ratio exceeds threhold appropriate for num remaining groups + + # select appropriate thresh. 
based on number of remaining groups + rej_thresh = normal_rej_thresh + if ndiffs - np.sum(np.isnan(pix_first_diffs)) == 3: + rej_thresh = three_diff_rej_thresh + if ndiffs - np.sum(np.isnan(pix_first_diffs)) == 2: + rej_thresh = two_diff_rej_thresh + new_pix_max_ratio_idx = np.nanargmax(new_pix_ratio) # index of largest ratio + if new_pix_ratio[new_pix_max_ratio_idx] > rej_thresh: + new_CR_found = True + pix_cr_mask[new_pix_max_ratio_idx] = 0 + unusable_diffs = np.sum(np.isnan(pix_first_diffs)) + # Found all CRs for this pix - set flags in input DQ array + gdq[integ, 1:, all_crs_row[j], all_crs_col[j]] = np.bitwise_or( + gdq[integ, 1:, all_crs_row[j], all_crs_col[j]], + dqflags["JUMP_DET"] * np.invert(pix_cr_mask), + ) - cr_integ, cr_group, cr_row, cr_col = np.where(np.bitwise_and(gdq, jump_flag)) - num_primary_crs = len(cr_group) - if flag_4_neighbors: # iterate over each 'jump' pixel + cr_integ, cr_group, cr_row, cr_col = np.where(np.bitwise_and(gdq, jump_flag)) + num_primary_crs = len(cr_group) + if flag_4_neighbors: # iterate over each 'jump' pixel + for j in range(len(cr_group)): + ratio_this_pix = ratio_all[cr_integ[j], cr_group[j] - 1, cr_row[j], cr_col[j]] + + # Jumps must be in a certain range to have neighbors flagged + if (ratio_this_pix < max_jump_to_flag_neighbors) and ( + ratio_this_pix > min_jump_to_flag_neighbors + ): + integ = cr_integ[j] + group = cr_group[j] + row = cr_row[j] + col = cr_col[j] + + # This section saves flagged neighbors that are above or + # below the current range of row. If this method + # running in a single process, the row above and below are + # not used. If it is running in multiprocessing mode, then + # the rows above and below need to be returned to + # find_jumps to use when it reconstructs the full group dq + # array from the slices. + + # Only flag adjacent pixels if they do not already have the + # 'SATURATION' or 'DONOTUSE' flag set + if row != 0: + if (gdq[integ, group, row - 1, col] & sat_flag) == 0: + if (gdq[integ, group, row - 1, col] & dnu_flag) == 0: + gdq[integ, group, row - 1, col] = np.bitwise_or( + gdq[integ, group, row - 1, col], jump_flag + ) + else: + row_below_gdq[integ, cr_group[j], cr_col[j]] = jump_flag + + if row != nrows - 1: + if (gdq[integ, group, row + 1, col] & sat_flag) == 0: + if (gdq[integ, group, row + 1, col] & dnu_flag) == 0: + gdq[integ, group, row + 1, col] = np.bitwise_or( + gdq[integ, group, row + 1, col], jump_flag + ) + else: + row_above_gdq[integ, cr_group[j], cr_col[j]] = jump_flag + + # Here we are just checking that we don't flag neighbors of + # jumps that are off the detector. 
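A small sketch of the neighbor flagging performed around this comment: the four direct neighbors of a jump pixel receive JUMP_DET, with simple bounds checks standing in for the per-edge branches and the SATURATED/DO_NOT_USE guards in the code (illustrative values, one DQ plane; JUMP_DET=4 as in the test suite's DQFLAGS):

    import numpy as np

    JUMP_DET = 4
    gdq = np.zeros((5, 5), dtype=np.uint8)  # one group's DQ plane
    row, col = 2, 0                         # jump found at the left edge

    for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
        r, c = row + dr, col + dc
        if 0 <= r < gdq.shape[0] and 0 <= c < gdq.shape[1]:
            gdq[r, c] |= JUMP_DET           # flag only in-bounds neighbors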
+ if cr_col[j] != 0: + if (gdq[integ, group, row, col - 1] & sat_flag) == 0: + if (gdq[integ, group, row, col - 1] & dnu_flag) == 0: + gdq[integ, group, row, col - 1] = np.bitwise_or( + gdq[integ, group, row, col - 1], jump_flag + ) + + if cr_col[j] != ncols - 1: + if (gdq[integ, group, row, col + 1] & sat_flag) == 0: + if (gdq[integ, group, row, col + 1] & dnu_flag) == 0: + gdq[integ, group, row, col + 1] = np.bitwise_or( + gdq[integ, group, row, col + 1], jump_flag + ) + + # flag n groups after jumps above the specified thresholds to account for + # the transient seen after ramp jumps + flag_e_threshold = [after_jump_flag_e1, after_jump_flag_e2] + flag_groups = [after_jump_flag_n1, after_jump_flag_n2] + + for cthres, cgroup in zip(flag_e_threshold, flag_groups): + if cgroup > 0: + cr_intg, cr_group, cr_row, cr_col = np.where(np.bitwise_and(gdq, jump_flag)) for j in range(len(cr_group)): - ratio_this_pix = ratio_all[cr_integ[j], cr_group[j] - 1, cr_row[j], cr_col[j]] - - # Jumps must be in a certain range to have neighbors flagged - if (ratio_this_pix < max_jump_to_flag_neighbors) and ( - ratio_this_pix > min_jump_to_flag_neighbors - ): - integ = cr_integ[j] - group = cr_group[j] - row = cr_row[j] - col = cr_col[j] - - # This section saves flagged neighbors that are above or - # below the current range of row. If this method - # running in a single process, the row above and below are - # not used. If it is running in multiprocessing mode, then - # the rows above and below need to be returned to - # find_jumps to use when it reconstructs the full group dq - # array from the slices. - - # Only flag adjacent pixels if they do not already have the - # 'SATURATION' or 'DONOTUSE' flag set - if row != 0: - if (gdq[integ, group, row - 1, col] & sat_flag) == 0: - if (gdq[integ, group, row - 1, col] & dnu_flag) == 0: - gdq[integ, group, row - 1, col] = np.bitwise_or( - gdq[integ, group, row - 1, col], jump_flag - ) - else: - row_below_gdq[integ, cr_group[j], cr_col[j]] = jump_flag - - if row != nrows - 1: - if (gdq[integ, group, row + 1, col] & sat_flag) == 0: - if (gdq[integ, group, row + 1, col] & dnu_flag) == 0: - gdq[integ, group, row + 1, col] = np.bitwise_or( - gdq[integ, group, row + 1, col], jump_flag - ) - else: - row_above_gdq[integ, cr_group[j], cr_col[j]] = jump_flag - - # Here we are just checking that we don't flag neighbors of - # jumps that are off the detector. 
- if cr_col[j] != 0: - if (gdq[integ, group, row, col - 1] & sat_flag) == 0: - if (gdq[integ, group, row, col - 1] & dnu_flag) == 0: - gdq[integ, group, row, col - 1] = np.bitwise_or( - gdq[integ, group, row, col - 1], jump_flag - ) - - if cr_col[j] != ncols - 1: - if (gdq[integ, group, row, col + 1] & sat_flag) == 0: - if (gdq[integ, group, row, col + 1] & dnu_flag) == 0: - gdq[integ, group, row, col + 1] = np.bitwise_or( - gdq[integ, group, row, col + 1], jump_flag - ) - - # flag n groups after jumps above the specified thresholds to account for - # the transient seen after ramp jumps - flag_e_threshold = [after_jump_flag_e1, after_jump_flag_e2] - flag_groups = [after_jump_flag_n1, after_jump_flag_n2] - - for cthres, cgroup in zip(flag_e_threshold, flag_groups): - if cgroup > 0: - cr_intg, cr_group, cr_row, cr_col = np.where(np.bitwise_and(gdq, jump_flag)) - for j in range(len(cr_group)): - intg = cr_intg[j] - group = cr_group[j] - row = cr_row[j] - col = cr_col[j] - if e_jump_4d[intg, group - 1, row, col] >= cthres[row, col]: - for kk in range(group, min(group + cgroup + 1, ngroups)): - if (gdq[intg, kk, row, col] & sat_flag) == 0: - if (gdq[intg, kk, row, col] & dnu_flag) == 0: - gdq[intg, kk, row, col] = np.bitwise_or( - gdq[integ, kk, row, col], jump_flag - ) + intg = cr_intg[j] + group = cr_group[j] + row = cr_row[j] + col = cr_col[j] + if e_jump_4d[intg, group - 1, row, col] >= cthres[row, col]: + for kk in range(group, min(group + cgroup + 1, ngroups)): + if (gdq[intg, kk, row, col] & sat_flag) == 0: + if (gdq[intg, kk, row, col] & dnu_flag) == 0: + gdq[intg, kk, row, col] = np.bitwise_or(gdq[integ, kk, row, col], jump_flag) if "stddev" in locals(): return gdq, row_below_gdq, row_above_gdq, num_primary_crs, stddev + + if only_use_ints: + dummy = np.zeros((dataa.shape[1] - 1, dataa.shape[2], dataa.shape[3]), dtype=np.float32) else: - if only_use_ints: - dummy = np.zeros((dataa.shape[1] - 1, dataa.shape[2], dataa.shape[3]), dtype=np.float32) - else: - dummy = np.zeros((dataa.shape[2], dataa.shape[3]), dtype=np.float32) - return gdq, row_below_gdq, row_above_gdq, num_primary_crs, dummy + dummy = np.zeros((dataa.shape[2], dataa.shape[3]), dtype=np.float32) + + return gdq, row_below_gdq, row_above_gdq, num_primary_crs, dummy def calc_med_first_diffs(first_diffs): @@ -457,12 +454,14 @@ def calc_med_first_diffs(first_diffs): mask = np.ones_like(first_diffs).astype(bool) mask[np.nanargmax(np.abs(first_diffs))] = False # clip the diff with the largest abs value return np.nanmedian(first_diffs[mask]) - elif num_usable_groups == 3: # if 3, no clipping just return median + + if num_usable_groups == 3: # if 3, no clipping just return median return np.nanmedian(first_diffs) - elif num_usable_groups == 2: # if 2, return diff with minimum abs + + if num_usable_groups == 2: # if 2, return diff with minimum abs return first_diffs[np.nanargmin(np.abs(first_diffs))] - else: - return np.nan + + return np.nan # if input is multi-dimensional diff --git a/src/stcal/ramp_fitting/gls_fit.py b/src/stcal/ramp_fitting/gls_fit.py index ef37d93d..4111df94 100644 --- a/src/stcal/ramp_fitting/gls_fit.py +++ b/src/stcal/ramp_fitting/gls_fit.py @@ -274,7 +274,7 @@ def create_output_opt_res(ramp_data): The original data used to do ramp fitting. """ # TODO Need to create the optional results output arrays. 
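For reference, these are the kinds of patterns the flake8-return (RET) rules enabled by this patch collapse; the functions below are hypothetical illustrations, not from this codebase:

    def scaled(values, factor):
        result = [v * factor for v in values]
        return result    # RET504: assigned only to be returned; return directly

    def reset_cache(cache):
        cache.clear()
        return None      # RET501: None is the only possible value; use bare return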
- return None + return def reassemble_image(ramp_data, image_info, image_slice, crow, nrows): diff --git a/src/stcal/ramp_fitting/ols_fit.py b/src/stcal/ramp_fitting/ols_fit.py index e2b5c6bf..778487f2 100644 --- a/src/stcal/ramp_fitting/ols_fit.py +++ b/src/stcal/ramp_fitting/ols_fit.py @@ -105,12 +105,11 @@ def ols_ramp_fit_multi(ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, wei return image_info, integ_info, opt_info # Call ramp fitting for multi-processor (multiple data slices) case - else: - image_info, integ_info, opt_info = ols_ramp_fit_multiprocessing( - ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, weighting, number_slices - ) + image_info, integ_info, opt_info = ols_ramp_fit_multiprocessing( + ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, weighting, number_slices + ) - return image_info, integ_info, opt_info + return image_info, integ_info, opt_info def ols_ramp_fit_multiprocessing( @@ -2286,9 +2285,7 @@ def fit_next_segment_only_good_0th_group( opt_res.append_arr(num_seg, these_pix, intercept, slope, sig_intercept, sig_slope, inv_var, save_opt) num_seg[these_pix] += 1 - f_max_seg = max(f_max_seg, num_seg.max()) - - return f_max_seg + return max(f_max_seg, num_seg.max()) def fit_next_segment_short_seg_not_at_end( @@ -2445,9 +2442,7 @@ def fit_next_segment_short_seg_not_at_end( opt_res.append_arr(num_seg, these_pix, intercept, slope, sig_intercept, sig_slope, inv_var, save_opt) num_seg[these_pix] += 1 - f_max_seg = max(f_max_seg, num_seg.max()) - - return f_max_seg + return max(f_max_seg, num_seg.max()) def fit_next_segment_short_seg_at_end( @@ -3560,9 +3555,7 @@ def check_both_groups_good(gdq): group_1_good[g1 == 0] = True # Mark the pixels with good groups in the both groups. - both = group_0_good & group_1_good - - return both + return group_0_good & group_1_good def check_good_0_bad_1(gdq): @@ -3594,9 +3587,7 @@ def check_good_0_bad_1(gdq): group_1_good[g1 != 0] = True # Mark the pixels with good group 0 and bad group 1. - both = group_0_good & group_1_good - - return both + return group_0_good & group_1_good def check_bad_0_good_1(gdq, sat): @@ -3640,9 +3631,7 @@ def check_bad_0_good_1(gdq, sat): group_1_good[g1 == 0] = True # Mark the pixels with non-saturated bad zeroeth group and good first group. - both = group_0_bad_nsat & group_1_good - - return both + return group_0_bad_nsat & group_1_good def fit_2_group( diff --git a/src/stcal/ramp_fitting/utils.py b/src/stcal/ramp_fitting/utils.py index cde8c0fc..159dd467 100644 --- a/src/stcal/ramp_fitting/utils.py +++ b/src/stcal/ramp_fitting/utils.py @@ -280,7 +280,7 @@ def output_optional(self, group_time): self.weights[1.0 / self.weights > LARGE_VARIANCE_THRESHOLD] = 0.0 warnings.resetwarnings() - opt_info = ( + return ( self.slope_seg, self.sigslope_seg, self.var_p_seg, @@ -292,8 +292,6 @@ def output_optional(self, group_time): self.cr_mag_seg, ) - return opt_info - def print_full(self): # pragma: no cover """ Diagnostic function for printing optional output arrays; most @@ -1315,8 +1313,7 @@ def compute_num_slices(max_cores, nrows, max_available): elif max_cores == "all": number_slices = max_available # Make sure we don't have more slices than rows or available cores. 
- number_slices = min([nrows, number_slices, max_available]) - return number_slices + return min([nrows, number_slices, max_available]) def dq_compress_final(dq_int, ramp_data): @@ -1649,6 +1646,4 @@ def groups_saturated_in_integration(intdq, sat_flag, num_sat_groups): sat_groups = np.zeros(intdq.shape, dtype=int) sat_groups[np.bitwise_and(intdq, sat_flag).astype(bool)] = 1 nsat_groups = sat_groups.sum(axis=0) - wh_nsat_groups = np.where(nsat_groups == num_sat_groups) - - return wh_nsat_groups + return np.where(nsat_groups == num_sat_groups) diff --git a/src/stcal/saturation/saturation.py b/src/stcal/saturation/saturation.py index 056deac6..1f6e703f 100644 --- a/src/stcal/saturation/saturation.py +++ b/src/stcal/saturation/saturation.py @@ -151,8 +151,7 @@ def adjacent_pixels(plane_gdq, saturated, n_pix_grow_sat): box_dim = (n_pix_grow_sat * 2) + 1 struct = np.ones((box_dim, box_dim)).astype(bool) dialated = ndimage.binary_dilation(only_sat, structure=struct).astype(only_sat.dtype) - sat_pix = np.bitwise_or(cgdq, (dialated * saturated)) - return sat_pix + return np.bitwise_or(cgdq, (dialated * saturated)) def plane_saturation(plane, sat_thresh, dqflags): diff --git a/tests/test_ramp_fitting_cases.py b/tests/test_ramp_fitting_cases.py index a3d27e0b..59e66fa5 100644 --- a/tests/test_ramp_fitting_cases.py +++ b/tests/test_ramp_fitting_cases.py @@ -839,7 +839,7 @@ def assert_pri(p_true, new_info, pix): npt.assert_allclose(var_poisson[0, pix], p_true[3], atol=2e-5, rtol=2e-5) npt.assert_allclose(var_rnoise[0, pix], p_true[4], atol=2e-5, rtol=2e-5) - return None + return def debug_opt(o_true, opt_info, pix): @@ -908,7 +908,7 @@ def assert_opt(o_true, opt_info, pix): npt.assert_allclose(opt_pedestal, o_true[6], atol=2e-5, rtol=3e-5) npt.assert_allclose(opt_weights, o_true[7], atol=2e-5, rtol=2e-5) - return None + return def dbg_print(string): From 3c946109113f9b65bc63e2467f5772c3d791015c Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 15:59:19 -0500 Subject: [PATCH 17/36] Add private member access linting --- pyproject.toml | 2 +- src/stcal/alignment/util.py | 2 +- tests/test_jump_cas22.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c180c691..60c58450 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -105,7 +105,7 @@ select = [ 'Q', # flake8-quotes (best practices for quotes) 'RSE', # flake8-raise (best practices for raising exceptions) 'RET', # flake8-return (best practices for return statements) - # 'SLF', # flake8-self (prevent private member access) + 'SLF', # flake8-self (prevent private member access) # 'TID', # flake8-tidy-imports (prevent banned api and best import practices) # 'INT', # flake8-gettext (when to use printf style strings) # 'ARG', # flake8-unused-arguments (prevent unused arguments) diff --git a/src/stcal/alignment/util.py b/src/stcal/alignment/util.py index e0202deb..84e3a14d 100644 --- a/src/stcal/alignment/util.py +++ b/src/stcal/alignment/util.py @@ -117,7 +117,7 @@ def _generate_tranform( An :py:mod:`~astropy` model containing the transform between frames. 
""" if transform is None: - sky_axes = refmodel.meta.wcs._get_axes_indices().tolist() + sky_axes = refmodel.meta.wcs._get_axes_indices().tolist() # noqa: SLF001 v3yangle = np.deg2rad(refmodel.meta.wcsinfo.v3yangle) vparity = refmodel.meta.wcsinfo.vparity if rotation is None: diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index b193e613..00e59fed 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -112,7 +112,7 @@ def read_pattern(): def test_from_read_pattern(read_pattern): """Test turning read_pattern into the time data""" - metadata = from_read_pattern(read_pattern, READ_TIME, len(read_pattern))._to_dict() + metadata = from_read_pattern(read_pattern, READ_TIME, len(read_pattern))._to_dict() # noqa: SLF001 t_bar = metadata["t_bar"] tau = metadata["tau"] @@ -141,7 +141,7 @@ def ramp_data(read_pattern): metadata : dict The metadata computed from the read pattern """ - data = from_read_pattern(read_pattern, READ_TIME, len(read_pattern))._to_dict() + data = from_read_pattern(read_pattern, READ_TIME, len(read_pattern))._to_dict() # noqa: SLF001 return data["t_bar"], data["tau"], data["n_reads"], read_pattern From 9e6ed4bf2d2a5b474dd5c79f2b60224cf14776f2 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 16:01:29 -0500 Subject: [PATCH 18/36] Add pathlib linting --- pyproject.toml | 6 +++--- tests/test_ramp_fitting_cases.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 60c58450..f0ecc89b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -106,10 +106,10 @@ select = [ 'RSE', # flake8-raise (best practices for raising exceptions) 'RET', # flake8-return (best practices for return statements) 'SLF', # flake8-self (prevent private member access) - # 'TID', # flake8-tidy-imports (prevent banned api and best import practices) - # 'INT', # flake8-gettext (when to use printf style strings) + 'TID', # flake8-tidy-imports (prevent banned api and best import practices) + 'INT', # flake8-gettext (when to use printf style strings) # 'ARG', # flake8-unused-arguments (prevent unused arguments) - # 'PTH', # flake8-use-pathlib (prefer pathlib over os.path) + 'PTH', # flake8-use-pathlib (prefer pathlib over os.path) # 'ERA', # eradicate (remove commented out code) # 'PGH', # pygrep (simple grep checks) # 'PL', # pylint (general linting, flake8 alternative) diff --git a/tests/test_ramp_fitting_cases.py b/tests/test_ramp_fitting_cases.py index 59e66fa5..cdea0307 100644 --- a/tests/test_ramp_fitting_cases.py +++ b/tests/test_ramp_fitting_cases.py @@ -1,5 +1,5 @@ import inspect -import os +from pathlib import Path import numpy as np import numpy.testing as npt @@ -918,5 +918,5 @@ def dbg_print(string): cf = inspect.currentframe() line_number = cf.f_back.f_lineno finfo = inspect.getframeinfo(cf.f_back) - fname = os.path.basename(finfo.filename) + fname = Path(finfo.filename).name print(f"[{fname}:{line_number}] {string}") From b2aa1d6d29e2f107509e666eadb4ed662d906846 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 16:02:39 -0500 Subject: [PATCH 19/36] Add pygrep checks --- .pre-commit-config.yaml | 2 -- pyproject.toml | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 29169435..8bf37d13 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -19,8 +19,6 @@ repos: - repo: https://github.com/pre-commit/pygrep-hooks rev: v1.10.0 hooks: - - id: python-check-blanket-noqa - - id: 
python-check-mock-methods - id: rst-directive-colons - id: rst-inline-touching-normal - id: text-unicode-replacement-char diff --git a/pyproject.toml b/pyproject.toml index f0ecc89b..f289ff58 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -111,7 +111,7 @@ select = [ # 'ARG', # flake8-unused-arguments (prevent unused arguments) 'PTH', # flake8-use-pathlib (prefer pathlib over os.path) # 'ERA', # eradicate (remove commented out code) - # 'PGH', # pygrep (simple grep checks) + 'PGH', # pygrep (simple grep checks) # 'PL', # pylint (general linting, flake8 alternative) 'FLY', # flynt (f-string conversion where possible) 'NPY', # NumPy-specific checks (recommendations from NumPy) From 8eff5585acc5b874a7e0118381420c0e4872baa9 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 16:09:23 -0500 Subject: [PATCH 20/36] Add pylint checks --- pyproject.toml | 6 +++++- src/stcal/basic_utils.py | 2 +- src/stcal/dqflags.py | 2 +- src/stcal/dynamicdq.py | 2 +- src/stcal/jump/jump.py | 2 +- src/stcal/jump/twopoint_difference.py | 2 +- src/stcal/ramp_fitting/gls_fit.py | 4 ++-- src/stcal/ramp_fitting/ols_fit.py | 6 +++--- tests/test_jump_cas22.py | 4 +--- tests/test_ramp_fitting_cases.py | 4 ---- 10 files changed, 16 insertions(+), 18 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index f289ff58..f17a1faa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -112,7 +112,7 @@ select = [ 'PTH', # flake8-use-pathlib (prefer pathlib over os.path) # 'ERA', # eradicate (remove commented out code) 'PGH', # pygrep (simple grep checks) - # 'PL', # pylint (general linting, flake8 alternative) + 'PL', # pylint (general linting, flake8 alternative) 'FLY', # flynt (f-string conversion where possible) 'NPY', # NumPy-specific checks (recommendations from NumPy) # 'PERF', # Perflint (performance linting) @@ -121,6 +121,10 @@ select = [ ignore = [ 'C901', # variable is too complex 'ISC001', # interfers with formatter + 'PLR0912', # Too many branches + 'PLR0913', # Too many arguments + 'PLR0915', # Too many statements + 'PLR2004', # Magic value used in comparison ] exclude = [ 'docs', diff --git a/src/stcal/basic_utils.py b/src/stcal/basic_utils.py index b9f60c8c..f9a9894f 100644 --- a/src/stcal/basic_utils.py +++ b/src/stcal/basic_utils.py @@ -14,4 +14,4 @@ ) from err -__all__ = [multiple_replace] +__all__ = ["multiple_replace"] diff --git a/src/stcal/dqflags.py b/src/stcal/dqflags.py index f89fb566..20197b13 100644 --- a/src/stcal/dqflags.py +++ b/src/stcal/dqflags.py @@ -17,4 +17,4 @@ raise ImportError("dqflags has been moved to stdatamodels.dqflags, please install stdatamodels") from err -__all__ = [ap_interpret_bit_flags, multiple_replace, interpret_bit_flags, dqflags_to_mnemonics] +__all__ = ["ap_interpret_bit_flags", "multiple_replace", "interpret_bit_flags", "dqflags_to_mnemonics"] diff --git a/src/stcal/dynamicdq.py b/src/stcal/dynamicdq.py index 09145bc3..074f6e8f 100644 --- a/src/stcal/dynamicdq.py +++ b/src/stcal/dynamicdq.py @@ -14,4 +14,4 @@ ) from err -__all__ = [dynamic_mask] +__all__ = ["dynamic_mask"] diff --git a/src/stcal/jump/jump.py b/src/stcal/jump/jump.py index a42f6aef..17729426 100644 --- a/src/stcal/jump/jump.py +++ b/src/stcal/jump/jump.py @@ -2,9 +2,9 @@ import multiprocessing import time -import astropy.stats as stats import cv2 as cv import numpy as np +from astropy import stats from astropy.convolution import Ring2DKernel, convolve from . 
import constants diff --git a/src/stcal/jump/twopoint_difference.py b/src/stcal/jump/twopoint_difference.py index 283c2471..3a783683 100644 --- a/src/stcal/jump/twopoint_difference.py +++ b/src/stcal/jump/twopoint_difference.py @@ -1,8 +1,8 @@ import logging import warnings -import astropy.stats as stats import numpy as np +from astropy import stats log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) diff --git a/src/stcal/ramp_fitting/gls_fit.py b/src/stcal/ramp_fitting/gls_fit.py index 4111df94..db83cbd9 100644 --- a/src/stcal/ramp_fitting/gls_fit.py +++ b/src/stcal/ramp_fitting/gls_fit.py @@ -12,8 +12,8 @@ import logging import time -from multiprocessing import cpu_count as cpu_count -from multiprocessing.pool import Pool as Pool +from multiprocessing import cpu_count +from multiprocessing.pool import Pool import numpy as np import numpy.linalg as la diff --git a/src/stcal/ramp_fitting/ols_fit.py b/src/stcal/ramp_fitting/ols_fit.py index 778487f2..21f4cd8c 100644 --- a/src/stcal/ramp_fitting/ols_fit.py +++ b/src/stcal/ramp_fitting/ols_fit.py @@ -3,8 +3,8 @@ import logging import time import warnings -from multiprocessing import cpu_count as cpu_count -from multiprocessing.pool import Pool as Pool +from multiprocessing import cpu_count +from multiprocessing.pool import Pool import numpy as np @@ -1933,7 +1933,7 @@ def fit_next_segment( got_case = np.zeros((ncols * nrows), dtype=bool) # Special case fit with NGROUPS being 1 or 2. - if ngroups == 1 or ngroups == 2: + if ngroups in (1, 2): return fit_short_ngroups( ngroups, start, diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 00e59fed..9b2aeb82 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -499,9 +499,7 @@ def test_find_jumps(jump_data): # The two resultants excluded should be adjacent jump_correct = [] for jump in fit["jumps"]: - jump_correct.append( - jump == resultant_index or jump == resultant_index - 1 or jump == resultant_index + 1 - ) + jump_correct.append(jump in (resultant_index, resultant_index - 1, resultant_index + 1)) if not all(jump_correct): incorrect_other += 1 continue diff --git a/tests/test_ramp_fitting_cases.py b/tests/test_ramp_fitting_cases.py index cdea0307..5b75a781 100644 --- a/tests/test_ramp_fitting_cases.py +++ b/tests/test_ramp_fitting_cases.py @@ -839,8 +839,6 @@ def assert_pri(p_true, new_info, pix): npt.assert_allclose(var_poisson[0, pix], p_true[3], atol=2e-5, rtol=2e-5) npt.assert_allclose(var_rnoise[0, pix], p_true[4], atol=2e-5, rtol=2e-5) - return - def debug_opt(o_true, opt_info, pix): (slope, sigslope, var_poisson, var_rnoise, yint, sigyint, pedestal, weights, crmag) = opt_info @@ -908,8 +906,6 @@ def assert_opt(o_true, opt_info, pix): npt.assert_allclose(opt_pedestal, o_true[6], atol=2e-5, rtol=3e-5) npt.assert_allclose(opt_weights, o_true[7], atol=2e-5, rtol=2e-5) - return - def dbg_print(string): """ From 1b0b9e1616747968c40448e947e6ea65872bdc01 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 16:12:35 -0500 Subject: [PATCH 21/36] Add performance linting --- pyproject.toml | 2 +- src/stcal/alignment/util.py | 9 +++------ src/stcal/ramp_fitting/gls_fit.py | 2 +- tests/test_jump_cas22.py | 6 +++--- 4 files changed, 8 insertions(+), 11 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index f17a1faa..b9373feb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -115,7 +115,7 @@ select = [ 'PL', # pylint (general linting, flake8 alternative) 'FLY', # flynt (f-string conversion where possible) 'NPY', # 
NumPy-specific checks (recommendations from NumPy) - # 'PERF', # Perflint (performance linting) + 'PERF', # Perflint (performance linting) 'RUF', # ruff specific checks ] ignore = [ diff --git a/src/stcal/alignment/util.py b/src/stcal/alignment/util.py index 84e3a14d..98dffee6 100644 --- a/src/stcal/alignment/util.py +++ b/src/stcal/alignment/util.py @@ -812,13 +812,10 @@ def _reproject(x: float | np.ndarray, y: float | np.ndarray) -> tuple: sky = _get_forward_transform_func(wcs1)(x, y, 0) # rearrange into array including flattened x and y vaues - flat_sky = [] - for axis in sky: - flat_sky.append(axis.flatten()) + flat_sky = [axis.flatten() for axis in sky] det = np.array(_get_backward_transform_func(wcs2)(flat_sky[0], flat_sky[1], 0)) - det_reshaped = [] - for axis in det: - det_reshaped.append(axis.reshape(x.shape)) + det_reshaped = [axis.reshape(x.shape) for axis in det] + return tuple(det_reshaped) return _reproject diff --git a/src/stcal/ramp_fitting/gls_fit.py b/src/stcal/ramp_fitting/gls_fit.py index db83cbd9..230d8c90 100644 --- a/src/stcal/ramp_fitting/gls_fit.py +++ b/src/stcal/ramp_fitting/gls_fit.py @@ -1657,7 +1657,7 @@ def gls_fit( for z in range(nz): try: la.solve(temp_var[z], I_2) - except la.LinAlgError as msg2: + except la.LinAlgError as msg2: # noqa: PERF203 log.warning("singular matrix, z = %d", z) raise la.LinAlgError(msg2) from err del I_2 diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 9b2aeb82..61e70da5 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -497,9 +497,9 @@ def test_find_jumps(jump_data): continue # The two resultants excluded should be adjacent - jump_correct = [] - for jump in fit["jumps"]: - jump_correct.append(jump in (resultant_index, resultant_index - 1, resultant_index + 1)) + jump_correct = [ + (jump in (resultant_index, resultant_index - 1, resultant_index + 1)) for jump in fit["jumps"] + ] if not all(jump_correct): incorrect_other += 1 continue From 2e2d7834f50275ae1817afacecbc7d819f744c8f Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 16:23:21 -0500 Subject: [PATCH 22/36] Add codespell --- .github/CODEOWNERS | 2 +- .pre-commit-config.yaml | 18 ++++-- CHANGES.rst | 4 +- docs/stcal/ramp_fitting/description.rst | 4 +- pyproject.toml | 2 +- src/stcal/alignment/util.py | 8 +-- src/stcal/dark_current/dark_sub.py | 2 +- src/stcal/jump/jump.py | 12 ++-- src/stcal/jump/twopoint_difference.py | 8 +-- src/stcal/linearity/linearity.py | 2 +- src/stcal/ramp_fitting/gls_fit.py | 2 +- src/stcal/ramp_fitting/ols_cas22/_fit.pyx | 4 +- src/stcal/ramp_fitting/ols_cas22/_jump.pyx | 14 ++--- src/stcal/ramp_fitting/ols_cas22/_ramp.pyx | 8 +-- src/stcal/ramp_fitting/ols_fit.py | 24 ++++---- src/stcal/ramp_fitting/utils.py | 2 +- tests/test_ramp_fitting.py | 12 ++-- tests/test_ramp_fitting_cases.py | 2 +- tests/test_ramp_fitting_gls_fit.py | 66 +++++++++++----------- 19 files changed, 102 insertions(+), 94 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 5fb314af..efcfd9e2 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,3 +1,3 @@ -# automatically requests pull request reviews for files matching the given pattern; the last match takes precendence +# automatically requests pull request reviews for files matching the given pattern; the last match takes precedence * @spacetelescope/stcal-maintainers diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8bf37d13..6f6ec091 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -23,11 
+23,13 @@ repos: - id: rst-inline-touching-normal - id: text-unicode-replacement-char -# - repo: https://github.com/asottile/pyupgrade -# rev: 'v3.10.1' -# hooks: -# - id: pyupgrade -# args: ["--py39-plus"] +- repo: https://github.com/codespell-project/codespell + rev: v2.2.5 + hooks: + - id: codespell + args: ["--write-changes"] + additional_dependencies: + - tomli - repo: https://github.com/astral-sh/ruff-pre-commit rev: 'v0.1.5' @@ -36,6 +38,12 @@ repos: args: ["--fix", "--show-fixes"] - id: ruff-format +- repo: https://github.com/adamchainz/blacken-docs + rev: 1.16.0 + hooks: + - id: blacken-docs + additional_dependencies: + - black==22.12.0 # - repo: https://github.com/pycqa/isort # rev: 5.12.0 diff --git a/CHANGES.rst b/CHANGES.rst index beb7aa78..0fb58952 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -174,7 +174,7 @@ ramp_fitting - Correct the "averaging" of the final image slope by properly excluding variances as a part of the denominator from integrations with invalid slopes. [#167] -- Removing the usage of ``numpy.where`` where possible for perfomance +- Removing the usage of ``numpy.where`` where possible for performance reasons. [#169] 1.3.7 (2023-04-26) @@ -566,7 +566,7 @@ ramp_fitting jump ~~~~ -- Fix issue in jump detection that occured when there were only 2 usable +- Fix issue in jump detection that occurred when there were only 2 usable differences with no other groups flagged. This PR also added tests and fixed some of the logging statements in twopoint difference. [#74] diff --git a/docs/stcal/ramp_fitting/description.rst b/docs/stcal/ramp_fitting/description.rst index 8664815f..d0d12c88 100644 --- a/docs/stcal/ramp_fitting/description.rst +++ b/docs/stcal/ramp_fitting/description.rst @@ -32,7 +32,7 @@ cores on the host computer and the value of the max_cores input parameter. By default the step runs on a single processor. At the other extreme if max_cores is set to 'all', it will use all available cores (real and virtual). Testing has shown a reduction in the elapsed time for the step proportional to the number of real -cores used. Using the virtual cores also reduces the elasped time but at a slightly +cores used. Using the virtual cores also reduces the elapsed time but at a slightly lower rate than the real cores. Since the data is sliced based on the number of rows, if the number of cores requested for multiprocessing is greater than the number of rows, the number of cores actually used will be no more than the @@ -139,7 +139,7 @@ is the following: the type of noise (when appropriate) will appear as the supers and the form of the data will appear as the subscript: ‘s’, ‘i’, ‘o’ for segment, integration, or overall (for the entire dataset), respectively. -It is possible for an integration or pixel to have invalid data, so useable +It is possible for an integration or pixel to have invalid data, so usable slope data will not be available. If a pixel has an invalid integration, the value for that integration for that pixel will be set to NaN in the rateints product. 
Further, if all integrations for a given pixel are invalid the pixel value for diff --git a/pyproject.toml b/pyproject.toml index b9373feb..94a15291 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -120,7 +120,7 @@ select = [ ] ignore = [ 'C901', # variable is too complex - 'ISC001', # interfers with formatter + 'ISC001', # interferes with formatter 'PLR0912', # Too many branches 'PLR0913', # Too many arguments 'PLR0915', # Too many statements diff --git a/src/stcal/alignment/util.py b/src/stcal/alignment/util.py index 98dffee6..66029b3b 100644 --- a/src/stcal/alignment/util.py +++ b/src/stcal/alignment/util.py @@ -144,7 +144,7 @@ def _generate_tranform( def _get_axis_min_and_bounding_box(ref_model, wcs_list, ref_wcs): """ - Calculates axis mininum values and bounding box. + Calculates axis minimum values and bounding box. Parameters ---------- @@ -286,7 +286,7 @@ def _calculate_new_wcs(ref_model, shape, wcs_list, fiducial, crpix=None, transfo The coordinates of the reference pixel. transform : ~astropy.modeling.Model - An optional tranform to be prepended to the transform constructed by the + An optional transform to be prepended to the transform constructed by the fiducial point. The number of outputs of this transform must equal the number of axes in the coordinate frame. @@ -763,7 +763,7 @@ def reproject(wcs1, wcs2): def _get_forward_transform_func(wcs1): """Get the forward transform function from the input WCS. If the wcs is a - fitswcs.WCS object all_pix2world requres three inputs, the x (str, ndarrray), + fitswcs.WCS object all_pix2world requires three inputs, the x (str, ndarrray), y (str, ndarray), and origin (int). The origin should be between 0, and 1 https://docs.astropy.org/en/latest/wcs/index.html#loading-wcs-information-from-a-fits-file ) @@ -811,7 +811,7 @@ def _reproject(x: float | np.ndarray, y: float | np.ndarray) -> tuple: raise ValueError("x and y must be the same length") sky = _get_forward_transform_func(wcs1)(x, y, 0) - # rearrange into array including flattened x and y vaues + # rearrange into array including flattened x and y values flat_sky = [axis.flatten() for axis in sky] det = np.array(_get_backward_transform_func(wcs2)(flat_sky[0], flat_sky[1], 0)) det_reshaped = [axis.reshape(x.shape) for axis in det] diff --git a/src/stcal/dark_current/dark_sub.py b/src/stcal/dark_current/dark_sub.py index 080a59a0..82041067 100644 --- a/src/stcal/dark_current/dark_sub.py +++ b/src/stcal/dark_current/dark_sub.py @@ -147,7 +147,7 @@ def do_correction_data(science_data, dark_data, dark_output=None): # Create a frame-averaged version of the dark data to match # the nframes and groupgap settings of the science data. # If the data are from JWST/MIRI, the darks are integration-dependent - # and we average them with a seperate routine. + # and we average them with a separate routine. if len(dark_data.data.shape) == 4: # only MIRI uses 4-D darks averaged_dark = average_dark_frames_4d( diff --git a/src/stcal/jump/jump.py b/src/stcal/jump/jump.py index 17729426..94fc6ba5 100644 --- a/src/stcal/jump/jump.py +++ b/src/stcal/jump/jump.py @@ -108,7 +108,7 @@ def detect_jumps( max_cores: str Maximum number of cores to use for multiprocessing. Available choices are 'none' (which will create one process), 'quarter', 'half', 'all' - (of availble cpu cores). + (of available cpu cores). 
max_jump_to_flag_neighbors : float value in units of sigma that sets the upper limit for flagging of @@ -699,7 +699,7 @@ def find_ellipses(dqplane, bitmask, min_area): pixels = np.bitwise_and(dqplane, bitmask) contours, hierarchy = cv.findContours(pixels, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) bigcontours = [con for con in contours if cv.contourArea(con) > min_area] - # minAreaRect is used becuase fitEllipse requires 5 points and it is + # minAreaRect is used because fitEllipse requires 5 points and it is # possible to have a contour # with just 4 points. return [cv.minAreaRect(con) for con in bigcontours] @@ -718,7 +718,7 @@ def make_snowballs( sat_flag, max_extended_radius, ): - # Ths routine will create a list of snowballs (ellipses) that have the + # This routine will create a list of snowballs (ellipses) that have the # center # of the saturation circle within the enclosing jump rectangle. snowballs = [] @@ -768,7 +768,7 @@ def point_inside_ellipse(point, ellipse): def near_edge(jump, low_threshold, high_threshold): # This routing tests whether the center of a jump is close to the edge of - # the detector. Jumps that are within the threshold will not requre a + # the detector. Jumps that are within the threshold will not require a # saturated core since this may be off the detector return ( jump[0][0] < low_threshold @@ -882,7 +882,7 @@ def find_faint_extended( extended_emission[exty, extx] = 1 # find the contours of the extended emission contours, hierarchy = cv.findContours(extended_emission, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) - # get the countours that are above the minimum size + # get the contours that are above the minimum size bigcontours = [con for con in contours if cv.contourArea(con) > min_shower_area] # get the minimum enclosing rectangle which is the same as the # minimum enclosing ellipse @@ -935,7 +935,7 @@ def find_faint_extended( if all_ellipses: # Now we actually do the flagging of the pixels inside showers. # This is deferred until all showers are detected. because the showers - # can flag future groups and would confuse the detection algorthim if + # can flag future groups and would confuse the detection algorithm if # we worked on groups that already had some flagged showers. for showers in all_ellipses: intg = showers[0] diff --git a/src/stcal/jump/twopoint_difference.py b/src/stcal/jump/twopoint_difference.py index 3a783683..e9922633 100644 --- a/src/stcal/jump/twopoint_difference.py +++ b/src/stcal/jump/twopoint_difference.py @@ -280,7 +280,7 @@ def find_crs( all_crs_row = np.concatenate((row4cr, row3cr, row2cr)) all_crs_col = np.concatenate((col4cr, col3cr, col2cr)) - # iterate over all groups of the pix w/ an inital CR to look for subsequent CRs + # iterate over all groups of the pix w/ an initial CR to look for subsequent CRs # flag and clip the first CR found. recompute median/sigma/ratio # and repeat the above steps of comparing the max 'ratio' for each pixel # to the threshold to determine if another CR can be flagged and clipped. @@ -315,7 +315,7 @@ def find_crs( new_pix_sigma = np.sqrt(np.abs(new_pix_median_diffs) + pix_rn2 / nframes) new_pix_ratio = np.abs(pix_first_diffs - new_pix_median_diffs) / new_pix_sigma - # check if largest ratio exceeds threhold appropriate for num remaining groups + # check if largest ratio exceeds threshold appropriate for num remaining groups # select appropriate thresh. 
based on number of remaining groups rej_thresh = normal_rej_thresh @@ -426,8 +426,8 @@ def calc_med_first_diffs(first_diffs): """Calculate the median of `first diffs` along the group axis. If there are 4+ usable groups (e.g not flagged as saturated, donotuse, - or a previously clipped CR), then the group with largest absoulte - first difference will be clipped and the median of the remianing groups + or a previously clipped CR), then the group with largest absolute + first difference will be clipped and the median of the remaining groups will be returned. If there are exactly 3 usable groups, the median of those three groups will be returned without any clipping. Finally, if there are two usable groups, the group with the smallest absolute diff --git a/src/stcal/linearity/linearity.py b/src/stcal/linearity/linearity.py index 752b22ef..0225d5e1 100644 --- a/src/stcal/linearity/linearity.py +++ b/src/stcal/linearity/linearity.py @@ -250,7 +250,7 @@ def correct_for_zero(lin_coeffs, pixeldq, dqflags): updated array of correction coefficients in reference file """ - # The critcal coefficient that should not be zero is the linear term other terms are fine to be zero + # The critical coefficient that should not be zero is the linear term other terms are fine to be zero linear_term = lin_coeffs[1, :, :] wh_zero = np.where(linear_term == 0) yzero, xzero = wh_zero[0], wh_zero[1] diff --git a/src/stcal/ramp_fitting/gls_fit.py b/src/stcal/ramp_fitting/gls_fit.py index 230d8c90..aa4035e2 100644 --- a/src/stcal/ramp_fitting/gls_fit.py +++ b/src/stcal/ramp_fitting/gls_fit.py @@ -1491,7 +1491,7 @@ def gls_fit( Notes ----- - Curently the noise model is assumed to be a combination of + Currently the noise model is assumed to be a combination of read and photon noise alone. Same technique could be used with more complex noise models, but then the ramp covariance matrix should be input. diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit.pyx index 2bc2069f..b7bec6d3 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit.pyx @@ -113,12 +113,12 @@ def fit_ramps(float[:, :] resultants, ---------- resultants : float[n_resultants, n_pixel] the resultants in electrons (Note that this can be based as any sort of - array, such as a numpy array. The memmory view is just for efficiency in + array, such as a numpy array. The memory view is just for efficiency in cython) dq : np.ndarry[n_resultants, n_pixel] the dq array. dq != 0 implies bad pixel / CR. (Kept as a numpy array so that it can be passed out without copying into new numpy array, will - be working on memmory views of this array) + be working on memory views of this array) read_noise : float[n_pixel] the read noise in electrons for each pixel (same note as the resultants) read_time : float diff --git a/src/stcal/ramp_fitting/ols_cas22/_jump.pyx b/src/stcal/ramp_fitting/ols_cas22/_jump.pyx index 8b8969f7..3cab336d 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_jump.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_jump.pyx @@ -6,7 +6,7 @@ This module contains all the functions needed to execute jump detection for the Castentano+22 ramp fitting algorithm The _ramp module contains the actual ramp fitting algorithm, this module - contains a driver for the algoritm and detection of jumps/splitting ramps. + contains a driver for the algorithm and detection of jumps/splitting ramps. 
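
In outline, the driver splits a ramp around the pair of flagged resultants and re-queues the surviving pieces, pushing them in the order they were observed so the most recent piece sits on top of the stack. A plain-Python sketch of that bookkeeping (hedged; the real code operates on the `RampQueue` and `RampIndex` types described below):

    def split_ramp(start, end, jump0, jump1):
        """Split the ramp [start, end] around flagged resultants jump0 and jump1."""
        pieces = []
        if jump0 > start:  # keep the piece observed before the jump
            pieces.append((start, jump0 - 1))
        if jump1 < end:  # keep the piece observed after the jump
            pieces.append((jump1 + 1, end))
        return pieces

    assert split_ramp(0, 9, 4, 5) == [(0, 3), (6, 9)]
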
Structs ------- @@ -26,7 +26,7 @@ JumpFits : struct - fits : vector[RampFit] All of the fits for each ramp fit for the pixel - index : RampQueue - The RampIndex representations correspoinding to each fit in fits + The RampIndex representations corresponding to each fit in fits Enums ----- @@ -518,12 +518,12 @@ cdef inline JumpFits fit_jumps(float[:] resultants, dq[jump0] = JUMP_DET dq[jump1] = JUMP_DET - # Record jump diagnotics + # Record jump diagnostics if include_diagnostic: ramp_fits.jumps.push_back(jump0) ramp_fits.jumps.push_back(jump1) - # The two resultant indicies need to be skipped, therefore + # The two resultant indices need to be skipped, therefore # the two # possible new ramps are: # RampIndex(ramp.start, jump0 - 1) @@ -535,7 +535,7 @@ cdef inline JumpFits fit_jumps(float[:] resultants, # we need to add the ramps in the time order they were # observed in. This results in the last observation ramp # being the top of the stack; meaning that, - # it will be the next ramp handeled. + # it will be the next ramp handled. if jump0 > ramp.start: # Note that when jump0 == ramp.start, we have detected a @@ -558,7 +558,7 @@ cdef inline JumpFits fit_jumps(float[:] resultants, # Note that jump1 > ramp.end is technically possible # however in those potential cases it will draw on # resultants which are not considered part of the ramp - # under consideration. Therefore, we have to exlude all + # under consideration. Therefore, we have to exclude all # of those values. ramps.push_back(RampIndex(jump1 + 1, ramp.end)) @@ -584,7 +584,7 @@ cdef inline JumpFits fit_jumps(float[:] resultants, ramp_fits.average.read_var += weight**2 * ramp_fit.read_var ramp_fits.average.poisson_var += weight**2 * ramp_fit.poisson_var - # Finish computing averages using the lazy proces + # Finish computing averages using the lazy process ramp_fits.average.slope /= total_weight if total_weight != 0 else 1 ramp_fits.average.read_var /= total_weight**2 if total_weight != 0 else 1 ramp_fits.average.poisson_var /= total_weight**2 if total_weight != 0 else 1 diff --git a/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx b/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx index bd60e0fc..a6988e12 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx @@ -25,7 +25,7 @@ RampFit : struct The poisson variance for the fit RampQueue : vector[RampIndex] - Vector of RampIndex objects (convienience typedef) + Vector of RampIndex objects (convenience typedef) Classes ------- @@ -73,7 +73,7 @@ cdef class ReadPattern: In the case of this code memory views are the fastest "safe" array data structure. This class will immediately be unpacked into raw memory views, so that we avoid - any further overhead of swithcing between python and cython. + any further overhead of switching between python and cython. 
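
For intuition, the per-resultant quantities carried here can be computed directly from the read pattern; a NumPy sketch following the `from_read_pattern` arithmetic that appears later in this series (the read indices and read time below are illustrative values, not mission defaults):

    import numpy as np

    read_time = 3.04  # seconds per read, illustrative
    resultant = np.array([5, 6, 7])  # read indices averaged into one resultant
    n_reads = len(resultant)

    t_bar = read_time * np.mean(resultant)  # mean time of the resultant
    tau = np.sum((2 * (n_reads - np.arange(n_reads)) - 1) * resultant) * read_time / n_reads**2
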
Attributes: ---------- @@ -204,7 +204,7 @@ cpdef inline RampQueue init_ramps(int[:] dq, int n_resultants): return ramps -# Keeps the static type checker/highligher happy this has no actual effect +# Keeps the static type checker/highlighter happy this has no actual effect ctypedef float[6] _row # Casertano+2022, Table 2 @@ -311,7 +311,7 @@ cdef inline RampFit fit_ramp(float[:] resultants_, cdef float t_scale = (t_bar[end] - t_bar[0]) / 2 t_scale = 1 if t_scale == 0 else t_scale - # Initalize the fit loop + # Initialize the fit loop # it is faster to generate a c++ vector than a numpy array cdef vector[float] weights = vector[float](n_resultants) cdef vector[float] coeffs = vector[float](n_resultants) diff --git a/src/stcal/ramp_fitting/ols_fit.py b/src/stcal/ramp_fitting/ols_fit.py index 21f4cd8c..22c4f764 100644 --- a/src/stcal/ramp_fitting/ols_fit.py +++ b/src/stcal/ramp_fitting/ols_fit.py @@ -307,7 +307,7 @@ def get_opt_slice(opt_info, opt_slice, row_start, nrows): # The optional results product is of variable size in its second dimension. # The number of segments/cosmic rays determine the final products size. - # Because each slice is computed indpendently, the number of segments may + # Because each slice is computed independently, the number of segments may # differ from segment to segment. The final output product is created # using the max size for this dimension. To ensure correct assignment is # done during this step, the second dimension, as well as the row @@ -734,7 +734,7 @@ def discard_miri_groups(ramp_data): ------- bool : False if no data to process after discarding unusable data. - True if useable data available for further processing. + True if usable data available for further processing. """ data = ramp_data.data err = ramp_data.err @@ -1678,7 +1678,7 @@ def calc_slope( # end_heads is initially a tuple populated with every pixel that is # either saturated or contains a cosmic ray based on the input DQ - # array, so is sized to accomodate the maximum possible number of + # array, so is sized to accommodate the maximum possible number of # pixels flagged. It is later compressed to be an array denoting # the number of endpoints per pixel. end_heads = np.ones(npix * ngroups, dtype=np.int32) @@ -1759,7 +1759,7 @@ def calc_slope( # set. In the above line that group would be excluded from the # current segment. If a segment is created only due to a group # flagged as JUMP_DET it will be the group just prior to the 0th - # group in the current segement. We want to include it as part of + # group in the current segment. We want to include it as part of # the current segment, but exclude all other groups with any other # flag. 
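
A toy restatement of that rule (hedged; the flag values follow the DQ convention used by this repository's tests, where DO_NOT_USE=1, SATURATED=2, and JUMP_DET=4, while the real code works on full GROUPDQ cubes):

    DO_NOT_USE, SATURATED, JUMP_DET = 1, 2, 4

    def keep_in_segment(gdq_value):
        """Keep unflagged groups and a group flagged *only* as JUMP_DET."""
        return gdq_value in (0, JUMP_DET)

    assert keep_in_segment(JUMP_DET)
    assert not keep_in_segment(SATURATED | DO_NOT_USE)
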
@@ -1859,7 +1859,7 @@ def fit_next_segment( delineates which channels to fit for each pixel, 2-D bool mask_2d_init : ndarray - copy of intial mask_2d, 2-D bool + copy of initial mask_2d, 2-D bool inv_var : ndarray values of 1/variance for good pixels, 1-D float @@ -2263,7 +2263,7 @@ def fit_next_segment_only_good_0th_group( save optional fitting results mask_2d_init : ndarray - copy of intial mask_2d, 2-D bool + copy of initial mask_2d, 2-D bool Returns ------- @@ -2379,7 +2379,7 @@ def fit_next_segment_short_seg_not_at_end( save optional fitting results mask_2d_init : ndarray - copy of intial mask_2d, 2-D bool + copy of initial mask_2d, 2-D bool end_locs : ndarray end locations, 1-D @@ -2533,7 +2533,7 @@ def fit_next_segment_short_seg_at_end( save optional fitting results mask_2d_init : ndarray - copy of intial mask_2d, 2-D bool + copy of initial mask_2d, 2-D bool Returns ------- @@ -2669,7 +2669,7 @@ def fit_next_segment_long_not_end_of_ramp( end locations, 1-D mask_2d_init : ndarray - copy of intial mask_2d, 2-D bool + copy of initial mask_2d, 2-D bool ngroups : int number of groups in exposure @@ -2914,7 +2914,7 @@ def fit_short_ngroups( save optional fitting results mask_2d_init : ndarray - copy of intial mask_2d, 2-D bool + copy of initial mask_2d, 2-D bool ramp_mask_sum : ndarray number of channels to fit for each pixel, 1-D int @@ -3147,7 +3147,7 @@ def fit_lines(data, mask_2d, rn_sect, gain_sect, ngroups, weighting, gdq_sect_r, denominator = nreads_1d * sumxx - sumx**2 # In case this branch is ever used again, disable, and then re-enable - # harmless arithmetic warrnings + # harmless arithmetic warnings warnings.filterwarnings("ignore", ".*invalid value.*", RuntimeWarning) warnings.filterwarnings("ignore", ".*divide by zero.*", RuntimeWarning) variance = nreads_1d / denominator @@ -3771,7 +3771,7 @@ def calc_num_seg(gdq, n_int, jump_det, do_not_use): Return ------- max_num_seg : int - The maximum number of segements within an integration + The maximum number of segments within an integration max_cr : int The maximum number of cosmic rays within an integration """ diff --git a/src/stcal/ramp_fitting/utils.py b/src/stcal/ramp_fitting/utils.py index 159dd467..9347bd3f 100644 --- a/src/stcal/ramp_fitting/utils.py +++ b/src/stcal/ramp_fitting/utils.py @@ -787,7 +787,7 @@ def gls_pedestal(first_group, slope_int, s_mask, frame_time, nframes_used): # p nframes_used : int Number of frames that were averaged together to make a group. - Exludes the groupgap. + Excludes the groupgap. Returns ------- diff --git a/tests/test_ramp_fitting.py b/tests/test_ramp_fitting.py index 1e0c5d7f..c4dea55d 100644 --- a/tests/test_ramp_fitting.py +++ b/tests/test_ramp_fitting.py @@ -5,7 +5,7 @@ DELIM = "=" * 70 -# single group intergrations fail in the GLS fitting +# single group integrations fail in the GLS fitting # so, keep the two method test separate and mark GLS test as # expected to fail. Needs fixing, but the fix is not clear # to me. [KDG - 19 Dec 2018] @@ -221,7 +221,7 @@ def base_neg_med_rates_single_integration_multi_segment(): def test_neg_med_rates_single_integration_multi_segment_optional(): """ Test a ramp with multiple segments to make sure the right number of - segments are created and to make sure all Poisson segements are set to + segments are created and to make sure all Poisson segments are set to zero. 
""" slopes, cube, optional, gls_dummy, dims = base_neg_med_rates_single_integration_multi_segment() @@ -355,9 +355,9 @@ def test_miri_ramp_dnu_at_ramp_beginning(): s1 = slopes1[0] tol = 1e-6 - ans = -4.1035075 + answer = -4.1035075 - assert abs(s1[0, 0] - ans) < tol + assert abs(s1[0, 0] - answer) < tol def test_miri_ramp_dnu_and_jump_at_ramp_beginning(): @@ -377,9 +377,9 @@ def test_miri_ramp_dnu_and_jump_at_ramp_beginning(): s2 = slopes2[0] tol = 1e-6 - ans = -4.9032097 + answer = -4.9032097 - assert abs(s2[0, 0] - ans) < tol + assert abs(s2[0, 0] - answer) < tol def test_2_group_cases(): diff --git a/tests/test_ramp_fitting_cases.py b/tests/test_ramp_fitting_cases.py index 5b75a781..040bc11f 100644 --- a/tests/test_ramp_fitting_cases.py +++ b/tests/test_ramp_fitting_cases.py @@ -20,7 +20,7 @@ DELIM = "-" * 80 -# single group intergrations fail in the GLS fitting +# single group integrations fail in the GLS fitting # so, keep the two method test separate and mark GLS test as # expected to fail. Needs fixing, but the fix is not clear # to me. [KDG - 19 Dec 2018] diff --git a/tests/test_ramp_fitting_gls_fit.py b/tests/test_ramp_fitting_gls_fit.py index 3bcd084f..13ac227d 100644 --- a/tests/test_ramp_fitting_gls_fit.py +++ b/tests/test_ramp_fitting_gls_fit.py @@ -316,10 +316,10 @@ def test_simple_ramp(): ramp_data, 512, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags ) - ans = slopes[0][50, 50] + answer = slopes[0][50, 50] check = 20.0 / 3 tol = 1.0e-5 - np.testing.assert_allclose(ans, check, tol) + np.testing.assert_allclose(answer, check, tol) def test_read_noise_only_fit(): @@ -344,12 +344,12 @@ def test_read_noise_only_fit(): xvalues = np.arange(5) * 1.0 yvalues = np.array(ramp_arr) coeff = np.polyfit(xvalues, yvalues, 1) - ans = slopes[0][50, 50] + answer = slopes[0][50, 50] check = coeff[0] tol = 1.0e-2 - # print(f"ans = {ans}") # 11.78866004 + # print(f"answer = {answer}") # 11.78866004 # print(f"check = {check}") # 11.79999999 - np.testing.assert_allclose(ans, check, tol) + np.testing.assert_allclose(answer, check, tol) @pytest.mark.skip(reason="GLS not sure what expected value is.") @@ -372,11 +372,11 @@ def test_photon_noise_only_fit(): ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags ) - ans = slopes[0][50, 50] + answer = slopes[0][50, 50] tol = 1.0e-2 - # print(f"ans = {ans}") # 8.6579208 + # print(f"answer = {answer}") # 8.6579208 # print(f"check = {check}") # 12.5 - np.testing.assert_allclose(ans, check, tol) + np.testing.assert_allclose(answer, check, tol) @pytest.mark.skip(reason="GLS not sure what expected value is.") @@ -403,11 +403,11 @@ def test_photon_noise_only_bad_last_group(): ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags ) - ans = slopes[0][50, 50] + answer = slopes[0][50, 50] tol = 1.0e-2 - # print(f"ans = {ans}") # 8.6579208 + # print(f"answer = {answer}") # 8.6579208 # print(f"check = {check}") # 7.6666666 - np.testing.assert_allclose(ans, check, tol) + np.testing.assert_allclose(answer, check, tol) @pytest.mark.skip(reason="GLS not sure what expected value is.") @@ -435,11 +435,11 @@ def test_photon_noise_with_unweighted_fit(): yvalues = np.array([10, 15, 25, 33, 60]) coeff = np.polyfit(xvalues, yvalues, 1) check = coeff[0] - ans = slopes[0][50, 50] + answer = slopes[0][50, 50] tol = 1.0e-5 - # print(f"ans = {ans}") # 8.6579208 + # print(f"answer = {answer}") # 8.6579208 # print(f"check = {check}") # 11.7999999 - np.testing.assert_allclose(ans, check, tol) + 
np.testing.assert_allclose(answer, check, tol) def test_two_groups_fit(): @@ -505,9 +505,9 @@ def test_four_groups_oneCR_orphangroupatend_fit(): ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags ) - ans = slopes[0][50, 50] + answer = slopes[0][50, 50] tol = 1.0e-6 - np.testing.assert_allclose(ans, check, tol) + np.testing.assert_allclose(answer, check, tol) def test_four_groups_two_CRs_at_end(): @@ -534,9 +534,9 @@ def test_four_groups_two_CRs_at_end(): ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags ) - ans = slopes[0][50, 50] + answer = slopes[0][50, 50] tol = 1.0e-6 - np.testing.assert_allclose(ans, check, tol) + np.testing.assert_allclose(answer, check, tol) @pytest.mark.skip(reason="GLS code does not [yet] handle all groups as jump.") @@ -563,12 +563,12 @@ def test_four_groups_four_CRs(): ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags ) - ans = slopes[0][50, 50] + answer = slopes[0][50, 50] check = 0 tol = 1.0e-6 - # print(f"ans = {ans}") + # print(f"answer = {answer}") # print(f"check = {check}") - np.testing.assert_allclose(ans, check, tol) + np.testing.assert_allclose(answer, check, tol) @pytest.mark.skip(reason="GLS code does not [yet] handle only one good group.") @@ -595,12 +595,12 @@ def test_four_groups_three_CRs_at_end(): ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags ) - ans = slopes[0][50, 50] + answer = slopes[0][50, 50] check = 10.0 tol = 1.0e-6 - # print(f"ans = {ans}") + # print(f"answer = {answer}") # print(f"check = {check}") - np.testing.assert_allclose(ans, check, tol) + np.testing.assert_allclose(answer, check, tol) def test_four_groups_CR_causes_orphan_1st_group(): @@ -624,10 +624,10 @@ def test_four_groups_CR_causes_orphan_1st_group(): ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags ) - ans = slopes[0][50, 50] + answer = slopes[0][50, 50] check = 20.0 tol = 1.0e-6 - np.testing.assert_allclose(ans, check, tol) + np.testing.assert_allclose(answer, check, tol) def test_one_group_fit(): @@ -645,10 +645,10 @@ def test_one_group_fit(): ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags ) - ans = slopes[0][50, 50] + answer = slopes[0][50, 50] check = 10.0 tol = 1.0e-6 - np.testing.assert_allclose(ans, check, tol) + np.testing.assert_allclose(answer, check, tol) @pytest.mark.skip(reason="GLS not sure what expected value is.") @@ -670,12 +670,12 @@ def test_two_groups_unc(): ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags ) - ans = slopes[2][50, 50] + answer = slopes[2][50, 50] check = np.sqrt((deltaDN / gain) / group_time**2 + (rnoise**2 / group_time**2)) tol = 1.0e-6 - # print(f"ans = {ans}") + # print(f"answer = {answer}") # print(f"check = {check}") - np.testing.assert_allclose(ans, check, tol) + np.testing.assert_allclose(answer, check, tol) @pytest.mark.skip(reason="GLS does not comopute VAR_XXX arrays.") @@ -757,8 +757,8 @@ def test_oneCR_10_groups_combination(): ramp_data, bufsize, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags ) - ans = slopes[0][50, 50] - print(f"ans = {ans}") + answer = slopes[0][50, 50] + print(f"answer = {answer}") # TODO Need to add the optional results product to GLS From b2d7d63bb1967aaa47869c1cb8c71bc0b385517a Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 16:41:04 -0500 Subject: [PATCH 23/36] Add repo review --- 
 .pre-commit-config.yaml            |  6 +++++
 docs/conf.py                       |  4 +--
 pyproject.toml                     | 43 ++++++++++++++++++++++++++----
 tests/test_alignment.py            |  1 +
 tests/test_dark_current.py         |  1 +
 tests/test_jump.py                 |  1 +
 tests/test_jump_cas22.py           |  1 +
 tests/test_linearity.py            |  1 +
 tests/test_ramp_fitting.py         |  1 +
 tests/test_ramp_fitting_cas22.py   |  1 +
 tests/test_ramp_fitting_cases.py   |  1 +
 tests/test_ramp_fitting_gls_fit.py |  1 +
 tests/test_saturation.py           |  1 +
 tests/test_twopoint_difference.py  |  2 +-
 14 files changed, 57 insertions(+), 8 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 6f6ec091..bbd505df 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -45,6 +45,12 @@ repos:
     additional_dependencies:
       - black==22.12.0

+- repo: https://github.com/scientific-python/cookie
+  rev: 2023.10.27
+  hooks:
+    - id: sp-repo-review
+      additional_dependencies: ["repo-review[cli]"]
+
 # - repo: https://github.com/pycqa/isort
 #   rev: 5.12.0
 #   hooks:
 #     - id: isort
diff --git a/docs/conf.py b/docs/conf.py
index fe87bb19..2f891aba 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -26,14 +26,14 @@ def setup(app):

 # Read the package's `pyproject.toml` so that we can use relevant
 # values here:
-with open(REPO_ROOT / "pyproject.toml", "rb") as configuration_file:
+with (REPO_ROOT / "pyproject.toml").open("rb") as configuration_file:
     conf = tomllib.load(configuration_file)
 setup_metadata = conf["project"]

 project = setup_metadata["name"]
 primary_author = setup_metadata["authors"][0]
 author = f'{primary_author["name"]} <{primary_author["email"]}>'
-copyright = f'{datetime.now().year}, {primary_author["name"]}'
+copyright = f'{datetime.now().year}, {primary_author["name"]}'  # noqa: A001

 package = importlib.import_module(project)
 version = package.__version__.split("-", 1)[0]
diff --git a/pyproject.toml b/pyproject.toml
index 94a15291..05a062a9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -34,7 +34,7 @@ docs = [
 ]
 test = [
     'psutil',
-    'pytest >=4.6.0',
+    'pytest >=6',
     'pytest-cov',
     'pytest-doctestplus',
 ]
@@ -47,7 +47,6 @@ test = [
 requires = [
     'setuptools >=61',
     'setuptools_scm[toml] >=3.4',
-    'wheel',
     'Cython >=0.29.21',
     'numpy >=1.18',
 ]
@@ -63,11 +62,24 @@ zip-safe = true
 where = ['src']

 [tool.pytest.ini_options]
-minversion = 4.6
+minversion = 6
+log_cli_level = "INFO"
+xfail_strict = true
 doctest_plus = true
 doctest_rst = true
 text_file_format = 'rst'
-addopts = ''
+addopts = [
+    '--color=yes',
+    '--doctest-rst',
+    '-ra',
+    '--strict-config',
+    '--strict-markers',
+]
+testpaths = [
+    "tests",
+    "src/stcal",
+    "docs",
+]
 norecursedirs = [
     'benchmarks',
     '.asv',
@@ -82,7 +94,14 @@ filterwarnings = [

 [tool.ruff]
 line-length = 110
-select = [
+src = [
+    'src',
+    'tests',
+    'docs',
+]
+
+[tool.ruff.lint]
+extend-select = [
     'F', # Pyflakes
     'W', 'E', # pycodestyle
     'C',
@@ -139,6 +158,20 @@ exclude = [
     "S101"
 ]

+[tool.codespell]
+skip="*.pdf,*.fits,*.asdf,.tox,build,./tags,.git,docs/_build"
+# ignore-words-list="""
+# """
+
+[tool.repo-review]
+ignore = [
+    "GH200", # Use dependabot
+    "PC140", # add MyPy to pre-commit
+    "PC180", # use prettier
+    "PC901", # custom pre-commit.ci message
+    "MY100", # Use MyPy
+]
+
 [tool.cibuildwheel.macos]
 archs = ["x86_64", "arm64"]
diff --git a/tests/test_alignment.py b/tests/test_alignment.py
index caf503af..a4041817 100644
--- a/tests/test_alignment.py
+++ b/tests/test_alignment.py
@@ -7,6 +7,7 @@
 from astropy.io import fits
 from astropy.modeling import models
 from gwcs import coordinate_frames as cf
+
 from stcal.alignment import resample_utils
 from stcal.alignment.util
import ( _validate_wcs_list, diff --git a/tests/test_dark_current.py b/tests/test_dark_current.py index 7470a505..c17c728c 100644 --- a/tests/test_dark_current.py +++ b/tests/test_dark_current.py @@ -5,6 +5,7 @@ import numpy as np import pytest from numpy.testing import assert_allclose + from stcal.dark_current.dark_class import DarkData, ScienceData from stcal.dark_current.dark_sub import average_dark_frames_3d as average_dark_frames from stcal.dark_current.dark_sub import do_correction_data as darkcorr diff --git a/tests/test_jump.py b/tests/test_jump.py index 04255a46..0ddbefb1 100644 --- a/tests/test_jump.py +++ b/tests/test_jump.py @@ -1,5 +1,6 @@ import numpy as np import pytest + from stcal.jump.jump import ( calc_num_slices, extend_saturation, diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 61e70da5..d7530ad8 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -1,6 +1,7 @@ import numpy as np import pytest from numpy.testing import assert_allclose + from stcal.ramp_fitting.ols_cas22 import JUMP_DET, Parameter, Variance, fit_ramps from stcal.ramp_fitting.ols_cas22._jump import ( FixedOffsets, diff --git a/tests/test_linearity.py b/tests/test_linearity.py index 51ffee74..bdc2f12e 100644 --- a/tests/test_linearity.py +++ b/tests/test_linearity.py @@ -5,6 +5,7 @@ """ import numpy as np + from stcal.linearity.linearity import linearity_correction DQFLAGS = {"GOOD": 0, "DO_NOT_USE": 1, "SATURATED": 2, "DEAD": 1024, "HOT": 2048, "NO_LIN_CORR": 1048576} diff --git a/tests/test_ramp_fitting.py b/tests/test_ramp_fitting.py index c4dea55d..d8e90610 100644 --- a/tests/test_ramp_fitting.py +++ b/tests/test_ramp_fitting.py @@ -1,4 +1,5 @@ import numpy as np + from stcal.ramp_fitting.ramp_fit import ramp_fit_data from stcal.ramp_fitting.ramp_fit_class import RampData from stcal.ramp_fitting.utils import compute_num_slices diff --git a/tests/test_ramp_fitting_cas22.py b/tests/test_ramp_fitting_cas22.py index 6eaeb523..e6266fb9 100644 --- a/tests/test_ramp_fitting_cas22.py +++ b/tests/test_ramp_fitting_cas22.py @@ -4,6 +4,7 @@ import astropy.units as u import numpy as np import pytest + from stcal.ramp_fitting import ols_cas22_fit as ramp # Purposefully set a fixed seed so that the tests in this module are deterministic diff --git a/tests/test_ramp_fitting_cases.py b/tests/test_ramp_fitting_cases.py index 040bc11f..675e6a75 100644 --- a/tests/test_ramp_fitting_cases.py +++ b/tests/test_ramp_fitting_cases.py @@ -3,6 +3,7 @@ import numpy as np import numpy.testing as npt + from stcal.ramp_fitting.ramp_fit import ramp_fit_data from stcal.ramp_fitting.ramp_fit_class import RampData diff --git a/tests/test_ramp_fitting_gls_fit.py b/tests/test_ramp_fitting_gls_fit.py index 13ac227d..087824f1 100644 --- a/tests/test_ramp_fitting_gls_fit.py +++ b/tests/test_ramp_fitting_gls_fit.py @@ -1,5 +1,6 @@ import numpy as np import pytest + from stcal.ramp_fitting.ramp_fit import ramp_fit_class, ramp_fit_data test_dq_flags = { diff --git a/tests/test_saturation.py b/tests/test_saturation.py index 189ae521..fb207f77 100644 --- a/tests/test_saturation.py +++ b/tests/test_saturation.py @@ -5,6 +5,7 @@ """ import numpy as np + from stcal.saturation.saturation import flag_saturated_pixels # dictionary with required DQ flags diff --git a/tests/test_twopoint_difference.py b/tests/test_twopoint_difference.py index 30a3c84c..5e73141b 100644 --- a/tests/test_twopoint_difference.py +++ b/tests/test_twopoint_difference.py @@ -1,5 +1,6 @@ import numpy as np import pytest + from 
stcal.jump.twopoint_difference import calc_med_first_diffs, find_crs DQFLAGS = {"JUMP_DET": 4, "SATURATED": 2, "DO_NOT_USE": 1} @@ -128,7 +129,6 @@ def test_5grps_cr2_nframe2(setup_cube): assert np.array_equal([0, 4, 4, 0, 0], out_gdq[0, :, 100, 100]) -@pytest.mark.xfail() def test_4grps_twocrs_2nd_4th(setup_cube): ngroups = 4 data, gdq, nframes, read_noise, rej_threshold = setup_cube(ngroups) From e287933118a22d2428a9f57d6d152d58b03bed18 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 19:55:56 -0500 Subject: [PATCH 24/36] Add cython lint --- .pre-commit-config.yaml | 6 +++++ pyproject.toml | 3 +++ src/stcal/ramp_fitting/ols_cas22/_jump.pyx | 29 +++++++++++----------- src/stcal/ramp_fitting/ols_cas22/_ramp.pyx | 13 +++++----- 4 files changed, 31 insertions(+), 20 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bbd505df..a1a81fa3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -38,6 +38,12 @@ repos: args: ["--fix", "--show-fixes"] - id: ruff-format +- repo: https://github.com/MarcoGorelli/cython-lint + rev: v0.15.0 + hooks: + - id: cython-lint + - id: double-quote-cython-strings + - repo: https://github.com/adamchainz/blacken-docs rev: 1.16.0 hooks: diff --git a/pyproject.toml b/pyproject.toml index 05a062a9..2be084401 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -158,6 +158,9 @@ exclude = [ "S101" ] +[tool.cython-lint] +max-line-length = 110 + [tool.codespell] skip="*.pdf,*.fits,*.asdf,.tox,build,./tags,.git,docs/_build" # ignore-words-list=""" diff --git a/src/stcal/ramp_fitting/ols_cas22/_jump.pyx b/src/stcal/ramp_fitting/ols_cas22/_jump.pyx index 3cab336d..90b32700 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_jump.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_jump.pyx @@ -63,7 +63,6 @@ from stcal.ramp_fitting.ols_cas22._jump cimport Thresh, JumpFits, JUMP_DET, Fixe from stcal.ramp_fitting.ols_cas22._ramp cimport RampIndex, RampQueue, RampFit, fit_ramp, init_ramps - @boundscheck(False) @wraparound(False) @cdivision(True) @@ -246,6 +245,7 @@ cdef inline float _correction(float[:] t_bar, RampIndex ramp, float slope): return - slope / diff + @boundscheck(False) @wraparound(False) @cdivision(True) @@ -362,22 +362,23 @@ cdef inline (int, float) _fit_statistic(float[:, :] pixel, correct) # Compute the rest of the fit statistics - cdef float stat + cdef float stat, stat1, stat2 cdef int stat_index for stat_index, index in enumerate(range(ramp.start, ramp.end - 1)): # Compute max of single and double difference statistics - stat = fmaxf(_statstic(pixel[single_local_slope, index], - pixel[single_var_read_noise, index], - fixed[single_t_bar_diff_sqr, index], - fixed[single_var_slope_val, index], - slope, - correct), - _statstic(pixel[double_local_slope, index], - pixel[double_var_read_noise, index], - fixed[double_t_bar_diff_sqr, index], - fixed[double_var_slope_val, index], - slope, - correct)) + stat1 = _statstic(pixel[single_local_slope, index], + pixel[single_var_read_noise, index], + fixed[single_t_bar_diff_sqr, index], + fixed[single_var_slope_val, index], + slope, + correct) + stat2 = _statstic(pixel[double_local_slope, index], + pixel[double_var_read_noise, index], + fixed[double_t_bar_diff_sqr, index], + fixed[double_var_slope_val, index], + slope, + correct) + stat = fmaxf(stat1, stat2) # If this is larger than the current max, update the max if stat > max_stat: diff --git a/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx b/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx index a6988e12..ba3e37c9 100644 --- 
a/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx @@ -135,11 +135,12 @@ cpdef ReadPattern from_read_pattern(list[list[int]] read_pattern, float read_tim cdef int index, n_reads cdef list[int] resultant for index, resultant in enumerate(read_pattern): - n_reads = len(resultant) + n_reads = len(resultant) - data.n_reads[index] = n_reads - data.t_bar[index] = read_time * np.mean(resultant) - data.tau[index] = np.sum((2 * (n_reads - np.arange(n_reads)) - 1) * resultant) * read_time / n_reads**2 + data.n_reads[index] = n_reads + data.t_bar[index] = read_time * np.mean(resultant) + data.tau[index] = (np.sum((2 * (n_reads - np.arange(n_reads)) - 1) * resultant) * + read_time / n_reads**2) return data @@ -209,7 +210,7 @@ ctypedef float[6] _row # Casertano+2022, Table 2 cdef _row[2] _PTABLE = [[-INFINITY, 5, 10, 20, 50, 100], - [ 0, 0.4, 1, 3, 6, 10 ]] + [0, 0.4, 1, 3, 6, 10]] @boundscheck(False) @@ -324,7 +325,7 @@ cdef inline RampFit fit_ramp(float[:] resultants_, for i in range(n_resultants): # Casertano+22, Eq. 45 weights[i] = ((((1 + power) * n_reads[i]) / (1 + power * n_reads[i])) * - fabs((t_bar[i] - t_bar_mid) / t_scale) ** power) + fabs((t_bar[i] - t_bar_mid) / t_scale) ** power) # Casertano+22 Eq. 35 f0 += weights[i] From f0a8ea3cf74fd89f07f8f1a61aacfd1de8d26b0c Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 20:00:26 -0500 Subject: [PATCH 25/36] Enable isort for cython files only --- .pre-commit-config.yaml | 10 ++++++---- pyproject.toml | 5 +++++ src/stcal/ramp_fitting/ols_cas22/_fit.pyx | 17 +++++++++-------- src/stcal/ramp_fitting/ols_cas22/_jump.pyx | 9 ++++----- src/stcal/ramp_fitting/ols_cas22/_ramp.pyx | 10 ++++------ 5 files changed, 28 insertions(+), 23 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a1a81fa3..9d67261a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -57,10 +57,12 @@ repos: - id: sp-repo-review additional_dependencies: ["repo-review[cli]"] -# - repo: https://github.com/pycqa/isort -# rev: 5.12.0 -# hooks: -# - id: isort +- repo: https://github.com/pycqa/isort + rev: 5.12.0 + hooks: + - id: isort + name: isort (cython) + types: [cython] # - repo: https://github.com/psf/black # rev: 23.7.0 diff --git a/pyproject.toml b/pyproject.toml index 2be084401..3eac6b0b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -161,6 +161,11 @@ exclude = [ [tool.cython-lint] max-line-length = 110 +[tool.isort] +profile = "black" +filter_files = true +line_length = 110 + [tool.codespell] skip="*.pdf,*.fits,*.asdf,.tox,build,./tags,.git,docs/_build" # ignore-words-list=""" diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit.pyx index b7bec6d3..dbe3c536 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit.pyx @@ -29,23 +29,24 @@ fit_ramps : function from __future__ import annotations import numpy as np -cimport numpy as cnp +cimport numpy as cnp from cython cimport boundscheck, wraparound from libcpp cimport bool from libcpp.list cimport list as cpp_list -from stcal.ramp_fitting.ols_cas22._jump cimport (Thresh, - JumpFits, - fill_fixed_values, - fit_jumps, - n_fixed_offsets, - n_pixel_offsets) +from stcal.ramp_fitting.ols_cas22._jump cimport ( + JumpFits, + Thresh, + fill_fixed_values, + fit_jumps, + n_fixed_offsets, + n_pixel_offsets, +) from stcal.ramp_fitting.ols_cas22._ramp cimport ReadPattern, from_read_pattern from typing import NamedTuple - # Initialize numpy for cython 
use in this module cnp.import_array() diff --git a/src/stcal/ramp_fitting/ols_cas22/_jump.pyx b/src/stcal/ramp_fitting/ols_cas22/_jump.pyx index 90b32700..808482f3 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_jump.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_jump.pyx @@ -54,13 +54,12 @@ fit_jumps : function to splitting across detected jumps (if jump detection is turned on). """ -from cython cimport boundscheck, wraparound, cdivision - +from cython cimport boundscheck, cdivision, wraparound +from libc.math cimport NAN, fmaxf, isnan, log10, sqrt from libcpp cimport bool -from libc.math cimport sqrt, log10, fmaxf, NAN, isnan -from stcal.ramp_fitting.ols_cas22._jump cimport Thresh, JumpFits, JUMP_DET, FixedOffsets, PixelOffsets -from stcal.ramp_fitting.ols_cas22._ramp cimport RampIndex, RampQueue, RampFit, fit_ramp, init_ramps +from stcal.ramp_fitting.ols_cas22._jump cimport JUMP_DET, FixedOffsets, JumpFits, PixelOffsets, Thresh +from stcal.ramp_fitting.ols_cas22._ramp cimport RampFit, RampIndex, RampQueue, fit_ramp, init_ramps @boundscheck(False) diff --git a/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx b/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx index ba3e37c9..cf9b9336 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx @@ -50,15 +50,13 @@ fit_ramps : function listed for a single pixel """ import numpy as np -cimport numpy as cnp - -from cython cimport boundscheck, wraparound, cdivision, cpow -from libc.math cimport sqrt, fabs, INFINITY, NAN, fmaxf +cimport numpy as cnp +from cython cimport boundscheck, cdivision, cpow, wraparound +from libc.math cimport INFINITY, NAN, fabs, fmaxf, sqrt from libcpp.vector cimport vector -from stcal.ramp_fitting.ols_cas22._ramp cimport RampIndex, RampQueue, RampFit, ReadPattern - +from stcal.ramp_fitting.ols_cas22._ramp cimport RampFit, RampIndex, RampQueue, ReadPattern # Initialize numpy for cython use in this module cnp.import_array() From 1bbbc887779945e5c81518b47ae26d9ea8240cef Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 20:02:28 -0500 Subject: [PATCH 26/36] Clean up pyproject.toml file --- pyproject.toml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 3eac6b0b..3d7d1688 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -104,12 +104,11 @@ src = [ extend-select = [ 'F', # Pyflakes 'W', 'E', # pycodestyle - 'C', 'I', # isort # 'N', # pep8-naming 'UP', # pyupgrade 'S', # flake8-bandit - # 'BLE', # flake8-blind-except + 'BLE', # flake8-blind-except 'B', # flake8-bugbear 'A', # flake8-builtins (prevent shadowing of builtins) 'C4', # flake8-comprehensions (best practices for comprehensions) @@ -138,7 +137,6 @@ extend-select = [ 'RUF', # ruff specific checks ] ignore = [ - 'C901', # variable is too complex 'ISC001', # interferes with formatter 'PLR0912', # Too many branches 'PLR0913', # Too many arguments From 27ff844ab33fd2abf168b5004f92d32da4176318 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 20:07:11 -0500 Subject: [PATCH 27/36] Enable prettier for non-python code formatting --- .github/labeler.yml | 50 ++++++------ .github/pull_request_template.md | 4 + .github/workflows/build.yml | 2 +- .github/workflows/ci.yml | 4 +- .pre-commit-config.yaml | 128 +++++++++++++++---------------- CODE_OF_CONDUCT.md | 1 - README.md | 31 ++++---- docs/rtd_environment.yaml | 12 +-- pyproject.toml | 1 - 9 files changed, 113 insertions(+), 120 deletions(-) diff --git a/.github/labeler.yml b/.github/labeler.yml 
index 32b3d93b..8062bd5e 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -1,46 +1,46 @@ documentation: - - 'docs/**/*' - - any: [ '*.rst', '!CHANGES.rst' ] - - '*.md' - - '.readthedocs.yaml' - - 'LICENSE' + - "docs/**/*" + - any: ["*.rst", "!CHANGES.rst"] + - "*.md" + - ".readthedocs.yaml" + - "LICENSE" installation: - - 'pyproject.toml' - - 'setup.*' - - 'requirements-*.txt' - - 'MANIFEST.in' + - "pyproject.toml" + - "setup.*" + - "requirements-*.txt" + - "MANIFEST.in" # --------------------------------------- testing --------------------------------------- automation: - - '.github/**' - - '.bandit.yaml' - - '.codecov.yml' + - ".github/**" + - ".bandit.yaml" + - ".codecov.yml" testing: - - '**/tests/**' - - '.github/workflows/ci*.yml' - - 'conftest.py' + - "**/tests/**" + - ".github/workflows/ci*.yml" + - "conftest.py" # --------------------------------------- modules --------------------------------------- dark_current: - - '**/*dark_current*' - - '**/*dark_current*/**' + - "**/*dark_current*" + - "**/*dark_current*/**" jump: - - '**/*jump*' - - '**/*jump*/**' + - "**/*jump*" + - "**/*jump*/**" linearity: - - '**/*linearity*' - - '**/*linearity*/**' + - "**/*linearity*" + - "**/*linearity*/**" ramp_fitting: - - '**/*ramp_fitting*' - - '**/*ramp_fitting*/**' + - "**/*ramp_fitting*" + - "**/*ramp_fitting*/**" saturation: - - '**/*saturation*' - - '**/*saturation*/**' + - "**/*saturation*" + - "**/*saturation*/**" diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 8f551808..de860697 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,15 +1,19 @@ + Resolves [JP-nnnn](https://jira.stsci.edu/browse/JP-nnnn) Resolves [RCAL-nnnn](https://jira.stsci.edu/browse/RCAL-nnnn) + Closes # + This PR addresses ... 
**Checklist** + - [ ] added entry in `CHANGES.rst` (either in `Bug Fixes` or `Changes to API`) - [ ] updated relevant tests - [ ] updated relevant documentation diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 4bf8f0a0..96b0d920 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -2,7 +2,7 @@ name: build on: release: - types: [ released ] + types: [released] pull_request: workflow_dispatch: diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9591f594..4950b5f1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -4,9 +4,9 @@ on: push: branches: - main - - '*x' + - "*x" tags: - - '*' + - "*" pull_request: branches: - main diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9d67261a..01fd3614 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,77 +1,69 @@ repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 + hooks: + - id: check-added-large-files + - id: check-ast + - id: check-case-conflict + - id: check-yaml + args: ["--unsafe"] + - id: check-toml + - id: check-merge-conflict + - id: check-symlinks + - id: debug-statements + - id: detect-private-key + - id: end-of-file-fixer + - id: trailing-whitespace -- repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 - hooks: - - id: check-added-large-files - - id: check-ast - - id: check-case-conflict - - id: check-yaml - args: ["--unsafe"] - - id: check-toml - - id: check-merge-conflict - - id: check-symlinks - - id: debug-statements - - id: detect-private-key - - id: end-of-file-fixer - - id: trailing-whitespace + - repo: https://github.com/pre-commit/pygrep-hooks + rev: v1.10.0 + hooks: + - id: rst-directive-colons + - id: rst-inline-touching-normal + - id: text-unicode-replacement-char -- repo: https://github.com/pre-commit/pygrep-hooks - rev: v1.10.0 - hooks: - - id: rst-directive-colons - - id: rst-inline-touching-normal - - id: text-unicode-replacement-char + - repo: https://github.com/codespell-project/codespell + rev: v2.2.5 + hooks: + - id: codespell + args: ["--write-changes"] + additional_dependencies: + - tomli -- repo: https://github.com/codespell-project/codespell - rev: v2.2.5 - hooks: - - id: codespell - args: ["--write-changes"] - additional_dependencies: - - tomli + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: "v0.1.5" + hooks: + - id: ruff + args: ["--fix", "--show-fixes"] + - id: ruff-format -- repo: https://github.com/astral-sh/ruff-pre-commit - rev: 'v0.1.5' - hooks: - - id: ruff - args: ["--fix", "--show-fixes"] - - id: ruff-format + - repo: https://github.com/MarcoGorelli/cython-lint + rev: v0.15.0 + hooks: + - id: cython-lint + - id: double-quote-cython-strings -- repo: https://github.com/MarcoGorelli/cython-lint - rev: v0.15.0 - hooks: - - id: cython-lint - - id: double-quote-cython-strings + - repo: https://github.com/pycqa/isort + rev: 5.12.0 + hooks: + - id: isort + name: isort (cython) + types: [cython] -- repo: https://github.com/adamchainz/blacken-docs - rev: 1.16.0 - hooks: - - id: blacken-docs - additional_dependencies: - - black==22.12.0 + - repo: https://github.com/adamchainz/blacken-docs + rev: 1.16.0 + hooks: + - id: blacken-docs + additional_dependencies: + - black==22.12.0 -- repo: https://github.com/scientific-python/cookie - rev: 2023.10.27 - hooks: - - id: sp-repo-review - additional_dependencies: ["repo-review[cli]"] + - repo: https://github.com/pre-commit/mirrors-prettier + rev: "v3.0.1" + hooks: + - id: prettier -- repo: 
https://github.com/pycqa/isort - rev: 5.12.0 - hooks: - - id: isort - name: isort (cython) - types: [cython] - -# - repo: https://github.com/psf/black -# rev: 23.7.0 -# hooks: -# - id: black - -# - repo: https://github.com/PyCQA/bandit -# rev: 1.7.5 -# hooks: -# - id: bandit -# args: ["-c", "pyproject.toml"] -# additional_dependencies: ["bandit[toml]"] + - repo: https://github.com/scientific-python/cookie + rev: 2023.10.27 + hooks: + - id: sp-repo-review + additional_dependencies: ["repo-review[cli]"] diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index ddba00df..8d726b0f 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -2,7 +2,6 @@ We expect all "spacetelescope" organization projects to adopt a code of conduct that ensures a productive, respectful environment for all open source contributors and participants. We are committed to providing a strong and enforced code of conduct and expect everyone in our community to follow these guidelines when interacting with others in all forums. Our goal is to keep ours a positive, inclusive, successful, and growing community. The community of participants in open source Astronomy projects is made up of members from around the globe with a diverse set of skills, personalities, and experiences. It is through these differences that our community experiences success and continued growth. - As members of the community, - We pledge to treat all people with respect and provide a harassment- and bullying-free environment, regardless of sex, sexual orientation and/or gender identity, disability, physical appearance, body size, race, nationality, ethnicity, and religion. In particular, sexual language and imagery, sexist, racist, or otherwise exclusionary jokes are not appropriate. diff --git a/README.md b/README.md index 540bb821..85935db2 100644 --- a/README.md +++ b/README.md @@ -12,21 +12,20 @@ STScI Calibration algorithms and tools. **STCAL requires Python 3.9 or above and a C compiler for dependencies.** -**Linux and MacOS platforms are tested and supported. Windows is not currently supported.** +**Linux and MacOS platforms are tested and supported. Windows is not currently supported.** **If installing on MacOS Mojave 10.14, you must install - into an environment with python 3.9. Installation will fail on python 3.10 due - to lack of a stable build for dependency ``opencv-python``.** - +into an environment with python 3.9. Installation will fail on python 3.10 due +to lack of a stable build for dependency `opencv-python`.** `STCAL` is intended to be used as a support package for calibration pipeline -software, such as the `JWST` and `Roman` calibration pipelines. `STCAL` is a +software, such as the `JWST` and `Roman` calibration pipelines. `STCAL` is a separate package because it is also intended to be software that can be reused -by multiple calibration pipelines. Even though it is intended to be a support +by multiple calibration pipelines. Even though it is intended to be a support package for calibration pipelines, it can be installed and used as a stand alone -package. This could make usage unwieldy as it is easier to use `STCAL` through -calibration software. The main use case for stand alone installation is for -development purposes, such as bug fixes and feature additions. When installing +package. This could make usage unwieldy as it is easier to use `STCAL` through +calibration software. The main use case for stand alone installation is for +development purposes, such as bug fixes and feature additions. 
When installing calibration pipelines that depend on `STCAL` this package automatically gets installed as a dependency. @@ -48,9 +47,10 @@ If desired, you can create multiple environments to allow for switching between versions of the `stcal` package (e.g. a released version versus the current development version). In all cases, the installation is generally a 3-step process: -* Create a conda environment -* Activate that environment -* Install the desired version of the `stcal` package into that environment + +- Create a conda environment +- Activate that environment +- Install the desired version of the `stcal` package into that environment Details are given below on how to do this for different types of installations, including tagged releases and development versions. @@ -58,7 +58,7 @@ Remember that all conda operations must be done from within a bash/zsh shell. ### Installing latest releases -You can install the latest released version via `pip`. From a bash/zsh shell: +You can install the latest released version via `pip`. From a bash/zsh shell: conda create -n python conda activate @@ -100,7 +100,7 @@ fork and clone the `stcal` repo: git clone https://github.com/spacetelescope/stcal cd stcal -*Note: `python setup.py install` and `python setup.py develop` commands do not work.* +_Note: `python setup.py install` and `python setup.py develop` commands do not work._ Install from your local checked-out copy as an "editable" install: @@ -117,7 +117,6 @@ Need other useful packages in your development environment? pip install ipython jupyter matplotlib pylint ipdb - ## Contributions and Feedback We welcome contributions and feedback on the project. Please follow the @@ -131,7 +130,7 @@ at https://github.com/spacetelescope/stcal/issues. ## Unit Tests -Unit tests can be run via `pytest`. Within the top level of your local `stcal` repo checkout: +Unit tests can be run via `pytest`. 
Within the top level of your local `stcal` repo checkout:

    pip install -e ".[test]"
    pytest
diff --git a/docs/rtd_environment.yaml b/docs/rtd_environment.yaml
index aeebc4df..161fef38 100644
--- a/docs/rtd_environment.yaml
+++ b/docs/rtd_environment.yaml
@@ -1,9 +1,9 @@
 name: rtd311
 channels:
-    - conda-forge
-    - defaults
+  - conda-forge
+  - defaults
 dependencies:
-    - python=3.11
-    - pip
-    - graphviz
-    - sphinx_rtd_theme>1.2.0
+  - python=3.11
+  - pip
+  - graphviz
+  - sphinx_rtd_theme>1.2.0
diff --git a/pyproject.toml b/pyproject.toml
index 3d7d1688..b8b0774c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -173,7 +173,6 @@ skip="*.pdf,*.fits,*.asdf,.tox,build,./tags,.git,docs/_build"
 ignore = [
     "GH200", # Use dependabot
     "PC140", # add MyPy to pre-commit
-    "PC180", # use prettier
     "PC901", # custom pre-commit.ci message
     "MY100", # Use MyPy
 ]

From 059ee116271da611da9c3f99e8f140ffd331456f Mon Sep 17 00:00:00 2001
From: William Jamieson
Date: Fri, 10 Nov 2023 20:26:45 -0500
Subject: [PATCH 28/36] Enable some pydocstyle checks

---
 pyproject.toml                           | 67 +++++++++++++++---------
 src/stcal/alignment/resample_utils.py    |  2 +-
 src/stcal/alignment/util.py              | 21 +++-----
 src/stcal/dark_current/dark_class.py     |  4 +-
 src/stcal/dark_current/dark_sub.py       |  4 ---
 src/stcal/jump/jump.py                   |  3 +-
 src/stcal/jump/twopoint_difference.py    |  4 +-
 src/stcal/linearity/linearity.py         |  3 ---
 src/stcal/ramp_fitting/gls_fit.py        |  9 +---
 src/stcal/ramp_fitting/ols_cas22_fit.py  |  1 -
 src/stcal/ramp_fitting/ols_fit.py        | 12 ++---
 src/stcal/ramp_fitting/ramp_fit_class.py |  5 +-
 src/stcal/ramp_fitting/utils.py          | 12 ++---
 src/stcal/saturation/saturation.py       |  1 -
 14 files changed, 65 insertions(+), 83 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index b8b0774c..7f4e8128 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -106,35 +106,36 @@ extend-select = [
     'F', # Pyflakes
     'W', 'E', # pycodestyle
     'I', # isort
     # 'N', # pep8-naming
+    'D', # pydocstyle
     'UP', # pyupgrade
     'S', # flake8-bandit
     'BLE', # flake8-blind-except
     'B', # flake8-bugbear
-    'A', # flake8-builtins (prevent shadowing of builtins)
-    'C4', # flake8-comprehensions (best practices for comprehensions)
-    'T10', # flake8-debugger (prevent debugger statements in code)
-    'ISC', # flake8-implicit-str-concat (prevent implicit string concat)
-    'ICN', # flake8-import-conventions (enforce import conventions)
-    'INP', # flake8-no-pep420 (prevent use of PEP420, i.e.
implicit name spaces)
-    'G', # flake8-logging-format (best practices for logging)
-    'PIE', # flake8-pie (misc suggested improvement linting)
-    # 'T20', # flake8-print (prevent print statements in code)
-    'PT', # flake8-pytest-style (best practices for pytest)
-    'Q', # flake8-quotes (best practices for quotes)
-    'RSE', # flake8-raise (best practices for raising exceptions)
-    'RET', # flake8-return (best practices for return statements)
-    'SLF', # flake8-self (prevent private member access)
-    'TID', # flake8-tidy-imports (prevent banned api and best import practices)
-    'INT', # flake8-gettext (when to use printf style strings)
-    # 'ARG', # flake8-unused-arguments (prevent unused arguments)
-    'PTH', # flake8-use-pathlib (prefer pathlib over os.path)
-    # 'ERA', # eradicate (remove commented out code)
-    'PGH', # pygrep (simple grep checks)
-    'PL', # pylint (general linting, flake8 alternative)
-    'FLY', # flynt (f-string conversion where possible)
-    'NPY', # NumPy-specific checks (recommendations from NumPy)
-    'PERF', # Perflint (performance linting)
-    'RUF', # ruff specific checks
+    'A',     # flake8-builtins (prevent shadowing of builtins)
+    'C4',    # flake8-comprehensions (best practices for comprehensions)
+    'T10',   # flake8-debugger (prevent debugger statements in code)
+    'ISC',   # flake8-implicit-str-concat (prevent implicit string concat)
+    'ICN',   # flake8-import-conventions (enforce import conventions)
+    'INP',   # flake8-no-pep420 (prevent use of PEP420, i.e. implicit name spaces)
+    'G',     # flake8-logging-format (best practices for logging)
+    'PIE',   # flake8-pie (misc suggested improvement linting)
+    # 'T20', # flake8-print (prevent print statements in code)
+    'PT',    # flake8-pytest-style (best practices for pytest)
+    'Q',     # flake8-quotes (best practices for quotes)
+    'RSE',   # flake8-raise (best practices for raising exceptions)
+    'RET',   # flake8-return (best practices for return statements)
+    'SLF',   # flake8-self (prevent private member access)
+    'TID',   # flake8-tidy-imports (prevent banned api and best import practices)
+    'INT',   # flake8-gettext (when to use printf style strings)
+    # 'ARG', # flake8-unused-arguments (prevent unused arguments)
+    'PTH',   # flake8-use-pathlib (prefer pathlib over os.path)
+    # 'ERA', # eradicate (remove commented out code)
+    'PGH',   # pygrep (simple grep checks)
+    'PL',    # pylint (general linting, flake8 alternative)
+    'FLY',   # flynt (f-string conversion where possible)
+    'NPY',   # NumPy-specific checks (recommendations from NumPy)
+    'PERF',  # Perflint (performance linting)
+    'RUF',   # ruff specific checks
 ]
 ignore = [
     'ISC001', # interferes with formatter
@@ -142,6 +143,16 @@ ignore = [
     'PLR0913', # Too many arguments
     'PLR0915', # Too many statements
     'PLR2004', # Magic value used in comparison
+
+    # Pydocstyle (to fix over time)
+    'D100', # Undocumented public module
+    'D101', # Undocumented public class
+    'D102', # Undocumented public method
+    'D103', # Undocumented public function
+    'D104', # Undocumented public package
+    'D205', # 1 blank line required between summary line and description
+    'D401', # First line of docstring should be in imperative mood
+    'D404', # First word of docstring should not be This
 ]
 exclude = [
     'docs',
@@ -153,9 +164,13 @@ exclude = [

 [tool.ruff.lint.extend-per-file-ignores]
 "tests/*.py" = [
-    "S101"
+    "S101",
+    "D",
 ]

+[tool.ruff.lint.pydocstyle]
+convention = "numpy"
+
 [tool.cython-lint]
 max-line-length = 110
diff --git a/src/stcal/alignment/resample_utils.py b/src/stcal/alignment/resample_utils.py
index b3de4d27..9de7c305 100644
---
a/src/stcal/alignment/resample_utils.py +++ b/src/stcal/alignment/resample_utils.py @@ -10,7 +10,7 @@ def calc_pixmap(in_wcs, out_wcs, shape=None): - """Return a pixel grid map from input frame to output frame + """Return a pixel grid map from input frame to output frame. Parameters ---------- diff --git a/src/stcal/alignment/util.py b/src/stcal/alignment/util.py index 66029b3b..54e8a7f8 100644 --- a/src/stcal/alignment/util.py +++ b/src/stcal/alignment/util.py @@ -1,7 +1,4 @@ -""" -Common utility functions for datamodel alignment. - -""" +"""Common utility functions for datamodel alignment.""" from __future__ import annotations import functools @@ -490,7 +487,6 @@ def compute_fiducial(wcslist: list, bounding_box=None) -> np.ndarray: ----- This function assumes all WCSs have the same output coordinate frame. """ - axes_types = wcslist[0].output_frame.axes_type spatial_axes = np.array(axes_types) == "SPATIAL" spectral_axes = np.array(axes_types) == "SPECTRAL" @@ -507,7 +503,7 @@ def compute_fiducial(wcslist: list, bounding_box=None) -> np.ndarray: def calc_rotation_matrix(roll_ref: float, v3i_yangle: float, vparity: int = 1) -> list[float]: - """Calculate the rotation matrix. + r"""Calculate the rotation matrix. Parameters ---------- @@ -632,7 +628,6 @@ def wcs_from_footprints( The WCS object corresponding to the combined input footprints. """ - wcs_list = [im.meta.wcs for im in dmodels] _validate_wcs_list(wcs_list) @@ -672,7 +667,6 @@ def update_s_region_imaging(model, center=True): Whether or not to use the center of the pixel as reference for the coordinates, by default True """ - bbox = model.meta.wcs.bounding_box if bbox is None: @@ -702,6 +696,7 @@ def wcs_bbox_from_shape(shape): """Create a bounding box from the shape of the data. This is appropriate to attach to a wcs object + Parameters ---------- shape : tuple @@ -766,7 +761,7 @@ def _get_forward_transform_func(wcs1): fitswcs.WCS object all_pix2world requires three inputs, the x (str, ndarrray), y (str, ndarray), and origin (int). The origin should be between 0, and 1 https://docs.astropy.org/en/latest/wcs/index.html#loading-wcs-information-from-a-fits-file - ) + ). """ if isinstance(wcs1, fitswcs.WCS): forward_transform = wcs1.all_pix2world @@ -789,15 +784,15 @@ def _reproject(x: float | np.ndarray, y: float | np.ndarray) -> tuple: """ Reprojects the input coordinates from one WCS to another. - Parameters: - ----------- + Parameters + ---------- x : float or np.ndarray x-coordinate(s) to be reprojected. y : float or np.ndarray y-coordinate(s) to be reprojected. - Returns: - -------- + Returns + ------- tuple Tuple of np.ndarrays including reprojected x and y coordinates. """ diff --git a/src/stcal/dark_current/dark_class.py b/src/stcal/dark_current/dark_class.py index d49ecb92..8bc29806 100644 --- a/src/stcal/dark_current/dark_class.py +++ b/src/stcal/dark_current/dark_class.py @@ -21,7 +21,7 @@ def __init__(self, dims=None, dark_model=None): to set wanted values. Parameters - --------- + ---------- dims : tuple, optional A tuple of integers to describe the dimensions of the arrays used during the dark current step. This argument is only used if the @@ -78,7 +78,7 @@ def __init__(self, science_model=None): from the data. Parameters - --------- + ---------- science_model : data model, optional Input data model, assumed to be a JWST RampModel like model.
If this is None, then the class instantiator is responsible for diff --git a/src/stcal/dark_current/dark_sub.py b/src/stcal/dark_current/dark_sub.py index 82041067..d35b3cf5 100644 --- a/src/stcal/dark_current/dark_sub.py +++ b/src/stcal/dark_current/dark_sub.py @@ -69,7 +69,6 @@ def do_correction_data(science_data, dark_data, dark_output=None): averaged_dark : DarkData New dark object with averaged frames """ - # Save some data params for easy use later sci_nints = science_data.data.shape[0] sci_ngroups = science_data.data.shape[1] @@ -195,7 +194,6 @@ def average_dark_frames_3d(dark_data, ngroups, nframes, groupgap): avg_dark : DarkData New dark object with averaged frames """ - # Create a model for the averaged dark data dny = dark_data.data.shape[1] dnx = dark_data.data.shape[2] @@ -268,7 +266,6 @@ def average_dark_frames_4d(dark_data, nints, ngroups, nframes, groupgap): avg_dark : dark data model New dark object with averaged frames """ - # Create a model for the averaged dark data dint = dark_data.data.shape[0] dny = dark_data.data.shape[2] @@ -342,7 +339,6 @@ def subtract_dark(science_data, dark_data): output : data model object dark-subtracted science data """ - # The integration start number is only needed for JWST/MIRI data. # It defaults to 1 if the keyword is not in the science data. int_start = 1 if science_data.exp_intstart is None else science_data.exp_intstart diff --git a/src/stcal/jump/jump.py b/src/stcal/jump/jump.py index 94fc6ba5..c6f070de 100644 --- a/src/stcal/jump/jump.py +++ b/src/stcal/jump/jump.py @@ -499,7 +499,6 @@ def flag_large_events( Parameters ---------- - gdq : int, 4D array Group dq array jump_flag : int @@ -532,7 +531,6 @@ def flag_large_events( Nothing, gdq array is modified. """ - log.info("Flagging large Snowballs") n_showers_grp = [] @@ -825,6 +823,7 @@ def find_faint_extended( The number of groups after the detected shower to be flagged as jump. max_extended_radius: int The upper limit for the extension of saturation and jump + Returns ------- gdq : int, 4D array diff --git a/src/stcal/jump/twopoint_difference.py b/src/stcal/jump/twopoint_difference.py index e9922633..b835948b 100644 --- a/src/stcal/jump/twopoint_difference.py +++ b/src/stcal/jump/twopoint_difference.py @@ -110,6 +110,7 @@ def find_crs( integrations. This means that a group will only be compared against the same group in other integrations. If False all groups across all integrations will be used to detect outliers. + Returns ------- gdq : int, 4D array @@ -434,7 +435,7 @@ def calc_med_first_diffs(first_diffs): difference will be returned. Parameters - ----------- + ---------- first_diffs : array, float array containing the first differences of adjacent groups for a single integration. Can be 3d or 1d (for a single pix) @@ -447,7 +448,6 @@ def calc_med_first_diffs(first_diffs): array of several pixels, a 2d array with the median for each pixel will be returned. 
""" - if first_diffs.ndim == 1: # in the case where input is a single pixel num_usable_groups = len(first_diffs) - np.sum(np.isnan(first_diffs), axis=0) if num_usable_groups >= 4: # if 4+, clip largest and return median diff --git a/src/stcal/linearity/linearity.py b/src/stcal/linearity/linearity.py index 0225d5e1..fb083c5d 100644 --- a/src/stcal/linearity/linearity.py +++ b/src/stcal/linearity/linearity.py @@ -207,7 +207,6 @@ def correct_for_NaN(lin_coeffs, pixeldq, dqflags): lin_coeffs: 3D array updated array of correction coefficients in reference file """ - wh_nan = np.where(np.isnan(lin_coeffs)) znan, ynan, xnan = wh_nan[0], wh_nan[1], wh_nan[2] num_nan = 0 @@ -249,7 +248,6 @@ def correct_for_zero(lin_coeffs, pixeldq, dqflags): lin_coeffs: 3D array updated array of correction coefficients in reference file """ - # The critical coefficient that should not be zero is the linear term other terms are fine to be zero linear_term = lin_coeffs[1, :, :] wh_zero = np.where(linear_term == 0) @@ -296,7 +294,6 @@ def correct_for_flag(lin_coeffs, lin_dq, dqflags): lin_coeffs: 3D array updated array of correction coefficients in reference file """ - wh_flag = np.bitwise_and(lin_dq, dqflags["NO_LIN_CORR"]) num_flag = len(np.where(wh_flag > 0)[0]) diff --git a/src/stcal/ramp_fitting/gls_fit.py b/src/stcal/ramp_fitting/gls_fit.py index aa4035e2..75532833 100644 --- a/src/stcal/ramp_fitting/gls_fit.py +++ b/src/stcal/ramp_fitting/gls_fit.py @@ -228,9 +228,7 @@ def assemble_pool_results(ramp_data, save_opt, pool_results, rows_per_slice): def create_outputs(ramp_data): - """ - Create the output arrays needed for multiprocessing reassembly. - """ + """Create the output arrays needed for multiprocessing reassembly.""" image_info = create_output_image(ramp_data) integ_info = create_output_integ(ramp_data) opt_res = create_output_opt_res(ramp_data) @@ -521,7 +519,7 @@ def gls_fit_single(ramp_data, gain_2d, readnoise_2d, max_num_cr, save_opt): Save optional product. Returns - -------- + ------- image_info: tuple Tuple of ndarrays computed for the primary product for ramp fitting. @@ -1175,7 +1173,6 @@ def evaluate_fit(intercept_sect, slope_sect, cr_sect, frame_time, group_time, gd This is the same shape as data_sect, and if the fit is good, fit_model and data_sect should not differ by much. """ - shape_3d = gdq_sect.shape # the ramp, (ngroups, ny, nx) ngroups = gdq_sect.shape[0] @@ -1236,7 +1233,6 @@ def positive_fit(current_fit): This is the same as the input current_fit, except that zero and negative values will have been replaced by a positive value. """ - return np.where(current_fit <= 0.0, FIT_MUST_BE_POSITIVE, current_fit) @@ -1559,7 +1555,6 @@ def gls_fit( The variance for the intercept, slope, and for the amplitude of each cosmic ray that was detected. """ - M = float(nframes_used) ngroups = ramp_data.shape[0] diff --git a/src/stcal/ramp_fitting/ols_cas22_fit.py b/src/stcal/ramp_fitting/ols_cas22_fit.py index 343bf4db..9203686e 100644 --- a/src/stcal/ramp_fitting/ols_cas22_fit.py +++ b/src/stcal/ramp_fitting/ols_cas22_fit.py @@ -92,7 +92,6 @@ def fit_ramps_casertano( fits: always None, this is a hold over which can contain the diagnostic fit information from the jump detection algorithm. """ - # Trickery to avoid having to specify the defaults for the threshold # parameters outside the cython code. 
kwargs = {} diff --git a/src/stcal/ramp_fitting/ols_fit.py b/src/stcal/ramp_fitting/ols_fit.py index 22c4f764..f2c6d5a2 100644 --- a/src/stcal/ramp_fitting/ols_fit.py +++ b/src/stcal/ramp_fitting/ols_fit.py @@ -68,7 +68,6 @@ def ols_ramp_fit_multi(ramp_data, buffsize, save_opt, readnoise_2d, gain_2d, wei opt_info : tuple The tuple of computed optional results arrays for fitting. """ - # Determine number of slices to use for multi-processor computations nrows = ramp_data.data.shape[2] num_available_cores = cpu_count() @@ -1082,7 +1081,6 @@ def ramp_fit_compute_variances(ramp_data, gain_2d, readnoise_2d, fit_slopes_ans) s_inv_var_both3 : ndarray 1 / var_both3, summed over integrations """ - # Get image data information data = ramp_data.data err = ramp_data.err @@ -2102,7 +2100,7 @@ def fit_next_segment_all_other(wh_check, start, end_st, end_heads, ngroups): handled here have adequate data, but the stack arrays are updated. - increment start array - remove current end from end stack - - decrement number of ends + - decrement number of ends. Parameters ---------- @@ -2148,7 +2146,7 @@ def fit_next_segment_good_0th_bad_1st(wh_check, start, end_st, end_heads, got_ca adjusted. - increment start array - remove current end from end stack - - decrement number of ends + - decrement number of ends. Parameters ---------- @@ -2205,7 +2203,7 @@ def fit_next_segment_only_good_0th_group( - remove current end from end stack - set number of end to 0 - add slopes and variances to running sums - - set pixel_done to True to designate all fitting done + - set pixel_done to True to designate all fitting done. Parameters ---------- @@ -2475,7 +2473,7 @@ def fit_next_segment_short_seg_at_end( - set pixel_done to True to designate all fitting done For segments of this type, the final good group is the final group in the ramp, and the variable `l_interval` used below = 1, and the number of - groups in the segment = 2 + groups in the segment = 2. Parameters ---------- @@ -2929,7 +2927,6 @@ def fit_short_ngroups( num_seg : ndarray numbers of segments for good pixels, 1-D int """ - # Dataset has NGROUPS=2, so special fitting is done for all pixels. # All segments are at the end of the array. # - set start to -1 to designate all fitting done @@ -3379,7 +3376,6 @@ def calc_unwtd_fit(xvalues, nreads_1d, sumxx, sumx, sumxy, sumy): line_fit : ndarray 1-D values of fit using slope and intercept """ - denominator = nreads_1d * sumxx - sumx**2 # In case this branch is ever used again, suppress, and then re-enable diff --git a/src/stcal/ramp_fitting/ramp_fit_class.py b/src/stcal/ramp_fitting/ramp_fit_class.py index a633bd35..f8a78efd 100644 --- a/src/stcal/ramp_fitting/ramp_fit_class.py +++ b/src/stcal/ramp_fitting/ramp_fit_class.py @@ -1,8 +1,6 @@ class RampData: def __init__(self): - """ - Creates an internal ramp fit class. - """ + """Creates an internal ramp fit class.""" # Arrays from the data model self.data = None self.err = None @@ -96,7 +94,6 @@ def set_meta(self, name, frame_time, group_time, groupgap, nframes, drop_frames1 The number of frames dropped at the beginning of every integration. May not be used in some pipelines, so is defaulted to NoneType. 
""" - # Get meta information self.instrument_name = name diff --git a/src/stcal/ramp_fitting/utils.py b/src/stcal/ramp_fitting/utils.py index 9347bd3f..1813c797 100644 --- a/src/stcal/ramp_fitting/utils.py +++ b/src/stcal/ramp_fitting/utils.py @@ -114,9 +114,6 @@ def reshape_res(self, num_int, rlo, rhi, sect_shape, ff_sect, save_opt): save_opt : bool save optional fitting results - - Returns - ------- """ for ii_seg in range(self.slope_seg.shape[1]): self.slope_seg[num_int, ii_seg, rlo:rhi, :] = self.slope_2d[ii_seg, :].reshape(sect_shape) @@ -205,7 +202,7 @@ def shrink_crmag(self, n_int, dq_cube, imshape, nreads, jump_det): number of reads in an integration Returns - ---------- + ------- None """ @@ -295,7 +292,7 @@ def output_optional(self, group_time): def print_full(self): # pragma: no cover """ Diagnostic function for printing optional output arrays; most - useful for tiny datasets + useful for tiny datasets. Parameters ---------- @@ -862,7 +859,6 @@ def get_efftim_ped(ramp_data): number of frames dropped at the beginning of every integration; from the DRPFRMS1 keyword, or 0 if the keyword is missing """ - groupgap = ramp_data.groupgap nframes = ramp_data.nframes frame_time = ramp_data.frame_time @@ -968,7 +964,6 @@ def get_more_info(ramp_data, saturated_flag, jump_flag): # pragma: no cover jump_flag : int Group data quality flag that indicates a cosmic ray hit. """ - group_time = ramp_data.group_time nframes_used = ramp_data.nframes saturated_flag = ramp_data.flags_saturated @@ -1253,7 +1248,7 @@ def do_all_sat(ramp_data, pixeldq, groupdq, imshape, n_int, save_opt): def log_stats(c_rates): """ - Optionally log statistics of detected cosmic rays + Optionally log statistics of detected cosmic rays. Parameters ---------- @@ -1300,7 +1295,6 @@ def compute_num_slices(max_cores, nrows, max_available): number_slices : int The number of slices for multiprocessing. 
""" - number_slices = 1 if max_cores.isnumeric(): number_slices = int(max_cores) diff --git a/src/stcal/saturation/saturation.py b/src/stcal/saturation/saturation.py index 1f6e703f..e65e4f02 100644 --- a/src/stcal/saturation/saturation.py +++ b/src/stcal/saturation/saturation.py @@ -62,7 +62,6 @@ def flag_saturated_pixels( pdq : int, 2D array updated pixel dq array """ - nints, ngroups, nrows, ncols = data.shape saturated = dqflags["SATURATED"] ad_floor = dqflags["AD_FLOOR"] From afb839d27dd3fcb8d01905b71643777f40ead17b Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 20:35:25 -0500 Subject: [PATCH 29/36] Add type annotation linting --- pyproject.toml | 5 +++++ src/stcal/alignment/util.py | 8 ++++---- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 7f4e8128..c0a1453d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -108,6 +108,8 @@ extend-select = [ # 'N', # pep8-naming 'D', # pydocstyle 'UP', # pyupgrade + 'YTT', # flake8-2020 (system version info) + 'ANN', # flake8-annotations (best practices for type annotations) 'S', # flake8-bandit 'BLE', # flake8-blind-except 'B', # flake8-bugbear @@ -171,6 +173,9 @@ exclude = [ [tool.ruff.lint.pydocstyle] convention = "numpy" +[tool.ruff.lint.flake8-annotations] +ignore-fully-untyped = true # Turn of annotation checking for fully untyped code + [tool.cython-lint] max-line-length = 110 diff --git a/src/stcal/alignment/util.py b/src/stcal/alignment/util.py index 54e8a7f8..e73c97dc 100644 --- a/src/stcal/alignment/util.py +++ b/src/stcal/alignment/util.py @@ -72,8 +72,8 @@ def _generate_tranform( pscale_ratio: int | None = None, pscale: float | None = None, rotation: float | None = None, - transform=None, -): + transform: astmodels.Model | None = None, +) -> astmodels.Model: """ Creates a transform from pixel to world coordinates based on a reference datamodel's WCS. @@ -352,7 +352,7 @@ def _validate_wcs_list(wcs_list): return True -def wcsinfo_from_model(input_model: SupportsDataWithWcs): +def wcsinfo_from_model(input_model: SupportsDataWithWcs) -> dict[str, np.ndarray | str | bool]: """ Creates a dict {wcs_keyword: array_of_values} pairs from a datamodel. @@ -458,7 +458,7 @@ def compute_scale( return np.sqrt(xscale * yscale) -def compute_fiducial(wcslist: list, bounding_box=None) -> np.ndarray: +def compute_fiducial(wcslist: list, bounding_box: tuple | list | None = None) -> np.ndarray: """ Calculates the world coordinates of the fiducial point of a list of WCS objects. For a celestial footprint this is the center. 
For a spectral footprint, it is the From 24822c1df98b9bbda4ceacc504da63b71e4c0901 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 20:39:51 -0500 Subject: [PATCH 30/36] Enable error message linting --- pyproject.toml | 18 ++++++++++-------- src/stcal/alignment/util.py | 24 ++++++++++++++++-------- src/stcal/basic_utils.py | 5 ++--- src/stcal/dqflags.py | 3 ++- src/stcal/dynamicdq.py | 5 ++--- 5 files changed, 32 insertions(+), 23 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c0a1453d..9ee66bb0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -102,20 +102,22 @@ src = [ [tool.ruff.lint] extend-select = [ - 'F', # Pyflakes - 'W', 'E', # pycodestyle - 'I', # isort + 'F', # Pyflakes (part of default flake8) + 'W', 'E', # pycodestyle (part of default flake8) + 'I', # isort (import sorting) # 'N', # pep8-naming - 'D', # pydocstyle - 'UP', # pyupgrade + 'D', # pydocstyle (docstring style guide) + 'UP', # pyupgrade (upgrade code to modern python) 'YTT', # flake8-2020 (system version info) 'ANN', # flake8-annotations (best practices for type annotations) - 'S', # flake8-bandit - 'BLE', # flake8-blind-except - 'B', # flake8-bugbear + 'S', # flake8-bandit (security checks) + 'BLE', # flake8-blind-except (prevent blind except statements) + 'B', # flake8-bugbear (prevent common gotcha bugs) 'A', # flake8-builtins (prevent shadowing of builtins) 'C4', # flake8-comprehensions (best practices for comprehensions) 'T10', # flake8-debugger (prevent debugger statements in code) + 'EM', # flake8-errormessages (best practices for error messages) + 'FA', 'ISC', # flake8-implicit-str-concat (prevent implicit string concat) 'ICN', # flake8-import-conventions (enforce import conventions) 'INP', # flake8-no-pep420 (prevent use of PEP420, i.e. implicit name spaces) diff --git a/src/stcal/alignment/util.py b/src/stcal/alignment/util.py index e73c97dc..d126d018 100644 --- a/src/stcal/alignment/util.py +++ b/src/stcal/alignment/util.py @@ -341,13 +341,16 @@ def _validate_wcs_list(wcs_list): instance of WCS. """ if not isiterable(wcs_list): - raise ValueError("Expected 'wcs_list' to be an iterable of WCS objects.") + msg = "Expected 'wcs_list' to be an iterable of WCS objects." + raise ValueError(msg) if len(wcs_list): if not all(isinstance(w, gwcs.WCS) for w in wcs_list): - raise TypeError("All items in 'wcs_list' are to be instances of gwcs.wcs.WCS.") + msg = "All items in 'wcs_list' are to be instances of gwcs.wcs.WCS." + raise TypeError(msg) else: - raise TypeError("'wcs_list' should not be empty.") + msg = "'wcs_list' should not be empty." + raise TypeError(msg) return True @@ -427,7 +430,8 @@ def compute_scale( spectral = "SPECTRAL" in wcs.output_frame.axes_type if spectral and disp_axis is None: - raise ValueError("If input WCS is spectral, a disp_axis must be given") + msg = "If input WCS is spectral, a disp_axis must be given" + raise ValueError(msg) crpix = np.array(wcs.invert(*fiducial)) @@ -533,7 +537,8 @@ def calc_rotation_matrix(roll_ref: float, v3i_yangle: float, vparity: int = 1) - \\end{bmatrix} """ if vparity not in (1, -1): - raise ValueError(f"vparity should be 1 or -1. Input was: {vparity}") + msg = f"vparity should be 1 or -1. 
Input was: {vparity}" + raise ValueError(msg) rel_angle = roll_ref - (vparity * v3i_yangle) @@ -768,7 +773,8 @@ def _get_forward_transform_func(wcs1): elif isinstance(wcs1, gwcs.WCS): forward_transform = wcs1.forward_transform else: - raise TypeError("Expected input to be astropy.wcs.WCS or gwcs.WCS object") + msg = "Expected input to be astropy.wcs.WCS or gwcs.WCS object" + raise TypeError(msg) return forward_transform def _get_backward_transform_func(wcs2): @@ -777,7 +783,8 @@ def _get_backward_transform_func(wcs2): elif isinstance(wcs2, gwcs.WCS): backward_transform = wcs2.backward_transform else: - raise TypeError("Expected input to be astropy.wcs.WCS or gwcs.WCS object") + msg = "Expected input to be astropy.wcs.WCS or gwcs.WCS object" + raise TypeError(msg) return backward_transform def _reproject(x: float | np.ndarray, y: float | np.ndarray) -> tuple: @@ -803,7 +810,8 @@ def _reproject(x: float | np.ndarray, y: float | np.ndarray) -> tuple: if not isinstance(y, (np.ndarray)): y = np.array(y) if x.shape != y.shape: - raise ValueError("x and y must be the same length") + msg = "x and y must be the same length" + raise ValueError(msg) sky = _get_forward_transform_func(wcs1)(x, y, 0) # rearrange into array including flattened x and y values diff --git a/src/stcal/basic_utils.py b/src/stcal/basic_utils.py index f9a9894f..832cc8c8 100644 --- a/src/stcal/basic_utils.py +++ b/src/stcal/basic_utils.py @@ -9,9 +9,8 @@ try: from stdatamodels.basic_utils import multiple_replace except ImportError as err: - raise ImportError( - "basic_utils has been moved to stdatamodels.basic_utils, please install stdatamodels" - ) from err + msg = "basic_utils has been moved to stdatamodels.basic_utils, please install stdatamodels" + raise ImportError(msg) from err __all__ = ["multiple_replace"] diff --git a/src/stcal/dqflags.py b/src/stcal/dqflags.py index 20197b13..27bd2a0f 100644 --- a/src/stcal/dqflags.py +++ b/src/stcal/dqflags.py @@ -14,7 +14,8 @@ multiple_replace, ) except ImportError as err: - raise ImportError("dqflags has been moved to stdatamodels.dqflags, please install stdatamodels") from err + msg = "dqflags has been moved to stdatamodels.dqflags, please install stdatamodels" + raise ImportError(msg) from err __all__ = ["ap_interpret_bit_flags", "multiple_replace", "interpret_bit_flags", "dqflags_to_mnemonics"] diff --git a/src/stcal/dynamicdq.py b/src/stcal/dynamicdq.py index 074f6e8f..a4801936 100644 --- a/src/stcal/dynamicdq.py +++ b/src/stcal/dynamicdq.py @@ -9,9 +9,8 @@ try: from stdatamodels.dynamicdq import dynamic_mask except ImportError as err: - raise ImportError( - "dynamicdq has been moved to stdatamodels.dynamicdq, please install stdatamodels" - ) from err + msg = "dynamicdq has been moved to stdatamodels.dynamicdq, please install stdatamodels" + raise ImportError(msg) from err __all__ = ["dynamic_mask"] From e91b86e78432f136b0d8265202992e80d67ca2f5 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 21:03:32 -0500 Subject: [PATCH 31/36] Add simplification linting --- pyproject.toml | 6 ++- src/stcal/dark_current/dark_sub.py | 17 +++--- src/stcal/jump/jump.py | 22 ++++---- src/stcal/jump/twopoint_difference.py | 64 +++++++++++----------- src/stcal/ramp_fitting/gls_fit.py | 7 +-- src/stcal/ramp_fitting/ramp_fit.py | 5 +- tests/test_jump_cas22.py | 2 +- tests/test_ramp_fitting_gls_fit.py | 12 ++--- tests/test_twopoint_difference.py | 78 +++++++++++++-------------- 9 files changed, 106 insertions(+), 107 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 
9ee66bb0..ff54436a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -117,11 +117,11 @@ extend-select = [ 'C4', # flake8-comprehensions (best practices for comprehensions) 'T10', # flake8-debugger (prevent debugger statements in code) 'EM', # flake8-errormessages (best practices for error messages) - 'FA', + 'FA', # flake8-future-annotations (correct usage future annotations) 'ISC', # flake8-implicit-str-concat (prevent implicit string concat) 'ICN', # flake8-import-conventions (enforce import conventions) - 'INP', # flake8-no-pep420 (prevent use of PEP420, i.e. implicit name spaces) 'G', # flake8-logging-format (best practices for logging) + 'INP', # flake8-no-pep420 (prevent use of PEP420, i.e. implicit name spaces) 'PIE', # flake8-pie (misc suggested improvement linting) # 'T20', # flake8-print (prevent print statements in code) 'PT', # flake8-pytest-style (best practices for pytest) @@ -129,6 +129,8 @@ extend-select = [ 'RSE', # flake8-raise (best practices for raising exceptions) 'RET', # flake8-return (best practices for return statements) 'SLF', # flake8-self (prevent private member access) + 'SLOT', # flake8-slots (require __slots__ for immutable classes) + 'SIM', # flake8-simplify (suggest simplifications to code where possible) 'TID', # flake8-tidy-imports (prevent banned api and best import practices) 'INT', # flake8-gettext (when to use printf style strings) # 'ARG', # flake8-unused-arguments (prevent unused arguments) diff --git a/src/stcal/dark_current/dark_sub.py b/src/stcal/dark_current/dark_sub.py index d35b3cf5..d279775d 100644 --- a/src/stcal/dark_current/dark_sub.py +++ b/src/stcal/dark_current/dark_sub.py @@ -344,10 +344,7 @@ def subtract_dark(science_data, dark_data): int_start = 1 if science_data.exp_intstart is None else science_data.exp_intstart # Determine the number of integrations contained in the dark reference file - if len(dark_data.data.shape) == 4: - dark_nints = dark_data.data.shape[0] - else: - dark_nints = 1 + dark_nints = dark_data.data.shape[0] if len(dark_data.data.shape) == 4 else 1 log.debug( "subtract_dark: nints=%d, ngroups=%d, size=%d,%d", @@ -380,12 +377,12 @@ def subtract_dark(science_data, dark_data): # to the first few science integrations. There's an additional # check of the starting integration number in case the science # data are segmented. 
- if i < dark_nints and int_start == 1: - dark_sci = dark_data.data[i] - else: - # For science integrations beyond the number of - # dark integrations, use the last dark integration - dark_sci = dark_data.data[-1] + # + # else + # + # For science integrations beyond the number of + # dark integrations, use the last dark integration + dark_sci = dark_data.data[i] if i < dark_nints and int_start == 1 else dark_data.data[-1] else: # Use single-integration dark data dark_sci = dark_data.data diff --git a/src/stcal/jump/jump.py b/src/stcal/jump/jump.py index c6f070de..6139d103 100644 --- a/src/stcal/jump/jump.py +++ b/src/stcal/jump/jump.py @@ -724,25 +724,27 @@ def make_snowballs( for jump in jump_ellipses: # center of jump should be saturated jump_center = jump[0] - # if center of the jump ellipse is not saturated in this group and is saturated in - # the next group add the jump ellipse to the snowball list if ( + # if center of the jump ellipse is not saturated in this group and is saturated in + # the next group add the jump ellipse to the snowball list group < (num_groups - 1) and gdq[integration, group + 1, round(jump_center[1]), round(jump_center[0])] == sat_flag and gdq[integration, group, round(jump_center[1]), round(jump_center[0])] != sat_flag + ) or ( + # if the jump ellipse is near the edge, do not require saturation in the + # center of the jump ellipse + near_edge(jump, low_threshold, high_threshold) ): snowballs.append(jump) - # if the jump ellipse is near the edge, do not require saturation in the - # center of the jump ellipse - elif near_edge(jump, low_threshold, high_threshold): - snowballs.append(jump) else: for sat in sat_ellipses: # center of saturation is within the enclosing jump rectangle - if point_inside_ellipse(sat[0], jump): - if gdq[integration, group, round(jump_center[1]), round(jump_center[0])] == sat_flag: - if jump not in snowballs: - snowballs.append(jump) + if ( + point_inside_ellipse(sat[0], jump) + and gdq[integration, group, round(jump_center[1]), round(jump_center[0])] == sat_flag + and jump not in snowballs + ): + snowballs.append(jump) # extend the saturated ellipses that are larger than the min_sat_radius gdq[integration, :, :, :] = extend_saturation( gdq[integration, :, :, :], diff --git a/src/stcal/jump/twopoint_difference.py b/src/stcal/jump/twopoint_difference.py index b835948b..62d44b1a 100644 --- a/src/stcal/jump/twopoint_difference.py +++ b/src/stcal/jump/twopoint_difference.py @@ -152,10 +152,7 @@ def find_crs( for grp in range(dat.shape[1]): if np.all(np.bitwise_and(gdq[integ, grp, :, :], dnu_flag)): num_flagged_grps += 1 - if only_use_ints and nints: - total_groups = nints - else: - total_groups = nints * ngrps - num_flagged_grps + total_groups = nints if only_use_ints and nints else nints * ngrps - num_flagged_grps if (ngrps < minimum_groups and only_use_ints and nints < minimum_sigclip_groups) or ( not only_use_ints and nints * ngrps < minimum_sigclip_groups and ngrps < minimum_groups ): @@ -361,38 +358,44 @@ def find_crs( # Only flag adjacent pixels if they do not already have the # 'SATURATION' or 'DONOTUSE' flag set if row != 0: - if (gdq[integ, group, row - 1, col] & sat_flag) == 0: - if (gdq[integ, group, row - 1, col] & dnu_flag) == 0: - gdq[integ, group, row - 1, col] = np.bitwise_or( - gdq[integ, group, row - 1, col], jump_flag ) + if (gdq[integ, group, row - 1, col] & sat_flag) == 0 and ( + gdq[integ, group, row - 1, col] & dnu_flag + ) == 0: + gdq[integ, group, row - 1, col] = np.bitwise_or( gdq[integ, group, row - 1, col],
jump_flag + ) else: row_below_gdq[integ, cr_group[j], cr_col[j]] = jump_flag if row != nrows - 1: - if (gdq[integ, group, row + 1, col] & sat_flag) == 0: - if (gdq[integ, group, row + 1, col] & dnu_flag) == 0: - gdq[integ, group, row + 1, col] = np.bitwise_or( - gdq[integ, group, row + 1, col], jump_flag - ) + if (gdq[integ, group, row + 1, col] & sat_flag) == 0 and ( + gdq[integ, group, row + 1, col] & dnu_flag + ) == 0: + gdq[integ, group, row + 1, col] = np.bitwise_or( + gdq[integ, group, row + 1, col], jump_flag + ) else: row_above_gdq[integ, cr_group[j], cr_col[j]] = jump_flag # Here we are just checking that we don't flag neighbors of # jumps that are off the detector. - if cr_col[j] != 0: - if (gdq[integ, group, row, col - 1] & sat_flag) == 0: - if (gdq[integ, group, row, col - 1] & dnu_flag) == 0: - gdq[integ, group, row, col - 1] = np.bitwise_or( - gdq[integ, group, row, col - 1], jump_flag - ) - - if cr_col[j] != ncols - 1: - if (gdq[integ, group, row, col + 1] & sat_flag) == 0: - if (gdq[integ, group, row, col + 1] & dnu_flag) == 0: - gdq[integ, group, row, col + 1] = np.bitwise_or( - gdq[integ, group, row, col + 1], jump_flag - ) + if ( + cr_col[j] != 0 + and (gdq[integ, group, row, col - 1] & sat_flag) == 0 + and (gdq[integ, group, row, col - 1] & dnu_flag) == 0 + ): + gdq[integ, group, row, col - 1] = np.bitwise_or( + gdq[integ, group, row, col - 1], jump_flag + ) + + if ( + cr_col[j] != ncols - 1 + and (gdq[integ, group, row, col + 1] & sat_flag) == 0 + and (gdq[integ, group, row, col + 1] & dnu_flag) == 0 + ): + gdq[integ, group, row, col + 1] = np.bitwise_or( + gdq[integ, group, row, col + 1], jump_flag + ) # flag n groups after jumps above the specified thresholds to account for # the transient seen after ramp jumps @@ -409,9 +412,10 @@ def find_crs( col = cr_col[j] if e_jump_4d[intg, group - 1, row, col] >= cthres[row, col]: for kk in range(group, min(group + cgroup + 1, ngroups)): - if (gdq[intg, kk, row, col] & sat_flag) == 0: - if (gdq[intg, kk, row, col] & dnu_flag) == 0: - gdq[intg, kk, row, col] = np.bitwise_or(gdq[integ, kk, row, col], jump_flag) + if (gdq[intg, kk, row, col] & sat_flag) == 0 and ( + gdq[intg, kk, row, col] & dnu_flag + ) == 0: + gdq[intg, kk, row, col] = np.bitwise_or(gdq[integ, kk, row, col], jump_flag) if "stddev" in locals(): return gdq, row_below_gdq, row_above_gdq, num_primary_crs, stddev diff --git a/src/stcal/ramp_fitting/gls_fit.py b/src/stcal/ramp_fitting/gls_fit.py index 75532833..6f67125a 100644 --- a/src/stcal/ramp_fitting/gls_fit.py +++ b/src/stcal/ramp_fitting/gls_fit.py @@ -965,11 +965,8 @@ def determine_slope( iter_ = 0 done = False - if NUM_ITER_NO_EXTRA_TERMS <= 0: - # Even the first iteration uses the extra terms. - temp_use_extra_terms = True - else: - temp_use_extra_terms = False + # Even the first iteration uses the extra terms. + temp_use_extra_terms = NUM_ITER_NO_EXTRA_TERMS <= 0 while not done: (intercept_sect, int_var_sect, slope_sect, slope_var_sect, cr_sect, cr_var_sect) = compute_slope( diff --git a/src/stcal/ramp_fitting/ramp_fit.py b/src/stcal/ramp_fitting/ramp_fit.py index 2c20b4ed..514429f1 100755 --- a/src/stcal/ramp_fitting/ramp_fit.py +++ b/src/stcal/ramp_fitting/ramp_fit.py @@ -59,10 +59,7 @@ def create_ramp_fit_class(model, dqflags=None, suppress_one_group=False): ramp_data.set_arrays(model.data, model.err, model.groupdq, model.pixeldq) # Attribute may not be supported by all pipelines. Default is NoneType. 
- if hasattr(model, "drop_frames1"): - drop_frames1 = model.meta.exposure.drop_frames1 - else: - drop_frames1 = None + drop_frames1 = model.meta.exposure.drop_frames1 if hasattr(model, "drop_frames1") else None ramp_data.set_meta( name=model.meta.instrument.name, frame_time=model.meta.exposure.frame_time, diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index d7530ad8..18c19c96 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -550,7 +550,7 @@ def test_override_default_threshold(jump_data): def test_jump_dq_set(jump_data): # Check the DQ flag value to start - assert JUMP_DET == 2**2 + assert 2**2 == JUMP_DET resultants, read_noise, read_pattern, jump_reads, jump_resultants = jump_data dq = np.zeros(resultants.shape, dtype=np.int32) diff --git a/tests/test_ramp_fitting_gls_fit.py b/tests/test_ramp_fitting_gls_fit.py index 087824f1..e940bd94 100644 --- a/tests/test_ramp_fitting_gls_fit.py +++ b/tests/test_ramp_fitting_gls_fit.py @@ -180,8 +180,8 @@ def test_nocrs_noflux(): ramp_data, 512, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags ) - assert 0 == np.max(slopes[0]) - assert 0 == np.min(slopes[0]) + assert np.max(slopes[0]) == 0 + assert np.min(slopes[0]) == 0 @pytest.mark.skip(reason="Getting all NaN's, but expecting all zeros.") @@ -203,8 +203,8 @@ def test_nocrs_noflux_firstrows_are_nan(): ramp_data, 512, save_opt, rnoise2d, gain2d, algo, "optimal", ncores, test_dq_flags ) - assert 0 == np.max(slopes[0]) - assert 0 == np.min(slopes[0]) + assert np.max(slopes[0]) == 0 + assert np.min(slopes[0]) == 0 def test_error_when_frame_time_not_set(): @@ -293,8 +293,8 @@ def test_bad_gain_values(): assert dq[r2, c2] == flag_check # These asserts are wrong for some reason - assert 0 == np.max(data) - assert 0 == np.min(data) + assert np.max(data) == 0 + assert np.min(data) == 0 def test_simple_ramp(): diff --git a/tests/test_twopoint_difference.py b/tests/test_twopoint_difference.py index 5e73141b..c6443bc7 100644 --- a/tests/test_twopoint_difference.py +++ b/tests/test_twopoint_difference.py @@ -30,7 +30,7 @@ def test_nocrs_noflux(setup_cube): data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 0 == np.max(out_gdq) # no CR found + assert np.max(out_gdq) == 0 # no CR found def test_5grps_cr3_noflux(setup_cube): @@ -42,8 +42,8 @@ def test_5grps_cr3_noflux(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == np.max(out_gdq) # a CR was found - assert 2 == np.argmax(out_gdq[0, :, 100, 100]) # find the CR in the expected group + assert np.max(out_gdq) == 4 # a CR was found + assert np.argmax(out_gdq[0, :, 100, 100]) == 2 # find the CR in the expected group def test_5grps_cr2_noflux(setup_cube): @@ -55,8 +55,8 @@ def test_5grps_cr2_noflux(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == np.max(out_gdq) # a CR was found - assert 1 == np.argmax(out_gdq[0, :, 100, 100]) # find the CR in the expected group + assert np.max(out_gdq) == 4 # a CR was found + assert np.argmax(out_gdq[0, :, 100, 100]) == 1 # find the CR in the expected group def test_6grps_negative_differences_zeromedian(setup_cube): @@ -72,7 +72,7 @@ def test_6grps_negative_differences_zeromedian(setup_cube): out_gdq, row_below_gdq, 
rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 0 == np.max(out_gdq) # no CR was found + assert np.max(out_gdq) == 0 # no CR was found def test_5grps_cr2_negjumpflux(setup_cube): @@ -84,8 +84,8 @@ def test_5grps_cr2_negjumpflux(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == np.max(out_gdq) # a CR was found - assert 1 == np.argmax(out_gdq[0, :, 100, 100]) # find the CR in the expected group + assert np.max(out_gdq) == 4 # a CR was found + assert np.argmax(out_gdq[0, :, 100, 100]) == 1 # find the CR in the expected group def test_3grps_cr2_noflux(setup_cube): @@ -96,7 +96,7 @@ def test_3grps_cr2_noflux(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == np.max(out_gdq) # a CR was found + assert np.max(out_gdq) == 4 # a CR was found # assert(1,np.argmax(out_gdq[0, :, 100, 100])) # find the CR in the expected group assert np.array_equal([0, 4, 0], out_gdq[0, :, 100, 100]) @@ -109,8 +109,8 @@ def test_4grps_cr2_noflux(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == np.max(out_gdq) # a CR was found - assert 1 == np.argmax(out_gdq[0, :, 100, 100]) # find the CR in the expected group + assert np.max(out_gdq) == 4 # a CR was found + assert np.argmax(out_gdq[0, :, 100, 100]) == 1 # find the CR in the expected group def test_5grps_cr2_nframe2(setup_cube): @@ -125,7 +125,7 @@ def test_5grps_cr2_nframe2(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == np.max(out_gdq) # a CR was found + assert np.max(out_gdq) == 4 # a CR was found assert np.array_equal([0, 4, 4, 0, 0], out_gdq[0, :, 100, 100]) @@ -155,7 +155,7 @@ def test_5grps_twocrs_2nd_5th(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == np.max(out_gdq) # a CR was found + assert np.max(out_gdq) == 4 # a CR was found assert np.array_equal([0, 4, 0, 0, 4], out_gdq[0, :, 100, 100]) @@ -171,7 +171,7 @@ def test_5grps_twocrs_2nd_5thbig(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == np.max(out_gdq) # a CR was found + assert np.max(out_gdq) == 4 # a CR was found assert np.array_equal([0, 4, 0, 0, 4], out_gdq[0, :, 100, 100]) @@ -192,7 +192,7 @@ def test_10grps_twocrs_2nd_8th_big(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == np.max(out_gdq) # a CR was found + assert np.max(out_gdq) == 4 # a CR was found assert np.array_equal([0, 4, 0, 0, 0, 0, 0, 4, 0, 0], out_gdq[0, :, 100, 100]) @@ -213,7 +213,7 @@ def test_10grps_twocrs_10percenthit(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = 
find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == np.max(out_gdq) # a CR was found + assert np.max(out_gdq) == 4 # a CR was found assert np.array_equal([0, 4, 0, 0, 0, 0, 0, 4, 0, 0], out_gdq[0, :, 100, 100]) @@ -229,7 +229,7 @@ def test_5grps_twocrs_2nd_5thbig_nframes2(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == np.max(out_gdq) # a CR was found + assert np.max(out_gdq) == 4 # a CR was found assert np.array_equal([0, 4, 0, 0, 4], out_gdq[0, :, 100, 100]) @@ -246,7 +246,7 @@ def test_6grps_twocrs_2nd_5th(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == np.max(out_gdq) # a CR was found + assert np.max(out_gdq) == 4 # a CR was found assert np.array_equal([0, 4, 0, 0, 4, 0], out_gdq[0, :, 100, 100]) @@ -263,7 +263,7 @@ def test_6grps_twocrs_2nd_5th_nframes2(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == np.max(out_gdq) # a CR was found + assert np.max(out_gdq) == 4 # a CR was found assert np.array_equal([0, 4, 0, 0, 4, 0], out_gdq[0, :, 100, 100]) @@ -286,7 +286,7 @@ def test_6grps_twocrs_twopixels_nframes2(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == np.max(out_gdq) # a CR was found + assert np.max(out_gdq) == 4 # a CR was found assert np.array_equal([0, 4, 0, 0, 4, 0], out_gdq[0, :, 100, 100]) assert np.array_equal([0, 0, 4, 0, 4, 0], out_gdq[0, :, 200, 100]) @@ -303,7 +303,7 @@ def test_5grps_cr2_negslope(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == np.max(out_gdq) # a CR was found + assert np.max(out_gdq) == 4 # a CR was found assert np.array_equal([0, 0, 4, 0, 0], out_gdq[0, :, 100, 100]) @@ -320,7 +320,7 @@ def test_6grps_1cr(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == out_gdq[0, 5, 100, 100] + assert out_gdq[0, 5, 100, 100] == 4 def test_7grps_1cr(setup_cube): @@ -337,7 +337,7 @@ def test_7grps_1cr(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == out_gdq[0, 6, 100, 100] + assert out_gdq[0, 6, 100, 100] == 4 def test_8grps_1cr(setup_cube): @@ -355,7 +355,7 @@ def test_8grps_1cr(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == out_gdq[0, 6, 100, 100] + assert out_gdq[0, 6, 100, 100] == 4 def test_9grps_1cr_1sat(setup_cube): @@ -375,7 +375,7 @@ def test_9grps_1cr_1sat(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, 
rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == out_gdq[0, 6, 100, 100] + assert out_gdq[0, 6, 100, 100] == 4 def test_10grps_1cr_2sat(setup_cube): @@ -397,7 +397,7 @@ def test_10grps_1cr_2sat(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == out_gdq[0, 6, 100, 100] + assert out_gdq[0, 6, 100, 100] == 4 def test_11grps_1cr_3sat(setup_cube): @@ -421,7 +421,7 @@ def test_11grps_1cr_3sat(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == out_gdq[0, 6, 100, 100] + assert out_gdq[0, 6, 100, 100] == 4 def test_11grps_0cr_3donotuse(setup_cube): @@ -487,7 +487,7 @@ def test_10grps_cr2_gt3sigma(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == np.max(out_gdq) # a CR was found + assert np.max(out_gdq) == 4 # a CR was found assert np.array_equal([0, 4, 0, 0, 0, 0, 0, 0, 0, 0], out_gdq[0, :, 100, 100]) @@ -501,7 +501,7 @@ def test_10grps_cr2_3sigma_nocr(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 0 == np.max(out_gdq) # a CR was found + assert np.max(out_gdq) == 0 # a CR was found assert np.array_equal([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], out_gdq[0, :, 100, 100]) @@ -515,7 +515,7 @@ def test_10grps_cr2_gt3sigma_2frames(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == np.max(out_gdq) # a CR was found + assert np.max(out_gdq) == 4 # a CR was found assert np.array_equal([0, 4, 0, 0, 0, 0, 0, 0, 0, 0], out_gdq[0, :, 100, 100]) @@ -529,7 +529,7 @@ def test_10grps_cr2_gt3sigma_2frames_offdiag(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 4 == np.max(out_gdq) # a CR was found + assert np.max(out_gdq) == 4 # a CR was found assert np.array_equal([0, 4, 0, 0, 0, 0, 0, 0, 0, 0], out_gdq[0, :, 100, 110]) @@ -543,7 +543,7 @@ def test_10grps_cr2_3sigma_2frames_nocr(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 0 == np.max(out_gdq) # a CR was found + assert np.max(out_gdq) == 0 # a CR was found assert np.array_equal([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], out_gdq[0, :, 100, 100]) @@ -559,7 +559,7 @@ def test_10grps_nocr_2pixels_sigma0(setup_cube): out_gdq, row_below_gdq, rows_above_gdq, total_crs, stddev = find_crs( data, gdq, read_noise, rej_threshold, rej_threshold, rej_threshold, nframes, False, 200, 10, DQFLAGS ) - assert 0 == np.max(out_gdq) # no CR was found + assert np.max(out_gdq) == 0 # no CR was found def test_5grps_satat4_crat3(setup_cube): @@ -873,7 +873,7 @@ def test_10grps_1cr_afterjump(setup_cube): ) # all groups after CR should be flagged for k in range(6, 10): - assert 4 == out_gdq[0, k, 100, 100], f"after jump flagging failed in group {k}" + assert out_gdq[0, k, 100, 
100] == 4, f"after jump flagging failed in group {k}" def test_10grps_1cr_afterjump_2group(setup_cube): @@ -910,11 +910,11 @@ def test_10grps_1cr_afterjump_2group(setup_cube): # 2 groups after CR should be flagged for k in range(6, 9): - assert 4 == out_gdq[0, k, 100, 100], f"after jump flagging failed in group {k}" + assert out_gdq[0, k, 100, 100] == 4, f"after jump flagging failed in group {k}" # rest not flagged for k in range(9, 10): - assert 0 == out_gdq[0, k, 100, 100], f"after jump flagging incorrect in group {k}" + assert out_gdq[0, k, 100, 100] == 0, f"after jump flagging incorrect in group {k}" def test_10grps_1cr_afterjump_toosmall(setup_cube): @@ -950,7 +950,7 @@ def test_10grps_1cr_afterjump_toosmall(setup_cube): ) # all groups after CR should be flagged for k in range(7, 10): - assert 0 == out_gdq[0, k, 100, 100], f"after jump flagging incorrect in group {k}" + assert out_gdq[0, k, 100, 100] == 0, f"after jump flagging incorrect in group {k}" def test_10grps_1cr_afterjump_twothresholds(setup_cube): @@ -989,11 +989,11 @@ def test_10grps_1cr_afterjump_twothresholds(setup_cube): ) # 2 groups after CR should be flagged for k in range(2, 5): - assert 4 == out_gdq[0, k, 100, 100], f"after jump flagging incorrect in group {k}" + assert out_gdq[0, k, 100, 100] == 4, f"after jump flagging incorrect in group {k}" # all groups after CR should be flagged for k in range(6, 10): - assert 4 == out_gdq[0, k, 100, 100], f"after jump flagging incorrect in group {k}" + assert out_gdq[0, k, 100, 100] == 4, f"after jump flagging incorrect in group {k}" def test_median_func(): From 5206b0f888483cc43e4fafe0727e261c63c3f3dc Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 21:05:43 -0500 Subject: [PATCH 32/36] Add type checking linting --- pyproject.toml | 1 + src/stcal/alignment/util.py | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index ff54436a..b7b62b00 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -132,6 +132,7 @@ extend-select = [ 'SLOT', # flake8-slots (require __slots__ for immutable classes) 'SIM', # flake8-simplify (suggest simplifications to code where possible) 'TID', # flake8-tidy-imports (prevent banned api and best import practices) + 'TCH', # flake8-type-checking (move type checking imports into type checking blocks) 'INT', # flake8-gettext (when to use printf style strings) # 'ARG', # flake8-unused-arguments (prevent unused arguments) 'PTH', # flake8-use-pathlib (prefer pathlib over os.path) diff --git a/src/stcal/alignment/util.py b/src/stcal/alignment/util.py index d126d018..f3e7b085 100644 --- a/src/stcal/alignment/util.py +++ b/src/stcal/alignment/util.py @@ -3,11 +3,10 @@ import functools import logging -from typing import Protocol +from typing import TYPE_CHECKING, Protocol import gwcs import numpy as np -from asdf import AsdfFile from astropy import units as u from astropy import wcs as fitswcs from astropy.coordinates import SkyCoord @@ -15,6 +14,9 @@ from astropy.utils.misc import isiterable from gwcs.wcstools import wcs_from_fiducial +if TYPE_CHECKING: + from asdf import AsdfFile + log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) From 32bd6bb2a81803a0b5a96e15e3e8e2b9d7751eb7 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 21:10:00 -0500 Subject: [PATCH 33/36] Enable tryceratops linting --- pyproject.toml | 2 ++ src/stcal/ramp_fitting/utils.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 
b7b62b00..abff2aa1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -139,9 +139,11 @@ extend-select = [ # 'ERA', # eradicate (remove commented out code) 'PGH', # pygrep (simple grep checks) 'PL', # pylint (general linting, flake8 alternative) + 'TRY', # tryceratops (linting for try/except blocks) 'FLY', # flynt (f-string conversion where possible) 'NPY', # NumPy-specific checks (recommendations from NumPy) 'PERF', # Perflint (performance linting) + 'LOG', 'RUF', # ruff specific checks ] ignore = [ diff --git a/src/stcal/ramp_fitting/utils.py b/src/stcal/ramp_fitting/utils.py index 1813c797..73f4b5e4 100644 --- a/src/stcal/ramp_fitting/utils.py +++ b/src/stcal/ramp_fitting/utils.py @@ -871,7 +871,7 @@ def get_efftim_ped(ramp_data): try: effintim = (nframes + groupgap) * frame_time except TypeError: - log.error("Can not retrieve values needed to calculate integ. time") + log.exception("Can not retrieve values needed to calculate integ. time") log.debug("Calculating effective integration time for a single group using:") log.debug(" groupgap: %s", groupgap) From 4b8fccaf0254d6d14701159d98e752949436262c Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 11 Aug 2023 14:41:16 -0400 Subject: [PATCH 34/36] Update CI Note the check-style job can be removed once the pre-commit.ci bot is enabled --- .github/workflows/ci.yml | 1 - tox.ini | 15 ++++----------- 2 files changed, 4 insertions(+), 12 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4950b5f1..ffc59324 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -24,7 +24,6 @@ jobs: with: envs: | - linux: check-style - - linux: check-security - linux: check-build test: uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v1 diff --git a/tox.ini b/tox.ini index 2484e81e..255892a6 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [tox] envlist = - check-{style,security,build} + check-{style,build} test{,-warnings,-cov}-xdist test-numpy{120,121,122} test-{jwst,romancal}-xdist @@ -19,17 +19,10 @@ envlist = description = check code style, e.g. with ruff skip_install = true deps = - ruff + pre-commit commands = - ruff . {posargs} - -[testenv:check-security] -description = run bandit to check security compliance -skip_install = true -deps = - bandit>=1.7 -commands = - bandit stcal -r -x src,tests + pre-commit install-hooks + pre-commit run {posargs:--color always --all-files --show-diff-on-failure} [testenv:check-build] description = check build sdist/wheel and a strict twine check for metadata From 2ce479906a3fdf784a2519a4d997b03c740812cc Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 21:15:48 -0500 Subject: [PATCH 35/36] Update changes --- CHANGES.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 0fb58952..df5ae899 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,8 +1,13 @@ 1.4.5 (unreleased) ================== +Other +----- + - Added ``alignment`` sub-package. 
[#179] +- Enable automatic linting and code style checks [#187] + ramp_fitting ------------ @@ -27,11 +32,6 @@ Bug Fixes - -Other ------ - -- - 1.4.4 (2023-09-15) ================== From 108608b635a4e5efbe86602eacfc1ea68d067934 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 10 Nov 2023 23:33:54 -0500 Subject: [PATCH 36/36] Add soctests marker for romancal tests --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index abff2aa1..a945de6e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -91,6 +91,9 @@ norecursedirs = [ filterwarnings = [ "error::ResourceWarning", ] +markers = [ + "soctests", +] [tool.ruff] line-length = 110
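
For reference, the numpydoc layout selected by ``convention = "numpy"`` in the pydocstyle patch above expects a summary line that ends in a period and section titles underlined to their full width (the underline-length fix applied to dark_class.py). The following sketch shows that layout; the function itself is invented for illustration and is not part of stcal:

    import numpy as np


    def average_frames(data, nframes):
        """Average consecutive frames in groups of ``nframes``.

        Parameters
        ----------
        data : numpy.ndarray
            Input cube of shape ``(n, ny, nx)``, with ``n`` divisible by ``nframes``.
        nframes : int
            Number of frames per averaging group.

        Returns
        -------
        numpy.ndarray
            Averaged cube of shape ``(n // nframes, ny, nx)``.
        """
        n, ny, nx = data.shape
        return data.reshape(n // nframes, nframes, ny, nx).mean(axis=1)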
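The rule families enabled in the later patches (EM, SIM, flake8-type-checking, and TRY) steer code toward the patterns below. This standalone sketch uses invented names (``save_tree``, ``pick_dark``, ``check_parity``, ``effective_time``) that are not stcal APIs; it only illustrates the shape each rule asks for:

    from __future__ import annotations

    import logging
    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Type-checking rule: an import needed only for an annotation moves
        # into this block, so it is never imported at runtime.
        from asdf import AsdfFile

    log = logging.getLogger(__name__)


    def save_tree(af: AsdfFile, path: str) -> None:
        # The AsdfFile name above is used only in this annotation.
        af.write_to(path)


    def pick_dark(dark, i, n_dark):
        # SIM108: a conditional expression replaces a four-line if/else.
        return dark[i] if i < n_dark else dark[-1]


    def check_parity(vparity: int) -> None:
        if vparity not in (1, -1):
            # EM102: bind the message to a variable instead of passing an
            # f-string literal directly to the exception constructor.
            msg = f"vparity should be 1 or -1. Input was: {vparity}"
            raise ValueError(msg)


    def effective_time(nframes, groupgap, frame_time):
        try:
            return (nframes + groupgap) * frame_time
        except TypeError:
            # TRY400: use log.exception rather than log.error in an except
            # block so the traceback is recorded with the message.
            log.exception("Cannot retrieve values needed to calculate integration time")
            raise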
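The ``soctests`` marker registered in the final patch must be declared under ``markers`` so that applying ``@pytest.mark.soctests`` does not trigger ``PytestUnknownMarkWarning``. A marked test would look roughly like this, with a placeholder body:

    import pytest


    @pytest.mark.soctests
    def test_example():
        # Selected (or deselected) with: pytest -m soctests
        assert True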