From 6152ac6b3ec7c8d7eb7a9eb4dbc61c26278aba97 Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Tue, 25 Jun 2024 13:22:49 -0700 Subject: [PATCH 01/12] reorganize utils --- .flake8 | 4 +- .gitignore | 4 +- CHANGELOG.md | 5 + docs/uvbeam_tutorial.rst | 14 +- docs/uvcal_tutorial.rst | 18 +- docs/uvdata_tutorial.rst | 24 +- setup.py | 30 +- src/pyuvdata/__init__.py | 4 + src/pyuvdata/apply_uvflag.py | 120 + src/pyuvdata/parameter.py | 2 +- src/pyuvdata/telescopes.py | 11 +- src/pyuvdata/utils.py | 7698 ----------------- src/pyuvdata/utils.pyx | 530 -- src/pyuvdata/utils/__init__.py | 215 + src/pyuvdata/utils/array_collapse.py | 243 + src/pyuvdata/utils/bls.py | 385 + src/pyuvdata/utils/bls.pyx | 235 + src/pyuvdata/utils/coordinates.py | 474 + src/pyuvdata/utils/coordinates.pyx | 226 + src/pyuvdata/utils/file_io/__init__.py | 4 + src/pyuvdata/utils/file_io/antpos.py | 50 + src/pyuvdata/utils/file_io/fits.py | 117 + .../{hdf5_utils.py => utils/file_io/hdf5.py} | 197 +- .../{ms_utils.py => utils/file_io/ms.py} | 14 +- src/pyuvdata/utils/helpers.py | 1340 +++ src/pyuvdata/utils/lst.py | 245 + src/pyuvdata/utils/phasing.py | 2564 ++++++ src/pyuvdata/utils/phasing.pyx | 96 + src/pyuvdata/utils/pol.py | 499 ++ src/pyuvdata/utils/ps_cat.py | 796 ++ src/pyuvdata/utils/redundancy.py | 369 + src/pyuvdata/uvbase.py | 2 +- src/pyuvdata/uvbeam/beamfits.py | 31 +- src/pyuvdata/uvbeam/cst_beam.py | 7 +- src/pyuvdata/uvbeam/mwa_beam.py | 4 +- src/pyuvdata/uvbeam/uvbeam.py | 31 +- src/pyuvdata/uvcal/calfits.py | 37 +- src/pyuvdata/uvcal/calh5.py | 38 +- src/pyuvdata/uvcal/fhd_cal.py | 4 +- src/pyuvdata/uvcal/ms_cal.py | 10 +- src/pyuvdata/uvcal/uvcal.py | 77 +- src/pyuvdata/uvcalibrate.py | 421 + src/pyuvdata/uvdata/fhd.py | 6 +- src/pyuvdata/uvdata/initializers.py | 2 +- src/pyuvdata/uvdata/mir.py | 4 +- src/pyuvdata/uvdata/mir_parser.py | 6 +- src/pyuvdata/uvdata/miriad.py | 15 +- src/pyuvdata/uvdata/ms.py | 10 +- src/pyuvdata/uvdata/mwa_corr_fits.py | 12 +- src/pyuvdata/uvdata/uvdata.py | 161 +- src/pyuvdata/uvdata/uvfits.py | 24 +- src/pyuvdata/uvdata/uvh5.py | 42 +- src/pyuvdata/uvflag/uvflag.py | 61 +- tests/conftest.py | 15 + tests/test_apply_uvflag.py | 124 + tests/test_parameter.py | 4 +- tests/test_utils.py | 5045 ----------- tests/test_uvcalibrate.py | 622 ++ tests/utils/__init__.py | 4 + tests/utils/conftest.py | 119 + tests/utils/file_io/__init__.py | 4 + tests/utils/file_io/test_fits.py | 37 + tests/utils/file_io/test_hdf5.py | 66 + .../file_io/test_ms.py} | 2 +- tests/utils/test_array_collapse.py | 405 + tests/utils/test_bls.py | 38 + tests/utils/test_coordinates.py | 749 ++ tests/utils/test_helpers.py | 294 + tests/utils/test_lst.py | 211 + tests/utils/test_phasing.py | 1960 +++++ tests/utils/test_pol.py | 235 + tests/utils/test_ps_cat.py | 16 + tests/utils/test_redundancy.py | 372 + tests/uvbeam/test_beamfits.py | 22 +- tests/uvbeam/test_mwa_beam.py | 5 +- tests/uvbeam/test_uvbeam.py | 54 +- tests/uvcal/test_calfits.py | 22 +- tests/uvcal/test_calh5.py | 9 +- tests/uvcal/test_initializers.py | 4 +- tests/uvcal/test_uvcal.py | 89 +- tests/uvdata/test_fhd.py | 5 +- tests/uvdata/test_initializers.py | 4 +- tests/uvdata/test_miriad.py | 23 +- tests/uvdata/test_ms.py | 9 +- tests/uvdata/test_uvdata.py | 219 +- tests/uvdata/test_uvfits.py | 48 +- tests/uvdata/test_uvh5.py | 17 +- tests/uvflag/test_uvflag.py | 49 +- 88 files changed, 14544 insertions(+), 13895 deletions(-) create mode 100644 src/pyuvdata/apply_uvflag.py delete mode 100644 src/pyuvdata/utils.py delete mode 100644 
src/pyuvdata/utils.pyx
 create mode 100644 src/pyuvdata/utils/__init__.py
 create mode 100644 src/pyuvdata/utils/array_collapse.py
 create mode 100644 src/pyuvdata/utils/bls.py
 create mode 100644 src/pyuvdata/utils/bls.pyx
 create mode 100644 src/pyuvdata/utils/coordinates.py
 create mode 100644 src/pyuvdata/utils/coordinates.pyx
 create mode 100644 src/pyuvdata/utils/file_io/__init__.py
 create mode 100644 src/pyuvdata/utils/file_io/antpos.py
 create mode 100644 src/pyuvdata/utils/file_io/fits.py
 rename src/pyuvdata/{hdf5_utils.py => utils/file_io/hdf5.py} (71%)
 rename src/pyuvdata/{ms_utils.py => utils/file_io/ms.py} (99%)
 create mode 100644 src/pyuvdata/utils/helpers.py
 create mode 100644 src/pyuvdata/utils/lst.py
 create mode 100644 src/pyuvdata/utils/phasing.py
 create mode 100644 src/pyuvdata/utils/phasing.pyx
 create mode 100644 src/pyuvdata/utils/pol.py
 create mode 100644 src/pyuvdata/utils/ps_cat.py
 create mode 100644 src/pyuvdata/utils/redundancy.py
 create mode 100644 src/pyuvdata/uvcalibrate.py
 create mode 100644 tests/test_apply_uvflag.py
 delete mode 100644 tests/test_utils.py
 create mode 100644 tests/test_uvcalibrate.py
 create mode 100644 tests/utils/__init__.py
 create mode 100644 tests/utils/conftest.py
 create mode 100644 tests/utils/file_io/__init__.py
 create mode 100644 tests/utils/file_io/test_fits.py
 create mode 100644 tests/utils/file_io/test_hdf5.py
 rename tests/{test_ms_utils.py => utils/file_io/test_ms.py} (99%)
 create mode 100644 tests/utils/test_array_collapse.py
 create mode 100644 tests/utils/test_bls.py
 create mode 100644 tests/utils/test_coordinates.py
 create mode 100644 tests/utils/test_helpers.py
 create mode 100644 tests/utils/test_lst.py
 create mode 100644 tests/utils/test_phasing.py
 create mode 100644 tests/utils/test_pol.py
 create mode 100644 tests/utils/test_ps_cat.py
 create mode 100644 tests/utils/test_redundancy.py

diff --git a/.flake8 b/.flake8
index 5b6d626a5b..5964dd59e6 100644
--- a/.flake8
+++ b/.flake8
@@ -14,8 +14,8 @@ per-file-ignores =
     # remove the following lines as functions and input variables are renamed to pep8 style:
     src/pyuvdata/uvdata/uvdata.py: N802
     src/pyuvdata/uvbeam/mwa_beam.py: N802
-    src/pyuvdata/utils.py: N802, N803
-    tests/test_utils.py: D,N802
+    src/pyuvdata/utils/coordinates.py: N802, N803
+    tests/utils/test_coordinates.py: D,N802
     tests/__init__.py: D,N802
 docstring-convention = numpy
 select = C,E,W,T4,B9,F,D,A,N,RST,B
diff --git a/.gitignore b/.gitignore
index 80ea997139..32fa865448 100644
--- a/.gitignore
+++ b/.gitignore
@@ -73,7 +73,9 @@ docs/references/_minted*/*.pygtex
 docs/references/_minted*/*.pygstyle
 
 # autogenerated c code
-src/pyuvdata/utils.c
+src/pyuvdata/utils/bls.c
+src/pyuvdata/utils/phasing.c
+src/pyuvdata/utils/coordinates.c
 src/pyuvdata/uvbeam/uvbeam.c
 src/pyuvdata/uvdata/corr_fits.c
 src/pyuvdata/uvdata/src/miriad_wrap.cpp
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ad9cef93ee..4fab458c82 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -32,6 +32,11 @@ time for each time range or the time_array (if there's a time_array and no time_
 - Added new keyword handling for v.6 of the MIR data format within `MirParser`.
 
 ### Changed
+- Restructured `utils.py` into a new `utils` subpackage with functions split among
+several submodules. Widely used utility functions are still available from
+`pyuvdata.utils`, although this access pattern is deprecated for some of them.
+- Moved `uvcalibrate` and `apply_uvflag` to their own modules at the top level.
+They can still be accessed via `utils`, but that access pattern is deprecated.
- Modified `UVBeam.interp` to speed up processing when `check_azza_domain=True`. - Updated minimum dependencies: setuptools>=64, setuptools_scm>=8.0 - Restructured to a `src` layout. This should not affect most users, but the diff --git a/docs/uvbeam_tutorial.rst b/docs/uvbeam_tutorial.rst index 50ee8da3f1..7134701c94 100644 --- a/docs/uvbeam_tutorial.rst +++ b/docs/uvbeam_tutorial.rst @@ -321,7 +321,7 @@ or "ee). >>> import numpy as np >>> from pyuvdata import UVBeam >>> from pyuvdata.data import DATA_PATH - >>> import pyuvdata.utils as uvutils + >>> from pyuvdata import utils >>> settings_file = os.path.join(DATA_PATH, 'NicCSTbeams/NicCSTbeams.yaml') >>> uvb = UVBeam.from_file(settings_file, beam_type='efield') @@ -352,7 +352,7 @@ or "ee). [-5 -6 -7 -8] >>> # polarization numbers can be converted to strings using a utility function - >>> print(uvutils.polnum2str(uvb.polarization_array)) + >>> print(utils.polnum2str(uvb.polarization_array)) ['xx', 'yy', 'xy', 'yx'] >>> # select polarizations using the polarization numbers @@ -361,7 +361,7 @@ or "ee). >>> # print polarization numbers and strings after select >>> print(uvb.polarization_array) [-5 -6 -7] - >>> print(uvutils.polnum2str(uvb.polarization_array)) + >>> print(utils.polnum2str(uvb.polarization_array)) ['xx', 'yy', 'xy'] >>> # select polarizations using the polarization strings @@ -370,7 +370,7 @@ or "ee). >>> # print polarization numbers and strings after select >>> print(uvb.polarization_array) [-5 -6] - >>> print(uvutils.polnum2str(uvb.polarization_array)) + >>> print(utils.polnum2str(uvb.polarization_array)) ['xx', 'yy'] >>> # print x_orientation @@ -383,7 +383,7 @@ or "ee). >>> # print polarization numbers and strings after select >>> print(uvb.polarization_array) [-5] - >>> print(uvutils.polnum2str(uvb.polarization_array)) + >>> print(utils.polnum2str(uvb.polarization_array)) ['xx'] @@ -455,7 +455,7 @@ b) Generating pseudo Stokes ('pI', 'pQ', 'pU', 'pV') beams >>> from matplotlib.colors import LogNorm # doctest: +SKIP >>> from pyuvdata import UVBeam >>> from pyuvdata.data import DATA_PATH - >>> from pyuvdata import utils as uvutils + >>> from pyuvdata import utils >>> settings_file = os.path.join(DATA_PATH, 'NicCSTbeams/NicCSTbeams.yaml') >>> beam = UVBeam.from_file(settings_file, beam_type='efield') @@ -468,7 +468,7 @@ b) Generating pseudo Stokes ('pI', 'pQ', 'pU', 'pV') beams >>> # plotting pseudo-stokes I >>> pol_array = pstokes_beam.polarization_array - >>> pstokes = uvutils.polstr2num('pI') + >>> pstokes = utils.polstr2num('pI') >>> pstokes_ind = np.where(np.isin(pol_array, pstokes))[0][0] >>> azimuth, za = np.meshgrid(pstokes_beam.axis1_array, pstokes_beam.axis2_array) >>> plt.scatter(azimuth, 1-za, c=np.abs(pstokes_beam.data_array[0, 0, pstokes_ind, 0, :]), norm=LogNorm()) # doctest: +SKIP diff --git a/docs/uvcal_tutorial.rst b/docs/uvcal_tutorial.rst index 61c62a8424..acc64ea9ba 100644 --- a/docs/uvcal_tutorial.rst +++ b/docs/uvcal_tutorial.rst @@ -258,7 +258,7 @@ a) Data for a single antenna and instrumental polarization UVCal: Calibrating UVData ------------------------- Calibration solutions in a :class:`pyuvdata.UVCal` object can be applied to a -:class:`pyuvdata.UVData` object using the :func:`pyuvdata.utils.uvcalibrate` function. +:class:`pyuvdata.UVData` object using the :func:`pyuvdata.uvcalibrate` function. 
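Because this patch moves ``uvcalibrate`` and ``apply_uvflag`` from ``utils`` to the
top level of the package, the import path changes. A minimal sketch of the new and
old access patterns, based on the CHANGELOG entry in this patch (that the old path
emits a deprecation warning is an assumption; the CHANGELOG only states that it is
deprecated)::

    >>> # new, preferred top-level imports added by this patch
    >>> from pyuvdata import uvcalibrate, apply_uvflag

    >>> # old access pattern: still available per the CHANGELOG, but deprecated
    >>> from pyuvdata import utils
    >>> deprecated_uvcalibrate = utils.uvcalibrate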
a) Calibration of UVData by UVCal @@ -267,7 +267,7 @@ a) Calibration of UVData by UVCal >>> # We can calibrate directly using a UVCal object >>> import os - >>> from pyuvdata import UVData, UVCal, utils + >>> from pyuvdata import UVData, UVCal, uvcalibrate >>> from pyuvdata.data import DATA_PATH >>> uvd = UVData.from_file( ... os.path.join(DATA_PATH, "zen.2458098.45361.HH.uvh5_downselected"), @@ -281,10 +281,10 @@ a) Calibration of UVData by UVCal >>> uvc.telescope.antenna_names = np.array( ... [name.replace("ant", "HH") for name in uvc.telescope.antenna_names] ... ) - >>> uvd_calibrated = utils.uvcalibrate(uvd, uvc, inplace=False) + >>> uvd_calibrated = uvcalibrate(uvd, uvc, inplace=False) >>> # We can also un-calibrate using the same UVCal - >>> uvd_uncalibrated = utils.uvcalibrate(uvd_calibrated, uvc, inplace=False, undo=True) + >>> uvd_uncalibrated = uvcalibrate(uvd_calibrated, uvc, inplace=False, undo=True) UVCal: Selecting data @@ -385,7 +385,7 @@ represting the physical orientation of the dipole can also be used (e.g. "Jnn" o >>> import numpy as np >>> from pyuvdata import UVCal >>> from pyuvdata.data import DATA_PATH - >>> import pyuvdata.utils as uvutils + >>> from pyuvdata import utils >>> filename = os.path.join(DATA_PATH, "zen.2458098.45361.HH.omni.calfits_downselected") >>> cal = UVCal.from_file(filename) @@ -394,7 +394,7 @@ represting the physical orientation of the dipole can also be used (e.g. "Jnn" o [-5 -6] >>> # Jones component numbers can be converted to strings using a utility function - >>> print(uvutils.jnum2str(cal.jones_array)) + >>> print(utils.jnum2str(cal.jones_array)) ['Jxx', 'Jyy'] >>> # make a copy of the object and select Jones components using the component numbers @@ -404,7 +404,7 @@ represting the physical orientation of the dipole can also be used (e.g. "Jnn" o >>> # print Jones component numbers and strings after select >>> print(cal2.jones_array) [-5] - >>> print(uvutils.jnum2str(cal2.jones_array)) + >>> print(utils.jnum2str(cal2.jones_array)) ['Jxx'] >>> # make a copy of the object and select Jones components using the component strings @@ -414,7 +414,7 @@ represting the physical orientation of the dipole can also be used (e.g. "Jnn" o >>> # print Jones component numbers and strings after select >>> print(cal2.jones_array) [-5] - >>> print(uvutils.jnum2str(cal2.jones_array)) + >>> print(utils.jnum2str(cal2.jones_array)) ['Jxx'] >>> # print x_orientation @@ -428,7 +428,7 @@ represting the physical orientation of the dipole can also be used (e.g. "Jnn" o >>> # print Jones component numbers and strings after select >>> print(cal2.jones_array) [-5] - >>> print(uvutils.jnum2str(cal2.jones_array)) + >>> print(utils.jnum2str(cal2.jones_array)) ['Jxx'] UVCal: Adding data diff --git a/docs/uvdata_tutorial.rst b/docs/uvdata_tutorial.rst index d971155bbd..9318040180 100644 --- a/docs/uvdata_tutorial.rst +++ b/docs/uvdata_tutorial.rst @@ -1095,7 +1095,7 @@ the physical orientation of the dipole can also be used (e.g. "nn" or "ee). >>> import numpy as np >>> from pyuvdata import UVData >>> from pyuvdata.data import DATA_PATH - >>> import pyuvdata.utils as uvutils + >>> from pyuvdata import utils >>> filename = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits') >>> uvd = UVData.from_file(filename) @@ -1104,7 +1104,7 @@ the physical orientation of the dipole can also be used (e.g. "nn" or "ee). 
[-1 -2 -3 -4] >>> # polarization numbers can be converted to strings using a utility function - >>> print(uvutils.polnum2str(uvd.polarization_array)) + >>> print(utils.polnum2str(uvd.polarization_array)) ['rr', 'll', 'rl', 'lr'] >>> # select polarizations using the polarization numbers @@ -1113,7 +1113,7 @@ the physical orientation of the dipole can also be used (e.g. "nn" or "ee). >>> # print polarization numbers and strings after select >>> print(uvd.polarization_array) [-1 -2 -3] - >>> print(uvutils.polnum2str(uvd.polarization_array)) + >>> print(utils.polnum2str(uvd.polarization_array)) ['rr', 'll', 'rl'] >>> # select polarizations using the polarization strings @@ -1122,7 +1122,7 @@ the physical orientation of the dipole can also be used (e.g. "nn" or "ee). >>> # print polarization numbers and strings after select >>> print(uvd.polarization_array) [-1 -2] - >>> print(uvutils.polnum2str(uvd.polarization_array)) + >>> print(utils.polnum2str(uvd.polarization_array)) ['rr', 'll'] >>> # read in a file with linear polarizations and an x_orientation @@ -1132,7 +1132,7 @@ the physical orientation of the dipole can also be used (e.g. "nn" or "ee). >>> # print polarization numbers and strings >>> print(uvd.polarization_array) [-5 -6] - >>> print(uvutils.polnum2str(uvd.polarization_array)) + >>> print(utils.polnum2str(uvd.polarization_array)) ['xx', 'yy'] >>> # print x_orientation @@ -1145,7 +1145,7 @@ the physical orientation of the dipole can also be used (e.g. "nn" or "ee). >>> # print polarization numbers and strings after select >>> print(uvd.polarization_array) [-6] - >>> print(uvutils.polnum2str(uvd.polarization_array)) + >>> print(utils.polnum2str(uvd.polarization_array)) ['yy'] @@ -1794,14 +1794,14 @@ ordering set by the user. >>> import os >>> from pyuvdata import UVData >>> from pyuvdata.data import DATA_PATH - >>> import pyuvdata.utils as uvutils + >>> from pyuvdata import utils >>> uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits') >>> uvd = UVData.from_file(uvfits_file) - >>> print(uvutils.polnum2str(uvd.polarization_array)) + >>> print(utils.polnum2str(uvd.polarization_array)) ['rr', 'll', 'rl', 'lr'] >>> uvd.reorder_pols('CASA') - >>> print(uvutils.polnum2str(uvd.polarization_array)) + >>> print(utils.polnum2str(uvd.polarization_array)) ['rr', 'rl', 'lr', 'll'] .. _flex_pol: @@ -1969,9 +1969,9 @@ object (except for antenna pairs with no associated data). There are also utility functions to get redundant groups from either a list of baselines vectors and corresponding baseline indices -(:func:`pyuvdata.utils.get_baseline_redundancies`) +(:func:`pyuvdata.utils.redundancy.get_baseline_redundancies`) or antenna positions and antenna indices -(:func:`pyuvdata.utils.get_antenna_redundancies`). Note that using these utility +(:func:`pyuvdata.utils.redundancy.get_antenna_redundancies`). Note that using these utility functions for the baselines on an object is less memory efficient than using :meth:`pyuvdata.UVData.get_redundancies` because the latter only uses the first time in the baseline array. @@ -1983,7 +1983,7 @@ the baseline array. >>> import numpy as np >>> from pyuvdata import UVData >>> from pyuvdata.data import DATA_PATH - >>> from pyuvdata import utils as uvutils + >>> from pyuvdata import utils >>> # This file contains a HERA19 layout. 
>>> uvd = UVData.from_file( diff --git a/setup.py b/setup.py index c7a2d8096c..60eafc411d 100644 --- a/setup.py +++ b/setup.py @@ -87,9 +87,25 @@ def is_platform_windows(): extra_link_args=extra_link_args, ) -utils_extension = Extension( - "pyuvdata._utils", - sources=["src/pyuvdata/utils.pyx"], +bls_extension = Extension( + "pyuvdata._bls", + sources=["src/pyuvdata/utils/bls.pyx"], + define_macros=global_c_macros, + include_dirs=[numpy.get_include()], + extra_compile_args=extra_compile_args, +) + +coordinates_extension = Extension( + "pyuvdata._coordinates", + sources=["src/pyuvdata/utils/coordinates.pyx"], + define_macros=global_c_macros, + include_dirs=[numpy.get_include()], + extra_compile_args=extra_compile_args, +) + +phasing_extension = Extension( + "pyuvdata._phasing", + sources=["src/pyuvdata/utils/phasing.pyx"], define_macros=global_c_macros, include_dirs=[numpy.get_include()], extra_compile_args=extra_compile_args, @@ -103,7 +119,13 @@ def is_platform_windows(): extra_compile_args=extra_compile_args, ) -extensions = [corr_fits_extension, utils_extension, uvbeam_extension] +extensions = [ + corr_fits_extension, + bls_extension, + coordinates_extension, + phasing_extension, + uvbeam_extension, +] # don't build miriad on windows if not is_platform_windows(): diff --git a/src/pyuvdata/__init__.py b/src/pyuvdata/__init__.py index 93835f4d39..d60cc9ebd2 100644 --- a/src/pyuvdata/__init__.py +++ b/src/pyuvdata/__init__.py @@ -30,6 +30,7 @@ warnings.filterwarnings("ignore", message="numpy.dtype size changed") warnings.filterwarnings("ignore", message="numpy.ufunc size changed") +from .apply_uvflag import apply_uvflag # noqa from .telescopes import ( # noqa Telescope, get_telescope, @@ -38,6 +39,7 @@ ) from .uvbeam import UVBeam # noqa from .uvcal import UVCal # noqa +from .uvcalibrate import uvcalibrate # noqa from .uvdata import FastUVH5Meta # noqa from .uvdata import UVData # noqa from .uvflag import UVFlag # noqa @@ -52,6 +54,8 @@ "known_telescopes", "known_telescope_location", "get_telescope", + "uvcalibrate", + "apply_uvflag", ] diff --git a/src/pyuvdata/apply_uvflag.py b/src/pyuvdata/apply_uvflag.py new file mode 100644 index 0000000000..5ded0d5620 --- /dev/null +++ b/src/pyuvdata/apply_uvflag.py @@ -0,0 +1,120 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Code to apply flags to calibration or visibility data.""" + +import numpy as np + + +def apply_uvflag( + uvd, uvf, *, inplace=True, unflag_first=False, flag_missing=True, force_pol=True +): + """ + Apply flags from a UVFlag to a UVData instantiation. + + Note that if uvf.Nfreqs or uvf.Ntimes is 1, it will broadcast flags across + that axis. + + Parameters + ---------- + uvd : UVData object + UVData object to add flags to. + uvf : UVFlag object + A UVFlag object in flag mode. + inplace : bool + If True overwrite flags in uvd, otherwise return new object + unflag_first : bool + If True, completely unflag the UVData before applying flags. + Else, OR the inherent uvd flags with uvf flags. + flag_missing : bool + If input uvf is a baseline type and antpairs in uvd do not exist in uvf, + flag them in uvd. Otherwise leave them untouched. + force_pol : bool + If True, broadcast flags to all polarizations if they do not match. + Only works if uvf.Npols == 1. 
+ + Returns + ------- + UVData + If not inplace, returns new UVData object with flags applied + + """ + # assertions + if uvf.mode != "flag": + raise ValueError("UVFlag must be flag mode") + + if not inplace: + uvd = uvd.copy() + + # make a deepcopy by default b/c it is generally edited inplace downstream + uvf = uvf.copy() + + # convert to baseline type + if uvf.type != "baseline": + # edits inplace + uvf.to_baseline(uvd, force_pol=force_pol) + + else: + # make sure polarizations match or force_pol + uvd_pols, uvf_pols = ( + uvd.polarization_array.tolist(), + uvf.polarization_array.tolist(), + ) + if set(uvd_pols) != set(uvf_pols): + if uvf.Npols == 1 and force_pol: + # if uvf is 1pol we can make them match: also edits inplace + uvf.polarization_array = uvd.polarization_array + uvf.Npols = len(uvf.polarization_array) + uvf_pols = uvf.polarization_array.tolist() + + else: + raise ValueError("Input uvf and uvd polarizations do not match") + + # make sure polarization ordering is correct: also edits inplace + uvf.polarization_array = uvf.polarization_array[ + [uvd_pols.index(pol) for pol in uvf_pols] + ] + + # check time and freq shapes match: if Ntimes or Nfreqs is 1, allow + # implicit broadcasting + if uvf.Ntimes == 1: + mismatch_times = False + elif uvf.Ntimes == uvd.Ntimes: + tdiff = np.unique(uvf.time_array) - np.unique(uvd.time_array) + mismatch_times = np.any(tdiff > np.max(np.abs(uvf._time_array.tols))) + else: + mismatch_times = True + if mismatch_times: + raise ValueError("UVFlag and UVData have mismatched time arrays.") + + if uvf.Nfreqs == 1: + mismatch_freqs = False + elif uvf.Nfreqs == uvd.Nfreqs: + fdiff = np.unique(uvf.freq_array) - np.unique(uvd.freq_array) + mismatch_freqs = np.any(fdiff > np.max(np.abs(uvf._freq_array.tols))) + else: + mismatch_freqs = True + if mismatch_freqs: + raise ValueError("UVFlag and UVData have mismatched frequency arrays.") + + # unflag if desired + if unflag_first: + uvd.flag_array[:] = False + + # iterate over antpairs and apply flags: TODO need to be able to handle + # conjugated antpairs + uvf_antpairs = uvf.get_antpairs() + for ap in uvd.get_antpairs(): + uvd_ap_inds = uvd.antpair2ind(ap) + if ap not in uvf_antpairs: + if flag_missing: + uvd.flag_array[uvd_ap_inds] = True + continue + uvf_ap_inds = uvf.antpair2ind(*ap) + # addition of boolean is OR + uvd.flag_array[uvd_ap_inds] += uvf.flag_array[uvf_ap_inds] + + uvd.history += "\nFlagged with pyuvdata.apply_uvflags." + + if not inplace: + return uvd diff --git a/src/pyuvdata/parameter.py b/src/pyuvdata/parameter.py index 0ee8e930ab..57a0ce328b 100644 --- a/src/pyuvdata/parameter.py +++ b/src/pyuvdata/parameter.py @@ -16,8 +16,8 @@ import builtins import warnings -import astropy.units as units import numpy as np +from astropy import units from astropy.coordinates import EarthLocation, SkyCoord allowed_location_types = [EarthLocation] diff --git a/src/pyuvdata/telescopes.py b/src/pyuvdata/telescopes.py index 559678fa28..d6be12461d 100644 --- a/src/pyuvdata/telescopes.py +++ b/src/pyuvdata/telescopes.py @@ -17,10 +17,11 @@ from astropy import units from astropy.coordinates import Angle, EarthLocation -from . import hdf5_utils from . import parameter as uvp from . 
import utils
 from .data import DATA_PATH
+from .utils.file_io import antpos
+from .utils.file_io import hdf5 as hdf5_utils
 from .uvbase import UVBase
 
 __all__ = ["Telescope", "known_telescopes", "known_telescope_location", "get_telescope"]
@@ -465,7 +466,7 @@ def check(self, *, check_extra=True, run_check_acceptability=True):
 
         if run_check_acceptability:
             # Check antenna positions
-            utils.check_surface_based_positions(
+            utils.helpers.check_surface_based_positions(
                 antenna_positions=self.antenna_positions,
                 telescope_loc=self.location,
                 raise_error=False,
@@ -558,7 +559,7 @@ def update_params_from_known_telescopes(
                     DATA_PATH, telescope_dict["antenna_positions_file"]
                 )
                 antenna_names, antenna_numbers, antenna_positions = (
-                    utils.parse_antpos_file(antpos_file)
+                    antpos.read_antpos_csv(antpos_file)
                 )
                 ant_info = {
                     "Nants": antenna_names.size,
@@ -773,10 +774,10 @@ def new(
         """
         tel_obj = cls()
 
-        if not isinstance(location, tuple(utils.allowed_location_types)):
+        if not isinstance(location, tuple(utils.coordinates.allowed_location_types)):
             raise ValueError(
                 "telescope_location has an unsupported type, it must be one of "
-                f"{utils.allowed_location_types}"
+                f"{utils.coordinates.allowed_location_types}"
             )
 
         tel_obj.name = name
diff --git a/src/pyuvdata/utils.py b/src/pyuvdata/utils.py
deleted file mode 100644
index 0e21eac874..0000000000
--- a/src/pyuvdata/utils.py
+++ /dev/null
@@ -1,7698 +0,0 @@
-# -*- mode: python; coding: utf-8 -*-
-# Copyright (c) 2018 Radio Astronomy Software Group
-# Licensed under the 2-clause BSD License
-
-"""Commonly used utility functions."""
-from __future__ import annotations
-
-import copy
-import re
-import warnings
-from collections.abc import Iterable
-from copy import deepcopy
-from functools import lru_cache, wraps
-from typing import Iterable as IterableType
-
-import erfa
-import numpy as np
-from astropy import units
-from astropy.coordinates import Angle, Distance, EarthLocation, SkyCoord
-from astropy.time import Time
-from astropy.utils import iers
-from scipy.spatial.distance import cdist
-
-from . import _utils
-
-try:
-    from lunarsky import MoonLocation
-    from lunarsky import SkyCoord as LunarSkyCoord
-    from lunarsky import Time as LTime
-
-    hasmoon = True
-except ImportError:
-    hasmoon = False
-
-
-__all__ = [
-    "POL_STR2NUM_DICT",
-    "POL_NUM2STR_DICT",
-    "CONJ_POL_DICT",
-    "JONES_STR2NUM_DICT",
-    "JONES_NUM2STR_DICT",
-    "XORIENTMAP",
-    "LatLonAlt_from_XYZ",
-    "XYZ_from_LatLonAlt",
-    "rotECEF_from_ECEF",
-    "ECEF_from_rotECEF",
-    "ENU_from_ECEF",
-    "ECEF_from_ENU",
-    "undo_old_uvw_calc",
-    "old_uvw_calc",
-    "uvcalibrate",
-    "apply_uvflag",
-    "get_lst_for_time",
-    "polstr2num",
-    "polnum2str",
-    "jstr2num",
-    "jnum2str",
-    "parse_polstr",
-    "parse_jpolstr",
-    "conj_pol",
-    "reorder_conj_pols",
-    "baseline_to_antnums",
-    "antnums_to_baseline",
-    "baseline_index_flip",
-    "get_baseline_redundancies",
-    "get_antenna_redundancies",
-    "collapse",
-    "mean_collapse",
-    "absmean_collapse",
-    "quadmean_collapse",
-    "or_collapse",
-    "and_collapse",
-]
-
-# standard angle tolerance: 1 mas in radians.
-RADIAN_TOL = 1 * 2 * np.pi * 1e-3 / (60.0 * 60.0 * 360.0)
-# standard lst time tolerance: 5 ms (75 mas in radians), based on an expected RMS
-# accuracy of 1 ms at 7 days out from issuance of Bulletin A (which are issued once a
-# week with rapidly determined parameters and forecasted values of DUT1), the exact
-# formula for which is t_err = 0.00025 (MJD - MJD of Bulletin A issuance)**0.75 (in secs).
-LST_RAD_TOL = 2 * np.pi * 5e-3 / (86400.0) - -# fmt: off -# polarization constants -# maps polarization strings to polarization integers -POL_STR2NUM_DICT = {"pI": 1, "pQ": 2, "pU": 3, "pV": 4, - "I": 1, "Q": 2, "U": 3, "V": 4, # support straight stokes names - "rr": -1, "ll": -2, "rl": -3, "lr": -4, - "xx": -5, "yy": -6, "xy": -7, "yx": -8, - "hh": -5, "vv": -6, "hv": -7, "vh": -8} - -# maps polarization integers to polarization strings -POL_NUM2STR_DICT = {1: "pI", 2: "pQ", 3: "pU", 4: "pV", - -1: "rr", -2: "ll", -3: "rl", -4: "lr", - -5: "xx", -6: "yy", -7: "xy", -8: "yx"} - -# maps how polarizations change when antennas are swapped -CONJ_POL_DICT = {"xx": "xx", "yy": "yy", "xy": "yx", "yx": "xy", - "ee": "ee", "nn": "nn", "en": "ne", "ne": "en", - "rr": "rr", "ll": "ll", "rl": "lr", "lr": "rl", - "I": "I", "Q": "Q", "U": "U", "V": "V", - "pI": "pI", "pQ": "pQ", "pU": "pU", "pV": "pV"} - -# maps jones matrix element strings to jones integers -# Add entries that don't start with "J" to allow shorthand versions -JONES_STR2NUM_DICT = {"Jxx": -5, "Jyy": -6, "Jxy": -7, "Jyx": -8, - "xx": -5, "x": -5, "yy": -6, "y": -6, "xy": -7, "yx": -8, - "Jrr": -1, "Jll": -2, "Jrl": -3, "Jlr": -4, - "rr": -1, "r": -1, "ll": -2, "l": -2, "rl": -3, "lr": -4} -# maps jones integers to jones matrix element strings -JONES_NUM2STR_DICT = {-1: "Jrr", -2: "Jll", -3: "Jrl", -4: "Jlr", - -5: "Jxx", -6: "Jyy", -7: "Jxy", -8: "Jyx"} - -# maps uvdata pols to input feed polarizations. Note that this dict is also used for -# CASA MS writing, so the pseudo-stokes parameters are included here to provide mapping -# to a consistent (if non-physical) set of feeds for the pseudo-stokes visibilities, -# which are nominally supported by the CASA MS format. -POL_TO_FEED_DICT = {"xx": ["x", "x"], "yy": ["y", "y"], - "xy": ["x", "y"], "yx": ["y", "x"], - "ee": ["e", "e"], "nn": ["n", "n"], - "en": ["e", "n"], "ne": ["n", "e"], - "rr": ["r", "r"], "ll": ["l", "l"], - "rl": ["r", "l"], "lr": ["l", "r"], - "pI": ["I", "I"], "pQ": ["Q", "Q"], - "pU": ["U", "U"], "pV": ["V", "V"]} - -# fmt: on - -XORIENTMAP = { - "east": "east", - "north": "north", - "e": "east", - "n": "north", - "ew": "east", - "ns": "north", -} - -_range_dict = { - "itrs": (6.35e6, 6.39e6, "Earth"), - "mcmf": (1717100.0, 1757100.0, "Moon"), -} - -allowed_location_types = [EarthLocation] -if hasmoon: - selenoids = { - "SPHERE": _utils.Body.Moon_sphere, - "GSFC": _utils.Body.Moon_gsfc, - "GRAIL23": _utils.Body.Moon_grail23, - "CE-1-LAM-GEO": _utils.Body.Moon_ce1lamgeo, - } - allowed_location_types.append(MoonLocation) - -allowed_cat_types = ["sidereal", "ephem", "unprojected", "driftscan"] - - -def _get_iterable(x): - """Return iterable version of input.""" - if isinstance(x, Iterable): - return x - else: - return (x,) - - -def _fits_gethduaxis(hdu, axis): - """ - Make axis arrays for fits files. - - Parameters - ---------- - hdu : astropy.io.fits HDU object - The HDU to make an axis array for. - axis : int - The axis number of interest (1-based). - - Returns - ------- - ndarray of float - Array of values for the specified axis. - - """ - ax = str(axis) - axis_num = hdu.header["NAXIS" + ax] - val = hdu.header["CRVAL" + ax] - delta = hdu.header["CDELT" + ax] - index = hdu.header["CRPIX" + ax] - 1 - - return delta * (np.arange(axis_num) - index) + val - - -def _fits_indexhdus(hdulist): - """ - Get a dict of table names and HDU numbers from a FITS HDU list. 
- - Parameters - ---------- - hdulist : list of astropy.io.fits HDU objects - List of HDUs to get names for - - Returns - ------- - dict - dictionary with table names as keys and HDU number as values. - - """ - tablenames = {} - for i in range(len(hdulist)): - try: - tablenames[hdulist[i].header["EXTNAME"]] = i - except KeyError: - continue - return tablenames - - -def _get_fits_extra_keywords(header, *, keywords_to_skip=None): - """ - Get any extra keywords and return as dict. - - Parameters - ---------- - header : FITS header object - header object to get extra_keywords from. - keywords_to_skip : list of str - list of keywords to not include in extra keywords in addition to standard - FITS keywords. - - Returns - ------- - dict - dict of extra keywords. - """ - # List standard FITS header items that are still should not be included in - # extra_keywords - # These are the beginnings of FITS keywords to ignore, the actual keywords - # often include integers following these names (e.g. NAXIS1, CTYPE3) - std_fits_substrings = [ - "HISTORY", - "SIMPLE", - "BITPIX", - "EXTEND", - "BLOCKED", - "GROUPS", - "PCOUNT", - "GCOUNT", - "BSCALE", - "BZERO", - "NAXIS", - "PTYPE", - "PSCAL", - "PZERO", - "CTYPE", - "CRVAL", - "CRPIX", - "CDELT", - "CROTA", - "CUNIT", - ] - - if keywords_to_skip is not None: - std_fits_substrings.extend(keywords_to_skip) - - extra_keywords = {} - # find all the other header items and keep them as extra_keywords - for key in header: - # check if key contains any of the standard FITS substrings - if np.any([sub in key for sub in std_fits_substrings]): - continue - if key == "COMMENT": - extra_keywords[key] = str(header.get(key)) - elif key != "": - extra_keywords[key] = header.get(key) - - return extra_keywords - - -def _check_history_version(history, version_string): - """Check if version_string is present in history string.""" - if version_string.replace(" ", "") in history.replace("\n", "").replace(" ", ""): - return True - else: - return False - - -def _check_histories(history1, history2): - """Check if two histories are the same.""" - if history1.replace("\n", "").replace(" ", "") == history2.replace( - "\n", "" - ).replace(" ", ""): - return True - else: - return False - - -def _combine_history_addition(history1, history2): - """ - Find extra history to add to have minimal repeats. - - Parameters - ---------- - history1 : str - First history. - history2 : str - Second history - - Returns - ------- - str - Extra history to add to first history. - - """ - # first check if they're the same to avoid more complicated processing. - if _check_histories(history1, history2): - return None - - hist2_words = history2.split(" ") - add_hist = "" - test_hist1 = " " + history1 + " " - for i, word in enumerate(hist2_words): - if " " + word + " " not in test_hist1: - add_hist += " " + word - keep_going = i + 1 < len(hist2_words) - while keep_going: - if (hist2_words[i + 1] == " ") or ( - " " + hist2_words[i + 1] + " " not in test_hist1 - ): - add_hist += " " + hist2_words[i + 1] - del hist2_words[i + 1] - keep_going = i + 1 < len(hist2_words) - else: - keep_going = False - - if add_hist == "": - add_hist = None - return add_hist - - -def _test_array_constant(array, *, tols=None): - """ - Check if an array contains constant values to some tolerance. - - Uses np.isclose on the min & max of the arrays with the given tolerances. - - Parameters - ---------- - array : np.ndarray or UVParameter - UVParameter or array to check for constant values. 
- tols : tuple of float, optional - length 2 tuple giving (rtol, atol) to pass to np.isclose, defaults to (0, 0) if - passing an array, otherwise defaults to using the tolerance on the UVParameter. - - Returns - ------- - bool - True if the array is constant to the given tolerances, False otherwise. - """ - # Import UVParameter here rather than at the top to avoid circular imports - from pyuvdata.parameter import UVParameter - - if isinstance(array, UVParameter): - array_to_test = array.value - if tols is None: - tols = array.tols - else: - array_to_test = array - if tols is None: - tols = (0, 0) - assert isinstance(tols, tuple), "tols must be a length-2 tuple" - assert len(tols) == 2, "tols must be a length-2 tuple" - - if array_to_test.size == 1: - # arrays with 1 element are constant by definition - return True - - # if min and max are equal don't bother with tolerance checking - if np.min(array_to_test) == np.max(array_to_test): - return True - - return np.isclose( - np.min(array_to_test), np.max(array_to_test), rtol=tols[0], atol=tols[1] - ) - - -def _test_array_constant_spacing(array, *, tols=None): - """ - Check if an array is constantly spaced to some tolerance. - - Calls _test_array_constant on the np.diff of the array. - - Parameters - ---------- - array : np.ndarray or UVParameter - UVParameter or array to check for constant spacing. - tols : tuple of float, optional - length 2 tuple giving (rtol, atol) to pass to np.isclose, defaults to (0, 0) if - passing an array, otherwise defaults to using the tolerance on the UVParameter. - - Returns - ------- - bool - True if the array spacing is constant to the given tolerances, False otherwise. - """ - # Import UVParameter here rather than at the top to avoid circular imports - from pyuvdata.parameter import UVParameter - - if isinstance(array, UVParameter): - array_to_test = array.value - if tols is None: - tols = array.tols - else: - array_to_test = array - if tols is None: - tols = (0, 0) - assert isinstance(tols, tuple), "tols must be a length-2 tuple" - assert len(tols) == 2, "tols must be a length-2 tuple" - - if array_to_test.size <= 2: - # arrays with 1 or 2 elements are constantly spaced by definition - return True - - array_diff = np.diff(array_to_test) - return _test_array_constant(array_diff, tols=tols) - - -def _check_flex_spw_contiguous(*, spw_array, flex_spw_id_array): - """ - Check if the spectral windows are contiguous for multi-spw datasets. - - This checks the flex_spw_id_array to make sure that all channels for each - spectral window are together in one block, versus being interspersed (e.g., - channel #1 and #3 is in spw #1, channels #2 and #4 are in spw #2). In theory, - UVH5 and UVData objects can handle this, but MIRIAD, MIR, UVFITS, and MS file - formats cannot, so we just consider it forbidden. - - Parameters - ---------- - spw_array : array of integers - Array of spectral window numbers, shape (Nspws,). - flex_spw_id_array : array of integers - Array of spectral window numbers per frequency channel, shape (Nfreqs,). - - """ - exp_spw_ids = np.unique(spw_array) - # This is an internal consistency check to make sure that the indexes match - # up as expected -- this shouldn't error unless someone is mucking with - # settings they shouldn't be. - assert np.all(np.unique(flex_spw_id_array) == exp_spw_ids), ( - "There are some entries in flex_spw_id_array that are not in spw_array. " - "This is a bug, please report it in an issue." 
-    )
-
-    n_breaks = np.sum(flex_spw_id_array[1:] != flex_spw_id_array[:-1])
-    if (n_breaks + 1) != spw_array.size:
-        raise ValueError(
-            "Channels from different spectral windows are interspersed with "
-            "one another, rather than being grouped together along the "
-            "frequency axis. Most file formats do not support such "
-            "non-grouping of data."
-        )
-
-
-def _check_freq_spacing(
-    *,
-    freq_array,
-    freq_tols,
-    channel_width,
-    channel_width_tols,
-    spw_array,
-    flex_spw_id_array,
-    raise_errors=True,
-):
-    """
-    Check if frequencies are evenly spaced and separated by their channel width.
-
-    This is a requirement for writing uvfits & miriad files.
-
-    Parameters
-    ----------
-    freq_array : array of float
-        Array of frequencies, shape (Nfreqs,).
-    freq_tols : tuple of float
-        freq_array tolerances (from uvobj._freq_array.tols).
-    channel_width : array of float
-        Channel widths, either a scalar or an array of shape (Nfreqs,).
-    channel_width_tols : tuple of float
-        channel_width tolerances (from uvobj._channel_width.tols).
-    spw_array : array of integers or None
-        Array of spectral window numbers, shape (Nspws,).
-    flex_spw_id_array : array of integers or None
-        Array of spectral window numbers per frequency channel, shape (Nfreqs,).
-    raise_errors : bool
-        Option to raise errors if the various checks do not pass.
-
-    Returns
-    -------
-    spacing_error : bool
-        Flag that channel spacings or channel widths are not equal.
-    chanwidth_error : bool
-        Flag that channel spacing does not match channel width.
-
-    """
-    spacing_error = False
-    chanwidth_error = False
-
-    # Check to make sure that the flexible spectral window has indices set up
-    # correctly (grouped together) for this check
-    _check_flex_spw_contiguous(spw_array=spw_array, flex_spw_id_array=flex_spw_id_array)
-
-    for spw_id in spw_array:
-        mask = flex_spw_id_array == spw_id
-        if sum(mask) > 1:
-            freq_spacing = np.diff(freq_array[mask])
-            freq_dir = -1.0 if all(freq_spacing < 0) else 1.0
-            if not _test_array_constant(freq_spacing, tols=freq_tols):
-                spacing_error = True
-            if not _test_array_constant(channel_width[mask], tols=channel_width_tols):
-                spacing_error = True
-            elif not np.allclose(
-                freq_spacing,
-                np.mean(channel_width[mask]) * freq_dir,
-                rtol=channel_width_tols[0],
-                atol=channel_width_tols[1],
-            ):
-                chanwidth_error = True
-
-    if raise_errors and spacing_error:
-        raise ValueError(
-            "The frequencies are not evenly spaced (probably because of a select "
-            "operation) or have differing values of channel widths. Some file formats "
-            "(e.g. uvfits, miriad) do not support unevenly spaced frequencies."
-        )
-    if raise_errors and chanwidth_error:
-        raise ValueError(
-            "The frequencies are separated by more than their channel width (probably "
-            "because of a select operation). Some file formats (e.g. uvfits, miriad) "
-            "do not support frequencies that are spaced by more than their channel "
-            "width."
-        )
-
-    return spacing_error, chanwidth_error
-
-
-def _sort_freq_helper(
-    *,
-    Nfreqs,
-    freq_array,
-    Nspws,
-    spw_array,
-    flex_spw_id_array,
-    spw_order,
-    channel_order,
-    select_spw,
-):
-    """
-    Figure out the frequency sorting order for object based frequency sorting.
-
-    Parameters
-    ----------
-    Nfreqs : int
-        Number of frequencies, taken directly from the object parameter.
-    freq_array : array_like of float
-        Frequency array, taken directly from the object parameter.
-    Nspws : int
-        Number of spectral windows, taken directly from the object parameter.
-    spw_array : array_like of int
-        Spectral window array, taken directly from the object parameter.
-    flex_spw_id_array : array_like of int
-        Array of SPW IDs for each channel, taken directly from the object parameter.
-    spw_order : str or array_like of int
-        A string describing the desired order of spectral windows along the
-        frequency axis. Allowed strings include `number` (sort on spectral window
-        number) and `freq` (sort on median frequency). A '-' can be prepended
-        to signify descending order instead of the default ascending order,
-        e.g., if you have SPW #1 and 2, and wanted them ordered as [2, 1],
-        you would specify `-number`. Alternatively, one can supply an index array
-        of length Nspws that specifies how to shuffle the spws (this is not the desired
-        final spw order). Default is to apply no sorting of spectral windows.
-    channel_order : str or array_like of int
-        A string describing the desired order of frequency channels within a
-        spectral window. Allowed strings include `freq`, which will sort channels
-        within a spectral window by frequency. A '-' can be optionally prepended
-        to signify descending order instead of the default ascending order.
-        Alternatively, one can supply an index array of length Nfreqs that
-        specifies the new order. Default is to apply no sorting of channels
-        within a single spectral window. Note that providing an array_like of ints
-        will cause the values given to `spw_order` and `select_spw` to be ignored.
-    select_spw : int or array_like of int
-        An int or array_like of ints which specifies which spectral windows to
-        apply sorting. Note that setting this argument will cause the value
-        given to `spw_order` to be ignored.
-
-    Returns
-    -------
-    index_array : ndarray of int
-        Array giving the desired order of the channels to be used for sorting along the
-        frequency axis
-
-    Raises
-    ------
-    UserWarning
-        Raised if providing arguments to select_spw and channel_order (the latter
-        overrides the former).
-    ValueError
-        Raised if select_spw contains values not in spw_array, or if channel_order
-        is not the same length as freq_array.
-
-    """
-    if (spw_order is None) and (channel_order is None):
-        warnings.warn(
-            "Not specifying either spw_order or channel_order causes "
-            "no sorting actions to be applied. Returning object unchanged."
-        )
-        return
-
-    # Check to see if there are arguments we should be ignoring
-    if isinstance(channel_order, (np.ndarray, list, tuple)):
-        if select_spw is not None:
-            warnings.warn(
-                "The select_spw argument is ignored when providing an "
-                "array_like of int for channel_order"
-            )
-        if spw_order is not None:
-            warnings.warn(
-                "The spw_order argument is ignored when providing an "
-                "array_like of int for channel_order"
-            )
-        channel_order = np.asarray(channel_order)
-        if not channel_order.size == Nfreqs or not np.all(
-            np.sort(channel_order) == np.arange(Nfreqs)
-        ):
-            raise ValueError(
-                "Index array for channel_order must contain all indices for "
-                "the frequency axis, without duplicates."
-            )
-        index_array = channel_order
-    else:
-        index_array = np.arange(Nfreqs)
-        # Multiply by 1.0 here to make a cheap copy of the array to manipulate
-        temp_freqs = 1.0 * freq_array
-        # Same trick for ints -- add 0 to make a cheap copy
-        temp_spws = 0 + flex_spw_id_array
-
-        # Check whether or not we need to sort the channels in individual windows
-        sort_spw = {idx: channel_order is not None for idx in spw_array}
-        if select_spw is not None:
-            if spw_order is not None:
-                warnings.warn(
-                    "The spw_order argument is ignored when providing an "
-                    "argument for select_spw"
-                )
-            if channel_order is None:
-                warnings.warn(
-                    "Specifying select_spw without providing channel_order causes "
-                    "no sorting actions to be applied. Returning object unchanged."
-                )
-                return
-            if isinstance(select_spw, (np.ndarray, list, tuple)):
-                sort_spw = {idx: idx in select_spw for idx in spw_array}
-            else:
-                sort_spw = {idx: idx == select_spw for idx in spw_array}
-        elif spw_order is not None:
-            if isinstance(spw_order, (np.ndarray, list, tuple)):
-                spw_order = np.asarray(spw_order)
-                if not spw_order.size == Nspws or not np.all(
-                    np.sort(spw_order) == np.arange(Nspws)
-                ):
-                    raise ValueError(
-                        "Index array for spw_order must contain all indices for "
-                        "the spw_array, without duplicates."
-                    )
-            elif spw_order not in ["number", "freq", "-number", "-freq", None]:
-                raise ValueError(
-                    "spw_order can only be one of 'number', '-number', "
-                    "'freq', '-freq', None or an index array of length Nspws"
-                )
-            elif Nspws > 1:
-                # Only need to do this step if we actually have multiple spws.
-
-                # If the string starts with a '-', then we will flip the order at
-                # the end of the operation
-                flip_spws = spw_order[0] == "-"
-
-                if "number" in spw_order:
-                    spw_order = np.argsort(spw_array)
-                elif "freq" in spw_order:
-                    spw_order = np.argsort(
-                        [np.median(temp_freqs[temp_spws == idx]) for idx in spw_array]
-                    )
-                if flip_spws:
-                    spw_order = np.flip(spw_order)
-            else:
-                spw_order = np.arange(Nspws)
-            # Now that we know the spw order, we can apply the first sort
-            index_array = np.concatenate(
-                [index_array[temp_spws == spw] for spw in spw_array[spw_order]]
-            )
-            temp_freqs = temp_freqs[index_array]
-            temp_spws = temp_spws[index_array]
-        # Spectral windows are assumed sorted at this point
-        if channel_order is not None:
-            if channel_order not in ["freq", "-freq"]:
-                raise ValueError(
-                    "channel_order can only be one of 'freq' or '-freq' or an index "
-                    "array of length Nfreqs"
-                )
-            for idx in spw_array:
-                if sort_spw[idx]:
-                    select_mask = temp_spws == idx
-                    subsort_order = index_array[select_mask]
-                    subsort_order = subsort_order[np.argsort(temp_freqs[select_mask])]
-                    index_array[select_mask] = (
-                        np.flip(subsort_order)
-                        if channel_order[0] == "-"
-                        else subsort_order
-                    )
-    if np.all(index_array[1:] > index_array[:-1]):
-        # Nothing to do - the data are already sorted!
-        return
-
-    return index_array
-
-
-def _check_range_overlap(val_range, range_type="time"):
-    """
-    Detect if any val_range in an array overlaps.
-
-    Parameters
-    ----------
-    val_range : np.array of float
-        Array of ranges, shape (Nranges, 2).
-    range_type : str
-        Type of range (for good error messages)
-
-    Returns
-    -------
-    bool
-        True if any range overlaps.
-    """
-    # first check that time ranges are well formed (stop is >= than start)
-    if np.any((val_range[:, 1] - val_range[:, 0]) < 0):
-        raise ValueError(
-            f"The {range_type} ranges are not well-formed, some stop {range_type}s "
-            f"are after start {range_type}s."
- ) - - # Sort by start time - sorted_ranges = val_range[np.argsort(val_range[:, 0]), :] - - # then check if adjacent pairs overlap - for ind in range(sorted_ranges.shape[0] - 1): - range1 = sorted_ranges[ind] - range2 = sorted_ranges[ind + 1] - if range2[0] < range1[1]: - return True - - -def _select_times_helper( - *, - times, - time_range, - lsts, - lst_range, - obj_time_array, - obj_time_range, - obj_lst_array, - obj_lst_range, - time_tols, - lst_tols, -): - """ - Get time indices in a select. - - Parameters - ---------- - times : array_like of float - The times to keep in the object, each value passed here should exist in the - time_array. Can be None, cannot be set with `time_range`, `lsts` or `lst_array`. - time_range : array_like of float - The time range in Julian Date to keep in the object, must be length 2. Some of - the times in the object should fall between the first and last elements. Can be - None, cannot be set with `times`, `lsts` or `lst_array`. - lsts : array_like of float - The local sidereal times (LSTs) to keep in the object, each value passed here - should exist in the lst_array. Can be None, cannot be set with `times`, - `time_range`, or `lst_range`. - lst_range : array_like of float - The local sidereal time (LST) range in radians to keep in the - object, must be of length 2. Some of the LSTs in the object should - fall between the first and last elements. If the second value is - smaller than the first, the LSTs are treated as having phase-wrapped - around LST = 2*pi = 0, and the LSTs kept on the object will run from - the larger value, through 0, and end at the smaller value. Can be None, cannot - be set with `times`, `time_range`, or `lsts`. - obj_time_array : array_like of float - Time array on object. Can be None if `object_time_range` is set. - obj_time_range : array_like of float - Time range on object. Can be None if `object_time_array` is set. - obj_lst_array : array_like of float - LST array on object. Can be None if `object_lst_range` is set. - obj_lst_range : array_like of float - LST range on object. Can be None if `object_lst_array` is set. - time_tols : tuple of float - Length 2 tuple giving (rtol, atol) to use for time matching. - lst_tols : tuple of float - Length 2 tuple giving (rtol, atol) to use for lst matching. - - """ - have_times = times is not None - have_time_range = time_range is not None - have_lsts = lsts is not None - have_lst_range = lst_range is not None - n_time_params = np.count_nonzero( - [have_times, have_time_range, have_lsts, have_lst_range] - ) - if n_time_params > 1: - raise ValueError( - "Only one of [times, time_range, lsts, lst_range] may be " - "specified per selection operation." 
- ) - if n_time_params == 0: - return None - - time_inds = np.zeros(0, dtype=np.int64) - if times is not None: - times = _get_iterable(times) - if np.array(times).ndim > 1: - times = np.array(times).flatten() - - if obj_time_range is not None: - for jd in times: - this_ind = np.nonzero( - np.logical_and( - (obj_time_range[:, 0] <= jd), (obj_time_range[:, 1] >= jd) - ) - )[0] - if this_ind.size > 0: - time_inds = np.append(time_inds, this_ind) - else: - raise ValueError(f"Time {jd} does not fall in any time_range.") - else: - for jd in times: - if np.any( - np.isclose(obj_time_array, jd, rtol=time_tols[0], atol=time_tols[1]) - ): - time_inds = np.append( - time_inds, - np.where( - np.isclose( - obj_time_array, jd, rtol=time_tols[0], atol=time_tols[1] - ) - )[0], - ) - else: - raise ValueError(f"Time {jd} is not present in the time_array.") - - if time_range is not None: - if np.size(time_range) != 2: - raise ValueError("time_range must be length 2.") - - if obj_time_range is not None: - for tind, trange in enumerate(obj_time_range): - if _check_range_overlap(np.stack((trange, time_range), axis=0)): - time_inds = np.append(time_inds, tind) - attr_str = "time_range" - else: - time_inds = np.nonzero( - (obj_time_array <= time_range[1]) & (obj_time_array >= time_range[0]) - )[0] - attr_str = "time_array" - if time_inds.size == 0: - raise ValueError( - f"No elements in {attr_str} between {time_range[0]} and " - f"{time_range[1]}." - ) - - if (lsts is not None or lst_range is not None) and obj_lst_range is not None: - # check for lsts wrapping around zero - lst_range_wrap = obj_lst_range[:, 0] > obj_lst_range[:, 1] - - if lsts is not None: - if np.any(np.asarray(lsts) > 2 * np.pi): - warnings.warn( - "The lsts parameter contained a value greater than 2*pi. " - "LST values are assumed to be in radians, not hours." - ) - lsts = _get_iterable(lsts) - if np.array(lsts).ndim > 1: - lsts = np.array(lsts).flatten() - - if obj_lst_range is not None: - for lst in lsts: - lst_ind = np.nonzero( - np.logical_and( - (obj_lst_range[:, 0] <= lst), (obj_lst_range[:, 1] >= lst) - ) - )[0] - if lst_ind.size == 0 and np.any(lst_range_wrap): - for lr_ind in np.nonzero(lst_range_wrap)[0]: - if (obj_lst_range[lr_ind, 0] <= lst and lst <= 2 * np.pi) or ( - lst >= 0 and lst <= obj_lst_range[lr_ind, 1] - ): - lst_ind = np.array([lr_ind]) - if lst_ind.size > 0: - time_inds = np.append(time_inds, lst_ind) - else: - raise ValueError(f"LST {lst} does not fall in any lst_range") - else: - for lst in lsts: - if np.any( - np.isclose(obj_lst_array, lst, rtol=lst_tols[0], atol=lst_tols[1]) - ): - time_inds = np.append( - time_inds, - np.where( - np.isclose( - obj_lst_array, lst, rtol=lst_tols[0], atol=lst_tols[1] - ) - )[0], - ) - else: - raise ValueError(f"LST {lst} is not present in the lst_array") - - if lst_range is not None: - if np.size(lst_range) != 2: - raise ValueError("lst_range must be length 2.") - if np.any(np.asarray(lst_range) > 2 * np.pi): - warnings.warn( - "The lst_range contained a value greater than 2*pi. " - "LST values are assumed to be in radians, not hours." 
- ) - if obj_lst_range is not None: - for lind, lrange in enumerate(obj_lst_range): - if not lst_range_wrap[lind] and lst_range[0] < lst_range[1]: - if _check_range_overlap(np.stack((lrange, lst_range), axis=0)): - time_inds = np.append(time_inds, lind) - else: - if (lst_range[0] >= lrange[0] and lst_range[0] <= 2 * np.pi) or ( - lst_range[1] <= lrange[1] and lst_range[1] >= 0 - ): - time_inds = np.append(time_inds, lind) - attr_str = "lst_range" - else: - if lst_range[1] < lst_range[0]: - # we're wrapping around LST = 2*pi = 0 - lst_range_1 = [lst_range[0], 2 * np.pi] - lst_range_2 = [0, lst_range[1]] - time_inds1 = np.nonzero( - (obj_lst_array <= lst_range_1[1]) - & (obj_lst_array >= lst_range_1[0]) - )[0] - time_inds2 = np.nonzero( - (obj_lst_array <= lst_range_2[1]) - & (obj_lst_array >= lst_range_2[0]) - )[0] - time_inds = np.union1d(time_inds1, time_inds2) - else: - time_inds = np.nonzero( - (obj_lst_array <= lst_range[1]) & (obj_lst_array >= lst_range[0]) - )[0] - attr_str = "lst_array" - - if time_inds.size == 0: - raise ValueError( - f"No elements in {attr_str} between {lst_range[0]} and " - f"{lst_range[1]}." - ) - return time_inds - - -def _sorted_unique_union(obj1, obj2=None): - """ - Determine the union of unique elements from two lists. - - Convenience function for handling various actions with indices. - - Parameters - ---------- - obj1 : list or tuple or set or 1D ndarray - First list from which to determine unique entries. - obj2 : list or tuple or set or 1D ndarray - Second list from which to determine unique entries, which is joined with the - first list. If None, the method will simply return the sorted list of unique - elements in obj1. - - Returns - ------- - sorted_unique : list - List containing the union of unique entries between obj1 and obj2. - """ - return sorted(set(obj1)) if obj2 is None else sorted(set(obj1).union(obj2)) - - -def _sorted_unique_intersection(obj1, obj2=None): - """ - Determine the intersection of unique elements from two lists. - - Convenience function for handling various actions with indices. - - Parameters - ---------- - obj1 : list or tuple or set or 1D ndarray - First list from which to determine unique entries. - obj2 : list or tuple or set or 1D ndarray - Second list from which to determine unique entries, which is intersected with - the first list. If None, the method will simply return the sorted list of unique - elements in obj1. - - Returns - ------- - sorted_unique : list - List containing the intersection of unique entries between obj1 and obj2. - """ - return sorted(set(obj1)) if obj2 is None else sorted(set(obj1).intersection(obj2)) - - -def _sorted_unique_difference(obj1, obj2=None): - """ - Determine the difference of unique elements from two lists. - - Convenience function for handling various actions with indices. - - Parameters - ---------- - obj1 : list or tuple or set or 1D ndarray - First list from which to determine unique entries. - obj2 : list or tuple or set or 1D ndarray - Second list from which to determine unique entries, which is differenced with - the first list. If None, the method will simply return the sorted list of unique - elements in obj1. - - Returns - ------- - sorted_unique : list - List containing the difference in unique entries between obj1 and obj2. - """ - return sorted(set(obj1)) if obj2 is None else sorted(set(obj1).difference(obj2)) - - -def baseline_to_antnums(baseline, *, Nants_telescope): - """ - Get the antenna numbers corresponding to a given baseline number. 
- - Parameters - ---------- - baseline : int or array_like of ints - baseline number - Nants_telescope : int - number of antennas - - Returns - ------- - int or array_like of int - first antenna number(s) - int or array_like of int - second antenna number(s) - - """ - if Nants_telescope > 2147483648: - raise ValueError(f"error Nants={Nants_telescope}>2147483648 not supported") - if np.any(np.asarray(baseline) < 0): - raise ValueError("negative baseline numbers are not supported") - if np.any(np.asarray(baseline) > 4611686018498691072): - raise ValueError("baseline numbers > 4611686018498691072 are not supported") - - return_array = isinstance(baseline, (np.ndarray, list, tuple)) - ant1, ant2 = _utils.baseline_to_antnums( - np.ascontiguousarray(baseline, dtype=np.uint64) - ) - if return_array: - return ant1, ant2 - else: - return ant1.item(0), ant2.item(0) - - -def antnums_to_baseline( - ant1, ant2, *, Nants_telescope, attempt256=False, use_miriad_convention=False -): - """ - Get the baseline number corresponding to two given antenna numbers. - - Parameters - ---------- - ant1 : int or array_like of int - first antenna number - ant2 : int or array_like of int - second antenna number - Nants_telescope : int - number of antennas - attempt256 : bool - Option to try to use the older 256 standard used in - many uvfits files. If there are antenna numbers >= 256, the 2048 - standard will be used unless there are antenna numbers >= 2048 - or Nants_telescope > 2048. In that case, the 2147483648 standard - will be used. Default is False. - use_miriad_convention : bool - Option to use the MIRIAD convention where BASELINE id is - `bl = 256 * ant1 + ant2` if `ant2 < 256`, otherwise - `bl = 2048 * ant1 + ant2 + 2**16`. - Note antennas should be 1-indexed (start at 1, not 0) - - Returns - ------- - int or array of int - baseline number corresponding to the two antenna numbers. - - """ - if Nants_telescope is not None and Nants_telescope > 2147483648: - raise ValueError( - "cannot convert ant1, ant2 to a baseline index " - f"with Nants={Nants_telescope}>2147483648." - ) - if np.any(np.concatenate((np.unique(ant1), np.unique(ant2))) >= 2147483648): - raise ValueError( - "cannot convert ant1, ant2 to a baseline index " - "with antenna numbers greater than 2147483647." - ) - if np.any(np.concatenate((np.unique(ant1), np.unique(ant2))) < 0): - raise ValueError( - "cannot convert ant1, ant2 to a baseline index " - "with antenna numbers less than zero." 
-        )
-
-    nants_less2048 = True
-    if Nants_telescope is not None and Nants_telescope > 2048:
-        nants_less2048 = False
-
-    return_array = isinstance(ant1, (np.ndarray, list, tuple))
-    baseline = _utils.antnums_to_baseline(
-        np.ascontiguousarray(ant1, dtype=np.uint64),
-        np.ascontiguousarray(ant2, dtype=np.uint64),
-        attempt256=attempt256,
-        nants_less2048=nants_less2048,
-        use_miriad_convention=use_miriad_convention,
-    )
-    if return_array:
-        return baseline
-    else:
-        return baseline.item(0)
-
-
-def baseline_index_flip(baseline, *, Nants_telescope):
-    """Change baseline number to reverse antenna order."""
-    ant1, ant2 = baseline_to_antnums(baseline, Nants_telescope=Nants_telescope)
-    return antnums_to_baseline(ant2, ant1, Nants_telescope=Nants_telescope)
-
-
-def _x_orientation_rep_dict(x_orientation):
-    """Create replacement dict based on x_orientation."""
-    try:
-        if XORIENTMAP[x_orientation.lower()] == "east":
-            return {"x": "e", "y": "n"}
-        elif XORIENTMAP[x_orientation.lower()] == "north":
-            return {"x": "n", "y": "e"}
-    except KeyError as e:
-        raise ValueError("x_orientation not recognized.") from e
-
-
-def np_cache(function):
-    """Convert ndarray inputs to tuples so the function can be wrapped by lru_cache."""
-    function = lru_cache(function)
-
-    @wraps(function)
-    def wrapper(pol, x_orientation=None):
-        try:
-            return function(pol, x_orientation=x_orientation)
-        except TypeError:
-            if isinstance(pol, Iterable):
-                # Assume the reason that we got a type error is that pol was an array.
-                pol = tuple(pol)
-            return function(pol, x_orientation=x_orientation)
-
-    # copy lru_cache attributes over too
-    wrapper.cache_info = function.cache_info
-    wrapper.cache_clear = function.cache_clear
-
-    return wrapper
-
-
-@np_cache
-def polstr2num(pol: str | IterableType[str], *, x_orientation: str | None = None):
-    """
-    Convert polarization str to number according to AIPS Memo 117.
-
-    Prefer 'pI', 'pQ', 'pU' and 'pV' to make it clear that these are pseudo-Stokes,
-    not true Stokes, but also supports 'I', 'Q', 'U', 'V'.
-
-    Parameters
-    ----------
-    pol : str or array_like of str
-        polarization string(s)
-    x_orientation : str, optional
-        Orientation of the physical dipole corresponding to what is
-        labelled as the x polarization ("east" or "north") to allow for
-        converting from E/N strings. See corresponding parameter on UVData
-        for more details.
-
-    Returns
-    -------
-    int or list of int
-        Number(s) corresponding to the input string(s)
-
-    Raises
-    ------
-    ValueError
-        If the pol string cannot be converted to a polarization number.
-
-    Warns
-    -----
-    UserWarning
-        If the x_orientation is not recognized.
-
-    """
-    dict_use = copy.deepcopy(POL_STR2NUM_DICT)
-    if x_orientation is not None:
-        try:
-            rep_dict = _x_orientation_rep_dict(x_orientation)
-            for key, value in POL_STR2NUM_DICT.items():
-                new_key = key.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
-                dict_use[new_key] = value
-        except ValueError:
-            warnings.warn("x_orientation not recognized.")
-
-    poldict = {k.lower(): v for k, v in dict_use.items()}
-    if isinstance(pol, str):
-        out = poldict[pol.lower()]
-    elif isinstance(pol, Iterable):
-        out = [poldict[key.lower()] for key in pol]
-    else:
-        raise ValueError(
-            f"Polarization {pol} cannot be converted to a polarization number."
-        )
-    return out
-
-
-@np_cache
-def polnum2str(num, *, x_orientation=None):
-    """
-    Convert polarization number to str according to AIPS Memo 117.
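For reference, a minimal round-trip sketch of the baseline indexing helpers above (illustrative values only):

    # antnums_to_baseline and baseline_to_antnums invert each other
    bl = antnums_to_baseline(1, 2, Nants_telescope=128)
    assert baseline_to_antnums(bl, Nants_telescope=128) == (1, 2)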
-
-    Uses 'pI', 'pQ', 'pU' and 'pV' to make it clear that these are pseudo-Stokes,
-    not true Stokes
-
-    Parameters
-    ----------
-    num : int or array_like of int
-        polarization number(s)
-    x_orientation : str, optional
-        Orientation of the physical dipole corresponding to what is
-        labelled as the x polarization ("east" or "north") to convert to
-        E/N strings. See corresponding parameter on UVData for more details.
-
-    Returns
-    -------
-    str or list of str
-        String(s) corresponding to polarization number(s)
-
-    Raises
-    ------
-    ValueError
-        If the polarization number cannot be converted to a polarization string.
-
-    Warns
-    -----
-    UserWarning
-        If the x_orientation is not recognized.
-
-    """
-    dict_use = copy.deepcopy(POL_NUM2STR_DICT)
-    if x_orientation is not None:
-        try:
-            rep_dict = _x_orientation_rep_dict(x_orientation)
-            for key, value in POL_NUM2STR_DICT.items():
-                new_val = value.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
-                dict_use[key] = new_val
-        except ValueError:
-            warnings.warn("x_orientation not recognized.")
-
-    if isinstance(num, (int, np.int32, np.int64)):
-        out = dict_use[num]
-    elif isinstance(num, Iterable):
-        out = [dict_use[i] for i in num]
-    else:
-        raise ValueError(f"Polarization {num} cannot be converted to string.")
-    return out
-
-
-@np_cache
-def jstr2num(jstr, *, x_orientation=None):
-    """
-    Convert jones polarization str to number according to calfits memo.
-
-    Parameters
-    ----------
-    jstr : str or array_like of str
-        antenna (jones) polarization string(s) to convert.
-    x_orientation : str, optional
-        Orientation of the physical dipole corresponding to what is
-        labelled as the x polarization ("east" or "north") to allow for
-        converting from E/N strings. See corresponding parameter on UVData
-        for more details.
-
-    Returns
-    -------
-    int or list of int
-        antenna (jones) polarization number(s) corresponding to the input string(s)
-
-    Raises
-    ------
-    ValueError
-        If the jones string cannot be converted to a polarization number.
-
-    Warns
-    -----
-    UserWarning
-        If the x_orientation is not recognized.
-
-    """
-    dict_use = copy.deepcopy(JONES_STR2NUM_DICT)
-    if x_orientation is not None:
-        try:
-            rep_dict = _x_orientation_rep_dict(x_orientation)
-            for key, value in JONES_STR2NUM_DICT.items():
-                new_key = key.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
-                dict_use[new_key] = value
-        except ValueError:
-            warnings.warn("x_orientation not recognized.")
-
-    jdict = {k.lower(): v for k, v in dict_use.items()}
-    if isinstance(jstr, str):
-        out = jdict[jstr.lower()]
-    elif isinstance(jstr, Iterable):
-        out = [jdict[key.lower()] for key in jstr]
-    else:
-        raise ValueError(f"Jones polarization {jstr} cannot be converted to index.")
-    return out
-
-
-@np_cache
-def jnum2str(jnum, *, x_orientation=None):
-    """
-    Convert jones polarization number to str according to calfits memo.
-
-    Parameters
-    ----------
-    jnum : int or array_like of int
-        antenna (jones) polarization number(s) to convert to strings
-    x_orientation : str, optional
-        Orientation of the physical dipole corresponding to what is
-        labelled as the x polarization ("east" or "north") to convert to
-        E/N strings. See corresponding parameter on UVData for more details.
-
-    Returns
-    -------
-    str or list of str
-        antenna (jones) polarization string(s) corresponding to number
-
-    Raises
-    ------
-    ValueError
-        If the jones polarization number cannot be converted to a jones
-        polarization string.
-
-    Warns
-    -----
-    UserWarning
-        If the x_orientation is not recognized.
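A minimal usage sketch for the string-to-number conversions (values per AIPS Memo 117 and the calfits memo; illustrative only):

    polstr2num("xx")          # -5
    polstr2num(["pI", "xx"])  # [1, -5]
    jstr2num("Jxx")           # -5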
-
-    """
-    dict_use = copy.deepcopy(JONES_NUM2STR_DICT)
-    if x_orientation is not None:
-        try:
-            rep_dict = _x_orientation_rep_dict(x_orientation)
-            for key, value in JONES_NUM2STR_DICT.items():
-                new_val = value.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
-                dict_use[key] = new_val
-        except ValueError:
-            warnings.warn("x_orientation not recognized.")
-
-    if isinstance(jnum, (int, np.int32, np.int64)):
-        out = dict_use[jnum]
-    elif isinstance(jnum, Iterable):
-        out = [dict_use[i] for i in jnum]
-    else:
-        raise ValueError(f"Jones polarization {jnum} cannot be converted to string.")
-    return out
-
-
-@np_cache
-def parse_polstr(polstr, *, x_orientation=None):
-    """
-    Parse a polarization string and return pyuvdata standard polarization string.
-
-    See utils.POL_STR2NUM_DICT for options.
-
-    Parameters
-    ----------
-    polstr : str
-        polarization string
-    x_orientation : str, optional
-        Orientation of the physical dipole corresponding to what is
-        labelled as the x polarization ("east" or "north") to allow for
-        converting from E/N strings. See corresponding parameter on UVData
-        for more details.
-
-    Returns
-    -------
-    str
-        AIPS Memo 117 standard string
-
-    Raises
-    ------
-    ValueError
-        If the pol string cannot be converted to a polarization number.
-
-    Warns
-    -----
-    UserWarning
-        If the x_orientation is not recognized.
-
-    """
-    return polnum2str(
-        polstr2num(polstr, x_orientation=x_orientation), x_orientation=x_orientation
-    )
-
-
-@np_cache
-def parse_jpolstr(jpolstr, *, x_orientation=None):
-    """
-    Parse a Jones polarization string and return pyuvdata standard jones string.
-
-    See utils.JONES_STR2NUM_DICT for options.
-
-    Parameters
-    ----------
-    jpolstr : str
-        Jones polarization string
-    x_orientation : str, optional
-        Orientation of the physical dipole corresponding to what is
-        labelled as the x polarization ("east" or "north") to allow for
-        converting from E/N strings.
-
-    Returns
-    -------
-    str
-        calfits memo standard string
-
-    Raises
-    ------
-    ValueError
-        If the jones string cannot be converted to a polarization number.
-
-    Warns
-    -----
-    UserWarning
-        If the x_orientation is not recognized.
-
-    """
-    return jnum2str(
-        jstr2num(jpolstr, x_orientation=x_orientation), x_orientation=x_orientation
-    )
-
-
-def conj_pol(pol):
-    """
-    Return the polarization for the conjugate baseline.
-
-    For example, (1, 2, 'xy') = conj(2, 1, 'yx').
-    The returned polarization is determined by assuming the antenna pair is
-    reversed in the data, and finding the correct polarization correlation
-    which will yield the requested baseline when conjugated. Note this means
-    changing the polarization for linear cross-pols, but keeping auto-pol
-    (e.g. xx) and Stokes the same.
-
-    Parameters
-    ----------
-    pol : str or int
-        Polarization string or integer.
-
-    Returns
-    -------
-    cpol : str or int
-        Polarization as if antennas are swapped (type matches input)
-
-    """
-    cpol_dict = {k.lower(): v for k, v in CONJ_POL_DICT.items()}
-
-    if isinstance(pol, str):
-        cpol = cpol_dict[pol.lower()]
-    elif isinstance(pol, Iterable):
-        cpol = [conj_pol(p) for p in pol]
-    elif isinstance(pol, (int, np.int32, np.int64)):
-        cpol = polstr2num(cpol_dict[polnum2str(pol).lower()])
-    else:
-        raise ValueError("Polarization not recognized, cannot be conjugated.")
-    return cpol
-
-
-def reorder_conj_pols(pols):
-    """
-    Reorder multiple pols, swapping pols that are conjugates of one another.
-
-    For example ('xx', 'xy', 'yx', 'yy') -> ('xx', 'yx', 'xy', 'yy')
-    This is useful for the _key2inds function in the case where an antenna
-    pair is specified but the conjugate pair exists in the data.
The conjugated - data should be returned in the order of the polarization axis, so after - conjugating the data, the pols need to be reordered. - For example, if a file contains antpair (0, 1) and pols 'xy' and 'yx', but - the user requests antpair (1, 0), they should get: - [(1x, 0y), (1y, 0x)] = [conj(0y, 1x), conj(0x, 1y)] - - Parameters - ---------- - pols : array_like of str or int - Polarization array (strings or ints). - - Returns - ------- - conj_order : ndarray of int - Indices to reorder polarization array. - """ - if not isinstance(pols, Iterable): - raise ValueError("reorder_conj_pols must be given an array of polarizations.") - cpols = np.array([conj_pol(p) for p in pols]) # Array needed for np.where - conj_order = [np.where(cpols == p)[0][0] if p in cpols else -1 for p in pols] - if -1 in conj_order: - raise ValueError( - "Not all conjugate pols exist in the polarization array provided." - ) - return conj_order - - -def determine_pol_order(pols, *, order="AIPS"): - """ - Determine order of input polarization numbers. - - Determines the order by which to sort a given list of polarizations, according to - the ordering scheme. Two orders are currently supported: "AIPS" and "CASA". The - main difference between the two is the grouping of same-handed polarizations for - AIPS (whereas CASA orders the polarizations such that same-handed pols are on the - ends of the array). - - Parameters - ---------- - pols : array_like of str or int - Polarization array (strings or ints). - order : str - Polarization ordering scheme, either "CASA" or "AIPS". - - Returns - ------- - index_array : ndarray of int - Indices to reorder polarization array. - """ - if order == "AIPS": - index_array = np.argsort(np.abs(pols)) - elif order == "CASA": - casa_order = np.array([1, 2, 3, 4, -1, -3, -4, -2, -5, -7, -8, -6, 0]) - pol_inds = [] - for pol in pols: - pol_inds.append(np.where(casa_order == pol)[0][0]) - index_array = np.argsort(pol_inds) - else: - raise ValueError('order must be either "AIPS" or "CASA".') - - return index_array - - -def LatLonAlt_from_XYZ(xyz, *, frame="ITRS", ellipsoid=None, check_acceptability=True): - """ - Calculate lat/lon/alt from ECEF x,y,z. - - Parameters - ---------- - xyz : ndarray of float - numpy array, shape (Npts, 3), with ECEF x,y,z coordinates. - frame : str - Coordinate frame of xyz. - Valid options are ITRS (default) or MCMF. - ellipsoid : str - Ellipsoid to use for lunar coordinates. Must be one of "SPHERE", - "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default - is "SPHERE". Only used if frame is MCMF. - check_acceptability : bool - Flag to check XYZ coordinates are reasonable. 
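A short sketch of the two ordering conventions and the conjugate reordering (illustrative values only):

    pols = [-5, -7, -8, -6]                      # 'xx', 'xy', 'yx', 'yy'
    determine_pol_order(pols, order="AIPS")      # array([0, 3, 1, 2])
    reorder_conj_pols(["xx", "xy", "yx", "yy"])  # [0, 2, 1, 3]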
- - Returns - ------- - latitude : ndarray or float - latitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians - longitude : ndarray or float - longitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians - altitude : ndarray or float - altitude, numpy array (if Npts > 1) or value (if Npts = 1) in meters - - """ - frame = frame.upper() - if not hasmoon and frame == "MCMF": - raise ValueError("Need to install `lunarsky` package to work with MCMF frame.") - - if frame == "ITRS": - accept_bounds = (6.35e6, 6.39e6) - elif frame == "MCMF": - accept_bounds = (1.71e6, 1.75e6) - if ellipsoid is None: - ellipsoid = "SPHERE" - - # convert to a numpy array - xyz = np.asarray(xyz) - if xyz.ndim > 1 and xyz.shape[1] != 3: - raise ValueError("The expected shape of ECEF xyz array is (Npts, 3).") - - squeeze = xyz.ndim == 1 - - if squeeze: - xyz = xyz[np.newaxis, :] - - xyz = np.ascontiguousarray(xyz.T, dtype=np.float64) - - # checking for acceptable values - if check_acceptability: - if frame not in ["ITRS", "MCMF"]: - raise ValueError(f'Cannot check acceptability for unknown frame "{frame}".') - norms = np.linalg.norm(xyz, axis=0) - if not all( - np.logical_and(norms >= accept_bounds[0], norms <= accept_bounds[1]) - ): - raise ValueError( - f"xyz values should be {frame} x, y, z coordinates in meters" - ) - # this helper function returns one 2D array because it is less overhead for cython - if frame == "ITRS": - lla = _utils._lla_from_xyz(xyz, _utils.Body.Earth.value) - elif frame == "MCMF": - lla = _utils._lla_from_xyz(xyz, selenoids[ellipsoid].value) - else: - raise ValueError( - f'No spherical to cartesian transform defined for frame "{frame}".' - ) - - if squeeze: - return lla[0, 0], lla[1, 0], lla[2, 0] - return lla[0], lla[1], lla[2] - - -def XYZ_from_LatLonAlt(latitude, longitude, altitude, *, frame="ITRS", ellipsoid=None): - """ - Calculate ECEF x,y,z from lat/lon/alt values. - - Parameters - ---------- - latitude : ndarray or float - latitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians - longitude : ndarray or float - longitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians - altitude : ndarray or float - altitude, numpy array (if Npts > 1) or value (if Npts = 1) in meters - frame : str - Coordinate frame of xyz. - Valid options are ITRS (default) or MCMF. - ellipsoid : str - Ellipsoid to use for lunar coordinates. Must be one of "SPHERE", - "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default - is "SPHERE". Only used if frame is MCMF. - - Returns - ------- - xyz : ndarray of float - numpy array, shape (Npts, 3), with ECEF x,y,z coordinates. 
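A round-trip sketch of the geodetic conversions for the Earth (ITRS) case (illustrative coordinates):

    import numpy as np

    lat, lon, alt = np.deg2rad(-30.72), np.deg2rad(21.43), 1073.0  # rad, rad, m
    xyz = XYZ_from_LatLonAlt(lat, lon, alt)
    assert np.allclose(LatLonAlt_from_XYZ(xyz), [lat, lon, alt])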
-
-    """
-    latitude = np.ascontiguousarray(latitude, dtype=np.float64)
-    longitude = np.ascontiguousarray(longitude, dtype=np.float64)
-    altitude = np.ascontiguousarray(altitude, dtype=np.float64)
-
-    n_pts = latitude.size
-
-    frame = frame.upper()
-    if not hasmoon and frame == "MCMF":
-        raise ValueError("Need to install `lunarsky` package to work with MCMF frame.")
-
-    if longitude.size != n_pts:
-        raise ValueError(
-            "latitude, longitude and altitude must all have the same length"
-        )
-
-    if altitude.size != n_pts:
-        raise ValueError(
-            "latitude, longitude and altitude must all have the same length"
-        )
-
-    if frame == "ITRS":
-        xyz = _utils._xyz_from_latlonalt(
-            latitude, longitude, altitude, _utils.Body.Earth.value
-        )
-    elif frame == "MCMF":
-        if ellipsoid is None:
-            ellipsoid = "SPHERE"
-
-        xyz = _utils._xyz_from_latlonalt(
-            latitude, longitude, altitude, selenoids[ellipsoid].value
-        )
-    else:
-        raise ValueError(
-            f'No cartesian to spherical transform defined for frame "{frame}".'
-        )
-
-    xyz = xyz.T
-    if n_pts == 1:
-        return xyz[0]
-
-    return xyz
-
-
-def rotECEF_from_ECEF(xyz, longitude):
-    """
-    Get rotated ECEF positions such that the x-axis goes through the longitude.
-
-    Miriad and uvfits expect antenna positions in this frame
-    (with longitude of the array center/telescope location)
-
-    Parameters
-    ----------
-    xyz : ndarray of float
-        numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
-    longitude : float
-        longitude in radians to rotate coordinates to
-        (usually the array center/telescope location).
-
-    Returns
-    -------
-    ndarray of float
-        Rotated ECEF coordinates, shape (Npts, 3).
-
-    """
-    angle = -1 * longitude
-    rot_matrix = np.array(
-        [
-            [np.cos(angle), -1 * np.sin(angle), 0],
-            [np.sin(angle), np.cos(angle), 0],
-            [0, 0, 1],
-        ]
-    )
-    return rot_matrix.dot(xyz.T).T
-
-
-def ECEF_from_rotECEF(xyz, longitude):
-    """
-    Calculate ECEF from a rotated ECEF (Inverse of rotECEF_from_ECEF).
-
-    Parameters
-    ----------
-    xyz : ndarray of float
-        numpy array, shape (Npts, 3), with rotated ECEF x,y,z coordinates.
-    longitude : float
-        longitude in radians giving the x direction of the rotated coordinates
-        (usually the array center/telescope location).
-
-    Returns
-    -------
-    ndarray of float
-        ECEF coordinates, shape (Npts, 3).
-
-    """
-    angle = longitude
-    rot_matrix = np.array(
-        [
-            [np.cos(angle), -1 * np.sin(angle), 0],
-            [np.sin(angle), np.cos(angle), 0],
-            [0, 0, 1],
-        ]
-    )
-    return rot_matrix.dot(xyz.T).T
-
-
-def ENU_from_ECEF(
-    xyz,
-    *,
-    center_loc=None,
-    latitude=None,
-    longitude=None,
-    altitude=None,
-    frame="ITRS",
-    ellipsoid=None,
-):
-    """
-    Calculate local ENU (east, north, up) coordinates from ECEF coordinates.
-
-    Parameters
-    ----------
-    xyz : ndarray of float
-        numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
-    center_loc : EarthLocation or MoonLocation object
-        An EarthLocation or MoonLocation object giving the center of the ENU
-        coordinates. Either `center_loc` or all of `latitude`, `longitude`,
-        `altitude` must be passed.
-    latitude : float
-        Latitude of center of ENU coordinates in radians.
-        Not used if `center_loc` is passed.
-    longitude : float
-        Longitude of center of ENU coordinates in radians.
-        Not used if `center_loc` is passed.
-    altitude : float
-        Altitude of center of ENU coordinates in meters.
-        Not used if `center_loc` is passed.
-    frame : str
-        Coordinate frame of xyz and center of ENU coordinates. Valid options are
-        ITRS (default) or MCMF. Not used if `center_loc` is passed.
-    ellipsoid : str
-        Ellipsoid to use for lunar coordinates. Must be one of "SPHERE",
-        "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default
-        is "SPHERE". Only used if frame is MCMF. Not used if `center_loc` is passed.
-
-    Returns
-    -------
-    ndarray of float
-        numpy array, shape (Npts, 3), with local ENU coordinates
-
-    """
-    if center_loc is not None:
-        if not isinstance(center_loc, tuple(allowed_location_types)):
-            raise ValueError(
-                "center_loc is not a supported type. It must be one of "
-                f"{allowed_location_types}"
-            )
-        latitude = center_loc.lat.rad
-        longitude = center_loc.lon.rad
-        altitude = center_loc.height.to("m").value
-        if isinstance(center_loc, EarthLocation):
-            frame = "ITRS"
-        else:
-            frame = "MCMF"
-            ellipsoid = center_loc.ellipsoid
-    else:
-        if latitude is None or longitude is None or altitude is None:
-            raise ValueError(
-                "Either center_loc or all of latitude, longitude and altitude "
-                "must be passed."
-            )
-        frame = frame.upper()
-        if not hasmoon and frame == "MCMF":
-            raise ValueError(
-                "Need to install `lunarsky` package to work with MCMF frame."
-            )
-
-    if frame == "ITRS":
-        sensible_radius_range = (6.35e6, 6.39e6)
-        world = "earth"
-    elif frame == "MCMF":
-        world = "moon"
-        sensible_radius_range = (1.71e6, 1.75e6)
-        if ellipsoid is None:
-            ellipsoid = "SPHERE"
-    else:
-        raise ValueError(f'No ENU_from_ECEF transform defined for frame "{frame}".')
-
-    xyz = np.asarray(xyz)
-    if xyz.ndim > 1 and xyz.shape[1] != 3:
-        raise ValueError("The expected shape of ECEF xyz array is (Npts, 3).")
-
-    squeeze = False
-    if xyz.ndim == 1:
-        squeeze = True
-        xyz = xyz[np.newaxis, :]
-    xyz = np.ascontiguousarray(xyz.T, dtype=np.float64)
-
-    # check that these are sensible ECEF values -- their magnitudes need to be
-    # on the order of Earth's radius
-    ecef_magnitudes = np.linalg.norm(xyz, axis=0)
-    if np.any(ecef_magnitudes <= sensible_radius_range[0]) or np.any(
-        ecef_magnitudes >= sensible_radius_range[1]
-    ):
-        raise ValueError(
-            f"{frame} vector magnitudes must be on the order"
-            f" of the radius of the {world}"
-        )
-
-    # the cython utility expects (3, Npts) for faster manipulation
-    # transpose after we get the array back to match the expected shape
-    enu = _utils._ENU_from_ECEF(
-        xyz,
-        np.ascontiguousarray(latitude, dtype=np.float64),
-        np.ascontiguousarray(longitude, dtype=np.float64),
-        np.ascontiguousarray(altitude, dtype=np.float64),
-        # we have already forced the frame to conform to our options
-        # and if we don't have moon we have already errored.
-        (_utils.Body.Earth.value if frame == "ITRS" else selenoids[ellipsoid].value),
-    )
-    enu = enu.T
-
-    if squeeze:
-        enu = np.squeeze(enu)
-
-    return enu
-
-
-def ECEF_from_ENU(
-    enu,
-    center_loc=None,
-    latitude=None,
-    longitude=None,
-    altitude=None,
-    frame="ITRS",
-    ellipsoid=None,
-):
-    """
-    Calculate ECEF coordinates from local ENU (east, north, up) coordinates.
-
-    Parameters
-    ----------
-    enu : ndarray of float
-        numpy array, shape (Npts, 3), with local ENU coordinates.
-    center_loc : EarthLocation or MoonLocation object
-        An EarthLocation or MoonLocation object giving the center of the ENU
-        coordinates. Either `center_loc` or all of `latitude`, `longitude`,
-        `altitude` must be passed.
-    latitude : float
-        Latitude of center of ENU coordinates in radians.
-        Not used if `center_loc` is passed.
-    longitude : float
-        Longitude of center of ENU coordinates in radians.
-        Not used if `center_loc` is passed.
-    altitude : float
-        Altitude of center of ENU coordinates in meters.
-        Not used if `center_loc` is passed.
-    frame : str
-        Coordinate frame of xyz and center of ENU coordinates. Valid options are
-        ITRS (default) or MCMF. Not used if `center_loc` is passed.
-    ellipsoid : str
-        Ellipsoid to use for lunar coordinates. Must be one of "SPHERE",
-        "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default
-        is "SPHERE". Only used if frame is MCMF. Not used if `center_loc` is passed.
-
-    Returns
-    -------
-    xyz : ndarray of float
-        numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
-
-    """
-    if center_loc is not None:
-        if not isinstance(center_loc, tuple(allowed_location_types)):
-            raise ValueError(
-                "center_loc is not a supported type. It must be one of "
-                f"{allowed_location_types}"
-            )
-        latitude = center_loc.lat.rad
-        longitude = center_loc.lon.rad
-        altitude = center_loc.height.to("m").value
-        if isinstance(center_loc, EarthLocation):
-            frame = "ITRS"
-        else:
-            frame = "MCMF"
-            ellipsoid = center_loc.ellipsoid
-    else:
-        if latitude is None or longitude is None or altitude is None:
-            raise ValueError(
-                "Either center_loc or all of latitude, longitude and altitude "
-                "must be passed."
-            )
-        frame = frame.upper()
-        if not hasmoon and frame == "MCMF":
-            raise ValueError(
-                "Need to install `lunarsky` package to work with MCMF frame."
-            )
-
-    if frame not in ["ITRS", "MCMF"]:
-        raise ValueError(f'No ECEF_from_ENU transform defined for frame "{frame}".')
-
-    if frame == "MCMF" and ellipsoid is None:
-        ellipsoid = "SPHERE"
-
-    enu = np.asarray(enu)
-    if enu.ndim > 1 and enu.shape[1] != 3:
-        raise ValueError("The expected shape of the ENU array is (Npts, 3).")
-    squeeze = False
-
-    if enu.ndim == 1:
-        squeeze = True
-        enu = enu[np.newaxis, :]
-    enu = np.ascontiguousarray(enu.T, dtype=np.float64)
-
-    # the cython utility expects (3, Npts) for faster manipulation
-    # transpose after we get the array back to match the expected shape
-    xyz = _utils._ECEF_from_ENU(
-        enu,
-        np.ascontiguousarray(latitude, dtype=np.float64),
-        np.ascontiguousarray(longitude, dtype=np.float64),
-        np.ascontiguousarray(altitude, dtype=np.float64),
-        # we have already forced the frame to conform to our options
-        # and if we don't have moon we have already errored.
-        (_utils.Body.Earth.value if frame == "ITRS" else selenoids[ellipsoid].value),
-    )
-    xyz = xyz.T
-
-    if squeeze:
-        xyz = np.squeeze(xyz)
-
-    return xyz
-
-
-def parse_antpos_file(antenna_positions_file):
-    """
-    Interpret an antenna positions file.
-
-    Parameters
-    ----------
-    antenna_positions_file : str
-        Name of the antenna_positions_file, which is assumed to be in DATA_PATH.
-        Should be a csv file with the following columns:
-
-        - "name": antenna names
-        - "number": antenna numbers
-        - "x": x ECEF coordinate relative to the telescope location.
-        - "y": y ECEF coordinate relative to the telescope location.
-        - "z": z ECEF coordinate relative to the telescope location.
-
-    Returns
-    -------
-    antenna_names : array of str
-        Antenna names.
-    antenna_numbers : array of int
-        Antenna numbers.
-    antenna_positions : array of float
-        Antenna positions in ECEF relative to the telescope location.
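A minimal ENU round-trip sketch using an astropy EarthLocation as the center (illustrative coordinates):

    import numpy as np
    from astropy import units
    from astropy.coordinates import EarthLocation

    loc = EarthLocation.from_geodetic(
        lon=21.43 * units.deg, lat=-30.72 * units.deg, height=1073.0 * units.m
    )
    enu = np.array([[10.0, 0.0, 0.0]])        # 10 m due east of the center
    xyz = ECEF_from_ENU(enu, center_loc=loc)  # absolute ECEF, meters
    assert np.allclose(ENU_from_ECEF(xyz, center_loc=loc), enu)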
-
-    """
-    columns = ["name", "number", "x", "y", "z"]
-    formats = ["U10", "i8", np.longdouble, np.longdouble, np.longdouble]
-
-    dt = np.rec.format_parser(formats, columns, [])
-    ant_array = np.genfromtxt(
-        antenna_positions_file,
-        delimiter=",",
-        autostrip=True,
-        skip_header=1,
-        dtype=dt.dtype,
-    )
-    antenna_names = ant_array["name"]
-    antenna_numbers = ant_array["number"]
-    antenna_positions = np.stack((ant_array["x"], ant_array["y"], ant_array["z"])).T
-
-    return antenna_names, antenna_numbers, antenna_positions.astype("float")
-
-
-def old_uvw_calc(ra, dec, initial_uvw):
-    """
-    Calculate old uvws from unphased ones in an icrs or gcrs frame.
-
-    This method should not be used and is only retained for testing the
-    undo_old_uvw_calc method, which is needed for fixing phases.
-
-    This code expects input uvws relative to the telescope location in the same frame
-    that ra/dec are in (e.g. icrs or gcrs) and returns phased ones in the same frame.
-
-    Parameters
-    ----------
-    ra : float
-        Right ascension of phase center.
-    dec : float
-        Declination of phase center.
-    initial_uvw : ndarray of float
-        Unphased uvws or positions relative to the array center,
-        shape (Nlocs, 3).
-
-    Returns
-    -------
-    uvw : ndarray of float
-        uvw array in the same frame as initial_uvws, ra and dec.
-
-    """
-    if initial_uvw.ndim == 1:
-        initial_uvw = initial_uvw[np.newaxis, :]
-
-    return _utils._old_uvw_calc(
-        np.float64(ra),
-        np.float64(dec),
-        np.ascontiguousarray(initial_uvw.T, dtype=np.float64),
-    ).T
-
-
-def undo_old_uvw_calc(ra, dec, uvw):
-    """
-    Undo the old phasing calculation on uvws in an icrs or gcrs frame.
-
-    This code expects phased uvws or positions in the same frame that ra/dec
-    are in (e.g. icrs or gcrs) and returns unphased ones in the same frame.
-
-    Parameters
-    ----------
-    ra : float
-        Right ascension of phase center.
-    dec : float
-        Declination of phase center.
-    uvw : ndarray of float
-        Phased uvws or positions relative to the array center,
-        shape (Nlocs, 3).
-
-    Returns
-    -------
-    unphased_uvws : ndarray of float
-        Unphased uvws or positions relative to the array center,
-        shape (Nlocs, 3).
-
-    """
-    if uvw.ndim == 1:
-        uvw = uvw[np.newaxis, :]
-
-    return _utils._undo_old_uvw_calc(
-        np.float64(ra), np.float64(dec), np.ascontiguousarray(uvw.T, dtype=np.float64)
-    ).T
-
-
-def polar2_to_cart3(*, lon_array, lat_array):
-    """
-    Convert 2D polar coordinates into 3D cartesian coordinates.
-
-    This is a simple routine for converting a set of spherical angular coordinates
-    into 3D cartesian vectors, where the x-direction is set by the position (0, 0).
-
-    Parameters
-    ----------
-    lon_array : float or ndarray
-        Longitude coordinates, which increase in the counter-clockwise direction.
-        Units of radians. Can either be a float or ndarray -- if the latter, must have
-        the same shape as lat_array.
-    lat_array : float or ndarray
-        Latitude coordinates, where 0 falls on the equator of the sphere. Units of
-        radians. Can either be a float or ndarray -- if the latter, must have the same
-        shape as lon_array.
-
-    Returns
-    -------
-    xyz_array : ndarray of float
-        Cartesian coordinates of the given longitude and latitude on a unit sphere.
-        Shape is (3, coord_shape), where coord_shape is the shape of lon_array and
-        lat_array if they were provided as type ndarray, otherwise (3,).
-    """
-    # Check to make sure that we are not playing with mixed types
-    if type(lon_array) is not type(lat_array):
-        raise ValueError(
-            "lon_array and lat_array must either both be floats or ndarrays."
-        )
-    if isinstance(lon_array, np.ndarray):
-        if lon_array.shape != lat_array.shape:
-            raise ValueError("lon_array and lat_array must have the same shape.")
-
-    # Once we know that lon_array and lat_array are of the same shape,
-    # time to create our 3D set of vectors!
-    xyz_array = np.array(
-        [
-            np.cos(lon_array) * np.cos(lat_array),
-            np.sin(lon_array) * np.cos(lat_array),
-            np.sin(lat_array),
-        ],
-        dtype=float,
-    )
-
-    return xyz_array
-
-
-def cart3_to_polar2(xyz_array):
-    """
-    Convert 3D cartesian coordinates into 2D polar coordinates.
-
-    This is a simple routine for converting a set of 3D cartesian vectors into
-    spherical coordinates, where the position (0, 0) lies along the x-direction.
-
-    Parameters
-    ----------
-    xyz_array : ndarray of float
-        Cartesian coordinates, need not be of unit vector length. Shape is
-        (3, coord_shape).
-
-    Returns
-    -------
-    lon_array : ndarray of float
-        Longitude coordinates, which increase in the counter-clockwise direction.
-        Units of radians, shape is (coord_shape,).
-    lat_array : ndarray of float
-        Latitude coordinates, where 0 falls on the equator of the sphere. Units of
-        radians, shape is (coord_shape,).
-    """
-    if not isinstance(xyz_array, np.ndarray):
-        raise ValueError("xyz_array must be an ndarray.")
-    if xyz_array.ndim == 0:
-        raise ValueError("xyz_array must have ndim > 0")
-    if xyz_array.shape[0] != 3:
-        raise ValueError("xyz_array must be length 3 across the zeroth axis.")
-
-    # The longitude coord is relatively easy to calculate, just take the X and Y
-    # components and find the arctan of the pair.
-    lon_array = np.mod(np.arctan2(xyz_array[1], xyz_array[0]), 2.0 * np.pi, dtype=float)
-
-    # If we _knew_ that xyz_array was always of length 1, then this call could be a much
-    # simpler one to arcsin. But to make this generic, we'll use the length of the XY
-    # component along with arctan2.
-    lat_array = np.arctan2(
-        xyz_array[2], np.sqrt((xyz_array[0:2] ** 2.0).sum(axis=0)), dtype=float
-    )
-
-    # Return the two arrays
-    return lon_array, lat_array
-
-
-def _rotate_matmul_wrapper(*, xyz_array, rot_matrix, n_rot):
-    """
-    Apply a rotation matrix to a series of vectors.
-
-    This is a simple convenience function which wraps numpy's matmul function for use
-    with various vector rotation functions in this module. This code could, in
-    principle, be replaced by a cythonized piece of code, although the matmul function
-    is _pretty_ well optimized already. This function is not meant to be called by
-    users, but is instead used by multiple higher-level utility functions (namely those
-    that perform rotations).
-
-    Parameters
-    ----------
-    xyz_array : ndarray of floats
-        Array of vectors to be rotated. When nrot > 1, shape may be (n_rot, 3, n_vec)
-        or (1, 3, n_vec), the latter is useful for when performing multiple rotations
-        on a fixed set of vectors. If nrot = 1, shape may be (1, 3, n_vec), (3, n_vec),
-        or (3,).
-    rot_matrix : ndarray of floats
-        Series of rotation matrices to be applied to the stack of vectors. Must be
-        of shape (n_rot, 3, 3)
-    n_rot : int
-        Number of individual rotation matrices to be applied.
-
-    Returns
-    -------
-    rotated_xyz : ndarray of floats
-        Array of vectors that have been rotated, of shape (n_rot, 3, n_vectors,).
-    """
-    # Do a quick check to make sure that things look sensible
-    if rot_matrix.shape != (n_rot, 3, 3):
-        raise ValueError(
-            "rot_matrix must be of shape (n_rot, 3, 3), where n_rot=%i."
% n_rot - ) - if (xyz_array.ndim == 3) and ( - (xyz_array.shape[0] not in [1, n_rot]) or (xyz_array.shape[-2] != 3) - ): - raise ValueError("Misshaped xyz_array - expected shape (n_rot, 3, n_vectors).") - if (xyz_array.ndim < 3) and (xyz_array.shape[0] != 3): - raise ValueError("Misshaped xyz_array - expected shape (3, n_vectors) or (3,).") - rotated_xyz = np.matmul(rot_matrix, xyz_array) - - return rotated_xyz - - -def _rotate_one_axis(xyz_array, *, rot_amount, rot_axis): - """ - Rotate an array of 3D positions around the a single axis (x, y, or z). - - This function performs a basic rotation of 3D vectors about one of the priciple - axes -- the x-axis, the y-axis, or the z-axis. - - Note that the rotations here obey the right-hand rule -- that is to say, from the - perspective of the positive side of the axis of rotation, a positive rotation will - cause points on the plane intersecting this axis to move in a counter-clockwise - fashion. - - Parameters - ---------- - xyz_array : ndarray of float - Set of 3-dimensional vectors be rotated, in typical right-handed cartesian - order, e.g. (x, y, z). Shape is (Nrot, 3, Nvectors). - rot_amount : float or ndarray of float - Amount (in radians) to rotate the given set of coordinates. Can either be a - single float (or ndarray of shape (1,)) if rotating all vectors by the same - amount, otherwise expected to be shape (Nrot,). - rot_axis : int - Axis around which the rotation is applied. 0 is the x-axis, 1 is the y-axis, - and 2 is the z-axis. - - Returns - ------- - rotated_xyz : ndarray of float - Set of rotated 3-dimensional vectors, shape (Nrot, 3, Nvector). - """ - # If rot_amount is None or all zeros, then this is just one big old no-op. - if (rot_amount is None) or np.all(rot_amount == 0.0): - if np.ndim(xyz_array) == 1: - return deepcopy(xyz_array[np.newaxis, :, np.newaxis]) - elif np.ndim(xyz_array) == 2: - return deepcopy(xyz_array[np.newaxis, :, :]) - else: - return deepcopy(xyz_array) - - # Check and see how big of a rotation matrix we need - n_rot = 1 if (not isinstance(rot_amount, np.ndarray)) else (rot_amount.shape[0]) - n_vec = xyz_array.shape[-1] - - # The promotion of values to float64 is to suppress numerical precision issues, - # since the matrix math can - in limited circumstances - introduce precision errors - # of order 10x the limiting numerical precision of the float. For a float32/single, - # thats a part in 1e6 (~arcsec-level errors), but for a float64 it translates to - # a part in 1e15. - rot_matrix = np.zeros((3, 3, n_rot), dtype=np.float64) - - # Figure out which pieces of the matrix we need to update - temp_jdx = (rot_axis + 1) % 3 - temp_idx = (rot_axis + 2) % 3 - - # Fill in the rotation matricies accordingly - rot_matrix[rot_axis, rot_axis] = 1 - rot_matrix[temp_idx, temp_idx] = np.cos(rot_amount, dtype=np.float64) - rot_matrix[temp_jdx, temp_jdx] = rot_matrix[temp_idx, temp_idx] - rot_matrix[temp_idx, temp_jdx] = np.sin(rot_amount, dtype=np.float64) - rot_matrix[temp_jdx, temp_idx] = -rot_matrix[temp_idx, temp_jdx] - - # The rot matrix was shape (3, 3, n_rot) to help speed up filling in the elements - # of each matrix, but now we want to flip it into its proper shape of (n_rot, 3, 3) - rot_matrix = np.transpose(rot_matrix, axes=[2, 0, 1]) - - if (n_rot == 1) and (n_vec == 1) and (xyz_array.ndim == 3): - # This is a special case where we allow the rotation axis to "expand" along - # the 0th axis of the rot_amount arrays. 
-        # but n_rot !=1, then it's a lot faster (by about 10x) to "switch it up" and
-        # swap the n_vector and n_rot axes, and then swap them back once everything
-        # else is done.
-        return np.transpose(
-            _rotate_matmul_wrapper(
-                xyz_array=np.transpose(xyz_array, axes=[2, 1, 0]),
-                rot_matrix=rot_matrix,
-                n_rot=n_rot,
-            ),
-            axes=[2, 1, 0],
-        )
-    else:
-        return _rotate_matmul_wrapper(
-            xyz_array=xyz_array, rot_matrix=rot_matrix, n_rot=n_rot
-        )
-
-
-def _rotate_two_axis(xyz_array, *, rot_amount1, rot_amount2, rot_axis1, rot_axis2):
-    """
-    Rotate an array of 3D positions sequentially around a pair of axes (x, y, or z).
-
-    This function performs a sequential pair of basic rotations of 3D vectors about
-    the principal axes -- the x-axis, the y-axis, or the z-axis.
-
-    Note that the rotations here obey the right-hand rule -- that is to say, from the
-    perspective of the positive side of the axis of rotation, a positive rotation will
-    cause points on the plane intersecting this axis to move in a counter-clockwise
-    fashion.
-
-    Parameters
-    ----------
-    xyz_array : ndarray of float
-        Set of 3-dimensional vectors to be rotated, in typical right-handed cartesian
-        order, e.g. (x, y, z). Shape is (Nrot, 3, Nvectors).
-    rot_amount1 : float or ndarray of float
-        Amount (in radians) of rotation to apply during the first rotation of the
-        sequence, to the given set of coordinates. Can either be a single float (or
-        ndarray of shape (1,)) if rotating all vectors by the same amount, otherwise
-        expected to be shape (Nrot,).
-    rot_amount2 : float or ndarray of float
-        Amount (in radians) of rotation to apply during the second rotation of the
-        sequence, to the given set of coordinates. Can either be a single float (or
-        ndarray of shape (1,)) if rotating all vectors by the same amount, otherwise
-        expected to be shape (Nrot,).
-    rot_axis1 : int
-        Axis around which the first rotation is applied. 0 is the x-axis, 1 is the
-        y-axis, and 2 is the z-axis.
-    rot_axis2 : int
-        Axis around which the second rotation is applied. 0 is the x-axis, 1 is the
-        y-axis, and 2 is the z-axis.
-
-    Returns
-    -------
-    rotated_xyz : ndarray of float
-        Set of rotated 3-dimensional vectors, shape (Nrot, 3, Nvector).
-
-    """
-    # Capture some special cases upfront, where we can save ourselves a bit of work
-    no_rot1 = (rot_amount1 is None) or np.all(rot_amount1 == 0.0)
-    no_rot2 = (rot_amount2 is None) or np.all(rot_amount2 == 0.0)
-    if no_rot1 and no_rot2:
-        # If rot_amount is None, then this is just one big old no-op.
-        return deepcopy(xyz_array)
-    elif no_rot1:
-        # If rot_amount1 is None, then ignore it and just work w/ the 2nd rotation
-        return _rotate_one_axis(xyz_array, rot_amount=rot_amount2, rot_axis=rot_axis2)
-    elif no_rot2:
-        # If rot_amount2 is None, then ignore it and just work w/ the 1st rotation
-        return _rotate_one_axis(xyz_array, rot_amount=rot_amount1, rot_axis=rot_axis1)
-    elif rot_axis1 == rot_axis2:
-        # Capture the case where someone wants to do a sequence of rotations on the same
-        # axis. Also known as just rotating a single axis.
-        return _rotate_one_axis(
-            xyz_array, rot_amount=rot_amount1 + rot_amount2, rot_axis=rot_axis1
-        )
-
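A sanity-check sketch of the single-axis rotation above (right-hand rule; illustrative only):

    import numpy as np

    vec = np.array([[1.0], [0.0], [0.0]])  # x-hat, shape (3, n_vec)
    out = _rotate_one_axis(vec, rot_amount=np.pi / 2, rot_axis=2)
    assert np.allclose(out[0, :, 0], [0.0, 1.0, 0.0])  # rotated onto y-hat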
-    # Figure out how many individual rotation matrices we need, accounting for the
-    # fact that these can either be floats or ndarrays.
-    n_rot = max(
-        rot_amount1.shape[0] if isinstance(rot_amount1, np.ndarray) else 1,
-        rot_amount2.shape[0] if isinstance(rot_amount2, np.ndarray) else 1,
-    )
-    n_vec = xyz_array.shape[-1]
-
-    # The promotion of values to float64 is to suppress numerical precision issues,
-    # since the matrix math can - in limited circumstances - introduce precision errors
-    # of order 10x the limiting numerical precision of the float. For a float32/single,
-    # that's a part in 1e6 (~arcsec-level errors), but for a float64 it translates to
-    # a part in 1e15.
-    rot_matrix = np.empty((3, 3, n_rot), dtype=np.float64)
-
-    # There are two permutations per pair of axes -- when the pair is right-hand
-    # oriented vs left-hand oriented. Check here which one it is. For example,
-    # rotating first on the x-axis, second on the y-axis is considered a
-    # "right-handed" pair, whereas z-axis first, then y-axis would be considered
-    # a "left-handed" pair.
-    lhd_order = np.mod(rot_axis2 - rot_axis1, 3) != 1
-
-    temp_idx = [
-        np.mod(rot_axis1 - lhd_order, 3),
-        np.mod(rot_axis1 + 1 - lhd_order, 3),
-        np.mod(rot_axis1 + 2 - lhd_order, 3),
-    ]
-
-    # We're using lots of sin and cos calculations -- doing them once upfront saves
-    # quite a bit of time by eliminating redundant calculations
-    sin_lo = np.sin(rot_amount2 if lhd_order else rot_amount1, dtype=np.float64)
-    cos_lo = np.cos(rot_amount2 if lhd_order else rot_amount1, dtype=np.float64)
-    sin_hi = np.sin(rot_amount1 if lhd_order else rot_amount2, dtype=np.float64)
-    cos_hi = np.cos(rot_amount1 if lhd_order else rot_amount2, dtype=np.float64)
-
-    # Take care of the diagonal terms first, since they aren't actually affected by the
-    # order of rotational operations
-    rot_matrix[temp_idx[0], temp_idx[0]] = cos_hi
-    rot_matrix[temp_idx[1], temp_idx[1]] = cos_lo
-    rot_matrix[temp_idx[2], temp_idx[2]] = cos_lo * cos_hi
-
-    # Now time for the off-diagonal terms, as a set of 3 pairs. The rotation matrix
-    # for a left-hand oriented pair of rotation axes (e.g., x-rot, then y-rot) is just
-    # a transpose of the right-hand orientation of the same pair (e.g., y-rot, then
-    # x-rot).
-    rot_matrix[temp_idx[0 + lhd_order], temp_idx[1 - lhd_order]] = sin_lo * sin_hi
-    rot_matrix[temp_idx[0 - lhd_order], temp_idx[lhd_order - 1]] = (
-        cos_lo * sin_hi * ((-1.0) ** lhd_order)
-    )
-
-    rot_matrix[temp_idx[1 - lhd_order], temp_idx[0 + lhd_order]] = 0.0
-    rot_matrix[temp_idx[1 + lhd_order], temp_idx[2 - lhd_order]] = sin_lo * (
-        (-1.0) ** (1 + lhd_order)
-    )
-
-    rot_matrix[temp_idx[lhd_order - 1], temp_idx[0 - lhd_order]] = sin_hi * (
-        (-1.0) ** (1 + lhd_order)
-    )
-    rot_matrix[temp_idx[2 - lhd_order], temp_idx[1 + lhd_order]] = (
-        sin_lo * cos_hi * ((-1.0) ** (lhd_order))
-    )
-
-    # The rot matrix was shape (3, 3, n_rot) to help speed up filling in the elements
-    # of each matrix, but now we want to flip it into its proper shape of (n_rot, 3, 3)
-    rot_matrix = np.transpose(rot_matrix, axes=[2, 0, 1])
-
-    if (n_rot == 1) and (n_vec == 1) and (xyz_array.ndim == 3):
-        # This is a special case where we allow the rotation axis to "expand" along
-        # the 0th axis of the rot_amount arrays. For xyz_array, if n_vectors = 1
-        # but n_rot !=1, then it's a lot faster (by about 10x) to "switch it up" and
-        # swap the n_vector and n_rot axes, and then swap them back once everything
-        # else is done.
-        return np.transpose(
-            _rotate_matmul_wrapper(  # xyz_array, rot_matrix, n_rot
-                xyz_array=np.transpose(xyz_array, axes=[2, 1, 0]),
-                rot_matrix=rot_matrix,
-                n_rot=n_rot,
-            ),
-            axes=[2, 1, 0],
-        )
-    else:
-        return _rotate_matmul_wrapper(
-            xyz_array=xyz_array, rot_matrix=rot_matrix, n_rot=n_rot
-        )
-
-
-def calc_uvw(
-    *,
-    app_ra=None,
-    app_dec=None,
-    frame_pa=None,
-    lst_array=None,
-    use_ant_pos=True,
-    uvw_array=None,
-    antenna_positions=None,
-    antenna_numbers=None,
-    ant_1_array=None,
-    ant_2_array=None,
-    old_app_ra=None,
-    old_app_dec=None,
-    old_frame_pa=None,
-    telescope_lat=None,
-    telescope_lon=None,
-    from_enu=False,
-    to_enu=False,
-):
-    """
-    Calculate an array of baseline coordinates, in either uvw or ENU.
-
-    This routine is meant as a convenience function for producing baseline coordinates
-    under a few different circumstances:
-
-    1) Calculating ENU coordinates using antenna positions
-    2) Calculating uvw coordinates at a given sky position using antenna positions
-    3) Converting from ENU coordinates to uvw coordinates
-    4) Converting from uvw coordinates to ENU coordinates
-    5) Converting from uvw coordinates at one sky position to another sky position
-
-    Different conversion pathways have different parameters that are required.
-
-    Parameters
-    ----------
-    app_ra : ndarray of float
-        Apparent RA of the target phase center, required if calculating baseline
-        coordinates in uvw-space (vs ENU-space). Shape is (Nblts,), units are
-        radians.
-    app_dec : ndarray of float
-        Apparent declination of the target phase center, required if calculating
-        baseline coordinates in uvw-space (vs ENU-space). Shape is (Nblts,),
-        units are radians.
-    frame_pa : ndarray of float
-        Position angle between the great circle of declination in the apparent frame
-        versus that of the reference frame, used for making sure that "North" on
-        the derived maps points towards a particular celestial pole (not just the
-        topocentric one). Required if not deriving baseline coordinates from antenna
-        positions, from_enu=False, and a value for old_frame_pa is given. Shape is
-        (Nblts,), units are radians.
-    old_app_ra : ndarray of float
-        Apparent RA of the previous phase center, required if not deriving baseline
-        coordinates from antenna positions and from_enu=False. Shape is (Nblts,),
-        units are radians.
-    old_app_dec : ndarray of float
-        Apparent declination of the previous phase center, required if not deriving
-        baseline coordinates from antenna positions and from_enu=False. Shape is
-        (Nblts,), units are radians.
-    old_frame_pa : ndarray of float
-        Frame position angle of the previous phase center, required if not deriving
-        baseline coordinates from antenna positions, from_enu=False, and a value
-        for frame_pa is supplied. Shape is (Nblts,), units are radians.
-    lst_array : ndarray of float
-        Local apparent sidereal time, required if deriving baseline coordinates from
-        antenna positions, or converting to/from ENU coordinates. Shape is (Nblts,).
-    use_ant_pos : bool
-        Switch to determine whether to derive uvw values from the antenna positions
-        (if set to True), or to use the previously calculated uvw coordinates to
-        derive the new baseline vectors (if set to False). Default is True.
-    uvw_array : ndarray of float
-        Array of previous baseline coordinates (in either uvw or ENU), required if
-        not deriving new coordinates from antenna positions. Shape is (Nblts, 3).
-    antenna_positions : ndarray of float
-        List of antenna positions relative to array center in ECEF coordinates,
-        required if not providing `uvw_array`. Shape is (Nants, 3).
-    antenna_numbers : ndarray of int
-        List of antenna numbers, ordered in the same way as `antenna_positions` (e.g.,
-        `antenna_numbers[0]` should give the number of the antenna that resides at ECEF
-        position given by `antenna_positions[0]`). Shape is (Nants,), required if not
-        providing `uvw_array`. Contains all unique entries of the joint set of
-        `ant_1_array` and `ant_2_array`.
-    ant_1_array : ndarray of int
-        Antenna number of the first antenna in the baseline pair, for all baselines.
-        Required if not providing `uvw_array`, shape is (Nblts,).
-    ant_2_array : ndarray of int
-        Antenna number of the second antenna in the baseline pair, for all baselines.
-        Required if not providing `uvw_array`, shape is (Nblts,).
-    telescope_lat : float
-        Latitude of the phase center, units radians, required if deriving baseline
-        coordinates from antenna positions, or converting to/from ENU coordinates.
-    telescope_lon : float
-        Longitude of the phase center, units radians, required if deriving baseline
-        coordinates from antenna positions, or converting to/from ENU coordinates.
-    from_enu : boolean
-        Set to True if uvw_array is expressed in ENU coordinates. Default is False.
-    to_enu : boolean
-        Set to True if you would like the output expressed in ENU coordinates. Default
-        is False.
-
-    Returns
-    -------
-    new_coords : ndarray of float64
-        Set of baseline coordinates, shape (Nblts, 3).
-    """
-    if to_enu:
-        if lst_array is None and not use_ant_pos:
-            raise ValueError(
-                "Must include lst_array to calculate baselines in ENU coordinates!"
-            )
-        if telescope_lat is None:
-            raise ValueError(
-                "Must include telescope_lat to calculate baselines in ENU coordinates!"
-            )
-    else:
-        if ((app_ra is None) or (app_dec is None)) and frame_pa is None:
-            raise ValueError(
-                "Must include both app_ra and app_dec, or frame_pa to calculate "
-                "baselines in uvw coordinates!"
-            )
-
-    if use_ant_pos:
-        # Assume at this point we are dealing w/ antenna positions
-        if antenna_positions is None:
-            raise ValueError("Must include antenna_positions if use_ant_pos=True.")
-        if (ant_1_array is None) or (ant_2_array is None) or (antenna_numbers is None):
-            raise ValueError(
-                "Must include ant_1_array, ant_2_array, and antenna_numbers "
-                "when setting use_ant_pos=True."
-            )
-        if lst_array is None and not to_enu:
-            raise ValueError(
-                "Must include lst_array if use_ant_pos=True and not calculating "
-                "baselines in ENU coordinates."
-            )
-        if telescope_lon is None:
-            raise ValueError("Must include telescope_lon if use_ant_pos=True.")
-
-        ant_dict = {ant_num: idx for idx, ant_num in enumerate(antenna_numbers)}
-        ant_1_index = np.array(
-            [ant_dict[ant_num] for ant_num in ant_1_array], dtype=int
-        )
-        ant_2_index = np.array(
-            [ant_dict[ant_num] for ant_num in ant_2_array], dtype=int
-        )
-
-        N_ants = antenna_positions.shape[0]
-        # Use the app_ra, app_dec, and lst_array arrays to figure out how many unique
-        # rotations are actually needed. If the ratio of Nblts to number of unique
-        # entries is favorable, we can just rotate the antenna positions and save
-        # ourselves a bit of work.
-        if to_enu:
-            # If to_enu, skip all this -- there's only one unique ha + dec combo
-            unique_mask = np.zeros(len(ant_1_index), dtype=np.bool_)
-            unique_mask[0] = True
-        else:
-            unique_mask = np.append(
-                True,
-                (
-                    ((lst_array[:-1] - app_ra[:-1]) != (lst_array[1:] - app_ra[1:]))
-                    | (app_dec[:-1] != app_dec[1:])
-                ),
-            )
-
-        # GHA -> Hour Angle as measured at Greenwich (because antenna coords are
-        # centered such that x-plane intersects the meridian at longitude 0).
-        if to_enu:
-            # Unprojected coordinates are given in the ENU convention -- that's
-            # equivalent to calculating uvw's based on zenith. We can use that to our
-            # advantage and spoof the gha and dec based on telescope lon and lat
-            unique_gha = np.zeros(1) - telescope_lon
-            unique_dec = np.zeros(1) + telescope_lat
-            unique_pa = None
-        else:
-            unique_gha = (lst_array[unique_mask] - app_ra[unique_mask]) - telescope_lon
-            unique_dec = app_dec[unique_mask]
-            unique_pa = 0.0 if frame_pa is None else frame_pa[unique_mask]
-
-        # Transpose the ant vectors so that they are in the proper shape
-        ant_vectors = np.transpose(antenna_positions)[np.newaxis, :, :]
-        # Apply rotations, and then reorganize the ndarray so that you can access
-        # individual antenna vectors quickly.
-        ant_rot_vectors = np.reshape(
-            np.transpose(
-                _rotate_one_axis(
-                    _rotate_two_axis(
-                        ant_vectors,
-                        rot_amount1=unique_gha,
-                        rot_amount2=unique_dec,
-                        rot_axis1=2,
-                        rot_axis2=1,
-                    ),
-                    rot_amount=unique_pa,
-                    rot_axis=0,
-                ),
-                axes=[0, 2, 1],
-            ),
-            (-1, 3),
-        )
-
-        unique_mask[0] = False
-        unique_map = np.cumsum(unique_mask) * N_ants
-        new_coords = (
-            ant_rot_vectors[unique_map + ant_2_index]
-            - ant_rot_vectors[unique_map + ant_1_index]
-        )
-    else:
-        if uvw_array is None:
-            raise ValueError("Must include uvw_array if use_ant_pos=False.")
-        if from_enu:
-            if to_enu:
-                # Well this was pointless... returning your uvws unharmed
-                return uvw_array
-            # Unprojected coordinates appear to be stored in ENU coordinates -- that's
-            # equivalent to calculating uvw's based on zenith. We can use that to our
-            # advantage and spoof old_app_ra and old_app_dec based on lst_array and
-            # telescope_lat
-            if telescope_lat is None:
-                raise ValueError(
-                    "Must include telescope_lat if moving between "
-                    "ENU (i.e., 'unprojected') and uvw coordinates!"
-                )
-            if lst_array is None:
-                raise ValueError(
-                    "Must include lst_array if moving between ENU "
-                    "(i.e., 'unprojected') and uvw coordinates!"
-                )
-        else:
-            if (old_frame_pa is None) and not (frame_pa is None or to_enu):
-                raise ValueError(
-                    "Must include old_frame_pa values if data are phased and "
-                    "applying new position angle values (frame_pa)."
-                )
-            if ((old_app_ra is None) and not (app_ra is None or to_enu)) or (
-                (old_app_dec is None) and not (app_dec is None or to_enu)
-            ):
-                raise ValueError(
-                    "Must include old_app_ra and old_app_dec values when data are "
-                    "already phased and phasing to a new position."
-                )
-        # For this operation, all we need is the delta-ha coverage, which _should_ be
-        # entirely encapsulated by the change in RA.
-        if (app_ra is None) and (old_app_ra is None):
-            gha_delta_array = 0.0
-        else:
-            gha_delta_array = (lst_array if from_enu else old_app_ra) - (
-                lst_array if to_enu else app_ra
-            )
-
-        # Notice below there's an axis re-orientation here, to go from uvw -> XYZ,
-        # where X is pointing in the direction of the source. This is mostly here
-        # for convenience and code legibility -- a slightly different pair of
-        # rotations would give you the same result w/o needing to cycle the axes.
-
-        # Up front, we want to trap the corner-case where the sky position you are
-        # phasing up to hasn't changed, just the position angle (i.e., which way is
-        # up on the map). This is a much easier transform to handle.
-        if np.all(gha_delta_array == 0.0) and np.all(old_app_dec == app_dec):
-            new_coords = _rotate_one_axis(
-                uvw_array[:, [2, 0, 1], np.newaxis],
-                rot_amount=frame_pa - (0.0 if old_frame_pa is None else old_frame_pa),
-                rot_axis=0,
-            )[:, :, 0]
-        else:
-            new_coords = _rotate_two_axis(
-                _rotate_two_axis(
-                    uvw_array[:, [2, 0, 1], np.newaxis],
-                    rot_amount1=(
-                        0.0 if (from_enu or old_frame_pa is None) else (-old_frame_pa)
-                    ),
-                    rot_amount2=(-telescope_lat) if from_enu else (-old_app_dec),
-                    rot_axis1=0,
-                    rot_axis2=1,
-                ),
-                rot_amount1=gha_delta_array,
-                rot_amount2=telescope_lat if to_enu else app_dec,
-                rot_axis1=2,
-                rot_axis2=1,
-            )
-
-            # One final rotation applied here, to compensate for the fact that we want
-            # the Dec-axis of our image (Fourier dual to the v-axis) to be aligned with
-            # the chosen frame, if we are not in ENU coordinates
-            if not to_enu:
-                new_coords = _rotate_one_axis(
-                    new_coords, rot_amount=frame_pa, rot_axis=0
-                )
-
-            # Finally drop the now-vestigial last axis of the array
-            new_coords = new_coords[:, :, 0]
-
-        # There's one last task to do, which is to re-align the axes from projected
-        # XYZ -> uvw, where X (which points towards the source) falls on the w axis,
-        # and Y and Z fall on the u and v axes, respectively.
-        return new_coords[:, [1, 2, 0]]
-
-
-def transform_sidereal_coords(
-    *,
-    longitude,
-    latitude,
-    in_coord_frame,
-    out_coord_frame,
-    in_coord_epoch=None,
-    out_coord_epoch=None,
-    time_array=None,
-):
-    """
-    Transform a given set of coordinates from one sidereal coordinate frame to another.
-
-    Uses astropy to convert coordinates from one sidereal frame into another.
-    This function will support transforms from several frames, including GCRS,
-    FK5 (i.e., J2000), FK4 (i.e., B1950), Galactic, Supergalactic, CIRS, HCRS, and
-    a few others (basically anything that doesn't require knowing the observer's
-    location on Earth/other celestial body).
-
-    Parameters
-    ----------
-    longitude : float or ndarray of floats
-        Longitudinal coordinate to be transformed, typically expressed as the right
-        ascension, in units of radians. Can either be a float, or an ndarray of
-        floats with shape (Ncoords,). Must agree with latitude.
-    latitude : float or ndarray of floats
-        Latitudinal coordinate to be transformed, typically expressed as the
-        declination, in units of radians. Can either be a float, or an ndarray of
-        floats with shape (Ncoords,). Must agree with longitude.
-    in_coord_frame : string
-        Reference frame for the provided coordinates. Expected to match a list of
-        those supported within the astropy SkyCoord object. An incomplete list includes
-        'gcrs', 'fk4', 'fk5', 'galactic', 'supergalactic', 'cirs', and 'hcrs'.
-    out_coord_frame : string
-        Reference frame to output coordinates in. Expected to match a list of
-        those supported within the astropy SkyCoord object. An incomplete list includes
-        'gcrs', 'fk4', 'fk5', 'galactic', 'supergalactic', 'cirs', and 'hcrs'.
-    in_coord_epoch : float
-        Epoch for the input coordinate frame. Optional parameter, only required
-        when using either the FK4 (B1950) or FK5 (J2000) coordinate systems. Units are
-        in fractional years.
-    out_coord_epoch : float
-        Epoch for the output coordinate frame. Optional parameter, only required
-        when using either the FK4 (B1950) or FK5 (J2000) coordinate systems. Units are
-def transform_sidereal_coords(
-    *,
-    longitude,
-    latitude,
-    in_coord_frame,
-    out_coord_frame,
-    in_coord_epoch=None,
-    out_coord_epoch=None,
-    time_array=None,
-):
-    """
-    Transform a given set of coordinates from one sidereal coordinate frame to another.
-
-    Uses astropy to convert coordinates from one sidereal frame into another.
-    This function will support transforms from several frames, including GCRS,
-    FK5 (i.e., J2000), FK4 (i.e., B1950), Galactic, Supergalactic, CIRS, HCRS, and
-    a few others (basically anything that doesn't require knowing the observer's
-    location on Earth/other celestial body).
-
-    Parameters
-    ----------
-    longitude : float or ndarray of floats
-        Longitudinal coordinate to be transformed, typically expressed as the right
-        ascension, in units of radians. Can either be a float, or an ndarray of
-        floats with shape (Ncoords,). Must agree with latitude.
-    latitude : float or ndarray of floats
-        Latitudinal coordinate to be transformed, typically expressed as the
-        declination, in units of radians. Can either be a float, or an ndarray of
-        floats with shape (Ncoords,). Must agree with longitude.
-    in_coord_frame : string
-        Reference frame for the provided coordinates. Expected to match a list of
-        those supported within the astropy SkyCoord object. An incomplete list includes
-        'gcrs', 'fk4', 'fk5', 'galactic', 'supergalactic', 'cirs', and 'hcrs'.
-    out_coord_frame : string
-        Reference frame to output coordinates in. Expected to match a list of
-        those supported within the astropy SkyCoord object. An incomplete list includes
-        'gcrs', 'fk4', 'fk5', 'galactic', 'supergalactic', 'cirs', and 'hcrs'.
-    in_coord_epoch : float
-        Epoch for the input coordinate frame. Optional parameter, only required
-        when using either the FK4 (B1950) or FK5 (J2000) coordinate systems. Units are
-        in fractional years.
-    out_coord_epoch : float
-        Epoch for the output coordinate frame. Optional parameter, only required
-        when using either the FK4 (B1950) or FK5 (J2000) coordinate systems. Units are
-        in fractional years.
-    time_array : float or ndarray of floats
-        Julian date(s) to which the coordinates correspond, only used in frames
-        with annual motion terms (e.g., aberration in GCRS). Can either be a float,
-        or an ndarray of floats with shape (Ntimes,), assuming that either latitude
-        and longitude are floats, or that Ntimes == Ncoords.
-
-    Returns
-    -------
-    new_lon : float or ndarray of floats
-        Longitudinal coordinates, in units of radians. Output will be an ndarray
-        if any inputs were, with shape (Ncoords,) or (Ntimes,), depending on inputs.
-    new_lat : float or ndarray of floats
-        Latitudinal coordinates, in units of radians. Output will be an ndarray
-        if any inputs were, with shape (Ncoords,) or (Ntimes,), depending on inputs.
-    """
-    lon_coord = longitude * units.rad
-    lat_coord = latitude * units.rad
-
-    # Check here to make sure that lat_coord and lon_coord are the same length,
-    # either 1 or len(time_array)
-    if lat_coord.shape != lon_coord.shape:
-        raise ValueError("lon and lat must be the same shape.")
-
-    if lon_coord.ndim == 0:
-        lon_coord.shape += (1,)
-        lat_coord.shape += (1,)
-
-    # Check to make sure that we have a properly formatted epoch for our in-bound
-    # coordinate frame
-    in_epoch = None
-    if isinstance(in_coord_epoch, str) or isinstance(in_coord_epoch, Time):
-        # If it's a string or a Time object, we don't need to do anything more
-        in_epoch = Time(in_coord_epoch)
-    elif in_coord_epoch is not None:
-        if in_coord_frame.lower() in ["fk4", "fk4noeterms"]:
-            in_epoch = Time(in_coord_epoch, format="byear")
-        else:
-            in_epoch = Time(in_coord_epoch, format="jyear")
-
-    # Now do the same for the outbound frame
-    out_epoch = None
-    if isinstance(out_coord_epoch, str) or isinstance(out_coord_epoch, Time):
-        # If it's a string or a Time object, we don't need to do anything more
-        out_epoch = Time(out_coord_epoch)
-    elif out_coord_epoch is not None:
-        if out_coord_frame.lower() in ["fk4", "fk4noeterms"]:
-            out_epoch = Time(out_coord_epoch, format="byear")
-        else:
-            out_epoch = Time(out_coord_epoch, format="jyear")
-
-    # Make sure that time array matches up with what we expect. Thanks to astropy
-    # weirdness, time_array has to be the same length as lat/lon coords
-    rep_time = False
-    rep_crds = False
-    if time_array is None:
-        time_obj_array = None
-    else:
-        if isinstance(time_array, Time):
-            time_obj_array = time_array
-        else:
-            time_obj_array = Time(time_array, format="jd", scale="utc")
-        if (time_obj_array.size != 1) and (lon_coord.size != 1):
-            if time_obj_array.shape != lon_coord.shape:
-                raise ValueError(
-                    "Shape of time_array must match that of "
-                    "latitude/longitude if len(time_array) > 1."
-                )
-        else:
-            rep_crds = (time_obj_array.size != 1) and (lon_coord.size == 1)
-            rep_time = (time_obj_array.size == 1) and (lon_coord.size != 1)
-    if rep_crds:
-        lon_coord = np.repeat(lon_coord, len(time_array))
-        lat_coord = np.repeat(lat_coord, len(time_array))
-    if rep_time:
-        time_obj_array = Time(
-            np.repeat(time_obj_array.jd, len(lon_coord)), format="jd", scale="utc"
-        )
-    coord_object = SkyCoord(
-        lon_coord,
-        lat_coord,
-        frame=in_coord_frame,
-        equinox=in_epoch,
-        obstime=time_obj_array,
-    )
-
-    # Easiest, most general way to transform to the new frame is to create a dummy
-    # SkyCoord with all the attributes needed -- note that we particularly need this
-    # in order to use a non-standard equinox/epoch
-    new_coord = coord_object.transform_to(
-        SkyCoord(0, 0, unit="rad", frame=out_coord_frame, equinox=out_epoch)
-    )
-
-    return new_coord.spherical.lon.rad, new_coord.spherical.lat.rad
-
-
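-# Hypothetical usage sketch (coordinates illustrative): convert an FK5/J2000
-# position into ICRS, mirroring how this function is called internally:
-#
-#     >>> import numpy as np
-#     >>> icrs_ra, icrs_dec = transform_sidereal_coords(
-#     ...     longitude=np.array([1.0]),
-#     ...     latitude=np.array([0.5]),
-#     ...     in_coord_frame="fk5",
-#     ...     out_coord_frame="icrs",
-#     ...     in_coord_epoch=2000.0,
-#     ... )
-
-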
-def transform_icrs_to_app(
-    *,
-    time_array,
-    ra,
-    dec,
-    telescope_loc,
-    telescope_frame="itrs",
-    ellipsoid=None,
-    epoch=2000.0,
-    pm_ra=None,
-    pm_dec=None,
-    vrad=None,
-    dist=None,
-    astrometry_library=None,
-):
-    """
-    Transform a set of coordinates in ICRS to topocentric/apparent coordinates.
-
-    This utility uses one of three libraries (astropy, NOVAS, or ERFA) to calculate
-    the apparent (i.e., topocentric) coordinates of a source at a given time and
-    location, given a set of coordinates expressed in the ICRS frame. These coordinates
-    are most typically used for defining the phase center of the array (i.e.,
-    calculating baseline vectors).
-
-    As of astropy v4.2, the agreement between the three libraries is consistent down to
-    the level of better than 1 mas, with the values produced by astropy and pyERFA
-    consistent to better than 10 µas (this is not surprising, given that astropy uses
-    pyERFA under the hood for astrometry). ERFA is the default as it outputs
-    coordinates natively in the apparent frame (whereas NOVAS and astropy do not), as
-    well as the fact that of the three libraries, it produces results the fastest.
-
-    Parameters
-    ----------
-    time_array : float or array-like of float
-        Julian dates to calculate coordinate positions for. Can either be a single
-        float, or an array-like of shape (Ntimes,).
-    ra : float or array-like of float
-        ICRS RA of the celestial target, expressed in units of radians. Can either
-        be a single float or array of shape (Ntimes,), although this must be consistent
-        with other parameters (with the exception of telescope location parameters).
-    dec : float or array-like of float
-        ICRS Dec of the celestial target, expressed in units of radians. Can either
-        be a single float or array of shape (Ntimes,), although this must be consistent
-        with other parameters (with the exception of telescope location parameters).
-    telescope_loc : array-like of floats or EarthLocation or MoonLocation
-        ITRS latitude, longitude, and altitude (rel to sea-level) of the phase center
-        of the array. Can either be provided as an astropy EarthLocation, or a tuple
-        of shape (3,) containing (in order) the latitude, longitude, and altitude,
-        in units of radians, radians, and meters, respectively.
-    telescope_frame : str, optional
-        Reference frame for telescope location. Options are itrs (default) or mcmf.
-        Only used if telescope_loc is not an EarthLocation or MoonLocation.
-    ellipsoid : str
-        Ellipsoid to use for lunar coordinates. Must be one of "SPHERE",
-        "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default
-        is "SPHERE". Only used if frame is mcmf.
-    epoch : int or float or str or Time object
-        Epoch of the coordinate data supplied, only used when supplying proper motion
-        values. If supplying a number, it will be assumed to be in Julian years.
-        Default is J2000.0.
-    pm_ra : float or array-like of float
-        Proper motion in RA of the source, expressed in units of milliarcsec / year.
-        Proper motion values are applied relative to the J2000 (i.e., RA/Dec ICRS
-        values should be set to their expected values when the epoch is 2000.0).
-        Can either be a single float or array of shape (Ntimes,), although this must
-        be consistent with other parameters (namely ra and dec). Note that
-        units are in dRA/dt, not cos(Dec)*dRA/dt. Not required.
-    pm_dec : float or array-like of float
-        Proper motion in Dec of the source, expressed in units of milliarcsec / year.
-        Proper motion values are applied relative to the J2000 (i.e., RA/Dec ICRS
-        values should be set to their expected values when the epoch is 2000.0).
-        Can either be a single float or array of shape (Ntimes,), although this must
-        be consistent with other parameters (namely ra and dec). Not required.
-    vrad : float or array-like of float
-        Radial velocity of the source, expressed in units of km / sec. Can either be
-        a single float or array of shape (Ntimes,), although this must be consistent
-        with other parameters (namely ra and dec). Not required.
-    dist : float or array-like of float
-        Distance of the source, expressed in parsecs. Can either be a single
-        float or array of shape (Ntimes,), although this must be consistent with other
-        parameters (namely ra and dec). Not required.
-    astrometry_library : str
-        Library used for running the coordinate conversions. Allowed options are
-        'erfa' (which uses the pyERFA library), 'novas' (which uses the python-novas
-        library), and 'astropy' (which uses the astropy utilities). Default is erfa
-        unless the telescope_loc is a MoonLocation object, in which case the default
-        is astropy.
-
-    Returns
-    -------
-    app_ra : ndarray of floats
-        Apparent right ascension coordinates, in units of radians, of shape (Ntimes,).
-    app_dec : ndarray of floats
-        Apparent declination coordinates, in units of radians, of shape (Ntimes,).
-    """
-    if telescope_frame.upper() == "MCMF":
-        if not hasmoon:
-            raise ValueError(
-                "Need to install `lunarsky` package to work with MCMF frame."
-            )
-        if ellipsoid is None:
-            ellipsoid = "SPHERE"
-
-    # Make sure that the library requested is actually permitted
-    if astrometry_library is None:
-        if hasmoon and isinstance(telescope_loc, MoonLocation):
-            astrometry_library = "astropy"
-        elif telescope_frame.upper() == "MCMF":
-            astrometry_library = "astropy"
-        else:
-            astrometry_library = "erfa"
-
-    if astrometry_library not in ["erfa", "novas", "astropy"]:
-        raise ValueError(
-            "Requested coordinate transformation library is not supported, please "
-            "select either 'erfa', 'novas', or 'astropy' for astrometry_library."
-        )
-    ra_coord = ra * units.rad
-    dec_coord = dec * units.rad
-
-    # Check here to make sure that ra_coord and dec_coord are the same length,
-    # either 1 or len(time_array)
-    multi_coord = ra_coord.size != 1
-    if ra_coord.shape != dec_coord.shape:
-        raise ValueError("ra and dec must be the same shape.")
-
-    pm_ra_coord = None if pm_ra is None else pm_ra * (units.mas / units.yr)
-    pm_dec_coord = None if pm_dec is None else pm_dec * (units.mas / units.yr)
-    d_coord = (
-        None if (dist is None or np.all(dist == 0.0)) else Distance(dist * units.pc)
-    )
-    v_coord = None if vrad is None else vrad * (units.km / units.s)
-
-    opt_list = [pm_ra_coord, pm_dec_coord, d_coord, v_coord]
-    opt_names = ["pm_ra", "pm_dec", "dist", "vrad"]
-    # Check the optional inputs, make sure that they're sensible
-    for item, name in zip(opt_list, opt_names):
-        if item is not None:
-            if ra_coord.shape != item.shape:
-                raise ValueError("%s must be the same shape as ra and dec." % name)
-
-    if isinstance(telescope_loc, EarthLocation) or (
-        hasmoon and isinstance(telescope_loc, MoonLocation)
-    ):
-        site_loc = telescope_loc
-    elif telescope_frame.upper() == "MCMF":
-        site_loc = MoonLocation.from_selenodetic(
-            telescope_loc[1] * (180.0 / np.pi),
-            telescope_loc[0] * (180.0 / np.pi),
-            height=telescope_loc[2],
-            ellipsoid=ellipsoid,
-        )
-    else:
-        site_loc = EarthLocation.from_geodetic(
-            telescope_loc[1] * (180.0 / np.pi),
-            telescope_loc[0] * (180.0 / np.pi),
-            height=telescope_loc[2],
-        )
-
-    if (
-        hasmoon
-        and isinstance(site_loc, MoonLocation)
-        and astrometry_library != "astropy"
-    ):
-        raise NotImplementedError(
-            "MoonLocation telescopes are only supported with the 'astropy' astrometry "
-            "library"
-        )
-
-    # Useful for both astropy and novas methods, the latter of which gives easy
-    # access to the IERS data that we want.
-    if isinstance(time_array, Time):
-        time_obj_array = time_array
-    else:
-        time_obj_array = Time(time_array, format="jd", scale="utc")
-
-    if time_obj_array.size != 1:
-        if (time_obj_array.shape != ra_coord.shape) and multi_coord:
-            raise ValueError(
-                "time_array must be either of length 1 (single "
-                "float) or the same length as ra and dec."
-            )
-    elif time_obj_array.ndim == 0:
-        # Make the array at least 1-dimensional so we don't run into indexing
-        # issues later.
-        time_obj_array = Time([time_obj_array])
-
-    # Check to make sure that we have a properly formatted epoch for our in-bound
-    # coordinate frame
-    coord_epoch = None
-    if isinstance(epoch, str) or isinstance(epoch, Time):
-        # If it's a string or a Time object, we don't need to do anything more
-        coord_epoch = Time(epoch)
-    elif epoch is not None:
-        coord_epoch = Time(epoch, format="jyear")
-
-    # Note if time_array is a single element
-    multi_time = time_obj_array.size != 1
-
-    # Get IERS data, which is needed for NOVAS and ERFA
-    polar_motion_data = iers.earth_orientation_table.get()
-
-    pm_x_array, pm_y_array = polar_motion_data.pm_xy(time_obj_array)
-    delta_x_array, delta_y_array = polar_motion_data.dcip_xy(time_obj_array)
-
-    pm_x_array = pm_x_array.to_value("arcsec")
-    pm_y_array = pm_y_array.to_value("arcsec")
-    delta_x_array = delta_x_array.to_value("marcsec")
-    delta_y_array = delta_y_array.to_value("marcsec")
-    # Catch the case where we don't have CIP delta values yet (they don't typically
-    # have predictive values like the polar motion does)
-    delta_x_array[np.isnan(delta_x_array)] = 0.0
-    delta_y_array[np.isnan(delta_y_array)] = 0.0
-
-    # If the source was instantiated w/ floats, it'll be a 0-dim object, which will
-    # throw errors if we try to treat it as an array. Reshape to a 1D array of len 1
-    # so that all the calls can be uniform
-    if ra_coord.ndim == 0:
-        ra_coord.shape += (1,)
-        dec_coord.shape += (1,)
-        if pm_ra_coord is not None:
-            pm_ra_coord.shape += (1,)
-        if d_coord is not None:
-            d_coord.shape += (1,)
-        if v_coord is not None:
-            v_coord.shape += (1,)
-
-    # If there is an epoch and a proper motion, apply that motion now
-
-    if astrometry_library == "astropy":
-        # Astropy doesn't have (oddly enough) a way of getting at the apparent RA/Dec
-        # directly, but we can cheat this by going to AltAz, and then converting back
-        # to apparent RA/Dec using the telescope lat and LAST.
-        if (epoch is not None) and (pm_ra is not None) and (pm_dec is not None):
-            # astropy is a bit weird in how it handles proper motion, so rather than
-            # fight with it to do it all in one step, we separate it into two: first
-            # apply proper motion to ICRS, then transform to topocentric.
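-            # Note that pm_ra here is dRA/dt, while astropy's SkyCoord expects
-            # the cos(Dec)-scaled rate, hence the np.cos(dec_coord) factor in
-            # pm_ra_cosdec below.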
-            sky_coord = SkyCoord(
-                ra=ra_coord,
-                dec=dec_coord,
-                pm_ra_cosdec=pm_ra_coord * np.cos(dec_coord),
-                pm_dec=pm_dec_coord,
-                frame="icrs",
-            )
-
-            sky_coord = sky_coord.apply_space_motion(dt=(time_obj_array - coord_epoch))
-            ra_coord = sky_coord.ra
-            dec_coord = sky_coord.dec
-            if d_coord is not None:
-                d_coord = d_coord.repeat(ra_coord.size)
-            if v_coord is not None:
-                v_coord = v_coord.repeat(ra_coord.size)
-
-        if isinstance(site_loc, EarthLocation):
-            time_obj_array = Time(time_obj_array, location=site_loc)
-
-            sky_coord = SkyCoord(
-                ra=ra_coord,
-                dec=dec_coord,
-                distance=d_coord,
-                radial_velocity=v_coord,
-                frame="icrs",
-            )
-
-            azel_data = sky_coord.transform_to(
-                SkyCoord(
-                    np.zeros_like(time_obj_array) * units.rad,
-                    np.zeros_like(time_obj_array) * units.rad,
-                    location=site_loc,
-                    obstime=time_obj_array,
-                    frame="altaz",
-                )
-            )
-        else:
-            sky_coord = LunarSkyCoord(
-                ra=ra_coord,
-                dec=dec_coord,
-                distance=d_coord,
-                radial_velocity=v_coord,
-                frame="icrs",
-            )
-
-            azel_data = sky_coord.transform_to(
-                LunarSkyCoord(
-                    np.zeros_like(time_obj_array) * units.rad,
-                    np.zeros_like(time_obj_array) * units.rad,
-                    location=site_loc,
-                    obstime=time_obj_array,
-                    frame="lunartopo",
-                )
-            )
-            time_obj_array = LTime(time_obj_array, location=site_loc)
-
-        app_ha, app_dec = erfa.ae2hd(
-            azel_data.az.rad, azel_data.alt.rad, site_loc.lat.rad
-        )
-        app_ra = np.mod(
-            time_obj_array.sidereal_time("apparent").rad - app_ha, 2 * np.pi
-        )
-
-    elif astrometry_library == "novas":
-        # Import the NOVAS library only if it's needed/available.
-        try:
-            import novas_de405  # noqa
-            from novas import compat as novas
-            from novas.compat import eph_manager
-        except ImportError as e:  # pragma: no cover
-            raise ImportError(
-                "novas and/or novas_de405 are not installed but are required for "
-                "NOVAS functionality"
-            ) from e
-
-        # Call is needed to load high-precision ephem data in NOVAS
-        jd_start, jd_end, number = eph_manager.ephem_open()
-
-        # Define the obs location, which is needed to calculate the diurnal
-        # aberration term and polar wobble corrections
-        site_loc = novas.make_on_surface(
-            site_loc.lat.deg,  # latitude in deg
-            site_loc.lon.deg,  # Longitude in deg
-            site_loc.height.to_value("m"),  # Height in meters
-            0.0,  # Temperature, set to 0 for now (no atm refrac)
-            0.0,  # Pressure, set to 0 for now (no atm refrac)
-        )
-
-        # NOVAS wants things in terrestrial time and UT1
-        tt_time_array = time_obj_array.tt.jd
-        ut1_time_array = time_obj_array.ut1.jd
-        gast_array = time_obj_array.sidereal_time("apparent", "greenwich").rad
-
-        if np.any(tt_time_array < jd_start) or np.any(tt_time_array > jd_end):
-            raise ValueError(
-                "No current support for JPL ephems outside of 1700 - 2300 AD. "
-                "Check back later (or possibly earlier)..."
-            )
-
-        app_ra = np.zeros(tt_time_array.shape) + np.zeros(ra_coord.shape)
-        app_dec = np.zeros(tt_time_array.shape) + np.zeros(ra_coord.shape)
-
-        for idx in range(len(app_ra)):
-            if multi_coord or (idx == 0):
-                # Create a catalog entry for the source in question
-                if pm_ra is None:
-                    pm_ra_use = 0.0
-                else:
-                    pm_ra_use = pm_ra_coord.to_value("mas/yr") * np.cos(
-                        dec_coord[idx].to_value("rad")
-                    )
-
-                if pm_dec is None:
-                    pm_dec_use = 0.0
-                else:
-                    pm_dec_use = pm_dec_coord.to_value("mas/yr")
-
-                if dist is None or np.any(dist == 0.0):
-                    parallax = 0.0
-                else:
-                    parallax = d_coord[idx].kiloparsec ** -1.0
-
-                if vrad is None:
-                    vrad_use = 0.0
-                else:
-                    vrad_use = v_coord[idx].to_value("km/s")
-
-                cat_entry = novas.make_cat_entry(
-                    "dummy_name",  # Dummy source name
-                    "GKK",  # Catalog ID, fixed for now
-                    156,  # Star ID number, fixed for now
-                    ra_coord[idx].to_value("hourangle"),
-                    dec_coord[idx].to_value("deg"),
-                    pm_ra_use,
-                    pm_dec_use,
-                    parallax,
-                    vrad_use,
-                )
-
-            # Update polar wobble parameters for a given timestamp
-            if multi_time or (idx == 0):
-                gast = gast_array[idx]
-                pm_x = pm_x_array[idx] * np.cos(gast) + pm_y_array[idx] * np.sin(gast)
-                pm_y = pm_y_array[idx] * np.cos(gast) - pm_x_array[idx] * np.sin(gast)
-                tt_time = tt_time_array[idx]
-                ut1_time = ut1_time_array[idx]
-                novas.cel_pole(tt_time, 2, delta_x_array[idx], delta_y_array[idx])
-
-            # Calculate topocentric RA/Dec values
-            [temp_ra, temp_dec] = novas.topo_star(
-                tt_time, (tt_time - ut1_time) * 86400.0, cat_entry, site_loc, accuracy=0
-            )
-            xyz_array = polar2_to_cart3(
-                lon_array=temp_ra * (np.pi / 12.0), lat_array=temp_dec * (np.pi / 180.0)
-            )
-            xyz_array = novas.wobble(tt_time, pm_x, pm_y, xyz_array, 1)
-
-            app_ra[idx], app_dec[idx] = cart3_to_polar2(np.array(xyz_array))
-    elif astrometry_library == "erfa":
-        # liberfa wants things in radians
-        pm_x_array *= np.pi / (3600.0 * 180.0)
-        pm_y_array *= np.pi / (3600.0 * 180.0)
-
-        if pm_ra is None:
-            pm_ra_use = 0.0
-        else:
-            pm_ra_use = pm_ra_coord.to_value("rad/yr")
-
-        if pm_dec is None:
-            pm_dec_use = 0.0
-        else:
-            pm_dec_use = pm_dec_coord.to_value("rad/yr")
-
-        if dist is None or np.any(dist == 0.0):
-            parallax = 0.0
-        else:
-            parallax = d_coord.pc**-1.0
-
-        if vrad is None:
-            vrad_use = 0.0
-        else:
-            vrad_use = v_coord.to_value("km/s")
-
-        [_, _, _, app_dec, app_ra, eqn_org] = erfa.atco13(
-            ra_coord.to_value("rad"),
-            dec_coord.to_value("rad"),
-            pm_ra_use,
-            pm_dec_use,
-            parallax,
-            vrad_use,
-            time_obj_array.utc.jd1,
-            time_obj_array.utc.jd2,
-            time_obj_array.delta_ut1_utc,
-            site_loc.lon.rad,
-            site_loc.lat.rad,
-            site_loc.height.to_value("m"),
-            pm_x_array,
-            pm_y_array,
-            0,  # atm pressure, used for refraction (ignored)
-            0,  # amb temperature, used for refraction (ignored)
-            0,  # rel humidity, used for refraction (ignored)
-            0,  # wavelength, used for refraction (ignored)
-        )
-
-        app_ra = np.mod(app_ra - eqn_org, 2 * np.pi)
-
-    return app_ra, app_dec
-
-
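-# Hypothetical usage sketch (site and source values illustrative): apparent
-# coordinates of an ICRS position as seen from a site at a (lat, lon, alt) of
-# (-0.467 rad, 2.038 rad, 377.8 m):
-#
-#     >>> import numpy as np
-#     >>> app_ra, app_dec = transform_icrs_to_app(
-#     ...     time_array=np.array([2459000.5]),
-#     ...     ra=1.0,
-#     ...     dec=-0.5,
-#     ...     telescope_loc=(-0.467, 2.038, 377.8),
-#     ... )
-
-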
Note that at present, - this is only implemented in astropy and pyERFA, although it could hypothetically - be extended to NOVAS at some point. - - Parameters - ---------- - time_array : float or ndarray of float - Julian dates to calculate coordinate positions for. Can either be a single - float, or an ndarray of shape (Ntimes,). - app_ra : float or ndarray of float - ICRS RA of the celestial target, expressed in units of radians. Can either - be a single float or array of shape (Ncoord,). Note that if time_array is - not a singleton value, then Ncoord must be equal to Ntimes. - app_dec : float or ndarray of float - ICRS Dec of the celestial target, expressed in units of radians. Can either - be a single float or array of shape (Ncoord,). Note that if time_array is - not a singleton value, then Ncoord must be equal to Ntimes. - telescope_loc : tuple of floats or EarthLocation - ITRF latitude, longitude, and altitude (rel to sea-level) of the phase center - of the array. Can either be provided as an astropy EarthLocation, or a tuple - of shape (3,) containing (in order) the latitude, longitude, and altitude, - in units of radians, radians, and meters, respectively. - telescope_frame: str, optional - Reference frame for telescope location. Options are itrs (default) or mcmf. - Only used if telescope_loc is not an EarthLocation or MoonLocation. - ellipsoid : str - Ellipsoid to use for lunar coordinates. Must be one of "SPHERE", - "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default - is "SPHERE". Only used if frame is mcmf. - astrometry_library : str - Library used for running the coordinate conversions. Allowed options are - 'erfa' (which uses the pyERFA), and 'astropy' (which uses the astropy - utilities). Default is erfa unless the telescope_location is a MoonLocation - object, in which case the default is astropy. - - Returns - ------- - icrs_ra : ndarray of floats - ICRS right ascension coordinates, in units of radians, of either shape - (Ntimes,) if Ntimes >1, otherwise (Ncoord,). - icrs_dec : ndarray of floats - ICRS declination coordinates, in units of radians, of either shape - (Ntimes,) if Ntimes >1, otherwise (Ncoord,). - """ - if telescope_frame.upper() == "MCMF": - if not hasmoon: - raise ValueError( - "Need to install `lunarsky` package to work with MCMF frame." - ) - - # Make sure that the library requested is actually permitted - if astrometry_library is None: - if hasmoon and isinstance(telescope_loc, MoonLocation): - astrometry_library = "astropy" - elif telescope_frame.upper() == "MCMF": - astrometry_library = "astropy" - else: - astrometry_library = "erfa" - - if astrometry_library not in ["erfa", "astropy"]: - raise ValueError( - "Requested coordinate transformation library is not supported, please " - "select either 'erfa' or 'astropy' for astrometry_library." 
-        )
-
-    ra_coord = app_ra * units.rad
-    dec_coord = app_dec * units.rad
-
-    # Check here to make sure that ra_coord and dec_coord are the same length,
-    # either 1 or len(time_array)
-    multi_coord = ra_coord.size != 1
-    if ra_coord.shape != dec_coord.shape:
-        raise ValueError("app_ra and app_dec must be the same shape.")
-
-    if isinstance(telescope_loc, EarthLocation) or (
-        hasmoon and isinstance(telescope_loc, MoonLocation)
-    ):
-        site_loc = telescope_loc
-    elif telescope_frame.upper() == "MCMF":
-        site_loc = MoonLocation.from_selenodetic(
-            telescope_loc[1] * (180.0 / np.pi),
-            telescope_loc[0] * (180.0 / np.pi),
-            height=telescope_loc[2],
-            ellipsoid=ellipsoid,
-        )
-    else:
-        site_loc = EarthLocation.from_geodetic(
-            telescope_loc[1] * (180.0 / np.pi),
-            telescope_loc[0] * (180.0 / np.pi),
-            height=telescope_loc[2],
-        )
-
-    if (
-        hasmoon
-        and isinstance(site_loc, MoonLocation)
-        and astrometry_library != "astropy"
-    ):
-        raise NotImplementedError(
-            "MoonLocation telescopes are only supported with the 'astropy' astrometry "
-            "library"
-        )
-
-    assert time_array.size > 0
-    if isinstance(time_array, Time):
-        time_obj_array = time_array
-    else:
-        time_obj_array = Time(time_array, format="jd", scale="utc")
-
-    if time_obj_array.size != 1:
-        if (time_obj_array.shape != ra_coord.shape) and multi_coord:
-            raise ValueError(
-                "time_array must be either of length 1 (single "
-                "float) or the same length as ra and dec."
-            )
-    elif time_obj_array.ndim == 0:
-        # Make the array at least 1-dimensional so we don't run into indexing
-        # issues later.
-        time_obj_array = Time([time_obj_array])
-
-    if astrometry_library == "astropy":
-        if hasmoon and isinstance(site_loc, MoonLocation):
-            time_obj_array = LTime(time_obj_array, location=site_loc)
-        else:
-            time_obj_array = Time(time_obj_array, location=site_loc)
-
-        az_coord, el_coord = erfa.hd2ae(
-            np.mod(
-                time_obj_array.sidereal_time("apparent").rad - ra_coord.to_value("rad"),
-                2 * np.pi,
-            ),
-            dec_coord.to_value("rad"),
-            site_loc.lat.rad,
-        )
-
-        if isinstance(site_loc, EarthLocation):
-            sky_coord = SkyCoord(
-                az_coord * units.rad,
-                el_coord * units.rad,
-                frame="altaz",
-                location=site_loc,
-                obstime=time_obj_array,
-            )
-        else:
-            sky_coord = LunarSkyCoord(
-                az_coord * units.rad,
-                el_coord * units.rad,
-                frame="lunartopo",
-                location=site_loc,
-                obstime=time_obj_array,
-            )
-
-        coord_data = sky_coord.transform_to("icrs")
-        icrs_ra = coord_data.ra.rad
-        icrs_dec = coord_data.dec.rad
-    elif astrometry_library == "erfa":
-        # Get IERS data, which is needed for highest precision
-        polar_motion_data = iers.earth_orientation_table.get()
-
-        pm_x_array, pm_y_array = polar_motion_data.pm_xy(time_obj_array)
-        pm_x_array = pm_x_array.to_value("rad")
-        pm_y_array = pm_y_array.to_value("rad")
-
-        bpn_matrix = erfa.pnm06a(time_obj_array.tt.jd1, time_obj_array.tt.jd2)
-        cip_x, cip_y = erfa.bpn2xy(bpn_matrix)
-        cio_s = erfa.s06(time_obj_array.tt.jd1, time_obj_array.tt.jd2, cip_x, cip_y)
-        eqn_org = erfa.eors(bpn_matrix, cio_s)
-
-        # Observed to ICRS via ERFA
-        icrs_ra, icrs_dec = erfa.atoc13(
-            "r",
-            ra_coord.to_value("rad") + eqn_org,
-            dec_coord.to_value("rad"),
-            time_obj_array.utc.jd1,
-            time_obj_array.utc.jd2,
-            time_obj_array.delta_ut1_utc,
-            site_loc.lon.rad,
-            site_loc.lat.rad,
-            site_loc.height.value,
-            pm_x_array,
-            pm_y_array,
-            0,  # atm pressure, used for refraction (ignored)
-            0,  # amb temperature, used for refraction (ignored)
-            0,  # rel humidity, used for refraction (ignored)
-            0,  # wavelength, used for refraction (ignored)
-        )
-
-    # Return the two RA/Dec arrays
-    return icrs_ra, icrs_dec
-
-
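-# Hypothetical round-trip sketch (values illustrative): apparent coordinates
-# from transform_icrs_to_app can be fed back through transform_app_to_icrs to
-# recover the catalog position for the same site and times:
-#
-#     >>> icrs_ra, icrs_dec = transform_app_to_icrs(
-#     ...     time_array=np.array([2459000.5]),
-#     ...     app_ra=app_ra,
-#     ...     app_dec=app_dec,
-#     ...     telescope_loc=(-0.467, 2.038, 377.8),
-#     ... )
-
-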
-def calc_parallactic_angle(*, app_ra, app_dec, lst_array, telescope_lat):
-    """
-    Calculate the parallactic angle between RA/Dec and the AltAz frame.
-
-    Parameters
-    ----------
-    app_ra : ndarray of floats
-        Array of apparent RA values in units of radians, shape (Ntimes,).
-    app_dec : ndarray of floats
-        Array of apparent dec values in units of radians, shape (Ntimes,).
-    lst_array : float or ndarray of float
-        Array of local apparent sidereal times to calculate position angle values
-        for, in units of radians. Can either be a single float or an array of shape
-        (Ntimes,).
-    telescope_lat : float
-        Latitude of the observatory, in units of radians.
-
-    Returns
-    -------
-    float or ndarray of float
-        Parallactic angle(s), in units of radians.
-    """
-    # This is just a simple wrapper around the pas function in ERFA
-    return erfa.pas(app_ra, app_dec, lst_array, telescope_lat)
-
-
-def calc_frame_pos_angle(
-    *,
-    time_array,
-    app_ra,
-    app_dec,
-    telescope_loc,
-    ref_frame,
-    ref_epoch=None,
-    telescope_frame="itrs",
-    ellipsoid="SPHERE",
-    offset_pos=(np.pi / 360.0),
-):
-    """
-    Calculate a position angle given apparent position and reference frame.
-
-    This function is used to determine the position angle between the great
-    circle of declination in apparent coordinates, versus that in a given
-    reference frame. Note that this is slightly different than parallactic
-    angle, which is the difference between apparent declination and elevation.
-
-    Parameters
-    ----------
-    time_array : ndarray of floats
-        Array of julian dates to calculate position angle values for, of shape
-        (Ntimes,).
-    app_ra : ndarray of floats
-        Array of apparent RA values in units of radians, shape (Ntimes,).
-    app_dec : ndarray of floats
-        Array of apparent dec values in units of radians, shape (Ntimes,).
-    telescope_loc : tuple of floats or EarthLocation
-        ITRF latitude, longitude, and altitude (rel to sea-level) of the observer.
-        Can either be provided as an astropy EarthLocation, or an array-like of shape
-        (3,) containing the latitude, longitude, and altitude, in that order, with units
-        of radians, radians, and meters, respectively.
-    ref_frame : str
-        Coordinate frame to calculate position angles for. Can be any of the
-        several supported frames in astropy (a limited list: fk4, fk5, icrs,
-        gcrs, cirs, galactic).
-    ref_epoch : str or float
-        Epoch of the coordinates, only used when ref_frame = fk4 or fk5. Given
-        in units of fractional years, either as a float or as a string with
-        the epoch abbreviation (e.g., Julian epoch 2000.0 would be J2000.0).
-    telescope_frame : str, optional
-        Reference frame for telescope location. Options are itrs (default) or mcmf.
-        Only used if telescope_loc is not an EarthLocation or MoonLocation.
-    ellipsoid : str
-        Ellipsoid to use for lunar coordinates. Must be one of "SPHERE",
-        "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default
-        is "SPHERE". Only used if frame is mcmf.
-    offset_pos : float
-        Distance of the offset position used to calculate the frame PA. Default
-        is 0.5 degrees, which should be sufficient for most applications.
-
-    Returns
-    -------
-    frame_pa : ndarray of floats
-        Array of position angles, in units of radians.
-    """
-    # Check to see if the position angles should default to zero
-    if (ref_frame is None) or (ref_frame == "topo"):
-        # No-op detected, ENGAGE MAXIMUM SNARK!
-        return np.zeros_like(time_array)
-
-    assert offset_pos > 0, "offset_pos must be greater than 0."
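-
-    # Summary of the approach below (descriptive note): for each unique
-    # (ra, dec, time) triple, the position is offset north and south along the
-    # declination axis by offset_pos, both offset points are transformed into
-    # ref_frame, and the position angle of the arc connecting them gives the
-    # rotation between the two frames at that position.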
-
-    # This creates an array of unique entries of ra + dec + time, since the processing
-    # time for each element can be non-negligible, and entries along the Nblt axis can
-    # be highly redundant.
-    unique_mask = np.union1d(
-        np.union1d(
-            np.unique(app_ra, return_index=True)[1],
-            np.unique(app_dec, return_index=True)[1],
-        ),
-        np.unique(time_array, return_index=True)[1],
-    )
-
-    # Pluck out the unique entries for each
-    unique_ra = app_ra[unique_mask]
-    unique_dec = app_dec[unique_mask]
-    unique_time = time_array[unique_mask]
-
-    # Figure out how many elements we need to transform
-    n_coord = len(unique_mask)
-
-    # Offset north/south positions by 0.5 deg, such that the PA is determined over a
-    # 1 deg arc. Copies are used so the in-place wraps below don't mutate unique_ra.
-    up_dec = unique_dec + offset_pos
-    dn_dec = unique_dec - offset_pos
-    up_ra = unique_ra.copy()
-    dn_ra = unique_ra.copy()
-
-    # Wrap the positions if they happen to go over the poles
-    up_ra[up_dec > (np.pi / 2.0)] = np.mod(
-        up_ra[up_dec > (np.pi / 2.0)] + np.pi, 2.0 * np.pi
-    )
-    up_dec[up_dec > (np.pi / 2.0)] = np.pi - up_dec[up_dec > (np.pi / 2.0)]
-
-    dn_ra[-dn_dec > (np.pi / 2.0)] = np.mod(
-        dn_ra[-dn_dec > (np.pi / 2.0)] + np.pi, 2.0 * np.pi
-    )
-    dn_dec[-dn_dec > (np.pi / 2.0)] = -np.pi - dn_dec[-dn_dec > (np.pi / 2.0)]
-
-    # Run the set of offset coordinates through the "reverse" transform. The two offset
-    # positions are concat'd together to help reduce overheads
-    ref_ra, ref_dec = calc_sidereal_coords(
-        time_array=np.tile(unique_time, 2),
-        app_ra=np.concatenate((dn_ra, up_ra)),
-        app_dec=np.concatenate((dn_dec, up_dec)),
-        telescope_loc=telescope_loc,
-        coord_frame=ref_frame,
-        telescope_frame=telescope_frame,
-        ellipsoid=ellipsoid,
-        coord_epoch=ref_epoch,
-    )
-
-    # Use the pas function from ERFA to calculate the position angle. The negative sign
-    # is here because we're measuring PA of app -> frame, but we want frame -> app.
-    unique_pa = -erfa.pas(
-        ref_ra[:n_coord], ref_dec[:n_coord], ref_ra[n_coord:], ref_dec[n_coord:]
-    )
-
-    # Finally, we have to go back through and "fill in" the redundant entries
-    frame_pa = np.zeros_like(app_ra)
-    for idx in range(n_coord):
-        select_mask = np.logical_and(
-            np.logical_and(unique_ra[idx] == app_ra, unique_dec[idx] == app_dec),
-            unique_time[idx] == time_array,
-        )
-        frame_pa[select_mask] = unique_pa[idx]
-
-    return frame_pa
-
-
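-# Hypothetical usage sketch (values illustrative): the angle between apparent
-# declination and ICRS declination for a single time and position:
-#
-#     >>> frame_pa = calc_frame_pos_angle(
-#     ...     time_array=np.array([2459000.5]),
-#     ...     app_ra=np.array([1.0]),
-#     ...     app_dec=np.array([-0.5]),
-#     ...     telescope_loc=(-0.467, 2.038, 377.8),
-#     ...     ref_frame="icrs",
-#     ... )
-
-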
-def lookup_jplhorizons(
-    target_name,
-    time_array,
-    *,
-    telescope_loc=None,
-    high_cadence=False,
-    force_indv_lookup=None,
-):
-    """
-    Lookup solar system body coordinates via the JPL-Horizons service.
-
-    This utility is useful for generating ephemerides, which can then be interpolated
-    in order to provide positional data for a target which is moving, such as planetary
-    bodies and other solar system objects. Use of this function requires the
-    installation of the `astroquery` module.
-
-    Parameters
-    ----------
-    target_name : str
-        Name of the target to gather an ephemeris for. Must match the name
-        in the JPL-Horizons database.
-    time_array : array-like of float
-        Times in UTC Julian days to gather an ephemeris for.
-    telescope_loc : tuple of floats or EarthLocation
-        ITRS latitude, longitude, and altitude (rel to sea-level) of the observer.
-        Can either be provided as an EarthLocation object, or an
-        array-like of shape (3,) containing the latitude, longitude, and altitude,
-        in that order, with units of radians, radians, and meters, respectively.
-    high_cadence : bool
-        If set to True, will calculate ephemeris points every 3 minutes in time, as
-        opposed to the default of every 3 hours.
-    force_indv_lookup : bool
-        If set to True, will calculate coordinate values for each value found within
-        `time_array`. If False, a regularized time grid is sampled that encloses the
-        values contained within `time_array`. Default is False, unless `time_array`
-        is of length 1, in which case the default is set to True.
-
-    Returns
-    -------
-    ephem_times : ndarray of float
-        Times for which the ephemeris values were calculated, in UTC Julian days.
-    ephem_ra : ndarray of float
-        ICRS Right ascension of the target at the values within `ephem_times`, in
-        units of radians.
-    ephem_dec : ndarray of float
-        ICRS Declination of the target at the values within `ephem_times`, in units
-        of radians.
-    ephem_dist : ndarray of float
-        Distance of the target relative to the observer, at the values within
-        `ephem_times`, in units of AU.
-    ephem_vel : ndarray of float
-        Velocity of the target relative to the observer, at the values within
-        `ephem_times`, in units of km/sec.
-    """
-    try:
-        from astroquery.jplhorizons import Horizons
-    except ImportError as err:  # pragma: no cover
-        raise ImportError(
-            "astroquery is not installed but is required for "
-            "planet ephemeris functionality"
-        ) from err
-    from json import load as json_load
-    from os.path import join as path_join
-
-    from pyuvdata.data import DATA_PATH
-
-    # Get the telescope location into a format that JPL-Horizons can understand,
-    # which is nominally a dict w/ entries for lon (units of deg), lat (units of
-    # deg), and elevation (units of km).
-    if isinstance(telescope_loc, EarthLocation):
-        site_loc = {
-            "lon": telescope_loc.lon.deg,
-            "lat": telescope_loc.lat.deg,
-            "elevation": telescope_loc.height.to_value(unit=units.km),
-        }
-    elif hasmoon and isinstance(telescope_loc, MoonLocation):
-        raise NotImplementedError(
-            "Cannot lookup JPL positions for telescopes with a MoonLocation"
-        )
-    elif telescope_loc is None:
-        # Setting to None will report the geocentric position
-        site_loc = None
-    else:
-        site_loc = {
-            "lon": telescope_loc[1] * (180.0 / np.pi),
-            "lat": telescope_loc[0] * (180.0 / np.pi),
-            "elevation": telescope_loc[2] * (0.001),  # m -> km
-        }
-
-    # If force_indv_lookup is True, or unset but only providing a single value, then
-    # just calculate the RA/Dec for the times requested rather than creating a table
-    # to interpolate from.
-    if force_indv_lookup or (
-        (np.array(time_array).size == 1) and (force_indv_lookup is None)
-    ):
-        epoch_list = np.unique(time_array)
-        if len(epoch_list) > 50:
-            raise ValueError(
-                "Requesting too many individual ephem points from JPL-Horizons. This "
-                "can be remedied by setting force_indv_lookup=False or limiting the "
-                "number of values in time_array."
-            )
-    else:
-        # When querying for multiple times, it's faster (and kinder to the
-        # good folks at JPL) to create a range to query, and then interpolate
-        # between values. The extra buffer of 0.001 or 0.25 days for high and
-        # low cadence is to give enough data points to allow for spline
-        # interpolation of the data.
-        if high_cadence:
-            start_time = np.min(time_array) - 0.001
-            stop_time = np.max(time_array) + 0.001
-            step_time = "3m"
-            n_entries = (stop_time - start_time) * (1440.0 / 3.0)
-        else:
-            # The start/stop times here are set up to maximize reusability of the
-            # data, since astroquery appears to cache the results from previous
-            # queries.
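-            # Snapping the start/stop times to 0.25-day boundaries below means
-            # that repeated calls over similar time windows resolve to the same
-            # query, and so can be served from that cache.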
-            start_time = (0.25 * np.floor(4.0 * np.min(time_array))) - 0.25
-            stop_time = (0.25 * np.ceil(4.0 * np.max(time_array))) + 0.25
-            step_time = "3h"
-            n_entries = (stop_time - start_time) * (24.0 / 3.0)
-        # We don't want to overtax the JPL service, so limit ourselves to 1000
-        # individual queries at a time. Note that this is likely a conservative
-        # cap for JPL-Horizons, but there should be exceptionally few applications
-        # that actually require more than this.
-        if n_entries > 1000:
-            if (len(np.unique(time_array)) <= 50) and (force_indv_lookup is None):
-                # If we have a _very_ sparse set of epochs, pass that along instead
-                epoch_list = np.unique(time_array)
-            else:
-                # Otherwise, time to raise an error
-                raise ValueError(
-                    "Too many ephem points requested from JPL-Horizons. This "
-                    "can be remedied by setting high_cadence=False or limiting "
-                    "the number of values in time_array."
-                )
-        else:
-            epoch_list = {
-                "start": Time(start_time, format="jd").isot,
-                "stop": Time(stop_time, format="jd").isot,
-                "step": step_time,
-            }
-    # Check to make sure dates are within the 1700-2200 time range,
-    # since not all targets are supported outside of this range
-    if (np.min(time_array) < 2341973.0) or (np.max(time_array) > 2524593.0):
-        raise ValueError(
-            "No current support for JPL ephems outside of 1700 - 2300 AD. "
-            "Check back later (or possibly earlier)..."
-        )
-
-    # JPL-Horizons has a separate catalog with what it calls 'major bodies',
-    # and will throw an error if you use the wrong catalog when calling for
-    # astrometry. We'll use the dict below to capture this behavior.
-    with open(path_join(DATA_PATH, "jpl_major_bodies.json"), "r") as fhandle:
-        major_body_dict = json_load(fhandle)
-
-    target_id = target_name
-    id_type = "smallbody"
-    # If we find the target in the major body database, then we can extract the
-    # target ID to make the query a bit more robust (otherwise JPL-Horizons will fail
-    # on account that id will find multiple partial matches: e.g., "Mars" will be
-    # matched with "Mars", "Mars Explorer", "Mars Barycenter"..., and JPL-Horizons will
-    # not know which to choose).
-    if target_name in major_body_dict.keys():
-        target_id = major_body_dict[target_name]
-        id_type = None
-
-    query_obj = Horizons(
-        id=target_id, location=site_loc, epochs=epoch_list, id_type=id_type
-    )
-    # If not in the major bodies catalog, try the minor bodies list, and if
-    # still not found, throw an error.
-    try:
-        ephem_data = query_obj.ephemerides(extra_precision=True)
-    except KeyError:
-        # This is a fix for an astroquery + JPL-Horizons bug that's related to
-        # an API change on JPL's side. In this case, the source is identified, but
-        # astroquery can't correctly parse the return message from JPL-Horizons.
-        # See astroquery issue #2169.
-        ephem_data = query_obj.ephemerides(extra_precision=False)  # pragma: no cover
-    except ValueError as err:
-        query_obj._session.close()
-        if "Unknown target" in str(err):
-            raise ValueError(
-                "Target ID is not recognized in either the small or major bodies "
-                "catalogs, please consult the JPL-Horizons database for supported "
-                "targets (https://ssd.jpl.nasa.gov/?horizons)."
-            ) from err
-        else:
-            raise  # pragma: no cover
-    # This is explicitly closed here to trap a bug that occasionally throws an
-    # unexpected warning, see astroquery issue #1807
-    query_obj._session.close()
-
-    # Now that we have the ephem data, extract out the relevant data
-    ephem_times = np.array(ephem_data["datetime_jd"])
-    ephem_ra = np.array(ephem_data["RA"]) * (np.pi / 180.0)
-    ephem_dec = np.array(ephem_data["DEC"]) * (np.pi / 180.0)
-    ephem_dist = np.array(ephem_data["delta"])  # AU
-    ephem_vel = np.array(ephem_data["delta_rate"])  # km/s
-
-    return ephem_times, ephem_ra, ephem_dec, ephem_dist, ephem_vel
-
-
-def interpolate_ephem(
-    *, time_array, ephem_times, ephem_ra, ephem_dec, ephem_dist=None, ephem_vel=None
-):
-    """
-    Interpolate ephemerides to give positions for requested times.
-
-    This is a simple tool for calculating interpolated RA and Dec positions, as well
-    as distances and velocities, for a given ephemeris. Under the hood, the method
-    uses cubic spline interpolation to calculate values at the requested times,
-    provided that there are enough values to interpolate over to do so (requires
-    >= 4 points), otherwise a linear interpolation is used.
-
-    Parameters
-    ----------
-    time_array : array-like of floats
-        Times to interpolate positions for, in UTC Julian days.
-    ephem_times : array-like of floats
-        Times in UTC Julian days that match the recorded positions of the target.
-        Must be array-like, of shape (Npts,), where Npts is the number
-        of ephemeris points.
-    ephem_ra : array-like of floats
-        Right ascension of the target, at the times given in `ephem_times`. Units are
-        in radians, must have the same shape as `ephem_times`.
-    ephem_dec : array-like of floats
-        Declination of the target, at the times given in `ephem_times`. Units are
-        in radians, must have the same shape as `ephem_times`.
-    ephem_dist : array-like of floats
-        Distance of the target from the observer, at the times given in `ephem_times`.
-        Optional argument, in units of parsecs. Must have the same shape as
-        `ephem_times`.
-    ephem_vel : array-like of floats
-        Velocities of the target, at the times given in `ephem_times`. Optional
-        argument, in units of km/sec. Must have the same shape as `ephem_times`.
-
-    Returns
-    -------
-    ra_vals : ndarray of float
-        Interpolated RA values, returned as an ndarray of floats with
-        units of radians, and the same shape as `time_array`.
-    dec_vals : ndarray of float
-        Interpolated declination values, returned as an ndarray of floats with
-        units of radians, and the same shape as `time_array`.
-    dist_vals : None or ndarray of float
-        If `ephem_dist` was provided, an ndarray of floats (with same shape as
-        `time_array`) with the interpolated target distances, in units of parsecs.
-        If `ephem_dist` was not provided, this returns as None.
-    vel_vals : None or ndarray of float
-        If `ephem_vel` was provided, an ndarray of floats (with same shape as
-        `time_array`) with the interpolated target velocities, in units of km/sec.
-        If `ephem_vel` was not provided, this returns as None.
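-
-    Examples
-    --------
-    A minimal sketch with made-up ephemeris values (purely illustrative):
-
-    >>> import numpy as np
-    >>> ra_vals, dec_vals, dist_vals, vel_vals = interpolate_ephem(
-    ...     time_array=np.array([2459000.25]),
-    ...     ephem_times=np.array([2459000.0, 2459000.5, 2459001.0]),
-    ...     ephem_ra=np.array([1.00, 1.01, 1.02]),
-    ...     ephem_dec=np.array([-0.50, -0.49, -0.48]),
-    ... )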
-
-    """
-    # We're importing this here since it's only used for this one function
-    from scipy.interpolate import interp1d
-
-    ephem_shape = np.array(ephem_times).shape
-
-    # Make sure that things look reasonable
-    if np.array(ephem_ra).shape != ephem_shape:
-        raise ValueError("ephem_ra must have the same shape as ephem_times.")
-
-    if np.array(ephem_dec).shape != ephem_shape:
-        raise ValueError("ephem_dec must have the same shape as ephem_times.")
-
-    if (ephem_dist is not None) and (np.array(ephem_dist).shape != ephem_shape):
-        raise ValueError("ephem_dist must have the same shape as ephem_times.")
-
-    if (ephem_vel is not None) and (np.array(ephem_vel).shape != ephem_shape):
-        raise ValueError("ephem_vel must have the same shape as ephem_times.")
-
-    ra_vals = np.zeros_like(time_array, dtype=float)
-    dec_vals = np.zeros_like(time_array, dtype=float)
-    dist_vals = None if ephem_dist is None else np.zeros_like(time_array, dtype=float)
-    vel_vals = None if ephem_vel is None else np.zeros_like(time_array, dtype=float)
-
-    if len(ephem_times) == 1:
-        ra_vals += ephem_ra
-        dec_vals += ephem_dec
-        if ephem_dist is not None:
-            dist_vals += ephem_dist
-        if ephem_vel is not None:
-            vel_vals += ephem_vel
-    else:
-        if len(ephem_times) > 3:
-            interp_kind = "cubic"
-        else:
-            interp_kind = "linear"
-
-        # If we have values that line up perfectly, just use those directly
-        select_mask = np.isin(time_array, ephem_times)
-        if np.any(select_mask):
-            time_select = time_array[select_mask]
-            ra_vals[select_mask] = interp1d(ephem_times, ephem_ra, kind="nearest")(
-                time_select
-            )
-            dec_vals[select_mask] = interp1d(ephem_times, ephem_dec, kind="nearest")(
-                time_select
-            )
-            if ephem_dist is not None:
-                dist_vals[select_mask] = interp1d(
-                    ephem_times, ephem_dist, kind="nearest"
-                )(time_select)
-            if ephem_vel is not None:
-                vel_vals[select_mask] = interp1d(
-                    ephem_times, ephem_vel, kind="nearest"
-                )(time_select)
-
-        # If we have values lining up between grid points, use spline interpolation
-        # to calculate their values
-        select_mask = ~select_mask
-        if np.any(select_mask):
-            time_select = time_array[select_mask]
-            ra_vals[select_mask] = interp1d(ephem_times, ephem_ra, kind=interp_kind)(
-                time_select
-            )
-            dec_vals[select_mask] = interp1d(ephem_times, ephem_dec, kind=interp_kind)(
-                time_select
-            )
-            if ephem_dist is not None:
-                dist_vals[select_mask] = interp1d(
-                    ephem_times, ephem_dist, kind=interp_kind
-                )(time_select)
-            if ephem_vel is not None:
-                vel_vals[select_mask] = interp1d(
-                    ephem_times, ephem_vel, kind=interp_kind
-                )(time_select)
-
-    return (ra_vals, dec_vals, dist_vals, vel_vals)
-
-
-def get_lst_for_time(
-    jd_array=None,
-    *,
-    telescope_loc=None,
-    latitude=None,
-    longitude=None,
-    altitude=None,
-    astrometry_library=None,
-    frame="itrs",
-    ellipsoid=None,
-):
-    """
-    Get the local apparent sidereal time for a set of jd times at an Earth location.
-
-    This function calculates the local apparent sidereal time (LAST), given a UTC time
-    and a position on the Earth, using either the astropy or NOVAS libraries. It
-    is important to note that there is an approximate 20 microsecond difference
-    between the two methods, presumably due to small differences in the apparent
-    reference frame. These differences will cancel out when calculating coordinates
-    in the TOPO frame, so long as apparent coordinates are calculated using the
-    same library (i.e., astropy or NOVAS). Failing to do so can introduce errors
-    up to ~1 mas in the horizontal coordinate system (i.e., AltAz).
-
-    Parameters
-    ----------
-    jd_array : ndarray of float
-        JD times to get lsts for.
-    telescope_loc : tuple or EarthLocation or MoonLocation
-        Alternative way of specifying telescope lat/lon/alt, either as a 3-element
-        tuple or as an astropy EarthLocation (or lunarsky MoonLocation). Cannot supply
-        both `telescope_loc` and `latitude`, `longitude`, or `altitude`.
-    latitude : float
-        Latitude of location to get lst for in degrees. Cannot specify both `latitude`
-        and `telescope_loc`.
-    longitude : float
-        Longitude of location to get lst for in degrees. Cannot specify both
-        `longitude` and `telescope_loc`.
-    altitude : float
-        Altitude of location to get lst for in meters. Cannot specify both `altitude`
-        and `telescope_loc`.
-    astrometry_library : str
-        Library used for running the LST calculations. Allowed options are 'erfa'
-        (which uses the pyERFA library), 'novas' (which uses the python-novas library),
-        and 'astropy' (which uses the astropy utilities). Default is erfa unless
-        the telescope_loc is a MoonLocation object, in which case the default is
-        astropy.
-    frame : str
-        Reference frame for latitude/longitude/altitude. Options are itrs (default)
-        or mcmf. Not used if telescope_loc is an EarthLocation or MoonLocation object.
-    ellipsoid : str
-        Ellipsoid to use for lunar coordinates. Must be one of "SPHERE",
-        "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default
-        is "SPHERE". Only used if frame is mcmf. Not used if telescope_loc is
-        an EarthLocation or MoonLocation object.
-
-    Returns
-    -------
-    ndarray of float
-        LASTs in radians corresponding to the jd_array.
-
-    """
-    site_loc = None
-    if telescope_loc is not None:
-        if not all(item is None for item in [latitude, longitude, altitude]):
-            raise ValueError(
-                "Cannot set both telescope_loc and latitude/longitude/altitude"
-            )
-        if isinstance(telescope_loc, EarthLocation) or (
-            hasmoon and isinstance(telescope_loc, MoonLocation)
-        ):
-            site_loc = telescope_loc
-            if isinstance(telescope_loc, EarthLocation):
-                frame = "ITRS"
-            else:
-                frame = "MCMF"
-        else:
-            latitude, longitude, altitude = telescope_loc
-
-    if site_loc is None:
-        if frame.upper() == "MCMF":
-            if not hasmoon:
-                raise ValueError(
-                    "Need to install `lunarsky` package to work with MCMF frame."
-                )
-            if ellipsoid is None:
-                ellipsoid = "SPHERE"
-
-            site_loc = MoonLocation.from_selenodetic(
-                Angle(longitude, unit="deg"),
-                Angle(latitude, unit="deg"),
-                altitude,
-                ellipsoid=ellipsoid,
-            )
-        else:
-            site_loc = EarthLocation.from_geodetic(
-                Angle(longitude, unit="deg"),
-                Angle(latitude, unit="deg"),
-                height=altitude,
-            )
-    if astrometry_library is None:
-        if frame == "itrs":
-            astrometry_library = "erfa"
-        else:
-            astrometry_library = "astropy"
-
-    if astrometry_library not in ["erfa", "astropy", "novas"]:
-        raise ValueError(
-            "Requested coordinate transformation library is not supported, please "
-            "select either 'erfa', 'novas', or 'astropy' for astrometry_library."
-        )
-
-    if isinstance(jd_array, np.ndarray):
-        lst_array = np.zeros_like(jd_array)
-        if lst_array.ndim == 0:
-            lst_array = lst_array.reshape(1)
-    else:
-        lst_array = np.zeros(1)
-
-    jd, reverse_inds = np.unique(jd_array, return_inverse=True)
-
-    if isinstance(site_loc, EarthLocation):
-        TimeClass = Time
-    else:
-        if not astrometry_library == "astropy":
-            raise NotImplementedError(
-                "The MCMF frame is only supported with the 'astropy' astrometry library"
-            )
-        TimeClass = LTime
-
-    times = TimeClass(jd, format="jd", scale="utc", location=site_loc)
-
-    if iers.conf.auto_max_age is None:  # pragma: no cover
-        delta, status = times.get_delta_ut1_utc(return_status=True)
-        if np.any(
-            np.isin(status, (iers.TIME_BEFORE_IERS_RANGE, iers.TIME_BEYOND_IERS_RANGE))
-        ):
-            warnings.warn(
-                "time is out of IERS range, setting delta ut1 utc to extrapolated value"
-            )
-            times.delta_ut1_utc = delta
-    if astrometry_library == "erfa":
-        # This appears to be what astropy is using under the hood,
-        # so it _should_ be totally consistent.
-        gast_array = erfa.gst06a(
-            times.ut1.jd1, times.ut1.jd2, times.tt.jd1, times.tt.jd2
-        )
-
-        # Technically one should correct for the polar wobble here, but the differences
-        # along the equator are minuscule -- of order 10s of nanoradians, well below
-        # the promised accuracy of IERS -- and rotation matrices can be expensive.
-        # We do want to correct, though, for secular polar drift (s'/TIO locator),
-        # which nudges the Earth rotation angle of order 47 uas per century.
-        sp = erfa.sp00(times.tt.jd1, times.tt.jd2)
-
-        lst_array = np.mod(gast_array + sp + site_loc.lon.rad, 2.0 * np.pi)[
-            reverse_inds
-        ]
-    elif astrometry_library == "astropy":
-        lst_array = times.sidereal_time("apparent").radian
-        if lst_array.ndim == 0:
-            lst_array = lst_array.reshape(1)
-        lst_array = lst_array[reverse_inds]
-    elif astrometry_library == "novas":
-        # Import the NOVAS library only if it's needed/available.
-        try:
-            import novas_de405  # noqa
-            from novas import compat as novas
-            from novas.compat import eph_manager
-        except ImportError as e:  # pragma: no cover
-            raise ImportError(
-                "novas and/or novas_de405 are not installed but are required for "
-                "NOVAS functionality"
-            ) from e
-
-        jd_start, jd_end, number = eph_manager.ephem_open()
-
-        tt_time_array = times.tt.value
-        ut1_high_time_array = times.ut1.jd1
-        ut1_low_time_array = times.ut1.jd2
-        full_ut1_time_array = ut1_high_time_array + ut1_low_time_array
-        polar_motion_data = iers.earth_orientation_table.get()
-
-        delta_x_array = np.interp(
-            times.mjd,
-            polar_motion_data["MJD"].value,
-            polar_motion_data["dX_2000A_B"].value,
-            left=0.0,
-            right=0.0,
-        )
-
-        delta_y_array = np.interp(
-            times.mjd,
-            polar_motion_data["MJD"].value,
-            polar_motion_data["dY_2000A_B"].value,
-            left=0.0,
-            right=0.0,
-        )
-
-        # Catch the case where we don't have CIP delta values yet (they don't typically
-        # have predictive values like the polar motion does)
-        delta_x_array[np.isnan(delta_x_array)] = 0.0
-        delta_y_array[np.isnan(delta_y_array)] = 0.0
-
-        for idx in range(len(times)):
-            novas.cel_pole(
-                tt_time_array[idx], 2, delta_x_array[idx], delta_y_array[idx]
-            )
-            # The NOVAS routine will return Greenwich Apparent Sidereal Time (GAST),
-            # in units of hours
-            lst_array[reverse_inds == idx] = novas.sidereal_time(
-                ut1_high_time_array[idx],
-                ut1_low_time_array[idx],
-                (tt_time_array[idx] - full_ut1_time_array[idx]) * 86400.0,
-            )
-
-        # Add the telescope lon to convert from GAST to LAST (local)
-        lst_array = np.mod(lst_array + (longitude / 15.0), 24.0)
-
-        # Convert from hours back to rad
-        lst_array *= np.pi / 12.0
-
-    lst_array = np.reshape(lst_array, jd_array.shape)
-
-    return lst_array
-
-
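-# Hypothetical usage sketch (site values illustrative): apparent LSTs, in
-# radians, for a set of JDs at a given latitude/longitude (degrees) and
-# altitude (meters):
-#
-#     >>> lst = get_lst_for_time(
-#     ...     np.array([2459000.5]),
-#     ...     latitude=-26.7,
-#     ...     longitude=116.7,
-#     ...     altitude=377.8,
-#     ... )
-
-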
-def calc_app_coords(
-    *,
-    lon_coord,
-    lat_coord,
-    coord_frame="icrs",
-    coord_epoch=None,
-    coord_times=None,
-    coord_type="sidereal",
-    time_array=None,
-    lst_array=None,
-    telescope_loc=None,
-    telescope_frame="itrs",
-    ellipsoid=None,
-    pm_ra=None,
-    pm_dec=None,
-    vrad=None,
-    dist=None,
-):
-    """
-    Calculate apparent coordinates for several different coordinate types.
-
-    This function calculates apparent positions at the current epoch.
-
-    Parameters
-    ----------
-    lon_coord : float or ndarray of float
-        Longitudinal (e.g., RA) coordinates, units of radians. Must match the same
-        shape as lat_coord.
-    lat_coord : float or ndarray of float
-        Latitudinal (e.g., Dec) coordinates, units of radians. Must match the same
-        shape as lon_coord.
-    coord_frame : string
-        The reference frame of the input coordinates, can be any frame
-        that is presently supported by astropy.
-    coord_epoch : float or str or Time object
-        Epoch for coord_frame, nominally only used if converting to either the FK4 or
-        FK5 frames, in units of fractional years. If provided as a float and the
-        coord_frame is an FK4-variant, value will be assumed to be given in Besselian
-        years (i.e., 1950 would be 'B1950'), otherwise the year is assumed to be
-        in Julian years.
-    coord_times : float or ndarray of float
-        Only used when `coord_type="ephem"`, the JD UTC time for each value of
-        `lon_coord` and `lat_coord`. These values are used to interpolate `lon_coord`
-        and `lat_coord` values to those times listed in `time_array`.
-    coord_type : str
-        Type of source to calculate coordinates for. Must be one of:
-            "sidereal" (fixed RA/Dec),
-            "ephem" (RA/Dec that moves with time),
-            "driftscan" (fixed az/el position),
-            "unprojected" (alias for "driftscan" with (Az, Alt) = (0 deg, 90 deg)).
-    time_array : float or ndarray of float or Time object
-        Times for which the apparent coordinates are to be calculated, in UTC JD.
-        If more than a single element, must be the same shape as lon_coord and
-        lat_coord if both of those are arrays (instead of single floats).
-    lst_array : float or ndarray of float
-        Local apparent sidereal times matching `time_array`, in units of radians.
-        Only used for the "driftscan" and "unprojected" coordinate types; calculated
-        from `time_array` if not supplied.
-    telescope_loc : array-like of floats or EarthLocation or MoonLocation
-        ITRF latitude, longitude, and altitude (rel to sea-level) of the phase center
-        of the array. Can either be provided as an astropy EarthLocation, a lunarsky
-        MoonLocation, or a tuple of shape (3,) containing (in order) the latitude,
-        longitude, and altitude for a position on Earth in units of radians, radians,
-        and meters, respectively.
-    telescope_frame : str, optional
-        Reference frame for telescope location. Options are itrs (default) or mcmf.
-        Only used if telescope_loc is not an EarthLocation or MoonLocation.
-    ellipsoid : str
-        Ellipsoid to use for lunar coordinates. Must be one of "SPHERE",
-        "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default
-        is "SPHERE". Only used if frame is mcmf.
-    pm_ra : float or ndarray of float
-        Proper motion in RA of the source, expressed in units of milliarcsec / year.
-        Can either be a single float or array of shape (Ntimes,), although this must
-        be consistent with other parameters (namely lon_coord and lat_coord). Not
-        required, motion is calculated relative to the value of `coord_epoch`.
-    pm_dec : float or ndarray of float
-        Proper motion in Dec of the source, expressed in units of milliarcsec / year.
-        Can either be a single float or array of shape (Ntimes,), although this must
-        be consistent with other parameters (namely lon_coord and lat_coord). Not
-        required, motion is calculated relative to the value of `coord_epoch`.
-    vrad : float or ndarray of float
-        Radial velocity of the source, expressed in units of km / sec. Can either be
-        a single float or array of shape (Ntimes,), although this must be consistent
-        with other parameters (namely lon_coord and lat_coord). Not required.
-    dist : float or ndarray of float
-        Distance of the source, expressed in parsecs. Can either be a single
-        float or array of shape (Ntimes,), although this must be consistent with other
-        parameters (namely lon_coord and lat_coord). Not required.
-
-    Returns
-    -------
-    app_ra : ndarray of floats
-        Apparent right ascension coordinates, in units of radians.
-    app_dec : ndarray of floats
-        Apparent declination coordinates, in units of radians.
-    """
-    if isinstance(telescope_loc, EarthLocation) or (
-        hasmoon and isinstance(telescope_loc, MoonLocation)
-    ):
-        site_loc = telescope_loc
-        if hasmoon and isinstance(telescope_loc, MoonLocation):
-            ellipsoid = telescope_loc.ellipsoid
-    elif telescope_frame.upper() == "MCMF":
-        if not hasmoon:
-            raise ValueError(
-                "Need to install `lunarsky` package to work with MCMF frame."
- ) - if ellipsoid is None: - ellipsoid = "SPHERE" - site_loc = MoonLocation.from_selenodetic( - telescope_loc[1] * (180.0 / np.pi), - telescope_loc[0] * (180.0 / np.pi), - height=telescope_loc[2], - ellipsoid=ellipsoid, - ) - else: - site_loc = EarthLocation.from_geodetic( - telescope_loc[1] * (180.0 / np.pi), - telescope_loc[0] * (180.0 / np.pi), - height=telescope_loc[2], - ) - - # Time objects and unique don't seem to play well together, so we break apart - # their handling here - if isinstance(time_array, Time): - time_array = time_array.utc.jd - - unique_time_array, unique_mask = np.unique(time_array, return_index=True) - - if coord_type in ["driftscan", "unprojected"]: - if lst_array is None: - unique_lst = get_lst_for_time(unique_time_array, telescope_loc=site_loc) - else: - unique_lst = lst_array[unique_mask] - - if coord_type == "sidereal": - # If the coordinates are not in the ICRS frame, go ahead and transform them now - if coord_frame != "icrs": - icrs_ra, icrs_dec = transform_sidereal_coords( - longitude=lon_coord, - latitude=lat_coord, - in_coord_frame=coord_frame, - out_coord_frame="icrs", - in_coord_epoch=coord_epoch, - time_array=unique_time_array, - ) - else: - icrs_ra = lon_coord - icrs_dec = lat_coord - unique_app_ra, unique_app_dec = transform_icrs_to_app( - time_array=unique_time_array, - ra=icrs_ra, - dec=icrs_dec, - telescope_loc=site_loc, - pm_ra=pm_ra, - pm_dec=pm_dec, - vrad=vrad, - dist=dist, - ) - - elif coord_type == "driftscan": - # Use the ERFA function ae2hd, which will do all the heavy - # lifting for us - unique_app_ha, unique_app_dec = erfa.ae2hd( - lon_coord, lat_coord, site_loc.lat.rad - ) - # The above returns HA/Dec, so we just need to rotate by - # the LST to get back app RA and Dec - unique_app_ra = np.mod(unique_app_ha + unique_lst, 2 * np.pi) - unique_app_dec = unique_app_dec + np.zeros_like(unique_app_ra) - elif coord_type == "ephem": - interp_ra, interp_dec, _, _ = interpolate_ephem( - time_array=unique_time_array, - ephem_times=coord_times, - ephem_ra=lon_coord, - ephem_dec=lat_coord, - ) - if coord_frame != "icrs": - icrs_ra, icrs_dec = transform_sidereal_coords( - longitude=interp_ra, - latitude=interp_dec, - in_coord_frame=coord_frame, - out_coord_frame="icrs", - in_coord_epoch=coord_epoch, - time_array=unique_time_array, - ) - else: - icrs_ra = interp_ra - icrs_dec = interp_dec - # TODO: Vel and distance handling to be integrated here, once they are are - # needed for velocity frame tracking - unique_app_ra, unique_app_dec = transform_icrs_to_app( - time_array=unique_time_array, - ra=icrs_ra, - dec=icrs_dec, - telescope_loc=site_loc, - pm_ra=pm_ra, - pm_dec=pm_dec, - ) - elif coord_type == "unprojected": - # This is the easiest one - this is just supposed to be ENU, so set the - # apparent coords to the current lst and telescope_lat. - unique_app_ra = unique_lst.copy() - unique_app_dec = np.zeros_like(unique_app_ra) + site_loc.lat.rad - else: - raise ValueError("Object type %s is not recognized." % coord_type) - - # Now that we've calculated all the unique values, time to backfill through the - # "redundant" entries in the Nblt axis. 
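-    # (Each row of the Nblt axis shares its time with other rows, so the loop
-    # below copies each uniquely-computed result into every row with a matching
-    # time rather than recomputing the astrometry per row.)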
-    app_ra = np.zeros(np.array(time_array).shape)
-    app_dec = np.zeros(np.array(time_array).shape)
-
-    for idx, unique_time in enumerate(unique_time_array):
-        select_mask = time_array == unique_time
-        app_ra[select_mask] = unique_app_ra[idx]
-        app_dec[select_mask] = unique_app_dec[idx]
-
-    return app_ra, app_dec
-
-
-def calc_sidereal_coords(
-    *,
-    time_array,
-    app_ra,
-    app_dec,
-    telescope_loc,
-    coord_frame,
-    telescope_frame="itrs",
-    ellipsoid=None,
-    coord_epoch=None,
-):
-    """
-    Calculate sidereal coordinates given apparent coordinates.
-
-    This function calculates coordinates in the requested frame (at a given epoch)
-    from a set of apparent coordinates.
-
-    Parameters
-    ----------
-    time_array : float or ndarray of float or Time object
-        Times for which the apparent coordinates were calculated, in UTC JD. Must
-        match the shape of app_ra and app_dec.
-    app_ra : float or ndarray of float
-        Array of apparent right ascension coordinates, units of radians. Must match
-        the shape of time_array and app_dec.
-    app_dec : float or ndarray of float
-        Array of apparent declination coordinates, units of radians. Must match
-        the shape of time_array and app_ra.
-    telescope_loc : tuple of floats or EarthLocation
-        ITRF latitude, longitude, and altitude (rel to sea-level) of the phase center
-        of the array. Can either be provided as an astropy EarthLocation, or a tuple
-        of shape (3,) containing (in order) the latitude, longitude, and altitude,
-        in units of radians, radians, and meters, respectively.
-    coord_frame : string
-        The requested reference frame for the output coordinates, can be any frame
-        that is presently supported by astropy.
-    telescope_frame : str, optional
-        Reference frame for telescope location. Options are itrs (default) or mcmf.
-        Only used if telescope_loc is not an EarthLocation or MoonLocation.
-    ellipsoid : str
-        Ellipsoid to use for lunar coordinates. Must be one of "SPHERE",
-        "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default
-        is "SPHERE". Only used if frame is mcmf.
-    coord_epoch : float or str or Time object
-        Epoch for coord_frame, nominally only used if converting to either the FK4
-        or FK5 frames, in units of fractional years. If provided as a float and the
-        coord_frame is an FK4-variant, the value will be assumed to be given in
-        Besselian years (i.e., 1950 would be 'B1950'), otherwise the year is assumed
-        to be in Julian years.
-
-    Returns
-    -------
-    ref_ra : ndarray of floats
-        Right ascension coordinates in the requested frame, in units of radians.
-        Either shape (Ntimes,) if Ntimes > 1, otherwise (Ncoord,).
-    ref_dec : ndarray of floats
-        Declination coordinates in the requested frame, in units of radians.
-        Either shape (Ntimes,) if Ntimes > 1, otherwise (Ncoord,).
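-
-    Examples
-    --------
-    A minimal, illustrative sketch (not from the original docstring): it assumes
-    this module's import path, and the site location and apparent coordinates
-    below are made up:
-
-    >>> import numpy as np
-    >>> from astropy.coordinates import EarthLocation
-    >>> from pyuvdata import utils
-    >>> site = EarthLocation.from_geodetic(lon=21.43, lat=-30.72, height=1073.0)
-    >>> ref_ra, ref_dec = utils.calc_sidereal_coords(
-    ...     time_array=np.array([2459855.5]),
-    ...     app_ra=np.array([0.5]),
-    ...     app_dec=np.array([-0.6]),
-    ...     telescope_loc=site,
-    ...     coord_frame="fk5",
-    ...     coord_epoch=2000.0,
-    ... )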
- """ - # Check to make sure that we have a properly formatted epoch for our in-bound - # coordinate frame - epoch = None - if isinstance(coord_epoch, str) or isinstance(coord_epoch, Time): - # If its a string or a Time object, we don't need to do anything more - epoch = Time(coord_epoch) - elif coord_epoch is not None: - if coord_frame.lower() in ["fk4", "fk4noeterms"]: - epoch = Time(coord_epoch, format="byear") - else: - epoch = Time(coord_epoch, format="jyear") - - if telescope_frame == "mcmf" and ellipsoid is None: - ellipsoid = "SPHERE" - - icrs_ra, icrs_dec = transform_app_to_icrs( - time_array=time_array, - app_ra=app_ra, - app_dec=app_dec, - telescope_loc=telescope_loc, - telescope_frame=telescope_frame, - ellipsoid=ellipsoid, - ) - - if coord_frame == "icrs": - ref_ra, ref_dec = (icrs_ra, icrs_dec) - else: - ref_ra, ref_dec = transform_sidereal_coords( - longitude=icrs_ra, - latitude=icrs_dec, - in_coord_frame="icrs", - out_coord_frame=coord_frame, - out_coord_epoch=epoch, - time_array=time_array, - ) - - return ref_ra, ref_dec - - -def check_lsts_against_times( - *, - jd_array, - lst_array, - lst_tols, - latitude=None, - longitude=None, - altitude=None, - frame="itrs", - ellipsoid=None, - telescope_loc=None, -): - """ - Check that LSTs are consistent with the time_array and telescope location. - - This just calls `get_lst_for_time`, compares that result to the `lst_array` - and warns if they are not within the tolerances specified by `lst_tols`. - - Parameters - ---------- - jd_array : ndarray of float - JD times to get lsts for. - lst_array : ndarray of float - LSTs to check to see if they match the jd_array at the location. - latitude : float - Latitude of location to check the lst for in degrees. - longitude : float - Longitude of location to check the lst for in degrees. - altitude : float - Altitude of location to check the lst for in meters. - lst_tops : tuple of float - A length 2 tuple giving the (relative, absolute) tolerances to check the - LST agreement to. These are passed directly to numpy.allclose. - frame : str - Reference frame for latitude/longitude/altitude. - Options are itrs (default) or mcmf. - ellipsoid : str - Ellipsoid to use for lunar coordinates. Must be one of "SPHERE", "GSFC", - "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default - is "SPHERE". Only used if frame is mcmf. - telescope_loc : tuple or EarthLocation or MoonLocation - Alternative way of specifying telescope lat/lon/alt, either as a 3-element tuple - or as an astropy EarthLocation (or lunarsky MoonLocation). Cannot supply both - `telescope_loc` and `latitute`, `longitude`, or `altitude`. - - Returns - ------- - None - - Warns - ----- - If the `lst_array` does not match the calculated LSTs to the lst_tols. - - """ - # Don't worry about passing the astrometry library because we test that they agree - # to better than our standard lst tolerances. - lsts = get_lst_for_time( - jd_array=jd_array, - telescope_loc=telescope_loc, - latitude=latitude, - longitude=longitude, - altitude=altitude, - frame=frame, - ellipsoid=ellipsoid, - ) - - if not np.allclose(lst_array, lsts, rtol=lst_tols[0], atol=lst_tols[1]): - warnings.warn( - "The lst_array is not self-consistent with the time_array and " - "telescope location. Consider recomputing with the " - "`set_lsts_from_time_array` method." 
- ) - - -def check_surface_based_positions( - *, - telescope_loc=None, - telescope_frame="itrs", - antenna_positions=None, - raise_error=True, - raise_warning=True, -): - """ - Check that antenna positions are consistent with ground-based values. - - Check that the antenna position, telescope location, or combination of both produces - locations that are consistent with surface-based positions. If supplying both - antenna position and telescope location, the check will be run against the sum total - of both. For the Earth, the permitted range of values is betwen 6350 and 6390 km, - whereas for theMoon the range is 1717.1 to 1757.1 km. - - telescope_loc : tuple or EarthLocation or MoonLocation - Telescope location, specified as a 3-element tuple (specifying geo/selenocentric - position in meters) or as an astropy EarthLocation (or lunarsky MoonLocation). - telescope_frame : str, optional - Reference frame for latitude/longitude/altitude. Options are itrs (default) or - mcmf. Only used if telescope_loc is not an EarthLocation or MoonLocation. - antenna_positions : ndarray of float - List of antenna positions relative to array center in ECEF coordinates, - required if not providing `uvw_array`. Shape is (Nants, 3). If no telescope_loc - is specified, these values will be assumed to be relative to geocenter. - raise_error : bool - If True, an error is raised if telescope_loc and/or telescope_loc do not conform - to expectations for a surface-based telescope. Default is True. - raise_warning : bool - If True, a warning is raised if telescope_loc and/or telescope_loc do not - conform to expectations for a surface-based telescope. Default is True, only - used if `raise_error` is set to False. - - Returns - ------- - valid : bool - If True, the antenna_positions and/or telescope_loc conform to expectations for - a surface-based telescope. Otherwise returns false. - - """ - if antenna_positions is None: - antenna_positions = np.zeros((1, 3)) - - if isinstance(telescope_loc, EarthLocation) or ( - hasmoon and isinstance(telescope_loc, MoonLocation) - ): - antenna_positions = antenna_positions + ( - telescope_loc.x.to("m").value, - telescope_loc.y.to("m").value, - telescope_loc.z.to("m").value, - ) - if isinstance(telescope_loc, EarthLocation): - telescope_frame = "itrs" - else: - telescope_frame = "mcmf" - elif telescope_loc is not None: - antenna_positions = antenna_positions + telescope_loc - - low_lim, hi_lim, world = _range_dict[telescope_frame] - - err_type = None - if np.any(np.sum(antenna_positions**2.0, axis=1) < low_lim**2.0): - err_type = "below" - elif np.any(np.sum(antenna_positions**2.0, axis=1) > hi_lim**2.0): - err_type = "above" - - if err_type is None: - return True - - err_msg = ( - f"{telescope_frame} position vector magnitudes must be on the order of " - f"the radius of {world} -- they appear to lie well {err_type} this." - ) - - # If desired, raise an error - if raise_error: - raise ValueError(err_msg) - - # Otherwise, if desired, raise a warning instead - if raise_warning: - warnings.warn(err_msg) - - return False - - -def uvw_track_generator( - *, - lon_coord=None, - lat_coord=None, - coord_frame="icrs", - coord_epoch=None, - coord_type="sidereal", - time_array=None, - telescope_loc=None, - telescope_frame="itrs", - ellipsoid=None, - antenna_positions=None, - antenna_numbers=None, - ant_1_array=None, - ant_2_array=None, - uvw_array=None, - force_postive_u=False, -): - """ - Calculate uvw coordinates (among other values) for a given position on the sky. 
- - This function is meant to be a user-friendly wrapper around several pieces of code - for effectively simulating a track. - - Parameters - ---------- - lon_coord : float or ndarray of float - Longitudinal (e.g., RA) coordinates, units of radians. Must match the same - shape as lat_coord. - lat_coord : float or ndarray of float - Latitudinal (e.g., Dec) coordinates, units of radians. Must match the same - shape as lon_coord. - coord_frame : string - The requested reference frame for the output coordinates, can be any frame - that is presently supported by astropy. - coord_epoch : float or str or Time object, optional - Epoch for ref_frame, nominally only used if converting to either the FK4 or - FK5 frames, in units of fractional years. If provided as a float and the - ref_frame is an FK4-variant, value will assumed to be given in Besselian - years (i.e., 1950 would be 'B1950'), otherwise the year is assumed to be - in Julian years. - coord_type : str - Type of source to calculate coordinates for. Must be one of: - "sidereal" (fixed RA/Dec), - "ephem" (RA/Dec that moves with time), - "driftscan" (fixed az/el position), - "unprojected" (alias for "driftscan" with (Az, Alt) = (0 deg, 90 deg)). - time_array : ndarray of float or Time object - Times for which the apparent coordinates were calculated, in UTC JD. Must - match the shape of lon_coord and lat_coord. - telescope_loc : array-like of floats or EarthLocation or MoonLocation - ITRF latitude, longitude, and altitude (rel to sea-level) of the phase center - of the array. Can either be provided as an astropy EarthLocation, a lunarsky - Moonlocation, or a tuple of shape (3,) containing (in order) the latitude, - longitude, and altitude for a position on Earth in units of degrees, degrees, - and meters, respectively. - telescope_frame : str, optional - Reference frame for latitude/longitude/altitude. Options are itrs (default) or - mcmf. Only used if telescope_loc is not an EarthLocation or MoonLocation. - ellipsoid : str - Ellipsoid to use for lunar coordinates. Must be one of "SPHERE", - "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default - is "SPHERE". Only used if frame is mcmf. - antenna_positions : ndarray of float - List of antenna positions relative to array center in ECEF coordinates, - required if not providing `uvw_array`. Shape is (Nants, 3). - antenna_numbers: ndarray of int, optional - List of antenna numbers, ordered in the same way as `antenna_positions` (e.g., - `antenna_numbers[0]` should given the number of antenna that resides at ECEF - position given by `antenna_positions[0]`). Shape is (Nants,), requred if - supplying ant_1_array and ant_2_array. - ant_1_array : ndarray of int, optional - Antenna number of the first antenna in the baseline pair, for all baselines - Required if not providing `uvw_array`, shape is (Nblts,). If not supplied, then - the method will automatically fill in ant_1_array with all unique antenna - pairings for each time/position. - ant_2_array : ndarray of int, optional - Antenna number of the second antenna in the baseline pair, for all baselines - Required if not providing `uvw_array`, shape is (Nblts,). If not supplied, then - the method will automatically fill in ant_2_array with all unique antenna - pairings for each time/position. - uvw_array : ndarray of float, optional - Array of baseline coordinates (in ENU), required if not deriving new coordinates - from antenna positions. Setting this value will will cause antenna positions to - be ignored. 
Shape is (Nblts, 3). - force_positive_u : bool, optional - If set to true, then forces the conjugation of each individual baseline to be - set such that the uvw coordinates land on the positive-u side of the uv-plane. - Default is False. - - Returns - ------- - obs_dict : dict - Dictionary containing the results of the simulation, which includes: - "uvw" the uvw-coordinates (meters), - "app_ra" apparent RA of the sources (radians), - "app_dec" apparent Dec of the sources (radians), - "frame_pa" ngle between apparent north and `coord_frame` north (radians), - "lst" local apparent sidereal time (radians), - "site_loc" EarthLocation or MoonLocation for the telescope site. - """ - if isinstance(telescope_loc, EarthLocation) or ( - hasmoon and isinstance(telescope_loc, MoonLocation) - ): - site_loc = telescope_loc - elif telescope_frame.upper() == "MCMF": - if not hasmoon: - raise ValueError( - "Need to install `lunarsky` package to work with MCMF frame." - ) - if ellipsoid is None: - ellipsoid = "SPHERE" - - site_loc = MoonLocation.from_selenodetic( - Angle(telescope_loc[1], unit="deg"), - Angle(telescope_loc[0], unit="deg"), - telescope_loc[2], - ellipsoid=ellipsoid, - ) - else: - site_loc = EarthLocation.from_geodetic( - Angle(telescope_loc[1], unit="deg"), - Angle(telescope_loc[0], unit="deg"), - height=telescope_loc[2], - ) - - if not isinstance(lon_coord, np.ndarray): - lon_coord = np.array(lon_coord) - if not isinstance(lat_coord, np.ndarray): - lat_coord = np.array(lat_coord) - if not isinstance(time_array, np.ndarray): - time_array = np.array(time_array) - - if lon_coord.ndim == 0: - lon_coord = lon_coord.reshape(1) - if lat_coord.ndim == 0: - lat_coord = lat_coord.reshape(1) - if time_array.ndim == 0: - time_array = time_array.reshape(1) - - Ntimes = len(time_array) - if uvw_array is None: - if all(item is None for item in [antenna_numbers, ant_1_array, ant_2_array]): - antenna_numbers = np.arange(1, 1 + len(antenna_positions)) - ant_1_array = [] - ant_2_array = [] - for idx in range(len(antenna_positions)): - for jdx in range(idx + 1, len(antenna_positions)): - ant_1_array.append(idx + 1) - ant_2_array.append(jdx + 1) - - Nbase = len(ant_1_array) - - ant_1_array = np.tile(ant_1_array, Ntimes) - ant_2_array = np.tile(ant_2_array, Ntimes) - if len(lon_coord) == len(time_array): - lon_coord = np.repeat(lon_coord, Nbase) - lat_coord = np.repeat(lat_coord, Nbase) - - time_array = np.repeat(time_array, Nbase) - - lst_array = get_lst_for_time(jd_array=time_array, telescope_loc=site_loc) - app_ra, app_dec = calc_app_coords( - lon_coord=lon_coord, - lat_coord=lat_coord, - coord_frame=coord_frame, - coord_type=coord_type, - time_array=time_array, - lst_array=lst_array, - telescope_loc=site_loc, - ) - - frame_pa = calc_frame_pos_angle( - time_array=time_array, - app_ra=app_ra, - app_dec=app_dec, - telescope_loc=site_loc, - ref_frame=coord_frame, - ref_epoch=coord_epoch, - ) - - uvws = calc_uvw( - app_ra=app_ra, - app_dec=app_dec, - frame_pa=frame_pa, - lst_array=lst_array, - antenna_positions=antenna_positions, - antenna_numbers=antenna_numbers, - ant_1_array=ant_1_array, - ant_2_array=ant_2_array, - telescope_lon=site_loc.lon.rad, - telescope_lat=site_loc.lat.rad, - uvw_array=uvw_array, - use_ant_pos=(uvw_array is None), - from_enu=(uvw_array is not None), - ) - - if force_postive_u: - mask = (uvws[:, 0] < 0.0) | ((uvws[:, 0] == 0.0) & (uvws[:, 1] < 0.0)) - uvws[mask, :] *= -1.0 - - return { - "uvw": uvws, - "app_ra": app_ra, - "app_dec": app_dec, - "frame_pa": frame_pa, - "lst": lst_array, - 
"site_loc": site_loc, - } - - -def _adj_list(vecs, tol, n_blocks=None): - """Identify neighbors of each vec in vecs, to distance tol.""" - n_items = len(vecs) - max_items = 2**10 # Max array size used is max_items**2. Avoid using > 1 GiB - - if n_blocks is None: - n_blocks = max(n_items // max_items, 1) - - # We may sort blocks so that some pairs of blocks may be skipped. - # Reorder vectors by x. - - order = np.argsort(vecs[:, 0]) - blocks = np.array_split(order, n_blocks) - adj = [{k} for k in range(n_items)] # Adjacency lists - for b1 in blocks: - for b2 in blocks: - v1, v2 = vecs[b1], vecs[b2] - # Check for no overlap, with tolerance. - xmin1 = v1[0, 0] - tol - xmax1 = v1[-1, 0] + tol - xmin2 = v2[0, 0] - tol - xmax2 = v2[-1, 0] + tol - if max(xmin1, xmin2) > min(xmax1, xmax2): - continue - - adj_mat = cdist(vecs[b1], vecs[b2]) < tol - for bi, col in enumerate(adj_mat): - adj[b1[bi]] = adj[b1[bi]].union(b2[col]) - return [frozenset(g) for g in adj] - - -def _find_cliques(adj, strict=False): - n_items = len(adj) - - loc_gps = [] - visited = np.zeros(n_items, dtype=bool) - for k in range(n_items): - if visited[k]: - continue - a0 = adj[k] - visited[k] = True - if all(adj[it].__hash__() == a0.__hash__() for it in a0): - group = list(a0) - group.sort() - visited[list(a0)] = True - loc_gps.append(group) - - # Require all adjacency lists to be isolated maximal cliques: - if strict: - if not all(sorted(st) in loc_gps for st in adj): - raise ValueError("Non-isolated cliques found in graph.") - - return loc_gps - - -def find_clusters(*, location_ids, location_vectors, tol, strict=False): - """ - Find clusters of vectors (e.g. redundant baselines, times). - - Parameters - ---------- - location_ids : array_like of int - ID labels for locations. - location_vectors : array_like of float - location vectors, can be multidimensional - tol : float - tolerance for clusters - strict : bool - Require that all adjacency lists be isolated maximal cliques. - This ensures that vectors do not fall into multiple clusters. - Default: False - - Returns - ------- - list of list of location_ids - - """ - location_vectors = np.asarray(location_vectors) - location_ids = np.asarray(location_ids) - if location_vectors.ndim == 1: - location_vectors = location_vectors[:, np.newaxis] - - adj = _adj_list(location_vectors, tol) # adj = list of sets - - loc_gps = _find_cliques(adj, strict=strict) - loc_gps = [np.sort(location_ids[gp]).tolist() for gp in loc_gps] - return loc_gps - - -def find_clusters_grid(location_ids, location_vectors, tol=1.0): - """ - Find redundant groups using a gridding algorithm developed by the HERA team. - - This is essentially a gridding approach, but it only keeps track of the grid - points that have baselines assigned to them. It iterates through the - baselines and assigns each baseline to a an existing group if it is within - a grid spacing or makes a new group if there is no group. The location of - the group is the baseline vector of the first baseline assigned to it, rounded - to the grid spacing, so the resulting assigned grid point can depend on the - order in which baseline vectors are passed to it. It is possible for a baseline - to be assigned to a group that is up to but strictly less than 4 times the - grid spacing from its true location, so we use a grid a factor of 4 smaller - than the passed tolerance (`tol`). This method is quite robust for regular - arrays if the tolerance is properly specified, but may not behave predictably - for highly non-redundant arrays. 
- - Parameters - ---------- - baselines : array_like of int - Baseline numbers, shape (Nbls,) - baseline_vecs : array_like of float - Baseline vectors in meters, shape (Nbls, 3). - tol : float - Absolute tolerance of redundancy, in meters. - - Returns - ------- - baseline_groups : list of lists of int - list of lists of redundant baseline numbers - baseline_ind_conj : list of int - List of baselines that are redundant when reversed. Only returned if - include_conjugates is True - - """ - bl_gps = {} - # reduce the grid size to ensure baselines won't be assigned to a group - # more than the tol away from their location. The factor of 4 is a personal - # communication from Josh Dillon who developed this algorithm. - grid_size = tol / 4.0 - - p_or_m = (0, -1, 1) - epsilons = [[dx, dy, dz] for dx in p_or_m for dy in p_or_m for dz in p_or_m] - - def check_neighbors(delta): - # Check to make sure bl_gps doesn't have the key plus or minus rounding error - for epsilon in epsilons: - newKey = ( - delta[0] + epsilon[0], - delta[1] + epsilon[1], - delta[2] + epsilon[2], - ) - if newKey in bl_gps: - return newKey - return - - baseline_ind_conj = [] - for bl_i, bl in enumerate(location_ids): - delta = tuple(np.round(location_vectors[bl_i] / grid_size).astype(int)) - new_key = check_neighbors(delta) - if new_key is not None: - # this has a match - bl_gps[new_key].append(bl) - else: - # this is a new group - bl_gps[delta] = [bl] - - bl_list = [sorted(gv) for gv in bl_gps.values()] - - return bl_list, baseline_ind_conj - - -def get_baseline_redundancies( - baselines, baseline_vecs, *, tol=1.0, include_conjugates=False, use_grid_alg=None -): - """ - Find redundant baseline groups. - - Parameters - ---------- - baselines : array_like of int - Baseline numbers, shape (Nbls,) - baseline_vecs : array_like of float - Baseline vectors in meters, shape (Nbls, 3). - tol : float - Absolute tolerance of redundancy, in meters. - include_conjugates : bool - Option to include baselines that are redundant when flipped. - use_grid_alg : bool - Option to use the gridding based algorithm (developed by the HERA team) - to find redundancies rather than the older clustering algorithm. - - Returns - ------- - baseline_groups : list of lists of int - list of lists of redundant baseline numbers - vec_bin_centers : list of array_like of float - List of vectors describing redundant group centers - lengths : list of float - List of redundant group baseline lengths in meters - baseline_ind_conj : list of int - List of baselines that are redundant when reversed. Only returned if - include_conjugates is True - - """ - if use_grid_alg is None: - # This was added in v2.4.2 (Feb 2024). It should go away at some point. - # Normally it would be in v2.6 or later, but if v3.0 comes out - # very soon we could consider delaying the removal of this until v3.1 - warnings.warn( - "The use_grid_alg parameter is not set. Defaulting to True to " - "use the new gridding based algorithm (developed by the HERA team) " - "rather than the older clustering based algorithm. This is change " - "to the default, to use the clustering algorithm set use_grid_alg=False." - ) - use_grid_alg = True - - Nbls = baselines.shape[0] - - if not baseline_vecs.shape == (Nbls, 3): - raise ValueError("Baseline vectors must be shape (Nbls, 3)") - - baseline_vecs = copy.copy(baseline_vecs) # Protect the vectors passed in. 
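-
-    # If conjugates are included, map each baseline with u < 0, or with u == 0
-    # and v < 0, or with u == v == 0 and w < 0 (all within tol) onto its
-    # negative, so that baselines that are redundant only after conjugation
-    # land in the same group, then re-run the grouping without conjugates.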
-    if include_conjugates:
-        conjugates = []
-        for bv in baseline_vecs:
-            uneg = bv[0] < -tol
-            uzer = np.isclose(bv[0], 0.0, atol=tol)
-            vneg = bv[1] < -tol
-            vzer = np.isclose(bv[1], 0.0, atol=tol)
-            wneg = bv[2] < -tol
-            conjugates.append(uneg or (uzer and vneg) or (uzer and vzer and wneg))
-
-        conjugates = np.array(conjugates, dtype=bool)
-        baseline_vecs[conjugates] *= -1
-        baseline_ind_conj = baselines[conjugates]
-        bl_gps, vec_bin_centers, lens = get_baseline_redundancies(
-            baselines,
-            baseline_vecs,
-            tol=tol,
-            include_conjugates=False,
-            use_grid_alg=use_grid_alg,
-        )
-        return bl_gps, vec_bin_centers, lens, baseline_ind_conj
-
-    if use_grid_alg:
-        output = find_clusters_grid(
-            location_ids=baselines, location_vectors=baseline_vecs, tol=tol
-        )
-        bl_gps, baseline_ind_conj = output
-    else:
-        try:
-            bl_gps = find_clusters(
-                location_ids=baselines,
-                location_vectors=baseline_vecs,
-                tol=tol,
-                strict=True,
-            )
-        except ValueError as exc:
-            raise ValueError(
-                "Some baselines are falling into multiple redundant groups. "
-                "Lower the tolerance to resolve ambiguity or use the gridding "
-                "based algorithm (developed by the HERA team) to find redundancies "
-                "by setting use_grid_alg=True."
-            ) from exc
-
-    n_unique = len(bl_gps)
-    vec_bin_centers = np.zeros((n_unique, 3))
-    for gi, gp in enumerate(bl_gps):
-        inds = [np.where(i == baselines)[0] for i in gp]
-        vec_bin_centers[gi] = np.mean(baseline_vecs[inds, :], axis=0)
-
-    lens = np.sqrt(np.sum(vec_bin_centers**2, axis=1))
-    return bl_gps, vec_bin_centers, lens
-
-
-def get_antenna_redundancies(
-    antenna_numbers,
-    antenna_positions,
-    *,
-    tol=1.0,
-    include_autos=False,
-    use_grid_alg=None,
-):
-    """
-    Find redundant baseline groups based on antenna positions.
-
-    Parameters
-    ----------
-    antenna_numbers : array_like of int
-        Antenna numbers, shape (Nants,).
-    antenna_positions : array_like of float
-        Antenna position vectors in the ENU (topocentric) frame in meters,
-        shape (Nants, 3).
-    tol : float
-        Redundancy tolerance in meters.
-    include_autos : bool
-        Option to include autocorrelations.
-    use_grid_alg : bool
-        Option to use the gridding based algorithm (developed by the HERA team)
-        to find redundancies rather than the older clustering algorithm.
-
-    Returns
-    -------
-    baseline_groups : list of lists of int
-        List of lists of redundant baseline numbers.
-    vec_bin_centers : list of array_like of float
-        List of vectors describing redundant group centers.
-    lengths : list of float
-        List of redundant group baseline lengths in meters.
-
-    Notes
-    -----
-    The baseline numbers refer to antenna pairs (a1, a2) such that
-    the baseline vector formed from ENU antenna positions,
-    blvec = enu[a1] - enu[a2]
-    is close to the other baselines in the group.
-
-    This is achieved by putting baselines in a form of the u>0
-    convention, but with a tolerance in defining the signs of
-    vector components.
-
-    To guarantee that the same baseline numbers are present in a UVData
-    object, run ``UVData.conjugate_bls('u>0', uvw_tol=tol)``, where `tol` is
-    the tolerance used here.
-
-    """
-    if use_grid_alg is None:
-        # This was added in v2.4.2 (Feb 2024). It should go away at some point.
-        # Normally it would be in v2.6 or later, but if v3.0 comes out
-        # very soon we could consider delaying the removal of this until v3.1
-        warnings.warn(
-            "The use_grid_alg parameter is not set. Defaulting to True to "
-            "use the new gridding based algorithm (developed by the HERA team) "
-            "rather than the older clustering based algorithm. This is a change "
-            "to the default; to use the clustering algorithm set use_grid_alg=False."
-        )
-        use_grid_alg = True
-
-    Nants = antenna_numbers.size
-
-    bls = []
-    bl_vecs = []
-
-    for aj in range(Nants):
-        mini = aj + 1
-        if include_autos:
-            mini = aj
-        for ai in range(mini, Nants):
-            anti, antj = antenna_numbers[ai], antenna_numbers[aj]
-            bidx = antnums_to_baseline(antj, anti, Nants_telescope=Nants)
-            bv = antenna_positions[ai] - antenna_positions[aj]
-            bl_vecs.append(bv)
-            bls.append(bidx)
-    bls = np.array(bls)
-    bl_vecs = np.array(bl_vecs)
-    gps, vecs, lens, conjs = get_baseline_redundancies(
-        bls, bl_vecs, tol=tol, include_conjugates=True, use_grid_alg=use_grid_alg
-    )
-    # Flip the baselines in the groups.
-    for gi, gp in enumerate(gps):
-        for bi, bl in enumerate(gp):
-            if bl in conjs:
-                gps[gi][bi] = baseline_index_flip(bl, Nants_telescope=Nants)
-
-    return gps, vecs, lens
-
-
-def mean_collapse(
-    arr, *, weights=None, axis=None, return_weights=False, return_weights_square=False
-):
-    """
-    Collapse by averaging data.
-
-    This is similar to np.average, except it handles infs (by giving them
-    zero weight) and zero weight axes (by forcing result to be inf with zero
-    output weight).
-
-    Parameters
-    ----------
-    arr : array
-        Input array to process.
-    weights : ndarray, optional
-        Weights for average. If None, will default to equal weight for all
-        non-infinite data.
-    axis : int or tuple, optional
-        Axis or axes to collapse (passed to np.sum). Default is all.
-    return_weights : bool
-        Whether to return sum of weights.
-    return_weights_square : bool
-        Whether to return the sum of the square of the weights. Default is False.
-
-    """
-    arr = copy.deepcopy(arr)  # avoid changing outside
-    if weights is None:
-        weights = np.ones_like(arr)
-    else:
-        weights = copy.deepcopy(weights)
-    weights = weights * np.logical_not(np.isinf(arr))
-    arr[np.isinf(arr)] = 0
-    weight_out = np.sum(weights, axis=axis)
-    if return_weights_square:
-        weights_square = weights**2
-        weights_square_out = np.sum(weights_square, axis=axis)
-    out = np.sum(weights * arr, axis=axis)
-    where = weight_out > 1e-10
-    out = np.true_divide(out, weight_out, where=where)
-    out = np.where(where, out, np.inf)
-    if return_weights and return_weights_square:
-        return out, weight_out, weights_square_out
-    elif return_weights:
-        return out, weight_out
-    elif return_weights_square:
-        return out, weights_square_out
-    else:
-        return out
-
-
-def absmean_collapse(
-    arr, *, weights=None, axis=None, return_weights=False, return_weights_square=False
-):
-    """
-    Collapse by averaging absolute value of data.
-
-    Parameters
-    ----------
-    arr : array
-        Input array to process.
-    weights : ndarray, optional
-        Weights for average. If None, will default to equal weight for all
-        non-infinite data.
-    axis : int or tuple, optional
-        Axis or axes to collapse (passed to np.sum). Default is all.
-    return_weights : bool
-        Whether to return sum of weights.
-    return_weights_square : bool
-        Whether to return the sum of the squares of the weights. Default is False.
-
-    """
-    return mean_collapse(
-        np.abs(arr),
-        weights=weights,
-        axis=axis,
-        return_weights=return_weights,
-        return_weights_square=return_weights_square,
-    )
-
-
-def quadmean_collapse(
-    arr, *, weights=None, axis=None, return_weights=False, return_weights_square=False
-):
-    """
-    Collapse by averaging in quadrature.
-
-    Parameters
-    ----------
-    arr : array
-        Input array to process.
-    weights : ndarray, optional
-        Weights for average. If None, will default to equal weight for all
-        non-infinite data.
-    axis : int or tuple, optional
-        Axis or axes to collapse (passed to np.sum). Default is all.
-    return_weights : bool
-        Whether to return sum of weights.
-    return_weights_square : bool
-        Whether to return the sum of the squares of the weights. Default is False.
-
-    """
-    out = mean_collapse(
-        np.abs(arr) ** 2,
-        weights=weights,
-        axis=axis,
-        return_weights=return_weights,
-        return_weights_square=return_weights_square,
-    )
-    if return_weights and return_weights_square:
-        return np.sqrt(out[0]), out[1], out[2]
-    elif return_weights or return_weights_square:
-        return np.sqrt(out[0]), out[1]
-    else:
-        return np.sqrt(out)
-
-
-def or_collapse(
-    arr, *, weights=None, axis=None, return_weights=False, return_weights_square=False
-):
-    """
-    Collapse using OR operation.
-
-    Parameters
-    ----------
-    arr : array
-        Input array to process.
-    weights : ndarray, optional
-        NOT USED, but kept for symmetry with other collapsing functions.
-    axis : int or tuple, optional
-        Axis or axes to collapse (take OR over). Default is all.
-    return_weights : bool
-        Whether to return dummy weights array.
-        NOTE: the dummy weights will simply be an array of ones.
-    return_weights_square : bool
-        NOT USED, but kept for symmetry with other collapsing functions.
-
-    """
-    if arr.dtype != np.bool_:
-        raise ValueError("Input to or_collapse function must be boolean array")
-    out = np.any(arr, axis=axis)
-    if (weights is not None) and not np.all(weights == weights.reshape(-1)[0]):
-        warnings.warn("Currently weights are not handled when OR-ing boolean arrays.")
-    if return_weights:
-        return out, np.ones_like(out, dtype=np.float64)
-    else:
-        return out
-
-
-def and_collapse(
-    arr, *, weights=None, axis=None, return_weights=False, return_weights_square=False
-):
-    """
-    Collapse using AND operation.
-
-    Parameters
-    ----------
-    arr : array
-        Input array to process.
-    weights : ndarray, optional
-        NOT USED, but kept for symmetry with other collapsing functions.
-    axis : int or tuple, optional
-        Axis or axes to collapse (take AND over). Default is all.
-    return_weights : bool
-        Whether to return dummy weights array.
-        NOTE: the dummy weights will simply be an array of ones.
-    return_weights_square : bool
-        NOT USED, but kept for symmetry with other collapsing functions.
-
-    """
-    if arr.dtype != np.bool_:
-        raise ValueError("Input to and_collapse function must be boolean array")
-    out = np.all(arr, axis=axis)
-    if (weights is not None) and not np.all(weights == weights.reshape(-1)[0]):
-        warnings.warn("Currently weights are not handled when AND-ing boolean arrays.")
-    if return_weights:
-        return out, np.ones_like(out, dtype=np.float64)
-    else:
-        return out
-
-
-def collapse(
-    arr,
-    alg,
-    *,
-    weights=None,
-    axis=None,
-    return_weights=False,
-    return_weights_square=False,
-):
-    """
-    Parent function to collapse an array with a given algorithm.
-
-    Parameters
-    ----------
-    arr : array
-        Input array to process.
-    alg : str
-        Algorithm to use. Must be defined in this function with a
-        corresponding subfunction above.
-    weights : ndarray, optional
-        Weights for collapse operation (e.g. weighted mean).
-        NOTE: Some subfunctions do not use the weights. See corresponding
-        docstrings.
-    axis : int or tuple, optional
-        Axis or axes to collapse. Default is all.
-    return_weights : bool
-        Whether to return sum of weights.
-    return_weights_square : bool
-        Whether to return the sum of the squares of the weights. Default is False.
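-
-    Examples
-    --------
-    A minimal, illustrative sketch (not from the original docstring), assuming
-    this module's import path; infinite values receive zero weight in the mean:
-
-    >>> import numpy as np
-    >>> from pyuvdata import utils
-    >>> print(utils.collapse(np.array([1.0, 2.0, 3.0, np.inf]), "mean"))
-    2.0
-    >>> print(utils.collapse(np.array([True, False, False]), "or"))
-    True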
- - """ - collapse_dict = { - "mean": mean_collapse, - "absmean": absmean_collapse, - "quadmean": quadmean_collapse, - "or": or_collapse, - "and": and_collapse, - } - try: - out = collapse_dict[alg]( - arr, - weights=weights, - axis=axis, - return_weights=return_weights, - return_weights_square=return_weights_square, - ) - except KeyError as err: - raise ValueError( - "Collapse algorithm must be one of: " - + ", ".join(collapse_dict.keys()) - + "." - ) from err - return out - - -def uvcalibrate( - uvdata, - uvcal, - *, - inplace=True, - prop_flags=True, - Dterm_cal=False, - flip_gain_conj=False, - delay_convention="minus", - undo=False, - time_check=True, - ant_check=True, -): - """ - Calibrate a UVData object with a UVCal object. - - Parameters - ---------- - uvdata : UVData object - UVData object to calibrate. - uvcal : UVCal object - UVCal object containing the calibration. - inplace : bool, optional - if True edit uvdata in place, else return a calibrated copy - prop_flags : bool, optional - if True, propagate calibration flags to data flags - and doesn't use flagged gains. Otherwise, uses flagged gains and - does not propagate calibration flags to data flags. - Dterm_cal : bool, optional - Calibrate the off-diagonal terms in the Jones matrix if present - in uvcal. Default is False. Currently not implemented. - flip_gain_conj : bool, optional - This function uses the UVData ant_1_array and ant_2_array to specify the - antennas in the UVCal object. By default, the conjugation convention, which - follows the UVData convention (i.e. ant2 - ant1), is that the applied - gain = ant1_gain * conjugate(ant2_gain). If the other convention is required, - set flip_gain_conj=True. - delay_convention : str, optional - Exponent sign to use in conversion of 'delay' to 'gain' cal_type - if the input uvcal is not inherently 'gain' cal_type. Default to 'minus'. - undo : bool, optional - If True, undo the provided calibration. i.e. apply the calibration with - flipped gain_convention. Flag propagation rules apply the same. - time_check : bool - Option to check that times match between the UVCal and UVData - objects if UVCal has a single time or time range. Times are always - checked if UVCal has multiple times. - ant_check : bool - Option to check that all antennas with data on the UVData - object have calibration solutions in the UVCal object. If this option is - set to False, uvcalibrate will proceed without erroring and data for - antennas without calibrations will be flagged. 
- - Returns - ------- - UVData, optional - Returns if not inplace - - """ - if uvcal.cal_type == "gain" and uvcal.wide_band: - raise ValueError( - "uvcalibrate currently does not support wide-band calibrations" - ) - if uvcal.cal_type == "delay" and uvcal.Nspws > 1: - # To fix this, need to make UVCal.convert_to_gain support multiple spws - raise ValueError( - "uvcalibrate currently does not support multi spectral window delay " - "calibrations" - ) - - if not inplace: - uvdata = uvdata.copy() - - # check both objects - uvdata.check() - uvcal.check() - - # Check whether the UVData antennas *that have data associated with them* - # have associated data in the UVCal object - uvdata_unique_nums = np.unique(np.append(uvdata.ant_1_array, uvdata.ant_2_array)) - uvdata.telescope.antenna_names = np.asarray(uvdata.telescope.antenna_names) - uvdata_used_antnames = np.array( - [ - uvdata.telescope.antenna_names[ - np.where(uvdata.telescope.antenna_numbers == antnum) - ][0] - for antnum in uvdata_unique_nums - ] - ) - uvcal_unique_nums = np.unique(uvcal.ant_array) - uvcal.telescope.antenna_names = np.asarray(uvcal.telescope.antenna_names) - uvcal_used_antnames = np.array( - [ - uvcal.telescope.antenna_names[ - np.where(uvcal.telescope.antenna_numbers == antnum) - ][0] - for antnum in uvcal_unique_nums - ] - ) - - ant_arr_match = uvcal_used_antnames.tolist() == uvdata_used_antnames.tolist() - - if not ant_arr_match: - # check more carefully - name_missing = [] - for this_ant_name in uvdata_used_antnames: - wh_ant_match = np.nonzero(uvcal_used_antnames == this_ant_name) - if wh_ant_match[0].size == 0: - name_missing.append(this_ant_name) - - if len(name_missing) > 0: - if len(name_missing) == uvdata_used_antnames.size: - # all antenna_names with data on UVData are missing on UVCal. - if not ant_check: - warnings.warn( - "All antenna names with data on UVData are missing " - "on UVCal. Since ant_check is False, calibration will " - "proceed but all data will be flagged." - ) - else: - raise ValueError( - "All antenna names with data on UVData are missing " - "on UVCal. To continue with calibration " - "(and flag all the data), set ant_check=False." - ) - else: - # Only some antenna_names with data on UVData are missing on UVCal - if not ant_check: - warnings.warn( - f"Antennas {name_missing} have data on UVData but are missing " - "on UVCal. Since ant_check is False, calibration will " - "proceed and the data for these antennas will be flagged." - ) - else: - raise ValueError( - f"Antennas {name_missing} have data on UVData but " - "are missing on UVCal. To continue calibration and " - "flag the data from missing antennas, set ant_check=False." - ) - - uvdata_times, uvd_time_ri = np.unique(uvdata.time_array, return_inverse=True) - downselect_cal_times = False - # time_range supercedes time_array. - if uvcal.time_range is not None: - if np.min(uvdata_times) < np.min(uvcal.time_range[:, 0]) or np.max( - uvdata_times - ) > np.max(uvcal.time_range[:, 1]): - if not time_check and uvcal.Ntimes == 1: - warnings.warn( - "Time_range on UVCal does not cover all UVData times " - "but time_check is False, so calibration " - "will be applied anyway." - ) - else: - msg = "Time_ranges on UVCal do not cover all UVData times." - if uvcal.Ntimes == 1: - msg = ( - "Time_range on UVCal does not cover all UVData times. " - "Set time_check=False to apply calibration anyway." - ) - else: - msg = "Time_ranges on UVCal do not cover all UVData times." 
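-                # The time_check escape hatch above only applies when UVCal has
-                # a single time range; multiple time ranges must cover all of
-                # the UVData times.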
- raise ValueError(msg) - - # now check in detail that all UVData times fall in a UVCal time range. - # also create the indexing array to match UVData blts to UVCal time inds - if uvcal.Ntimes > 1: - trange_ind_arr = np.full_like(uvdata.time_array, -1, dtype=int) - for tr_ind, trange in enumerate(uvcal.time_range): - time_inds = np.nonzero( - (uvdata_times >= trange[0]) & (uvdata_times <= trange[1]) - )[0] - for tind in time_inds: - trange_ind_arr[np.nonzero(uvd_time_ri == tind)[0]] = tr_ind - if np.any(trange_ind_arr < 0): - raise ValueError("Time_ranges on UVCal do not cover all UVData times.") - else: - if uvcal.Ntimes > 1 and uvcal.Ntimes < uvdata.Ntimes: - raise ValueError( - "The uvcal object has more than one time but fewer than the " - "number of unique times on the uvdata object." - ) - uvcal_times = np.unique(uvcal.time_array) - try: - time_arr_match = np.allclose( - uvcal_times, - uvdata_times, - atol=uvdata._time_array.tols[1], - rtol=uvdata._time_array.tols[0], - ) - except ValueError: - time_arr_match = False - - if not time_arr_match: - if uvcal.Ntimes == 1: - if not time_check: - warnings.warn( - "Times do not match between UVData and UVCal " - "but time_check is False, so calibration " - "will be applied anyway." - ) - else: - raise ValueError( - "Times do not match between UVData and UVCal. " - "Set time_check=False to apply calibration anyway. " - ) - else: - # check more carefully - uvcal_times_to_keep = [] - for this_time in uvdata_times: - wh_time_match = np.nonzero( - np.isclose( - uvcal.time_array - this_time, - 0, - atol=uvdata._time_array.tols[1], - rtol=uvdata._time_array.tols[0], - ) - ) - if wh_time_match[0].size > 0: - uvcal_times_to_keep.append(uvcal.time_array[wh_time_match][0]) - else: - raise ValueError( - f"Time {this_time} exists on UVData but not on UVCal." - ) - if len(uvcal_times_to_keep) < uvcal.Ntimes: - downselect_cal_times = True - - downselect_cal_freq = False - if uvcal.freq_array is not None: - uvdata_freq_arr_use = uvdata.freq_array - uvcal_freq_arr_use = uvcal.freq_array - try: - freq_arr_match = np.allclose( - np.sort(uvcal_freq_arr_use), - np.sort(uvdata_freq_arr_use), - atol=uvdata._freq_array.tols[1], - rtol=uvdata._freq_array.tols[0], - ) - except ValueError: - freq_arr_match = False - - if freq_arr_match is False: - # check more carefully - uvcal_freqs_to_keep = [] - for this_freq in uvdata_freq_arr_use: - wh_freq_match = np.nonzero( - np.isclose( - uvcal.freq_array - this_freq, - 0, - atol=uvdata._freq_array.tols[1], - rtol=uvdata._freq_array.tols[0], - ) - ) - if wh_freq_match[0].size > 0: - uvcal_freqs_to_keep.append(uvcal.freq_array[wh_freq_match][0]) - else: - raise ValueError( - f"Frequency {this_freq} exists on UVData but not on UVCal." - ) - if len(uvcal_freqs_to_keep) < uvcal.Nfreqs: - downselect_cal_freq = True - - # check if uvdata.telescope.x_orientation isn't set (it's required for uvcal) - uvd_x = uvdata.telescope.x_orientation - if uvd_x is None: - # use the uvcal x_orientation throughout - uvd_x = uvcal.telescope.x_orientation - warnings.warn( - "UVData object does not have `x_orientation` specified but UVCal does. 
" - "Matching based on `x` and `y` only " - ) - - uvdata_pol_strs = polnum2str(uvdata.polarization_array, x_orientation=uvd_x) - uvcal_pol_strs = jnum2str( - uvcal.jones_array, x_orientation=uvcal.telescope.x_orientation - ) - uvdata_feed_pols = { - feed for pol in uvdata_pol_strs for feed in POL_TO_FEED_DICT[pol] - } - for feed in uvdata_feed_pols: - # get diagonal jones str - jones_str = parse_jpolstr(feed, x_orientation=uvcal.telescope.x_orientation) - if jones_str not in uvcal_pol_strs: - raise ValueError( - f"Feed polarization {feed} exists on UVData but not on UVCal. " - ) - - # downselect UVCal times, frequencies - if downselect_cal_freq or downselect_cal_times: - if not downselect_cal_times: - uvcal_times_to_keep = None - elif not downselect_cal_freq: - uvcal_freqs_to_keep = None - - uvcal_use = uvcal.select( - times=uvcal_times_to_keep, frequencies=uvcal_freqs_to_keep, inplace=False - ) - - new_uvcal = True - else: - uvcal_use = uvcal - new_uvcal = False - - # input checks - if uvcal_use.cal_type == "delay": - if not new_uvcal: - # make a copy to convert to gain - uvcal_use = uvcal_use.copy() - new_uvcal = True - freq_array_use = uvdata.freq_array - channel_width = uvdata.channel_width - uvcal_use.convert_to_gain( - delay_convention=delay_convention, - freq_array=freq_array_use, - channel_width=channel_width, - ) - - # D-term calibration - if Dterm_cal: - # check for D-terms - if -7 not in uvcal_use.jones_array and -8 not in uvcal_use.jones_array: - raise ValueError( - "Cannot apply D-term calibration without -7 or -8" - "Jones polarization in uvcal object." - ) - raise NotImplementedError("D-term calibration is not yet implemented.") - - # No D-term calibration - else: - # key is number, value is name - uvdata_ant_dict = dict( - zip(uvdata.telescope.antenna_numbers, uvdata.telescope.antenna_names) - ) - # opposite: key is name, value is number - uvcal_ant_dict = dict( - zip(uvcal.telescope.antenna_names, uvcal.telescope.antenna_numbers) - ) - - # iterate over keys - for key in uvdata.get_antpairpols(): - # get indices for this key - blt_inds = uvdata.antpair2ind(key) - pol_ind = np.argmin( - np.abs(uvdata.polarization_array - polstr2num(key[2], uvd_x)) - ) - - # try to get gains for each antenna - ant1_num = key[0] - ant2_num = key[1] - - feed1, feed2 = POL_TO_FEED_DICT[key[2]] - try: - uvcal_ant1_num = uvcal_ant_dict[uvdata_ant_dict[ant1_num]] - except KeyError: - uvcal_ant1_num = None - try: - uvcal_ant2_num = uvcal_ant_dict[uvdata_ant_dict[ant2_num]] - except KeyError: - uvcal_ant2_num = None - - if (uvcal_ant1_num is None or uvcal_ant2_num is None) or not ( - uvcal_use._key_exists(antnum=uvcal_ant1_num, jpol=feed1) - and uvcal_use._key_exists(antnum=uvcal_ant2_num, jpol=feed2) - ): - uvdata.flag_array[blt_inds, :, pol_ind] = True - continue - - uvcal_key1 = (uvcal_ant1_num, feed1) - uvcal_key2 = (uvcal_ant2_num, feed2) - if flip_gain_conj: - gain = ( - np.conj(uvcal_use.get_gains(uvcal_key1)) - * uvcal_use.get_gains(uvcal_key2) - ).T # tranpose to match uvdata shape - else: - gain = ( - uvcal_use.get_gains(uvcal_key1) - * np.conj(uvcal_use.get_gains(uvcal_key2)) - ).T # tranpose to match uvdata shape - flag = (uvcal_use.get_flags(uvcal_key1) | uvcal_use.get_flags(uvcal_key2)).T - - if uvcal.time_range is not None and uvcal.Ntimes > 1: - gain = gain[trange_ind_arr[blt_inds], :] - flag = flag[trange_ind_arr[blt_inds], :] - - # propagate flags - if prop_flags: - mask = np.isclose(gain, 0.0) | flag - gain[mask] = 1.0 - uvdata.flag_array[blt_inds, :, pol_ind] += mask - - # apply 
to data - mult_gains = uvcal_use.gain_convention == "multiply" - if undo: - mult_gains = not mult_gains - if mult_gains: - uvdata.data_array[blt_inds, :, pol_ind] *= gain - else: - uvdata.data_array[blt_inds, :, pol_ind] /= gain - - # update attributes - uvdata.history += "\nCalibrated with pyuvdata.utils.uvcalibrate." - if undo: - uvdata.vis_units = "uncalib" - else: - if uvcal_use.gain_scale is not None: - uvdata.vis_units = uvcal_use.gain_scale - - if not inplace: - return uvdata - - -def apply_uvflag( - uvd, uvf, *, inplace=True, unflag_first=False, flag_missing=True, force_pol=True -): - """ - Apply flags from a UVFlag to a UVData instantiation. - - Note that if uvf.Nfreqs or uvf.Ntimes is 1, it will broadcast flags across - that axis. - - Parameters - ---------- - uvd : UVData object - UVData object to add flags to. - uvf : UVFlag object - A UVFlag object in flag mode. - inplace : bool - If True overwrite flags in uvd, otherwise return new object - unflag_first : bool - If True, completely unflag the UVData before applying flags. - Else, OR the inherent uvd flags with uvf flags. - flag_missing : bool - If input uvf is a baseline type and antpairs in uvd do not exist in uvf, - flag them in uvd. Otherwise leave them untouched. - force_pol : bool - If True, broadcast flags to all polarizations if they do not match. - Only works if uvf.Npols == 1. - - Returns - ------- - UVData - If not inplace, returns new UVData object with flags applied - - """ - # assertions - if uvf.mode != "flag": - raise ValueError("UVFlag must be flag mode") - - if not inplace: - uvd = uvd.copy() - - # make a deepcopy by default b/c it is generally edited inplace downstream - uvf = uvf.copy() - - # convert to baseline type - if uvf.type != "baseline": - # edits inplace - uvf.to_baseline(uvd, force_pol=force_pol) - - else: - # make sure polarizations match or force_pol - uvd_pols, uvf_pols = ( - uvd.polarization_array.tolist(), - uvf.polarization_array.tolist(), - ) - if set(uvd_pols) != set(uvf_pols): - if uvf.Npols == 1 and force_pol: - # if uvf is 1pol we can make them match: also edits inplace - uvf.polarization_array = uvd.polarization_array - uvf.Npols = len(uvf.polarization_array) - uvf_pols = uvf.polarization_array.tolist() - - else: - raise ValueError("Input uvf and uvd polarizations do not match") - - # make sure polarization ordering is correct: also edits inplace - uvf.polarization_array = uvf.polarization_array[ - [uvd_pols.index(pol) for pol in uvf_pols] - ] - - # check time and freq shapes match: if Ntimes or Nfreqs is 1, allow - # implicit broadcasting - if uvf.Ntimes == 1: - mismatch_times = False - elif uvf.Ntimes == uvd.Ntimes: - tdiff = np.unique(uvf.time_array) - np.unique(uvd.time_array) - mismatch_times = np.any(tdiff > np.max(np.abs(uvf._time_array.tols))) - else: - mismatch_times = True - if mismatch_times: - raise ValueError("UVFlag and UVData have mismatched time arrays.") - - if uvf.Nfreqs == 1: - mismatch_freqs = False - elif uvf.Nfreqs == uvd.Nfreqs: - fdiff = np.unique(uvf.freq_array) - np.unique(uvd.freq_array) - mismatch_freqs = np.any(fdiff > np.max(np.abs(uvf._freq_array.tols))) - else: - mismatch_freqs = True - if mismatch_freqs: - raise ValueError("UVFlag and UVData have mismatched frequency arrays.") - - # unflag if desired - if unflag_first: - uvd.flag_array[:] = False - - # iterate over antpairs and apply flags: TODO need to be able to handle - # conjugated antpairs - uvf_antpairs = uvf.get_antpairs() - for ap in uvd.get_antpairs(): - uvd_ap_inds = uvd.antpair2ind(ap) - if 
ap not in uvf_antpairs: - if flag_missing: - uvd.flag_array[uvd_ap_inds] = True - continue - uvf_ap_inds = uvf.antpair2ind(*ap) - # addition of boolean is OR - uvd.flag_array[uvd_ap_inds] += uvf.flag_array[uvf_ap_inds] - - uvd.history += "\nFlagged with pyuvdata.utils.apply_uvflags." - - if not inplace: - return uvd - - -def parse_ants(uv, ant_str, *, print_toggle=False, x_orientation=None): - """ - Get antpair and polarization from parsing an aipy-style ant string. - - Used to support the select function. Generates two lists of antenna pair - tuples and polarization indices based on parsing of the string ant_str. - If no valid polarizations (pseudo-Stokes params, or combinations of [lr] - or [xy]) or antenna numbers are found in ant_str, ant_pairs_nums and - polarizations are returned as None. - - Parameters - ---------- - uv : UVBase Object - A UVBased object that supports the following functions and parameters: - - get_ants - - get_antpairs - - get_pols - These are used to construct the baseline ant_pair_nums - and polarizations returned. - ant_str : str - String containing antenna information to parse. Can be 'all', - 'auto', 'cross', or combinations of antenna numbers and polarization - indicators 'l' and 'r' or 'x' and 'y'. Minus signs can also be used - in front of an antenna number or baseline to exclude it from being - output in ant_pairs_nums. If ant_str has a minus sign as the first - character, 'all,' will be appended to the beginning of the string. - See the tutorial for examples of valid strings and their behavior. - print_toggle : bool - Boolean for printing parsed baselines for a visual user check. - x_orientation : str, optional - Orientation of the physical dipole corresponding to what is - labelled as the x polarization ("east" or "north") to allow for - converting from E/N strings. If input uv object has an `x_orientation` - parameter and the input to this function is `None`, the value from the - object will be used. Any input given to this function will override the - value on the uv object. See corresonding parameter on UVData - for more details. - - Returns - ------- - ant_pairs_nums : list of tuples of int or None - List of tuples containing the parsed pairs of antenna numbers, or - None if ant_str is 'all' or a pseudo-Stokes polarizations. - polarizations : list of int or None - List of desired polarizations or None if ant_str does not contain a - polarization specification. - - """ - required_attrs = ["get_ants", "get_antpairs", "get_pols"] - if not all(hasattr(uv, attr) for attr in required_attrs): - raise ValueError( - "UVBased objects must have all the following attributes in order " - f"to call 'parse_ants': {required_attrs}." 
- ) - - if x_orientation is None and ( - hasattr(uv.telescope, "x_orientation") - and uv.telescope.x_orientation is not None - ): - x_orientation = uv.telescope.x_orientation - - ant_re = r"(\(((-?\d+[lrxy]?,?)+)\)|-?\d+[lrxy]?)" - bl_re = "(^(%s_%s|%s),?)" % (ant_re, ant_re, ant_re) - str_pos = 0 - ant_pairs_nums = [] - polarizations = [] - ants_data = uv.get_ants() - ant_pairs_data = uv.get_antpairs() - pols_data = uv.get_pols() - warned_ants = [] - warned_pols = [] - - if ant_str.startswith("-"): - ant_str = "all," + ant_str - - while str_pos < len(ant_str): - m = re.search(bl_re, ant_str[str_pos:]) - if m is None: - if ant_str[str_pos:].upper().startswith("ALL"): - if len(ant_str[str_pos:].split(",")) > 1: - ant_pairs_nums = uv.get_antpairs() - elif ant_str[str_pos:].upper().startswith("AUTO"): - for pair in ant_pairs_data: - if pair[0] == pair[1] and pair not in ant_pairs_nums: - ant_pairs_nums.append(pair) - elif ant_str[str_pos:].upper().startswith("CROSS"): - for pair in ant_pairs_data: - if not (pair[0] == pair[1] or pair in ant_pairs_nums): - ant_pairs_nums.append(pair) - elif ant_str[str_pos:].upper().startswith("PI"): - polarizations.append(polstr2num("pI")) - elif ant_str[str_pos:].upper().startswith("PQ"): - polarizations.append(polstr2num("pQ")) - elif ant_str[str_pos:].upper().startswith("PU"): - polarizations.append(polstr2num("pU")) - elif ant_str[str_pos:].upper().startswith("PV"): - polarizations.append(polstr2num("pV")) - else: - raise ValueError(f"Unparsable argument {ant_str}") - - comma_cnt = ant_str[str_pos:].find(",") - if comma_cnt >= 0: - str_pos += comma_cnt + 1 - else: - str_pos = len(ant_str) - else: - m = m.groups() - str_pos += len(m[0]) - if m[2] is None: - ant_i_list = [m[8]] - ant_j_list = list(uv.get_ants()) - else: - if m[3] is None: - ant_i_list = [m[2]] - else: - ant_i_list = m[3].split(",") - - if m[6] is None: - ant_j_list = [m[5]] - else: - ant_j_list = m[6].split(",") - - for ant_i in ant_i_list: - include_i = True - if isinstance(ant_i, str) and ant_i.startswith("-"): - ant_i = ant_i[1:] # nibble the - off the string - include_i = False - - for ant_j in ant_j_list: - include_j = True - if isinstance(ant_j, str) and ant_j.startswith("-"): - ant_j = ant_j[1:] - include_j = False - - pols = None - ant_i, ant_j = str(ant_i), str(ant_j) - if not ant_i.isdigit(): - ai = re.search(r"(\d+)([x,y,l,r])", ant_i).groups() - - if not ant_j.isdigit(): - aj = re.search(r"(\d+)([x,y,l,r])", ant_j).groups() - - if ant_i.isdigit() and ant_j.isdigit(): - ai = [ant_i, ""] - aj = [ant_j, ""] - elif ant_i.isdigit() and not ant_j.isdigit(): - if "x" in ant_j or "y" in ant_j: - pols = ["x" + aj[1], "y" + aj[1]] - else: - pols = ["l" + aj[1], "r" + aj[1]] - ai = [ant_i, ""] - elif not ant_i.isdigit() and ant_j.isdigit(): - if "x" in ant_i or "y" in ant_i: - pols = [ai[1] + "x", ai[1] + "y"] - else: - pols = [ai[1] + "l", ai[1] + "r"] - aj = [ant_j, ""] - elif not ant_i.isdigit() and not ant_j.isdigit(): - pols = [ai[1] + aj[1]] - - ant_tuple = (abs(int(ai[0])), abs(int(aj[0]))) - - # Order tuple according to order in object - if ant_tuple in ant_pairs_data: - pass - elif ant_tuple[::-1] in ant_pairs_data: - ant_tuple = ant_tuple[::-1] - else: - if not ( - ant_tuple[0] in ants_data or ant_tuple[0] in warned_ants - ): - warned_ants.append(ant_tuple[0]) - if not ( - ant_tuple[1] in ants_data or ant_tuple[1] in warned_ants - ): - warned_ants.append(ant_tuple[1]) - if pols is not None: - for pol in pols: - if not (pol.lower() in pols_data or pol in warned_pols): - 
warned_pols.append(pol) - continue - - if include_i and include_j: - if ant_tuple not in ant_pairs_nums: - ant_pairs_nums.append(ant_tuple) - if pols is not None: - for pol in pols: - if ( - pol.lower() in pols_data - and polstr2num(pol, x_orientation=x_orientation) - not in polarizations - ): - polarizations.append( - polstr2num(pol, x_orientation=x_orientation) - ) - elif not ( - pol.lower() in pols_data or pol in warned_pols - ): - warned_pols.append(pol) - else: - if pols is not None: - for pol in pols: - if pol.lower() in pols_data: - if uv.Npols == 1 and [pol.lower()] == pols_data: - ant_pairs_nums.remove(ant_tuple) - if ( - polstr2num(pol, x_orientation=x_orientation) - in polarizations - ): - polarizations.remove( - polstr2num(pol, x_orientation=x_orientation) - ) - elif not ( - pol.lower() in pols_data or pol in warned_pols - ): - warned_pols.append(pol) - elif ant_tuple in ant_pairs_nums: - ant_pairs_nums.remove(ant_tuple) - - if ant_str.upper() == "ALL": - ant_pairs_nums = None - elif len(ant_pairs_nums) == 0: - if not ant_str.upper() in ["AUTO", "CROSS"]: - ant_pairs_nums = None - - if len(polarizations) == 0: - polarizations = None - else: - polarizations.sort(reverse=True) - - if print_toggle: - print("\nParsed antenna pairs:") - if ant_pairs_nums is not None: - for pair in ant_pairs_nums: - print(pair) - - print("\nParsed polarizations:") - if polarizations is not None: - for pol in polarizations: - print(polnum2str(pol, x_orientation=x_orientation)) - - if len(warned_ants) > 0: - warnings.warn( - "Warning: Antenna number {a} passed, but not present " - "in the ant_1_array or ant_2_array".format( - a=(",").join(map(str, warned_ants)) - ) - ) - - if len(warned_pols) > 0: - warnings.warn( - "Warning: Polarization {p} is not present in the polarization_array".format( - p=(",").join(warned_pols).upper() - ) - ) - - return ant_pairs_nums, polarizations - - -def _combine_filenames(filename1, filename2): - """Combine the filename attribute from multiple UVBase objects. - - The 4 cases are: - 1. `filename1` has been set, `filename2` has not - 2. `filename1` has not been set, `filename2` has - 3. `filename1` and `filename2` both have been set - 4. `filename1` and `filename2` both have not been set - In case (1), we do not want to update the attribute, because it is - already set correctly. In case (2), we want to replace `filename1` - with the value from `filename2. In case (3), we want to take the union of - the sets of the filenames. In case (4), we want the filename attribute - to still be `None`. - - Parameters - ---------- - filename1 : list of str or None - The list of filenames for the first UVBase object. If it is not set, it - should be `None`. - filename2 : list of str or None - The list of filenames for the second UVData object. If it is not set, it - should be `None`. - - Returns - ------- - combined_filenames : list of str or None - The combined list, with potentially duplicate entries removed. - """ - combined_filenames = filename1 - if filename1 is not None: - if filename2 is not None: - combined_filenames = sorted(set(filename1).union(set(filename2))) - elif filename2 is not None: - combined_filenames = filename2 - - return combined_filenames - - -def _get_slice_len(s, axlen): - """ - Get length of a slice s into array of len axlen. 
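# A hedged usage sketch for parse_ants above; `uvd` is assumed to be a UVData
# object that contains the baseline (1, 2) (values are illustrative):
#
#     ant_pairs, pols = parse_ants(uvd, "1_2")
#     # -> ant_pairs == [(1, 2)], pols is None (no polarization in the string)
#     ant_pairs, pols = parse_ants(uvd, "all")
#     # -> (None, None): "all" places no restriction on baselines or pols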
- - Parameters - ---------- - s : slice object - Slice object to index with - axlen : int - Length of axis s slices into - - Returns - ------- - int - Length of slice object - """ - if s.start is None: - start = 0 - else: - start = s.start - if s.stop is None: - stop = axlen - else: - stop = np.min([s.stop, axlen]) - if s.step is None: - step = 1 - else: - step = s.step - - return ((stop - 1 - start) // step) + 1 - - -def _get_dset_shape(dset, indices): - """ - Given a tuple of indices, determine the indexed array shape. - - Parameters - ---------- - dset : numpy array or h5py dataset - A numpy array or a reference to an HDF5 dataset on disk. Requires the - `dset.shape` attribute exists and returns a tuple. - indices : tuple - A tuple with the indices to extract along each dimension of dset. - Each element should contain a list of indices, a slice element, - or a list of slice elements that will be concatenated after slicing. - For data arrays with 4 dimensions, the second dimension (the old spw axis) - should not be included because it can only be length one. - - Returns - ------- - tuple - a tuple with the shape of the indexed array - tuple - a tuple with indices used (will be different than input if dset has - 4 dimensions and indices has 3 dimensions) - """ - dset_shape = list(dset.shape) - if len(dset_shape) == 4 and len(indices) == 3: - indices = (indices[0], np.s_[:], indices[1], indices[2]) - - for i, inds in enumerate(indices): - # check for integer - if isinstance(inds, (int, np.integer)): - dset_shape[i] = 1 - # check for slice object - if isinstance(inds, slice): - dset_shape[i] = _get_slice_len(inds, dset_shape[i]) - # check for list - if isinstance(inds, list): - # check for list of integers - if isinstance(inds[0], (int, np.integer)): - dset_shape[i] = len(inds) - elif isinstance(inds[0], slice): - dset_shape[i] = sum((_get_slice_len(s, dset_shape[i]) for s in inds)) - - return dset_shape, indices - - -def _convert_to_slices( - indices, *, max_nslice_frac=0.1, max_nslice=None, return_index_on_fail=False -): - """ - Convert list of indices to a list of slices. - - Parameters - ---------- - indices : list - A 1D list of integers for array indexing (boolean ndarrays are also supported). - max_nslice_frac : float - A float from 0 -- 1. If the number of slices - needed to represent input 'indices' divided by len(indices) - exceeds this fraction, then we determine that we cannot - easily represent 'indices' with a list of slices. - max_nslice : int - Optional argument, defines the maximum number of slices for determining if - `indices` can be easily represented with a list of slices. If set, then - the argument supplied to `max_nslice_frac` is ignored. - return_index_on_fail : bool - If set to True and the list of input indexes cannot easily be respresented by - a list of slices (as defined by `max_nslice` or `max_nslice_frac`), then return - the input list of index values instead of a list of suboptimal slices. - - Returns - ------- - slice_list : list - Nominally the list of slice objects used to represent indices. However, if - `return_index_on_fail=True` and input indexes cannot easily be respresented, - return a 1-element list containing the input for `indices`. - check : bool - If True, indices is easily represented by slices - (`max_nslice_frac` or `max_nslice` conditions met), otherwise False. 
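# Spot-check of _get_slice_len above against the pure-Python equivalent
# (a sketch):
#
#     _get_slice_len(slice(None), 10)       # -> 10
#     _get_slice_len(slice(2, 100, 3), 10)  # -> 3, same as len(range(2, 10, 3))
#
# i.e. it returns len(range(*s.indices(axlen))) without building the range.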
- - Notes - ----- - Example: - if: indices = [1, 2, 3, 4, 10, 11, 12, 13, 14] - then: slices = [slice(1, 5, 1), slice(11, 15, 1)] - """ - # check for already a slice or a single index position - if isinstance(indices, slice): - return [indices], True - if isinstance(indices, (int, np.integer)): - return [slice(indices, indices + 1, 1)], True - - # check for boolean index - if isinstance(indices, np.ndarray) and (indices.dtype == bool): - eval_ind = np.where(indices)[0] - else: - eval_ind = indices - # assert indices is longer than 2, or return trivial solutions - if len(eval_ind) == 0: - return [slice(0, 0, 0)], False - if len(eval_ind) <= 2: - return [ - slice(eval_ind[0], eval_ind[-1] + 1, max(eval_ind[-1] - eval_ind[0], 1)) - ], True - - # Catch the simplest case of "give me a single slice or exit" - if (max_nslice == 1) and return_index_on_fail: - step = eval_ind[1] - eval_ind[0] - if all(np.diff(eval_ind) == step): - return [slice(eval_ind[0], eval_ind[-1] + 1, step)], True - return [indices], False - - # setup empty slices list - Ninds = len(eval_ind) - slices = [] - - # iterate over indices - start = last_step = None - for ind in eval_ind: - if last_step is None: - # Check if this is the first slice, in which case start is None - if start is None: - start = ind - continue - last_step = ind - start - last_ind = ind - continue - - # calculate step from previous index - step = ind - last_ind - - # if step != last_step, this ends the slice - if step != last_step: - # append to list - slices.append(slice(start, last_ind + 1, last_step)) - - # setup next step - start = ind - last_step = None - - last_ind = ind - - # Append the last slice - slices.append(slice(start, ind + 1, last_step)) - - # determine whether slices are a reasonable representation, and determine max_nslice - # if only max_nslice_frac was supplied. - if max_nslice is None: - max_nslice = max_nslice_frac * Ninds - check = len(slices) <= max_nslice - - if return_index_on_fail and not check: - return [indices], check - else: - return slices, check - - -def _index_dset(dset, indices, *, input_array=None): - """ - Index a UVH5 data, flags or nsamples h5py dataset to get data or overwrite data. - - If no `input_array` is passed, this function extracts the data at the indices - and returns it. If `input_array` is passed, this function replaced the data at the - indices with the input array. - - Parameters - ---------- - dset : h5py dataset - A reference to an HDF5 dataset on disk. - indices : tuple - A tuple with the indices to extract along each dimension of dset. - Each element should contain a list of indices, a slice element, - or a list of slice elements that will be concatenated after slicing. - Indices must be provided such that all dimensions can be indexed - simultaneously. This should have a length equal to the length of the dset, - with an exception to support the old array shape for uvdata arrays (in that - case the dset is length 4 but the second dimension is shallow, so only three - indices need to be passed). - input_array : ndarray, optional - Array to be copied into the dset at the indices. If not provided, the data in - the dset is indexed and returned. - - Returns - ------- - ndarray or None - The indexed dset if the `input_array` parameter is not used. - - Notes - ----- - This makes and fills an empty array with dset indices. - For trivial indexing, (e.g. a trivial slice), constructing - a new array and filling it is suboptimal over direct - indexing, e.g. dset[indices]. 
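# The Notes example from _convert_to_slices above, made concrete (a sketch;
# a looser max_nslice_frac is needed for the check to pass with only 9 indices):
#
#     inds = [1, 2, 3, 4, 10, 11, 12, 13, 14]
#     slices, check = _convert_to_slices(inds, max_nslice_frac=0.5)
#     # -> slices == [slice(1, 5, 1), slice(11, 15, 1)], check is True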
- This function specializes in repeated slices over the same axis, - e.g. if indices is [[slice(0, 5), slice(10, 15), ...], ..., ] - """ - # get dset and arr shape - dset_shape = dset.shape - arr_shape, indices = _get_dset_shape(dset, indices) - - if input_array is None: - # create empty array of dset dtype - arr = np.empty(arr_shape, dtype=dset.dtype) - else: - arr = input_array - - # get arr and dset indices for each dimension in indices - dset_indices = [] - arr_indices = [] - nselects_per_dim = [] - for i, dset_inds in enumerate(indices): - if isinstance(dset_inds, (int, np.integer)): - # this dimension is len 1, so slice is fine - arr_indices.append([slice(None)]) - dset_indices.append([[dset_inds]]) - nselects_per_dim.append(1) - - elif isinstance(dset_inds, slice): - # this dimension is just a slice, so slice is fine - arr_indices.append([slice(None)]) - dset_indices.append([dset_inds]) - nselects_per_dim.append(1) - - elif isinstance(dset_inds, (list, np.ndarray)): - if isinstance(dset_inds[0], (int, np.integer)): - # this is a list of integers, append slice - arr_indices.append([slice(None)]) - dset_indices.append([dset_inds]) - nselects_per_dim.append(1) - elif isinstance(dset_inds[0], slice): - # this is a list of slices, need list of slice lens - slens = [_get_slice_len(s, dset_shape[i]) for s in dset_inds] - ssums = [sum(slens[:j]) for j in range(len(slens))] - arr_inds = [slice(s, s + l) for s, l in zip(ssums, slens)] - arr_indices.append(arr_inds) - dset_indices.append(dset_inds) - nselects_per_dim.append(len(dset_inds)) - - # iterate through all selections and fill the array - total_selects = np.prod(nselects_per_dim) - axis_arrays = [] - for nsel in nselects_per_dim: - axis_arrays.append(np.arange(nsel, dtype=int)) - sel_index_arrays = list(np.meshgrid(*axis_arrays)) - for ind, array in enumerate(sel_index_arrays): - sel_index_arrays[ind] = array.flatten() - for sel in np.arange(total_selects): - sel_arr_indices = [] - sel_dset_indices = [] - for dim in np.arange(len(dset_shape)): - this_index = (sel_index_arrays[dim])[sel] - sel_arr_indices.append(arr_indices[dim][this_index]) - sel_dset_indices.append(dset_indices[dim][this_index]) - if input_array is None: - # index dset and assign to arr - arr[(*sel_arr_indices,)] = dset[(*sel_dset_indices,)] - else: - # index arr and assign to dset - dset[(*sel_dset_indices,)] = arr[(*sel_arr_indices,)] - - if input_array is None: - return arr - else: - return - - -def determine_blt_order( - *, time_array, ant_1_array, ant_2_array, baseline_array, Nbls, Ntimes -) -> tuple[str] | None: - """Get the blt order from analysing metadata.""" - times = time_array - ant1 = ant_1_array - ant2 = ant_2_array - bls = baseline_array - - time_bl = True - time_a = True - time_b = True - bl_time = True - a_time = True - b_time = True - bl_order = True - a_order = True - b_order = True - time_order = True - - if Nbls == 1 and Ntimes == 1: - return ("baseline", "time") # w.l.o.g. 
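# The two regular orderings the loop below distinguishes, shown for
# 2 baselines and 2 times (illustrative values):
#     ("time", "baseline"): times = [t0, t0, t1, t1], bls = [b0, b1, b0, b1]
#     ("baseline", "time"): times = [t0, t1, t0, t1], bls = [b0, b0, b1, b1]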
- - for i, (t, a, b, bl) in enumerate( - zip(times[1:], ant1[1:], ant2[1:], bls[1:]), start=1 - ): - on_bl_boundary = i % Nbls == 0 - on_time_boundary = i % Ntimes == 0 - - if t < times[i - 1]: - time_bl = False - time_a = False - time_b = False - time_order = False - - if not on_time_boundary: - bl_time = False - a_time = False - b_time = False - - if bl == bls[i - 1]: - bl_time = False - if a == ant1[i - 1]: - a_time = False - if b == ant2[i - 1]: - b_time = False - - elif t == times[i - 1]: - if bl < bls[i - 1]: - time_bl = False - if a < ant1[i - 1]: - time_a = False - if b < ant2[i - 1]: - time_b = False - - if bl < bls[i - 1]: - bl_time = False - bl_order = False - if not on_bl_boundary: - time_bl = False - if a < ant1[i - 1]: - a_time = False - a_order = False - if not on_bl_boundary: - time_a = False - if b < ant2[i - 1]: - b_time = False - b_order = False - if not on_bl_boundary: - time_b = False - - if not any( - ( - time_bl, - time_a, - time_b, - time_bl, - bl_time, - a_time, - b_time, - bl_order, - a_order, - b_order, - time_order, - ) - ): - break - - if Nbls > 1 and Ntimes > 1: - assert not ( - (time_bl and bl_time) - or (time_a and a_time) - or (time_b and b_time) - or (time_order and a_order) - or (time_order and b_order) - or (a_order and b_order) - or (time_order and bl_order) - ), ( - "Something went wrong when trying to determine the order of the blts axis. " - "Please raise an issue on github, as this is not meant to happen." - "None of the following should ever be True: \n" - f"\ttime_bl and bl_time: {time_bl and bl_time}\n" - f"\ttime_a and a_time: {time_a and a_time}\n" - f"\ttime_b and b_time: {time_b and b_time}\n" - f"\ttime_order and a_order: {time_order and a_order}\n" - f"\ttime_order and b_order: {time_order and b_order}\n" - f"\ta_order and b_order: {a_order and b_order}\n" - f"\ttime_order and bl_order: {time_order and bl_order}\n\n" - "Please include the following information in your issue:\n" - f"Nbls: {Nbls}\n" - f"Ntimes: {Ntimes}\n" - f"TIMES: {times}\n" - f"ANT1: {ant1}\n" - f"ANT2: {ant2}\n" - f"BASELINES: {bls}\n" - ) - - if time_bl: - return ("time", "baseline") - if bl_time: - return ("baseline", "time") - if time_a: - return ("time", "ant1") - if a_time: - return ("ant1", "time") - if time_b: - return ("time", "ant2") - if b_time: - return ("ant2", "time") - if bl_order: - return ("baseline",) - if a_order: - return ("ant1",) - if b_order: - return ("ant2",) - if time_order: - return ("time",) - - return None - - -def determine_rectangularity( - *, - time_array: np.ndarray, - baseline_array: np.ndarray, - nbls: int, - ntimes: int, - blt_order: str | tuple[str] | None = None, -): - """Determine if the data is rectangular or not. - - Parameters - ---------- - time_array : array_like - Array of times in JD. - baseline_array : array_like - Array of baseline integers. - nbls : int - Number of baselines. - ntimes : int - Number of times. - blt_order : str or tuple of str, optional - If known, pass the blt_order, which can short-circuit the determination - of rectangularity. - - Returns - ------- - is_rect : bool - True if the data is rectangular, False otherwise. - time_axis_faster_than_bls : bool - True if the data is rectangular and the time axis is the last axis (i.e. times - change first, then bls). False either if baselines change first, OR if it is - not rectangular. 
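# A concrete reading of these return values (a sketch): with nbls=2, ntimes=3,
#     time_array     = [t0, t0, t1, t1, t2, t2]
#     baseline_array = [b0, b1, b0, b1, b0, b1]
# the layout is rectangular with baselines varying fastest, so the function
# returns (True, False); the transposed layout would return (True, True).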
- - Notes - ----- - Rectangular data is defined as data for which using regular slicing of size Ntimes - or Nbls will give you either all the same time and all different baselines, or - vice versa. This does NOT require that the baselines and times are sorted within - that structure. - """ - # check if the data is rectangular - time_first = True - bl_first = True - - if time_array.size != nbls * ntimes: - return False, False - elif nbls * ntimes == 1: - return True, True - elif nbls == 1: - return True, True - elif ntimes == 1: - return True, False - elif blt_order == ("baseline", "time"): - return True, True - elif blt_order == ("time", "baseline"): - return True, False - - # That's all the easiest checks. - if time_array[1] == time_array[0]: - time_first = False - if baseline_array[1] == baseline_array[0]: - bl_first = False - if not time_first and not bl_first: - return False, False - - if time_first: - time_array = time_array.reshape((nbls, ntimes)) - baseline_array = baseline_array.reshape((nbls, ntimes)) - if np.sum(np.abs(np.diff(time_array, axis=0))) != 0: - return False, False - if (np.diff(baseline_array, axis=1) != 0).any(): - return False, False - return True, True - elif bl_first: - time_array = time_array.reshape((ntimes, nbls)) - baseline_array = baseline_array.reshape((ntimes, nbls)) - if np.sum(np.abs(np.diff(time_array, axis=1))) != 0: - return False, False - if (np.diff(baseline_array, axis=0) != 0).any(): - return False, False - return True, False - - -def slicify(ind: slice | None | IterableType[int]) -> slice | None | IterableType[int]: - """Convert an iterable of integers into a slice object if possible.""" - if ind is None or isinstance(ind, slice): - return ind - if len(ind) == 0: - return None - - if len(set(np.ediff1d(ind))) <= 1: - return slice(ind[0], ind[-1] + 1, ind[1] - ind[0] if len(ind) > 1 else 1) - else: - # can't slicify - return ind - - -def look_in_catalog( - phase_center_catalog, - *, - cat_name=None, - cat_type=None, - cat_lon=None, - cat_lat=None, - cat_frame=None, - cat_epoch=None, - cat_times=None, - cat_pm_ra=None, - cat_pm_dec=None, - cat_dist=None, - cat_vrad=None, - ignore_name=False, - target_cat_id=None, - phase_dict=None, -): - """ - Check the catalog to see if an existing entry matches provided data. - - This is a helper function for verifying if an entry already exists within - the catalog, contained within the supplied phase center catalog. - - Parameters - ---------- - phase_center_catalog : dict - Dictionary containing the entries to check. - cat_name : str - Name of the phase center, which should match a the value of "cat_name" - inside an entry of `phase_center_catalog`. - cat_type : str - Type of phase center of the entry. Must be one of: - "sidereal" (fixed RA/Dec), - "ephem" (RA/Dec that moves with time), - "driftscan" (fixed az/el position), - "unprojected" (no w-projection, equivalent to the old - `phase_type` == "drift"). - cat_lon : float or ndarray - Value of the longitudinal coordinate (e.g., RA, Az, l) in radians of the - phase center. No default unless `cat_type="unprojected"`, in which case the - default is zero. Expected to be a float for sidereal and driftscan phase - centers, and an ndarray of floats of shape (Npts,) for ephem phase centers. - cat_lat : float or ndarray - Value of the latitudinal coordinate (e.g., Dec, El, b) in radians of the - phase center. No default unless `cat_type="unprojected"`, in which case the - default is pi/2. 
Expected to be a float for sidereal and driftscan phase - centers, and an ndarray of floats of shape (Npts,) for ephem phase centers. - cat_frame : str - Coordinate frame that cat_lon and cat_lat are given in. Only used for - sidereal and ephem phase centers. Can be any of the several supported frames - in astropy (a limited list: fk4, fk5, icrs, gcrs, cirs, galactic). - cat_epoch : str or float - Epoch of the coordinates, only used when cat_frame = fk4 or fk5. Given - in units of fractional years, either as a float or as a string with the - epoch abbreviation (e.g, Julian epoch 2000.0 would be J2000.0). - cat_times : ndarray of floats - Only used when `cat_type="ephem"`. Describes the time for which the values - of `cat_lon` and `cat_lat` are caclulated, in units of JD. Shape is (Npts,). - cat_pm_ra : float - Proper motion in RA, in units of mas/year. Only used for sidereal phase - centers. - cat_pm_dec : float - Proper motion in Dec, in units of mas/year. Only used for sidereal phase - centers. - cat_dist : float or ndarray of float - Distance of the source, in units of pc. Only used for sidereal and ephem - phase centers. Expected to be a float for sidereal and driftscan phase - centers, and an ndarray of floats of shape (Npts,) for ephem phase centers. - cat_vrad : float or ndarray of float - Radial velocity of the source, in units of km/s. Only used for sidereal and - ephem phase centers. Expected to be a float for sidereal and driftscan phase - centers, and an ndarray of floats of shape (Npts,) for ephem phase centers. - ignore_name : bool - Nominally, this method will only look at entries where `cat_name` - matches the name of an entry in the catalog. However, by setting this to - True, the method will search all entries in the catalog and see if any - match all of the provided data (excluding `cat_name`). - target_cat_id : int - Optional argument to specify a particular cat_id to check against. - phase_dict : dict - Instead of providing individual parameters, one may provide a dict which - matches that format used within `phase_center_catalog` for checking for - existing entries. If used, all other parameters (save for `ignore_name` - and `cat_name`) are disregarded. - - Returns - ------- - cat_id : int or None - The unique ID number for the phase center added to the internal catalog. - This value is used in the `phase_center_id_array` attribute to denote which - source a given baseline-time corresponds to. If no catalog entry matches, - then None is returned. - cat_diffs : int - The number of differences between the information provided and the catalog - entry contained within `phase_center_catalog`. If everything matches, then - `cat_diffs=0`. - """ - # 1 marcsec tols - radian_tols = (0, RADIAN_TOL) - default_tols = (1e-5, 1e-8) - match_id = None - match_diffs = 99999 - - if (cat_name is None) and (not ignore_name): - if phase_dict is None: - raise ValueError( - "Must specify either phase_dict or cat_name if ignore_name=False." - ) - cat_name = phase_dict["cat_name"] - - if cat_type is not None and cat_type not in allowed_cat_types: - raise ValueError(f"If set, cat_type must be one of {allowed_cat_types}") - - # Emulate the defaults that are set if None is detected for - # unprojected and driftscan types. 
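# (e.g., a driftscan search made with cat_lon, cat_lat and cat_frame all None
# is compared against (0.0, pi/2, "altaz"), mirroring the defaults stored for
# such entries when they are created.)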
- if cat_type in ["unprojected", "driftscan"]: - if cat_lon is None: - cat_lon = 0.0 - if cat_lat is None: - cat_lat = np.pi / 2 - if cat_frame is None: - cat_frame = "altaz" - - if phase_dict is None: - phase_dict = { - "cat_type": cat_type, - "cat_lon": cat_lon, - "cat_lat": cat_lat, - "cat_frame": cat_frame, - "cat_epoch": cat_epoch, - "cat_times": cat_times, - "cat_pm_ra": cat_pm_ra, - "cat_pm_dec": cat_pm_dec, - "cat_dist": cat_dist, - "cat_vrad": cat_vrad, - } - - tol_dict = { - "cat_type": None, - "cat_lon": radian_tols, - "cat_lat": radian_tols, - "cat_frame": None, - "cat_epoch": None, - "cat_times": default_tols, - "cat_pm_ra": default_tols, - "cat_pm_dec": default_tols, - "cat_dist": default_tols, - "cat_vrad": default_tols, - } - - if target_cat_id is not None: - if target_cat_id not in phase_center_catalog: - raise ValueError(f"No phase center with ID number {target_cat_id}.") - name_dict = {target_cat_id: phase_center_catalog[target_cat_id]["cat_name"]} - else: - name_dict = { - key: cat_dict["cat_name"] for key, cat_dict in phase_center_catalog.items() - } - - for cat_id, name in name_dict.items(): - cat_diffs = 0 - if (cat_name != name) and (not ignore_name): - continue - check_dict = phase_center_catalog[cat_id] - for key in tol_dict.keys(): - if phase_dict.get(key) is not None: - if check_dict.get(key) is None: - cat_diffs += 1 - elif tol_dict[key] is None: - # If no tolerance specified, expect attributes to be identical - cat_diffs += phase_dict.get(key) != check_dict.get(key) - else: - # allclose will throw a Value error if you have two arrays - # of different shape, which we can catch to flag that - # the two arrays are actually not within tolerance. - if np.shape(phase_dict[key]) != np.shape(check_dict[key]): - cat_diffs += 1 - else: - cat_diffs += not np.allclose( - phase_dict[key], - check_dict[key], - tol_dict[key][0], - tol_dict[key][1], - ) - else: - cat_diffs += check_dict[key] is not None - if (cat_diffs == 0) or (cat_name == name): - if cat_diffs < match_diffs: - # If our current match is an improvement on any previous matches, - # then record it as the best match. - match_id = cat_id - match_diffs = cat_diffs - if match_diffs == 0: - # If we have a total match, we can bail at this point - break - - return match_id, match_diffs - - -def look_for_name(phase_center_catalog, cat_name): - """ - Look up catalog IDs which match a given name. - - Parameters - ---------- - phase_center_catalog : dict - Catalog to look for matching names in. - cat_name : str or list of str - Name to match against entries in phase_center_catalog. - - Returns - ------- - cat_id_list : list - List of all catalog IDs which match the given name. - """ - if isinstance(cat_name, str): - return [ - pc_id - for pc_id, pc_dict in phase_center_catalog.items() - if pc_dict["cat_name"] == cat_name - ] - else: - return [ - pc_id - for pc_id, pc_dict in phase_center_catalog.items() - if pc_dict["cat_name"] in cat_name - ] - - -def print_phase_center_info( - phase_center_catalog, - catalog_identifier=None, - *, - hms_format=None, - return_str=False, - print_table=True, -): - """ - Print out the details of the phase centers. - - Prints out an ASCII table that contains the details of the supploed phase center - catalog, which typically acts as the internal source catalog for various UV objects. - - Parameters - ---------- - phase_center_catalog : dict - Dict containing the list of phase centers (and corresponding data) to be - printed out. 
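# A hedged usage sketch for look_in_catalog / look_for_name above; the catalog
# entry is illustrative but follows the documented field names:
#
#     catalog = {
#         0: {"cat_name": "zenith", "cat_type": "driftscan", "cat_lon": 0.0,
#             "cat_lat": np.pi / 2, "cat_frame": "altaz", "cat_epoch": None,
#             "cat_times": None, "cat_pm_ra": None, "cat_pm_dec": None,
#             "cat_dist": None, "cat_vrad": None}
#     }
#     look_for_name(catalog, "zenith")  # -> [0]
#     look_in_catalog(catalog, cat_name="zenith", cat_type="driftscan")
#     # -> (0, 0): entry 0 matches with zero differences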
- catalog_identifier : str or int or list of str or int - Optional parameter which, if provided, will cause the method to only return - information on the phase center(s) with the matching name(s) or catalog ID - number(s). Default is to print out information on all catalog entries. - hms_format : bool - Optional parameter, which if selected, can be used to force coordinates to - be printed out in Hours-Min-Sec (if set to True) or Deg-Min-Sec (if set to - False) format. Default is to print out in HMS if all the objects have - coordinate frames of icrs, gcrs, fk5, fk4, and top; otherwise, DMS format - is used. - return_str: bool - If set to True, the method returns an ASCII string which contains all the - table infrmation. Default is False. - print_table : bool - If set to True, prints the table to the terminal window. Default is True. - - Returns - ------- - table_str : bool - If return_str=True, an ASCII string containing the entire table text - - Raises - ------ - ValueError - If `cat_name` matches no keys in `phase_center_catalog`. - """ - r2d = 180.0 / np.pi - r2m = 60.0 * 180.0 / np.pi - r2s = 3600.0 * 180.0 / np.pi - ra_frames = ["icrs", "gcrs", "fk5", "fk4", "topo"] - - if catalog_identifier is not None: - if isinstance(catalog_identifier, (str, int)): - pass - elif isinstance(catalog_identifier, list) and all( - isinstance(cat, (str, int)) for cat in catalog_identifier - ): - pass - else: - raise TypeError( - "catalog_identifier must be a string, an integer or a list of " - "strings or integers." - ) - - if not isinstance(catalog_identifier, list): - catalog_identifier = [catalog_identifier] - - cat_id_list = [] - for cat in catalog_identifier: - if isinstance(cat, str): - this_list = [] - for key, ps_dict in phase_center_catalog.items(): - if ps_dict["cat_name"] == cat: - this_list.append(key) - if len(this_list) == 0: - raise ValueError(f"No entry by the name {cat} in the catalog.") - cat_id_list.extend(this_list) - else: - # Force cat_id to be a list to make downstream code simpler. - # If cat_id is an int, it will throw a TypeError on casting to - # list, which we can catch. 
- if cat not in phase_center_catalog: - raise ValueError(f"No entry with the ID {cat} in the catalog.") - cat_id_list.append(cat) - else: - cat_id_list = list(phase_center_catalog) - - dict_list = [phase_center_catalog[cat_id] for cat_id in cat_id_list] - - # We want to check and actually see which fields we need to print - any_lon = any_lat = any_frame = any_epoch = any_times = False - any_pm_ra = any_pm_dec = any_dist = any_vrad = False - - for indv_dict in dict_list: - any_lon = any_lon or indv_dict.get("cat_lon") is not None - any_lat = any_lat or indv_dict.get("cat_lat") is not None - any_frame = any_frame or indv_dict.get("cat_frame") is not None - any_epoch = any_epoch or indv_dict.get("cat_epoch") is not None - any_times = any_times or indv_dict.get("cat_times") is not None - any_pm_ra = any_pm_ra or indv_dict.get("cat_pm_ra") is not None - any_pm_dec = any_pm_dec or indv_dict.get("cat_pm_dec") is not None - any_dist = any_dist or indv_dict.get("cat_dist") is not None - any_vrad = any_vrad or indv_dict.get("cat_vrad") is not None - - if any_lon and (hms_format is None): - cat_frame = indv_dict.get("cat_frame") - cat_type = indv_dict["cat_type"] - if (cat_frame not in ra_frames) or (cat_type == "driftscan"): - hms_format = False - - if hms_format is None: - hms_format = True - - col_list = [] - col_list.append( - {"hdr": ("ID", "#"), "fmt": "% 4i", "field": " %4s ", "name": "cat_id"} - ) - col_list.append( - { - "hdr": ("Cat Entry", "Name"), - "fmt": "%12s", - "field": " %12s ", - "name": "cat_name", - } - ) - col_list.append( - {"hdr": ("Type", ""), "fmt": "%12s", "field": " %12s ", "name": "cat_type"} - ) - - if any_lon: - col_list.append( - { - "hdr": ("Az/Lon/RA", "hours" if hms_format else "deg"), - "fmt": "% 3i:%02i:%05.2f", - "field": " %12s " if hms_format else " %13s ", - "name": "cat_lon", - } - ) - if any_lat: - col_list.append( - { - "hdr": ("El/Lat/Dec", "deg"), - "fmt": "%1s%2i:%02i:%05.2f", - "field": " %12s ", - "name": "cat_lat", - } - ) - if any_frame: - col_list.append( - {"hdr": ("Frame", ""), "fmt": "%5s", "field": " %5s ", "name": "cat_frame"} - ) - if any_epoch: - col_list.append( - {"hdr": ("Epoch", ""), "fmt": "%7s", "field": " %7s ", "name": "cat_epoch"} - ) - if any_times: - col_list.append( - { - "hdr": (" Ephem Range ", "Start-MJD End-MJD"), - "fmt": " %8.2f % 8.2f", - "field": " %20s ", - "name": "cat_times", - } - ) - if any_pm_ra: - col_list.append( - { - "hdr": ("PM-Ra", "mas/yr"), - "fmt": "%.4g", - "field": " %6s ", - "name": "cat_pm_ra", - } - ) - if any_pm_dec: - col_list.append( - { - "hdr": ("PM-Dec", "mas/yr"), - "fmt": "%.4g", - "field": " %6s ", - "name": "cat_pm_dec", - } - ) - if any_dist: - col_list.append( - {"hdr": ("Dist", "pc"), "fmt": "%.1e", "field": " %7s ", "name": "cat_dist"} - ) - if any_vrad: - col_list.append( - { - "hdr": ("V_rad", "km/s"), - "fmt": "%.4g", - "field": " %6s ", - "name": "cat_vrad", - } - ) - - top_str = "" - bot_str = "" - for col in col_list: - top_str += col["field"] % col["hdr"][0] - bot_str += col["field"] % col["hdr"][1] - - info_str = "" - - info_str += top_str + "\n" - info_str += bot_str + "\n" - info_str += ("-" * len(bot_str)) + "\n" - # We want to print in the order of cat_id - for idx in np.argsort(cat_id_list): - tbl_str = "" - for col in col_list: - # If we have a "special" field that needs extra handling, - # take care of that up front - if col["name"] == "cat_id": - temp_val = cat_id_list[idx] - else: - temp_val = dict_list[idx][col["name"]] - if temp_val is None: - temp_str = "" - elif 
col["name"] == "cat_lon": - # Force the longitude component to be a positive value - temp_val = np.mod(np.median(temp_val), 2 * np.pi) - temp_val /= 15.0 if hms_format else 1.0 - coord_tuple = ( - np.mod(temp_val * r2d, 360.0), - np.mod(temp_val * r2m, 60.0), - np.mod(temp_val * r2s, 60.0), - ) - temp_str = col["fmt"] % coord_tuple - elif col["name"] == "cat_lat": - temp_val = np.median(temp_val) - coord_tuple = ( - "-" if temp_val < 0.0 else "+", - np.mod(np.abs(temp_val) * r2d, 360.0), - np.mod(np.abs(temp_val) * r2m, 60.0), - np.mod(np.abs(temp_val) * r2s, 60.0), - ) - temp_str = col["fmt"] % coord_tuple - elif col["name"] == "cat_epoch": - use_byrs = dict_list[idx]["cat_frame"] in ["fk4", "fk4noeterms"] - temp_val = ("B%6.1f" if use_byrs else "J%6.1f") % temp_val - temp_str = col["fmt"] % temp_val - elif col["name"] == "cat_times": - time_tuple = ( - np.min(temp_val) - 2400000.5, - np.max(temp_val) - 2400000.5, - ) - temp_str = col["fmt"] % time_tuple - elif (col["name"] == "cat_dist") or (col["name"] == "cat_vrad"): - temp_val = np.median(temp_val) - temp_str = col["fmt"] % temp_val - else: - temp_str = col["fmt"] % temp_val - tbl_str += col["field"] % temp_str - info_str += tbl_str + "\n" - - if print_table: - # We need this extra bit of code to handle trailing whitespace, since - # otherwise some checks (e.g., doc check on tutorials) will balk - print( - "\n".join([line.rstrip() for line in info_str.split("\n")]), end="" - ) # pragma: nocover - if return_str: - return info_str - - -def generate_new_phase_center_id( - phase_center_catalog=None, *, cat_id=None, old_id=None, reserved_ids=None -): - """ - Update a phase center with a new catalog ID number. - - Parameters - ---------- - phase_center_catalog : dict - Catalog to be updated. Note that the supplied catalog will be modified in situ. - cat_id : int - Optional argument. If supplied, then the method will check to see that the - supplied ID is not in either the supplied catalog or in the reserved IDs. - provided value as the new catalog ID, provided that an existing catalog - If not supplied, then the method will automatically assign a value, defaulting - to the value in `cat_id` if supplied (and assuming that ID value has no - conflicts with the reserved IDs). - old_id : int - Optional argument, current catalog ID of the phase center, which corresponds to - a key in `phase_center_catalog`. - reserved_ids : array-like in int - Optional argument. An array-like of ints that denotes which ID numbers - are already reserved. Useful for when combining two separate catalogs. - - Returns - ------- - new_id : int - New phase center ID. - - Raises - ------ - ValueError - If there's no entry that matches `cat_id`, or of the value `new_id` - is already taken. - """ - used_cat_ids = set() - if phase_center_catalog is None: - if old_id is not None: - raise ValueError("Cannot specify old_id if no catalog is supplied.") - else: - used_cat_ids = set(phase_center_catalog) - if old_id is not None: - if old_id not in phase_center_catalog: - raise ValueError(f"No match in catalog to an entry with id {cat_id}.") - used_cat_ids.remove(old_id) - - if reserved_ids is not None: - used_cat_ids = used_cat_ids.union(reserved_ids) - - if cat_id is None: - # Default to using the old ID if available. 
- cat_id = old_id - - # If the old ID is in the reserved list, then we'll need to update it - if (old_id is None) or (old_id in used_cat_ids): - cat_id = set(range(len(used_cat_ids) + 1)).difference(used_cat_ids).pop() - elif cat_id in used_cat_ids: - if phase_center_catalog is not None and cat_id in phase_center_catalog: - raise ValueError( - "Provided cat_id belongs to another source (%s)." - % phase_center_catalog[cat_id]["cat_name"] - ) - else: - raise ValueError("Provided cat_id was found in reserved_ids.") - - return cat_id - - -def generate_phase_center_cat_entry( - cat_name=None, - *, - cat_type=None, - cat_lon=None, - cat_lat=None, - cat_frame=None, - cat_epoch=None, - cat_times=None, - cat_pm_ra=None, - cat_pm_dec=None, - cat_dist=None, - cat_vrad=None, - info_source="user", - force_update=False, - cat_id=None, -): - """ - Add an entry to a object/source catalog or find a matching one. - - This is a helper function for identifying and adding a phase center to a catalog, - typically contained within the attribute `phase_center_catalog`. If a matching - phase center is found, the catalog ID associated with that phase center is returned. - - Parameters - ---------- - cat_name : str - Name of the phase center to be added. - cat_type : str - Type of phase center to be added. Must be one of: - "sidereal" (fixed RA/Dec), - "ephem" (RA/Dec that moves with time), - "driftscan" (fixed az/el position), - "unprojected" (no w-projection, equivalent to the old - `phase_type` == "drift"). - cat_lon : float or ndarray - Value of the longitudinal coordinate (e.g., RA, Az, l) in radians of the - phase center. No default unless `cat_type="unprojected"`, in which case the - default is zero. Expected to be a float for sidereal and driftscan phase - centers, and an ndarray of floats of shape (Npts,) for ephem phase centers. - cat_lat : float or ndarray - Value of the latitudinal coordinate (e.g., Dec, El, b) in radians of the - phase center. No default unless `cat_type="unprojected"`, in which case the - default is pi/2. Expected to be a float for sidereal and driftscan phase - centers, and an ndarray of floats of shape (Npts,) for ephem phase centers. - cat_frame : str - Coordinate frame that cat_lon and cat_lat are given in. Only used - for sidereal and ephem targets. Can be any of the several supported frames - in astropy (a limited list: fk4, fk5, icrs, gcrs, cirs, galactic). - cat_epoch : str or float - Epoch of the coordinates, only used when cat_frame = fk4 or fk5. Given - in units of fractional years, either as a float or as a string with the - epoch abbreviation (e.g, Julian epoch 2000.0 would be J2000.0). - cat_times : ndarray of floats - Only used when `cat_type="ephem"`. Describes the time for which the values - of `cat_lon` and `cat_lat` are caclulated, in units of JD. Shape is (Npts,). - cat_pm_ra : float - Proper motion in RA, in units of mas/year. Only used for sidereal phase - centers. - cat_pm_dec : float - Proper motion in Dec, in units of mas/year. Only used for sidereal phase - centers. - cat_dist : float or ndarray of float - Distance of the source, in units of pc. Only used for sidereal and ephem - phase centers. Expected to be a float for sidereal and driftscan phase - centers, and an ndarray of floats of shape (Npts,) for ephem phase centers. - cat_vrad : float or ndarray of float - Radial velocity of the source, in units of km/s. Only used for sidereal and - ephem phase centers. 
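# A hedged usage sketch for generate_phase_center_cat_entry, whose parameters
# are documented here; unset driftscan coordinates are filled with the
# defaults noted above:
#
#     entry = generate_phase_center_cat_entry(
#         cat_name="zenith_scan", cat_type="driftscan"
#     )
#     # -> entry["cat_lon"] == 0.0, entry["cat_lat"] == np.pi / 2,
#     #    entry["cat_frame"] == "altaz", entry["info_source"] == "user"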
Expected to be a float for sidereal and driftscan phase - centers, and an ndarray of floats of shape (Npts,) for ephem phase centers. - info_source : str - Optional string describing the source of the information provided. Used - primarily in UVData to denote when an ephemeris has been supplied by the - JPL-Horizons system, user-supplied, or read in by one of the various file - interpreters. Default is 'user'. - force_update : bool - Normally, `_add_phase_center` will throw an error if there already exists a - phase_center with the given cat_id. However, if one sets - `force_update=True`, the method will overwrite the existing entry in - `phase_center_catalog` with the parameters supplied. Note that doing this - will _not_ update other attributes of the `UVData` object. Default is False. - cat_id : int - An integer signifying the ID number for the phase center, used in the - `phase_center_id_array` attribute. If a matching phase center entry exists - already, that phase center ID will be returned, which may be different than - the value specified to this parameter. The default is for the method to - assign this value automatically. - - Returns - ------- - phase_center_entry : dict - Catalog containing the phase centers. - cat_id : int - The unique ID number for the phase center that either matches the specified - parameters or was added to the internal catalog. If a matching entry was - found, this may not be the value passed to the `cat_id` parameter. This - value is used in the `phase_center_id_array` attribute to denote which - source a given baseline-time corresponds to. - - Raises - ------ - ValueError - If attempting to add a non-unique source name or if adding a sidereal - source without coordinates. - - """ - if not isinstance(cat_name, str): - raise ValueError("cat_name must be a string.") - - # We currently only have 4 supported types -- make sure the user supplied - # one of those - if cat_type not in allowed_cat_types: - raise ValueError(f"cat_type must be one of {allowed_cat_types}.") - - # Both proper motion parameters need to be set together - if (cat_pm_ra is None) != (cat_pm_dec is None): - raise ValueError( - "Must supply values for either both or neither of " - "cat_pm_ra and cat_pm_dec." - ) - - # If left unset, unprojected and driftscan defaulted to Az, El = (0 deg, 90 deg) - if cat_type in ["unprojected", "driftscan"]: - if cat_lon is None: - cat_lon = 0.0 - if cat_lat is None: - cat_lat = np.pi / 2 - if cat_frame is None: - cat_frame = "altaz" - - # check some case-specific things and make sure all the entries are acceptable - if (cat_times is None) and (cat_type == "ephem"): - raise ValueError("cat_times cannot be None for ephem object.") - elif (cat_times is not None) and (cat_type != "ephem"): - raise ValueError("cat_times cannot be used for non-ephem phase centers.") - - if (cat_lon is None) and (cat_type in ["sidereal", "ephem"]): - raise ValueError("cat_lon cannot be None for sidereal or ephem phase centers.") - - if (cat_lat is None) and (cat_type in ["sidereal", "ephem"]): - raise ValueError("cat_lat cannot be None for sidereal or ephem phase centers.") - - if (cat_frame is None) and (cat_type in ["sidereal", "ephem"]): - raise ValueError( - "cat_frame cannot be None for sidereal or ephem phase centers." - ) - elif (cat_frame != "altaz") and (cat_type in ["driftscan", "unprojected"]): - raise ValueError( - "cat_frame must be either None or 'altaz' when the cat type " - "is either driftscan or unprojected." 
- ) - - if (cat_type == "unprojected") and (cat_lon != 0.0): - raise ValueError( - "Catalog entries that are unprojected must have cat_lon set to either " - "0 or None." - ) - if (cat_type == "unprojected") and (cat_lat != (np.pi / 2)): - raise ValueError( - "Catalog entries that are unprojected must have cat_lat set to either " - "pi/2 or None." - ) - - if (cat_type != "sidereal") and ( - (cat_pm_ra is not None) or (cat_pm_dec is not None) - ): - raise ValueError( - "Non-zero proper motion values (cat_pm_ra, cat_pm_dec) " - "for cat types other than sidereal are not supported." - ) - - if isinstance(cat_epoch, Time) or isinstance(cat_epoch, str): - if cat_frame in ["fk4", "fk4noeterms"]: - cat_epoch = Time(cat_epoch).byear - else: - cat_epoch = Time(cat_epoch).jyear - elif cat_epoch is not None: - cat_epoch = float(cat_epoch) - - if cat_type == "ephem": - cat_times = np.array(cat_times, dtype=float).reshape(-1) - cshape = cat_times.shape - try: - cat_lon = np.array(cat_lon, dtype=float).reshape(cshape) - cat_lat = np.array(cat_lat, dtype=float).reshape(cshape) - if cat_dist is not None: - cat_dist = np.array(cat_dist, dtype=float).reshape(cshape) - if cat_vrad is not None: - cat_vrad = np.array(cat_vrad, dtype=float).reshape(cshape) - except ValueError as err: - raise ValueError( - "Object properties -- lon, lat, pm_ra, pm_dec, dist, vrad -- must " - "be of the same size as cat_times for ephem phase centers." - ) from err - else: - if cat_lon is not None: - cat_lon = float(cat_lon) - cat_lon = None if cat_lon is None else float(cat_lon) - cat_lat = None if cat_lat is None else float(cat_lat) - cat_pm_ra = None if cat_pm_ra is None else float(cat_pm_ra) - cat_pm_dec = None if cat_pm_dec is None else float(cat_pm_dec) - cat_dist = None if cat_dist is None else float(cat_dist) - cat_vrad = None if cat_vrad is None else float(cat_vrad) - - cat_entry = { - "cat_name": cat_name, - "cat_type": cat_type, - "cat_lon": cat_lon, - "cat_lat": cat_lat, - "cat_frame": cat_frame, - "cat_epoch": cat_epoch, - "cat_times": cat_times, - "cat_pm_ra": cat_pm_ra, - "cat_pm_dec": cat_pm_dec, - "cat_vrad": cat_vrad, - "cat_dist": cat_dist, - "info_source": info_source, - } - - return cat_entry diff --git a/src/pyuvdata/utils.pyx b/src/pyuvdata/utils.pyx deleted file mode 100644 index 09e6efa9c9..0000000000 --- a/src/pyuvdata/utils.pyx +++ /dev/null @@ -1,530 +0,0 @@ -# -*- mode: python; coding: utf-8 -*- -# Copyright (c) 2020 Radio Astronomy Software Group -# Licensed under the 2-clause BSD License - -# distutils: language = c -# cython: linetrace=True - -import enum - -# python imports -import warnings - -# cython imports - -cimport cython -cimport numpy - -numpy.import_array() - -from libc.math cimport atan2, cos, sin, sqrt - - -cdef class Ellipsoid: - cdef readonly numpy.float64_t gps_a, gps_b, e_squared, e_prime_squared, b_div_a2 - - @cython.cdivision - def __init__(self, numpy.float64_t gps_a, numpy.float64_t gps_b): - self.gps_a = gps_a - self.gps_b = gps_b - self.b_div_a2 = (self.gps_b / self.gps_a)**2 - self.e_squared = (1 - self.b_div_a2) - self.e_prime_squared = (self.b_div_a2**-1 - 1) - - -# A python interface for different celestial bodies -class Body(enum.Enum): - Earth = Ellipsoid(6378137, 6356752.31424518) - - try: - from lunarsky.moon import SELENOIDS - - Moon_sphere = Ellipsoid( - SELENOIDS["SPHERE"]._equatorial_radius.to('m').value, - SELENOIDS["SPHERE"]._equatorial_radius.to('m').value * (1-SELENOIDS["SPHERE"]._flattening) - ) - - Moon_gsfc = Ellipsoid( - 
SELENOIDS["GSFC"]._equatorial_radius.to('m').value, - SELENOIDS["GSFC"]._equatorial_radius.to('m').value * (1-SELENOIDS["GSFC"]._flattening) - ) - - Moon_grail23 = Ellipsoid( - SELENOIDS["GRAIL23"]._equatorial_radius.to('m').value, - SELENOIDS["GRAIL23"]._equatorial_radius.to('m').value * (1-SELENOIDS["GRAIL23"]._flattening) - ) - - Moon_ce1lamgeo = Ellipsoid( - SELENOIDS["CE-1-LAM-GEO"]._equatorial_radius.to('m').value, - SELENOIDS["CE-1-LAM-GEO"]._equatorial_radius.to('m').value * (1-SELENOIDS["CE-1-LAM-GEO"]._flattening) - ) - except: - # lunar sky not installed, don't add any moon bodies - pass - -ctypedef fused int_or_float: - numpy.uint64_t - numpy.int64_t - numpy.int32_t - numpy.uint32_t - numpy.float64_t - numpy.float32_t - - -cdef inline int_or_float max(int_or_float a, int_or_float b): - return a if a > b else b - -@cython.boundscheck(False) -@cython.wraparound(False) -cdef int_or_float arraymin(int_or_float[::1] array) nogil: - cdef int_or_float minval = array[0] - cdef Py_ssize_t i - for i in range(array.shape[0]): - if array[i] < minval: - minval = array[i] - return minval - -@cython.boundscheck(False) -@cython.wraparound(False) -cdef int_or_float arraymax(int_or_float[::1] array) nogil: - cdef int_or_float maxval = array[0] - cdef Py_ssize_t i - for i in range(array.shape[0]): - if array[i] > maxval: - maxval = array[i] - return maxval - -@cython.boundscheck(False) -@cython.wraparound(False) -cdef inline void _bl_to_ant_256( - numpy.uint64_t[::1] _bl, - numpy.uint64_t[:, ::1] _ants, - long nbls, -): - cdef Py_ssize_t i - - for i in range(nbls): - _ants[1, i] = (_bl[i]) % 256 - _ants[0, i] = (_bl[i] - (_ants[1, i])) // 256 - return - -@cython.boundscheck(False) -@cython.wraparound(False) -cdef inline void _bl_to_ant_2048( - numpy.uint64_t[::1] _bl, - numpy.uint64_t[:, ::1] _ants, - int nbls -): - cdef Py_ssize_t i - for i in range(nbls): - _ants[1, i] = (_bl[i] - 2 ** 16) % 2048 - _ants[0, i] = (_bl[i] - 2 ** 16 - (_ants[1, i])) // 2048 - return - -# defining these constants helps cython not cast the large -# numbers as python ints -cdef numpy.uint64_t bl_large = 2 ** 16 + 2 ** 22 -cdef numpy.uint64_t large_mod = 2147483648 - -@cython.boundscheck(False) -@cython.wraparound(False) -@cython.cdivision(True) -cdef inline void _bl_to_ant_2147483648( - numpy.uint64_t[::1] _bl, - numpy.uint64_t[:, ::1] _ants, - int nbls -): - cdef Py_ssize_t i - for i in range(nbls): - _ants[1, i] = (_bl[i] - bl_large) % large_mod - _ants[0, i] = (_bl[i] - bl_large - (_ants[1, i])) // large_mod - return - - -@cython.boundscheck(False) -@cython.wraparound(False) -cpdef numpy.ndarray[dtype=numpy.uint64_t, ndim=2] baseline_to_antnums( - numpy.uint64_t[::1] _bl -): - cdef numpy.uint64_t _min = arraymin(_bl) - cdef long nbls = _bl.shape[0] - cdef int ndim = 2 - cdef numpy.npy_intp * dims = [2, nbls] - cdef numpy.ndarray[ndim=2, dtype=numpy.uint64_t] ants = numpy.PyArray_EMPTY(ndim, dims, numpy.NPY_UINT64, 0) - cdef numpy.uint64_t[:, ::1] _ants = ants - - if _min >= (2 ** 16 + 2 ** 22): - _bl_to_ant_2147483648(_bl, _ants, nbls) - elif _min >= 2 ** 16: - _bl_to_ant_2048(_bl, _ants, nbls) - else: - _bl_to_ant_256(_bl, _ants, nbls) - return ants - -@cython.boundscheck(False) -@cython.wraparound(False) -cdef inline void _antnum_to_bl_2147483648( - numpy.uint64_t[::1] ant1, - numpy.uint64_t[::1] ant2, - numpy.uint64_t[::1] baselines, - int nbls, -): - cdef Py_ssize_t i - - for i in range(nbls): - baselines[i] = large_mod * (ant1[i]) + (ant2[i]) + bl_large - return - -@cython.boundscheck(False) 
-@cython.wraparound(False) -cdef inline void _antnum_to_bl_2048( - numpy.uint64_t[::1] ant1, - numpy.uint64_t[::1] ant2, - numpy.uint64_t[::1] baselines, - int nbls, -): - cdef Py_ssize_t i - - for i in range(nbls): - baselines[i] = 2048 * (ant1[i]) + (ant2[i]) + 2 ** 16 - return - -@cython.boundscheck(False) -@cython.wraparound(False) -cdef inline void _antnum_to_bl_2048_miriad( - numpy.uint64_t[::1] ant1, - numpy.uint64_t[::1] ant2, - numpy.uint64_t[::1] baselines, - int nbls, -): - cdef Py_ssize_t i - - for i in range(nbls): - if ant2[i] > 255: # MIRIAD uses 1-index antenna IDs - baselines[i] = 2048 * (ant1[i]) + (ant2[i]) + 2 ** 16 - else: - baselines[i] = 256 * (ant1[i]) + (ant2[i]) - return - -@cython.boundscheck(False) -@cython.wraparound(False) -cdef inline void _antnum_to_bl_256( - numpy.uint64_t[::1] ant1, - numpy.uint64_t[::1] ant2, - numpy.uint64_t[::1] baselines, - int nbls, -): - cdef Py_ssize_t i - # make views as c-contiguous arrays of a known dtype - # effectivly turns the numpy array into a c-array - for i in range(nbls): - baselines[i] = 256 * (ant1[i]) + (ant2[i]) - return - -cpdef numpy.ndarray[dtype=numpy.uint64_t] antnums_to_baseline( - numpy.uint64_t[::1] ant1, - numpy.uint64_t[::1] ant2, - bint attempt256=False, - bint nants_less2048=True, - bint use_miriad_convention=False -): - cdef int ndim = 1 - cdef int nbls = ant1.shape[0] - cdef numpy.npy_intp * dims = [nbls] - cdef numpy.ndarray[ndim=1, dtype=numpy.uint64_t] baseline = numpy.PyArray_EMPTY(ndim, dims, numpy.NPY_UINT64, 0) - cdef numpy.uint64_t[::1] _bl = baseline - cdef bint less255 - cdef bint ants_less2048 - - # to ensure baseline numbers are unambiguous, - # use the 2048 calculation for antennas >= 256 - # and use the 2147483648 calculation for antennas >= 2048 - ants_less2048 = max( - arraymax(ant1), - arraymax(ant2), - ) < 2048 - - # Some UVFITS readers (e.g. MWA and AAVS) expect the - # MIRIAD baseline convention. - if use_miriad_convention: - _antnum_to_bl_2048_miriad(ant1, ant2, _bl, nbls) - - elif attempt256: - less256 = max( - arraymax(ant1), - arraymax(ant2), - ) < 256 - - if less256: - _antnum_to_bl_256(ant1, ant2, _bl, nbls) - - elif ants_less2048 and nants_less2048: - message = ( - "antnums_to_baseline: found antenna numbers > 255, using " - "2048 baseline indexing." - ) - warnings.warn(message) - _antnum_to_bl_2048(ant1, ant2, _bl, nbls) - else: - message = ( - "antnums_to_baseline: found antenna numbers > 2047 or " - "Nants_telescope > 2048, using 2147483648 baseline indexing." 
- ) - warnings.warn(message) - _antnum_to_bl_2147483648(ant1, ant2, _bl, nbls) - - elif ants_less2048 and nants_less2048: - _antnum_to_bl_2048(ant1, ant2, _bl, nbls) - - else: - _antnum_to_bl_2147483648(ant1, ant2, _bl, nbls) - - return baseline - -@cython.boundscheck(False) -@cython.wraparound(False) -@cython.cdivision(True) -cpdef numpy.ndarray[dtype=numpy.float64_t, ndim=2] _lla_from_xyz( - numpy.float64_t[:, ::1] xyz, - Ellipsoid body, -): - cdef Py_ssize_t ind - cdef int ndim = 2 - cdef int n_pts = xyz.shape[1] - cdef numpy.npy_intp * dims = [3, n_pts] - - cdef numpy.ndarray[dtype=numpy.float64_t, ndim=2] lla = numpy.PyArray_EMPTY(ndim, dims, numpy.NPY_FLOAT64, 0) - cdef numpy.float64_t[:, ::1] _lla = lla - - cdef numpy.float64_t gps_p, gps_theta - - # see wikipedia geodetic_datum and Datum transformations of - # GPS positions PDF in docs/references folder - for ind in range(n_pts): - gps_p = sqrt(xyz[0, ind] ** 2 + xyz[1, ind] ** 2) - gps_theta = atan2(xyz[2, ind] * body.gps_a, gps_p * body.gps_b) - - _lla[0, ind] = atan2( - xyz[2, ind] + body.e_prime_squared * body.gps_b * sin(gps_theta) ** 3, - gps_p - body.e_squared * body.gps_a * cos(gps_theta) ** 3, - ) - - _lla[1, ind] = atan2(xyz[1, ind], xyz[0, ind]) - - _lla[2, ind] = (gps_p / cos(lla[0, ind])) - body.gps_a / sqrt(1.0 - body.e_squared * sin(lla[0, ind]) ** 2) - - return lla - -@cython.boundscheck(False) -@cython.wraparound(False) -@cython.cdivision(True) -cpdef numpy.ndarray[dtype=numpy.float64_t, ndim=2] _xyz_from_latlonalt( - numpy.float64_t[::1] _lat, - numpy.float64_t[::1] _lon, - numpy.float64_t[::1] _alt, - Ellipsoid body, -): - cdef Py_ssize_t i - cdef int ndim = 2 - cdef int n_pts = _lat.shape[0] - cdef numpy.npy_intp * dims = [3, n_pts] - - cdef numpy.ndarray[dtype=numpy.float64_t, ndim=2] xyz = numpy.PyArray_EMPTY(ndim, dims, numpy.NPY_FLOAT64, 0) - cdef numpy.float64_t[:, ::1] _xyz = xyz - - cdef numpy.float64_t sin_lat, cos_lat, sin_lon, cos_lon, gps_n - - for ind in range(n_pts): - sin_lat = sin(_lat[ind]) - sin_lon = sin(_lon[ind]) - - cos_lat = cos(_lat[ind]) - cos_lon = cos(_lon[ind]) - - gps_n = body.gps_a / sqrt(1.0 - body.e_squared * sin_lat ** 2) - - _xyz[0, ind] = (gps_n + _alt[ind]) * cos_lat * cos_lon - _xyz[1, ind] = (gps_n + _alt[ind]) * cos_lat * sin_lon - - _xyz[2, ind] = (body.b_div_a2 * gps_n + _alt[ind]) * sin_lat - return xyz - -# this function takes memoryviews as inputs -# that is why _lat, _lon, and _alt are indexed below to get the 0th entry -@cython.boundscheck(False) -@cython.wraparound(False) -cpdef numpy.ndarray[numpy.float64_t, ndim=2] _ENU_from_ECEF( - numpy.float64_t[:, ::1] xyz, - numpy.float64_t[::1] _lat, - numpy.float64_t[::1] _lon, - numpy.float64_t[::1] _alt, - Ellipsoid body, -): - cdef Py_ssize_t i - cdef int ndim = 2 - cdef int nblts = xyz.shape[1] - cdef numpy.npy_intp * dims = [3, nblts] - cdef numpy.float64_t xyz_use[3] - - cdef numpy.float64_t sin_lat, cos_lat, sin_lon, cos_lon - - # we want a memoryview of the xyz of the center - # this looks a little silly but we don't have to define 2 different things - cdef numpy.float64_t[:] xyz_center = _xyz_from_latlonalt(_lat, _lon, _alt, body).T[0] - - cdef numpy.ndarray[numpy.float64_t, ndim=2] _enu = numpy.PyArray_EMPTY(ndim, dims, numpy.NPY_FLOAT64, 0) - cdef numpy.float64_t[:, ::1] enu = _enu - - sin_lat = sin(_lat[0]) - cos_lat = cos(_lat[0]) - - sin_lon = sin(_lon[0]) - cos_lon = cos(_lon[0]) - - for i in range(nblts): - xyz_use[0] = xyz[0, i] - xyz_center[0] - xyz_use[1] = xyz[1, i] - xyz_center[1] - xyz_use[2] = xyz[2, 
i] - xyz_center[2] - - enu[0, i] = -sin_lon * xyz_use[0] + cos_lon * xyz_use[1] - enu[1, i] = ( - - sin_lat * cos_lon * xyz_use[0] - - sin_lat * sin_lon * xyz_use[1] - + cos_lat * xyz_use[2] - ) - enu[2, i] = ( - cos_lat * cos_lon * xyz_use[0] - + cos_lat * sin_lon * xyz_use[1] - + sin_lat * xyz_use[2] - ) - - return _enu - -# this function takes memoryviews as inputs -# that is why _lat, _lon, and _alt are indexed below to get the 0th entry -@cython.boundscheck(False) -@cython.wraparound(False) -cpdef numpy.ndarray[dtype=numpy.float64_t] _ECEF_from_ENU( - numpy.float64_t[:, ::1] enu, - numpy.float64_t[::1] _lat, - numpy.float64_t[::1] _lon, - numpy.float64_t[::1] _alt, - Ellipsoid body, -): - cdef Py_ssize_t i - cdef int ndim = 2 - cdef int nblts = enu.shape[1] - cdef numpy.npy_intp * dims = [3, nblts] - cdef numpy.float64_t sin_lat, cos_lat, sin_lon, cos_lon - - # allocate memory then make memory view for faster access - cdef numpy.ndarray[dtype=numpy.float64_t, ndim=2] _xyz = numpy.PyArray_EMPTY(ndim, dims, numpy.NPY_FLOAT64, 0) - cdef numpy.float64_t[:, ::1] xyz = _xyz - - # we want a memoryview of the xyz of the center - # this looks a little silly but we don't have to define 2 different things - cdef numpy.float64_t[:] xyz_center = _xyz_from_latlonalt(_lat, _lon, _alt, body).T[0] - - sin_lat = sin(_lat[0]) - cos_lat = cos(_lat[0]) - - sin_lon = sin(_lon[0]) - cos_lon = cos(_lon[0]) - - for i in range(nblts): - xyz[0, i] = ( - - sin_lat * cos_lon * enu[1, i] - - sin_lon * enu[0, i] - + cos_lat * cos_lon * enu[2, i] - + xyz_center[0] - ) - xyz[1, i] = ( - - sin_lat * sin_lon * enu[1, i] - + cos_lon * enu[0, i] - + cos_lat * sin_lon * enu[2, i] - + xyz_center[1] - ) - xyz[2, i] = cos_lat * enu[1, i] + sin_lat * enu[2, i] + xyz_center[2] - - return _xyz - -# inital_uvw is a memoryviewed array as an input -@cython.boundscheck(False) -@cython.wraparound(False) -cpdef numpy.ndarray[dtype=numpy.float64_t, ndim=2] _old_uvw_calc( - numpy.float64_t ra, - numpy.float64_t dec, - numpy.float64_t[:, ::1] initial_uvw -): - cdef int i - cdef int ndim = 2 - cdef int nuvw = initial_uvw.shape[1] - cdef numpy.npy_intp * dims = [3, nuvw] - cdef numpy.ndarray[dtype=numpy.float64_t, ndim=2] uvw = numpy.PyArray_EMPTY(ndim, dims, numpy.NPY_FLOAT64, 0) - - # make a memoryview for the numpy array in c - cdef numpy.float64_t[:, ::1] _uvw = uvw - - cdef numpy.float64_t sin_ra, cos_ra, sin_dec, cos_dec - - sin_ra = sin(ra) - cos_ra = cos(ra) - sin_dec = sin(dec) - cos_dec = cos(dec) - - for i in range(nuvw): - _uvw[0, i] = - sin_ra * initial_uvw[0, i] + cos_ra * initial_uvw[1, i] - - _uvw[1, i] = ( - - sin_dec * cos_ra * initial_uvw[0, i] - - sin_dec * sin_ra * initial_uvw[1, i] - + cos_dec * initial_uvw[2, i] - ) - - _uvw[2, i] = ( - cos_dec * cos_ra * initial_uvw[0, i] - + cos_dec * sin_ra * initial_uvw[1, i] - + sin_dec * initial_uvw[2, i] - ) - return uvw - -# uvw is a memoryviewed array as an input -@cython.boundscheck(False) -@cython.wraparound(False) -cpdef numpy.ndarray[dtype=numpy.float64_t, ndim=2] _undo_old_uvw_calc( - numpy.float64_t ra, - numpy.float64_t dec, - numpy.float64_t[:, ::1] uvw -): - cdef int i - cdef int ndim = 2 - cdef int nuvw = uvw.shape[1] - cdef numpy.npy_intp * dims = [3, nuvw] - cdef numpy.ndarray[dtype=numpy.float64_t, ndim=2] unphased_uvw = numpy.PyArray_EMPTY(ndim, dims, numpy.NPY_FLOAT64, 0) - - # make a memoryview for the numpy array in c - cdef numpy.float64_t[:, ::1] _u_uvw = unphased_uvw - - cdef numpy.float64_t sin_ra, cos_ra, sin_dec, cos_dec - - sin_ra = sin(ra) - cos_ra 
= cos(ra)
-    sin_dec = sin(dec)
-    cos_dec = cos(dec)
-
-    for i in range(nuvw):
-        _u_uvw[0, i] = (
-            - sin_ra * uvw[0, i]
-            - sin_dec * cos_ra * uvw[1, i]
-            + cos_dec * cos_ra * uvw[2, i]
-        )
-
-        _u_uvw[1, i] = (
-            cos_ra * uvw[0, i]
-            - sin_dec * sin_ra * uvw[1, i]
-            + cos_dec * sin_ra * uvw[2, i]
-        )
-
-        _u_uvw[2, i] = cos_dec * uvw[1, i] + sin_dec * uvw[2, i]
-
-    return unphased_uvw
diff --git a/src/pyuvdata/utils/__init__.py b/src/pyuvdata/utils/__init__.py
new file mode 100644
index 0000000000..a9ae4800e4
--- /dev/null
+++ b/src/pyuvdata/utils/__init__.py
@@ -0,0 +1,215 @@
+# -*- mode: python; coding: utf-8 -*-
+# Copyright (c) 2018 Radio Astronomy Software Group
+# Licensed under the 2-clause BSD License
+
+"""Commonly used utility functions."""
+from __future__ import annotations
+
+import warnings
+
+import numpy as np
+
+# standard angle tolerance: 1 mas in radians.
+RADIAN_TOL = 1 * 2 * np.pi * 1e-3 / (60.0 * 60.0 * 360.0)
+# standard lst time tolerance: 5 ms (75 mas in radians), based on an expected RMS
+# accuracy of 1 ms at 7 days out from issuance of Bulletin A (which are issued once a
+# week with rapidly determined parameters and forecasted values of DUT1), the exact
+# formula for which is t_err = 0.00025 (MJD-)**0.75 (in secs).
+LST_RAD_TOL = 2 * np.pi * 5e-3 / (86400.0)
+
+# these seem to be necessary for the installed package to access these submodules
+from . import array_collapse  # noqa
+from . import bls  # noqa
+from . import coordinates  # noqa
+from . import file_io  # noqa
+from . import helpers  # noqa
+from . import phasing  # noqa
+from . import pol  # noqa
+from . import ps_cat  # noqa
+from . import redundancy  # noqa
+
+# Add things to the utils namespace used by outside packages
+from .array_collapse import collapse  # noqa
+from .bls import *  # noqa
+from .coordinates import *  # noqa
+from .lst import get_lst_for_time  # noqa
+from .phasing import uvw_track_generator  # noqa
+from .pol import *  # noqa
+
+
+def _check_histories(history1, history2):
+    """Check if two histories are the same.
+
+    Deprecated. Use pyuvdata.utils.helpers._check_histories
+    """
+    from .helpers import _check_histories
+
+    warnings.warn(
+        "The _check_histories function has moved, please import it from "
+        "pyuvdata.utils.helpers. This warning will become an error in version 3.2",
+        DeprecationWarning,
+    )
+
+    return _check_histories(history1, history2)
+
+
+def _fits_gethduaxis(hdu, axis):
+    """
+    Make axis arrays for fits files.
+
+    Deprecated. Use pyuvdata.utils.file_io.fits._gethduaxis.
+
+    Parameters
+    ----------
+    hdu : astropy.io.fits HDU object
+        The HDU to make an axis array for.
+    axis : int
+        The axis number of interest (1-based).
+
+    Returns
+    -------
+    ndarray of float
+        Array of values for the specified axis.
+
+    """
+    from .file_io.fits import _gethduaxis
+
+    warnings.warn(
+        "The _fits_gethduaxis function has moved, please import it as "
+        "pyuvdata.utils.file_io.fits._gethduaxis. This warning will become an "
+        "error in version 3.2",
+        DeprecationWarning,
+    )
+
+    return _gethduaxis(hdu, axis)
+
+
+def _fits_indexhdus(hdulist):
+    """
+    Get a dict of table names and HDU numbers from a FITS HDU list.
+
+    Deprecated. Use pyuvdata.utils.file_io.fits._indexhdus.
+
+    Parameters
+    ----------
+    hdulist : list of astropy.io.fits HDU objects
+        List of HDUs to get names for
+
+    Returns
+    -------
+    dict
+        dictionary with table names as keys and HDU number as values.
+
+    """
+    from .file_io.fits import _indexhdus
+
+    warnings.warn(
+        "The _fits_indexhdus function has moved, please import it as "
+        "pyuvdata.utils.file_io.fits._indexhdus. This warning will become an "
+        "error in version 3.2",
+        DeprecationWarning,
+    )
+
+    return _indexhdus(hdulist)
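+
+# A usage sketch for the relocated entry points (illustrative, not part of
+# the original patch): new code should import from the new locations, e.g.
+#
+#     from pyuvdata import uvcalibrate, apply_uvflag
+#     from pyuvdata.utils.file_io.fits import _gethduaxis, _indexhdus
+#
+# The deprecated wrappers in this module forward to those locations but emit
+# a DeprecationWarning until version 3.2, when they will become errors.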
+
+
+def uvcalibrate(uvdata, uvcal, **kwargs):
+    """
+    Calibrate a UVData object with a UVCal object.
+
+    Deprecated, use pyuvdata.uvcalibrate
+
+    Parameters
+    ----------
+    uvdata : UVData object
+        UVData object to calibrate.
+    uvcal : UVCal object
+        UVCal object containing the calibration.
+    inplace : bool, optional
+        If True, edit uvdata in place, else return a calibrated copy.
+    prop_flags : bool, optional
+        If True, propagate calibration flags to data flags
+        and do not use flagged gains. Otherwise, use flagged gains and
+        do not propagate calibration flags to data flags.
+    Dterm_cal : bool, optional
+        Calibrate the off-diagonal terms in the Jones matrix if present
+        in uvcal. Default is False. Currently not implemented.
+    flip_gain_conj : bool, optional
+        This function uses the UVData ant_1_array and ant_2_array to specify the
+        antennas in the UVCal object. By default, the conjugation convention, which
+        follows the UVData convention (i.e. ant2 - ant1), is that the applied
+        gain = ant1_gain * conjugate(ant2_gain). If the other convention is required,
+        set flip_gain_conj=True.
+    delay_convention : str, optional
+        Exponent sign to use in conversion of 'delay' to 'gain' cal_type
+        if the input uvcal is not inherently 'gain' cal_type. Defaults to 'minus'.
+    undo : bool, optional
+        If True, undo the provided calibration, i.e. apply the calibration with
+        flipped gain_convention. Flag propagation rules apply the same.
+    time_check : bool
+        Option to check that times match between the UVCal and UVData
+        objects if UVCal has a single time or time range. Times are always
+        checked if UVCal has multiple times.
+    ant_check : bool
+        Option to check that all antennas with data on the UVData
+        object have calibration solutions in the UVCal object. If this option is
+        set to False, uvcalibrate will proceed without erroring and data for
+        antennas without calibrations will be flagged.
+
+    Returns
+    -------
+    UVData, optional
+        Returned if not inplace.
+
+    """
+    from ..uvcalibrate import uvcalibrate
+
+    warnings.warn(
+        "uvcalibrate has moved, please import it as 'from pyuvdata import "
+        "uvcalibrate'. This warning will become an error in version 3.2",
+        DeprecationWarning,
+    )
+
+    return uvcalibrate(uvdata, uvcal, **kwargs)
+
+
+def apply_uvflag(uvd, uvf, **kwargs):
+    """
+    Apply flags from a UVFlag to a UVData instantiation.
+
+    Deprecated, use pyuvdata.apply_uvflag
+
+    Note that if uvf.Nfreqs or uvf.Ntimes is 1, it will broadcast flags across
+    that axis.
+
+    Parameters
+    ----------
+    uvd : UVData object
+        UVData object to add flags to.
+    uvf : UVFlag object
+        A UVFlag object in flag mode.
+    inplace : bool
+        If True overwrite flags in uvd, otherwise return new object
+    unflag_first : bool
+        If True, completely unflag the UVData before applying flags.
+        Else, OR the inherent uvd flags with uvf flags.
+    flag_missing : bool
+        If input uvf is a baseline type and antpairs in uvd do not exist in uvf,
+        flag them in uvd. Otherwise leave them untouched.
+    force_pol : bool
+        If True, broadcast flags to all polarizations if they do not match.
+        Only works if uvf.Npols == 1.
+
+    Returns
+    -------
+    UVData
+        If not inplace, returns new UVData object with flags applied
+
+    """
+    from ..apply_uvflag import apply_uvflag
+
+    warnings.warn(
+        "apply_uvflag has moved, please import it as 'from pyuvdata import "
+        "apply_uvflag'. 
This warnings will become an error in version 3.2", + DeprecationWarning, + ) + + return apply_uvflag(uvd, uvf, **kwargs) diff --git a/src/pyuvdata/utils/array_collapse.py b/src/pyuvdata/utils/array_collapse.py new file mode 100644 index 0000000000..fd63406a75 --- /dev/null +++ b/src/pyuvdata/utils/array_collapse.py @@ -0,0 +1,243 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Utilities for collapsing arrays.""" +import warnings +from copy import deepcopy + +import numpy as np + + +def mean_collapse( + arr, *, weights=None, axis=None, return_weights=False, return_weights_square=False +): + """ + Collapse by averaging data. + + This is similar to np.average, except it handles infs (by giving them + zero weight) and zero weight axes (by forcing result to be inf with zero + output weight). + + Parameters + ---------- + arr : array + Input array to process. + weights: ndarray, optional + weights for average. If none, will default to equal weight for all + non-infinite data. + axis : int or tuple, optional + Axis or axes to collapse (passed to np.sum). Default is all. + return_weights : bool + Whether to return sum of weights. + return_weights_square: bool + Whether to return the sum of the square of the weights. Default is False. + + """ + arr = deepcopy(arr) # avoid changing outside + if weights is None: + weights = np.ones_like(arr) + else: + weights = deepcopy(weights) + weights = weights * np.logical_not(np.isinf(arr)) + arr[np.isinf(arr)] = 0 + weight_out = np.sum(weights, axis=axis) + if return_weights_square: + weights_square = weights**2 + weights_square_out = np.sum(weights_square, axis=axis) + out = np.sum(weights * arr, axis=axis) + where = weight_out > 1e-10 + out = np.true_divide(out, weight_out, where=where) + out = np.where(where, out, np.inf) + if return_weights and return_weights_square: + return out, weight_out, weights_square_out + elif return_weights: + return out, weight_out + elif return_weights_square: + return out, weights_square_out + else: + return out + + +def absmean_collapse( + arr, *, weights=None, axis=None, return_weights=False, return_weights_square=False +): + """ + Collapse by averaging absolute value of data. + + Parameters + ---------- + arr : array + Input array to process. + weights: ndarray, optional + weights for average. If none, will default to equal weight for all + non-infinite data. + axis : int or tuple, optional + Axis or axes to collapse (passed to np.sum). Default is all. + return_weights : bool + Whether to return sum of weights. + return_weights_square: bool + whether to return the sum of the squares of the weights. Default is False. + + """ + return mean_collapse( + np.abs(arr), + weights=weights, + axis=axis, + return_weights=return_weights, + return_weights_square=return_weights_square, + ) + + +def quadmean_collapse( + arr, *, weights=None, axis=None, return_weights=False, return_weights_square=False +): + """ + Collapse by averaging in quadrature. + + Parameters + ---------- + arr : array + Input array to process. + weights: ndarray, optional + weights for average. If none, will default to equal weight for all + non-infinite data. + axis : int or tuple, optional + Axis or axes to collapse (passed to np.sum). Default is all. + return_weights : bool + Whether to return sum of weights. + return_weights_square: bool + whether to return the sum of the squares of the weights. Default is False. 
+ + """ + out = mean_collapse( + np.abs(arr) ** 2, + weights=weights, + axis=axis, + return_weights=return_weights, + return_weights_square=return_weights_square, + ) + if return_weights and return_weights_square: + return np.sqrt(out[0]), out[1], out[2] + elif return_weights or return_weights_square: + return np.sqrt(out[0]), out[1] + else: + return np.sqrt(out) + + +def or_collapse( + arr, *, weights=None, axis=None, return_weights=False, return_weights_square=False +): + """ + Collapse using OR operation. + + Parameters + ---------- + arr : array + Input array to process. + weights: ndarray, optional + NOT USED, but kept for symmetry with other collapsing functions. + axis : int or tuple, optional + Axis or axes to collapse (take OR over). Default is all. + return_weights : bool + Whether to return dummy weights array. + NOTE: the dummy weights will simply be an array of ones + return_weights_square: bool + NOT USED, but kept for symmetry with other collapsing functions. + + """ + if arr.dtype != np.bool_: + raise ValueError("Input to or_collapse function must be boolean array") + out = np.any(arr, axis=axis) + if (weights is not None) and not np.all(weights == weights.reshape(-1)[0]): + warnings.warn("Currently weights are not handled when OR-ing boolean arrays.") + if return_weights: + return out, np.ones_like(out, dtype=np.float64) + else: + return out + + +def and_collapse( + arr, *, weights=None, axis=None, return_weights=False, return_weights_square=False +): + """ + Collapse using AND operation. + + Parameters + ---------- + arr : array + Input array to process. + weights: ndarray, optional + NOT USED, but kept for symmetry with other collapsing functions. + axis : int or tuple, optional + Axis or axes to collapse (take AND over). Default is all. + return_weights : bool + Whether to return dummy weights array. + NOTE: the dummy weights will simply be an array of ones + return_weights_square: bool + NOT USED, but kept for symmetry with other collapsing functions. + + """ + if arr.dtype != np.bool_: + raise ValueError("Input to and_collapse function must be boolean array") + out = np.all(arr, axis=axis) + if (weights is not None) and not np.all(weights == weights.reshape(-1)[0]): + warnings.warn("Currently weights are not handled when AND-ing boolean arrays.") + if return_weights: + return out, np.ones_like(out, dtype=np.float64) + else: + return out + + +def collapse( + arr, + alg, + *, + weights=None, + axis=None, + return_weights=False, + return_weights_square=False, +): + """ + Parent function to collapse an array with a given algorithm. + + Parameters + ---------- + arr : array + Input array to process. + alg : str + Algorithm to use. Must be defined in this function with + corresponding subfunction above. + weights: ndarray, optional + weights for collapse operation (e.g. weighted mean). + NOTE: Some subfunctions do not use the weights. See corresponding + doc strings. + axis : int or tuple, optional + Axis or axes to collapse. Default is all. + return_weights : bool + Whether to return sum of weights. + return_weights_square: bool + Whether to return the sum of the squares of the weights. Default is False. 
+ + """ + collapse_dict = { + "mean": mean_collapse, + "absmean": absmean_collapse, + "quadmean": quadmean_collapse, + "or": or_collapse, + "and": and_collapse, + } + try: + out = collapse_dict[alg]( + arr, + weights=weights, + axis=axis, + return_weights=return_weights, + return_weights_square=return_weights_square, + ) + except KeyError as err: + raise ValueError( + "Collapse algorithm must be one of: " + + ", ".join(collapse_dict.keys()) + + "." + ) from err + return out diff --git a/src/pyuvdata/utils/bls.py b/src/pyuvdata/utils/bls.py new file mode 100644 index 0000000000..8c1472a0fa --- /dev/null +++ b/src/pyuvdata/utils/bls.py @@ -0,0 +1,385 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Utilities for baseline numbers.""" +import re +import warnings + +import numpy as np + +from .. import _bls +from .pol import polnum2str, polstr2num + +__all__ = ["baseline_to_antnums", "antnums_to_baseline"] + + +def baseline_to_antnums(baseline, *, Nants_telescope): # noqa: N803 + """ + Get the antenna numbers corresponding to a given baseline number. + + Parameters + ---------- + baseline : int or array_like of ints + baseline number + Nants_telescope : int + number of antennas + + Returns + ------- + int or array_like of int + first antenna number(s) + int or array_like of int + second antenna number(s) + + """ + if Nants_telescope > 2147483648: + raise ValueError(f"error Nants={Nants_telescope}>2147483648 not supported") + if np.any(np.asarray(baseline) < 0): + raise ValueError("negative baseline numbers are not supported") + if np.any(np.asarray(baseline) > 4611686018498691072): + raise ValueError("baseline numbers > 4611686018498691072 are not supported") + + return_array = isinstance(baseline, (np.ndarray, list, tuple)) + ant1, ant2 = _bls.baseline_to_antnums( + np.ascontiguousarray(baseline, dtype=np.uint64) + ) + if return_array: + return ant1, ant2 + else: + return ant1.item(0), ant2.item(0) + + +def antnums_to_baseline( + ant1, + ant2, + *, + Nants_telescope, # noqa: N803 + attempt256=False, + use_miriad_convention=False, +): + """ + Get the baseline number corresponding to two given antenna numbers. + + Parameters + ---------- + ant1 : int or array_like of int + first antenna number + ant2 : int or array_like of int + second antenna number + Nants_telescope : int + number of antennas + attempt256 : bool + Option to try to use the older 256 standard used in + many uvfits files. If there are antenna numbers >= 256, the 2048 + standard will be used unless there are antenna numbers >= 2048 + or Nants_telescope > 2048. In that case, the 2147483648 standard + will be used. Default is False. + use_miriad_convention : bool + Option to use the MIRIAD convention where BASELINE id is + `bl = 256 * ant1 + ant2` if `ant2 < 256`, otherwise + `bl = 2048 * ant1 + ant2 + 2**16`. + Note antennas should be 1-indexed (start at 1, not 0) + + Returns + ------- + int or array of int + baseline number corresponding to the two antenna numbers. + + """ + if Nants_telescope is not None and Nants_telescope > 2147483648: + raise ValueError( + "cannot convert ant1, ant2 to a baseline index " + f"with Nants={Nants_telescope}>2147483648." + ) + if np.any(np.concatenate((np.unique(ant1), np.unique(ant2))) >= 2147483648): + raise ValueError( + "cannot convert ant1, ant2 to a baseline index " + "with antenna numbers greater than 2147483647." 
+    )
+    if np.any(np.concatenate((np.unique(ant1), np.unique(ant2))) < 0):
+        raise ValueError(
+            "cannot convert ant1, ant2 to a baseline index "
+            "with antenna numbers less than zero."
+        )
+
+    nants_less2048 = True
+    if Nants_telescope is not None and Nants_telescope > 2048:
+        nants_less2048 = False
+
+    return_array = isinstance(ant1, (np.ndarray, list, tuple))
+    baseline = _bls.antnums_to_baseline(
+        np.ascontiguousarray(ant1, dtype=np.uint64),
+        np.ascontiguousarray(ant2, dtype=np.uint64),
+        attempt256=attempt256,
+        nants_less2048=nants_less2048,
+        use_miriad_convention=use_miriad_convention,
+    )
+    if return_array:
+        return baseline
+    else:
+        return baseline.item(0)
+
+
+def baseline_index_flip(baseline, *, Nants_telescope):  # noqa: N803
+    """Change baseline number to reverse antenna order."""
+    ant1, ant2 = baseline_to_antnums(baseline, Nants_telescope=Nants_telescope)
+    return antnums_to_baseline(ant2, ant1, Nants_telescope=Nants_telescope)
+
+
+def parse_ants(uv, ant_str, *, print_toggle=False, x_orientation=None):
+    """
+    Get antpair and polarization from parsing an aipy-style ant string.
+
+    Used to support the select function. Generates two lists of antenna pair
+    tuples and polarization indices based on parsing of the string ant_str.
+    If no valid polarizations (pseudo-Stokes params, or combinations of [lr]
+    or [xy]) or antenna numbers are found in ant_str, ant_pairs_nums and
+    polarizations are returned as None.
+
+    Parameters
+    ----------
+    uv : UVBase object
+        A UVBase object that supports the following functions and parameters:
+        - get_ants
+        - get_antpairs
+        - get_pols
+        These are used to construct the baseline ant_pairs_nums
+        and polarizations returned.
+    ant_str : str
+        String containing antenna information to parse. Can be 'all',
+        'auto', 'cross', or combinations of antenna numbers and polarization
+        indicators 'l' and 'r' or 'x' and 'y'. Minus signs can also be used
+        in front of an antenna number or baseline to exclude it from being
+        output in ant_pairs_nums. If ant_str has a minus sign as the first
+        character, 'all,' will be prepended to the string.
+        See the tutorial for examples of valid strings and their behavior.
+    print_toggle : bool
+        Boolean for printing parsed baselines for a visual user check.
+    x_orientation : str, optional
+        Orientation of the physical dipole corresponding to what is
+        labelled as the x polarization ("east" or "north") to allow for
+        converting from E/N strings. If input uv object has an `x_orientation`
+        parameter and the input to this function is `None`, the value from the
+        object will be used. Any input given to this function will override the
+        value on the uv object. See corresponding parameter on UVData
+        for more details.
+
+    Returns
+    -------
+    ant_pairs_nums : list of tuples of int or None
+        List of tuples containing the parsed pairs of antenna numbers, or
+        None if ant_str is 'all' or a pseudo-Stokes polarization.
+    polarizations : list of int or None
+        List of desired polarizations or None if ant_str does not contain a
+        polarization specification.
+
+    """
+    required_attrs = ["get_ants", "get_antpairs", "get_pols"]
+    if not all(hasattr(uv, attr) for attr in required_attrs):
+        raise ValueError(
+            "UVBased objects must have all the following attributes in order "
+            f"to call 'parse_ants': {required_attrs}."
+ ) + + if x_orientation is None and ( + hasattr(uv.telescope, "x_orientation") + and uv.telescope.x_orientation is not None + ): + x_orientation = uv.telescope.x_orientation + + ant_re = r"(\(((-?\d+[lrxy]?,?)+)\)|-?\d+[lrxy]?)" + bl_re = "(^(%s_%s|%s),?)" % (ant_re, ant_re, ant_re) + str_pos = 0 + ant_pairs_nums = [] + polarizations = [] + ants_data = uv.get_ants() + ant_pairs_data = uv.get_antpairs() + pols_data = uv.get_pols() + warned_ants = [] + warned_pols = [] + + if ant_str.startswith("-"): + ant_str = "all," + ant_str + + while str_pos < len(ant_str): + m = re.search(bl_re, ant_str[str_pos:]) + if m is None: + if ant_str[str_pos:].upper().startswith("ALL"): + if len(ant_str[str_pos:].split(",")) > 1: + ant_pairs_nums = uv.get_antpairs() + elif ant_str[str_pos:].upper().startswith("AUTO"): + for pair in ant_pairs_data: + if pair[0] == pair[1] and pair not in ant_pairs_nums: + ant_pairs_nums.append(pair) + elif ant_str[str_pos:].upper().startswith("CROSS"): + for pair in ant_pairs_data: + if not (pair[0] == pair[1] or pair in ant_pairs_nums): + ant_pairs_nums.append(pair) + elif ant_str[str_pos:].upper().startswith("PI"): + polarizations.append(polstr2num("pI")) + elif ant_str[str_pos:].upper().startswith("PQ"): + polarizations.append(polstr2num("pQ")) + elif ant_str[str_pos:].upper().startswith("PU"): + polarizations.append(polstr2num("pU")) + elif ant_str[str_pos:].upper().startswith("PV"): + polarizations.append(polstr2num("pV")) + else: + raise ValueError(f"Unparsable argument {ant_str}") + + comma_cnt = ant_str[str_pos:].find(",") + if comma_cnt >= 0: + str_pos += comma_cnt + 1 + else: + str_pos = len(ant_str) + else: + m = m.groups() + str_pos += len(m[0]) + if m[2] is None: + ant_i_list = [m[8]] + ant_j_list = list(uv.get_ants()) + else: + if m[3] is None: + ant_i_list = [m[2]] + else: + ant_i_list = m[3].split(",") + + if m[6] is None: + ant_j_list = [m[5]] + else: + ant_j_list = m[6].split(",") + + for ant_i in ant_i_list: + include_i = True + if isinstance(ant_i, str) and ant_i.startswith("-"): + ant_i = ant_i[1:] # nibble the - off the string + include_i = False + + for ant_j in ant_j_list: + include_j = True + if isinstance(ant_j, str) and ant_j.startswith("-"): + ant_j = ant_j[1:] + include_j = False + + pols = None + ant_i, ant_j = str(ant_i), str(ant_j) + if not ant_i.isdigit(): + ai = re.search(r"(\d+)([x,y,l,r])", ant_i).groups() + + if not ant_j.isdigit(): + aj = re.search(r"(\d+)([x,y,l,r])", ant_j).groups() + + if ant_i.isdigit() and ant_j.isdigit(): + ai = [ant_i, ""] + aj = [ant_j, ""] + elif ant_i.isdigit() and not ant_j.isdigit(): + if "x" in ant_j or "y" in ant_j: + pols = ["x" + aj[1], "y" + aj[1]] + else: + pols = ["l" + aj[1], "r" + aj[1]] + ai = [ant_i, ""] + elif not ant_i.isdigit() and ant_j.isdigit(): + if "x" in ant_i or "y" in ant_i: + pols = [ai[1] + "x", ai[1] + "y"] + else: + pols = [ai[1] + "l", ai[1] + "r"] + aj = [ant_j, ""] + elif not ant_i.isdigit() and not ant_j.isdigit(): + pols = [ai[1] + aj[1]] + + ant_tuple = (abs(int(ai[0])), abs(int(aj[0]))) + + # Order tuple according to order in object + if ant_tuple in ant_pairs_data: + pass + elif ant_tuple[::-1] in ant_pairs_data: + ant_tuple = ant_tuple[::-1] + else: + if not ( + ant_tuple[0] in ants_data or ant_tuple[0] in warned_ants + ): + warned_ants.append(ant_tuple[0]) + if not ( + ant_tuple[1] in ants_data or ant_tuple[1] in warned_ants + ): + warned_ants.append(ant_tuple[1]) + if pols is not None: + for pol in pols: + if not (pol.lower() in pols_data or pol in warned_pols): + 
warned_pols.append(pol) + continue + + if include_i and include_j: + if ant_tuple not in ant_pairs_nums: + ant_pairs_nums.append(ant_tuple) + if pols is not None: + for pol in pols: + if ( + pol.lower() in pols_data + and polstr2num(pol, x_orientation=x_orientation) + not in polarizations + ): + polarizations.append( + polstr2num(pol, x_orientation=x_orientation) + ) + elif not ( + pol.lower() in pols_data or pol in warned_pols + ): + warned_pols.append(pol) + else: + if pols is not None: + for pol in pols: + if pol.lower() in pols_data: + if uv.Npols == 1 and [pol.lower()] == pols_data: + ant_pairs_nums.remove(ant_tuple) + if ( + polstr2num(pol, x_orientation=x_orientation) + in polarizations + ): + polarizations.remove( + polstr2num(pol, x_orientation=x_orientation) + ) + elif not ( + pol.lower() in pols_data or pol in warned_pols + ): + warned_pols.append(pol) + elif ant_tuple in ant_pairs_nums: + ant_pairs_nums.remove(ant_tuple) + + if ant_str.upper() == "ALL": + ant_pairs_nums = None + elif len(ant_pairs_nums) == 0: + if not ant_str.upper() in ["AUTO", "CROSS"]: + ant_pairs_nums = None + + if len(polarizations) == 0: + polarizations = None + else: + polarizations.sort(reverse=True) + + if print_toggle: + print("\nParsed antenna pairs:") + if ant_pairs_nums is not None: + for pair in ant_pairs_nums: + print(pair) + + print("\nParsed polarizations:") + if polarizations is not None: + for pol in polarizations: + print(polnum2str(pol, x_orientation=x_orientation)) + + if len(warned_ants) > 0: + warnings.warn( + "Warning: Antenna number {a} passed, but not present " + "in the ant_1_array or ant_2_array".format( + a=(",").join(map(str, warned_ants)) + ) + ) + + if len(warned_pols) > 0: + warnings.warn( + "Warning: Polarization {p} is not present in the polarization_array".format( + p=(",").join(warned_pols).upper() + ) + ) + + return ant_pairs_nums, polarizations diff --git a/src/pyuvdata/utils/bls.pyx b/src/pyuvdata/utils/bls.pyx new file mode 100644 index 0000000000..ee67140a00 --- /dev/null +++ b/src/pyuvdata/utils/bls.pyx @@ -0,0 +1,235 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License + +# distutils: language = c +# cython: linetrace=True + +# python imports +import warnings + +# cython imports + +cimport cython +cimport numpy + +numpy.import_array() + +ctypedef fused int_or_float: + numpy.uint64_t + numpy.int64_t + numpy.int32_t + numpy.uint32_t + numpy.float64_t + numpy.float32_t + + +cdef inline int_or_float max(int_or_float a, int_or_float b): + return a if a > b else b + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef int_or_float arraymin(int_or_float[::1] array) nogil: + cdef int_or_float minval = array[0] + cdef Py_ssize_t i + for i in range(array.shape[0]): + if array[i] < minval: + minval = array[i] + return minval + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef int_or_float arraymax(int_or_float[::1] array) nogil: + cdef int_or_float maxval = array[0] + cdef Py_ssize_t i + for i in range(array.shape[0]): + if array[i] > maxval: + maxval = array[i] + return maxval + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef inline void _bl_to_ant_256( + numpy.uint64_t[::1] _bl, + numpy.uint64_t[:, ::1] _ants, + long nbls, +): + cdef Py_ssize_t i + + for i in range(nbls): + _ants[1, i] = (_bl[i]) % 256 + _ants[0, i] = (_bl[i] - (_ants[1, i])) // 256 + return + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef inline void _bl_to_ant_2048( + 
numpy.uint64_t[::1] _bl,
+    numpy.uint64_t[:, ::1] _ants,
+    int nbls
+):
+    cdef Py_ssize_t i
+    for i in range(nbls):
+        _ants[1, i] = (_bl[i] - 2 ** 16) % 2048
+        _ants[0, i] = (_bl[i] - 2 ** 16 - (_ants[1, i])) // 2048
+    return
+
+# defining these constants helps cython not cast the large
+# numbers as python ints
+cdef numpy.uint64_t bl_large = 2 ** 16 + 2 ** 22
+cdef numpy.uint64_t large_mod = 2147483648
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline void _bl_to_ant_2147483648(
+    numpy.uint64_t[::1] _bl,
+    numpy.uint64_t[:, ::1] _ants,
+    int nbls
+):
+    cdef Py_ssize_t i
+    for i in range(nbls):
+        _ants[1, i] = (_bl[i] - bl_large) % large_mod
+        _ants[0, i] = (_bl[i] - bl_large - (_ants[1, i])) // large_mod
+    return
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cpdef numpy.ndarray[dtype=numpy.uint64_t, ndim=2] baseline_to_antnums(
+    numpy.uint64_t[::1] _bl
+):
+    cdef numpy.uint64_t _min = arraymin(_bl)
+    cdef long nbls = _bl.shape[0]
+    cdef int ndim = 2
+    cdef numpy.npy_intp * dims = [2, nbls]
+    cdef numpy.ndarray[ndim=2, dtype=numpy.uint64_t] ants = numpy.PyArray_EMPTY(ndim, dims, numpy.NPY_UINT64, 0)
+    cdef numpy.uint64_t[:, ::1] _ants = ants
+
+    if _min >= (2 ** 16 + 2 ** 22):
+        _bl_to_ant_2147483648(_bl, _ants, nbls)
+    elif _min >= 2 ** 16:
+        _bl_to_ant_2048(_bl, _ants, nbls)
+    else:
+        _bl_to_ant_256(_bl, _ants, nbls)
+    return ants
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef inline void _antnum_to_bl_2147483648(
+    numpy.uint64_t[::1] ant1,
+    numpy.uint64_t[::1] ant2,
+    numpy.uint64_t[::1] baselines,
+    int nbls,
+):
+    cdef Py_ssize_t i
+
+    for i in range(nbls):
+        baselines[i] = large_mod * (ant1[i]) + (ant2[i]) + bl_large
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef inline void _antnum_to_bl_2048(
+    numpy.uint64_t[::1] ant1,
+    numpy.uint64_t[::1] ant2,
+    numpy.uint64_t[::1] baselines,
+    int nbls,
+):
+    cdef Py_ssize_t i
+
+    for i in range(nbls):
+        baselines[i] = 2048 * (ant1[i]) + (ant2[i]) + 2 ** 16
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef inline void _antnum_to_bl_2048_miriad(
+    numpy.uint64_t[::1] ant1,
+    numpy.uint64_t[::1] ant2,
+    numpy.uint64_t[::1] baselines,
+    int nbls,
+):
+    cdef Py_ssize_t i
+
+    for i in range(nbls):
+        if ant2[i] > 255:  # MIRIAD uses 1-index antenna IDs
+            baselines[i] = 2048 * (ant1[i]) + (ant2[i]) + 2 ** 16
+        else:
+            baselines[i] = 256 * (ant1[i]) + (ant2[i])
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef inline void _antnum_to_bl_256(
+    numpy.uint64_t[::1] ant1,
+    numpy.uint64_t[::1] ant2,
+    numpy.uint64_t[::1] baselines,
+    int nbls,
+):
+    cdef Py_ssize_t i
+    # make views as c-contiguous arrays of a known dtype
+    # effectively turns the numpy array into a c-array
+    for i in range(nbls):
+        baselines[i] = 256 * (ant1[i]) + (ant2[i])
+    return
+
+cpdef numpy.ndarray[dtype=numpy.uint64_t] antnums_to_baseline(
+    numpy.uint64_t[::1] ant1,
+    numpy.uint64_t[::1] ant2,
+    bint attempt256=False,
+    bint nants_less2048=True,
+    bint use_miriad_convention=False
+):
+    cdef int ndim = 1
+    cdef int nbls = ant1.shape[0]
+    cdef numpy.npy_intp * dims = [nbls]
+    cdef numpy.ndarray[ndim=1, dtype=numpy.uint64_t] baseline = numpy.PyArray_EMPTY(ndim, dims, numpy.NPY_UINT64, 0)
+    cdef numpy.uint64_t[::1] _bl = baseline
+    cdef bint less256
+    cdef bint ants_less2048
+
+    # to ensure baseline numbers are unambiguous,
+    # use the 2048 calculation for antennas >= 256
+    # and use the 2147483648 calculation for antennas >= 2048
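+    # Worked example (illustrative, not from the original patch): for
+    # ant1=5, ant2=7 the 2048 standard gives 2048 * 5 + 7 + 2 ** 16 = 75783,
+    # while the older 256 standard gives 256 * 5 + 7 = 1287.
+    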
ants_less2048 = max( + arraymax(ant1), + arraymax(ant2), + ) < 2048 + + # Some UVFITS readers (e.g. MWA and AAVS) expect the + # MIRIAD baseline convention. + if use_miriad_convention: + _antnum_to_bl_2048_miriad(ant1, ant2, _bl, nbls) + + elif attempt256: + less256 = max( + arraymax(ant1), + arraymax(ant2), + ) < 256 + + if less256: + _antnum_to_bl_256(ant1, ant2, _bl, nbls) + + elif ants_less2048 and nants_less2048: + message = ( + "antnums_to_baseline: found antenna numbers > 255, using " + "2048 baseline indexing." + ) + warnings.warn(message) + _antnum_to_bl_2048(ant1, ant2, _bl, nbls) + else: + message = ( + "antnums_to_baseline: found antenna numbers > 2047 or " + "Nants_telescope > 2048, using 2147483648 baseline indexing." + ) + warnings.warn(message) + _antnum_to_bl_2147483648(ant1, ant2, _bl, nbls) + + elif ants_less2048 and nants_less2048: + _antnum_to_bl_2048(ant1, ant2, _bl, nbls) + + else: + _antnum_to_bl_2147483648(ant1, ant2, _bl, nbls) + + return baseline diff --git a/src/pyuvdata/utils/coordinates.py b/src/pyuvdata/utils/coordinates.py new file mode 100644 index 0000000000..c00916892e --- /dev/null +++ b/src/pyuvdata/utils/coordinates.py @@ -0,0 +1,474 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Utilities for coordinate transforms.""" +import numpy as np +from astropy.coordinates import EarthLocation + +from .. import _coordinates + +try: + from lunarsky import MoonLocation + + hasmoon = True +except ImportError: + hasmoon = False + +__all__ = [ + "LatLonAlt_from_XYZ", + "XYZ_from_LatLonAlt", + "rotECEF_from_ECEF", + "ECEF_from_rotECEF", + "ENU_from_ECEF", + "ECEF_from_ENU", +] + +allowed_location_types = [EarthLocation] +if hasmoon: + selenoids = { + "SPHERE": _coordinates.Body.Moon_sphere, + "GSFC": _coordinates.Body.Moon_gsfc, + "GRAIL23": _coordinates.Body.Moon_grail23, + "CE-1-LAM-GEO": _coordinates.Body.Moon_ce1lamgeo, + } + allowed_location_types.append(MoonLocation) + + +def LatLonAlt_from_XYZ(xyz, *, frame="ITRS", ellipsoid=None, check_acceptability=True): + """ + Calculate lat/lon/alt from ECEF x,y,z. + + Parameters + ---------- + xyz : ndarray of float + numpy array, shape (Npts, 3), with ECEF x,y,z coordinates. + frame : str + Coordinate frame of xyz. + Valid options are ITRS (default) or MCMF. + ellipsoid : str + Ellipsoid to use for lunar coordinates. Must be one of "SPHERE", + "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default + is "SPHERE". Only used if frame is MCMF. + check_acceptability : bool + Flag to check XYZ coordinates are reasonable. 
+
+    Returns
+    -------
+    latitude : ndarray or float
+        latitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
+    longitude : ndarray or float
+        longitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
+    altitude : ndarray or float
+        altitude, numpy array (if Npts > 1) or value (if Npts = 1) in meters
+
+    """
+    frame = frame.upper()
+    if not hasmoon and frame == "MCMF":
+        raise ValueError("Need to install `lunarsky` package to work with MCMF frame.")
+
+    if frame == "ITRS":
+        accept_bounds = (6.35e6, 6.39e6)
+    elif frame == "MCMF":
+        accept_bounds = (1.71e6, 1.75e6)
+        if ellipsoid is None:
+            ellipsoid = "SPHERE"
+
+    # convert to a numpy array
+    xyz = np.asarray(xyz)
+    if xyz.ndim > 1 and xyz.shape[1] != 3:
+        raise ValueError("The expected shape of ECEF xyz array is (Npts, 3).")
+
+    squeeze = xyz.ndim == 1
+
+    if squeeze:
+        xyz = xyz[np.newaxis, :]
+
+    xyz = np.ascontiguousarray(xyz.T, dtype=np.float64)
+
+    # checking for acceptable values
+    if check_acceptability:
+        if frame not in ["ITRS", "MCMF"]:
+            raise ValueError(f'Cannot check acceptability for unknown frame "{frame}".')
+        norms = np.linalg.norm(xyz, axis=0)
+        if not all(
+            np.logical_and(norms >= accept_bounds[0], norms <= accept_bounds[1])
+        ):
+            raise ValueError(
+                f"xyz values should be {frame} x, y, z coordinates in meters"
+            )
+    # this helper function returns one 2D array because it is less overhead for cython
+    if frame == "ITRS":
+        lla = _coordinates._lla_from_xyz(xyz, _coordinates.Body.Earth.value)
+    elif frame == "MCMF":
+        lla = _coordinates._lla_from_xyz(xyz, selenoids[ellipsoid].value)
+    else:
+        raise ValueError(
+            f'No cartesian to spherical transform defined for frame "{frame}".'
+        )
+
+    if squeeze:
+        return lla[0, 0], lla[1, 0], lla[2, 0]
+    return lla[0], lla[1], lla[2]
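+
+# A round-trip sketch (illustrative values, not part of the original patch):
+#
+#     import numpy as np
+#     from pyuvdata.utils.coordinates import LatLonAlt_from_XYZ, XYZ_from_LatLonAlt
+#
+#     lat, lon, alt = np.radians(-30.7), np.radians(21.4), 1051.7
+#     xyz = XYZ_from_LatLonAlt(lat, lon, alt)     # ECEF position, shape (3,)
+#     lat2, lon2, alt2 = LatLonAlt_from_XYZ(xyz)  # recovers the inputs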
+
+
+def XYZ_from_LatLonAlt(latitude, longitude, altitude, *, frame="ITRS", ellipsoid=None):
+    """
+    Calculate ECEF x,y,z from lat/lon/alt values.
+
+    Parameters
+    ----------
+    latitude : ndarray or float
+        latitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
+    longitude : ndarray or float
+        longitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
+    altitude : ndarray or float
+        altitude, numpy array (if Npts > 1) or value (if Npts = 1) in meters
+    frame : str
+        Coordinate frame of xyz.
+        Valid options are ITRS (default) or MCMF.
+    ellipsoid : str
+        Ellipsoid to use for lunar coordinates. Must be one of "SPHERE",
+        "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default
+        is "SPHERE". Only used if frame is MCMF.
+
+    Returns
+    -------
+    xyz : ndarray of float
+        numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
+
+    """
+    latitude = np.ascontiguousarray(latitude, dtype=np.float64)
+    longitude = np.ascontiguousarray(longitude, dtype=np.float64)
+    altitude = np.ascontiguousarray(altitude, dtype=np.float64)
+
+    n_pts = latitude.size
+
+    frame = frame.upper()
+    if not hasmoon and frame == "MCMF":
+        raise ValueError("Need to install `lunarsky` package to work with MCMF frame.")
+
+    if longitude.size != n_pts:
+        raise ValueError(
+            "latitude, longitude and altitude must all have the same length"
+        )
+
+    if altitude.size != n_pts:
+        raise ValueError(
+            "latitude, longitude and altitude must all have the same length"
+        )
+
+    if frame == "ITRS":
+        xyz = _coordinates._xyz_from_latlonalt(
+            latitude, longitude, altitude, _coordinates.Body.Earth.value
+        )
+    elif frame == "MCMF":
+        if ellipsoid is None:
+            ellipsoid = "SPHERE"
+
+        xyz = _coordinates._xyz_from_latlonalt(
+            latitude, longitude, altitude, selenoids[ellipsoid].value
+        )
+    else:
+        raise ValueError(
+            f'No spherical to cartesian transform defined for frame "{frame}".'
+        )
+
+    xyz = xyz.T
+    if n_pts == 1:
+        return xyz[0]
+
+    return xyz
+
+
+def rotECEF_from_ECEF(xyz, longitude):
+    """
+    Get rotated ECEF positions such that the x-axis goes through the longitude.
+
+    Miriad and uvfits expect antenna positions in this frame
+    (with longitude of the array center/telescope location)
+
+    Parameters
+    ----------
+    xyz : ndarray of float
+        numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
+    longitude : float
+        longitude in radians to rotate coordinates to
+        (usually the array center/telescope location).
+
+    Returns
+    -------
+    ndarray of float
+        Rotated ECEF coordinates, shape (Npts, 3).
+
+    """
+    angle = -1 * longitude
+    rot_matrix = np.array(
+        [
+            [np.cos(angle), -1 * np.sin(angle), 0],
+            [np.sin(angle), np.cos(angle), 0],
+            [0, 0, 1],
+        ]
+    )
+    return rot_matrix.dot(xyz.T).T
+
+
+def ECEF_from_rotECEF(xyz, longitude):
+    """
+    Calculate ECEF from a rotated ECEF (Inverse of rotECEF_from_ECEF).
+
+    Parameters
+    ----------
+    xyz : ndarray of float
+        numpy array, shape (Npts, 3), with rotated ECEF x,y,z coordinates.
+    longitude : float
+        longitude in radians giving the x direction of the rotated coordinates
+        (usually the array center/telescope location).
+
+    Returns
+    -------
+    ndarray of float
+        ECEF coordinates, shape (Npts, 3).
+
+    """
+    angle = longitude
+    rot_matrix = np.array(
+        [
+            [np.cos(angle), -1 * np.sin(angle), 0],
+            [np.sin(angle), np.cos(angle), 0],
+            [0, 0, 1],
+        ]
+    )
+    return rot_matrix.dot(xyz.T).T
+
+
+def ENU_from_ECEF(
+    xyz,
+    *,
+    center_loc=None,
+    latitude=None,
+    longitude=None,
+    altitude=None,
+    frame="ITRS",
+    ellipsoid=None,
+):
+    """
+    Calculate local ENU (east, north, up) coordinates from ECEF coordinates.
+
+    Parameters
+    ----------
+    xyz : ndarray of float
+        numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
+    center_loc : EarthLocation or MoonLocation object
+        An EarthLocation or MoonLocation object giving the center of the ENU
+        coordinates. Either `center_loc` or all of `latitude`, `longitude`,
+        `altitude` must be passed.
+    latitude : float
+        Latitude of center of ENU coordinates in radians.
+        Not used if `center_loc` is passed.
+    longitude : float
+        Longitude of center of ENU coordinates in radians.
+        Not used if `center_loc` is passed.
+    altitude : float
+        Altitude of center of ENU coordinates in meters.
+        Not used if `center_loc` is passed.
+    frame : str
+        Coordinate frame of xyz and center of ENU coordinates. Valid options are
+        ITRS (default) or MCMF. Not used if `center_loc` is passed.
+ ellipsoid : str + Ellipsoid to use for lunar coordinates. Must be one of "SPHERE", + "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default + is "SPHERE". Only used if frame is MCMF. Not used if `center_loc` is passed. + + Returns + ------- + ndarray of float + numpy array, shape (Npts, 3), with local ENU coordinates + + """ + if center_loc is not None: + if not isinstance(center_loc, tuple(allowed_location_types)): + raise ValueError( + "center_loc is not a supported type. It must be one of " + f"{allowed_location_types}" + ) + latitude = center_loc.lat.rad + longitude = center_loc.lon.rad + altitude = center_loc.height.to("m").value + if isinstance(center_loc, EarthLocation): + frame = "ITRS" + else: + frame = "MCMF" + ellipsoid = center_loc.ellipsoid + else: + if latitude is None or longitude is None or altitude is None: + raise ValueError( + "Either center_loc or all of latitude, longitude and altitude " + "must be passed." + ) + frame = frame.upper() + if not hasmoon and frame == "MCMF": + raise ValueError( + "Need to install `lunarsky` package to work with MCMF frame." + ) + + if frame == "ITRS": + sensible_radius_range = (6.35e6, 6.39e6) + world = "earth" + elif frame == "MCMF": + world = "moon" + sensible_radius_range = (1.71e6, 1.75e6) + if ellipsoid is None: + ellipsoid = "SPHERE" + else: + raise ValueError(f'No ENU_from_ECEF transform defined for frame "{frame}".') + + xyz = np.asarray(xyz) + if xyz.ndim > 1 and xyz.shape[1] != 3: + raise ValueError("The expected shape of ECEF xyz array is (Npts, 3).") + + squeeze = False + if xyz.ndim == 1: + squeeze = True + xyz = xyz[np.newaxis, :] + xyz = np.ascontiguousarray(xyz.T, dtype=np.float64) + + # check that these are sensible ECEF values -- their magnitudes need to be + # on the order of Earth's radius + ecef_magnitudes = np.linalg.norm(xyz, axis=0) + if np.any(ecef_magnitudes <= sensible_radius_range[0]) or np.any( + ecef_magnitudes >= sensible_radius_range[1] + ): + raise ValueError( + f"{frame} vector magnitudes must be on the order" + f" of the radius of the {world}" + ) + + # the cython utility expects (3, Npts) for faster manipulation + # transpose after we get the array back to match the expected shape + enu = _coordinates._ENU_from_ECEF( + xyz, + np.ascontiguousarray(latitude, dtype=np.float64), + np.ascontiguousarray(longitude, dtype=np.float64), + np.ascontiguousarray(altitude, dtype=np.float64), + # we have already forced the frame to conform to our options + # and if we don't have moon we have already errored. + ( + _coordinates.Body.Earth.value + if frame == "ITRS" + else selenoids[ellipsoid].value + ), + ) + enu = enu.T + + if squeeze: + enu = np.squeeze(enu) + + return enu + + +def ECEF_from_ENU( + enu, + center_loc=None, + latitude=None, + longitude=None, + altitude=None, + frame="ITRS", + ellipsoid=None, +): + """ + Calculate ECEF coordinates from local ENU (east, north, up) coordinates. + + Parameters + ---------- + enu : ndarray of float + numpy array, shape (Npts, 3), with local ENU coordinates. + center_loc : EarthLocation or MoonLocation object + An EarthLocation or MoonLocation object giving the center of the ENU + coordinates. Either `center_loc` or all of `latitude`, `longitude`, + `altitude` must be passed. + latitude : float + Latitude of center of ENU coordinates in radians. + Not used if `center_loc` is passed. + longitude : float + Longitude of center of ENU coordinates in radians. + Not used if `center_loc` is passed. 
+    altitude : float
+        Altitude of center of ENU coordinates in meters.
+        Not used if `center_loc` is passed.
+    frame : str
+        Coordinate frame of xyz and center of ENU coordinates. Valid options are
+        ITRS (default) or MCMF. Not used if `center_loc` is passed.
+    ellipsoid : str
+        Ellipsoid to use for lunar coordinates. Must be one of "SPHERE",
+        "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default
+        is "SPHERE". Only used if frame is MCMF. Not used if `center_loc` is passed.
+
+    Returns
+    -------
+    xyz : ndarray of float
+        numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
+
+    """
+    if center_loc is not None:
+        if not isinstance(center_loc, tuple(allowed_location_types)):
+            raise ValueError(
+                "center_loc is not a supported type. It must be one of "
+                f"{allowed_location_types}"
+            )
+        latitude = center_loc.lat.rad
+        longitude = center_loc.lon.rad
+        altitude = center_loc.height.to("m").value
+        if isinstance(center_loc, EarthLocation):
+            frame = "ITRS"
+        else:
+            frame = "MCMF"
+            ellipsoid = center_loc.ellipsoid
+    else:
+        if latitude is None or longitude is None or altitude is None:
+            raise ValueError(
+                "Either center_loc or all of latitude, longitude and altitude "
+                "must be passed."
+            )
+        frame = frame.upper()
+        if not hasmoon and frame == "MCMF":
+            raise ValueError(
+                "Need to install `lunarsky` package to work with MCMF frame."
+            )
+
+    if frame not in ["ITRS", "MCMF"]:
+        raise ValueError(f'No ECEF_from_ENU transform defined for frame "{frame}".')
+
+    if frame == "MCMF" and ellipsoid is None:
+        ellipsoid = "SPHERE"
+
+    enu = np.asarray(enu)
+    if enu.ndim > 1 and enu.shape[1] != 3:
+        raise ValueError("The expected shape of the ENU array is (Npts, 3).")
+    squeeze = False
+
+    if enu.ndim == 1:
+        squeeze = True
+        enu = enu[np.newaxis, :]
+    enu = np.ascontiguousarray(enu.T, dtype=np.float64)
+
+    # the cython utility expects (3, Npts) for faster manipulation
+    # transpose after we get the array back to match the expected shape
+    xyz = _coordinates._ECEF_from_ENU(
+        enu,
+        np.ascontiguousarray(latitude, dtype=np.float64),
+        np.ascontiguousarray(longitude, dtype=np.float64),
+        np.ascontiguousarray(altitude, dtype=np.float64),
+        # we have already forced the frame to conform to our options
+        # and if we don't have moon we have already errored.
+        (
+            _coordinates.Body.Earth.value
+            if frame == "ITRS"
+            else selenoids[ellipsoid].value
+        ),
+    )
+    xyz = xyz.T
+
+    if squeeze:
+        xyz = np.squeeze(xyz)
+
+    return xyz
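+
+
+# A round-trip sketch (illustrative values, not part of the original patch):
+#
+#     import numpy as np
+#     from astropy.coordinates import EarthLocation
+#     from pyuvdata.utils.coordinates import ECEF_from_ENU, ENU_from_ECEF
+#
+#     center = EarthLocation.from_geodetic(lon=21.4, lat=-30.7, height=1051.7)
+#     enu = np.array([[10.0, 0.0, 0.0]])  # 10 m east of the array center
+#     xyz = ECEF_from_ENU(enu, center_loc=center)
+#     np.allclose(ENU_from_ECEF(xyz, center_loc=center), enu)  # -> True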
diff --git a/src/pyuvdata/utils/coordinates.pyx b/src/pyuvdata/utils/coordinates.pyx
new file mode 100644
index 0000000000..6185c299a3
--- /dev/null
+++ b/src/pyuvdata/utils/coordinates.pyx
@@ -0,0 +1,226 @@
+# -*- mode: python; coding: utf-8 -*-
+# Copyright (c) 2024 Radio Astronomy Software Group
+# Licensed under the 2-clause BSD License
+
+# distutils: language = c
+# cython: linetrace=True
+
+# python imports
+import enum
+
+# cython imports
+cimport cython
+cimport numpy
+from libc.math cimport atan2, cos, sin, sqrt
+
+numpy.import_array()
+
+
+cdef class Ellipsoid:
+    cdef readonly numpy.float64_t gps_a, gps_b, e_squared, e_prime_squared, b_div_a2
+
+    @cython.cdivision
+    def __init__(self, numpy.float64_t gps_a, numpy.float64_t gps_b):
+        self.gps_a = gps_a
+        self.gps_b = gps_b
+        self.b_div_a2 = (self.gps_b / self.gps_a)**2
+        self.e_squared = (1 - self.b_div_a2)
+        self.e_prime_squared = (self.b_div_a2**-1 - 1)
+
+
+# A python interface for different celestial bodies
+class Body(enum.Enum):
+    Earth = Ellipsoid(6378137, 6356752.31424518)
+
+    try:
+        from lunarsky.moon import SELENOIDS
+
+        Moon_sphere = Ellipsoid(
+            SELENOIDS["SPHERE"]._equatorial_radius.to('m').value,
+            SELENOIDS["SPHERE"]._equatorial_radius.to('m').value * (1-SELENOIDS["SPHERE"]._flattening)
+        )
+
+        Moon_gsfc = Ellipsoid(
+            SELENOIDS["GSFC"]._equatorial_radius.to('m').value,
+            SELENOIDS["GSFC"]._equatorial_radius.to('m').value * (1-SELENOIDS["GSFC"]._flattening)
+        )
+
+        Moon_grail23 = Ellipsoid(
+            SELENOIDS["GRAIL23"]._equatorial_radius.to('m').value,
+            SELENOIDS["GRAIL23"]._equatorial_radius.to('m').value * (1-SELENOIDS["GRAIL23"]._flattening)
+        )
+
+        Moon_ce1lamgeo = Ellipsoid(
+            SELENOIDS["CE-1-LAM-GEO"]._equatorial_radius.to('m').value,
+            SELENOIDS["CE-1-LAM-GEO"]._equatorial_radius.to('m').value * (1-SELENOIDS["CE-1-LAM-GEO"]._flattening)
+        )
+    except ImportError:
+        # lunarsky not installed, don't add any moon bodies
+        pass
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cpdef numpy.ndarray[dtype=numpy.float64_t, ndim=2] _lla_from_xyz(
+    numpy.float64_t[:, ::1] xyz,
+    Ellipsoid body,
+):
+    cdef Py_ssize_t ind
+    cdef int ndim = 2
+    cdef int n_pts = xyz.shape[1]
+    cdef numpy.npy_intp * dims = [3, n_pts]
+
+    cdef numpy.ndarray[dtype=numpy.float64_t, ndim=2] lla = numpy.PyArray_EMPTY(ndim, dims, numpy.NPY_FLOAT64, 0)
+    cdef numpy.float64_t[:, ::1] _lla = lla
+
+    cdef numpy.float64_t gps_p, gps_theta
+
+    # see wikipedia geodetic_datum and Datum transformations of
+    # GPS positions PDF in docs/references folder
+    for ind in range(n_pts):
+        gps_p = sqrt(xyz[0, ind] ** 2 + xyz[1, ind] ** 2)
+        gps_theta = atan2(xyz[2, ind] * body.gps_a, gps_p * body.gps_b)
+
+        _lla[0, ind] = atan2(
+            xyz[2, ind] + body.e_prime_squared * body.gps_b * sin(gps_theta) ** 3,
+            gps_p - body.e_squared * body.gps_a * cos(gps_theta) ** 3,
+        )
+
+        _lla[1, ind] = atan2(xyz[1, ind], xyz[0, ind])
+
+        _lla[2, ind] = (gps_p / cos(lla[0, ind])) - body.gps_a / sqrt(1.0 - body.e_squared * sin(lla[0, ind]) ** 2)
+
+    return lla
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cpdef numpy.ndarray[dtype=numpy.float64_t, ndim=2] _xyz_from_latlonalt(
+    numpy.float64_t[::1] _lat,
+    numpy.float64_t[::1] _lon,
+    numpy.float64_t[::1] _alt,
+    Ellipsoid body,
+):
+    cdef Py_ssize_t ind
+    cdef int ndim = 2
+    cdef int n_pts = _lat.shape[0]
+    cdef
numpy.npy_intp * dims = [3, n_pts] + + cdef numpy.ndarray[dtype=numpy.float64_t, ndim=2] xyz = numpy.PyArray_EMPTY(ndim, dims, numpy.NPY_FLOAT64, 0) + cdef numpy.float64_t[:, ::1] _xyz = xyz + + cdef numpy.float64_t sin_lat, cos_lat, sin_lon, cos_lon, gps_n + + for ind in range(n_pts): + sin_lat = sin(_lat[ind]) + sin_lon = sin(_lon[ind]) + + cos_lat = cos(_lat[ind]) + cos_lon = cos(_lon[ind]) + + gps_n = body.gps_a / sqrt(1.0 - body.e_squared * sin_lat ** 2) + + _xyz[0, ind] = (gps_n + _alt[ind]) * cos_lat * cos_lon + _xyz[1, ind] = (gps_n + _alt[ind]) * cos_lat * sin_lon + + _xyz[2, ind] = (body.b_div_a2 * gps_n + _alt[ind]) * sin_lat + return xyz + +# this function takes memoryviews as inputs +# that is why _lat, _lon, and _alt are indexed below to get the 0th entry +@cython.boundscheck(False) +@cython.wraparound(False) +cpdef numpy.ndarray[numpy.float64_t, ndim=2] _ENU_from_ECEF( + numpy.float64_t[:, ::1] xyz, + numpy.float64_t[::1] _lat, + numpy.float64_t[::1] _lon, + numpy.float64_t[::1] _alt, + Ellipsoid body, +): + cdef Py_ssize_t i + cdef int ndim = 2 + cdef int nblts = xyz.shape[1] + cdef numpy.npy_intp * dims = [3, nblts] + cdef numpy.float64_t xyz_use[3] + + cdef numpy.float64_t sin_lat, cos_lat, sin_lon, cos_lon + + # we want a memoryview of the xyz of the center + # this looks a little silly but we don't have to define 2 different things + cdef numpy.float64_t[:] xyz_center = _xyz_from_latlonalt(_lat, _lon, _alt, body).T[0] + + cdef numpy.ndarray[numpy.float64_t, ndim=2] _enu = numpy.PyArray_EMPTY(ndim, dims, numpy.NPY_FLOAT64, 0) + cdef numpy.float64_t[:, ::1] enu = _enu + + sin_lat = sin(_lat[0]) + cos_lat = cos(_lat[0]) + + sin_lon = sin(_lon[0]) + cos_lon = cos(_lon[0]) + + for i in range(nblts): + xyz_use[0] = xyz[0, i] - xyz_center[0] + xyz_use[1] = xyz[1, i] - xyz_center[1] + xyz_use[2] = xyz[2, i] - xyz_center[2] + + enu[0, i] = -sin_lon * xyz_use[0] + cos_lon * xyz_use[1] + enu[1, i] = ( + - sin_lat * cos_lon * xyz_use[0] + - sin_lat * sin_lon * xyz_use[1] + + cos_lat * xyz_use[2] + ) + enu[2, i] = ( + cos_lat * cos_lon * xyz_use[0] + + cos_lat * sin_lon * xyz_use[1] + + sin_lat * xyz_use[2] + ) + + return _enu + +# this function takes memoryviews as inputs +# that is why _lat, _lon, and _alt are indexed below to get the 0th entry +@cython.boundscheck(False) +@cython.wraparound(False) +cpdef numpy.ndarray[dtype=numpy.float64_t] _ECEF_from_ENU( + numpy.float64_t[:, ::1] enu, + numpy.float64_t[::1] _lat, + numpy.float64_t[::1] _lon, + numpy.float64_t[::1] _alt, + Ellipsoid body, +): + cdef Py_ssize_t i + cdef int ndim = 2 + cdef int nblts = enu.shape[1] + cdef numpy.npy_intp * dims = [3, nblts] + cdef numpy.float64_t sin_lat, cos_lat, sin_lon, cos_lon + + # allocate memory then make memory view for faster access + cdef numpy.ndarray[dtype=numpy.float64_t, ndim=2] _xyz = numpy.PyArray_EMPTY(ndim, dims, numpy.NPY_FLOAT64, 0) + cdef numpy.float64_t[:, ::1] xyz = _xyz + + # we want a memoryview of the xyz of the center + # this looks a little silly but we don't have to define 2 different things + cdef numpy.float64_t[:] xyz_center = _xyz_from_latlonalt(_lat, _lon, _alt, body).T[0] + + sin_lat = sin(_lat[0]) + cos_lat = cos(_lat[0]) + + sin_lon = sin(_lon[0]) + cos_lon = cos(_lon[0]) + + for i in range(nblts): + xyz[0, i] = ( + - sin_lat * cos_lon * enu[1, i] + - sin_lon * enu[0, i] + + cos_lat * cos_lon * enu[2, i] + + xyz_center[0] + ) + xyz[1, i] = ( + - sin_lat * sin_lon * enu[1, i] + + cos_lon * enu[0, i] + + cos_lat * sin_lon * enu[2, i] + + xyz_center[1] + ) + xyz[2, 
i] = cos_lat * enu[1, i] + sin_lat * enu[2, i] + xyz_center[2]
+
+    return _xyz
diff --git a/src/pyuvdata/utils/file_io/__init__.py b/src/pyuvdata/utils/file_io/__init__.py
new file mode 100644
index 0000000000..f707304f44
--- /dev/null
+++ b/src/pyuvdata/utils/file_io/__init__.py
@@ -0,0 +1,4 @@
+# -*- mode: python; coding: utf-8 -*-
+# Copyright (c) 2024 Radio Astronomy Software Group
+# Licensed under the 2-clause BSD License
+"""I/O utility methods."""
diff --git a/src/pyuvdata/utils/file_io/antpos.py b/src/pyuvdata/utils/file_io/antpos.py
new file mode 100644
index 0000000000..dbe215df85
--- /dev/null
+++ b/src/pyuvdata/utils/file_io/antpos.py
@@ -0,0 +1,50 @@
+# -*- mode: python; coding: utf-8 -*-
+# Copyright (c) 2024 Radio Astronomy Software Group
+# Licensed under the 2-clause BSD License
+"""Read antenna position files."""
+
+import numpy as np
+
+
+def read_antpos_csv(antenna_positions_file):
+    """
+    Interpret an antenna positions file.
+
+    Parameters
+    ----------
+    antenna_positions_file : str
+        Name of the antenna_positions_file, which is assumed to be in DATA_PATH.
+        Should be a csv file with the following columns:
+
+        - "name": antenna names
+        - "number": antenna numbers
+        - "x": x ECEF coordinate relative to the telescope location.
+        - "y": y ECEF coordinate relative to the telescope location.
+        - "z": z ECEF coordinate relative to the telescope location.
+
+    Returns
+    -------
+    antenna_names : array of str
+        Antenna names.
+    antenna_numbers : array of int
+        Antenna numbers.
+    antenna_positions : array of float
+        Antenna positions in ECEF relative to the telescope location.
+
+    """
+    columns = ["name", "number", "x", "y", "z"]
+    formats = ["U10", "i8", np.longdouble, np.longdouble, np.longdouble]
+
+    dt = np.rec.format_parser(formats, columns, [])
+    ant_array = np.genfromtxt(
+        antenna_positions_file,
+        delimiter=",",
+        autostrip=True,
+        skip_header=1,
+        dtype=dt.dtype,
+    )
+    antenna_names = ant_array["name"]
+    antenna_numbers = ant_array["number"]
+    antenna_positions = np.stack((ant_array["x"], ant_array["y"], ant_array["z"])).T
+
+    return antenna_names, antenna_numbers, antenna_positions.astype("float")
diff --git a/src/pyuvdata/utils/file_io/fits.py b/src/pyuvdata/utils/file_io/fits.py
new file mode 100644
index 0000000000..a421232bd5
--- /dev/null
+++ b/src/pyuvdata/utils/file_io/fits.py
@@ -0,0 +1,117 @@
+# -*- mode: python; coding: utf-8 -*-
+# Copyright (c) 2024 Radio Astronomy Software Group
+# Licensed under the 2-clause BSD License
+"""Utilities for working with FITS files."""
+
+import numpy as np
+
+
+def _gethduaxis(hdu, axis):
+    """
+    Make axis arrays for fits files.
+
+    Parameters
+    ----------
+    hdu : astropy.io.fits HDU object
+        The HDU to make an axis array for.
+    axis : int
+        The axis number of interest (1-based).
+
+    Returns
+    -------
+    ndarray of float
+        Array of values for the specified axis.
+
+    """
+    ax = str(axis)
+    axis_num = hdu.header["NAXIS" + ax]
+    val = hdu.header["CRVAL" + ax]
+    delta = hdu.header["CDELT" + ax]
+    index = hdu.header["CRPIX" + ax] - 1
+
+    return delta * (np.arange(axis_num) - index) + val
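+
+
+# Worked example (illustrative header values, not part of the original patch):
+# with NAXIS4 = 5, CRVAL4 = 100.0, CDELT4 = 2.0 and CRPIX4 = 3.0, the
+# zero-based reference index is 2, so _gethduaxis(hdu, 4) returns
+# 2.0 * (np.arange(5) - 2) + 100.0 == [96., 98., 100., 102., 104.].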
+
+
+def _indexhdus(hdulist):
+    """
+    Get a dict of table names and HDU numbers from a FITS HDU list.
+
+    Parameters
+    ----------
+    hdulist : list of astropy.io.fits HDU objects
+        List of HDUs to get names for
+
+    Returns
+    -------
+    dict
+        dictionary with table names as keys and HDU number as values.
+
+    """
+    tablenames = {}
+    for i in range(len(hdulist)):
+        try:
+            tablenames[hdulist[i].header["EXTNAME"]] = i
+        except KeyError:
+            continue
+    return tablenames
+
+
+def _get_extra_keywords(header, *, keywords_to_skip=None):
+    """
+    Get any extra keywords and return as dict.
+
+    Parameters
+    ----------
+    header : FITS header object
+        header object to get extra_keywords from.
+    keywords_to_skip : list of str
+        list of keywords to not include in extra keywords in addition to standard
+        FITS keywords.
+
+    Returns
+    -------
+    dict
+        dict of extra keywords.
+    """
+    # List standard FITS header items that should not be included in
+    # extra_keywords
+    # These are the beginnings of FITS keywords to ignore, the actual keywords
+    # often include integers following these names (e.g. NAXIS1, CTYPE3)
+    std_fits_substrings = [
+        "HISTORY",
+        "SIMPLE",
+        "BITPIX",
+        "EXTEND",
+        "BLOCKED",
+        "GROUPS",
+        "PCOUNT",
+        "GCOUNT",
+        "BSCALE",
+        "BZERO",
+        "NAXIS",
+        "PTYPE",
+        "PSCAL",
+        "PZERO",
+        "CTYPE",
+        "CRVAL",
+        "CRPIX",
+        "CDELT",
+        "CROTA",
+        "CUNIT",
+    ]
+
+    if keywords_to_skip is not None:
+        std_fits_substrings.extend(keywords_to_skip)
+
+    extra_keywords = {}
+    # find all the other header items and keep them as extra_keywords
+    for key in header:
+        # check if key contains any of the standard FITS substrings
+        if np.any([sub in key for sub in std_fits_substrings]):
+            continue
+        if key == "COMMENT":
+            extra_keywords[key] = str(header.get(key))
+        elif key != "":
+            extra_keywords[key] = header.get(key)
+
+    return extra_keywords
diff --git a/src/pyuvdata/hdf5_utils.py b/src/pyuvdata/utils/file_io/hdf5.py
similarity index 71%
rename from src/pyuvdata/hdf5_utils.py
rename to src/pyuvdata/utils/file_io/hdf5.py
index c0e3588b6e..a9539eb284 100644
--- a/src/pyuvdata/hdf5_utils.py
+++ b/src/pyuvdata/utils/file_io/hdf5.py
@@ -21,7 +21,7 @@
 except ImportError:
     hasmoon = False
 
-from . import utils
+from ..coordinates import ENU_from_ECEF, LatLonAlt_from_XYZ
 
 hdf5plugin_present = True
 try:
@@ -71,6 +71,193 @@ def _check_complex_dtype(dtype):
     return
 
 
+def _get_slice_len(s, axlen):
+    """
+    Get length of a slice s into array of len axlen.
+
+    Parameters
+    ----------
+    s : slice object
+        Slice object to index with
+    axlen : int
+        Length of axis s slices into
+
+    Returns
+    -------
+    int
+        Length of slice object
+    """
+    if s.start is None:
+        start = 0
+    else:
+        start = s.start
+    if s.stop is None:
+        stop = axlen
+    else:
+        stop = np.min([s.stop, axlen])
+    if s.step is None:
+        step = 1
+    else:
+        step = s.step
+
+    return ((stop - 1 - start) // step) + 1
+
+
+def _get_dset_shape(dset, indices):
+    """
+    Given a tuple of indices, determine the indexed array shape.
+
+    Parameters
+    ----------
+    dset : numpy array or h5py dataset
+        A numpy array or a reference to an HDF5 dataset on disk. Requires the
+        `dset.shape` attribute exists and returns a tuple.
+    indices : tuple
+        A tuple with the indices to extract along each dimension of dset.
+        Each element should contain a list of indices, a slice element,
+        or a list of slice elements that will be concatenated after slicing.
+        For data arrays with 4 dimensions, the second dimension (the old spw axis)
+        should not be included because it can only be length one.
+
+    Returns
+    -------
+    tuple
+        a tuple with the shape of the indexed array
+    tuple
+        a tuple with the indices used (will be different from the input if dset
+        has 4 dimensions and indices has 3 dimensions)
+    """
+    dset_shape = list(dset.shape)
+    if len(dset_shape) == 4 and len(indices) == 3:
+        indices = (indices[0], np.s_[:], indices[1], indices[2])
+
+    for i, inds in enumerate(indices):
+        # check for integer
+        if isinstance(inds, (int, np.integer)):
+            dset_shape[i] = 1
+        # check for slice object
+        if isinstance(inds, slice):
+            dset_shape[i] = _get_slice_len(inds, dset_shape[i])
+        # check for list
+        if isinstance(inds, list):
+            # check for list of integers
+            if isinstance(inds[0], (int, np.integer)):
+                dset_shape[i] = len(inds)
+            elif isinstance(inds[0], slice):
+                dset_shape[i] = sum((_get_slice_len(s, dset_shape[i]) for s in inds))
+
+    return dset_shape, indices
+
+
+def _index_dset(dset, indices, *, input_array=None):
+    """
+    Index a UVH5 data, flags or nsamples h5py dataset to get data or overwrite data.
+
+    If no `input_array` is passed, this function extracts the data at the indices
+    and returns it. If `input_array` is passed, this function replaces the data at
+    the indices with the input array.
+
+    Parameters
+    ----------
+    dset : h5py dataset
+        A reference to an HDF5 dataset on disk.
+    indices : tuple
+        A tuple with the indices to extract along each dimension of dset.
+        Each element should contain a list of indices, a slice element,
+        or a list of slice elements that will be concatenated after slicing.
+        Indices must be provided such that all dimensions can be indexed
+        simultaneously. This should have a length equal to the length of the dset,
+        with an exception to support the old array shape for uvdata arrays (in that
+        case the dset is length 4 but the second dimension is shallow, so only three
+        indices need to be passed).
+    input_array : ndarray, optional
+        Array to be copied into the dset at the indices. If not provided, the data in
+        the dset is indexed and returned.
+
+    Returns
+    -------
+    ndarray or None
+        The indexed dset if the `input_array` parameter is not used.
+
+    Notes
+    -----
+    This makes and fills an empty array with dset indices.
+    For trivial indexing (e.g. a trivial slice), constructing
+    a new array and filling it is suboptimal compared to direct
+    indexing, e.g. dset[indices].
+    This function specializes in repeated slices over the same axis,
+    e.g. 
if indices is [[slice(0, 5), slice(10, 15), ...], ..., ] + """ + # get dset and arr shape + dset_shape = dset.shape + arr_shape, indices = _get_dset_shape(dset, indices) + + if input_array is None: + # create empty array of dset dtype + arr = np.empty(arr_shape, dtype=dset.dtype) + else: + arr = input_array + + # get arr and dset indices for each dimension in indices + dset_indices = [] + arr_indices = [] + nselects_per_dim = [] + for i, dset_inds in enumerate(indices): + if isinstance(dset_inds, (int, np.integer)): + # this dimension is len 1, so slice is fine + arr_indices.append([slice(None)]) + dset_indices.append([[dset_inds]]) + nselects_per_dim.append(1) + + elif isinstance(dset_inds, slice): + # this dimension is just a slice, so slice is fine + arr_indices.append([slice(None)]) + dset_indices.append([dset_inds]) + nselects_per_dim.append(1) + + elif isinstance(dset_inds, (list, np.ndarray)): + if isinstance(dset_inds[0], (int, np.integer)): + # this is a list of integers, append slice + arr_indices.append([slice(None)]) + dset_indices.append([dset_inds]) + nselects_per_dim.append(1) + elif isinstance(dset_inds[0], slice): + # this is a list of slices, need list of slice lens + slens = [_get_slice_len(s, dset_shape[i]) for s in dset_inds] + ssums = [sum(slens[:j]) for j in range(len(slens))] + arr_inds = [slice(s, s + l) for s, l in zip(ssums, slens)] + arr_indices.append(arr_inds) + dset_indices.append(dset_inds) + nselects_per_dim.append(len(dset_inds)) + + # iterate through all selections and fill the array + total_selects = np.prod(nselects_per_dim) + axis_arrays = [] + for nsel in nselects_per_dim: + axis_arrays.append(np.arange(nsel, dtype=int)) + sel_index_arrays = list(np.meshgrid(*axis_arrays)) + for ind, array in enumerate(sel_index_arrays): + sel_index_arrays[ind] = array.flatten() + for sel in np.arange(total_selects): + sel_arr_indices = [] + sel_dset_indices = [] + for dim in np.arange(len(dset_shape)): + this_index = (sel_index_arrays[dim])[sel] + sel_arr_indices.append(arr_indices[dim][this_index]) + sel_dset_indices.append(dset_indices[dim][this_index]) + if input_array is None: + # index dset and assign to arr + arr[(*sel_arr_indices,)] = dset[(*sel_dset_indices,)] + else: + # index arr and assign to dset + dset[(*sel_dset_indices,)] = arr[(*sel_arr_indices,)] + + if input_array is None: + return arr + else: + return + + def _read_complex_astype(dset, indices, dtype_out=np.complex64): """ Read the given data set of a specified type to floating point complex data. 
@@ -101,7 +288,7 @@ def _read_complex_astype(dset, indices, dtype_out=np.complex64): raise ValueError( "output datatype must be one of (complex, np.complex64, np.complex128)" ) - dset_shape, indices = utils._get_dset_shape(dset, indices) + dset_shape, indices = _get_dset_shape(dset, indices) output_array = np.empty(dset_shape, dtype=dtype_out) # dset is indexed in native dtype, but is upcast upon assignment @@ -110,7 +297,7 @@ def _read_complex_astype(dset, indices, dtype_out=np.complex64): else: compound_dtype = [("r", "f8"), ("i", "f8")] - output_array.view(compound_dtype)[:, :] = utils._index_dset(dset, indices)[:, :] + output_array.view(compound_dtype)[:, :] = _index_dset(dset, indices)[:, :] return output_array @@ -381,7 +568,7 @@ def telescope_location_lat_lon_alt(self) -> tuple[float, float, float]: # this branch is for old UVFlag files, which were written with an # ECEF 'telescope_location' key rather than the more standard # latitude in degrees, longitude in degrees, altitude - return utils.LatLonAlt_from_XYZ( + return LatLonAlt_from_XYZ( self.telescope_location, frame=self.telescope_frame, ellipsoid=self.ellipsoid, @@ -405,7 +592,7 @@ def telescope_location_lat_lon_alt_degrees(self) -> tuple[float, float, float]: def antpos_enu(self) -> np.ndarray: """The antenna positions in ENU coordinates, in meters.""" lat, lon, alt = self.telescope_location_lat_lon_alt - return utils.ENU_from_ECEF( + return ENU_from_ECEF( self.antenna_positions + self.telescope_location, latitude=lat, longitude=lon, diff --git a/src/pyuvdata/ms_utils.py b/src/pyuvdata/utils/file_io/ms.py similarity index 99% rename from src/pyuvdata/ms_utils.py rename to src/pyuvdata/utils/file_io/ms.py index 5ac9628dd7..96e841e9b7 100644 --- a/src/pyuvdata/ms_utils.py +++ b/src/pyuvdata/utils/file_io/ms.py @@ -10,8 +10,8 @@ from astropy.coordinates import EarthLocation from astropy.time import Time -from . import __version__, known_telescope_location, known_telescopes, utils -from .uvdata.uvdata import reporting_request +from ... 
import __version__, known_telescope_location, known_telescopes, utils +from ...uvdata.uvdata import reporting_request try: from lunarsky import MoonLocation @@ -940,7 +940,7 @@ def read_ms_history(filepath, pyuvdata_version_str, check_origin=False, raise_er history_str += message[idx] + "\n" # Check and make sure the pyuvdata version is in the history if it's not already - if not utils._check_history_version(history_str, pyuvdata_version_str): + if not utils.helpers._check_history_version(history_str, pyuvdata_version_str): history_str += pyuvdata_version_str # Finally, return the completed string @@ -1419,7 +1419,9 @@ def write_ms_feed( pol_str = utils.polnum2str(flex_spw_polarization_array) with tables.table(filepath, ack=False, readonly=False) as feed_table: - feed_pols = {feed for pol in pol_str for feed in utils.POL_TO_FEED_DICT[pol]} + feed_pols = { + feed for pol in pol_str for feed in utils.pol.POL_TO_FEED_DICT[pol] + } nfeed_pols = len(feed_pols) pol_types = [pol.upper() for pol in sorted(feed_pols)] pol_type_table = np.tile(pol_types, (nfeeds_table, 1)) @@ -1837,7 +1839,7 @@ def write_ms_polarization( for idx, spw_pol in enumerate(np.unique(pol_arr)): pol_str = utils.polnum2str([spw_pol]) feed_pols = { - feed for pol in pol_str for feed in utils.POL_TO_FEED_DICT[pol] + feed for pol in pol_str for feed in utils.pol.POL_TO_FEED_DICT[pol] } pol_types = [pol.lower() for pol in sorted(feed_pols)] pol_tuples = np.asarray( @@ -1854,7 +1856,7 @@ def write_ms_polarization( else: pol_str = utils.polnum2str(pol_arr) feed_pols = { - feed for pol in pol_str for feed in utils.POL_TO_FEED_DICT[pol] + feed for pol in pol_str for feed in utils.pol.POL_TO_FEED_DICT[pol] } pol_types = [pol.lower() for pol in sorted(feed_pols)] pol_tuples = np.asarray( diff --git a/src/pyuvdata/utils/helpers.py b/src/pyuvdata/utils/helpers.py new file mode 100644 index 0000000000..0838c27e30 --- /dev/null +++ b/src/pyuvdata/utils/helpers.py @@ -0,0 +1,1340 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Helper utilities.""" + +import warnings +from collections.abc import Iterable +from typing import Iterable as IterableType + +import numpy as np +from astropy.coordinates import EarthLocation + +from .coordinates import hasmoon +from .lst import get_lst_for_time + +if hasmoon: + from lunarsky import MoonLocation + +_range_dict = { + "itrs": (6.35e6, 6.39e6, "Earth"), + "mcmf": (1717100.0, 1757100.0, "Moon"), +} + + +def _check_history_version(history, version_string): + """Check if version_string is present in history string.""" + if version_string.replace(" ", "") in history.replace("\n", "").replace(" ", ""): + return True + else: + return False + + +def _check_histories(history1, history2): + """Check if two histories are the same.""" + if history1.replace("\n", "").replace(" ", "") == history2.replace( + "\n", "" + ).replace(" ", ""): + return True + else: + return False + + +def _combine_history_addition(history1, history2): + """ + Find extra history to add to have minimal repeats. + + Parameters + ---------- + history1 : str + First history. + history2 : str + Second history + + Returns + ------- + str + Extra history to add to first history. + + """ + # first check if they're the same to avoid more complicated processing. 
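+    # (illustrative example: combining an object with a copy of itself gives
+    # two identical histories, so there is nothing extra to add)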
+ if _check_histories(history1, history2): + return None + + hist2_words = history2.split(" ") + add_hist = "" + test_hist1 = " " + history1 + " " + for i, word in enumerate(hist2_words): + if " " + word + " " not in test_hist1: + add_hist += " " + word + keep_going = i + 1 < len(hist2_words) + while keep_going: + if (hist2_words[i + 1] == " ") or ( + " " + hist2_words[i + 1] + " " not in test_hist1 + ): + add_hist += " " + hist2_words[i + 1] + del hist2_words[i + 1] + keep_going = i + 1 < len(hist2_words) + else: + keep_going = False + + if add_hist == "": + add_hist = None + return add_hist + + +def _test_array_constant(array, *, tols=None): + """ + Check if an array contains constant values to some tolerance. + + Uses np.isclose on the min & max of the arrays with the given tolerances. + + Parameters + ---------- + array : np.ndarray or UVParameter + UVParameter or array to check for constant values. + tols : tuple of float, optional + length 2 tuple giving (rtol, atol) to pass to np.isclose, defaults to (0, 0) if + passing an array, otherwise defaults to using the tolerance on the UVParameter. + + Returns + ------- + bool + True if the array is constant to the given tolerances, False otherwise. + """ + # Import UVParameter here rather than at the top to avoid circular imports + from pyuvdata.parameter import UVParameter + + if isinstance(array, UVParameter): + array_to_test = array.value + if tols is None: + tols = array.tols + else: + array_to_test = array + if tols is None: + tols = (0, 0) + assert isinstance(tols, tuple), "tols must be a length-2 tuple" + assert len(tols) == 2, "tols must be a length-2 tuple" + + if array_to_test.size == 1: + # arrays with 1 element are constant by definition + return True + + # if min and max are equal don't bother with tolerance checking + if np.min(array_to_test) == np.max(array_to_test): + return True + + return np.isclose( + np.min(array_to_test), np.max(array_to_test), rtol=tols[0], atol=tols[1] + ) + + +def _test_array_constant_spacing(array, *, tols=None): + """ + Check if an array is constantly spaced to some tolerance. + + Calls _test_array_constant on the np.diff of the array. + + Parameters + ---------- + array : np.ndarray or UVParameter + UVParameter or array to check for constant spacing. + tols : tuple of float, optional + length 2 tuple giving (rtol, atol) to pass to np.isclose, defaults to (0, 0) if + passing an array, otherwise defaults to using the tolerance on the UVParameter. + + Returns + ------- + bool + True if the array spacing is constant to the given tolerances, False otherwise. + """ + # Import UVParameter here rather than at the top to avoid circular imports + from pyuvdata.parameter import UVParameter + + if isinstance(array, UVParameter): + array_to_test = array.value + if tols is None: + tols = array.tols + else: + array_to_test = array + if tols is None: + tols = (0, 0) + assert isinstance(tols, tuple), "tols must be a length-2 tuple" + assert len(tols) == 2, "tols must be a length-2 tuple" + + if array_to_test.size <= 2: + # arrays with 1 or 2 elements are constantly spaced by definition + return True + + array_diff = np.diff(array_to_test) + return _test_array_constant(array_diff, tols=tols) + + +def _check_flex_spw_contiguous(*, spw_array, flex_spw_id_array): + """ + Check if the spectral windows are contiguous for multi-spw datasets. 
+
+    This checks the flex_spw_id_array to make sure that all channels for each
+    spectral window are together in one block, versus being interspersed (e.g.,
+    channels #1 and #3 are in spw #1, channels #2 and #4 are in spw #2). In theory,
+    UVH5 and UVData objects can handle this, but MIRIAD, MIR, UVFITS, and MS file
+    formats cannot, so we just consider it forbidden.
+
+    Parameters
+    ----------
+    spw_array : array of integers
+        Array of spectral window numbers, shape (Nspws,).
+    flex_spw_id_array : array of integers
+        Array of spectral window numbers per frequency channel, shape (Nfreqs,).
+
+    """
+    exp_spw_ids = np.unique(spw_array)
+    # This is an internal consistency check to make sure that the indices match
+    # up as expected -- this shouldn't error unless someone is mucking with
+    # settings they shouldn't be.
+    assert np.all(np.unique(flex_spw_id_array) == exp_spw_ids), (
+        "There are some entries in flex_spw_id_array that are not in spw_array. "
+        "This is a bug, please report it in an issue."
+    )
+
+    n_breaks = np.sum(flex_spw_id_array[1:] != flex_spw_id_array[:-1])
+    if (n_breaks + 1) != spw_array.size:
+        raise ValueError(
+            "Channels from different spectral windows are interspersed with "
+            "one another, rather than being grouped together along the "
+            "frequency axis. Most file formats do not support such "
+            "non-grouping of data."
+        )
+
+
+def _check_freq_spacing(
+    *,
+    freq_array,
+    freq_tols,
+    channel_width,
+    channel_width_tols,
+    spw_array,
+    flex_spw_id_array,
+    raise_errors=True,
+):
+    """
+    Check if frequencies are evenly spaced and separated by their channel width.
+
+    This is a requirement for writing uvfits & miriad files.
+
+    Parameters
+    ----------
+    freq_array : array of float
+        Array of frequencies, shape (Nfreqs,).
+    freq_tols : tuple of float
+        freq_array tolerances (from uvobj._freq_array.tols).
+    channel_width : array of float
+        Channel widths, either a scalar or an array of shape (Nfreqs,).
+    channel_width_tols : tuple of float
+        channel_width tolerances (from uvobj._channel_width.tols).
+    spw_array : array of integers or None
+        Array of spectral window numbers, shape (Nspws,).
+    flex_spw_id_array : array of integers or None
+        Array of spectral window numbers per frequency channel, shape (Nfreqs,).
+    raise_errors : bool
+        Option to raise errors if the various checks do not pass.
+
+    Returns
+    -------
+    spacing_error : bool
+        Flag that channel spacings or channel widths are not equal.
+    chanwidth_error : bool
+        Flag that channel spacing does not match channel width.
+
+    """
+    spacing_error = False
+    chanwidth_error = False
+
+    # Check to make sure that the flexible spectral window has indices set up
+    # correctly (grouped together) for this check
+    _check_flex_spw_contiguous(spw_array=spw_array, flex_spw_id_array=flex_spw_id_array)
+
+    for spw_id in spw_array:
+        mask = flex_spw_id_array == spw_id
+        if sum(mask) > 1:
+            freq_spacing = np.diff(freq_array[mask])
+            freq_dir = -1.0 if all(freq_spacing < 0) else 1.0
+            if not _test_array_constant(freq_spacing, tols=freq_tols):
+                spacing_error = True
+            if not _test_array_constant(channel_width[mask], tols=channel_width_tols):
+                spacing_error = True
+            elif not np.allclose(
+                freq_spacing,
+                np.mean(channel_width[mask]) * freq_dir,
+                rtol=channel_width_tols[0],
+                atol=channel_width_tols[1],
+            ):
+                chanwidth_error = True
+
+    if raise_errors and spacing_error:
+        raise ValueError(
+            "The frequencies are not evenly spaced (probably because of a select "
+            "operation) or have differing values of channel widths. Some file formats "
+            "(e.g. uvfits, miriad) do not support unevenly spaced frequencies."
+        )
+    if raise_errors and chanwidth_error:
+        raise ValueError(
+            "The frequencies are separated by more than their channel width (probably "
+            "because of a select operation). Some file formats (e.g. uvfits, miriad) "
+            "do not support frequencies that are spaced by more than their channel "
+            "width."
+        )
+
+    return spacing_error, chanwidth_error
+
+
+def _get_iterable(x):
+    """Return iterable version of input."""
+    if isinstance(x, Iterable):
+        return x
+    else:
+        return (x,)
+
+
+def _sort_freq_helper(
+    *,
+    Nfreqs,  # noqa: N803
+    freq_array,
+    Nspws,
+    spw_array,
+    flex_spw_id_array,
+    spw_order,
+    channel_order,
+    select_spw,
+):
+    """
+    Figure out the frequency sorting order for object based frequency sorting.
+
+    Parameters
+    ----------
+    Nfreqs : int
+        Number of frequencies, taken directly from the object parameter.
+    freq_array : array_like of float
+        Frequency array, taken directly from the object parameter.
+    Nspws : int
+        Number of spectral windows, taken directly from the object parameter.
+    spw_array : array_like of int
+        Spectral window array, taken directly from the object parameter.
+    flex_spw_id_array : array_like of int
+        Array of SPW IDs for each channel, taken directly from the object parameter.
+    spw_order : str or array_like of int
+        A string describing the desired order of spectral windows along the
+        frequency axis. Allowed strings include `number` (sort on spectral window
+        number) and `freq` (sort on median frequency). A '-' can be prepended
+        to signify descending order instead of the default ascending order,
+        e.g., if you have SPW #1 and 2, and wanted them ordered as [2, 1],
+        you would specify `-number`. Alternatively, one can supply an index array
+        of length Nspws that specifies how to shuffle the spws (this is not the desired
+        final spw order). Default is to apply no sorting of spectral windows.
+    channel_order : str or array_like of int
+        A string describing the desired order of frequency channels within a
+        spectral window. Allowed strings include `freq`, which will sort channels
+        within a spectral window by frequency. A '-' can be optionally prepended
+        to signify descending order instead of the default ascending order.
+        Alternatively, one can supply an index array of length Nfreqs that
+        specifies the new order. Default is to apply no sorting of channels
+        within a single spectral window. Note that providing an array_like of ints
+        will cause the values given to `spw_order` and `select_spw` to be ignored.
+    select_spw : int or array_like of int
+        An int or array_like of ints which specifies which spectral windows to
+        apply sorting. Note that setting this argument will cause the value
+        given to `spw_order` to be ignored.
+
+    Returns
+    -------
+    index_array : ndarray of int
+        Array giving the desired order of the channels to be used for sorting along the
+        frequency axis.
+
+    Raises
+    ------
+    UserWarning
+        Raised if providing arguments to select_spw and channel_order (the latter
+        overrides the former).
+    ValueError
+        Raised if select_spw contains values not in spw_array, or if channel_order
+        is not the same length as freq_array.
+
+    """
+    if (spw_order is None) and (channel_order is None):
+        warnings.warn(
+            "Not specifying either spw_order or channel_order causes "
+            "no sorting actions to be applied. Returning object unchanged."
+        )
+        return
+
+    # Check to see if there are arguments we should be ignoring
+    if isinstance(channel_order, (np.ndarray, list, tuple)):
+        if select_spw is not None:
+            warnings.warn(
+                "The select_spw argument is ignored when providing an "
+                "array_like of int for channel_order"
+            )
+        if spw_order is not None:
+            warnings.warn(
+                "The spw_order argument is ignored when providing an "
+                "array_like of int for channel_order"
+            )
+        channel_order = np.asarray(channel_order)
+        if not channel_order.size == Nfreqs or not np.all(
+            np.sort(channel_order) == np.arange(Nfreqs)
+        ):
+            raise ValueError(
+                "Index array for channel_order must contain all indices for "
+                "the frequency axis, without duplicates."
+            )
+        index_array = channel_order
+    else:
+        index_array = np.arange(Nfreqs)
+        # Multiply by 1.0 here to make a cheap copy of the array to manipulate
+        temp_freqs = 1.0 * freq_array
+        # Same trick for ints -- add 0 to make a cheap copy
+        temp_spws = 0 + flex_spw_id_array
+
+        # Check whether or not we need to sort the channels in individual windows
+        sort_spw = {idx: channel_order is not None for idx in spw_array}
+        if select_spw is not None:
+            if spw_order is not None:
+                warnings.warn(
+                    "The spw_order argument is ignored when providing an "
+                    "argument for select_spw"
+                )
+            if channel_order is None:
+                warnings.warn(
+                    "Specifying select_spw without providing channel_order causes "
+                    "no sorting actions to be applied. Returning object unchanged."
+                )
+                return
+            if isinstance(select_spw, (np.ndarray, list, tuple)):
+                sort_spw = {idx: idx in select_spw for idx in spw_array}
+            else:
+                sort_spw = {idx: idx == select_spw for idx in spw_array}
+        elif spw_order is not None:
+            if isinstance(spw_order, (np.ndarray, list, tuple)):
+                spw_order = np.asarray(spw_order)
+                if not spw_order.size == Nspws or not np.all(
+                    np.sort(spw_order) == np.arange(Nspws)
+                ):
+                    raise ValueError(
+                        "Index array for spw_order must contain all indices for "
+                        "the spw_array, without duplicates."
+                    )
+            elif spw_order not in ["number", "freq", "-number", "-freq", None]:
+                raise ValueError(
+                    "spw_order can only be one of 'number', '-number', "
+                    "'freq', '-freq', None or an index array of length Nspws"
+                )
+            elif Nspws > 1:
+                # Only need to do this step if we actually have multiple spws.
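+                # (e.g. spw_order="-number" sorts the windows by spw number
+                # here and then flips the result to descending order below)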
+ + # If the string starts with a '-', then we will flip the order at + # the end of the operation + flip_spws = spw_order[0] == "-" + + if "number" in spw_order: + spw_order = np.argsort(spw_array) + elif "freq" in spw_order: + spw_order = np.argsort( + [np.median(temp_freqs[temp_spws == idx]) for idx in spw_array] + ) + if flip_spws: + spw_order = np.flip(spw_order) + else: + spw_order = np.arange(Nspws) + # Now that we know the spw order, we can apply the first sort + index_array = np.concatenate( + [index_array[temp_spws == spw] for spw in spw_array[spw_order]] + ) + temp_freqs = temp_freqs[index_array] + temp_spws = temp_spws[index_array] + # Spectral windows are assumed sorted at this point + if channel_order is not None: + if channel_order not in ["freq", "-freq"]: + raise ValueError( + "channel_order can only be one of 'freq' or '-freq' or an index " + "array of length Nfreqs" + ) + for idx in spw_array: + if sort_spw[idx]: + select_mask = temp_spws == idx + subsort_order = index_array[select_mask] + subsort_order = subsort_order[np.argsort(temp_freqs[select_mask])] + index_array[select_mask] = ( + np.flip(subsort_order) + if channel_order[0] == "-" + else subsort_order + ) + if np.all(index_array[1:] > index_array[:-1]): + # Nothing to do - the data are already sorted! + return + + return index_array + + +def _sorted_unique_union(obj1, obj2=None): + """ + Determine the union of unique elements from two lists. + + Convenience function for handling various actions with indices. + + Parameters + ---------- + obj1 : list or tuple or set or 1D ndarray + First list from which to determine unique entries. + obj2 : list or tuple or set or 1D ndarray + Second list from which to determine unique entries, which is joined with the + first list. If None, the method will simply return the sorted list of unique + elements in obj1. + + Returns + ------- + sorted_unique : list + List containing the union of unique entries between obj1 and obj2. + """ + return sorted(set(obj1)) if obj2 is None else sorted(set(obj1).union(obj2)) + + +def _sorted_unique_intersection(obj1, obj2=None): + """ + Determine the intersection of unique elements from two lists. + + Convenience function for handling various actions with indices. + + Parameters + ---------- + obj1 : list or tuple or set or 1D ndarray + First list from which to determine unique entries. + obj2 : list or tuple or set or 1D ndarray + Second list from which to determine unique entries, which is intersected with + the first list. If None, the method will simply return the sorted list of unique + elements in obj1. + + Returns + ------- + sorted_unique : list + List containing the intersection of unique entries between obj1 and obj2. + """ + return sorted(set(obj1)) if obj2 is None else sorted(set(obj1).intersection(obj2)) + + +def _sorted_unique_difference(obj1, obj2=None): + """ + Determine the difference of unique elements from two lists. + + Convenience function for handling various actions with indices. + + Parameters + ---------- + obj1 : list or tuple or set or 1D ndarray + First list from which to determine unique entries. + obj2 : list or tuple or set or 1D ndarray + Second list from which to determine unique entries, which is differenced with + the first list. If None, the method will simply return the sorted list of unique + elements in obj1. + + Returns + ------- + sorted_unique : list + List containing the difference in unique entries between obj1 and obj2. 
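+
+    Examples
+    --------
+    Illustrative only:
+
+    >>> _sorted_unique_difference([3, 1, 2, 2], [2])
+    [1, 3]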
+    """
+    return sorted(set(obj1)) if obj2 is None else sorted(set(obj1).difference(obj2))
+
+
+def _combine_filenames(filename1, filename2):
+    """Combine the filename attribute from multiple UVBase objects.
+
+    The 4 cases are:
+    1. `filename1` has been set, `filename2` has not
+    2. `filename1` has not been set, `filename2` has
+    3. `filename1` and `filename2` both have been set
+    4. `filename1` and `filename2` both have not been set
+    In case (1), we do not want to update the attribute, because it is
+    already set correctly. In case (2), we want to replace `filename1`
+    with the value from `filename2`. In case (3), we want to take the union of
+    the sets of the filenames. In case (4), we want the filename attribute
+    to still be `None`.
+
+    Parameters
+    ----------
+    filename1 : list of str or None
+        The list of filenames for the first UVBase object. If it is not set, it
+        should be `None`.
+    filename2 : list of str or None
+        The list of filenames for the second UVBase object. If it is not set, it
+        should be `None`.
+
+    Returns
+    -------
+    combined_filenames : list of str or None
+        The combined list, with potentially duplicate entries removed.
+    """
+    combined_filenames = filename1
+    if filename1 is not None:
+        if filename2 is not None:
+            combined_filenames = sorted(set(filename1).union(set(filename2)))
+    elif filename2 is not None:
+        combined_filenames = filename2
+
+    return combined_filenames
+
+
+def _convert_to_slices(
+    indices, *, max_nslice_frac=0.1, max_nslice=None, return_index_on_fail=False
+):
+    """
+    Convert list of indices to a list of slices.
+
+    Parameters
+    ----------
+    indices : list
+        A 1D list of integers for array indexing (boolean ndarrays are also supported).
+    max_nslice_frac : float
+        A float from 0 -- 1. If the number of slices
+        needed to represent input 'indices' divided by len(indices)
+        exceeds this fraction, then we determine that we cannot
+        easily represent 'indices' with a list of slices.
+    max_nslice : int
+        Optional argument, defines the maximum number of slices for determining if
+        `indices` can be easily represented with a list of slices. If set, then
+        the argument supplied to `max_nslice_frac` is ignored.
+    return_index_on_fail : bool
+        If set to True and the list of input indices cannot easily be represented by
+        a list of slices (as defined by `max_nslice` or `max_nslice_frac`), then return
+        the input list of index values instead of a list of suboptimal slices.
+
+    Returns
+    -------
+    slice_list : list
+        Nominally the list of slice objects used to represent indices. However, if
+        `return_index_on_fail=True` and the input indices cannot easily be represented,
+        return a 1-element list containing the input for `indices`.
+    check : bool
+        If True, indices is easily represented by slices
+        (`max_nslice_frac` or `max_nslice` conditions met), otherwise False.
+
+    Notes
+    -----
+    Example:
+    if: indices = [1, 2, 3, 4, 10, 11, 12, 13, 14]
+    then: slices = [slice(1, 5, 1), slice(11, 15, 1)]
+    """
+    # check for an input that is already a slice or a single index position
+    if isinstance(indices, slice):
+        return [indices], True
+    if isinstance(indices, (int, np.integer)):
+        return [slice(indices, indices + 1, 1)], True
+
+    # check for boolean index
+    if isinstance(indices, np.ndarray) and (indices.dtype == bool):
+        eval_ind = np.where(indices)[0]
+    else:
+        eval_ind = indices
+    # if indices has fewer than 3 entries, return trivial solutions
+    if len(eval_ind) == 0:
+        return [slice(0, 0, 0)], False
+    if len(eval_ind) <= 2:
+        return [
+            slice(eval_ind[0], eval_ind[-1] + 1, max(eval_ind[-1] - eval_ind[0], 1))
+        ], True
+
+    # Catch the simplest case of "give me a single slice or exit"
+    if (max_nslice == 1) and return_index_on_fail:
+        step = eval_ind[1] - eval_ind[0]
+        if all(np.diff(eval_ind) == step):
+            return [slice(eval_ind[0], eval_ind[-1] + 1, step)], True
+        return [indices], False
+
+    # setup empty slices list
+    Ninds = len(eval_ind)
+    slices = []
+
+    # iterate over indices
+    start = last_step = None
+    for ind in eval_ind:
+        if last_step is None:
+            # Check if this is the first slice, in which case start is None
+            if start is None:
+                start = ind
+                continue
+            last_step = ind - start
+            last_ind = ind
+            continue
+
+        # calculate step from previous index
+        step = ind - last_ind
+
+        # if step != last_step, this ends the slice
+        if step != last_step:
+            # append to list
+            slices.append(slice(start, last_ind + 1, last_step))
+
+            # setup next step
+            start = ind
+            last_step = None
+
+        last_ind = ind
+
+    # Append the last slice
+    slices.append(slice(start, ind + 1, last_step))
+
+    # determine whether slices are a reasonable representation, and determine max_nslice
+    # if only max_nslice_frac was supplied.
+    if max_nslice is None:
+        max_nslice = max_nslice_frac * Ninds
+    check = len(slices) <= max_nslice
+
+    if return_index_on_fail and not check:
+        return [indices], check
+    else:
+        return slices, check
+
+
+def slicify(ind: slice | None | IterableType[int]) -> slice | None | IterableType[int]:
+    """Convert an iterable of integers into a slice object if possible."""
+    if ind is None or isinstance(ind, slice):
+        return ind
+    if len(ind) == 0:
+        return None
+
+    if len(set(np.ediff1d(ind))) <= 1:
+        return slice(ind[0], ind[-1] + 1, ind[1] - ind[0] if len(ind) > 1 else 1)
+    else:
+        # can't slicify
+        return ind
+
+
+def _check_range_overlap(val_range, range_type="time"):
+    """
+    Detect if any ranges in an array overlap.
+
+    Parameters
+    ----------
+    val_range : np.array of float
+        Array of ranges, shape (Nranges, 2).
+    range_type : str
+        Type of range (for good error messages).
+
+    Returns
+    -------
+    bool
+        True if any range overlaps.
+    """
+    # first check that time ranges are well formed (stop is >= start)
+    if np.any((val_range[:, 1] - val_range[:, 0]) < 0):
+        raise ValueError(
+            f"The {range_type} ranges are not well-formed, some stop {range_type}s "
+            f"are before start {range_type}s."
+        )
+
+    # Sort by start time
+    sorted_ranges = val_range[np.argsort(val_range[:, 0]), :]
+
+    # then check if adjacent pairs overlap
+    for ind in range(sorted_ranges.shape[0] - 1):
+        range1 = sorted_ranges[ind]
+        range2 = sorted_ranges[ind + 1]
+        if range2[0] < range1[1]:
+            return True
+
+
+def _select_times_helper(
+    *,
+    times,
+    time_range,
+    lsts,
+    lst_range,
+    obj_time_array,
+    obj_time_range,
+    obj_lst_array,
+    obj_lst_range,
+    time_tols,
+    lst_tols,
+):
+    """
+    Get time indices in a select.
+
+    Parameters
+    ----------
+    times : array_like of float
+        The times to keep in the object, each value passed here should exist in the
+        time_array. Can be None, cannot be set with `time_range`, `lsts` or `lst_range`.
+    time_range : array_like of float
+        The time range in Julian Date to keep in the object, must be length 2. Some of
+        the times in the object should fall between the first and last elements. Can be
+        None, cannot be set with `times`, `lsts` or `lst_range`.
+    lsts : array_like of float
+        The local sidereal times (LSTs) to keep in the object, each value passed here
+        should exist in the lst_array. Can be None, cannot be set with `times`,
+        `time_range`, or `lst_range`.
+    lst_range : array_like of float
+        The local sidereal time (LST) range in radians to keep in the
+        object, must be of length 2. Some of the LSTs in the object should
+        fall between the first and last elements. If the second value is
+        smaller than the first, the LSTs are treated as having phase-wrapped
+        around LST = 2*pi = 0, and the LSTs kept on the object will run from
+        the larger value, through 0, and end at the smaller value. Can be None, cannot
+        be set with `times`, `time_range`, or `lsts`.
+    obj_time_array : array_like of float
+        Time array on object. Can be None if `obj_time_range` is set.
+    obj_time_range : array_like of float
+        Time range on object. Can be None if `obj_time_array` is set.
+    obj_lst_array : array_like of float
+        LST array on object. Can be None if `obj_lst_range` is set.
+    obj_lst_range : array_like of float
+        LST range on object. Can be None if `obj_lst_array` is set.
+    time_tols : tuple of float
+        Length 2 tuple giving (rtol, atol) to use for time matching.
+    lst_tols : tuple of float
+        Length 2 tuple giving (rtol, atol) to use for lst matching.
+
+    """
+    have_times = times is not None
+    have_time_range = time_range is not None
+    have_lsts = lsts is not None
+    have_lst_range = lst_range is not None
+    n_time_params = np.count_nonzero(
+        [have_times, have_time_range, have_lsts, have_lst_range]
+    )
+    if n_time_params > 1:
+        raise ValueError(
+            "Only one of [times, time_range, lsts, lst_range] may be "
+            "specified per selection operation."
+ ) + if n_time_params == 0: + return None + + time_inds = np.zeros(0, dtype=np.int64) + if times is not None: + times = _get_iterable(times) + if np.array(times).ndim > 1: + times = np.array(times).flatten() + + if obj_time_range is not None: + for jd in times: + this_ind = np.nonzero( + np.logical_and( + (obj_time_range[:, 0] <= jd), (obj_time_range[:, 1] >= jd) + ) + )[0] + if this_ind.size > 0: + time_inds = np.append(time_inds, this_ind) + else: + raise ValueError(f"Time {jd} does not fall in any time_range.") + else: + for jd in times: + if np.any( + np.isclose(obj_time_array, jd, rtol=time_tols[0], atol=time_tols[1]) + ): + time_inds = np.append( + time_inds, + np.where( + np.isclose( + obj_time_array, jd, rtol=time_tols[0], atol=time_tols[1] + ) + )[0], + ) + else: + raise ValueError(f"Time {jd} is not present in the time_array.") + + if time_range is not None: + if np.size(time_range) != 2: + raise ValueError("time_range must be length 2.") + + if obj_time_range is not None: + for tind, trange in enumerate(obj_time_range): + if _check_range_overlap(np.stack((trange, time_range), axis=0)): + time_inds = np.append(time_inds, tind) + attr_str = "time_range" + else: + time_inds = np.nonzero( + (obj_time_array <= time_range[1]) & (obj_time_array >= time_range[0]) + )[0] + attr_str = "time_array" + if time_inds.size == 0: + raise ValueError( + f"No elements in {attr_str} between {time_range[0]} and " + f"{time_range[1]}." + ) + + if (lsts is not None or lst_range is not None) and obj_lst_range is not None: + # check for lsts wrapping around zero + lst_range_wrap = obj_lst_range[:, 0] > obj_lst_range[:, 1] + + if lsts is not None: + if np.any(np.asarray(lsts) > 2 * np.pi): + warnings.warn( + "The lsts parameter contained a value greater than 2*pi. " + "LST values are assumed to be in radians, not hours." + ) + lsts = _get_iterable(lsts) + if np.array(lsts).ndim > 1: + lsts = np.array(lsts).flatten() + + if obj_lst_range is not None: + for lst in lsts: + lst_ind = np.nonzero( + np.logical_and( + (obj_lst_range[:, 0] <= lst), (obj_lst_range[:, 1] >= lst) + ) + )[0] + if lst_ind.size == 0 and np.any(lst_range_wrap): + for lr_ind in np.nonzero(lst_range_wrap)[0]: + if (obj_lst_range[lr_ind, 0] <= lst and lst <= 2 * np.pi) or ( + lst >= 0 and lst <= obj_lst_range[lr_ind, 1] + ): + lst_ind = np.array([lr_ind]) + if lst_ind.size > 0: + time_inds = np.append(time_inds, lst_ind) + else: + raise ValueError(f"LST {lst} does not fall in any lst_range") + else: + for lst in lsts: + if np.any( + np.isclose(obj_lst_array, lst, rtol=lst_tols[0], atol=lst_tols[1]) + ): + time_inds = np.append( + time_inds, + np.where( + np.isclose( + obj_lst_array, lst, rtol=lst_tols[0], atol=lst_tols[1] + ) + )[0], + ) + else: + raise ValueError(f"LST {lst} is not present in the lst_array") + + if lst_range is not None: + if np.size(lst_range) != 2: + raise ValueError("lst_range must be length 2.") + if np.any(np.asarray(lst_range) > 2 * np.pi): + warnings.warn( + "The lst_range contained a value greater than 2*pi. " + "LST values are assumed to be in radians, not hours." 
+            )
+        if obj_lst_range is not None:
+            for lind, lrange in enumerate(obj_lst_range):
+                if not lst_range_wrap[lind] and lst_range[0] < lst_range[1]:
+                    if _check_range_overlap(np.stack((lrange, lst_range), axis=0)):
+                        time_inds = np.append(time_inds, lind)
+                else:
+                    if (lst_range[0] >= lrange[0] and lst_range[0] <= 2 * np.pi) or (
+                        lst_range[1] <= lrange[1] and lst_range[1] >= 0
+                    ):
+                        time_inds = np.append(time_inds, lind)
+            attr_str = "lst_range"
+        else:
+            if lst_range[1] < lst_range[0]:
+                # we're wrapping around LST = 2*pi = 0
+                lst_range_1 = [lst_range[0], 2 * np.pi]
+                lst_range_2 = [0, lst_range[1]]
+                time_inds1 = np.nonzero(
+                    (obj_lst_array <= lst_range_1[1])
+                    & (obj_lst_array >= lst_range_1[0])
+                )[0]
+                time_inds2 = np.nonzero(
+                    (obj_lst_array <= lst_range_2[1])
+                    & (obj_lst_array >= lst_range_2[0])
+                )[0]
+                time_inds = np.union1d(time_inds1, time_inds2)
+            else:
+                time_inds = np.nonzero(
+                    (obj_lst_array <= lst_range[1]) & (obj_lst_array >= lst_range[0])
+                )[0]
+            attr_str = "lst_array"
+
+        if time_inds.size == 0:
+            raise ValueError(
+                f"No elements in {attr_str} between {lst_range[0]} and "
+                f"{lst_range[1]}."
+            )
+    return time_inds
+
+
+def check_lsts_against_times(
+    *,
+    jd_array,
+    lst_array,
+    lst_tols,
+    latitude=None,
+    longitude=None,
+    altitude=None,
+    frame="itrs",
+    ellipsoid=None,
+    telescope_loc=None,
+):
+    """
+    Check that LSTs are consistent with the time_array and telescope location.
+
+    This just calls `get_lst_for_time`, compares that result to the `lst_array`
+    and warns if they are not within the tolerances specified by `lst_tols`.
+
+    Parameters
+    ----------
+    jd_array : ndarray of float
+        JD times to get lsts for.
+    lst_array : ndarray of float
+        LSTs to check to see if they match the jd_array at the location.
+    latitude : float
+        Latitude of location to check the lst for in degrees.
+    longitude : float
+        Longitude of location to check the lst for in degrees.
+    altitude : float
+        Altitude of location to check the lst for in meters.
+    lst_tols : tuple of float
+        A length 2 tuple giving the (relative, absolute) tolerances to check the
+        LST agreement to. These are passed directly to numpy.allclose.
+    frame : str
+        Reference frame for latitude/longitude/altitude.
+        Options are itrs (default) or mcmf.
+    ellipsoid : str
+        Ellipsoid to use for lunar coordinates. Must be one of "SPHERE", "GSFC",
+        "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default
+        is "SPHERE". Only used if frame is mcmf.
+    telescope_loc : tuple or EarthLocation or MoonLocation
+        Alternative way of specifying telescope lat/lon/alt, either as a 3-element tuple
+        or as an astropy EarthLocation (or lunarsky MoonLocation). Cannot supply both
+        `telescope_loc` and `latitude`, `longitude`, or `altitude`.
+
+    Returns
+    -------
+    None
+
+    Warns
+    -----
+    If the `lst_array` does not match the calculated LSTs to the lst_tols.
+
+    """
+    # Don't worry about passing the astrometry library because we test that they agree
+    # to better than our standard lst tolerances.
+    lsts = get_lst_for_time(
+        jd_array=jd_array,
+        telescope_loc=telescope_loc,
+        latitude=latitude,
+        longitude=longitude,
+        altitude=altitude,
+        frame=frame,
+        ellipsoid=ellipsoid,
+    )
+
+    if not np.allclose(lst_array, lsts, rtol=lst_tols[0], atol=lst_tols[1]):
+        warnings.warn(
+            "The lst_array is not self-consistent with the time_array and "
+            "telescope location. Consider recomputing with the "
+            "`set_lsts_from_time_array` method."
+        )
+
+
+def check_surface_based_positions(
+    *,
+    telescope_loc=None,
+    telescope_frame="itrs",
+    antenna_positions=None,
+    raise_error=True,
+    raise_warning=True,
+):
+    """
+    Check that antenna positions are consistent with ground-based values.
+
+    Check that the antenna position, telescope location, or combination of both produces
+    locations that are consistent with surface-based positions. If supplying both
+    antenna position and telescope location, the check will be run against the sum total
+    of both. For the Earth, the permitted range of values is between 6350 and 6390 km,
+    whereas for the Moon the range is 1717.1 to 1757.1 km.
+
+    Parameters
+    ----------
+    telescope_loc : tuple or EarthLocation or MoonLocation
+        Telescope location, specified as a 3-element tuple (specifying geo/selenocentric
+        position in meters) or as an astropy EarthLocation (or lunarsky MoonLocation).
+    telescope_frame : str, optional
+        Reference frame for latitude/longitude/altitude. Options are itrs (default) or
+        mcmf. Only used if telescope_loc is not an EarthLocation or MoonLocation.
+    antenna_positions : ndarray of float
+        List of antenna positions relative to array center in ECEF coordinates.
+        Shape is (Nants, 3). If no telescope_loc is specified, these values will
+        be assumed to be relative to geocenter.
+    raise_error : bool
+        If True, an error is raised if antenna_positions and/or telescope_loc do not
+        conform to expectations for a surface-based telescope. Default is True.
+    raise_warning : bool
+        If True, a warning is raised if antenna_positions and/or telescope_loc do not
+        conform to expectations for a surface-based telescope. Default is True, only
+        used if `raise_error` is set to False.
+
+    Returns
+    -------
+    valid : bool
+        If True, the antenna_positions and/or telescope_loc conform to expectations for
+        a surface-based telescope. Otherwise returns False.
+
+    """
+    if antenna_positions is None:
+        antenna_positions = np.zeros((1, 3))
+
+    if isinstance(telescope_loc, EarthLocation) or (
+        hasmoon and isinstance(telescope_loc, MoonLocation)
+    ):
+        antenna_positions = antenna_positions + (
+            telescope_loc.x.to("m").value,
+            telescope_loc.y.to("m").value,
+            telescope_loc.z.to("m").value,
+        )
+        if isinstance(telescope_loc, EarthLocation):
+            telescope_frame = "itrs"
+        else:
+            telescope_frame = "mcmf"
+    elif telescope_loc is not None:
+        antenna_positions = antenna_positions + telescope_loc
+
+    low_lim, hi_lim, world = _range_dict[telescope_frame]
+
+    err_type = None
+    if np.any(np.sum(antenna_positions**2.0, axis=1) < low_lim**2.0):
+        err_type = "below"
+    elif np.any(np.sum(antenna_positions**2.0, axis=1) > hi_lim**2.0):
+        err_type = "above"
+
+    if err_type is None:
+        return True
+
+    err_msg = (
+        f"{telescope_frame} position vector magnitudes must be on the order of "
+        f"the radius of {world} -- they appear to lie well {err_type} this."
+    )
+
+    # If desired, raise an error
+    if raise_error:
+        raise ValueError(err_msg)
+
+    # Otherwise, if desired, raise a warning instead
+    if raise_warning:
+        warnings.warn(err_msg)
+
+    return False
+
+
+def determine_blt_order(
+    *, time_array, ant_1_array, ant_2_array, baseline_array, Nbls, Ntimes  # noqa: N803
+) -> tuple[str] | None:
+    """Get the blt order from analyzing metadata."""
+    times = time_array
+    ant1 = ant_1_array
+    ant2 = ant_2_array
+    bls = baseline_array
+
+    time_bl = True
+    time_a = True
+    time_b = True
+    bl_time = True
+    a_time = True
+    b_time = True
+    bl_order = True
+    a_order = True
+    b_order = True
+    time_order = True
+
+    if Nbls == 1 and Ntimes == 1:
+        return ("baseline", "time")  # w.l.o.g.
+
+    for i, (t, a, b, bl) in enumerate(
+        zip(times[1:], ant1[1:], ant2[1:], bls[1:]), start=1
+    ):
+        on_bl_boundary = i % Nbls == 0
+        on_time_boundary = i % Ntimes == 0
+
+        if t < times[i - 1]:
+            time_bl = False
+            time_a = False
+            time_b = False
+            time_order = False
+
+            if not on_time_boundary:
+                bl_time = False
+                a_time = False
+                b_time = False
+
+            if bl == bls[i - 1]:
+                bl_time = False
+            if a == ant1[i - 1]:
+                a_time = False
+            if b == ant2[i - 1]:
+                b_time = False
+
+        elif t == times[i - 1]:
+            if bl < bls[i - 1]:
+                time_bl = False
+            if a < ant1[i - 1]:
+                time_a = False
+            if b < ant2[i - 1]:
+                time_b = False
+
+        if bl < bls[i - 1]:
+            bl_time = False
+            bl_order = False
+            if not on_bl_boundary:
+                time_bl = False
+        if a < ant1[i - 1]:
+            a_time = False
+            a_order = False
+            if not on_bl_boundary:
+                time_a = False
+        if b < ant2[i - 1]:
+            b_time = False
+            b_order = False
+            if not on_bl_boundary:
+                time_b = False
+
+        if not any(
+            (
+                time_bl,
+                time_a,
+                time_b,
+                bl_time,
+                a_time,
+                b_time,
+                bl_order,
+                a_order,
+                b_order,
+                time_order,
+            )
+        ):
+            break
+
+    if Nbls > 1 and Ntimes > 1:
+        assert not (
+            (time_bl and bl_time)
+            or (time_a and a_time)
+            or (time_b and b_time)
+            or (time_order and a_order)
+            or (time_order and b_order)
+            or (a_order and b_order)
+            or (time_order and bl_order)
+        ), (
+            "Something went wrong when trying to determine the order of the blts axis. "
+            "Please raise an issue on github, as this is not meant to happen. "
+            "None of the following should ever be True: \n"
+            f"\ttime_bl and bl_time: {time_bl and bl_time}\n"
+            f"\ttime_a and a_time: {time_a and a_time}\n"
+            f"\ttime_b and b_time: {time_b and b_time}\n"
+            f"\ttime_order and a_order: {time_order and a_order}\n"
+            f"\ttime_order and b_order: {time_order and b_order}\n"
+            f"\ta_order and b_order: {a_order and b_order}\n"
+            f"\ttime_order and bl_order: {time_order and bl_order}\n\n"
+            "Please include the following information in your issue:\n"
+            f"Nbls: {Nbls}\n"
+            f"Ntimes: {Ntimes}\n"
+            f"TIMES: {times}\n"
+            f"ANT1: {ant1}\n"
+            f"ANT2: {ant2}\n"
+            f"BASELINES: {bls}\n"
+        )
+
+    if time_bl:
+        return ("time", "baseline")
+    if bl_time:
+        return ("baseline", "time")
+    if time_a:
+        return ("time", "ant1")
+    if a_time:
+        return ("ant1", "time")
+    if time_b:
+        return ("time", "ant2")
+    if b_time:
+        return ("ant2", "time")
+    if bl_order:
+        return ("baseline",)
+    if a_order:
+        return ("ant1",)
+    if b_order:
+        return ("ant2",)
+    if time_order:
+        return ("time",)
+
+    return None
+
+
+def determine_rectangularity(
+    *,
+    time_array: np.ndarray,
+    baseline_array: np.ndarray,
+    nbls: int,
+    ntimes: int,
+    blt_order: str | tuple[str] | None = None,
+):
+    """Determine if the data is rectangular or not.
+
+    Parameters
+    ----------
+    time_array : array_like
+        Array of times in JD.
+    baseline_array : array_like
+        Array of baseline integers.
+    nbls : int
+        Number of baselines.
+    ntimes : int
+        Number of times.
+    blt_order : str or tuple of str, optional
+        If known, pass the blt_order, which can short-circuit the determination
+        of rectangularity.
+
+    Returns
+    -------
+    is_rect : bool
+        True if the data is rectangular, False otherwise.
+    time_axis_faster_than_bls : bool
+        True if the data is rectangular and the time axis is the last axis (i.e. times
+        change first, then bls). False either if baselines change first, OR if it is
+        not rectangular.
+
+    Notes
+    -----
+    Rectangular data is defined as data for which using regular slicing of size Ntimes
+    or Nbls will give you either all the same time and all different baselines, or
+    vice versa. This does NOT require that the baselines and times are sorted within
+    that structure.
+    """
+    # check if the data is rectangular
+    time_first = True
+    bl_first = True
+
+    if time_array.size != nbls * ntimes:
+        return False, False
+    elif nbls * ntimes == 1:
+        return True, True
+    elif nbls == 1:
+        return True, True
+    elif ntimes == 1:
+        return True, False
+    elif blt_order == ("baseline", "time"):
+        return True, True
+    elif blt_order == ("time", "baseline"):
+        return True, False
+
+    # That's all the easiest checks.
+    if time_array[1] == time_array[0]:
+        time_first = False
+    if baseline_array[1] == baseline_array[0]:
+        bl_first = False
+    if not time_first and not bl_first:
+        return False, False
+
+    if time_first:
+        time_array = time_array.reshape((nbls, ntimes))
+        baseline_array = baseline_array.reshape((nbls, ntimes))
+        if np.sum(np.abs(np.diff(time_array, axis=0))) != 0:
+            return False, False
+        if (np.diff(baseline_array, axis=1) != 0).any():
+            return False, False
+        return True, True
+    elif bl_first:
+        time_array = time_array.reshape((ntimes, nbls))
+        baseline_array = baseline_array.reshape((ntimes, nbls))
+        if np.sum(np.abs(np.diff(time_array, axis=1))) != 0:
+            return False, False
+        if (np.diff(baseline_array, axis=0) != 0).any():
+            return False, False
+        return True, False
diff --git a/src/pyuvdata/utils/lst.py b/src/pyuvdata/utils/lst.py
new file mode 100644
index 0000000000..cb09fb95c7
--- /dev/null
+++ b/src/pyuvdata/utils/lst.py
@@ -0,0 +1,245 @@
+# -*- mode: python; coding: utf-8 -*-
+# Copyright (c) 2024 Radio Astronomy Software Group
+# Licensed under the 2-clause BSD License
+"""Utilities for working with LSTs."""
+import warnings
+
+import erfa
+import numpy as np
+from astropy.coordinates import Angle, EarthLocation
+from astropy.time import Time
+from astropy.utils import iers
+
+try:
+    from lunarsky import MoonLocation
+    from lunarsky import Time as LTime
+
+    hasmoon = True
+except ImportError:
+    hasmoon = False
+
+
+def get_lst_for_time(
+    jd_array=None,
+    *,
+    telescope_loc=None,
+    latitude=None,
+    longitude=None,
+    altitude=None,
+    astrometry_library=None,
+    frame="itrs",
+    ellipsoid=None,
+):
+    """
+    Get the local apparent sidereal time for a set of jd times at an earth location.
+
+    This function calculates the local apparent sidereal time (LAST), given a UTC time
+    and a position on the Earth, using either the astropy or NOVAS libraries. It
+    is important to note that there is an approximate 20 microsecond difference
+    between the two methods, presumably due to small differences in the apparent
+    reference frame. These differences will cancel out when calculating coordinates
+    in the TOPO frame, so long as apparent coordinates are calculated using the
+    same library (i.e., astropy or NOVAS). Failing to do so can introduce errors
+    up to ~1 mas in the horizontal coordinate system (i.e., AltAz).
+
+    Parameters
+    ----------
+    jd_array : ndarray of float
+        JD times to get lsts for.
+    telescope_loc : tuple or EarthLocation or MoonLocation
+        Alternative way of specifying telescope lat/lon/alt, either as a 3-element tuple
+        or as an astropy EarthLocation (or lunarsky MoonLocation). Cannot supply both
+        `telescope_loc` and `latitude`, `longitude`, or `altitude`.
+    latitude : float
+        Latitude of location to get lst for in degrees. Cannot specify both `latitude`
+        and `telescope_loc`.
+    longitude : float
+        Longitude of location to get lst for in degrees. Cannot specify both `longitude`
+        and `telescope_loc`.
+    altitude : float
+        Altitude of location to get lst for in meters. Cannot specify both `altitude`
+        and `telescope_loc`.
+    astrometry_library : str
+        Library used for running the LST calculations. Allowed options are 'erfa'
+        (which uses pyERFA), 'novas' (which uses the python-novas library),
+        and 'astropy' (which uses the astropy utilities). Default is erfa unless
+        the telescope_loc is a MoonLocation object, in which case the default is
+        astropy.
+    frame : str
+        Reference frame for latitude/longitude/altitude. Options are itrs (default)
+        or mcmf. Not used if telescope_loc is an EarthLocation or MoonLocation object.
+    ellipsoid : str
+        Ellipsoid to use for lunar coordinates. Must be one of "SPHERE",
+        "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default
+        is "SPHERE". Only used if frame is mcmf. Not used if telescope_loc is
+        an EarthLocation or MoonLocation object.
+
+    Returns
+    -------
+    ndarray of float
+        LASTs in radians corresponding to the jd_array.
+
+    """
+    site_loc = None
+    if telescope_loc is not None:
+        if not all(item is None for item in [latitude, longitude, altitude]):
+            raise ValueError(
+                "Cannot set both telescope_loc and latitude/longitude/altitude"
+            )
+        if isinstance(telescope_loc, EarthLocation) or (
+            hasmoon and isinstance(telescope_loc, MoonLocation)
+        ):
+            site_loc = telescope_loc
+            if isinstance(telescope_loc, EarthLocation):
+                frame = "ITRS"
+            else:
+                frame = "MCMF"
+        else:
+            latitude, longitude, altitude = telescope_loc
+
+    if site_loc is None:
+        if frame.upper() == "MCMF":
+            if not hasmoon:
+                raise ValueError(
+                    "Need to install `lunarsky` package to work with MCMF frame."
+                )
+            if ellipsoid is None:
+                ellipsoid = "SPHERE"
+
+            site_loc = MoonLocation.from_selenodetic(
+                Angle(longitude, unit="deg"),
+                Angle(latitude, unit="deg"),
+                altitude,
+                ellipsoid=ellipsoid,
+            )
+        else:
+            site_loc = EarthLocation.from_geodetic(
+                Angle(longitude, unit="deg"),
+                Angle(latitude, unit="deg"),
+                height=altitude,
+            )
+    if astrometry_library is None:
+        if frame == "itrs":
+            astrometry_library = "erfa"
+        else:
+            astrometry_library = "astropy"
+
+    if astrometry_library not in ["erfa", "astropy", "novas"]:
+        raise ValueError(
+            "Requested coordinate transformation library is not supported, please "
+            "select 'erfa', 'novas', or 'astropy' for astrometry_library."
+        )
+
+    if isinstance(jd_array, np.ndarray):
+        lst_array = np.zeros_like(jd_array)
+        if lst_array.ndim == 0:
+            lst_array = lst_array.reshape(1)
+    else:
+        lst_array = np.zeros(1)
+
+    jd, reverse_inds = np.unique(jd_array, return_inverse=True)
+
+    if isinstance(site_loc, EarthLocation):
+        TimeClass = Time
+    else:
+        if not astrometry_library == "astropy":
+            raise NotImplementedError(
+                "The MCMF frame is only supported with the 'astropy' astrometry library"
+            )
+        TimeClass = LTime
+
+    times = TimeClass(jd, format="jd", scale="utc", location=site_loc)
+
+    if iers.conf.auto_max_age is None:  # pragma: no cover
+        delta, status = times.get_delta_ut1_utc(return_status=True)
+        if np.any(
+            np.isin(status, (iers.TIME_BEFORE_IERS_RANGE, iers.TIME_BEYOND_IERS_RANGE))
+        ):
+            warnings.warn(
+                "time is out of IERS range, setting delta ut1 utc to extrapolated value"
+            )
+            times.delta_ut1_utc = delta
+    if astrometry_library == "erfa":
+        # This appears to be what astropy is using under the hood,
+        # so it _should_ be totally consistent.
+        gast_array = erfa.gst06a(
+            times.ut1.jd1, times.ut1.jd2, times.tt.jd1, times.tt.jd2
+        )
+
+        # Technically one should correct for the polar wobble here, but the differences
+        # along the equatorial are minuscule -- of order 10s of nanoradians, well below
+        # the promised accuracy of IERS -- and rotation matrices can be expensive.
+        # We do want to correct, though, for secular polar drift (s'/TIO locator),
+        # which nudges the Earth rotation angle of order 47 uas per century.
+        sp = erfa.sp00(times.tt.jd1, times.tt.jd2)
+
+        lst_array = np.mod(gast_array + sp + site_loc.lon.rad, 2.0 * np.pi)[
+            reverse_inds
+        ]
+    elif astrometry_library == "astropy":
+        lst_array = times.sidereal_time("apparent").radian
+        if lst_array.ndim == 0:
+            lst_array = lst_array.reshape(1)
+        lst_array = lst_array[reverse_inds]
+    elif astrometry_library == "novas":
+        # Import the NOVAS library only if it's needed/available.
+        try:
+            import novas_de405  # noqa
+            from novas import compat as novas
+            from novas.compat import eph_manager
+        except ImportError as e:  # pragma: no cover
+            raise ImportError(
+                "novas and/or novas_de405 are not installed but are required for "
+                "NOVAS functionality"
+            ) from e
+
+        jd_start, jd_end, number = eph_manager.ephem_open()
+
+        tt_time_array = times.tt.value
+        ut1_high_time_array = times.ut1.jd1
+        ut1_low_time_array = times.ut1.jd2
+        full_ut1_time_array = ut1_high_time_array + ut1_low_time_array
+        polar_motion_data = iers.earth_orientation_table.get()
+
+        delta_x_array = np.interp(
+            times.mjd,
+            polar_motion_data["MJD"].value,
+            polar_motion_data["dX_2000A_B"].value,
+            left=0.0,
+            right=0.0,
+        )
+
+        delta_y_array = np.interp(
+            times.mjd,
+            polar_motion_data["MJD"].value,
+            polar_motion_data["dY_2000A_B"].value,
+            left=0.0,
+            right=0.0,
+        )
+
+        # Catch the case where we don't have CIP delta values yet (they don't typically
+        # have predictive values like the polar motion does)
+        delta_x_array[np.isnan(delta_x_array)] = 0.0
+        delta_y_array[np.isnan(delta_y_array)] = 0.0
+
+        for idx in range(len(times)):
+            novas.cel_pole(
+                tt_time_array[idx], 2, delta_x_array[idx], delta_y_array[idx]
+            )
+            # The NOVAS routine will return Greenwich Apparent Sidereal Time (GAST),
+            # in units of hours
+            lst_array[reverse_inds == idx] = novas.sidereal_time(
+                ut1_high_time_array[idx],
+                ut1_low_time_array[idx],
+                (tt_time_array[idx] - full_ut1_time_array[idx]) * 86400.0,
+            )
+
+        # Add the telescope lon to convert from GAST to LAST (local)
+        lst_array = np.mod(lst_array + (longitude / 15.0), 24.0)
+
+        # Convert from hours back to rad
+        lst_array *= np.pi / 12.0
+
+    lst_array = np.reshape(lst_array, jd_array.shape)
+
+    return lst_array
diff --git a/src/pyuvdata/utils/phasing.py b/src/pyuvdata/utils/phasing.py
new file mode 100644
index 0000000000..0ad36dae0b
--- /dev/null
+++ b/src/pyuvdata/utils/phasing.py
@@ -0,0 +1,2564 @@
+# -*- mode: python; coding: utf-8 -*-
+# Copyright (c) 2024 Radio Astronomy Software Group
+# Licensed under the 2-clause BSD License
+"""Utilities for phasing."""
+from copy import deepcopy
+
+import erfa
+import numpy as np
+from astropy import units
+from astropy.coordinates import Angle, Distance, EarthLocation, SkyCoord
+from astropy.time import Time
+from astropy.utils import iers
+
+from .. import _phasing
+from .lst import get_lst_for_time
+
+try:
+    from lunarsky import MoonLocation
+    from lunarsky import SkyCoord as LunarSkyCoord
+    from lunarsky import Time as LTime
+
+    hasmoon = True
+except ImportError:
+    hasmoon = False
+
+
+def old_uvw_calc(ra, dec, initial_uvw):
+    """
+    Calculate old uvws from unphased ones in an icrs or gcrs frame.
+
+    This method should not be used and is only retained for testing the
+    undo_old_uvw_calc method, which is needed for fixing phases.
+
+    This code expects input uvws relative to the telescope location in the same frame
+    that ra/dec are in (e.g. icrs or gcrs) and returns phased ones in the same frame.
+
+    Parameters
+    ----------
+    ra : float
+        Right ascension of phase center.
+    dec : float
+        Declination of phase center.
+    initial_uvw : ndarray of float
+        Unphased uvws or positions relative to the array center,
+        shape (Nlocs, 3).
+
+    Returns
+    -------
+    uvw : ndarray of float
+        uvw array in the same frame as initial_uvw, ra and dec.
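+
+    Examples
+    --------
+    A minimal shape check (illustrative only; requires the compiled
+    ``_phasing`` extension, and a 1D input is promoted to 2D):
+
+    >>> import numpy as np
+    >>> old_uvw_calc(0.0, 0.0, np.array([1.0, 0.0, 0.0])).shape
+    (1, 3)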
+
+    """
+    if initial_uvw.ndim == 1:
+        initial_uvw = initial_uvw[np.newaxis, :]
+
+    return _phasing._old_uvw_calc(
+        np.float64(ra),
+        np.float64(dec),
+        np.ascontiguousarray(initial_uvw.T, dtype=np.float64),
+    ).T
+
+
+def undo_old_uvw_calc(ra, dec, uvw):
+    """
+    Undo the old phasing calculation on uvws in an icrs or gcrs frame.
+
+    This code expects phased uvws or positions in the same frame that ra/dec
+    are in (e.g. icrs or gcrs) and returns unphased ones in the same frame.
+
+    Parameters
+    ----------
+    ra : float
+        Right ascension of phase center.
+    dec : float
+        Declination of phase center.
+    uvw : ndarray of float
+        Phased uvws or positions relative to the array center,
+        shape (Nlocs, 3).
+
+    Returns
+    -------
+    unphased_uvws : ndarray of float
+        Unphased uvws or positions relative to the array center,
+        shape (Nlocs, 3).
+
+    """
+    if uvw.ndim == 1:
+        uvw = uvw[np.newaxis, :]
+
+    return _phasing._undo_old_uvw_calc(
+        np.float64(ra), np.float64(dec), np.ascontiguousarray(uvw.T, dtype=np.float64)
+    ).T
+
+
+def polar2_to_cart3(*, lon_array, lat_array):
+    """
+    Convert 2D polar coordinates into 3D cartesian coordinates.
+
+    This is a simple routine for converting a set of spherical angular coordinates
+    into 3D cartesian vectors, where the x-direction is set by the position (0, 0).
+
+    Parameters
+    ----------
+    lon_array : float or ndarray
+        Longitude coordinates, which increase in the counter-clockwise direction.
+        Units of radians. Can either be a float or ndarray -- if the latter, must have
+        the same shape as lat_array.
+    lat_array : float or ndarray
+        Latitude coordinates, where 0 falls on the equator of the sphere. Units of
+        radians. Can either be a float or ndarray -- if the latter, must have the same
+        shape as lon_array.
+
+    Returns
+    -------
+    xyz_array : ndarray of float
+        Cartesian coordinates of the given longitude and latitude on a unit sphere.
+        Shape is (3, coord_shape), where coord_shape is the shape of lon_array and
+        lat_array if they were provided as type ndarray, otherwise (3,).
+    """
+    # Check to make sure that we are not playing with mixed types
+    if type(lon_array) is not type(lat_array):
+        raise ValueError(
+            "lon_array and lat_array must either both be floats or ndarrays."
+        )
+    if isinstance(lon_array, np.ndarray):
+        if lon_array.shape != lat_array.shape:
+            raise ValueError("lon_array and lat_array must have the same shape.")
+
+    # Once we know that lon_array and lat_array are of the same shape,
+    # time to create our 3D set of vectors!
+    xyz_array = np.array(
+        [
+            np.cos(lon_array) * np.cos(lat_array),
+            np.sin(lon_array) * np.cos(lat_array),
+            np.sin(lat_array),
+        ],
+        dtype=float,
+    )
+
+    return xyz_array
+
+
+def cart3_to_polar2(xyz_array):
+    """
+    Convert 3D cartesian coordinates into 2D polar coordinates.
+
+    This is a simple routine for converting a set of 3D cartesian vectors into
+    spherical coordinates, where the position (0, 0) lies along the x-direction.
+
+    Parameters
+    ----------
+    xyz_array : ndarray of float
+        Cartesian coordinates, need not be of unit vector length. Shape is
+        (3, coord_shape).
+
+    Returns
+    -------
+    lon_array : ndarray of float
+        Longitude coordinates, which increase in the counter-clockwise direction.
+        Units of radians, shape is (coord_shape,).
+    lat_array : ndarray of float
+        Latitude coordinates, where 0 falls on the equator of the sphere. Units of
+        radians, shape is (coord_shape,).
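+
+    Examples
+    --------
+    A minimal illustrative check (not exhaustive): the unit vector along the
+    y-axis maps to a longitude of pi/2 and a latitude of 0.
+
+    >>> import numpy as np
+    >>> from pyuvdata.utils.phasing import cart3_to_polar2
+    >>> lon, lat = cart3_to_polar2(np.array([0.0, 1.0, 0.0]))
+    >>> bool(np.allclose([lon, lat], [np.pi / 2, 0.0]))
+    True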
+    """
+    if not isinstance(xyz_array, np.ndarray):
+        raise ValueError("xyz_array must be an ndarray.")
+    if xyz_array.ndim == 0:
+        raise ValueError("xyz_array must have ndim > 0")
+    if xyz_array.shape[0] != 3:
+        raise ValueError("xyz_array must be length 3 across the zeroth axis.")
+
+    # The longitude coord is relatively easy to calculate, just take the X and Y
+    # components and find the arctangent of the pair.
+    lon_array = np.mod(np.arctan2(xyz_array[1], xyz_array[0]), 2.0 * np.pi, dtype=float)
+
+    # If we _knew_ that xyz_array was always of unit length, then this call could be a
+    # much simpler one to arcsin. But to make this generic, we'll use the length of the
+    # XY component along with arctan2.
+    lat_array = np.arctan2(
+        xyz_array[2], np.sqrt((xyz_array[0:2] ** 2.0).sum(axis=0)), dtype=float
+    )
+
+    # Return the two arrays
+    return lon_array, lat_array
+
+
+def _rotate_matmul_wrapper(*, xyz_array, rot_matrix, n_rot):
+    """
+    Apply a rotation matrix to a series of vectors.
+
+    This is a simple convenience function which wraps numpy's matmul function for use
+    with various vector rotation functions in this module. This code could, in
+    principle, be replaced by a cythonized piece of code, although the matmul function
+    is _pretty_ well optimized already. This function is not meant to be called by
+    users, but is instead used by multiple higher-level utility functions (namely those
+    that perform rotations).
+
+    Parameters
+    ----------
+    xyz_array : ndarray of floats
+        Array of vectors to be rotated. When nrot > 1, shape may be (n_rot, 3, n_vec)
+        or (1, 3, n_vec), the latter is useful when performing multiple rotations
+        on a fixed set of vectors. If nrot = 1, shape may be (1, 3, n_vec), (3, n_vec),
+        or (3,).
+    rot_matrix : ndarray of floats
+        Series of rotation matrices to be applied to the stack of vectors. Must be
+        of shape (n_rot, 3, 3).
+    n_rot : int
+        Number of individual rotation matrices to be applied.
+
+    Returns
+    -------
+    rotated_xyz : ndarray of floats
+        Array of vectors that have been rotated, of shape (n_rot, 3, n_vectors,).
+    """
+    # Do a quick check to make sure that things look sensible
+    if rot_matrix.shape != (n_rot, 3, 3):
+        raise ValueError(
+            "rot_matrix must be of shape (n_rot, 3, 3), where n_rot=%i." % n_rot
+        )
+    if (xyz_array.ndim == 3) and (
+        (xyz_array.shape[0] not in [1, n_rot]) or (xyz_array.shape[-2] != 3)
+    ):
+        raise ValueError("Misshaped xyz_array - expected shape (n_rot, 3, n_vectors).")
+    if (xyz_array.ndim < 3) and (xyz_array.shape[0] != 3):
+        raise ValueError("Misshaped xyz_array - expected shape (3, n_vectors) or (3,).")
+    rotated_xyz = np.matmul(rot_matrix, xyz_array)
+
+    return rotated_xyz
+
+
+def _rotate_one_axis(xyz_array, *, rot_amount, rot_axis):
+    """
+    Rotate an array of 3D positions around a single axis (x, y, or z).
+
+    This function performs a basic rotation of 3D vectors about one of the principal
+    axes -- the x-axis, the y-axis, or the z-axis.
+
+    Note that the rotations here obey the right-hand rule -- that is to say, from the
+    perspective of the positive side of the axis of rotation, a positive rotation will
+    cause points on the plane intersecting this axis to move in a counter-clockwise
+    fashion.
+
+    Parameters
+    ----------
+    xyz_array : ndarray of float
+        Set of 3-dimensional vectors to be rotated, in typical right-handed cartesian
+        order, e.g. (x, y, z). Shape is (Nrot, 3, Nvectors).
+    rot_amount : float or ndarray of float
+        Amount (in radians) to rotate the given set of coordinates. Can either be a
+        single float (or ndarray of shape (1,)) if rotating all vectors by the same
+        amount, otherwise expected to be shape (Nrot,).
+    rot_axis : int
+        Axis around which the rotation is applied. 0 is the x-axis, 1 is the y-axis,
+        and 2 is the z-axis.
+
+    Returns
+    -------
+    rotated_xyz : ndarray of float
+        Set of rotated 3-dimensional vectors, shape (Nrot, 3, Nvector).
+    """
+    # If rot_amount is None or all zeros, then this is just one big old no-op.
+    if (rot_amount is None) or np.all(rot_amount == 0.0):
+        if np.ndim(xyz_array) == 1:
+            return deepcopy(xyz_array[np.newaxis, :, np.newaxis])
+        elif np.ndim(xyz_array) == 2:
+            return deepcopy(xyz_array[np.newaxis, :, :])
+        else:
+            return deepcopy(xyz_array)
+
+    # Check and see how big of a rotation matrix we need
+    n_rot = 1 if (not isinstance(rot_amount, np.ndarray)) else (rot_amount.shape[0])
+    n_vec = xyz_array.shape[-1]
+
+    # The promotion of values to float64 is to suppress numerical precision issues,
+    # since the matrix math can - in limited circumstances - introduce precision errors
+    # of order 10x the limiting numerical precision of the float. For a float32/single,
+    # that's a part in 1e6 (~arcsec-level errors), but for a float64 it translates to
+    # a part in 1e15.
+    rot_matrix = np.zeros((3, 3, n_rot), dtype=np.float64)
+
+    # Figure out which pieces of the matrix we need to update
+    temp_jdx = (rot_axis + 1) % 3
+    temp_idx = (rot_axis + 2) % 3
+
+    # Fill in the rotation matrices accordingly
+    rot_matrix[rot_axis, rot_axis] = 1
+    rot_matrix[temp_idx, temp_idx] = np.cos(rot_amount, dtype=np.float64)
+    rot_matrix[temp_jdx, temp_jdx] = rot_matrix[temp_idx, temp_idx]
+    rot_matrix[temp_idx, temp_jdx] = np.sin(rot_amount, dtype=np.float64)
+    rot_matrix[temp_jdx, temp_idx] = -rot_matrix[temp_idx, temp_jdx]
+
+    # The rot matrix was shape (3, 3, n_rot) to help speed up filling in the elements
+    # of each matrix, but now we want to flip it into its proper shape of (n_rot, 3, 3)
+    rot_matrix = np.transpose(rot_matrix, axes=[2, 0, 1])
+
+    if (n_rot == 1) and (n_vec == 1) and (xyz_array.ndim == 3):
+        # This is a special case where we allow the rotation axis to "expand" along
+        # the 0th axis of the rot_amount arrays. For xyz_array, if n_vectors = 1
+        # but n_rot !=1, then it's a lot faster (by about 10x) to "switch it up" and
+        # swap the n_vector and n_rot axes, and then swap them back once everything
+        # else is done.
+        return np.transpose(
+            _rotate_matmul_wrapper(
+                xyz_array=np.transpose(xyz_array, axes=[2, 1, 0]),
+                rot_matrix=rot_matrix,
+                n_rot=n_rot,
+            ),
+            axes=[2, 1, 0],
+        )
+    else:
+        return _rotate_matmul_wrapper(
+            xyz_array=xyz_array, rot_matrix=rot_matrix, n_rot=n_rot
+        )
+
+
+def _rotate_two_axis(xyz_array, *, rot_amount1, rot_amount2, rot_axis1, rot_axis2):
+    """
+    Rotate an array of 3D positions sequentially around a pair of axes (x, y, or z).
+
+    This function performs a sequential pair of basic rotations of 3D vectors about
+    the principal axes -- the x-axis, the y-axis, or the z-axis.
+
+    Note that the rotations here obey the right-hand rule -- that is to say, from the
+    perspective of the positive side of the axis of rotation, a positive rotation will
+    cause points on the plane intersecting this axis to move in a counter-clockwise
+    fashion.
+
+    Parameters
+    ----------
+    xyz_array : ndarray of float
+        Set of 3-dimensional vectors to be rotated, in typical right-handed cartesian
+        order, e.g. (x, y, z). Shape is (Nrot, 3, Nvectors).
+    rot_amount1 : float or ndarray of float
+        Amount (in radians) of rotation to apply during the first rotation of the
+        sequence to the given set of coordinates. Can either be a single float (or
+        ndarray of shape (1,)) if rotating all vectors by the same amount, otherwise
+        expected to be shape (Nrot,).
+    rot_amount2 : float or ndarray of float
+        Amount (in radians) of rotation to apply during the second rotation of the
+        sequence to the given set of coordinates. Can either be a single float (or
+        ndarray of shape (1,)) if rotating all vectors by the same amount, otherwise
+        expected to be shape (Nrot,).
+    rot_axis1 : int
+        Axis around which the first rotation is applied. 0 is the x-axis, 1 is the
+        y-axis, and 2 is the z-axis.
+    rot_axis2 : int
+        Axis around which the second rotation is applied. 0 is the x-axis, 1 is the
+        y-axis, and 2 is the z-axis.
+
+    Returns
+    -------
+    rotated_xyz : ndarray of float
+        Set of rotated 3-dimensional vectors, shape (Nrot, 3, Nvector).
+
+    """
+    # Capture some special cases upfront, where we can save ourselves a bit of work
+    no_rot1 = (rot_amount1 is None) or np.all(rot_amount1 == 0.0)
+    no_rot2 = (rot_amount2 is None) or np.all(rot_amount2 == 0.0)
+    if no_rot1 and no_rot2:
+        # If both rot_amounts are None or zero, then this is just one big old no-op.
+        return deepcopy(xyz_array)
+    elif no_rot1:
+        # If rot_amount1 is None, then ignore it and just work w/ the 2nd rotation
+        return _rotate_one_axis(xyz_array, rot_amount=rot_amount2, rot_axis=rot_axis2)
+    elif no_rot2:
+        # If rot_amount2 is None, then ignore it and just work w/ the 1st rotation
+        return _rotate_one_axis(xyz_array, rot_amount=rot_amount1, rot_axis=rot_axis1)
+    elif rot_axis1 == rot_axis2:
+        # Capture the case where someone wants to do a sequence of rotations on the
+        # same axis. Also known as just rotating a single axis.
+        return _rotate_one_axis(
+            xyz_array, rot_amount=rot_amount1 + rot_amount2, rot_axis=rot_axis1
+        )
+
+    # Figure out how many individual rotation matrices we need, accounting for the
+    # fact that these can either be floats or ndarrays.
+    n_rot = max(
+        rot_amount1.shape[0] if isinstance(rot_amount1, np.ndarray) else 1,
+        rot_amount2.shape[0] if isinstance(rot_amount2, np.ndarray) else 1,
+    )
+    n_vec = xyz_array.shape[-1]
+
+    # The promotion of values to float64 is to suppress numerical precision issues,
+    # since the matrix math can - in limited circumstances - introduce precision errors
+    # of order 10x the limiting numerical precision of the float. For a float32/single,
+    # that's a part in 1e6 (~arcsec-level errors), but for a float64 it translates to
+    # a part in 1e15.
+    rot_matrix = np.empty((3, 3, n_rot), dtype=np.float64)
+
+    # There are two permutations per pair of axes -- when the pair is right-hand
+    # oriented vs left-hand oriented. Check here which one it is. For example,
+    # rotating first on the x-axis, second on the y-axis is considered a
+    # "right-handed" pair, whereas z-axis first, then y-axis would be considered
+    # a "left-handed" pair.
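+    # (For example: rot_axis1=0 (x) then rot_axis2=1 (y) gives np.mod(1 - 0, 3) == 1,
+    # so lhd_order is False (right-handed), whereas rot_axis1=2 (z) then
+    # rot_axis2=1 (y) gives np.mod(1 - 2, 3) == 2, so lhd_order is True.)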
+ lhd_order = np.mod(rot_axis2 - rot_axis1, 3) != 1 + + temp_idx = [ + np.mod(rot_axis1 - lhd_order, 3), + np.mod(rot_axis1 + 1 - lhd_order, 3), + np.mod(rot_axis1 + 2 - lhd_order, 3), + ] + + # We're using lots of sin and cos calculations -- doing them once upfront saves + # quite a bit of time by eliminating redundant calculations + sin_lo = np.sin(rot_amount2 if lhd_order else rot_amount1, dtype=np.float64) + cos_lo = np.cos(rot_amount2 if lhd_order else rot_amount1, dtype=np.float64) + sin_hi = np.sin(rot_amount1 if lhd_order else rot_amount2, dtype=np.float64) + cos_hi = np.cos(rot_amount1 if lhd_order else rot_amount2, dtype=np.float64) + + # Take care of the diagonal terms first, since they aren't actually affected by the + # order of rotational opertations + rot_matrix[temp_idx[0], temp_idx[0]] = cos_hi + rot_matrix[temp_idx[1], temp_idx[1]] = cos_lo + rot_matrix[temp_idx[2], temp_idx[2]] = cos_lo * cos_hi + + # Now time for the off-diagonal terms, as a set of 3 pairs. The rotation matrix + # for a left-hand oriented pair of rotation axes (e.g., x-rot, then y-rot) is just + # a transpose of the right-hand orientation of the same pair (e.g., y-rot, then + # x-rot). + rot_matrix[temp_idx[0 + lhd_order], temp_idx[1 - lhd_order]] = sin_lo * sin_hi + rot_matrix[temp_idx[0 - lhd_order], temp_idx[lhd_order - 1]] = ( + cos_lo * sin_hi * ((-1.0) ** lhd_order) + ) + + rot_matrix[temp_idx[1 - lhd_order], temp_idx[0 + lhd_order]] = 0.0 + rot_matrix[temp_idx[1 + lhd_order], temp_idx[2 - lhd_order]] = sin_lo * ( + (-1.0) ** (1 + lhd_order) + ) + + rot_matrix[temp_idx[lhd_order - 1], temp_idx[0 - lhd_order]] = sin_hi * ( + (-1.0) ** (1 + lhd_order) + ) + rot_matrix[temp_idx[2 - lhd_order], temp_idx[1 + lhd_order]] = ( + sin_lo * cos_hi * ((-1.0) ** (lhd_order)) + ) + + # The rot matrix was shape (3, 3, n_rot) to help speed up filling in the elements + # of each matrix, but now we want to flip it into its proper shape of (n_rot, 3, 3) + rot_matrix = np.transpose(rot_matrix, axes=[2, 0, 1]) + + if (n_rot == 1) and (n_vec == 1) and (xyz_array.ndim == 3): + # This is a special case where we allow the rotation axis to "expand" along + # the 0th axis of the rot_amount arrays. For xyz_array, if n_vectors = 1 + # but n_rot !=1, then it's a lot faster (by about 10x) to "switch it up" and + # swap the n_vector and n_rot axes, and then swap them back once everything + # else is done. + return np.transpose( + _rotate_matmul_wrapper( # xyz_array, rot_matrix, n_rot + xyz_array=np.transpose(xyz_array, axes=[2, 1, 0]), + rot_matrix=rot_matrix, + n_rot=n_rot, + ), + axes=[2, 1, 0], + ) + else: + return _rotate_matmul_wrapper( + xyz_array=xyz_array, rot_matrix=rot_matrix, n_rot=n_rot + ) + + +def calc_uvw( + *, + app_ra=None, + app_dec=None, + frame_pa=None, + lst_array=None, + use_ant_pos=True, + uvw_array=None, + antenna_positions=None, + antenna_numbers=None, + ant_1_array=None, + ant_2_array=None, + old_app_ra=None, + old_app_dec=None, + old_frame_pa=None, + telescope_lat=None, + telescope_lon=None, + from_enu=False, + to_enu=False, +): + """ + Calculate an array of baseline coordinates, in either uvw or ENU. 
+
+    This routine is meant as a convenience function for producing baseline coordinates
+    under a few different circumstances:
+
+    1) Calculating ENU coordinates using antenna positions
+    2) Calculating uvw coordinates at a given sky position using antenna positions
+    3) Converting from ENU coordinates to uvw coordinates
+    4) Converting from uvw coordinates to ENU coordinates
+    5) Converting from uvw coordinates at one sky position to another sky position
+
+    Different conversion pathways have different parameters that are required.
+
+    Parameters
+    ----------
+    app_ra : ndarray of float
+        Apparent RA of the target phase center, required if calculating baseline
+        coordinates in uvw-space (vs ENU-space). Shape is (Nblts,), units are
+        radians.
+    app_dec : ndarray of float
+        Apparent declination of the target phase center, required if calculating
+        baseline coordinates in uvw-space (vs ENU-space). Shape is (Nblts,),
+        units are radians.
+    frame_pa : ndarray of float
+        Position angle between the great circle of declination in the apparent frame
+        versus that of the reference frame, used for making sure that "North" on
+        the derived maps points towards a particular celestial pole (not just the
+        topocentric one). Required if not deriving baseline coordinates from antenna
+        positions, from_enu=False, and a value for old_frame_pa is given. Shape is
+        (Nblts,), units are radians.
+    old_app_ra : ndarray of float
+        Apparent RA of the previous phase center, required if not deriving baseline
+        coordinates from antenna positions and from_enu=False. Shape is (Nblts,),
+        units are radians.
+    old_app_dec : ndarray of float
+        Apparent declination of the previous phase center, required if not deriving
+        baseline coordinates from antenna positions and from_enu=False. Shape is
+        (Nblts,), units are radians.
+    old_frame_pa : ndarray of float
+        Frame position angle of the previous phase center, required if not deriving
+        baseline coordinates from antenna positions, from_enu=False, and a value
+        for frame_pa is supplied. Shape is (Nblts,), units are radians.
+    lst_array : ndarray of float
+        Local apparent sidereal time, required if deriving baseline coordinates from
+        antenna positions, or converting to/from ENU coordinates. Shape is (Nblts,).
+    use_ant_pos : bool
+        Switch to determine whether to derive uvw values from the antenna positions
+        (if set to True), or to use the previously calculated uvw coordinates to derive
+        the new baseline vectors (if set to False). Default is True.
+    uvw_array : ndarray of float
+        Array of previous baseline coordinates (in either uvw or ENU), required if
+        not deriving new coordinates from antenna positions. Shape is (Nblts, 3).
+    antenna_positions : ndarray of float
+        List of antenna positions relative to array center in ECEF coordinates,
+        required if not providing `uvw_array`. Shape is (Nants, 3).
+    antenna_numbers: ndarray of int
+        List of antenna numbers, ordered in the same way as `antenna_positions` (e.g.,
+        `antenna_numbers[0]` should give the number of the antenna that resides at ECEF
+        position given by `antenna_positions[0]`). Shape is (Nants,), required if not
+        providing `uvw_array`. Contains all unique entries of the joint set of
+        `ant_1_array` and `ant_2_array`.
+    ant_1_array : ndarray of int
+        Antenna number of the first antenna in the baseline pair, for all baselines.
+        Required if not providing `uvw_array`, shape is (Nblts,).
+    ant_2_array : ndarray of int
+        Antenna number of the second antenna in the baseline pair, for all baselines.
+        Required if not providing `uvw_array`, shape is (Nblts,).
+    telescope_lat : float
+        Latitude of the phase center, units radians, required if deriving baseline
+        coordinates from antenna positions, or converting to/from ENU coordinates.
+    telescope_lon : float
+        Longitude of the phase center, units radians, required if deriving baseline
+        coordinates from antenna positions, or converting to/from ENU coordinates.
+    from_enu : boolean
+        Set to True if uvw_array is expressed in ENU coordinates. Default is False.
+    to_enu : boolean
+        Set to True if you would like the output expressed in ENU coordinates. Default
+        is False.
+
+    Returns
+    -------
+    new_coords : ndarray of float64
+        Set of baseline coordinates, shape (Nblts, 3).
+    """
+    if to_enu:
+        if lst_array is None and not use_ant_pos:
+            raise ValueError(
+                "Must include lst_array to calculate baselines in ENU coordinates!"
+            )
+        if telescope_lat is None:
+            raise ValueError(
+                "Must include telescope_lat to calculate baselines in ENU coordinates!"
+            )
+    else:
+        if ((app_ra is None) or (app_dec is None)) and frame_pa is None:
+            raise ValueError(
+                "Must include both app_ra and app_dec, or frame_pa to calculate "
+                "baselines in uvw coordinates!"
+            )
+
+    if use_ant_pos:
+        # Assume at this point we are dealing w/ antenna positions
+        if antenna_positions is None:
+            raise ValueError("Must include antenna_positions if use_ant_pos=True.")
+        if (ant_1_array is None) or (ant_2_array is None) or (antenna_numbers is None):
+            raise ValueError(
+                "Must include ant_1_array, ant_2_array, and antenna_numbers "
+                "when setting use_ant_pos=True."
+            )
+        if lst_array is None and not to_enu:
+            raise ValueError(
+                "Must include lst_array if use_ant_pos=True and not calculating "
+                "baselines in ENU coordinates."
+            )
+        if telescope_lon is None:
+            raise ValueError("Must include telescope_lon if use_ant_pos=True.")
+
+        ant_dict = {ant_num: idx for idx, ant_num in enumerate(antenna_numbers)}
+        ant_1_index = np.array(
+            [ant_dict[ant_num] for ant_num in ant_1_array], dtype=int
+        )
+        ant_2_index = np.array(
+            [ant_dict[ant_num] for ant_num in ant_2_array], dtype=int
+        )
+
+        N_ants = antenna_positions.shape[0]
+        # Use the app_ra, app_dec, and lst_array arrays to figure out how many unique
+        # rotations are actually needed. If the ratio of Nblts to number of unique
+        # entries is favorable, we can just rotate the antenna positions and save
+        # ourselves a bit of work.
+        if to_enu:
+            # If to_enu, skip all this -- there's only one unique ha + dec combo
+            unique_mask = np.zeros(len(ant_1_index), dtype=np.bool_)
+            unique_mask[0] = True
+        else:
+            unique_mask = np.append(
+                True,
+                (
+                    ((lst_array[:-1] - app_ra[:-1]) != (lst_array[1:] - app_ra[1:]))
+                    | (app_dec[:-1] != app_dec[1:])
+                ),
+            )
+
+        # GHA -> Hour Angle as measured at Greenwich (because antenna coords are
+        # centered such that x-plane intersects the meridian at longitude 0).
+        if to_enu:
+            # Unprojected coordinates are given in the ENU convention -- that's
+            # equivalent to calculating uvw's based on zenith.
We can use that to our + # advantage and spoof the gha and dec based on telescope lon and lat + unique_gha = np.zeros(1) - telescope_lon + unique_dec = np.zeros(1) + telescope_lat + unique_pa = None + else: + unique_gha = (lst_array[unique_mask] - app_ra[unique_mask]) - telescope_lon + unique_dec = app_dec[unique_mask] + unique_pa = 0.0 if frame_pa is None else frame_pa[unique_mask] + + # Tranpose the ant vectors so that they are in the proper shape + ant_vectors = np.transpose(antenna_positions)[np.newaxis, :, :] + # Apply rotations, and then reorganize the ndarray so that you can access + # individual antenna vectors quickly. + ant_rot_vectors = np.reshape( + np.transpose( + _rotate_one_axis( + _rotate_two_axis( + ant_vectors, + rot_amount1=unique_gha, + rot_amount2=unique_dec, + rot_axis1=2, + rot_axis2=1, + ), + rot_amount=unique_pa, + rot_axis=0, + ), + axes=[0, 2, 1], + ), + (-1, 3), + ) + + unique_mask[0] = False + unique_map = np.cumsum(unique_mask) * N_ants + new_coords = ( + ant_rot_vectors[unique_map + ant_2_index] + - ant_rot_vectors[unique_map + ant_1_index] + ) + else: + if uvw_array is None: + raise ValueError("Must include uvw_array if use_ant_pos=False.") + if from_enu: + if to_enu: + # Well this was pointless... returning your uvws unharmed + return uvw_array + # Unprojected coordinates appear to be stored in ENU coordinates -- that's + # equivalent to calculating uvw's based on zenith. We can use that to our + # advantage and spoof old_app_ra and old_app_dec based on lst_array and + # telescope_lat + if telescope_lat is None: + raise ValueError( + "Must include telescope_lat if moving between " + "ENU (i.e., 'unprojected') and uvw coordinates!" + ) + if lst_array is None: + raise ValueError( + "Must include lst_array if moving between ENU " + "(i.e., 'unprojected') and uvw coordinates!" + ) + else: + if (old_frame_pa is None) and not (frame_pa is None or to_enu): + raise ValueError( + "Must include old_frame_pa values if data are phased and " + "applying new position angle values (frame_pa)." + ) + if ((old_app_ra is None) and not (app_ra is None or to_enu)) or ( + (old_app_dec is None) and not (app_dec is None or to_enu) + ): + raise ValueError( + "Must include old_app_ra and old_app_dec values when data are " + "already phased and phasing to a new position." + ) + # For this operation, all we need is the delta-ha coverage, which _should_ be + # entirely encapsulated by the change in RA. + if (app_ra is None) and (old_app_ra is None): + gha_delta_array = 0.0 + else: + gha_delta_array = (lst_array if from_enu else old_app_ra) - ( + lst_array if to_enu else app_ra + ) + + # Notice below there's an axis re-orientation here, to go from uvw -> XYZ, + # where X is pointing in the direction of the source. This is mostly here + # for convenience and code legibility -- a slightly different pair of + # rotations would give you the same result w/o needing to cycle the axes. + + # Up front, we want to trap the corner-case where the sky position you are + # phasing up to hasn't changed, just the position angle (i.e., which way is + # up on the map). This is a much easier transform to handle. 
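+        # (For reference: uvw_array[:, [2, 0, 1]] below cycles (u, v, w) into
+        # (w, u, v), putting the source-pointing w axis first as X.)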
+        if np.all(gha_delta_array == 0.0) and np.all(old_app_dec == app_dec):
+            new_coords = _rotate_one_axis(
+                uvw_array[:, [2, 0, 1], np.newaxis],
+                rot_amount=frame_pa - (0.0 if old_frame_pa is None else old_frame_pa),
+                rot_axis=0,
+            )[:, :, 0]
+        else:
+            new_coords = _rotate_two_axis(
+                _rotate_two_axis(
+                    uvw_array[:, [2, 0, 1], np.newaxis],
+                    rot_amount1=(
+                        0.0 if (from_enu or old_frame_pa is None) else (-old_frame_pa)
+                    ),
+                    rot_amount2=(-telescope_lat) if from_enu else (-old_app_dec),
+                    rot_axis1=0,
+                    rot_axis2=1,
+                ),
+                rot_amount1=gha_delta_array,
+                rot_amount2=telescope_lat if to_enu else app_dec,
+                rot_axis1=2,
+                rot_axis2=1,
+            )
+
+            # One final rotation applied here, to compensate for the fact that we want
+            # the Dec-axis of our image (Fourier dual to the v-axis) to be aligned with
+            # the chosen frame, if we are not in ENU coordinates
+            if not to_enu:
+                new_coords = _rotate_one_axis(
+                    new_coords, rot_amount=frame_pa, rot_axis=0
+                )
+
+            # Finally drop the now-vestigial last axis of the array
+            new_coords = new_coords[:, :, 0]
+
+    # There's one last task to do, which is to re-align the axes from projected
+    # XYZ -> uvw, where X (which points towards the source) falls on the w axis,
+    # and Y and Z fall on the u and v axes, respectively.
+    return new_coords[:, [1, 2, 0]]
+
+
+def transform_sidereal_coords(
+    *,
+    longitude,
+    latitude,
+    in_coord_frame,
+    out_coord_frame,
+    in_coord_epoch=None,
+    out_coord_epoch=None,
+    time_array=None,
+):
+    """
+    Transform a given set of coordinates from one sidereal coordinate frame to another.
+
+    Uses astropy to convert coordinates from one sidereal frame into another.
+    This function will support transforms from several frames, including GCRS,
+    FK5 (i.e., J2000), FK4 (i.e., B1950), Galactic, Supergalactic, CIRS, HCRS, and
+    a few others (basically anything that doesn't require knowing the observer's
+    location on Earth/other celestial body).
+
+    Parameters
+    ----------
+    longitude : float or ndarray of floats
+        Longitudinal coordinate to be transformed, typically expressed as the right
+        ascension, in units of radians. Can either be a float, or an ndarray of
+        floats with shape (Ncoords,). Must agree with latitude.
+    latitude : float or ndarray of floats
+        Latitudinal coordinate to be transformed, typically expressed as the
+        declination, in units of radians. Can either be a float, or an ndarray of
+        floats with shape (Ncoords,). Must agree with longitude.
+    in_coord_frame : string
+        Reference frame for the provided coordinates. Expected to match a list of
+        those supported within the astropy SkyCoord object. An incomplete list includes
+        'gcrs', 'fk4', 'fk5', 'galactic', 'supergalactic', 'cirs', and 'hcrs'.
+    out_coord_frame : string
+        Reference frame to output coordinates in. Expected to match a list of
+        those supported within the astropy SkyCoord object. An incomplete list includes
+        'gcrs', 'fk4', 'fk5', 'galactic', 'supergalactic', 'cirs', and 'hcrs'.
+    in_coord_epoch : float
+        Epoch for the input coordinate frame. Optional parameter, only required
+        when using either the FK4 (B1950) or FK5 (J2000) coordinate systems. Units are
+        in fractional years.
+    out_coord_epoch : float
+        Epoch for the output coordinate frame. Optional parameter, only required
+        when using either the FK4 (B1950) or FK5 (J2000) coordinate systems. Units are
+        in fractional years.
+    time_array : float or ndarray of floats
+        Julian date(s) to which the coordinates correspond, only used in frames
+        with annual motion terms (e.g., aberration in GCRS). Can either be a float,
+        or an ndarray of floats with shape (Ntimes,), assuming that either latitude
+        and longitude are floats, or that Ntimes == Ncoords.
+
+    Returns
+    -------
+    new_lon : float or ndarray of floats
+        Longitudinal coordinates, in units of radians. Output will be an ndarray
+        if any inputs were, with shape (Ncoords,) or (Ntimes,), depending on inputs.
+    new_lat : float or ndarray of floats
+        Latitudinal coordinates, in units of radians. Output will be an ndarray
+        if any inputs were, with shape (Ncoords,) or (Ntimes,), depending on inputs.
+    """
+    lon_coord = longitude * units.rad
+    lat_coord = latitude * units.rad
+
+    # Check here to make sure that lat_coord and lon_coord are the same length,
+    # either 1 or len(time_array)
+    if lat_coord.shape != lon_coord.shape:
+        raise ValueError("lon and lat must be the same shape.")
+
+    if lon_coord.ndim == 0:
+        lon_coord.shape += (1,)
+        lat_coord.shape += (1,)
+
+    # Check to make sure that we have a properly formatted epoch for our in-bound
+    # coordinate frame
+    in_epoch = None
+    if isinstance(in_coord_epoch, str) or isinstance(in_coord_epoch, Time):
+        # If it's a string or a Time object, we don't need to do anything more
+        in_epoch = Time(in_coord_epoch)
+    elif in_coord_epoch is not None:
+        if in_coord_frame.lower() in ["fk4", "fk4noeterms"]:
+            in_epoch = Time(in_coord_epoch, format="byear")
+        else:
+            in_epoch = Time(in_coord_epoch, format="jyear")
+
+    # Now do the same for the outbound frame
+    out_epoch = None
+    if isinstance(out_coord_epoch, str) or isinstance(out_coord_epoch, Time):
+        # If it's a string or a Time object, we don't need to do anything more
+        out_epoch = Time(out_coord_epoch)
+    elif out_coord_epoch is not None:
+        if out_coord_frame.lower() in ["fk4", "fk4noeterms"]:
+            out_epoch = Time(out_coord_epoch, format="byear")
+        else:
+            out_epoch = Time(out_coord_epoch, format="jyear")
+
+    # Make sure that the time array matches up with what we expect. Thanks to astropy
+    # weirdness, time_array has to be the same length as lat/lon coords
+    rep_time = False
+    rep_crds = False
+    if time_array is None:
+        time_obj_array = None
+    else:
+        if isinstance(time_array, Time):
+            time_obj_array = time_array
+        else:
+            time_obj_array = Time(time_array, format="jd", scale="utc")
+        if (time_obj_array.size != 1) and (lon_coord.size != 1):
+            if time_obj_array.shape != lon_coord.shape:
+                raise ValueError(
+                    "Shape of time_array must match that of "
+                    "latitude/longitude if len(time_array) > 1."
+ ) + else: + rep_crds = (time_obj_array.size != 1) and (lon_coord.size == 1) + rep_time = (time_obj_array.size == 1) and (lon_coord.size != 1) + if rep_crds: + lon_coord = np.repeat(lon_coord, len(time_array)) + lat_coord = np.repeat(lat_coord, len(time_array)) + if rep_time: + time_obj_array = Time( + np.repeat(time_obj_array.jd, len(lon_coord)), format="jd", scale="utc" + ) + coord_object = SkyCoord( + lon_coord, + lat_coord, + frame=in_coord_frame, + equinox=in_epoch, + obstime=time_obj_array, + ) + + # Easiest, most general way to transform to the new frame is to create a dummy + # SkyCoord with all the attributes needed -- note that we particularly need this + # in order to use a non-standard equinox/epoch + new_coord = coord_object.transform_to( + SkyCoord(0, 0, unit="rad", frame=out_coord_frame, equinox=out_epoch) + ) + + return new_coord.spherical.lon.rad, new_coord.spherical.lat.rad + + +def transform_icrs_to_app( + *, + time_array, + ra, + dec, + telescope_loc, + telescope_frame="itrs", + ellipsoid=None, + epoch=2000.0, + pm_ra=None, + pm_dec=None, + vrad=None, + dist=None, + astrometry_library=None, +): + """ + Transform a set of coordinates in ICRS to topocentric/apparent coordinates. + + This utility uses one of three libraries (astropy, NOVAS, or ERFA) to calculate + the apparent (i.e., topocentric) coordinates of a source at a given time and + location, given a set of coordinates expressed in the ICRS frame. These coordinates + are most typically used for defining the phase center of the array (i.e, calculating + baseline vectors). + + As of astropy v4.2, the agreement between the three libraries is consistent down to + the level of better than 1 mas, with the values produced by astropy and pyERFA + consistent to bettter than 10 µas (this is not surprising, given that astropy uses + pyERFA under the hood for astrometry). ERFA is the default as it outputs + coordinates natively in the apparent frame (whereas NOVAS and astropy do not), as + well as the fact that of the three libraries, it produces results the fastest. + + Parameters + ---------- + time_array : float or array-like of float + Julian dates to calculate coordinate positions for. Can either be a single + float, or an array-like of shape (Ntimes,). + ra : float or array-like of float + ICRS RA of the celestial target, expressed in units of radians. Can either + be a single float or array of shape (Ntimes,), although this must be consistent + with other parameters (with the exception of telescope location parameters). + dec : float or array-like of float + ICRS Dec of the celestial target, expressed in units of radians. Can either + be a single float or array of shape (Ntimes,), although this must be consistent + with other parameters (with the exception of telescope location parameters). + telescope_loc : array-like of floats or EarthLocation or MoonLocation + ITRS latitude, longitude, and altitude (rel to sea-level) of the phase center + of the array. Can either be provided as an astropy EarthLocation, or a tuple + of shape (3,) containing (in order) the latitude, longitude, and altitude, + in units of radians, radians, and meters, respectively. + telescope_frame: str, optional + Reference frame for telescope location. Options are itrs (default) or mcmf. + Only used if telescope_loc is not an EarthLocation or MoonLocation. + ellipsoid : str + Ellipsoid to use for lunar coordinates. Must be one of "SPHERE", + "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default + is "SPHERE". 
Only used if frame is mcmf.
+    epoch : int or float or str or Time object
+        Epoch of the coordinate data supplied, only used when supplying proper motion
+        values. If supplying a number, it will be assumed to be in Julian years.
+        Default is J2000.0.
+    pm_ra : float or array-like of float
+        Proper motion in RA of the source, expressed in units of milliarcsec / year.
+        Proper motion values are applied relative to the J2000 epoch (i.e., RA/Dec
+        ICRS values should be set to their expected values when the epoch is 2000.0).
+        Can either be a single float or array of shape (Ntimes,), although this must
+        be consistent with other parameters (namely ra and dec). Note that
+        units are in dRA/dt, not cos(Dec)*dRA/dt. Not required.
+    pm_dec : float or array-like of float
+        Proper motion in Dec of the source, expressed in units of milliarcsec / year.
+        Proper motion values are applied relative to the J2000 epoch (i.e., RA/Dec
+        ICRS values should be set to their expected values when the epoch is 2000.0).
+        Can either be a single float or array of shape (Ntimes,), although this must
+        be consistent with other parameters (namely ra and dec). Not
+        required.
+    vrad : float or array-like of float
+        Radial velocity of the source, expressed in units of km / sec. Can either be
+        a single float or array of shape (Ntimes,), although this must be consistent
+        with other parameters (namely ra and dec). Not required.
+    dist : float or array-like of float
+        Distance of the source, expressed in parsecs. Can either be a single
+        float or array of shape (Ntimes,), although this must be consistent with other
+        parameters (namely ra and dec). Not required.
+    astrometry_library : str
+        Library used for running the coordinate conversions. Allowed options are
+        'erfa' (which uses pyERFA), 'novas' (which uses the python-novas library),
+        and 'astropy' (which uses the astropy utilities). Default is erfa unless
+        the telescope_location is a MoonLocation object, in which case the default is
+        astropy.
+
+    Returns
+    -------
+    app_ra : ndarray of floats
+        Apparent right ascension coordinates, in units of radians, of shape (Ntimes,).
+    app_dec : ndarray of floats
+        Apparent declination coordinates, in units of radians, of shape (Ntimes,).
+    """
+    if telescope_frame.upper() == "MCMF":
+        if not hasmoon:
+            raise ValueError(
+                "Need to install `lunarsky` package to work with MCMF frame."
+            )
+        if ellipsoid is None:
+            ellipsoid = "SPHERE"
+
+    # Make sure that the library requested is actually permitted
+    if astrometry_library is None:
+        if hasmoon and isinstance(telescope_loc, MoonLocation):
+            astrometry_library = "astropy"
+        elif telescope_frame.upper() == "MCMF":
+            astrometry_library = "astropy"
+        else:
+            astrometry_library = "erfa"
+
+    if astrometry_library not in ["erfa", "novas", "astropy"]:
+        raise ValueError(
+            "Requested coordinate transformation library is not supported, please "
+            "select either 'erfa', 'novas', or 'astropy' for astrometry_library."
+    )
+    ra_coord = ra * units.rad
+    dec_coord = dec * units.rad
+
+    # Check here to make sure that ra_coord and dec_coord are the same length,
+    # either 1 or len(time_array)
+    multi_coord = ra_coord.size != 1
+    if ra_coord.shape != dec_coord.shape:
+        raise ValueError("ra and dec must be the same shape.")
+
+    pm_ra_coord = None if pm_ra is None else pm_ra * (units.mas / units.yr)
+    pm_dec_coord = None if pm_dec is None else pm_dec * (units.mas / units.yr)
+    d_coord = (
+        None if (dist is None or np.all(dist == 0.0)) else Distance(dist * units.pc)
+    )
+    v_coord = None if vrad is None else vrad * (units.km / units.s)
+
+    opt_list = [pm_ra_coord, pm_dec_coord, d_coord, v_coord]
+    opt_names = ["pm_ra", "pm_dec", "dist", "vrad"]
+    # Check the optional inputs, make sure that they're sensible
+    for item, name in zip(opt_list, opt_names):
+        if item is not None:
+            if ra_coord.shape != item.shape:
+                raise ValueError("%s must be the same shape as ra and dec." % name)
+
+    if isinstance(telescope_loc, EarthLocation) or (
+        hasmoon and isinstance(telescope_loc, MoonLocation)
+    ):
+        site_loc = telescope_loc
+    elif telescope_frame.upper() == "MCMF":
+        site_loc = MoonLocation.from_selenodetic(
+            telescope_loc[1] * (180.0 / np.pi),
+            telescope_loc[0] * (180.0 / np.pi),
+            height=telescope_loc[2],
+            ellipsoid=ellipsoid,
+        )
+    else:
+        site_loc = EarthLocation.from_geodetic(
+            telescope_loc[1] * (180.0 / np.pi),
+            telescope_loc[0] * (180.0 / np.pi),
+            height=telescope_loc[2],
+        )
+
+    if (
+        hasmoon
+        and isinstance(site_loc, MoonLocation)
+        and astrometry_library != "astropy"
+    ):
+        raise NotImplementedError(
+            "MoonLocation telescopes are only supported with the 'astropy' astrometry "
+            "library"
+        )
+
+    # Useful for both astropy and novas methods, the latter of which gives easy
+    # access to the IERS data that we want.
+    if isinstance(time_array, Time):
+        time_obj_array = time_array
+    else:
+        time_obj_array = Time(time_array, format="jd", scale="utc")
+
+    if time_obj_array.size != 1:
+        if (time_obj_array.shape != ra_coord.shape) and multi_coord:
+            raise ValueError(
+                "time_array must be either of length 1 (single "
+                "float) or same length as ra and dec."
+            )
+    elif time_obj_array.ndim == 0:
+        # Make the array at least 1-dimensional so we don't run into indexing
+        # issues later.
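+        # (e.g., Time(2459000.5, format="jd") is 0-dimensional; wrapping it in a
+        # list below gives it shape (1,))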
+        time_obj_array = Time([time_obj_array])
+
+    # Check to make sure that we have a properly formatted epoch for our in-bound
+    # coordinate frame
+    coord_epoch = None
+    if isinstance(epoch, str) or isinstance(epoch, Time):
+        # If it's a string or a Time object, we don't need to do anything more
+        coord_epoch = Time(epoch)
+    elif epoch is not None:
+        coord_epoch = Time(epoch, format="jyear")
+
+    # Note if time_array is a single element
+    multi_time = time_obj_array.size != 1
+
+    # Get IERS data, which is needed for NOVAS and ERFA
+    polar_motion_data = iers.earth_orientation_table.get()
+
+    pm_x_array, pm_y_array = polar_motion_data.pm_xy(time_obj_array)
+    delta_x_array, delta_y_array = polar_motion_data.dcip_xy(time_obj_array)
+
+    pm_x_array = pm_x_array.to_value("arcsec")
+    pm_y_array = pm_y_array.to_value("arcsec")
+    delta_x_array = delta_x_array.to_value("marcsec")
+    delta_y_array = delta_y_array.to_value("marcsec")
+    # Catch the case where we don't have CIP delta values yet (they don't typically have
+    # predictive values like the polar motion does)
+    delta_x_array[np.isnan(delta_x_array)] = 0.0
+    delta_y_array[np.isnan(delta_y_array)] = 0.0
+
+    # If the source was instantiated w/ floats, it'll be a 0-dim object, which will
+    # throw errors if we try to treat it as an array. Reshape to a 1D array of len 1
+    # so that all the calls can be uniform
+    if ra_coord.ndim == 0:
+        ra_coord.shape += (1,)
+        dec_coord.shape += (1,)
+        if pm_ra_coord is not None:
+            pm_ra_coord.shape += (1,)
+        if d_coord is not None:
+            d_coord.shape += (1,)
+        if v_coord is not None:
+            v_coord.shape += (1,)
+
+    # If there is an epoch and a proper motion, apply that motion now
+
+    if astrometry_library == "astropy":
+        # Astropy doesn't have (oddly enough) a way of getting at the apparent RA/Dec
+        # directly, but we can cheat this by going to AltAz, and then converting back
+        # to apparent RA/Dec using the telescope lat and LAST.
+        if (epoch is not None) and (pm_ra is not None) and (pm_dec is not None):
+            # astropy is a bit weird in how it handles proper motion, so rather than
+            # fight with it to do it all in one step, we separate it into two: first
+            # apply proper motion to ICRS, then transform to topocentric.
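+            # (Note: SkyCoord expects pm_ra_cosdec, i.e., cos(Dec) * dRA/dt, while
+            # the pm_ra argument to this function is dRA/dt -- hence the explicit
+            # np.cos(dec_coord) factor in the call below.)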
+            sky_coord = SkyCoord(
+                ra=ra_coord,
+                dec=dec_coord,
+                pm_ra_cosdec=pm_ra_coord * np.cos(dec_coord),
+                pm_dec=pm_dec_coord,
+                frame="icrs",
+            )
+
+            sky_coord = sky_coord.apply_space_motion(dt=(time_obj_array - coord_epoch))
+            ra_coord = sky_coord.ra
+            dec_coord = sky_coord.dec
+            if d_coord is not None:
+                d_coord = d_coord.repeat(ra_coord.size)
+            if v_coord is not None:
+                v_coord = v_coord.repeat(ra_coord.size)
+
+        if isinstance(site_loc, EarthLocation):
+            time_obj_array = Time(time_obj_array, location=site_loc)
+
+            sky_coord = SkyCoord(
+                ra=ra_coord,
+                dec=dec_coord,
+                distance=d_coord,
+                radial_velocity=v_coord,
+                frame="icrs",
+            )
+
+            azel_data = sky_coord.transform_to(
+                SkyCoord(
+                    np.zeros_like(time_obj_array) * units.rad,
+                    np.zeros_like(time_obj_array) * units.rad,
+                    location=site_loc,
+                    obstime=time_obj_array,
+                    frame="altaz",
+                )
+            )
+        else:
+            sky_coord = LunarSkyCoord(
+                ra=ra_coord,
+                dec=dec_coord,
+                distance=d_coord,
+                radial_velocity=v_coord,
+                frame="icrs",
+            )
+
+            azel_data = sky_coord.transform_to(
+                LunarSkyCoord(
+                    np.zeros_like(time_obj_array) * units.rad,
+                    np.zeros_like(time_obj_array) * units.rad,
+                    location=site_loc,
+                    obstime=time_obj_array,
+                    frame="lunartopo",
+                )
+            )
+            time_obj_array = LTime(time_obj_array, location=site_loc)
+
+        app_ha, app_dec = erfa.ae2hd(
+            azel_data.az.rad, azel_data.alt.rad, site_loc.lat.rad
+        )
+        app_ra = np.mod(
+            time_obj_array.sidereal_time("apparent").rad - app_ha, 2 * np.pi
+        )
+
+    elif astrometry_library == "novas":
+        # Import the NOVAS library only if it's needed/available.
+        try:
+            import novas_de405  # noqa
+            from novas import compat as novas
+            from novas.compat import eph_manager
+        except ImportError as e:  # pragma: no cover
+            raise ImportError(
+                "novas and/or novas_de405 are not installed but are required for "
+                "NOVAS functionality"
+            ) from e
+
+        # Call is needed to load high-precision ephem data in NOVAS
+        jd_start, jd_end, number = eph_manager.ephem_open()
+
+        # Define the obs location, which is needed to calculate the diurnal aberration
+        # term and polar wobble corrections
+        site_loc = novas.make_on_surface(
+            site_loc.lat.deg,  # latitude in deg
+            site_loc.lon.deg,  # Longitude in deg
+            site_loc.height.to_value("m"),  # Height in meters
+            0.0,  # Temperature, set to 0 for now (no atm refrac)
+            0.0,  # Pressure, set to 0 for now (no atm refrac)
+        )
+
+        # NOVAS wants things in terrestrial time and UT1
+        tt_time_array = time_obj_array.tt.jd
+        ut1_time_array = time_obj_array.ut1.jd
+        gast_array = time_obj_array.sidereal_time("apparent", "greenwich").rad
+
+        if np.any(tt_time_array < jd_start) or np.any(tt_time_array > jd_end):
+            raise ValueError(
+                "No current support for JPL ephems outside of 1700 - 2300 AD. "
+                "Check back later (or possibly earlier)..."
+ ) + + app_ra = np.zeros(tt_time_array.shape) + np.zeros(ra_coord.shape) + app_dec = np.zeros(tt_time_array.shape) + np.zeros(ra_coord.shape) + + for idx in range(len(app_ra)): + if multi_coord or (idx == 0): + # Create a catalog entry for the source in question + if pm_ra is None: + pm_ra_use = 0.0 + else: + pm_ra_use = pm_ra_coord.to_value("mas/yr") * np.cos( + dec_coord[idx].to_value("rad") + ) + + if pm_dec is None: + pm_dec_use = 0.0 + else: + pm_dec_use = pm_dec_coord.to_value("mas/yr") + + if dist is None or np.any(dist == 0.0): + parallax = 0.0 + else: + parallax = d_coord[idx].kiloparsec ** -1.0 + + if vrad is None: + vrad_use = 0.0 + else: + vrad_use = v_coord[idx].to_value("km/s") + + cat_entry = novas.make_cat_entry( + "dummy_name", # Dummy source name + "GKK", # Catalog ID, fixed for now + 156, # Star ID number, fixed for now + ra_coord[idx].to_value("hourangle"), + dec_coord[idx].to_value("deg"), + pm_ra_use, + pm_dec_use, + parallax, + vrad_use, + ) + + # Update polar wobble parameters for a given timestamp + if multi_time or (idx == 0): + gast = gast_array[idx] + pm_x = pm_x_array[idx] * np.cos(gast) + pm_y_array[idx] * np.sin(gast) + pm_y = pm_y_array[idx] * np.cos(gast) - pm_x_array[idx] * np.sin(gast) + tt_time = tt_time_array[idx] + ut1_time = ut1_time_array[idx] + novas.cel_pole(tt_time, 2, delta_x_array[idx], delta_y_array[idx]) + + # Calculate topocentric RA/Dec values + [temp_ra, temp_dec] = novas.topo_star( + tt_time, (tt_time - ut1_time) * 86400.0, cat_entry, site_loc, accuracy=0 + ) + xyz_array = polar2_to_cart3( + lon_array=temp_ra * (np.pi / 12.0), lat_array=temp_dec * (np.pi / 180.0) + ) + xyz_array = novas.wobble(tt_time, pm_x, pm_y, xyz_array, 1) + + app_ra[idx], app_dec[idx] = cart3_to_polar2(np.array(xyz_array)) + elif astrometry_library == "erfa": + # liberfa wants things in radians + pm_x_array *= np.pi / (3600.0 * 180.0) + pm_y_array *= np.pi / (3600.0 * 180.0) + + if pm_ra is None: + pm_ra_use = 0.0 + else: + pm_ra_use = pm_ra_coord.to_value("rad/yr") + + if pm_dec is None: + pm_dec_use = 0.0 + else: + pm_dec_use = pm_dec_coord.to_value("rad/yr") + + if dist is None or np.any(dist == 0.0): + parallax = 0.0 + else: + parallax = d_coord.pc**-1.0 + + if vrad is None: + vrad_use = 0 + else: + vrad_use = v_coord.to_value("km/s") + + [_, _, _, app_dec, app_ra, eqn_org] = erfa.atco13( + ra_coord.to_value("rad"), + dec_coord.to_value("rad"), + pm_ra_use, + pm_dec_use, + parallax, + vrad_use, + time_obj_array.utc.jd1, + time_obj_array.utc.jd2, + time_obj_array.delta_ut1_utc, + site_loc.lon.rad, + site_loc.lat.rad, + site_loc.height.to_value("m"), + pm_x_array, + pm_y_array, + 0, # ait pressure, used for refraction (ignored) + 0, # amb temperature, used for refraction (ignored) + 0, # rel humidity, used for refraction (ignored) + 0, # wavelength, used for refraction (ignored) + ) + + app_ra = np.mod(app_ra - eqn_org, 2 * np.pi) + + return app_ra, app_dec + + +def transform_app_to_icrs( + *, + time_array, + app_ra, + app_dec, + telescope_loc, + telescope_frame="itrs", + ellipsoid="SPHERE", + astrometry_library=None, +): + """ + Transform a set of coordinates in topocentric/apparent to ICRS coordinates. + + This utility uses either astropy or erfa to calculate the ICRS coordinates of + a given set of apparent source coordinates. These coordinates are most typically + used for defining the celestial/catalog position of a source. 
Note that at present, + this is only implemented in astropy and pyERFA, although it could hypothetically + be extended to NOVAS at some point. + + Parameters + ---------- + time_array : float or ndarray of float + Julian dates to calculate coordinate positions for. Can either be a single + float, or an ndarray of shape (Ntimes,). + app_ra : float or ndarray of float + ICRS RA of the celestial target, expressed in units of radians. Can either + be a single float or array of shape (Ncoord,). Note that if time_array is + not a singleton value, then Ncoord must be equal to Ntimes. + app_dec : float or ndarray of float + ICRS Dec of the celestial target, expressed in units of radians. Can either + be a single float or array of shape (Ncoord,). Note that if time_array is + not a singleton value, then Ncoord must be equal to Ntimes. + telescope_loc : tuple of floats or EarthLocation + ITRF latitude, longitude, and altitude (rel to sea-level) of the phase center + of the array. Can either be provided as an astropy EarthLocation, or a tuple + of shape (3,) containing (in order) the latitude, longitude, and altitude, + in units of radians, radians, and meters, respectively. + telescope_frame: str, optional + Reference frame for telescope location. Options are itrs (default) or mcmf. + Only used if telescope_loc is not an EarthLocation or MoonLocation. + ellipsoid : str + Ellipsoid to use for lunar coordinates. Must be one of "SPHERE", + "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default + is "SPHERE". Only used if frame is mcmf. + astrometry_library : str + Library used for running the coordinate conversions. Allowed options are + 'erfa' (which uses the pyERFA), and 'astropy' (which uses the astropy + utilities). Default is erfa unless the telescope_location is a MoonLocation + object, in which case the default is astropy. + + Returns + ------- + icrs_ra : ndarray of floats + ICRS right ascension coordinates, in units of radians, of either shape + (Ntimes,) if Ntimes >1, otherwise (Ncoord,). + icrs_dec : ndarray of floats + ICRS declination coordinates, in units of radians, of either shape + (Ntimes,) if Ntimes >1, otherwise (Ncoord,). + """ + if telescope_frame.upper() == "MCMF": + if not hasmoon: + raise ValueError( + "Need to install `lunarsky` package to work with MCMF frame." + ) + + # Make sure that the library requested is actually permitted + if astrometry_library is None: + if hasmoon and isinstance(telescope_loc, MoonLocation): + astrometry_library = "astropy" + elif telescope_frame.upper() == "MCMF": + astrometry_library = "astropy" + else: + astrometry_library = "erfa" + + if astrometry_library not in ["erfa", "astropy"]: + raise ValueError( + "Requested coordinate transformation library is not supported, please " + "select either 'erfa' or 'astropy' for astrometry_library." 
+    )
+
+    ra_coord = app_ra * units.rad
+    dec_coord = app_dec * units.rad
+
+    # Check here to make sure that ra_coord and dec_coord are the same length,
+    # either 1 or len(time_array)
+    multi_coord = ra_coord.size != 1
+    if ra_coord.shape != dec_coord.shape:
+        raise ValueError("app_ra and app_dec must be the same shape.")
+
+    if isinstance(telescope_loc, EarthLocation) or (
+        hasmoon and isinstance(telescope_loc, MoonLocation)
+    ):
+        site_loc = telescope_loc
+    elif telescope_frame.upper() == "MCMF":
+        site_loc = MoonLocation.from_selenodetic(
+            telescope_loc[1] * (180.0 / np.pi),
+            telescope_loc[0] * (180.0 / np.pi),
+            height=telescope_loc[2],
+            ellipsoid=ellipsoid,
+        )
+    else:
+        site_loc = EarthLocation.from_geodetic(
+            telescope_loc[1] * (180.0 / np.pi),
+            telescope_loc[0] * (180.0 / np.pi),
+            height=telescope_loc[2],
+        )
+
+    if (
+        hasmoon
+        and isinstance(site_loc, MoonLocation)
+        and astrometry_library != "astropy"
+    ):
+        raise NotImplementedError(
+            "MoonLocation telescopes are only supported with the 'astropy' astrometry "
+            "library"
+        )
+
+    assert time_array.size > 0
+    if isinstance(time_array, Time):
+        time_obj_array = time_array
+    else:
+        time_obj_array = Time(time_array, format="jd", scale="utc")
+
+    if time_obj_array.size != 1:
+        if (time_obj_array.shape != ra_coord.shape) and multi_coord:
+            raise ValueError(
+                "time_array must be either of length 1 (single "
+                "float) or same length as ra and dec."
+            )
+    elif time_obj_array.ndim == 0:
+        # Make the array at least 1-dimensional so we don't run into indexing
+        # issues later.
+        time_obj_array = Time([time_obj_array])
+
+    if astrometry_library == "astropy":
+        if hasmoon and isinstance(site_loc, MoonLocation):
+            time_obj_array = LTime(time_obj_array, location=site_loc)
+        else:
+            time_obj_array = Time(time_obj_array, location=site_loc)
+
+        az_coord, el_coord = erfa.hd2ae(
+            np.mod(
+                time_obj_array.sidereal_time("apparent").rad - ra_coord.to_value("rad"),
+                2 * np.pi,
+            ),
+            dec_coord.to_value("rad"),
+            site_loc.lat.rad,
+        )
+
+        if isinstance(site_loc, EarthLocation):
+            sky_coord = SkyCoord(
+                az_coord * units.rad,
+                el_coord * units.rad,
+                frame="altaz",
+                location=site_loc,
+                obstime=time_obj_array,
+            )
+        else:
+            sky_coord = LunarSkyCoord(
+                az_coord * units.rad,
+                el_coord * units.rad,
+                frame="lunartopo",
+                location=site_loc,
+                obstime=time_obj_array,
+            )
+
+        coord_data = sky_coord.transform_to("icrs")
+        icrs_ra = coord_data.ra.rad
+        icrs_dec = coord_data.dec.rad
+    elif astrometry_library == "erfa":
+        # Get IERS data, which is needed for highest precision
+        polar_motion_data = iers.earth_orientation_table.get()
+
+        pm_x_array, pm_y_array = polar_motion_data.pm_xy(time_obj_array)
+        pm_x_array = pm_x_array.to_value("rad")
+        pm_y_array = pm_y_array.to_value("rad")
+
+        bpn_matrix = erfa.pnm06a(time_obj_array.tt.jd1, time_obj_array.tt.jd2)
+        cip_x, cip_y = erfa.bpn2xy(bpn_matrix)
+        cio_s = erfa.s06(time_obj_array.tt.jd1, time_obj_array.tt.jd2, cip_x, cip_y)
+        eqn_org = erfa.eors(bpn_matrix, cio_s)
+
+        # Observed to ICRS via ERFA
+        icrs_ra, icrs_dec = erfa.atoc13(
+            "r",
+            ra_coord.to_value("rad") + eqn_org,
+            dec_coord.to_value("rad"),
+            time_obj_array.utc.jd1,
+            time_obj_array.utc.jd2,
+            time_obj_array.delta_ut1_utc,
+            site_loc.lon.rad,
+            site_loc.lat.rad,
+            site_loc.height.value,
+            pm_x_array,
+            pm_y_array,
+            0,  # atm pressure, used for refraction (ignored)
+            0,  # amb temperature, used for refraction (ignored)
+            0,  # rel humidity, used for refraction (ignored)
+            0,  # wavelength, used for refraction (ignored)
+        )
+
+    # Return the two RA/Dec arrays
+    return icrs_ra, icrs_dec
+
+
+def calc_parallactic_angle(*, app_ra, app_dec, lst_array, telescope_lat):
+    """
+    Calculate the parallactic angle between RA/Dec and the AltAz frame.
+
+    Parameters
+    ----------
+    app_ra : ndarray of floats
+        Array of apparent RA values in units of radians, shape (Ntimes,).
+    app_dec : ndarray of floats
+        Array of apparent dec values in units of radians, shape (Ntimes,).
+    telescope_lat : float
+        Latitude of the observatory, in units of radians.
+    lst_array : float or ndarray of float
+        Array of local apparent sidereal times to calculate position angle values
+        for, in units of radians. Can either be a single float or an array of shape
+        (Ntimes,).
+
+    Returns
+    -------
+    parallactic_angle : ndarray of floats
+        Array of parallactic angles, in units of radians, shape (Ntimes,).
+    """
+    # This is just a simple wrapper around the pas function in ERFA
+    return erfa.pas(app_ra, app_dec, lst_array, telescope_lat)
+
+
+def calc_frame_pos_angle(
+    *,
+    time_array,
+    app_ra,
+    app_dec,
+    telescope_loc,
+    ref_frame,
+    ref_epoch=None,
+    telescope_frame="itrs",
+    ellipsoid="SPHERE",
+    offset_pos=(np.pi / 360.0),
+):
+    """
+    Calculate a position angle given apparent position and reference frame.
+
+    This function is used to determine the position angle between the great
+    circle of declination in apparent coordinates, versus that in a given
+    reference frame. Note that this is slightly different than parallactic
+    angle, which is the difference between apparent declination and elevation.
+
+    Parameters
+    ----------
+    time_array : ndarray of floats
+        Array of Julian dates to calculate position angle values for, of shape
+        (Ntimes,).
+    app_ra : ndarray of floats
+        Array of apparent RA values in units of radians, shape (Ntimes,).
+    app_dec : ndarray of floats
+        Array of apparent dec values in units of radians, shape (Ntimes,).
+    telescope_loc : tuple of floats or EarthLocation
+        ITRF latitude, longitude, and altitude (rel to sea-level) of the observer.
+        Can either be provided as an astropy EarthLocation, or an array-like of shape
+        (3,) containing the latitude, longitude, and altitude, in that order, with units
+        of radians, radians, and meters, respectively.
+    ref_frame : str
+        Coordinate frame to calculate position angles for. Can be any of the
+        several supported frames in astropy (a limited list: fk4, fk5, icrs,
+        gcrs, cirs, galactic).
+    ref_epoch : str or float
+        Epoch of the coordinates, only used when ref_frame = fk4 or fk5. Given
+        in units of fractional years, either as a float or as a string with
+        the epoch abbreviation (e.g., Julian epoch 2000.0 would be J2000.0).
+    telescope_frame: str, optional
+        Reference frame for telescope location. Options are itrs (default) or mcmf.
+        Only used if telescope_loc is not an EarthLocation or MoonLocation.
+    ellipsoid : str
+        Ellipsoid to use for lunar coordinates. Must be one of "SPHERE",
+        "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default
+        is "SPHERE". Only used if frame is mcmf.
+    offset_pos : float
+        Distance of the offset position used to calculate the frame PA. Default
+        is 0.5 degrees, which should be sufficient for most applications.
+
+    Returns
+    -------
+    frame_pa : ndarray of floats
+        Array of position angles, in units of radians.
+    """
+    # Check to see if the position angles should default to zero
+    if (ref_frame is None) or (ref_frame == "topo"):
+        # No-op detected, ENGAGE MAXIMUM SNARK!
+        return np.zeros_like(time_array)
+
+    assert offset_pos > 0, "offset_pos must be greater than 0."
+
+    # This creates an array of unique entries of ra + dec + time, since the processing
+    # time for each element can be non-negligible, and entries along the Nblt axis can
+    # be highly redundant.
+    unique_mask = np.union1d(
+        np.union1d(
+            np.unique(app_ra, return_index=True)[1],
+            np.unique(app_dec, return_index=True)[1],
+        ),
+        np.unique(time_array, return_index=True)[1],
+    )
+
+    # Pluck out the unique entries for each
+    unique_ra = app_ra[unique_mask]
+    unique_dec = app_dec[unique_mask]
+    unique_time = time_array[unique_mask]
+
+    # Figure out how many elements we need to transform
+    n_coord = len(unique_mask)
+
+    # Offset north/south positions by 0.5 deg, such that the PA is determined over a
+    # 1 deg arc.
+    up_dec = unique_dec + offset_pos
+    dn_dec = unique_dec - offset_pos
+    up_ra = dn_ra = unique_ra
+
+    # Wrap the positions if they happen to go over the poles
+    up_ra[up_dec > (np.pi / 2.0)] = np.mod(
+        up_ra[up_dec > (np.pi / 2.0)] + np.pi, 2.0 * np.pi
+    )
+    up_dec[up_dec > (np.pi / 2.0)] = np.pi - up_dec[up_dec > (np.pi / 2.0)]
+
+    dn_ra[-dn_dec > (np.pi / 2.0)] = np.mod(
+        dn_ra[-dn_dec > (np.pi / 2.0)] + np.pi, 2.0 * np.pi
+    )
+    dn_dec[-dn_dec > (np.pi / 2.0)] = (-np.pi) - dn_dec[-dn_dec > (np.pi / 2.0)]
+
+    # Run the set of offset coordinates through the "reverse" transform. The two offset
+    # positions are concat'd together to help reduce overheads
+    ref_ra, ref_dec = calc_sidereal_coords(
+        time_array=np.tile(unique_time, 2),
+        app_ra=np.concatenate((dn_ra, up_ra)),
+        app_dec=np.concatenate((dn_dec, up_dec)),
+        telescope_loc=telescope_loc,
+        coord_frame=ref_frame,
+        telescope_frame=telescope_frame,
+        ellipsoid=ellipsoid,
+        coord_epoch=ref_epoch,
+    )
+
+    # Use the pas function from ERFA to calculate the position angle. The negative sign
+    # is here because we're measuring PA of app -> frame, but we want frame -> app.
+    unique_pa = -erfa.pas(
+        ref_ra[:n_coord], ref_dec[:n_coord], ref_ra[n_coord:], ref_dec[n_coord:]
+    )
+
+    # Finally, we have to go back through and "fill in" the redundant entries
+    frame_pa = np.zeros_like(app_ra)
+    for idx in range(n_coord):
+        select_mask = np.logical_and(
+            np.logical_and(unique_ra[idx] == app_ra, unique_dec[idx] == app_dec),
+            unique_time[idx] == time_array,
+        )
+        frame_pa[select_mask] = unique_pa[idx]
+
+    return frame_pa
+
+
+def lookup_jplhorizons(
+    target_name,
+    time_array,
+    *,
+    telescope_loc=None,
+    high_cadence=False,
+    force_indv_lookup=None,
+):
+    """
+    Look up solar system body coordinates via the JPL-Horizons service.
+
+    This utility is useful for generating ephemerides, which can then be interpolated in
+    order to provide positional data for a target which is moving, such as planetary
+    bodies and other solar system objects. Use of this function requires the
+    installation of the `astroquery` module.
+
+    Parameters
+    ----------
+    target_name : str
+        Name of the target to gather an ephemeris for. Must match the name
+        in the JPL-Horizons database.
+    time_array : array-like of float
+        Times in UTC Julian days to gather an ephemeris for.
+    telescope_loc : tuple of floats or EarthLocation
+        ITRS latitude, longitude, and altitude (rel to sea-level) of the observer.
+        Can either be provided as an EarthLocation object, or an
+        array-like of shape (3,) containing the latitude, longitude, and altitude,
+        in that order, with units of radians, radians, and meters, respectively.
+    high_cadence : bool
+        If set to True, will calculate ephemeris points every 3 minutes in time, as
+        opposed to the default of every 3 hours.
+    force_indv_lookup : bool
+        If set to True, will calculate coordinate values for each value found within
+        `time_array`. If False, a regularized time grid is sampled that encloses the
+        values contained within `time_array`. Default is False, unless `time_array` is
+        of length 1, in which case the default is set to True.
+
+    Returns
+    -------
+    ephem_times : ndarray of float
+        Times for which the ephemeris values were calculated, in UTC Julian days.
+    ephem_ra : ndarray of float
+        ICRS Right ascension of the target at the values within `ephem_times`, in
+        units of radians.
+    ephem_dec : ndarray of float
+        ICRS Declination of the target at the values within `ephem_times`, in units
+        of radians.
+    ephem_dist : ndarray of float
+        Distance of the target relative to the observer, at the values within
+        `ephem_times`, in units of AU.
+    ephem_vel : ndarray of float
+        Velocity of the target relative to the observer, at the values within
+        `ephem_times`, in units of km/sec.
+    """
+    try:
+        from astroquery.jplhorizons import Horizons
+    except ImportError as err:  # pragma: no cover
+        raise ImportError(
+            "astroquery is not installed but is required for "
+            "planet ephemeris functionality"
+        ) from err
+    from json import load as json_load
+    from os.path import join as path_join
+
+    from pyuvdata.data import DATA_PATH
+
+    # Get the telescope location into a format that JPL-Horizons can understand,
+    # which is nominally a dict w/ entries for lon (units of deg), lat (units of
+    # deg), and elevation (units of km).
+    if isinstance(telescope_loc, EarthLocation):
+        site_loc = {
+            "lon": telescope_loc.lon.deg,
+            "lat": telescope_loc.lat.deg,
+            "elevation": telescope_loc.height.to_value(unit=units.km),
+        }
+    elif hasmoon and isinstance(telescope_loc, MoonLocation):
+        raise NotImplementedError(
+            "Cannot lookup JPL positions for telescopes with a MoonLocation"
+        )
+    elif telescope_loc is None:
+        # Setting to None will report the geocentric position
+        site_loc = None
+    else:
+        site_loc = {
+            "lon": telescope_loc[1] * (180.0 / np.pi),
+            "lat": telescope_loc[0] * (180.0 / np.pi),
+            "elevation": telescope_loc[2] * (0.001),  # m -> km
+        }
+
+    # If force_indv_lookup is True, or unset but only providing a single value, then
+    # just calculate the RA/Dec for the times requested rather than creating a table
+    # to interpolate from.
+    if force_indv_lookup or (
+        (np.array(time_array).size == 1) and (force_indv_lookup is None)
+    ):
+        epoch_list = np.unique(time_array)
+        if len(epoch_list) > 50:
+            raise ValueError(
+                "Requesting too many individual ephem points from JPL-Horizons. This "
+                "can be remedied by setting force_indv_lookup=False or limiting the "
+                "number of values in time_array."
+            )
+    else:
+        # When querying for multiple times, it's faster (and kinder to the
+        # good folks at JPL) to create a range to query, and then interpolate
+        # between values. The extra buffer of 0.001 or 0.25 days for high and
+        # low cadence is to give enough data points to allow for spline
+        # interpolation of the data.
+        if high_cadence:
+            start_time = np.min(time_array) - 0.001
+            stop_time = np.max(time_array) + 0.001
+            step_time = "3m"
+            n_entries = (stop_time - start_time) * (1440.0 / 3.0)
+        else:
+            # The start/stop times here are set up to maximize reusability of the
+            # data, since astroquery appears to cache the results from previous
+            # queries.
+            start_time = (0.25 * np.floor(4.0 * np.min(time_array))) - 0.25
+            stop_time = (0.25 * np.ceil(4.0 * np.max(time_array))) + 0.25
+            step_time = "3h"
+            n_entries = (stop_time - start_time) * (24.0 / 3.0)
+        # We don't want to overtax the JPL service, so limit ourselves to 1000
+        # individual queries at a time. Note that this is likely a conservative
+        # cap for JPL-Horizons, but there should be exceptionally few applications
+        # that actually require more than this.
+        if n_entries > 1000:
+            if (len(np.unique(time_array)) <= 50) and (force_indv_lookup is None):
+                # If we have a _very_ sparse set of epochs, pass that along instead
+                epoch_list = np.unique(time_array)
+            else:
+                # Otherwise, time to raise an error
+                raise ValueError(
+                    "Too many ephem points requested from JPL-Horizons. This "
+                    "can be remedied by setting high_cadence=False or limiting "
+                    "the number of values in time_array."
+                )
+        else:
+            epoch_list = {
+                "start": Time(start_time, format="jd").isot,
+                "stop": Time(stop_time, format="jd").isot,
+                "step": step_time,
+            }
+    # Check to make sure dates are within the 1700-2200 time range,
+    # since not all targets are supported outside of this range
+    if (np.min(time_array) < 2341973.0) or (np.max(time_array) > 2524593.0):
+        raise ValueError(
+            "No current support for JPL ephems outside of 1700 - 2200 AD. "
+            "Check back later (or possibly earlier)..."
+        )
+
+    # JPL-Horizons has a separate catalog with what it calls 'major bodies',
+    # and will throw an error if you use the wrong catalog when calling for
+    # astrometry. We'll use the dict below to capture this behavior.
+    with open(path_join(DATA_PATH, "jpl_major_bodies.json"), "r") as fhandle:
+        major_body_dict = json_load(fhandle)
+
+    target_id = target_name
+    id_type = "smallbody"
+    # If we find the target in the major body database, then we can extract the
+    # target ID to make the query a bit more robust (otherwise JPL-Horizons will fail
+    # because the id lookup will find multiple partial matches: e.g., "Mars" will be
+    # matched with "Mars", "Mars Explorer", "Mars Barycenter"..., and JPL-Horizons will
+    # not know which to choose).
+    if target_name in major_body_dict.keys():
+        target_id = major_body_dict[target_name]
+        id_type = None
+
+    query_obj = Horizons(
+        id=target_id, location=site_loc, epochs=epoch_list, id_type=id_type
+    )
+    # If not in the major bodies catalog, try the minor bodies list, and if
+    # still not found, throw an error.
+    try:
+        ephem_data = query_obj.ephemerides(extra_precision=True)
+    except KeyError:
+        # This is a fix for an astroquery + JPL-Horizons bug, that's related to
+        # an API change on JPL's side. In this case, the source is identified, but
+        # astroquery can't correctly parse the return message from JPL-Horizons.
+        # See astroquery issue #2169.
+        ephem_data = query_obj.ephemerides(extra_precision=False)  # pragma: no cover
+    except ValueError as err:
+        query_obj._session.close()
+        if "Unknown target" in str(err):
+            raise ValueError(
+                "Target ID is not recognized in either the small or major bodies "
+                "catalogs, please consult the JPL-Horizons database for supported "
+                "targets (https://ssd.jpl.nasa.gov/?horizons)."
+            ) from err
+        else:
+            raise  # pragma: no cover
+    # This is explicitly closed here to trap a bug that occasionally throws an
+    # unexpected warning, see astroquery issue #1807
+    query_obj._session.close()
+
+    # Now that we have the ephem data, extract out the relevant data
+    ephem_times = np.array(ephem_data["datetime_jd"])
+    ephem_ra = np.array(ephem_data["RA"]) * (np.pi / 180.0)
+    ephem_dec = np.array(ephem_data["DEC"]) * (np.pi / 180.0)
+    ephem_dist = np.array(ephem_data["delta"])  # AU
+    ephem_vel = np.array(ephem_data["delta_rate"])  # km/s
+
+    return ephem_times, ephem_ra, ephem_dec, ephem_dist, ephem_vel
+
+
+def interpolate_ephem(
+    *, time_array, ephem_times, ephem_ra, ephem_dec, ephem_dist=None, ephem_vel=None
+):
+    """
+    Interpolate ephemerides to give positions for requested times.
+
+    This is a simple tool for calculating interpolated RA and Dec positions, as well
+    as distances and velocities, for a given ephemeris. Under the hood, the method
+    uses a cubic spline interpolation to calculate values at the requested times,
+    provided that there are enough values to interpolate over to do so (requires
+    >= 4 points), otherwise a linear interpolation is used.
+
+    Parameters
+    ----------
+    time_array : array-like of floats
+        Times to interpolate positions for, in UTC Julian days.
+    ephem_times : array-like of floats
+        Times in UTC Julian days which match the recorded positions of the target.
+        Must be array-like, of shape (Npts,), where Npts is the number
+        of ephemeris points.
+    ephem_ra : array-like of floats
+        Right ascension of the target, at the times given in `ephem_times`. Units are
+        in radians, must have the same shape as `ephem_times`.
+    ephem_dec : array-like of floats
+        Declination of the target, at the times given in `ephem_times`. Units are
+        in radians, must have the same shape as `ephem_times`.
+    ephem_dist : array-like of floats
+        Distance of the target from the observer, at the times given in `ephem_times`.
+        Optional argument, in units of AU. Must have the same shape as
+        `ephem_times`.
+    ephem_vel : array-like of floats
+        Velocities of the target, at the times given in `ephem_times`. Optional
+        argument, in units of km/sec. Must have the same shape as `ephem_times`.
+
+    Returns
+    -------
+    ra_vals : ndarray of float
+        Interpolated RA values, returned as an ndarray of floats with
+        units of radians, and the same shape as `time_array`.
+    dec_vals : ndarray of float
+        Interpolated declination values, returned as an ndarray of floats with
+        units of radians, and the same shape as `time_array`.
+    dist_vals : None or ndarray of float
+        If `ephem_dist` was provided, an ndarray of floats (with same shape as
+        `time_array`) with the interpolated target distances, in units of AU.
+        If `ephem_dist` was not provided, this returns as None.
+    vel_vals : None or ndarray of float
+        If `ephem_vel` was provided, an ndarray of floats (with same shape as
+        `time_array`) with the interpolated target velocities, in units of km/sec.
+        If `ephem_vel` was not provided, this returns as None.
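+
+    Examples
+    --------
+    A minimal sketch with a fabricated two-point ephemeris (all values below are
+    arbitrary and purely illustrative):
+
+    >>> import numpy as np
+    >>> from pyuvdata.utils.phasing import interpolate_ephem
+    >>> ra_vals, dec_vals, dist_vals, vel_vals = interpolate_ephem(
+    ...     time_array=np.array([2459000.25, 2459000.75]),
+    ...     ephem_times=np.array([2459000.0, 2459001.0]),
+    ...     ephem_ra=np.array([0.0, 0.1]),
+    ...     ephem_dec=np.array([0.5, 0.6]),
+    ... )
+    >>> dist_vals is None and vel_vals is None  # optional inputs were omitted
+    True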
+ + """ + # We're importing this here since it's only used for this one function + from scipy.interpolate import interp1d + + ephem_shape = np.array(ephem_times).shape + + # Make sure that things look reasonable + if np.array(ephem_ra).shape != ephem_shape: + raise ValueError("ephem_ra must have the same shape as ephem_times.") + + if np.array(ephem_dec).shape != ephem_shape: + raise ValueError("ephem_dec must have the same shape as ephem_times.") + + if (np.array(ephem_dist).shape != ephem_shape) and (ephem_dist is not None): + raise ValueError("ephem_dist must have the same shape as ephem_times.") + + if (np.array(ephem_vel).shape != ephem_shape) and (ephem_vel is not None): + raise ValueError("ephem_vel must have the same shape as ephem_times.") + + ra_vals = np.zeros_like(time_array, dtype=float) + dec_vals = np.zeros_like(time_array, dtype=float) + dist_vals = None if ephem_dist is None else np.zeros_like(time_array, dtype=float) + vel_vals = None if ephem_vel is None else np.zeros_like(time_array, dtype=float) + + if len(ephem_times) == 1: + ra_vals += ephem_ra + dec_vals += ephem_dec + if ephem_dist is not None: + dist_vals += ephem_dist + if ephem_vel is not None: + vel_vals += ephem_vel + else: + if len(ephem_times) > 3: + interp_kind = "cubic" + else: + interp_kind = "linear" + + # If we have values that line up perfectly, just use those directly + select_mask = np.isin(time_array, ephem_times) + if np.any(select_mask): + time_select = time_array[select_mask] + ra_vals[select_mask] = interp1d(ephem_times, ephem_ra, kind="nearest")( + time_select + ) + dec_vals[select_mask] = interp1d(ephem_times, ephem_dec, kind="nearest")( + time_select + ) + if ephem_dist is not None: + dist_vals[select_mask] = interp1d( + ephem_times, ephem_dist, kind="nearest" + )(time_select) + if ephem_vel is not None: + vel_vals[select_mask] = interp1d( + ephem_times, ephem_vel, kind="nearest" + )(time_select) + + # If we have values lining up between grid points, use spline interpolation + # to calculate their values + select_mask = ~select_mask + if np.any(select_mask): + time_select = time_array[select_mask] + ra_vals[select_mask] = interp1d(ephem_times, ephem_ra, kind=interp_kind)( + time_select + ) + dec_vals[select_mask] = interp1d(ephem_times, ephem_dec, kind=interp_kind)( + time_select + ) + if ephem_dist is not None: + dist_vals[select_mask] = interp1d( + ephem_times, ephem_dist, kind=interp_kind + )(time_select) + if ephem_vel is not None: + vel_vals[select_mask] = interp1d( + ephem_times, ephem_vel, kind=interp_kind + )(time_select) + + return (ra_vals, dec_vals, dist_vals, vel_vals) + + +def calc_app_coords( + *, + lon_coord, + lat_coord, + coord_frame="icrs", + coord_epoch=None, + coord_times=None, + coord_type="sidereal", + time_array=None, + lst_array=None, + telescope_loc=None, + telescope_frame="itrs", + ellipsoid=None, + pm_ra=None, + pm_dec=None, + vrad=None, + dist=None, +): + """ + Calculate apparent coordinates for several different coordinate types. + + This function calculates apparent positions at the current epoch. + + Parameters + ---------- + lon_coord : float or ndarray of float + Longitudinal (e.g., RA) coordinates, units of radians. Must match the same + shape as lat_coord. + lat_coord : float or ndarray of float + Latitudinal (e.g., Dec) coordinates, units of radians. Must match the same + shape as lon_coord. + coord_frame : string + The requested reference frame for the output coordinates, can be any frame + that is presently supported by astropy. 
+    coord_epoch : float or str or Time object
+        Epoch for ref_frame, nominally only used if converting to either the FK4 or
+        FK5 frames, in units of fractional years. If provided as a float and the
+        coord_frame is an FK4-variant, the value will be assumed to be given in
+        Besselian years (i.e., 1950 would be 'B1950'), otherwise the year is assumed
+        to be in Julian years.
+    coord_times : float or ndarray of float
+        Only used when `coord_type="ephem"`, the JD UTC time for each value of
+        `lon_coord` and `lat_coord`. These values are used to interpolate `lon_coord`
+        and `lat_coord` values to those times listed in `time_array`.
+    coord_type : str
+        Type of source to calculate coordinates for. Must be one of:
+        "sidereal" (fixed RA/Dec),
+        "ephem" (RA/Dec that moves with time),
+        "driftscan" (fixed az/el position),
+        "unprojected" (alias for "driftscan" with (Az, Alt) = (0 deg, 90 deg)).
+    time_array : float or ndarray of float or Time object
+        Times for which the apparent coordinates were calculated, in UTC JD. If more
+        than a single element, must be the same shape as lon_coord and lat_coord if
+        both of those are arrays (instead of single floats).
+    lst_array : float or ndarray of float, optional
+        Local apparent sidereal times matching `time_array`, in units of radians.
+        Only used for the "driftscan" and "unprojected" coordinate types; if not
+        supplied, values are calculated from `time_array` and `telescope_loc`.
+    telescope_loc : array-like of floats or EarthLocation or MoonLocation
+        ITRF latitude, longitude, and altitude (rel to sea-level) of the phase center
+        of the array. Can either be provided as an astropy EarthLocation, a lunarsky
+        MoonLocation, or a tuple of shape (3,) containing (in order) the latitude,
+        longitude, and altitude for a position on Earth in units of radians, radians,
+        and meters, respectively.
+    telescope_frame : str, optional
+        Reference frame for telescope location. Options are itrs (default) or mcmf.
+        Only used if telescope_loc is not an EarthLocation or MoonLocation.
+    ellipsoid : str
+        Ellipsoid to use for lunar coordinates. Must be one of "SPHERE",
+        "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default
+        is "SPHERE". Only used if frame is mcmf.
+    pm_ra : float or ndarray of float
+        Proper motion in RA of the source, expressed in units of milliarcsec / year.
+        Can either be a single float or array of shape (Ntimes,), although this must
+        be consistent with other parameters (namely ra_coord and dec_coord). Not
+        required, motion is calculated relative to the value of `coord_epoch`.
+    pm_dec : float or ndarray of float
+        Proper motion in Dec of the source, expressed in units of milliarcsec / year.
+        Can either be a single float or array of shape (Ntimes,), although this must
+        be consistent with other parameters (namely ra_coord and dec_coord). Not
+        required, motion is calculated relative to the value of `coord_epoch`.
+    vrad : float or ndarray of float
+        Radial velocity of the source, expressed in units of km / sec. Can either be
+        a single float or array of shape (Ntimes,), although this must be consistent
+        with other parameters (namely ra_coord and dec_coord). Not required.
+    dist : float or ndarray of float
+        Distance of the source, expressed in milliarcseconds. Can either be a single
+        float or array of shape (Ntimes,), although this must be consistent with other
+        parameters (namely ra_coord and dec_coord). Not required.
+
+    Returns
+    -------
+    app_ra : ndarray of floats
+        Apparent right ascension coordinates, in units of radians.
+    app_dec : ndarray of floats
+        Apparent declination coordinates, in units of radians.
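+
+    Examples
+    --------
+    A minimal sketch for a fixed (sidereal) ICRS source; the source position and
+    site below are arbitrary illustrative values:
+
+    >>> import numpy as np
+    >>> from astropy.coordinates import EarthLocation
+    >>> from pyuvdata.utils.phasing import calc_app_coords
+    >>> site = EarthLocation.from_geodetic(21.43, -30.72, height=1051.7)
+    >>> app_ra, app_dec = calc_app_coords(
+    ...     lon_coord=0.8, lat_coord=-0.5,
+    ...     time_array=np.array([2459000.5]), telescope_loc=site,
+    ... )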
+ """ + if isinstance(telescope_loc, EarthLocation) or ( + hasmoon and isinstance(telescope_loc, MoonLocation) + ): + site_loc = telescope_loc + if hasmoon and isinstance(telescope_loc, MoonLocation): + ellipsoid = MoonLocation.ellipsoid + elif telescope_frame.upper() == "MCMF": + if not hasmoon: + raise ValueError( + "Need to install `lunarsky` package to work with MCMF frame." + ) + if ellipsoid is None: + ellipsoid = "SPHERE" + site_loc = MoonLocation.from_selenodetic( + telescope_loc[1] * (180.0 / np.pi), + telescope_loc[0] * (180.0 / np.pi), + height=telescope_loc[2], + ellipsoid=ellipsoid, + ) + else: + site_loc = EarthLocation.from_geodetic( + telescope_loc[1] * (180.0 / np.pi), + telescope_loc[0] * (180.0 / np.pi), + height=telescope_loc[2], + ) + + # Time objects and unique don't seem to play well together, so we break apart + # their handling here + if isinstance(time_array, Time): + time_array = time_array.utc.jd + + unique_time_array, unique_mask = np.unique(time_array, return_index=True) + + if coord_type in ["driftscan", "unprojected"]: + if lst_array is None: + unique_lst = get_lst_for_time(unique_time_array, telescope_loc=site_loc) + else: + unique_lst = lst_array[unique_mask] + + if coord_type == "sidereal": + # If the coordinates are not in the ICRS frame, go ahead and transform them now + if coord_frame != "icrs": + icrs_ra, icrs_dec = transform_sidereal_coords( + longitude=lon_coord, + latitude=lat_coord, + in_coord_frame=coord_frame, + out_coord_frame="icrs", + in_coord_epoch=coord_epoch, + time_array=unique_time_array, + ) + else: + icrs_ra = lon_coord + icrs_dec = lat_coord + unique_app_ra, unique_app_dec = transform_icrs_to_app( + time_array=unique_time_array, + ra=icrs_ra, + dec=icrs_dec, + telescope_loc=site_loc, + pm_ra=pm_ra, + pm_dec=pm_dec, + vrad=vrad, + dist=dist, + ) + + elif coord_type == "driftscan": + # Use the ERFA function ae2hd, which will do all the heavy + # lifting for us + unique_app_ha, unique_app_dec = erfa.ae2hd( + lon_coord, lat_coord, site_loc.lat.rad + ) + # The above returns HA/Dec, so we just need to rotate by + # the LST to get back app RA and Dec + unique_app_ra = np.mod(unique_app_ha + unique_lst, 2 * np.pi) + unique_app_dec = unique_app_dec + np.zeros_like(unique_app_ra) + elif coord_type == "ephem": + interp_ra, interp_dec, _, _ = interpolate_ephem( + time_array=unique_time_array, + ephem_times=coord_times, + ephem_ra=lon_coord, + ephem_dec=lat_coord, + ) + if coord_frame != "icrs": + icrs_ra, icrs_dec = transform_sidereal_coords( + longitude=interp_ra, + latitude=interp_dec, + in_coord_frame=coord_frame, + out_coord_frame="icrs", + in_coord_epoch=coord_epoch, + time_array=unique_time_array, + ) + else: + icrs_ra = interp_ra + icrs_dec = interp_dec + # TODO: Vel and distance handling to be integrated here, once they are are + # needed for velocity frame tracking + unique_app_ra, unique_app_dec = transform_icrs_to_app( + time_array=unique_time_array, + ra=icrs_ra, + dec=icrs_dec, + telescope_loc=site_loc, + pm_ra=pm_ra, + pm_dec=pm_dec, + ) + elif coord_type == "unprojected": + # This is the easiest one - this is just supposed to be ENU, so set the + # apparent coords to the current lst and telescope_lat. + unique_app_ra = unique_lst.copy() + unique_app_dec = np.zeros_like(unique_app_ra) + site_loc.lat.rad + else: + raise ValueError("Object type %s is not recognized." % coord_type) + + # Now that we've calculated all the unique values, time to backfill through the + # "redundant" entries in the Nblt axis. 
+    app_ra = np.zeros(np.array(time_array).shape)
+    app_dec = np.zeros(np.array(time_array).shape)
+
+    for idx, unique_time in enumerate(unique_time_array):
+        select_mask = time_array == unique_time
+        app_ra[select_mask] = unique_app_ra[idx]
+        app_dec[select_mask] = unique_app_dec[idx]
+
+    return app_ra, app_dec
+
+
+def calc_sidereal_coords(
+    *,
+    time_array,
+    app_ra,
+    app_dec,
+    telescope_loc,
+    coord_frame,
+    telescope_frame="itrs",
+    ellipsoid=None,
+    coord_epoch=None,
+):
+    """
+    Calculate sidereal coordinates given apparent coordinates.
+
+    This function calculates coordinates in the requested frame (at a given epoch)
+    from a set of apparent coordinates.
+
+    Parameters
+    ----------
+    time_array : float or ndarray of float or Time object
+        Times for which the apparent coordinates were calculated, in UTC JD. Must
+        match the shape of app_ra and app_dec.
+    app_ra : float or ndarray of float
+        Array of apparent right ascension coordinates, units of radians. Must match
+        the shape of time_array and app_dec.
+    app_dec : float or ndarray of float
+        Array of apparent declination coordinates, units of radians. Must match
+        the shape of time_array and app_ra.
+    telescope_loc : tuple of floats or EarthLocation
+        ITRF latitude, longitude, and altitude (rel to sea-level) of the phase center
+        of the array. Can either be provided as an astropy EarthLocation, or a tuple
+        of shape (3,) containing (in order) the latitude, longitude, and altitude,
+        in units of radians, radians, and meters, respectively.
+    coord_frame : string
+        The requested reference frame for the output coordinates, can be any frame
+        that is presently supported by astropy.
+    telescope_frame : str, optional
+        Reference frame for telescope location. Options are itrs (default) or mcmf.
+        Only used if telescope_loc is not an EarthLocation or MoonLocation.
+    ellipsoid : str
+        Ellipsoid to use for lunar coordinates. Must be one of "SPHERE",
+        "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default
+        is "SPHERE". Only used if frame is mcmf.
+    coord_epoch : float or str or Time object
+        Epoch for ref_frame, nominally only used if converting to either the FK4 or
+        FK5 frames, in units of fractional years. If provided as a float and the
+        ref_frame is an FK4-variant, the value will be assumed to be given in
+        Besselian years (i.e., 1950 would be 'B1950'), otherwise the year is assumed
+        to be in Julian years.
+
+    Returns
+    -------
+    ref_ra : ndarray of floats
+        Right ascension coordinates in the requested frame, in units of radians.
+        Either shape (Ntimes,) if Ntimes > 1, otherwise (Ncoord,).
+    ref_dec : ndarray of floats
+        Declination coordinates in the requested frame, in units of radians.
+        Either shape (Ntimes,) if Ntimes > 1, otherwise (Ncoord,).
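+
+    Examples
+    --------
+    A minimal sketch (arbitrary values), with `telescope_loc` given as a
+    (latitude, longitude, altitude) tuple in radians, radians, and meters:
+
+    >>> import numpy as np
+    >>> from pyuvdata.utils.phasing import calc_sidereal_coords
+    >>> ref_ra, ref_dec = calc_sidereal_coords(
+    ...     time_array=np.array([2459000.5]),
+    ...     app_ra=np.array([0.8]),
+    ...     app_dec=np.array([-0.5]),
+    ...     telescope_loc=(-0.536, 0.374, 1051.7),
+    ...     coord_frame="icrs",
+    ... )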
+ """ + # Check to make sure that we have a properly formatted epoch for our in-bound + # coordinate frame + epoch = None + if isinstance(coord_epoch, str) or isinstance(coord_epoch, Time): + # If its a string or a Time object, we don't need to do anything more + epoch = Time(coord_epoch) + elif coord_epoch is not None: + if coord_frame.lower() in ["fk4", "fk4noeterms"]: + epoch = Time(coord_epoch, format="byear") + else: + epoch = Time(coord_epoch, format="jyear") + + if telescope_frame == "mcmf" and ellipsoid is None: + ellipsoid = "SPHERE" + + icrs_ra, icrs_dec = transform_app_to_icrs( + time_array=time_array, + app_ra=app_ra, + app_dec=app_dec, + telescope_loc=telescope_loc, + telescope_frame=telescope_frame, + ellipsoid=ellipsoid, + ) + + if coord_frame == "icrs": + ref_ra, ref_dec = (icrs_ra, icrs_dec) + else: + ref_ra, ref_dec = transform_sidereal_coords( + longitude=icrs_ra, + latitude=icrs_dec, + in_coord_frame="icrs", + out_coord_frame=coord_frame, + out_coord_epoch=epoch, + time_array=time_array, + ) + + return ref_ra, ref_dec + + +def uvw_track_generator( + *, + lon_coord=None, + lat_coord=None, + coord_frame="icrs", + coord_epoch=None, + coord_type="sidereal", + time_array=None, + telescope_loc=None, + telescope_frame="itrs", + ellipsoid=None, + antenna_positions=None, + antenna_numbers=None, + ant_1_array=None, + ant_2_array=None, + uvw_array=None, + force_postive_u=False, +): + """ + Calculate uvw coordinates (among other values) for a given position on the sky. + + This function is meant to be a user-friendly wrapper around several pieces of code + for effectively simulating a track. + + Parameters + ---------- + lon_coord : float or ndarray of float + Longitudinal (e.g., RA) coordinates, units of radians. Must match the same + shape as lat_coord. + lat_coord : float or ndarray of float + Latitudinal (e.g., Dec) coordinates, units of radians. Must match the same + shape as lon_coord. + coord_frame : string + The requested reference frame for the output coordinates, can be any frame + that is presently supported by astropy. + coord_epoch : float or str or Time object, optional + Epoch for ref_frame, nominally only used if converting to either the FK4 or + FK5 frames, in units of fractional years. If provided as a float and the + ref_frame is an FK4-variant, value will assumed to be given in Besselian + years (i.e., 1950 would be 'B1950'), otherwise the year is assumed to be + in Julian years. + coord_type : str + Type of source to calculate coordinates for. Must be one of: + "sidereal" (fixed RA/Dec), + "ephem" (RA/Dec that moves with time), + "driftscan" (fixed az/el position), + "unprojected" (alias for "driftscan" with (Az, Alt) = (0 deg, 90 deg)). + time_array : ndarray of float or Time object + Times for which the apparent coordinates were calculated, in UTC JD. Must + match the shape of lon_coord and lat_coord. + telescope_loc : array-like of floats or EarthLocation or MoonLocation + ITRF latitude, longitude, and altitude (rel to sea-level) of the phase center + of the array. Can either be provided as an astropy EarthLocation, a lunarsky + Moonlocation, or a tuple of shape (3,) containing (in order) the latitude, + longitude, and altitude for a position on Earth in units of degrees, degrees, + and meters, respectively. + telescope_frame : str, optional + Reference frame for latitude/longitude/altitude. Options are itrs (default) or + mcmf. Only used if telescope_loc is not an EarthLocation or MoonLocation. + ellipsoid : str + Ellipsoid to use for lunar coordinates. 
Must be one of "SPHERE",
+        "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default
+        is "SPHERE". Only used if frame is mcmf.
+    antenna_positions : ndarray of float
+        List of antenna positions relative to array center in ECEF coordinates,
+        required if not providing `uvw_array`. Shape is (Nants, 3).
+    antenna_numbers : ndarray of int, optional
+        List of antenna numbers, ordered in the same way as `antenna_positions` (e.g.,
+        `antenna_numbers[0]` should give the number of the antenna that resides at ECEF
+        position given by `antenna_positions[0]`). Shape is (Nants,), required if
+        supplying ant_1_array and ant_2_array.
+    ant_1_array : ndarray of int, optional
+        Antenna number of the first antenna in the baseline pair, for all baselines.
+        Required if not providing `uvw_array`, shape is (Nblts,). If not supplied, then
+        the method will automatically fill in ant_1_array with all unique antenna
+        pairings for each time/position.
+    ant_2_array : ndarray of int, optional
+        Antenna number of the second antenna in the baseline pair, for all baselines.
+        Required if not providing `uvw_array`, shape is (Nblts,). If not supplied, then
+        the method will automatically fill in ant_2_array with all unique antenna
+        pairings for each time/position.
+    uvw_array : ndarray of float, optional
+        Array of baseline coordinates (in ENU), required if not deriving new
+        coordinates from antenna positions. Setting this value will cause antenna
+        positions to be ignored. Shape is (Nblts, 3).
+    force_positive_u : bool, optional
+        If set to True, then forces the conjugation of each individual baseline to be
+        set such that the uvw coordinates land on the positive-u side of the uv-plane.
+        Default is False.
+
+    Returns
+    -------
+    obs_dict : dict
+        Dictionary containing the results of the simulation, which includes:
+        "uvw" the uvw-coordinates (meters),
+        "app_ra" apparent RA of the sources (radians),
+        "app_dec" apparent Dec of the sources (radians),
+        "frame_pa" angle between apparent north and `coord_frame` north (radians),
+        "lst" local apparent sidereal time (radians),
+        "site_loc" EarthLocation or MoonLocation for the telescope site.
+    """
+    if isinstance(telescope_loc, EarthLocation) or (
+        hasmoon and isinstance(telescope_loc, MoonLocation)
+    ):
+        site_loc = telescope_loc
+    elif telescope_frame.upper() == "MCMF":
+        if not hasmoon:
+            raise ValueError(
+                "Need to install `lunarsky` package to work with MCMF frame."
+            )
+        if ellipsoid is None:
+            ellipsoid = "SPHERE"
+
+        site_loc = MoonLocation.from_selenodetic(
+            Angle(telescope_loc[1], unit="deg"),
+            Angle(telescope_loc[0], unit="deg"),
+            telescope_loc[2],
+            ellipsoid=ellipsoid,
+        )
+    else:
+        site_loc = EarthLocation.from_geodetic(
+            Angle(telescope_loc[1], unit="deg"),
+            Angle(telescope_loc[0], unit="deg"),
+            height=telescope_loc[2],
+        )
+
+    if not isinstance(lon_coord, np.ndarray):
+        lon_coord = np.array(lon_coord)
+    if not isinstance(lat_coord, np.ndarray):
+        lat_coord = np.array(lat_coord)
+    if not isinstance(time_array, np.ndarray):
+        time_array = np.array(time_array)
+
+    if lon_coord.ndim == 0:
+        lon_coord = lon_coord.reshape(1)
+    if lat_coord.ndim == 0:
+        lat_coord = lat_coord.reshape(1)
+    if time_array.ndim == 0:
+        time_array = time_array.reshape(1)
+
+    Ntimes = len(time_array)
+    if uvw_array is None:
+        if all(item is None for item in [antenna_numbers, ant_1_array, ant_2_array]):
+            antenna_numbers = np.arange(1, 1 + len(antenna_positions))
+            ant_1_array = []
+            ant_2_array = []
+            for idx in range(len(antenna_positions)):
+                for jdx in range(idx + 1, len(antenna_positions)):
+                    ant_1_array.append(idx + 1)
+                    ant_2_array.append(jdx + 1)
+
+            Nbase = len(ant_1_array)
+
+            ant_1_array = np.tile(ant_1_array, Ntimes)
+            ant_2_array = np.tile(ant_2_array, Ntimes)
+            if len(lon_coord) == len(time_array):
+                lon_coord = np.repeat(lon_coord, Nbase)
+                lat_coord = np.repeat(lat_coord, Nbase)
+
+            time_array = np.repeat(time_array, Nbase)
+
+    lst_array = get_lst_for_time(jd_array=time_array, telescope_loc=site_loc)
+    app_ra, app_dec = calc_app_coords(
+        lon_coord=lon_coord,
+        lat_coord=lat_coord,
+        coord_frame=coord_frame,
+        coord_type=coord_type,
+        time_array=time_array,
+        lst_array=lst_array,
+        telescope_loc=site_loc,
+    )
+
+    frame_pa = calc_frame_pos_angle(
+        time_array=time_array,
+        app_ra=app_ra,
+        app_dec=app_dec,
+        telescope_loc=site_loc,
+        ref_frame=coord_frame,
+        ref_epoch=coord_epoch,
+    )
+
+    uvws = calc_uvw(
+        app_ra=app_ra,
+        app_dec=app_dec,
+        frame_pa=frame_pa,
+        lst_array=lst_array,
+        antenna_positions=antenna_positions,
+        antenna_numbers=antenna_numbers,
+        ant_1_array=ant_1_array,
+        ant_2_array=ant_2_array,
+        telescope_lon=site_loc.lon.rad,
+        telescope_lat=site_loc.lat.rad,
+        uvw_array=uvw_array,
+        use_ant_pos=(uvw_array is None),
+        from_enu=(uvw_array is not None),
+    )
+
+    if force_positive_u:
+        mask = (uvws[:, 0] < 0.0) | ((uvws[:, 0] == 0.0) & (uvws[:, 1] < 0.0))
+        uvws[mask, :] *= -1.0
+
+    return {
+        "uvw": uvws,
+        "app_ra": app_ra,
+        "app_dec": app_dec,
+        "frame_pa": frame_pa,
+        "lst": lst_array,
+        "site_loc": site_loc,
+    }
diff --git a/src/pyuvdata/utils/phasing.pyx b/src/pyuvdata/utils/phasing.pyx
new file mode 100644
index 0000000000..b1e6d32c17
--- /dev/null
+++ b/src/pyuvdata/utils/phasing.pyx
@@ -0,0 +1,96 @@
+# -*- mode: python; coding: utf-8 -*-
+# Copyright (c) 2024 Radio Astronomy Software Group
+# Licensed under the 2-clause BSD License
+
+# distutils: language = c
+# cython: linetrace=True
+
+# cython imports
+
+cimport cython
+cimport numpy
+from libc.math cimport cos, sin
+
+numpy.import_array()
+
+
+# initial_uvw is a memoryviewed array as an input
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cpdef numpy.ndarray[dtype=numpy.float64_t, ndim=2] _old_uvw_calc(
+    numpy.float64_t ra,
+    numpy.float64_t dec,
+    numpy.float64_t[:, ::1] initial_uvw
+):
+    cdef int i
+    cdef int ndim = 2
+    cdef int nuvw = initial_uvw.shape[1]
+    cdef numpy.npy_intp * dims = [3, nuvw]
+    cdef numpy.ndarray[dtype=numpy.float64_t, ndim=2] uvw = 
numpy.PyArray_EMPTY(ndim, dims, numpy.NPY_FLOAT64, 0) + + # make a memoryview for the numpy array in c + cdef numpy.float64_t[:, ::1] _uvw = uvw + + cdef numpy.float64_t sin_ra, cos_ra, sin_dec, cos_dec + + sin_ra = sin(ra) + cos_ra = cos(ra) + sin_dec = sin(dec) + cos_dec = cos(dec) + + for i in range(nuvw): + _uvw[0, i] = - sin_ra * initial_uvw[0, i] + cos_ra * initial_uvw[1, i] + + _uvw[1, i] = ( + - sin_dec * cos_ra * initial_uvw[0, i] + - sin_dec * sin_ra * initial_uvw[1, i] + + cos_dec * initial_uvw[2, i] + ) + + _uvw[2, i] = ( + cos_dec * cos_ra * initial_uvw[0, i] + + cos_dec * sin_ra * initial_uvw[1, i] + + sin_dec * initial_uvw[2, i] + ) + return uvw + +# uvw is a memoryviewed array as an input +@cython.boundscheck(False) +@cython.wraparound(False) +cpdef numpy.ndarray[dtype=numpy.float64_t, ndim=2] _undo_old_uvw_calc( + numpy.float64_t ra, + numpy.float64_t dec, + numpy.float64_t[:, ::1] uvw +): + cdef int i + cdef int ndim = 2 + cdef int nuvw = uvw.shape[1] + cdef numpy.npy_intp * dims = [3, nuvw] + cdef numpy.ndarray[dtype=numpy.float64_t, ndim=2] unphased_uvw = numpy.PyArray_EMPTY(ndim, dims, numpy.NPY_FLOAT64, 0) + + # make a memoryview for the numpy array in c + cdef numpy.float64_t[:, ::1] _u_uvw = unphased_uvw + + cdef numpy.float64_t sin_ra, cos_ra, sin_dec, cos_dec + + sin_ra = sin(ra) + cos_ra = cos(ra) + sin_dec = sin(dec) + cos_dec = cos(dec) + + for i in range(nuvw): + _u_uvw[0, i] = ( + - sin_ra * uvw[0, i] + - sin_dec * cos_ra * uvw[1, i] + + cos_dec * cos_ra * uvw[2, i] + ) + + _u_uvw[1, i] = ( + cos_ra * uvw[0, i] + - sin_dec * sin_ra * uvw[1, i] + + cos_dec * sin_ra * uvw[2, i] + ) + + _u_uvw[2, i] = cos_dec * uvw[1, i] + sin_dec * uvw[2, i] + + return unphased_uvw diff --git a/src/pyuvdata/utils/pol.py b/src/pyuvdata/utils/pol.py new file mode 100644 index 0000000000..d70250e0b8 --- /dev/null +++ b/src/pyuvdata/utils/pol.py @@ -0,0 +1,499 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Utilities for working with polarizations.""" +import warnings +from collections.abc import Iterable +from copy import deepcopy +from functools import lru_cache, wraps +from typing import Iterable as IterableType + +import numpy as np + +__all__ = [ + "POL_STR2NUM_DICT", + "POL_NUM2STR_DICT", + "CONJ_POL_DICT", + "JONES_STR2NUM_DICT", + "JONES_NUM2STR_DICT", + "XORIENTMAP", + "polstr2num", + "polnum2str", + "jstr2num", + "jnum2str", + "conj_pol", + "_x_orientation_rep_dict", + "parse_polstr", + "parse_jpolstr", +] + +# fmt: off +# polarization constants +# maps polarization strings to polarization integers +POL_STR2NUM_DICT = {"pI": 1, "pQ": 2, "pU": 3, "pV": 4, + "I": 1, "Q": 2, "U": 3, "V": 4, # support straight stokes names + "rr": -1, "ll": -2, "rl": -3, "lr": -4, + "xx": -5, "yy": -6, "xy": -7, "yx": -8, + "hh": -5, "vv": -6, "hv": -7, "vh": -8} + +# maps polarization integers to polarization strings +POL_NUM2STR_DICT = {1: "pI", 2: "pQ", 3: "pU", 4: "pV", + -1: "rr", -2: "ll", -3: "rl", -4: "lr", + -5: "xx", -6: "yy", -7: "xy", -8: "yx"} + +# maps how polarizations change when antennas are swapped +CONJ_POL_DICT = {"xx": "xx", "yy": "yy", "xy": "yx", "yx": "xy", + "ee": "ee", "nn": "nn", "en": "ne", "ne": "en", + "rr": "rr", "ll": "ll", "rl": "lr", "lr": "rl", + "I": "I", "Q": "Q", "U": "U", "V": "V", + "pI": "pI", "pQ": "pQ", "pU": "pU", "pV": "pV"} + +# maps jones matrix element strings to jones integers +# Add entries that don't start with "J" to allow shorthand versions 
+JONES_STR2NUM_DICT = {"Jxx": -5, "Jyy": -6, "Jxy": -7, "Jyx": -8,
+                      "xx": -5, "x": -5, "yy": -6, "y": -6, "xy": -7, "yx": -8,
+                      "Jrr": -1, "Jll": -2, "Jrl": -3, "Jlr": -4,
+                      "rr": -1, "r": -1, "ll": -2, "l": -2, "rl": -3, "lr": -4}
+# maps jones integers to jones matrix element strings
+JONES_NUM2STR_DICT = {-1: "Jrr", -2: "Jll", -3: "Jrl", -4: "Jlr",
+                      -5: "Jxx", -6: "Jyy", -7: "Jxy", -8: "Jyx"}
+
+# maps uvdata pols to input feed polarizations. Note that this dict is also used for
+# CASA MS writing, so the pseudo-stokes parameters are included here to provide mapping
+# to a consistent (if non-physical) set of feeds for the pseudo-stokes visibilities,
+# which are nominally supported by the CASA MS format.
+POL_TO_FEED_DICT = {"xx": ["x", "x"], "yy": ["y", "y"],
+                    "xy": ["x", "y"], "yx": ["y", "x"],
+                    "ee": ["e", "e"], "nn": ["n", "n"],
+                    "en": ["e", "n"], "ne": ["n", "e"],
+                    "rr": ["r", "r"], "ll": ["l", "l"],
+                    "rl": ["r", "l"], "lr": ["l", "r"],
+                    "pI": ["I", "I"], "pQ": ["Q", "Q"],
+                    "pU": ["U", "U"], "pV": ["V", "V"]}
+
+# fmt: on
+
+XORIENTMAP = {
+    "east": "east",
+    "north": "north",
+    "e": "east",
+    "n": "north",
+    "ew": "east",
+    "ns": "north",
+}
+
+
+def _x_orientation_rep_dict(x_orientation):
+    """Create replacement dict based on x_orientation."""
+    try:
+        if XORIENTMAP[x_orientation.lower()] == "east":
+            return {"x": "e", "y": "n"}
+        elif XORIENTMAP[x_orientation.lower()] == "north":
+            return {"x": "n", "y": "e"}
+    except KeyError as e:
+        raise ValueError("x_orientation not recognized.") from e
+
+
+def np_cache(function):
+    """Cache function results, hashing array inputs by casting them to tuples."""
+    function = lru_cache(function)
+
+    @wraps(function)
+    def wrapper(pol, x_orientation=None):
+        try:
+            return function(pol, x_orientation=x_orientation)
+        except TypeError:
+            if isinstance(pol, Iterable):
+                # Assume the reason that we got a type error is that pol was an array.
+                pol = tuple(pol)
+            return function(pol, x_orientation=x_orientation)
+
+    # copy lru_cache attributes over too
+    wrapper.cache_info = function.cache_info
+    wrapper.cache_clear = function.cache_clear
+
+    return wrapper
+
+
+@np_cache
+def polstr2num(pol: str | IterableType[str], *, x_orientation: str | None = None):
+    """
+    Convert polarization str to number according to AIPS Memo 117.
+
+    Prefer 'pI', 'pQ', 'pU' and 'pV' to make it clear that these are pseudo-Stokes,
+    not true Stokes, but also supports 'I', 'Q', 'U', 'V'.
+
+    Parameters
+    ----------
+    pol : str
+        polarization string
+    x_orientation : str, optional
+        Orientation of the physical dipole corresponding to what is
+        labelled as the x polarization ("east" or "north") to allow for
+        converting from E/N strings. See corresponding parameter on UVData
+        for more details.
+
+    Returns
+    -------
+    int
+        Number corresponding to string
+
+    Raises
+    ------
+    ValueError
+        If the pol string cannot be converted to a polarization number.
+
+    Warns
+    -----
+    UserWarning
+        If the x_orientation is not recognized.
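+
+    Examples
+    --------
+    Using the AIPS Memo 117 mapping defined above (a list input returns a list):
+
+    >>> from pyuvdata.utils.pol import polstr2num
+    >>> polstr2num("xx")
+    -5
+    >>> polstr2num(["pI", "yy"])
+    [1, -6]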
+
+    """
+    dict_use = deepcopy(POL_STR2NUM_DICT)
+    if x_orientation is not None:
+        try:
+            rep_dict = _x_orientation_rep_dict(x_orientation)
+            for key, value in POL_STR2NUM_DICT.items():
+                new_key = key.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
+                dict_use[new_key] = value
+        except ValueError:
+            warnings.warn("x_orientation not recognized.")
+
+    poldict = {k.lower(): v for k, v in dict_use.items()}
+    if isinstance(pol, str):
+        out = poldict[pol.lower()]
+    elif isinstance(pol, Iterable):
+        out = [poldict[key.lower()] for key in pol]
+    else:
+        raise ValueError(
+            f"Polarization {pol} cannot be converted to a polarization number."
+        )
+    return out
+
+
+@np_cache
+def polnum2str(num, *, x_orientation=None):
+    """
+    Convert polarization number to str according to AIPS Memo 117.
+
+    Uses 'pI', 'pQ', 'pU' and 'pV' to make it clear that these are pseudo-Stokes,
+    not true Stokes.
+
+    Parameters
+    ----------
+    num : int
+        polarization number
+    x_orientation : str, optional
+        Orientation of the physical dipole corresponding to what is
+        labelled as the x polarization ("east" or "north") to convert to
+        E/N strings. See corresponding parameter on UVData for more details.
+
+    Returns
+    -------
+    str
+        String corresponding to polarization number
+
+    Raises
+    ------
+    ValueError
+        If the polarization number cannot be converted to a polarization string.
+
+    Warns
+    -----
+    UserWarning
+        If the x_orientation is not recognized.
+
+    """
+    dict_use = deepcopy(POL_NUM2STR_DICT)
+    if x_orientation is not None:
+        try:
+            rep_dict = _x_orientation_rep_dict(x_orientation)
+            for key, value in POL_NUM2STR_DICT.items():
+                new_val = value.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
+                dict_use[key] = new_val
+        except ValueError:
+            warnings.warn("x_orientation not recognized.")
+
+    if isinstance(num, (int, np.int32, np.int64)):
+        out = dict_use[num]
+    elif isinstance(num, Iterable):
+        out = [dict_use[i] for i in num]
+    else:
+        raise ValueError(f"Polarization {num} cannot be converted to string.")
+    return out
+
+
+@np_cache
+def jstr2num(jstr, *, x_orientation=None):
+    """
+    Convert jones polarization str to number according to calfits memo.
+
+    Parameters
+    ----------
+    jstr : str or array_like of str
+        antenna (jones) polarization string(s) to convert.
+    x_orientation : str, optional
+        Orientation of the physical dipole corresponding to what is
+        labelled as the x polarization ("east" or "north") to allow for
+        converting from E/N strings. See corresponding parameter on UVData
+        for more details.
+
+    Returns
+    -------
+    int or list of int
+        antenna (jones) polarization number(s) corresponding to the input string(s)
+
+    Raises
+    ------
+    ValueError
+        If the jones string cannot be converted to a polarization number.
+
+    Warns
+    -----
+    UserWarning
+        If the x_orientation is not recognized.
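+
+    Examples
+    --------
+    Shorthand strings (without the leading "J") are also accepted, per the
+    mapping defined above:
+
+    >>> from pyuvdata.utils.pol import jstr2num
+    >>> jstr2num("Jxx")
+    -5
+    >>> jstr2num(["Jrr", "y"])
+    [-1, -6]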
+
+    """
+    dict_use = deepcopy(JONES_STR2NUM_DICT)
+    if x_orientation is not None:
+        try:
+            rep_dict = _x_orientation_rep_dict(x_orientation)
+            for key, value in JONES_STR2NUM_DICT.items():
+                new_key = key.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
+                dict_use[new_key] = value
+        except ValueError:
+            warnings.warn("x_orientation not recognized.")
+
+    jdict = {k.lower(): v for k, v in dict_use.items()}
+    if isinstance(jstr, str):
+        out = jdict[jstr.lower()]
+    elif isinstance(jstr, Iterable):
+        out = [jdict[key.lower()] for key in jstr]
+    else:
+        raise ValueError(f"Jones polarization {jstr} cannot be converted to index.")
+    return out
+
+
+@np_cache
+def jnum2str(jnum, *, x_orientation=None):
+    """
+    Convert jones polarization number to str according to calfits memo.
+
+    Parameters
+    ----------
+    jnum : int or array_like of int
+        antenna (jones) polarization number(s) to convert to strings
+    x_orientation : str, optional
+        Orientation of the physical dipole corresponding to what is
+        labelled as the x polarization ("east" or "north") to convert to
+        E/N strings. See corresponding parameter on UVData for more details.
+
+    Returns
+    -------
+    str or list of str
+        antenna (jones) polarization string(s) corresponding to number
+
+    Raises
+    ------
+    ValueError
+        If the jones polarization number cannot be converted to a jones
+        polarization string.
+
+    Warns
+    -----
+    UserWarning
+        If the x_orientation is not recognized.
+
+    """
+    dict_use = deepcopy(JONES_NUM2STR_DICT)
+    if x_orientation is not None:
+        try:
+            rep_dict = _x_orientation_rep_dict(x_orientation)
+            for key, value in JONES_NUM2STR_DICT.items():
+                new_val = value.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
+                dict_use[key] = new_val
+        except ValueError:
+            warnings.warn("x_orientation not recognized.")
+
+    if isinstance(jnum, (int, np.int32, np.int64)):
+        out = dict_use[jnum]
+    elif isinstance(jnum, Iterable):
+        out = [dict_use[i] for i in jnum]
+    else:
+        raise ValueError(f"Jones polarization {jnum} cannot be converted to string.")
+    return out
+
+
+@np_cache
+def parse_polstr(polstr, *, x_orientation=None):
+    """
+    Parse a polarization string and return pyuvdata standard polarization string.
+
+    See utils.POL_STR2NUM_DICT for options.
+
+    Parameters
+    ----------
+    polstr : str
+        polarization string
+    x_orientation : str, optional
+        Orientation of the physical dipole corresponding to what is
+        labelled as the x polarization ("east" or "north") to allow for
+        converting from E/N strings. See corresponding parameter on UVData
+        for more details.
+
+    Returns
+    -------
+    str
+        AIPS Memo 117 standard string
+
+    Raises
+    ------
+    ValueError
+        If the pol string cannot be converted to a polarization number.
+
+    Warns
+    -----
+    UserWarning
+        If the x_orientation is not recognized.
+
+    """
+    return polnum2str(
+        polstr2num(polstr, x_orientation=x_orientation), x_orientation=x_orientation
+    )
+
+
+@np_cache
+def parse_jpolstr(jpolstr, *, x_orientation=None):
+    """
+    Parse a Jones polarization string and return pyuvdata standard jones string.
+
+    See utils.JONES_STR2NUM_DICT for options.
+
+    Parameters
+    ----------
+    jpolstr : str
+        Jones polarization string
+    x_orientation : str, optional
+        Orientation of the physical dipole corresponding to what is
+        labelled as the x polarization ("east" or "north") to allow for
+        converting from E/N strings. See corresponding parameter on UVData
+        for more details.
+
+    Returns
+    -------
+    str
+        calfits memo standard string
+
+    Raises
+    ------
+    ValueError
+        If the jones string cannot be converted to a polarization number.
+
+    Warns
+    -----
+    UserWarning
+        If the x_orientation is not recognized.
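+
+    Examples
+    --------
+    Shorthand and full forms both normalize to the calfits-style string:
+
+    >>> from pyuvdata.utils.pol import parse_jpolstr
+    >>> parse_jpolstr("x")
+    'Jxx'
+    >>> parse_jpolstr("Jrr")
+    'Jrr'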
+
+    """
+    return jnum2str(
+        jstr2num(jpolstr, x_orientation=x_orientation), x_orientation=x_orientation
+    )
+
+
+def conj_pol(pol):
+    """
+    Return the polarization for the conjugate baseline.
+
+    For example, (1, 2, 'xy') = conj(2, 1, 'yx').
+    The returned polarization is determined by assuming the antenna pair is
+    reversed in the data, and finding the correct polarization correlation
+    which will yield the requested baseline when conjugated. Note this means
+    changing the polarization for linear cross-pols, but keeping auto-pol
+    (e.g. xx) and Stokes the same.
+
+    Parameters
+    ----------
+    pol : str or int
+        Polarization string or integer.
+
+    Returns
+    -------
+    cpol : str or int
+        Polarization as if antennas are swapped (type matches input)
+
+    """
+    cpol_dict = {k.lower(): v for k, v in CONJ_POL_DICT.items()}
+
+    if isinstance(pol, str):
+        cpol = cpol_dict[pol.lower()]
+    elif isinstance(pol, Iterable):
+        cpol = [conj_pol(p) for p in pol]
+    elif isinstance(pol, (int, np.int32, np.int64)):
+        cpol = polstr2num(cpol_dict[polnum2str(pol).lower()])
+    else:
+        raise ValueError("Polarization not recognized, cannot be conjugated.")
+    return cpol
+
+
+def reorder_conj_pols(pols):
+    """
+    Reorder multiple pols, swapping pols that are conjugates of one another.
+
+    For example ('xx', 'xy', 'yx', 'yy') -> ('xx', 'yx', 'xy', 'yy')
+    This is useful for the _key2inds function in the case where an antenna
+    pair is specified but the conjugate pair exists in the data. The conjugated
+    data should be returned in the order of the polarization axis, so after
+    conjugating the data, the pols need to be reordered.
+    For example, if a file contains antpair (0, 1) and pols 'xy' and 'yx', but
+    the user requests antpair (1, 0), they should get:
+    [(1x, 0y), (1y, 0x)] = [conj(0y, 1x), conj(0x, 1y)]
+
+    Parameters
+    ----------
+    pols : array_like of str or int
+        Polarization array (strings or ints).
+
+    Returns
+    -------
+    conj_order : ndarray of int
+        Indices to reorder polarization array.
+    """
+    if not isinstance(pols, Iterable):
+        raise ValueError("reorder_conj_pols must be given an array of polarizations.")
+    cpols = np.array([conj_pol(p) for p in pols])  # Array needed for np.where
+    conj_order = [np.where(cpols == p)[0][0] if p in cpols else -1 for p in pols]
+    if -1 in conj_order:
+        raise ValueError(
+            "Not all conjugate pols exist in the polarization array provided."
+        )
+    return conj_order
+
+
+def determine_pol_order(pols, *, order="AIPS"):
+    """
+    Determine order of input polarization numbers.
+
+    Determines the order by which to sort a given list of polarizations, according to
+    the ordering scheme. Two orders are currently supported: "AIPS" and "CASA". The
+    main difference between the two is the grouping of same-handed polarizations for
+    AIPS (whereas CASA orders the polarizations such that same-handed pols are on the
+    ends of the array).
+
+    Parameters
+    ----------
+    pols : array_like of int
+        Polarization integers (strings should first be converted with `polstr2num`,
+        since sorting is done on the integer values).
+    order : str
+        Polarization ordering scheme, either "CASA" or "AIPS".
+
+    Returns
+    -------
+    index_array : ndarray of int
+        Indices to reorder polarization array.
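+
+    Examples
+    --------
+    For the linear polarizations yx, xx, xy, yy (AIPS groups the same-handed
+    pols first):
+
+    >>> import numpy as np
+    >>> from pyuvdata.utils.pol import determine_pol_order
+    >>> determine_pol_order(np.array([-8, -5, -7, -6]), order="AIPS")
+    array([1, 3, 2, 0])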
+ """ + if order == "AIPS": + index_array = np.argsort(np.abs(pols)) + elif order == "CASA": + casa_order = np.array([1, 2, 3, 4, -1, -3, -4, -2, -5, -7, -8, -6, 0]) + pol_inds = [] + for pol in pols: + pol_inds.append(np.where(casa_order == pol)[0][0]) + index_array = np.argsort(pol_inds) + else: + raise ValueError('order must be either "AIPS" or "CASA".') + + return index_array diff --git a/src/pyuvdata/utils/ps_cat.py b/src/pyuvdata/utils/ps_cat.py new file mode 100644 index 0000000000..0759451dc6 --- /dev/null +++ b/src/pyuvdata/utils/ps_cat.py @@ -0,0 +1,796 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Utilities for working with phase center catalogs.""" + +import numpy as np +from astropy.time import Time + +from . import RADIAN_TOL + +allowed_cat_types = ["sidereal", "ephem", "unprojected", "driftscan"] + + +def look_in_catalog( + phase_center_catalog, + *, + cat_name=None, + cat_type=None, + cat_lon=None, + cat_lat=None, + cat_frame=None, + cat_epoch=None, + cat_times=None, + cat_pm_ra=None, + cat_pm_dec=None, + cat_dist=None, + cat_vrad=None, + ignore_name=False, + target_cat_id=None, + phase_dict=None, +): + """ + Check the catalog to see if an existing entry matches provided data. + + This is a helper function for verifying if an entry already exists within + the catalog, contained within the supplied phase center catalog. + + Parameters + ---------- + phase_center_catalog : dict + Dictionary containing the entries to check. + cat_name : str + Name of the phase center, which should match a the value of "cat_name" + inside an entry of `phase_center_catalog`. + cat_type : str + Type of phase center of the entry. Must be one of: + "sidereal" (fixed RA/Dec), + "ephem" (RA/Dec that moves with time), + "driftscan" (fixed az/el position), + "unprojected" (no w-projection, equivalent to the old + `phase_type` == "drift"). + cat_lon : float or ndarray + Value of the longitudinal coordinate (e.g., RA, Az, l) in radians of the + phase center. No default unless `cat_type="unprojected"`, in which case the + default is zero. Expected to be a float for sidereal and driftscan phase + centers, and an ndarray of floats of shape (Npts,) for ephem phase centers. + cat_lat : float or ndarray + Value of the latitudinal coordinate (e.g., Dec, El, b) in radians of the + phase center. No default unless `cat_type="unprojected"`, in which case the + default is pi/2. Expected to be a float for sidereal and driftscan phase + centers, and an ndarray of floats of shape (Npts,) for ephem phase centers. + cat_frame : str + Coordinate frame that cat_lon and cat_lat are given in. Only used for + sidereal and ephem phase centers. Can be any of the several supported frames + in astropy (a limited list: fk4, fk5, icrs, gcrs, cirs, galactic). + cat_epoch : str or float + Epoch of the coordinates, only used when cat_frame = fk4 or fk5. Given + in units of fractional years, either as a float or as a string with the + epoch abbreviation (e.g, Julian epoch 2000.0 would be J2000.0). + cat_times : ndarray of floats + Only used when `cat_type="ephem"`. Describes the time for which the values + of `cat_lon` and `cat_lat` are caclulated, in units of JD. Shape is (Npts,). + cat_pm_ra : float + Proper motion in RA, in units of mas/year. Only used for sidereal phase + centers. + cat_pm_dec : float + Proper motion in Dec, in units of mas/year. Only used for sidereal phase + centers. 
+ cat_dist : float or ndarray of float + Distance of the source, in units of pc. Only used for sidereal and ephem + phase centers. Expected to be a float for sidereal and driftscan phase + centers, and an ndarray of floats of shape (Npts,) for ephem phase centers. + cat_vrad : float or ndarray of float + Radial velocity of the source, in units of km/s. Only used for sidereal and + ephem phase centers. Expected to be a float for sidereal and driftscan phase + centers, and an ndarray of floats of shape (Npts,) for ephem phase centers. + ignore_name : bool + Nominally, this method will only look at entries where `cat_name` + matches the name of an entry in the catalog. However, by setting this to + True, the method will search all entries in the catalog and see if any + match all of the provided data (excluding `cat_name`). + target_cat_id : int + Optional argument to specify a particular cat_id to check against. + phase_dict : dict + Instead of providing individual parameters, one may provide a dict which + matches that format used within `phase_center_catalog` for checking for + existing entries. If used, all other parameters (save for `ignore_name` + and `cat_name`) are disregarded. + + Returns + ------- + cat_id : int or None + The unique ID number for the phase center added to the internal catalog. + This value is used in the `phase_center_id_array` attribute to denote which + source a given baseline-time corresponds to. If no catalog entry matches, + then None is returned. + cat_diffs : int + The number of differences between the information provided and the catalog + entry contained within `phase_center_catalog`. If everything matches, then + `cat_diffs=0`. + """ + # 1 marcsec tols + radian_tols = (0, RADIAN_TOL) + default_tols = (1e-5, 1e-8) + match_id = None + match_diffs = 99999 + + if (cat_name is None) and (not ignore_name): + if phase_dict is None: + raise ValueError( + "Must specify either phase_dict or cat_name if ignore_name=False." + ) + cat_name = phase_dict["cat_name"] + + if cat_type is not None and cat_type not in allowed_cat_types: + raise ValueError(f"If set, cat_type must be one of {allowed_cat_types}") + + # Emulate the defaults that are set if None is detected for + # unprojected and driftscan types. 
+    if cat_type in ["unprojected", "driftscan"]:
+        if cat_lon is None:
+            cat_lon = 0.0
+        if cat_lat is None:
+            cat_lat = np.pi / 2
+        if cat_frame is None:
+            cat_frame = "altaz"
+
+    if phase_dict is None:
+        phase_dict = {
+            "cat_type": cat_type,
+            "cat_lon": cat_lon,
+            "cat_lat": cat_lat,
+            "cat_frame": cat_frame,
+            "cat_epoch": cat_epoch,
+            "cat_times": cat_times,
+            "cat_pm_ra": cat_pm_ra,
+            "cat_pm_dec": cat_pm_dec,
+            "cat_dist": cat_dist,
+            "cat_vrad": cat_vrad,
+        }
+
+    tol_dict = {
+        "cat_type": None,
+        "cat_lon": radian_tols,
+        "cat_lat": radian_tols,
+        "cat_frame": None,
+        "cat_epoch": None,
+        "cat_times": default_tols,
+        "cat_pm_ra": default_tols,
+        "cat_pm_dec": default_tols,
+        "cat_dist": default_tols,
+        "cat_vrad": default_tols,
+    }
+
+    if target_cat_id is not None:
+        if target_cat_id not in phase_center_catalog:
+            raise ValueError(f"No phase center with ID number {target_cat_id}.")
+        name_dict = {target_cat_id: phase_center_catalog[target_cat_id]["cat_name"]}
+    else:
+        name_dict = {
+            key: cat_dict["cat_name"] for key, cat_dict in phase_center_catalog.items()
+        }
+
+    for cat_id, name in name_dict.items():
+        cat_diffs = 0
+        if (cat_name != name) and (not ignore_name):
+            continue
+        check_dict = phase_center_catalog[cat_id]
+        for key in tol_dict.keys():
+            if phase_dict.get(key) is not None:
+                if check_dict.get(key) is None:
+                    cat_diffs += 1
+                elif tol_dict[key] is None:
+                    # If no tolerance specified, expect attributes to be identical
+                    cat_diffs += phase_dict.get(key) != check_dict.get(key)
+                else:
+                    # allclose will throw a ValueError if you have two arrays
+                    # of different shape, which we can catch to flag that
+                    # the two arrays are actually not within tolerance.
+                    if np.shape(phase_dict[key]) != np.shape(check_dict[key]):
+                        cat_diffs += 1
+                    else:
+                        cat_diffs += not np.allclose(
+                            phase_dict[key],
+                            check_dict[key],
+                            tol_dict[key][0],
+                            tol_dict[key][1],
+                        )
+            else:
+                cat_diffs += check_dict[key] is not None
+        if (cat_diffs == 0) or (cat_name == name):
+            if cat_diffs < match_diffs:
+                # If our current match is an improvement on any previous matches,
+                # then record it as the best match.
+                match_id = cat_id
+                match_diffs = cat_diffs
+            if match_diffs == 0:
+                # If we have a total match, we can bail at this point
+                break
+
+    return match_id, match_diffs
+
+
+def look_for_name(phase_center_catalog, cat_name):
+    """
+    Look up catalog IDs which match a given name.
+
+    Parameters
+    ----------
+    phase_center_catalog : dict
+        Catalog to look for matching names in.
+    cat_name : str or list of str
+        Name to match against entries in phase_center_catalog.
+
+    Returns
+    -------
+    cat_id_list : list
+        List of all catalog IDs which match the given name.
+    """
+    if isinstance(cat_name, str):
+        return [
+            pc_id
+            for pc_id, pc_dict in phase_center_catalog.items()
+            if pc_dict["cat_name"] == cat_name
+        ]
+    else:
+        return [
+            pc_id
+            for pc_id, pc_dict in phase_center_catalog.items()
+            if pc_dict["cat_name"] in cat_name
+        ]
+
+
+def print_phase_center_info(
+    phase_center_catalog,
+    catalog_identifier=None,
+    *,
+    hms_format=None,
+    return_str=False,
+    print_table=True,
+):
+    """
+    Print out the details of the phase centers.
+
+    Prints out an ASCII table that contains the details of the supplied phase center
+    catalog, which typically acts as the internal source catalog for various UV
+    objects.
+
+    Parameters
+    ----------
+    phase_center_catalog : dict
+        Dict containing the list of phase centers (and corresponding data) to be
+        printed out.
+    catalog_identifier : str or int or list of str or int
+        Optional parameter which, if provided, will cause the method to only return
+        information on the phase center(s) with the matching name(s) or catalog ID
+        number(s). Default is to print out information on all catalog entries.
+    hms_format : bool
+        Optional parameter which, if set, forces coordinates to be printed out in
+        Hours-Min-Sec (if set to True) or Deg-Min-Sec (if set to False) format.
+        Default is to print out in HMS if all the objects have coordinate frames
+        of icrs, gcrs, fk5, fk4, or topo; otherwise, DMS format is used.
+    return_str: bool
+        If set to True, the method returns an ASCII string which contains all the
+        table information. Default is False.
+    print_table : bool
+        If set to True, prints the table to the terminal window. Default is True.
+
+    Returns
+    -------
+    table_str : str
+        If return_str=True, an ASCII string containing the entire table text.
+
+    Raises
+    ------
+    ValueError
+        If `cat_name` matches no keys in `phase_center_catalog`.
+    """
+    r2d = 180.0 / np.pi
+    r2m = 60.0 * 180.0 / np.pi
+    r2s = 3600.0 * 180.0 / np.pi
+    ra_frames = ["icrs", "gcrs", "fk5", "fk4", "topo"]
+
+    if catalog_identifier is not None:
+        if isinstance(catalog_identifier, (str, int)):
+            pass
+        elif isinstance(catalog_identifier, list) and all(
+            isinstance(cat, (str, int)) for cat in catalog_identifier
+        ):
+            pass
+        else:
+            raise TypeError(
+                "catalog_identifier must be a string, an integer or a list of "
+                "strings or integers."
+            )
+
+        if not isinstance(catalog_identifier, list):
+            catalog_identifier = [catalog_identifier]
+
+        cat_id_list = []
+        for cat in catalog_identifier:
+            if isinstance(cat, str):
+                this_list = []
+                for key, ps_dict in phase_center_catalog.items():
+                    if ps_dict["cat_name"] == cat:
+                        this_list.append(key)
+                if len(this_list) == 0:
+                    raise ValueError(f"No entry by the name {cat} in the catalog.")
+                cat_id_list.extend(this_list)
+            else:
+                # cat is an integer catalog ID here, so check it against the
+                # catalog keys directly.
+ if cat not in phase_center_catalog: + raise ValueError(f"No entry with the ID {cat} in the catalog.") + cat_id_list.append(cat) + else: + cat_id_list = list(phase_center_catalog) + + dict_list = [phase_center_catalog[cat_id] for cat_id in cat_id_list] + + # We want to check and actually see which fields we need to print + any_lon = any_lat = any_frame = any_epoch = any_times = False + any_pm_ra = any_pm_dec = any_dist = any_vrad = False + + for indv_dict in dict_list: + any_lon = any_lon or indv_dict.get("cat_lon") is not None + any_lat = any_lat or indv_dict.get("cat_lat") is not None + any_frame = any_frame or indv_dict.get("cat_frame") is not None + any_epoch = any_epoch or indv_dict.get("cat_epoch") is not None + any_times = any_times or indv_dict.get("cat_times") is not None + any_pm_ra = any_pm_ra or indv_dict.get("cat_pm_ra") is not None + any_pm_dec = any_pm_dec or indv_dict.get("cat_pm_dec") is not None + any_dist = any_dist or indv_dict.get("cat_dist") is not None + any_vrad = any_vrad or indv_dict.get("cat_vrad") is not None + + if any_lon and (hms_format is None): + cat_frame = indv_dict.get("cat_frame") + cat_type = indv_dict["cat_type"] + if (cat_frame not in ra_frames) or (cat_type == "driftscan"): + hms_format = False + + if hms_format is None: + hms_format = True + + col_list = [] + col_list.append( + {"hdr": ("ID", "#"), "fmt": "% 4i", "field": " %4s ", "name": "cat_id"} + ) + col_list.append( + { + "hdr": ("Cat Entry", "Name"), + "fmt": "%12s", + "field": " %12s ", + "name": "cat_name", + } + ) + col_list.append( + {"hdr": ("Type", ""), "fmt": "%12s", "field": " %12s ", "name": "cat_type"} + ) + + if any_lon: + col_list.append( + { + "hdr": ("Az/Lon/RA", "hours" if hms_format else "deg"), + "fmt": "% 3i:%02i:%05.2f", + "field": " %12s " if hms_format else " %13s ", + "name": "cat_lon", + } + ) + if any_lat: + col_list.append( + { + "hdr": ("El/Lat/Dec", "deg"), + "fmt": "%1s%2i:%02i:%05.2f", + "field": " %12s ", + "name": "cat_lat", + } + ) + if any_frame: + col_list.append( + {"hdr": ("Frame", ""), "fmt": "%5s", "field": " %5s ", "name": "cat_frame"} + ) + if any_epoch: + col_list.append( + {"hdr": ("Epoch", ""), "fmt": "%7s", "field": " %7s ", "name": "cat_epoch"} + ) + if any_times: + col_list.append( + { + "hdr": (" Ephem Range ", "Start-MJD End-MJD"), + "fmt": " %8.2f % 8.2f", + "field": " %20s ", + "name": "cat_times", + } + ) + if any_pm_ra: + col_list.append( + { + "hdr": ("PM-Ra", "mas/yr"), + "fmt": "%.4g", + "field": " %6s ", + "name": "cat_pm_ra", + } + ) + if any_pm_dec: + col_list.append( + { + "hdr": ("PM-Dec", "mas/yr"), + "fmt": "%.4g", + "field": " %6s ", + "name": "cat_pm_dec", + } + ) + if any_dist: + col_list.append( + {"hdr": ("Dist", "pc"), "fmt": "%.1e", "field": " %7s ", "name": "cat_dist"} + ) + if any_vrad: + col_list.append( + { + "hdr": ("V_rad", "km/s"), + "fmt": "%.4g", + "field": " %6s ", + "name": "cat_vrad", + } + ) + + top_str = "" + bot_str = "" + for col in col_list: + top_str += col["field"] % col["hdr"][0] + bot_str += col["field"] % col["hdr"][1] + + info_str = "" + + info_str += top_str + "\n" + info_str += bot_str + "\n" + info_str += ("-" * len(bot_str)) + "\n" + # We want to print in the order of cat_id + for idx in np.argsort(cat_id_list): + tbl_str = "" + for col in col_list: + # If we have a "special" field that needs extra handling, + # take care of that up front + if col["name"] == "cat_id": + temp_val = cat_id_list[idx] + else: + temp_val = dict_list[idx][col["name"]] + if temp_val is None: + temp_str = "" + elif 
col["name"] == "cat_lon": + # Force the longitude component to be a positive value + temp_val = np.mod(np.median(temp_val), 2 * np.pi) + temp_val /= 15.0 if hms_format else 1.0 + coord_tuple = ( + np.mod(temp_val * r2d, 360.0), + np.mod(temp_val * r2m, 60.0), + np.mod(temp_val * r2s, 60.0), + ) + temp_str = col["fmt"] % coord_tuple + elif col["name"] == "cat_lat": + temp_val = np.median(temp_val) + coord_tuple = ( + "-" if temp_val < 0.0 else "+", + np.mod(np.abs(temp_val) * r2d, 360.0), + np.mod(np.abs(temp_val) * r2m, 60.0), + np.mod(np.abs(temp_val) * r2s, 60.0), + ) + temp_str = col["fmt"] % coord_tuple + elif col["name"] == "cat_epoch": + use_byrs = dict_list[idx]["cat_frame"] in ["fk4", "fk4noeterms"] + temp_val = ("B%6.1f" if use_byrs else "J%6.1f") % temp_val + temp_str = col["fmt"] % temp_val + elif col["name"] == "cat_times": + time_tuple = ( + np.min(temp_val) - 2400000.5, + np.max(temp_val) - 2400000.5, + ) + temp_str = col["fmt"] % time_tuple + elif (col["name"] == "cat_dist") or (col["name"] == "cat_vrad"): + temp_val = np.median(temp_val) + temp_str = col["fmt"] % temp_val + else: + temp_str = col["fmt"] % temp_val + tbl_str += col["field"] % temp_str + info_str += tbl_str + "\n" + + if print_table: + # We need this extra bit of code to handle trailing whitespace, since + # otherwise some checks (e.g., doc check on tutorials) will balk + print( + "\n".join([line.rstrip() for line in info_str.split("\n")]), end="" + ) # pragma: nocover + if return_str: + return info_str + + +def generate_new_phase_center_id( + phase_center_catalog=None, *, cat_id=None, old_id=None, reserved_ids=None +): + """ + Update a phase center with a new catalog ID number. + + Parameters + ---------- + phase_center_catalog : dict + Catalog to be updated. Note that the supplied catalog will be modified in situ. + cat_id : int + Optional argument. If supplied, then the method will check to see that the + supplied ID is not in either the supplied catalog or in the reserved IDs. + provided value as the new catalog ID, provided that an existing catalog + If not supplied, then the method will automatically assign a value, defaulting + to the value in `cat_id` if supplied (and assuming that ID value has no + conflicts with the reserved IDs). + old_id : int + Optional argument, current catalog ID of the phase center, which corresponds to + a key in `phase_center_catalog`. + reserved_ids : array-like in int + Optional argument. An array-like of ints that denotes which ID numbers + are already reserved. Useful for when combining two separate catalogs. + + Returns + ------- + new_id : int + New phase center ID. + + Raises + ------ + ValueError + If there's no entry that matches `cat_id`, or of the value `new_id` + is already taken. + """ + used_cat_ids = set() + if phase_center_catalog is None: + if old_id is not None: + raise ValueError("Cannot specify old_id if no catalog is supplied.") + else: + used_cat_ids = set(phase_center_catalog) + if old_id is not None: + if old_id not in phase_center_catalog: + raise ValueError(f"No match in catalog to an entry with id {cat_id}.") + used_cat_ids.remove(old_id) + + if reserved_ids is not None: + used_cat_ids = used_cat_ids.union(reserved_ids) + + if cat_id is None: + # Default to using the old ID if available. 
+        cat_id = old_id
+
+        # If the old ID is in the reserved list, then we'll need to update it
+        if (old_id is None) or (old_id in used_cat_ids):
+            cat_id = set(range(len(used_cat_ids) + 1)).difference(used_cat_ids).pop()
+    elif cat_id in used_cat_ids:
+        if phase_center_catalog is not None and cat_id in phase_center_catalog:
+            raise ValueError(
+                "Provided cat_id belongs to another source (%s)."
+                % phase_center_catalog[cat_id]["cat_name"]
+            )
+        else:
+            raise ValueError("Provided cat_id was found in reserved_ids.")
+
+    return cat_id
+
+
+def generate_phase_center_cat_entry(
+    cat_name=None,
+    *,
+    cat_type=None,
+    cat_lon=None,
+    cat_lat=None,
+    cat_frame=None,
+    cat_epoch=None,
+    cat_times=None,
+    cat_pm_ra=None,
+    cat_pm_dec=None,
+    cat_dist=None,
+    cat_vrad=None,
+    info_source="user",
+    force_update=False,
+    cat_id=None,
+):
+    """
+    Construct an entry for an object/source catalog.
+
+    This is a helper function for building a phase center entry in the format
+    used within the attribute `phase_center_catalog`.
+
+    Parameters
+    ----------
+    cat_name : str
+        Name of the phase center to be added.
+    cat_type : str
+        Type of phase center to be added. Must be one of:
+        "sidereal" (fixed RA/Dec),
+        "ephem" (RA/Dec that moves with time),
+        "driftscan" (fixed az/el position),
+        "unprojected" (no w-projection, equivalent to the old
+        `phase_type` == "drift").
+    cat_lon : float or ndarray
+        Value of the longitudinal coordinate (e.g., RA, Az, l) in radians of the
+        phase center. No default unless `cat_type="unprojected"`, in which case the
+        default is zero. Expected to be a float for sidereal and driftscan phase
+        centers, and an ndarray of floats of shape (Npts,) for ephem phase centers.
+    cat_lat : float or ndarray
+        Value of the latitudinal coordinate (e.g., Dec, El, b) in radians of the
+        phase center. No default unless `cat_type="unprojected"`, in which case the
+        default is pi/2. Expected to be a float for sidereal and driftscan phase
+        centers, and an ndarray of floats of shape (Npts,) for ephem phase centers.
+    cat_frame : str
+        Coordinate frame that cat_lon and cat_lat are given in. Only used
+        for sidereal and ephem targets. Can be any of the several supported frames
+        in astropy (a limited list: fk4, fk5, icrs, gcrs, cirs, galactic).
+    cat_epoch : str or float
+        Epoch of the coordinates, only used when cat_frame = fk4 or fk5. Given
+        in units of fractional years, either as a float or as a string with the
+        epoch abbreviation (e.g., Julian epoch 2000.0 would be J2000.0).
+    cat_times : ndarray of floats
+        Only used when `cat_type="ephem"`. Describes the time for which the values
+        of `cat_lon` and `cat_lat` are calculated, in units of JD. Shape is (Npts,).
+    cat_pm_ra : float
+        Proper motion in RA, in units of mas/year. Only used for sidereal phase
+        centers.
+    cat_pm_dec : float
+        Proper motion in Dec, in units of mas/year. Only used for sidereal phase
+        centers.
+    cat_dist : float or ndarray of float
+        Distance of the source, in units of pc. Only used for sidereal and ephem
+        phase centers. Expected to be a float for sidereal and driftscan phase
+        centers, and an ndarray of floats of shape (Npts,) for ephem phase centers.
+    cat_vrad : float or ndarray of float
+        Radial velocity of the source, in units of km/s. Only used for sidereal and
+        ephem phase centers. Expected to be a float for sidereal and driftscan phase
+        centers, and an ndarray of floats of shape (Npts,) for ephem phase centers.
+    info_source : str
+        Optional string describing the source of the information provided. Used
+        primarily in UVData to denote whether an ephemeris was supplied by the
+        JPL-Horizons system, supplied by the user, or read in by one of the various
+        file interpreters. Default is 'user'.
+    force_update : bool
+        Normally, `_add_phase_center` will throw an error if there already exists a
+        phase_center with the given cat_id. However, if one sets
+        `force_update=True`, the method will overwrite the existing entry in
+        `phase_center_catalog` with the parameters supplied. Note that doing this
+        will _not_ update other attributes of the `UVData` object. Default is False.
+    cat_id : int
+        An integer signifying the ID number for the phase center, used in the
+        `phase_center_id_array` attribute. If a matching phase center entry exists
+        already, that phase center ID will be returned, which may be different than
+        the value specified to this parameter. The default is for the method to
+        assign this value automatically.
+
+    Returns
+    -------
+    phase_center_entry : dict
+        Dict containing the information for this phase center, in the format used
+        for entries in `phase_center_catalog`.
+
+    Raises
+    ------
+    ValueError
+        If the supplied parameters are malformed or incompatible with the given
+        cat_type (e.g., a sidereal source without coordinates).
+
+    """
+    if not isinstance(cat_name, str):
+        raise ValueError("cat_name must be a string.")
+
+    # We currently only have 4 supported types -- make sure the user supplied
+    # one of those
+    if cat_type not in allowed_cat_types:
+        raise ValueError(f"cat_type must be one of {allowed_cat_types}.")
+
+    # Both proper motion parameters need to be set together
+    if (cat_pm_ra is None) != (cat_pm_dec is None):
+        raise ValueError(
+            "Must supply values for either both or neither of "
+            "cat_pm_ra and cat_pm_dec."
+        )
+
+    # If left unset, unprojected and driftscan default to Az, El = (0 deg, 90 deg)
+    if cat_type in ["unprojected", "driftscan"]:
+        if cat_lon is None:
+            cat_lon = 0.0
+        if cat_lat is None:
+            cat_lat = np.pi / 2
+        if cat_frame is None:
+            cat_frame = "altaz"
+
+    # check some case-specific things and make sure all the entries are acceptable
+    if (cat_times is None) and (cat_type == "ephem"):
+        raise ValueError("cat_times cannot be None for ephem object.")
+    elif (cat_times is not None) and (cat_type != "ephem"):
+        raise ValueError("cat_times cannot be used for non-ephem phase centers.")
+
+    if (cat_lon is None) and (cat_type in ["sidereal", "ephem"]):
+        raise ValueError("cat_lon cannot be None for sidereal or ephem phase centers.")
+
+    if (cat_lat is None) and (cat_type in ["sidereal", "ephem"]):
+        raise ValueError("cat_lat cannot be None for sidereal or ephem phase centers.")
+
+    if (cat_frame is None) and (cat_type in ["sidereal", "ephem"]):
+        raise ValueError(
+            "cat_frame cannot be None for sidereal or ephem phase centers."
+        )
+    elif (cat_frame != "altaz") and (cat_type in ["driftscan", "unprojected"]):
+        raise ValueError(
+            "cat_frame must be either None or 'altaz' when the cat type "
+            "is either driftscan or unprojected."
+        )
+
+    if (cat_type == "unprojected") and (cat_lon != 0.0):
+        raise ValueError(
+            "Catalog entries that are unprojected must have cat_lon set to either "
+            "0 or None."
+        )
+    if (cat_type == "unprojected") and (cat_lat != (np.pi / 2)):
+        raise ValueError(
+            "Catalog entries that are unprojected must have cat_lat set to either "
+            "pi/2 or None."
+        )
+
+    if (cat_type != "sidereal") and (
+        (cat_pm_ra is not None) or (cat_pm_dec is not None)
+    ):
+        raise ValueError(
+            "Non-zero proper motion values (cat_pm_ra, cat_pm_dec) "
+            "for cat types other than sidereal are not supported."
+        )
+
+    if isinstance(cat_epoch, (Time, str)):
+        if cat_frame in ["fk4", "fk4noeterms"]:
+            cat_epoch = Time(cat_epoch).byear
+        else:
+            cat_epoch = Time(cat_epoch).jyear
+    elif cat_epoch is not None:
+        cat_epoch = float(cat_epoch)
+
+    if cat_type == "ephem":
+        cat_times = np.array(cat_times, dtype=float).reshape(-1)
+        cshape = cat_times.shape
+        try:
+            cat_lon = np.array(cat_lon, dtype=float).reshape(cshape)
+            cat_lat = np.array(cat_lat, dtype=float).reshape(cshape)
+            if cat_dist is not None:
+                cat_dist = np.array(cat_dist, dtype=float).reshape(cshape)
+            if cat_vrad is not None:
+                cat_vrad = np.array(cat_vrad, dtype=float).reshape(cshape)
+        except ValueError as err:
+            raise ValueError(
+                "Object properties -- lon, lat, pm_ra, pm_dec, dist, vrad -- must "
+                "be of the same size as cat_times for ephem phase centers."
+            ) from err
+    else:
+        cat_lon = None if cat_lon is None else float(cat_lon)
+        cat_lat = None if cat_lat is None else float(cat_lat)
+        cat_pm_ra = None if cat_pm_ra is None else float(cat_pm_ra)
+        cat_pm_dec = None if cat_pm_dec is None else float(cat_pm_dec)
+        cat_dist = None if cat_dist is None else float(cat_dist)
+        cat_vrad = None if cat_vrad is None else float(cat_vrad)
+
+    cat_entry = {
+        "cat_name": cat_name,
+        "cat_type": cat_type,
+        "cat_lon": cat_lon,
+        "cat_lat": cat_lat,
+        "cat_frame": cat_frame,
+        "cat_epoch": cat_epoch,
+        "cat_times": cat_times,
+        "cat_pm_ra": cat_pm_ra,
+        "cat_pm_dec": cat_pm_dec,
+        "cat_vrad": cat_vrad,
+        "cat_dist": cat_dist,
+        "info_source": info_source,
+    }
+
+    return cat_entry
diff --git a/src/pyuvdata/utils/redundancy.py b/src/pyuvdata/utils/redundancy.py
new file mode 100644
index 0000000000..4e98bc6caf
--- /dev/null
+++ b/src/pyuvdata/utils/redundancy.py
@@ -0,0 +1,369 @@
+# -*- mode: python; coding: utf-8 -*-
+# Copyright (c) 2024 Radio Astronomy Software Group
+# Licensed under the 2-clause BSD License
+"""Utilities for working with redundant baselines."""
+import warnings
+from copy import deepcopy
+
+import numpy as np
+from scipy.spatial.distance import cdist
+
+from .bls import antnums_to_baseline, baseline_index_flip
+
+
+def _adj_list(vecs, tol, n_blocks=None):
+    """Identify neighbors of each vec in vecs, to distance tol."""
+    n_items = len(vecs)
+    max_items = 2**10  # Max array size used is max_items**2. Avoid using > 1 GiB
+
+    if n_blocks is None:
+        n_blocks = max(n_items // max_items, 1)
+
+    # Sort the vectors by x so that pairs of blocks with no x-overlap
+    # can be skipped.
+    order = np.argsort(vecs[:, 0])
+    blocks = np.array_split(order, n_blocks)
+    adj = [{k} for k in range(n_items)]  # Adjacency lists
+    for b1 in blocks:
+        for b2 in blocks:
+            v1, v2 = vecs[b1], vecs[b2]
+            # Check for no overlap, with tolerance.
+            xmin1 = v1[0, 0] - tol
+            xmax1 = v1[-1, 0] + tol
+            xmin2 = v2[0, 0] - tol
+            xmax2 = v2[-1, 0] + tol
+            if max(xmin1, xmin2) > min(xmax1, xmax2):
+                continue
+
+            adj_mat = cdist(vecs[b1], vecs[b2]) < tol
+            for bi, col in enumerate(adj_mat):
+                adj[b1[bi]] = adj[b1[bi]].union(b2[col])
+    return [frozenset(g) for g in adj]
+
+
+def _find_cliques(adj, strict=False):
+    n_items = len(adj)
+
+    loc_gps = []
+    visited = np.zeros(n_items, dtype=bool)
+    for k in range(n_items):
+        if visited[k]:
+            continue
+        a0 = adj[k]
+        visited[k] = True
+        if all(adj[it].__hash__() == a0.__hash__() for it in a0):
+            group = list(a0)
+            group.sort()
+            visited[list(a0)] = True
+            loc_gps.append(group)
+
+    # Require all adjacency lists to be isolated maximal cliques:
+    if strict:
+        if not all(sorted(st) in loc_gps for st in adj):
+            raise ValueError("Non-isolated cliques found in graph.")
+
+    return loc_gps
+
+
+def find_clusters(*, location_ids, location_vectors, tol, strict=False):
+    """
+    Find clusters of vectors (e.g. redundant baselines, times).
+
+    Parameters
+    ----------
+    location_ids : array_like of int
+        ID labels for locations.
+    location_vectors : array_like of float
+        location vectors, can be multidimensional
+    tol : float
+        tolerance for clusters
+    strict : bool
+        Require that all adjacency lists be isolated maximal cliques.
+        This ensures that vectors do not fall into multiple clusters.
+        Default: False
+
+    Returns
+    -------
+    list of list of location_ids
+
+    """
+    location_vectors = np.asarray(location_vectors)
+    location_ids = np.asarray(location_ids)
+    if location_vectors.ndim == 1:
+        location_vectors = location_vectors[:, np.newaxis]
+
+    adj = _adj_list(location_vectors, tol)  # adj = list of sets
+
+    loc_gps = _find_cliques(adj, strict=strict)
+    loc_gps = [np.sort(location_ids[gp]).tolist() for gp in loc_gps]
+    return loc_gps
+
+
+def find_clusters_grid(location_ids, location_vectors, tol=1.0):
+    """
+    Find redundant groups using a gridding algorithm developed by the HERA team.
+
+    This is essentially a gridding approach, but it only keeps track of the grid
+    points that have baselines assigned to them. It iterates through the
+    baselines and assigns each baseline to an existing group if it is within
+    a grid spacing, or makes a new group if there is none. The location of
+    the group is the baseline vector of the first baseline assigned to it, rounded
+    to the grid spacing, so the resulting assigned grid point can depend on the
+    order in which baseline vectors are passed to it. It is possible for a baseline
+    to be assigned to a group that is up to but strictly less than 4 times the
+    grid spacing from its true location, so we use a grid a factor of 4 smaller
+    than the passed tolerance (`tol`). This method is quite robust for regular
+    arrays if the tolerance is properly specified, but may not behave predictably
+    for highly non-redundant arrays.
+
+    Parameters
+    ----------
+    location_ids : array_like of int
+        ID labels (e.g., baseline numbers), shape (Nbls,).
+    location_vectors : array_like of float
+        Location (e.g., baseline) vectors in meters, shape (Nbls, 3).
+    tol : float
+        Absolute tolerance of redundancy, in meters.
+
+    Returns
+    -------
+    baseline_groups : list of lists of int
+        list of lists of redundant baseline numbers
+    baseline_ind_conj : list of int
+        List of baselines that are redundant when reversed. Always empty here;
+        conjugation is handled by the caller (see `get_baseline_redundancies`).
+
+    """
+    bl_gps = {}
+    # Reduce the grid size to ensure baselines won't be assigned to a group
+    # more than the tol away from their location. The factor of 4 is a personal
+    # communication from Josh Dillon, who developed this algorithm.
+    grid_size = tol / 4.0
+
+    p_or_m = (0, -1, 1)
+    epsilons = [[dx, dy, dz] for dx in p_or_m for dy in p_or_m for dz in p_or_m]
+
+    def check_neighbors(delta):
+        # Check to make sure bl_gps doesn't have the key plus or minus rounding error
+        for epsilon in epsilons:
+            newKey = (
+                delta[0] + epsilon[0],
+                delta[1] + epsilon[1],
+                delta[2] + epsilon[2],
+            )
+            if newKey in bl_gps:
+                return newKey
+        return
+
+    baseline_ind_conj = []
+    for bl_i, bl in enumerate(location_ids):
+        delta = tuple(np.round(location_vectors[bl_i] / grid_size).astype(int))
+        new_key = check_neighbors(delta)
+        if new_key is not None:
+            # this has a match
+            bl_gps[new_key].append(bl)
+        else:
+            # this is a new group
+            bl_gps[delta] = [bl]
+
+    bl_list = [sorted(gv) for gv in bl_gps.values()]
+
+    return bl_list, baseline_ind_conj
+
+
+def get_baseline_redundancies(
+    baselines, baseline_vecs, *, tol=1.0, include_conjugates=False, use_grid_alg=None
+):
+    """
+    Find redundant baseline groups.
+
+    Parameters
+    ----------
+    baselines : array_like of int
+        Baseline numbers, shape (Nbls,)
+    baseline_vecs : array_like of float
+        Baseline vectors in meters, shape (Nbls, 3).
+    tol : float
+        Absolute tolerance of redundancy, in meters.
+    include_conjugates : bool
+        Option to include baselines that are redundant when flipped.
+    use_grid_alg : bool
+        Option to use the gridding based algorithm (developed by the HERA team)
+        to find redundancies rather than the older clustering algorithm.
+
+    Returns
+    -------
+    baseline_groups : list of lists of int
+        list of lists of redundant baseline numbers
+    vec_bin_centers : list of array_like of float
+        List of vectors describing redundant group centers
+    lengths : list of float
+        List of redundant group baseline lengths in meters
+    baseline_ind_conj : list of int
+        List of baselines that are redundant when reversed. Only returned if
+        include_conjugates is True
+
+    """
+    if use_grid_alg is None:
+        # This was added in v2.4.2 (Feb 2024). It should go away at some point.
+        # Normally it would be in v2.6 or later, but if v3.0 comes out
+        # very soon we could consider delaying the removal of this until v3.1
+        warnings.warn(
+            "The use_grid_alg parameter is not set. Defaulting to True to "
+            "use the new gridding based algorithm (developed by the HERA team) "
+            "rather than the older clustering based algorithm. This is a change "
+            "to the default; to use the clustering algorithm set use_grid_alg=False."
+        )
+        use_grid_alg = True
+
+    Nbls = baselines.shape[0]
+
+    if not baseline_vecs.shape == (Nbls, 3):
+        raise ValueError("Baseline vectors must be shape (Nbls, 3)")
+
+    baseline_vecs = deepcopy(baseline_vecs)  # Protect the vectors passed in.
+ + if include_conjugates: + conjugates = [] + for bv in baseline_vecs: + uneg = bv[0] < -tol + uzer = np.isclose(bv[0], 0.0, atol=tol) + vneg = bv[1] < -tol + vzer = np.isclose(bv[1], 0.0, atol=tol) + wneg = bv[2] < -tol + conjugates.append(uneg or (uzer and vneg) or (uzer and vzer and wneg)) + + conjugates = np.array(conjugates, dtype=bool) + baseline_vecs[conjugates] *= -1 + baseline_ind_conj = baselines[conjugates] + bl_gps, vec_bin_centers, lens = get_baseline_redundancies( + baselines, + baseline_vecs, + tol=tol, + include_conjugates=False, + use_grid_alg=use_grid_alg, + ) + return bl_gps, vec_bin_centers, lens, baseline_ind_conj + + if use_grid_alg: + output = find_clusters_grid( + location_ids=baselines, location_vectors=baseline_vecs, tol=tol + ) + bl_gps, baseline_ind_conj = output + else: + try: + bl_gps = find_clusters( + location_ids=baselines, + location_vectors=baseline_vecs, + tol=tol, + strict=True, + ) + except ValueError as exc: + raise ValueError( + "Some baselines are falling into multiple redundant groups. " + "Lower the tolerance to resolve ambiguity or use the gridding " + "based algorithm (developed by the HERA team) to find redundancies " + "by setting use_grid_alg=True." + ) from exc + + n_unique = len(bl_gps) + vec_bin_centers = np.zeros((n_unique, 3)) + for gi, gp in enumerate(bl_gps): + inds = [np.where(i == baselines)[0] for i in gp] + vec_bin_centers[gi] = np.mean(baseline_vecs[inds, :], axis=0) + + lens = np.sqrt(np.sum(vec_bin_centers**2, axis=1)) + return bl_gps, vec_bin_centers, lens + + +def get_antenna_redundancies( + antenna_numbers, + antenna_positions, + *, + tol=1.0, + include_autos=False, + use_grid_alg=None, +): + """ + Find redundant baseline groups based on antenna positions. + + Parameters + ---------- + antenna_numbers : array_like of int + Antenna numbers, shape (Nants,). + antenna_positions : array_like of float + Antenna position vectors in the ENU (topocentric) frame in meters, + shape (Nants, 3). + tol : float + Redundancy tolerance in meters. + include_autos : bool + Option to include autocorrelations. + use_grid_alg : bool + Option to use the gridding based algorithm (developed by the HERA team) + to find redundancies rather than the older clustering algorithm. + + Returns + ------- + baseline_groups : list of lists of int + list of lists of redundant baseline numbers + vec_bin_centers : list of array_like of float + List of vectors describing redundant group centers + lengths : list of float + List of redundant group baseline lengths in meters + + Notes + ----- + The baseline numbers refer to antenna pairs (a1, a2) such that + the baseline vector formed from ENU antenna positions, + blvec = enu[a1] - enu[a2] + is close to the other baselines in the group. + + This is achieved by putting baselines in a form of the u>0 + convention, but with a tolerance in defining the signs of + vector components. + + To guarantee that the same baseline numbers are present in a UVData + object, ``UVData.conjugate_bls('u>0', uvw_tol=tol)``, where `tol` is + the tolerance used here. + + """ + if use_grid_alg is None: + # This was added in v2.4.2 (Feb 2024). It should go away at some point. + # Normally it would be in v2.6 or later, but if v3.0 comes out + # very soon we could consider delaying the removal of this until v3.1 + warnings.warn( + "The use_grid_alg parameter is not set. Defaulting to True to " + "use the new gridding based algorithm (developed by the HERA team) " + "rather than the older clustering based algorithm. 
This is a change "
            "to the default; to use the clustering algorithm set use_grid_alg=False."
        )
        use_grid_alg = True

    Nants = antenna_numbers.size

    bls = []
    bl_vecs = []

    for aj in range(Nants):
        mini = aj + 1
        if include_autos:
            mini = aj
        for ai in range(mini, Nants):
            anti, antj = antenna_numbers[ai], antenna_numbers[aj]
            bidx = antnums_to_baseline(antj, anti, Nants_telescope=Nants)
            bv = antenna_positions[ai] - antenna_positions[aj]
            bl_vecs.append(bv)
            bls.append(bidx)
    bls = np.array(bls)
    bl_vecs = np.array(bl_vecs)
    gps, vecs, lens, conjs = get_baseline_redundancies(
        bls, bl_vecs, tol=tol, include_conjugates=True, use_grid_alg=use_grid_alg
    )
    # Flip the baselines in the groups.
    for gi, gp in enumerate(gps):
        for bi, bl in enumerate(gp):
            if bl in conjs:
                gps[gi][bi] = baseline_index_flip(bl, Nants_telescope=Nants)

    return gps, vecs, lens
diff --git a/src/pyuvdata/uvbase.py b/src/pyuvdata/uvbase.py
index 8ce366ebfa..8e460c0db1 100644
--- a/src/pyuvdata/uvbase.py
+++ b/src/pyuvdata/uvbase.py
@@ -16,7 +16,7 @@
 from . import __version__
 from . import parameter as uvp
-from .utils import _get_iterable
+from .utils.helpers import _get_iterable
 
 __all__ = ["UVBase"]
diff --git a/src/pyuvdata/uvbeam/beamfits.py b/src/pyuvdata/uvbeam/beamfits.py
index 6b1971c24b..ba1f82e5ab 100644
--- a/src/pyuvdata/uvbeam/beamfits.py
+++ b/src/pyuvdata/uvbeam/beamfits.py
@@ -10,8 +10,9 @@
 from astropy.io import fits
 from docstring_parser import DocstringStyle
 
-from .. import utils
 from ..docstrings import copy_replace_short_description
+from ..utils import helpers
+from ..utils.file_io import fits as fits_utils
 from . import UVBeam
 
 __all__ = ["BeamFITS"]
@@ -87,7 +88,7 @@ def read_beamfits(
         with fits.open(filename) as fname:
             primary_hdu = fname[0]
             primary_header = primary_hdu.header.copy()
-            hdunames = utils._fits_indexhdus(fname)  # find the rest of the tables
+            hdunames = fits_utils._indexhdus(fname)  # find the rest of the tables
             data = primary_hdu.data
 
             # only support simple antenna_types for now.
# support for phased arrays should be added @@ -157,10 +158,10 @@ def read_beamfits( self.Naxes1 = primary_header.pop("NAXIS" + str(ax_nums["img_ax1"])) self.Naxes2 = primary_header.pop("NAXIS" + str(ax_nums["img_ax2"])) - self.axis1_array = utils._fits_gethduaxis( + self.axis1_array = fits_utils._gethduaxis( primary_hdu, ax_nums["img_ax1"] ) - self.axis2_array = utils._fits_gethduaxis( + self.axis2_array = fits_utils._gethduaxis( primary_hdu, ax_nums["img_ax2"] ) @@ -244,7 +245,7 @@ def read_beamfits( while len(data.shape) < n_efield_dims - 1: data = np.expand_dims(data, axis=0) - self.freq_array = utils._fits_gethduaxis(primary_hdu, ax_nums["freq"]) + self.freq_array = fits_utils._gethduaxis(primary_hdu, ax_nums["freq"]) # default frequency axis is Hz, but check for corresonding CUNIT freq_units = primary_header.pop("CUNIT" + str(ax_nums["freq"]), "Hz") if freq_units != "Hz": @@ -293,7 +294,7 @@ def read_beamfits( self.Npols = primary_header.pop("NAXIS" + str(ax_nums["feed_pol"])) self.polarization_array = np.int32( - utils._fits_gethduaxis(primary_hdu, ax_nums["feed_pol"]) + fits_utils._gethduaxis(primary_hdu, ax_nums["feed_pol"]) ) self._set_power() elif self.beam_type == "efield": @@ -330,12 +331,12 @@ def read_beamfits( self.x_orientation = primary_header.pop("XORIENT", None) self.history = str(primary_header.get("HISTORY", "")) - if not utils._check_history_version( + if not helpers._check_history_version( self.history, self.pyuvdata_version_str ): self.history += self.pyuvdata_version_str - self.extra_keywords = utils._get_fits_extra_keywords(primary_header) + self.extra_keywords = fits_utils._get_extra_keywords(primary_header) # read BASISVEC HDU if present if "BASISVEC" in hdunames: @@ -382,10 +383,10 @@ def read_beamfits( "CTYPE" + str(basisvec_ax_nums["img_ax2"]) ].lower(), ] - basisvec_axis1_array = utils._fits_gethduaxis( + basisvec_axis1_array = fits_utils._gethduaxis( basisvec_hdu, basisvec_ax_nums["img_ax1"] ) - basisvec_axis2_array = utils._fits_gethduaxis( + basisvec_axis2_array = fits_utils._gethduaxis( basisvec_hdu, basisvec_ax_nums["img_ax2"] ) @@ -533,7 +534,9 @@ def write_beamfits( if self.Nfreqs > 1: freq_spacing = self.freq_array[1:] - self.freq_array[:-1] - if not utils._test_array_constant(freq_spacing, tols=self._freq_array.tols): + if not helpers._test_array_constant( + freq_spacing, tols=self._freq_array.tols + ): raise ValueError( "The frequencies are not evenly spaced (probably " "because of a select operation). The beamfits format " @@ -548,7 +551,7 @@ def write_beamfits( else: ax_nums = reg_primary_ax_nums if self.Naxes1 > 1: - if not utils._test_array_constant_spacing(self._axis1_array): + if not helpers._test_array_constant_spacing(self._axis1_array): raise ValueError( "The pixels are not evenly spaced along first axis. " "The beam fits format does not support " @@ -559,7 +562,7 @@ def write_beamfits( axis1_spacing = 1 if self.Naxes2 > 1: - if not utils._test_array_constant_spacing(self._axis2_array): + if not helpers._test_array_constant_spacing(self._axis2_array): raise ValueError( "The pixels are not evenly spaced along second axis. " "The beam fits format does not support " @@ -639,7 +642,7 @@ def write_beamfits( # set up feed or pol axis if self.beam_type == "power": if self.Npols > 1: - if not utils._test_array_constant_spacing(self._polarization_array): + if not helpers._test_array_constant_spacing(self._polarization_array): raise ValueError( "The polarization values are not evenly " "spaced (probably because of a select operation). 
" diff --git a/src/pyuvdata/uvbeam/cst_beam.py b/src/pyuvdata/uvbeam/cst_beam.py index 0387972452..4b297dc013 100644 --- a/src/pyuvdata/uvbeam/cst_beam.py +++ b/src/pyuvdata/uvbeam/cst_beam.py @@ -9,6 +9,7 @@ import numpy as np from .. import utils +from ..utils import helpers from . import UVBeam __all__ = ["CSTBeam"] @@ -152,7 +153,7 @@ def read_cst_beam( self.model_name = model_name self.model_version = model_version self.history = history - if not utils._check_history_version(self.history, self.pyuvdata_version_str): + if not helpers._check_history_version(self.history, self.pyuvdata_version_str): self.history += self.pyuvdata_version_str if x_orientation is not None: @@ -241,14 +242,14 @@ def read_cst_beam( theta_data = theta_data.reshape((theta_axis.size, phi_axis.size), order="F") phi_data = phi_data.reshape((theta_axis.size, phi_axis.size), order="F") - if not utils._test_array_constant_spacing( + if not helpers._test_array_constant_spacing( theta_axis, tols=self._axis2_array.tols ): raise ValueError( "Data does not appear to be regularly gridded in zenith angle" ) - if not utils._test_array_constant_spacing( + if not helpers._test_array_constant_spacing( phi_axis, tols=self._axis1_array.tols ): raise ValueError( diff --git a/src/pyuvdata/uvbeam/mwa_beam.py b/src/pyuvdata/uvbeam/mwa_beam.py index e6a08c9a37..3791335847 100644 --- a/src/pyuvdata/uvbeam/mwa_beam.py +++ b/src/pyuvdata/uvbeam/mwa_beam.py @@ -611,7 +611,9 @@ def read_mwa_beam( gain_str = "[" + ", ".join(gain_str_list) + "]" self.history += " delays set to " + delay_str + " gains set to " + gain_str - if not utils._check_history_version(self.history, self.pyuvdata_version_str): + if not utils.helpers._check_history_version( + self.history, self.pyuvdata_version_str + ): self.history += self.pyuvdata_version_str self.x_orientation = "east" diff --git a/src/pyuvdata/uvbeam/uvbeam.py b/src/pyuvdata/uvbeam/uvbeam.py index 8d3dd4956c..b23cab94c9 100644 --- a/src/pyuvdata/uvbeam/uvbeam.py +++ b/src/pyuvdata/uvbeam/uvbeam.py @@ -18,6 +18,7 @@ from .. import parameter as uvp from .. import utils from ..docstrings import combine_docstrings, copy_replace_short_description +from ..utils import helpers from ..uvbase import UVBase from . import initializers @@ -2488,7 +2489,7 @@ def __add__( ) # Update filename parameter - this.filename = utils._combine_filenames(this.filename, other.filename) + this.filename = helpers._combine_filenames(this.filename, other.filename) if this.filename is not None: this._filename.form = (len(this.filename),) @@ -2832,7 +2833,7 @@ def __add__( # Check specific requirements if this.Nfreqs > 1: - if not utils._test_array_constant_spacing( + if not helpers._test_array_constant_spacing( this.freq_array, tols=this._freq_array.tols ): warnings.warn( @@ -2841,7 +2842,7 @@ def __add__( ) if self.beam_type == "power" and this.Npols > 2: - if not utils._test_array_constant_spacing(this._polarization_array): + if not helpers._test_array_constant_spacing(this._polarization_array): warnings.warn( "Combined polarizations are not evenly spaced. This will " "make it impossible to write this data out to some file types." @@ -2849,14 +2850,14 @@ def __add__( if n_axes > 0: history_update_string += " axis using pyuvdata." - histories_match = utils._check_histories(this.history, other.history) + histories_match = helpers._check_histories(this.history, other.history) this.history += history_update_string if not histories_match: if verbose_history: this.history += " Next object history follows. 
" + other.history else: - extra_history = utils._combine_history_addition( + extra_history = helpers._combine_history_addition( this.history, other.history ) if extra_history is not None: @@ -2973,7 +2974,7 @@ def select( beam_object.axis1_array = beam_object.axis1_array[axis1_inds] if beam_object.Naxes1 > 1: - if not utils._test_array_constant_spacing(beam_object._axis1_array): + if not helpers._test_array_constant_spacing(beam_object._axis1_array): warnings.warn( "Selected values along first image axis are " "not evenly spaced. This is not supported by " @@ -3005,7 +3006,7 @@ def select( beam_object.axis2_array = beam_object.axis2_array[axis2_inds] if beam_object.Naxes2 > 1: - if not utils._test_array_constant_spacing(beam_object._axis2_array): + if not helpers._test_array_constant_spacing(beam_object._axis2_array): warnings.warn( "Selected values along second image axis are " "not evenly spaced. This is not supported by " @@ -3049,17 +3050,17 @@ def select( ] if freq_chans is not None: - freq_chans = utils._get_iterable(freq_chans) + freq_chans = helpers._get_iterable(freq_chans) if frequencies is None: frequencies = beam_object.freq_array[freq_chans] else: - frequencies = utils._get_iterable(frequencies) + frequencies = helpers._get_iterable(frequencies) frequencies = np.sort( list(set(frequencies) | set(beam_object.freq_array[freq_chans])) ) if frequencies is not None: - frequencies = utils._get_iterable(frequencies) + frequencies = helpers._get_iterable(frequencies) if n_selects > 0: history_update_string += ", frequencies" else: @@ -3085,7 +3086,7 @@ def select( freq_separation = ( beam_object.freq_array[1:] - beam_object.freq_array[:-1] ) - if not utils._test_array_constant( + if not helpers._test_array_constant( freq_separation, tols=beam_object._freq_array.tols ): warnings.warn( @@ -3134,7 +3135,7 @@ def select( if key in beam_object.feed_array: x_orient_dict[value] = key - feeds = utils._get_iterable(feeds) + feeds = helpers._get_iterable(feeds) feeds = [f.lower() for f in feeds] if n_selects > 0: history_update_string += ", feeds" @@ -3180,7 +3181,7 @@ def select( if beam_object.beam_type == "efield": raise ValueError("polarizations cannot be used with efield beams") - polarizations = utils._get_iterable(polarizations) + polarizations = helpers._get_iterable(polarizations) if np.array(polarizations).ndim > 1: polarizations = np.array(polarizations).flatten() @@ -3218,7 +3219,7 @@ def select( beam_object.polarization_array[1:] - beam_object.polarization_array[:-1] ) - if not utils._test_array_constant(pol_separation): + if not helpers._test_array_constant(pol_separation): warnings.warn( "Selected polarizations are not evenly spaced. This " "is not supported by the regularly gridded beam fits format" @@ -3763,7 +3764,7 @@ def read_cst_beam( if not isinstance(filename, (list, tuple)) and filename.endswith("yaml"): # update filelist basename = os.path.basename(filename) - self.filename = utils._combine_filenames(self.filename, [basename]) + self.filename = helpers._combine_filenames(self.filename, [basename]) self._filename.form = (len(self.filename),) def read_mwa_beam(self, h5filepath, **kwargs): diff --git a/src/pyuvdata/uvcal/calfits.py b/src/pyuvdata/uvcal/calfits.py index 6c9356cecd..c5fd3569ea 100644 --- a/src/pyuvdata/uvcal/calfits.py +++ b/src/pyuvdata/uvcal/calfits.py @@ -18,8 +18,9 @@ except ImportError: hasmoon = False -from .. 
import utils from ..docstrings import copy_replace_short_description +from ..utils import helpers +from ..utils.file_io import fits as fits_utils from . import UVCal __all__ = ["CALFITS"] @@ -126,14 +127,14 @@ def write_calfits( "The calfits file format does not support time_range when there is " "more than one time." ) - if not utils._test_array_constant_spacing(self._time_array): + if not helpers._test_array_constant_spacing(self._time_array): raise ValueError( "The times are not evenly spaced (probably " "because of a select operation). The calfits format " "does not support unevenly spaced times." ) time_spacing = np.diff(self.time_array) - if not utils._test_array_constant(self._integration_time): + if not helpers._test_array_constant(self._integration_time): raise ValueError( "The integration times are variable. The calfits format " "does not support variable integration times." @@ -158,7 +159,7 @@ def write_calfits( time_zero = self.time_array[0] if self.Njones > 1: - if not utils._test_array_constant_spacing(self._jones_array): + if not helpers._test_array_constant_spacing(self._jones_array): raise ValueError( "The jones values are not evenly spaced." "The calibration fits file format does not" @@ -511,7 +512,7 @@ def read_calfits( with fits.open(filename) as fname: hdr = fname[0].header.copy() - hdunames = utils._fits_indexhdus(fname) + hdunames = fits_utils._indexhdus(fname) anthdu = fname[hdunames["ANTENNAS"]] self.telescope.Nants = anthdu.header["NAXIS2"] @@ -588,7 +589,7 @@ def read_calfits( self.history = str(hdr.get("HISTORY", "")) - if not utils._check_history_version( + if not helpers._check_history_version( self.history, self.pyuvdata_version_str ): if not self.history.endswith("\n"): @@ -630,9 +631,9 @@ def read_calfits( # generate polarization and time array for either cal_type. self.Njones = hdr.pop("NAXIS2") - self.jones_array = utils._fits_gethduaxis(fname[0], 2) + self.jones_array = fits_utils._gethduaxis(fname[0], 2) self.Ntimes = hdr.pop("NAXIS3") - main_hdr_time_array = utils._fits_gethduaxis(fname[0], 3) + main_hdr_time_array = fits_utils._gethduaxis(fname[0], 3) self.integration_time = np.full(self.Ntimes, hdr.pop("INTTIME")) # needs to come after Ntimes is defined. @@ -658,13 +659,13 @@ def read_calfits( "supported by the calfits format." ) # subtract 1 to be zero-indexed - self.spw_array = utils._fits_gethduaxis(fname[0], 5) - 1 + self.spw_array = fits_utils._gethduaxis(fname[0], 5) - 1 self.Nants_data = hdr.pop("NAXIS6") if self.cal_type == "gain": self._set_gain() self.Nfreqs = hdr.pop("NAXIS4") - self.freq_array = utils._fits_gethduaxis(fname[0], 4) + self.freq_array = fits_utils._gethduaxis(fname[0], 4) self.channel_width = np.full(self.Nfreqs, hdr.pop("CHWIDTH")) self.flex_spw_id_array = np.full( @@ -679,7 +680,7 @@ def read_calfits( "This file appears to have multiple spectral windows, which is not " "supported by the calfits format." 
) - spw_array = utils._fits_gethduaxis(sechdu, 5) - 1 + spw_array = fits_utils._gethduaxis(sechdu, 5) - 1 if not np.allclose(spw_array, self.spw_array): raise ValueError( @@ -687,7 +688,7 @@ def read_calfits( " in primary HDU" ) - time_array = utils._fits_gethduaxis(sechdu, 3) + time_array = fits_utils._gethduaxis(sechdu, 3) if not np.allclose( time_array, main_hdr_time_array, @@ -698,7 +699,7 @@ def read_calfits( "Time values are different in FLAGS HDU than in primary HDU" ) - jones_array = utils._fits_gethduaxis(sechdu, 2) + jones_array = fits_utils._gethduaxis(sechdu, 2) if not np.allclose( jones_array, self.jones_array, @@ -735,7 +736,7 @@ def read_calfits( if "TOTQLTY" in hdunames: totqualhdu = fname[hdunames["TOTQLTY"]] self.total_quality_array = totqualhdu.data[0] - spw_array = utils._fits_gethduaxis(totqualhdu, 4) - 1 + spw_array = fits_utils._gethduaxis(totqualhdu, 4) - 1 if not np.allclose(spw_array, self.spw_array): raise ValueError( "Spectral window values are different in " @@ -747,7 +748,7 @@ def read_calfits( if self.cal_type != "delay": # delay-type files won't have a freq_array - freq_array = utils._fits_gethduaxis(totqualhdu, 3) + freq_array = fits_utils._gethduaxis(totqualhdu, 3) if not np.allclose( freq_array, self.freq_array, @@ -759,7 +760,7 @@ def read_calfits( " in primary HDU" ) - time_array = utils._fits_gethduaxis(totqualhdu, 2) + time_array = fits_utils._gethduaxis(totqualhdu, 2) if not np.allclose( time_array, main_hdr_time_array, @@ -771,7 +772,7 @@ def read_calfits( "primary HDU" ) - jones_array = utils._fits_gethduaxis(totqualhdu, 1) + jones_array = fits_utils._gethduaxis(totqualhdu, 1) if not np.allclose( jones_array, self.jones_array, @@ -786,7 +787,7 @@ def read_calfits( else: self.total_quality_array = None - self.extra_keywords = utils._get_fits_extra_keywords(hdr) + self.extra_keywords = fits_utils._get_extra_keywords(hdr) # wait for LSTs if set in background if proc is not None: diff --git a/src/pyuvdata/uvcal/calh5.py b/src/pyuvdata/uvcal/calh5.py index a7921f0c89..24f4b36ee3 100644 --- a/src/pyuvdata/uvcal/calh5.py +++ b/src/pyuvdata/uvcal/calh5.py @@ -12,9 +12,11 @@ import numpy as np from docstring_parser import DocstringStyle -from .. import hdf5_utils, utils +from .. 
import utils from ..docstrings import copy_replace_short_description from ..telescopes import Telescope +from ..utils import helpers +from ..utils.file_io import hdf5 as hdf5_utils from .uvcal import UVCal hdf5plugin_present = True @@ -266,7 +268,7 @@ def _read_header( # versions allowed one to store this even if it wasn't actually being used optional_parameters.remove("flex_spw_id_array") - if not utils._check_history_version(self.history, self.pyuvdata_version_str): + if not helpers._check_history_version(self.history, self.pyuvdata_version_str): self.history += self.pyuvdata_version_str # Optional parameters @@ -285,14 +287,14 @@ def _read_header( if run_check_acceptability: if self.time_array is not None: - utils.check_lsts_against_times( + helpers.check_lsts_against_times( jd_array=self.time_array, lst_array=self.lst_array, telescope_loc=self.telescope.location, lst_tols=(0, utils.LST_RAD_TOL), ) if self.time_range is not None: - utils.check_lsts_against_times( + helpers.check_lsts_against_times( jd_array=self.time_range, lst_array=self.lst_range, telescope_loc=self.telescope.location, @@ -416,15 +418,15 @@ def _get_data( # no select, read in all the data inds = (np.s_[:], np.s_[:], np.s_[:], np.s_[:]) if self.cal_type == "gain": - self.gain_array = utils._index_dset(dgrp["gains"], inds) + self.gain_array = hdf5_utils._index_dset(dgrp["gains"], inds) else: - self.delay_array = utils._index_dset(dgrp["delays"], inds) - self.flag_array = utils._index_dset(dgrp["flags"], inds) + self.delay_array = hdf5_utils._index_dset(dgrp["delays"], inds) + self.flag_array = hdf5_utils._index_dset(dgrp["flags"], inds) if quality_present: - self.quality_array = utils._index_dset(dgrp["qualities"], inds) + self.quality_array = hdf5_utils._index_dset(dgrp["qualities"], inds) if total_quality_present: tq_inds = (np.s_[:], np.s_[:], np.s_[:]) - self.total_quality_array = utils._index_dset( + self.total_quality_array = hdf5_utils._index_dset( dgrp["total_qualities"], tq_inds ) else: @@ -444,7 +446,7 @@ def _get_data( # TODO: this logic is similar to what is in uvh5. See if an abstracted # version can be pulled out into a util function. 
if ant_inds is not None: - ant_slices, ant_sliceable = utils._convert_to_slices( + ant_slices, ant_sliceable = helpers._convert_to_slices( ant_inds, max_nslice_frac=0.1 ) else: @@ -452,7 +454,7 @@ def _get_data( ant_sliceable = True if time_inds is not None: - time_slices, time_sliceable = utils._convert_to_slices( + time_slices, time_sliceable = helpers._convert_to_slices( time_inds, max_nslice_frac=0.1 ) else: @@ -460,7 +462,7 @@ def _get_data( time_sliceable = True if freq_inds is not None: - freq_slices, freq_sliceable = utils._convert_to_slices( + freq_slices, freq_sliceable = helpers._convert_to_slices( freq_inds, max_nslice_frac=0.1 ) else: @@ -468,7 +470,7 @@ def _get_data( freq_sliceable = True if spw_inds is not None: - spw_slices, spw_sliceable = utils._convert_to_slices( + spw_slices, spw_sliceable = helpers._convert_to_slices( spw_inds, max_nslice_frac=0.1 ) else: @@ -476,7 +478,7 @@ def _get_data( spw_sliceable = True if jones_inds is not None: - jones_slices, jones_sliceable = utils._convert_to_slices( + jones_slices, jones_sliceable = helpers._convert_to_slices( jones_inds, max_nslice_frac=0.5 ) else: @@ -549,13 +551,13 @@ def _get_data( jones_frac = 1 # index datasets - cal_data = utils._index_dset(caldata_dset, inds) - flags = utils._index_dset(flags_dset, inds) + cal_data = hdf5_utils._index_dset(caldata_dset, inds) + flags = hdf5_utils._index_dset(flags_dset, inds) if quality_present: - qualities = utils._index_dset(qualities_dset, inds) + qualities = hdf5_utils._index_dset(qualities_dset, inds) if total_quality_present: tq_inds = inds[1:] - total_qualities = utils._index_dset(total_qualities_dset, tq_inds) + total_qualities = hdf5_utils._index_dset(total_qualities_dset, tq_inds) # down select on other dimensions if necessary # use indices not slices here: generally not the bottleneck if ant_frac < 1: diff --git a/src/pyuvdata/uvcal/fhd_cal.py b/src/pyuvdata/uvcal/fhd_cal.py index 12f0be9f4c..475693c4c1 100644 --- a/src/pyuvdata/uvcal/fhd_cal.py +++ b/src/pyuvdata/uvcal/fhd_cal.py @@ -259,7 +259,9 @@ def read_fhd_cal( else: self.history += "\n" + extra_history - if not utils._check_history_version(self.history, self.pyuvdata_version_str): + if not utils.helpers._check_history_version( + self.history, self.pyuvdata_version_str + ): if self.history.endswith("\n"): self.history += self.pyuvdata_version_str else: diff --git a/src/pyuvdata/uvcal/ms_cal.py b/src/pyuvdata/uvcal/ms_cal.py index be33c7980b..9a2b74bc5e 100644 --- a/src/pyuvdata/uvcal/ms_cal.py +++ b/src/pyuvdata/uvcal/ms_cal.py @@ -11,8 +11,10 @@ from astropy.time import Time from docstring_parser import DocstringStyle -from .. import ms_utils, utils +from .. import utils from ..docstrings import copy_replace_short_description +from ..utils import helpers +from ..utils.file_io import ms as ms_utils from . import UVCal __all__ = ["MSCal"] @@ -532,7 +534,7 @@ def write_ms_cal(self, filename, clobber=False): else: spw_selection = np.equal(self.flex_spw_id_array, spw_id) spw_nchan = sum(spw_selection) - [spw_selection], _ = utils._convert_to_slices( + [spw_selection], _ = helpers._convert_to_slices( spw_selection, max_nslice=1, return_index_on_fail=True ) spw_sel_dict[spw_id] = (spw_selection, spw_nchan) @@ -610,8 +612,8 @@ def write_ms_cal(self, filename, clobber=False): # Determine polarization order for writing out in CASA standard order, check # if this order can be represented by a single slice. 
- pol_order = utils.determine_pol_order(self.jones_array, order="CASA") - [pol_order], _ = utils._convert_to_slices( + pol_order = utils.pol.determine_pol_order(self.jones_array, order="CASA") + [pol_order], _ = helpers._convert_to_slices( pol_order, max_nslice=1, return_index_on_fail=True ) diff --git a/src/pyuvdata/uvcal/uvcal.py b/src/pyuvdata/uvcal/uvcal.py index dae01a6fc1..0ed2b2ce95 100644 --- a/src/pyuvdata/uvcal/uvcal.py +++ b/src/pyuvdata/uvcal/uvcal.py @@ -15,6 +15,7 @@ from .. import parameter as uvp from .. import utils from ..docstrings import combine_docstrings, copy_replace_short_description +from ..utils import helpers from ..uvbase import UVBase from . import initializers @@ -1166,7 +1167,7 @@ def _check_flex_spw_contiguous(self): """ if not self.wide_band: - utils._check_flex_spw_contiguous( + helpers._check_flex_spw_contiguous( spw_array=self.spw_array, flex_spw_id_array=self.flex_spw_id_array ) @@ -1191,7 +1192,7 @@ def _check_freq_spacing(self, *, raise_errors=True): """ if (self.freq_array is None) or (self.Nfreqs == 1): return False, False - return utils._check_freq_spacing( + return helpers._check_freq_spacing( freq_array=self.freq_array, freq_tols=self._freq_array.tols, channel_width=self.channel_width, @@ -1308,7 +1309,7 @@ def _add_phase_center( source without coordinates. """ - cat_entry = utils.generate_phase_center_cat_entry( + cat_entry = utils.ps_cat.generate_phase_center_cat_entry( cat_name=cat_name, cat_type=cat_type, cat_lon=cat_lon, @@ -1327,7 +1328,7 @@ def _add_phase_center( # The logic below ensures that we pick the lowest positive integer that is # not currently being used by another source if cat_id is None or not force_update: - cat_id = utils.generate_new_phase_center_id( + cat_id = utils.ps_cat.generate_new_phase_center_id( phase_center_catalog=self.phase_center_catalog, cat_id=cat_id ) @@ -1336,7 +1337,7 @@ def _add_phase_center( self.phase_center_catalog = {} else: # Let's warn if this entry has the same name as an existing one - temp_id, cat_diffs = utils.look_in_catalog( + temp_id, cat_diffs = utils.ps_cat.look_in_catalog( self.phase_center_catalog, phase_dict=cat_entry ) @@ -1451,7 +1452,7 @@ def print_phase_center_info( ValueError If `cat_name` matches no keys in `phase_center_catalog`. """ - return utils.print_phase_center_info( + return utils.ps_cat.print_phase_center_info( self.phase_center_catalog, catalog_identifier=catalog_identifier, hms_format=hms_format, @@ -1483,7 +1484,7 @@ def _update_phase_center_id(self, cat_id, *, new_id=None, reserved_ids=None): If not using the method on a multi-phase-ctr data set, if there's no entry that matches `cat_name`, or of the value `new_id` is already taken. """ - new_id = utils.generate_new_phase_center_id( + new_id = utils.ps_cat.generate_new_phase_center_id( phase_center_catalog=self.phase_center_catalog, cat_id=new_id, old_id=cat_id, @@ -1553,7 +1554,7 @@ def _consolidate_phase_center_catalogs( # testing it's sometimes convenient to use self.phase_center_catalog as # the ref catalog, which causes a RunTime error due to updates to the dict. 
cat_entry = reference_catalog[cat_id] - match_id, match_diffs = utils.look_in_catalog( + match_id, match_diffs = utils.ps_cat.look_in_catalog( self.phase_center_catalog, phase_dict=cat_entry, ignore_name=ignore_name ) if match_id is None or match_diffs != 0: @@ -1666,7 +1667,7 @@ def check( # check that time ranges are well formed and do not overlap if self.time_range is not None: - if utils._check_range_overlap(self.time_range): + if helpers._check_range_overlap(self.time_range): raise ValueError("Some time_ranges overlap.") # note: do not check lst range overlap because of branch cut. # Assume they are ok if time_ranges are ok. @@ -1723,21 +1724,21 @@ def check( if run_check_acceptability: # Check antenna positions - utils.check_surface_based_positions( + helpers.check_surface_based_positions( antenna_positions=self.telescope.antenna_positions, telescope_loc=self.telescope.location, raise_error=False, ) if self.time_array is not None: - utils.check_lsts_against_times( + helpers.check_lsts_against_times( jd_array=self.time_array, lst_array=self.lst_array, telescope_loc=self.telescope.location, lst_tols=self._lst_array.tols if lst_tol is None else [0, lst_tol], ) if self.time_range is not None: - utils.check_lsts_against_times( + helpers.check_lsts_against_times( jd_array=self.time_range, lst_array=self.lst_range, telescope_loc=self.telescope.location, @@ -1866,7 +1867,7 @@ def _slice_array(self, key, data_array, *, squeeze_pol=True): :class: numpy ndarray Slice of the data_array for the key. """ - key = utils._get_iterable(key) + key = helpers._get_iterable(key) if len(key) == 1: # interpret as a single antenna output = data_array[self.ant2ind(key[0]), :, :, :] @@ -2212,7 +2213,7 @@ def reorder_freqs( index_array = np.flip(index_array) else: - index_array = utils._sort_freq_helper( + index_array = helpers._sort_freq_helper( Nfreqs=self.Nfreqs, freq_array=self.freq_array, Nspws=self.Nspws, @@ -2692,7 +2693,7 @@ def __add__( ) if this.time_range is not None: - if utils._check_range_overlap( + if helpers._check_range_overlap( np.concatenate((this.time_range, other.time_range), axis=0) ): raise ValueError("A time_range overlaps in the two objects.") @@ -2828,7 +2829,7 @@ def __add__( this.reorder_jones(temp_ind) # Update filename parameter - this.filename = utils._combine_filenames(this.filename, other.filename) + this.filename = helpers._combine_filenames(this.filename, other.filename) if this.filename is not None: this._filename.form = (len(this.filename),) @@ -3339,7 +3340,7 @@ def __add__( ) if this.Njones > 2: - if not utils._test_array_constant_spacing(this._jones_array): + if not helpers._test_array_constant_spacing(this._jones_array): warnings.warn( "Combined Jones elements are not evenly spaced. This will " "make it impossible to write this data out to calfits files." @@ -3348,14 +3349,14 @@ def __add__( if n_axes > 0: history_update_string += " axis using pyuvdata." - histories_match = utils._check_histories(this.history, other.history) + histories_match = helpers._check_histories(this.history, other.history) this.history += history_update_string if not histories_match: if verbose_history: this.history += " Next object history follows. " + other.history else: - extra_history = utils._combine_history_addition( + extra_history = helpers._combine_history_addition( this.history, other.history ) if extra_history is not None: @@ -3608,7 +3609,7 @@ def fast_concat( history_update_string += " axis using pyuvdata." 
histories_match = [] for obj in other: - histories_match.append(utils._check_histories(this.history, obj.history)) + histories_match.append(helpers._check_histories(this.history, obj.history)) this.history += history_update_string for obj_num, obj in enumerate(other): @@ -3616,7 +3617,7 @@ def fast_concat( if verbose_history: this.history += " Next object history follows. " + obj.history else: - extra_history = utils._combine_history_addition( + extra_history = helpers._combine_history_addition( this.history, obj.history ) if extra_history is not None: @@ -3853,7 +3854,7 @@ def fast_concat( # update filename attribute for obj in other: - this.filename = utils._combine_filenames(this.filename, obj.filename) + this.filename = helpers._combine_filenames(this.filename, obj.filename) if this.filename is not None: this._filename.form = len(this.filename) @@ -3969,7 +3970,7 @@ def _select_preprocess( "Only one of antenna_nums and antenna_names can be provided." ) - antenna_names = utils._get_iterable(antenna_names) + antenna_names = helpers._get_iterable(antenna_names) antenna_nums = [] for s in antenna_names: if s not in self.telescope.antenna_names: @@ -3980,7 +3981,7 @@ def _select_preprocess( antenna_nums.append(self.telescope.antenna_numbers[ind]) if antenna_nums is not None: - antenna_nums = utils._get_iterable(antenna_nums) + antenna_nums = helpers._get_iterable(antenna_nums) history_update_string += "antennas" n_selects += 1 @@ -4009,11 +4010,11 @@ def _select_preprocess( ) if catalog_names is not None: - phase_center_ids = utils.look_for_name( + phase_center_ids = utils.ps_cat.look_for_name( self.phase_center_catalog, catalog_names ) - time_inds = utils._select_times_helper( + time_inds = helpers._select_times_helper( times=times, time_range=time_range, lsts=lsts, @@ -4044,7 +4045,7 @@ def _select_preprocess( if phase_center_ids is not None: pc_check = np.isin(self.phase_center_id_array, phase_center_ids) - time_inds = utils._sorted_unique_intersection( + time_inds = helpers._sorted_unique_intersection( np.where(pc_check)[0], time_inds ) @@ -4063,7 +4064,7 @@ def _select_preprocess( time_inds_arr = np.array(time_inds) if time_inds_arr.size > 1: time_ind_separation = time_inds_arr[1:] - time_inds_arr[:-1] - if not utils._test_array_constant(time_ind_separation): + if not helpers._test_array_constant(time_ind_separation): warnings.warn( "Selected times are not evenly spaced. This " "is not supported by the calfits format." 
@@ -4078,7 +4079,7 @@ def _select_preprocess( else: if not self.wide_band: # Translate the spws into frequencies - freq_chans = utils._sorted_unique_union( + freq_chans = helpers._sorted_unique_union( np.where(np.isin(self.flex_spw_id_array, spws))[0], freq_chans ) spw_inds = None @@ -4110,7 +4111,7 @@ def _select_preprocess( ) if frequencies is not None: - frequencies = utils._get_iterable(frequencies) + frequencies = helpers._get_iterable(frequencies) freq_arr_use = self.freq_array freq_check = np.isin(frequencies, freq_arr_use) @@ -4120,7 +4121,7 @@ def _select_preprocess( "present in the freq_array" ) - freq_chans = utils._sorted_unique_union( + freq_chans = helpers._sorted_unique_union( np.where(np.isin(freq_arr_use, frequencies))[0], freq_chans ) @@ -4135,7 +4136,7 @@ def _select_preprocess( if frequencies is not None: pass - freq_inds = np.array(sorted(utils._get_iterable(freq_chans))) + freq_inds = np.array(sorted(helpers._get_iterable(freq_chans))) if len(freq_inds) > 1: freq_ind_separation = freq_inds[1:] - freq_inds[:-1] @@ -4143,7 +4144,7 @@ def _select_preprocess( freq_ind_separation = freq_ind_separation[ np.diff(self.flex_spw_id_array[freq_inds]) == 0 ] - if not utils._test_array_constant(freq_ind_separation): + if not helpers._test_array_constant(freq_ind_separation): warnings.warn( "Selected frequencies are not evenly spaced. This " "will make it impossible to write this data out to " @@ -4161,7 +4162,7 @@ def _select_preprocess( freq_inds = None if jones is not None: - jones = utils._get_iterable(jones) + jones = helpers._get_iterable(jones) if np.array(jones).ndim > 1: jones = np.array(jones).flatten() if n_selects > 0: @@ -4202,10 +4203,10 @@ def _select_preprocess( jones_chans = np.where( np.isin(self.flex_spw_id_array, self.spw_array[jones_spws]) )[0] - freq_inds = utils._sorted_unique_intersection( + freq_inds = helpers._sorted_unique_intersection( jones_chans, freq_inds ) - spw_inds = utils._sorted_unique_intersection(jones_spws, spw_inds) + spw_inds = helpers._sorted_unique_intersection(jones_spws, spw_inds) # Trap a corner case here where the frequency and polarization selects # on a flex-pol data set end up with no actual data being selected. @@ -4214,12 +4215,12 @@ def _select_preprocess( "No data matching this Jones selection in this flex-Jones " " UVCal object." ) - spacing_check = utils._test_array_constant_spacing( + spacing_check = helpers._test_array_constant_spacing( np.unique(self.flex_jones_array[spw_inds]) ) else: jones_inds = sorted(set(jones_inds)) - spacing_check = utils._test_array_constant_spacing( + spacing_check = helpers._test_array_constant_spacing( self.jones_array[jones_inds] ) if not spacing_check: diff --git a/src/pyuvdata/uvcalibrate.py b/src/pyuvdata/uvcalibrate.py new file mode 100644 index 0000000000..19b54edc03 --- /dev/null +++ b/src/pyuvdata/uvcalibrate.py @@ -0,0 +1,421 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Code to apply calibration solutions to visibility data.""" +import warnings + +import numpy as np + +from .utils.pol import POL_TO_FEED_DICT, jnum2str, parse_jpolstr, polnum2str, polstr2num + + +def uvcalibrate( + uvdata, + uvcal, + *, + inplace=True, + prop_flags=True, + d_term_cal=False, + flip_gain_conj=False, + delay_convention="minus", + undo=False, + time_check=True, + ant_check=True, +): + """ + Calibrate a UVData object with a UVCal object. 
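+
+    A minimal usage sketch (``uvd`` and ``uvc`` are placeholders for a UVData
+    object and a compatible UVCal object)::
+
+        calibrated = uvcalibrate(uvd, uvc, inplace=False)
+        # undoing the same calibration recovers the uncalibrated data
+        raw = uvcalibrate(calibrated, uvc, inplace=False, undo=True)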
+
+    Parameters
+    ----------
+    uvdata : UVData object
+        UVData object to calibrate.
+    uvcal : UVCal object
+        UVCal object containing the calibration.
+    inplace : bool, optional
+        If True, edit uvdata in place, else return a calibrated copy.
+    prop_flags : bool, optional
+        If True, propagate calibration flags to data flags and do not use
+        flagged gains. Otherwise, use flagged gains and do not propagate
+        calibration flags to data flags.
+    d_term_cal : bool, optional
+        Calibrate the off-diagonal terms in the Jones matrix if present
+        in uvcal. Default is False. Currently not implemented.
+    flip_gain_conj : bool, optional
+        This function uses the UVData ant_1_array and ant_2_array to specify the
+        antennas in the UVCal object. By default, the conjugation convention, which
+        follows the UVData convention (i.e. ant2 - ant1), is that the applied
+        gain = ant1_gain * conjugate(ant2_gain). If the other convention is required,
+        set flip_gain_conj=True.
+    delay_convention : str, optional
+        Exponent sign to use in conversion of 'delay' to 'gain' cal_type
+        if the input uvcal is not inherently 'gain' cal_type. Default is 'minus'.
+    undo : bool, optional
+        If True, undo the provided calibration, i.e. apply the calibration with
+        flipped gain_convention. Flag propagation rules apply the same.
+    time_check : bool
+        Option to check that times match between the UVCal and UVData
+        objects if UVCal has a single time or time range. Times are always
+        checked if UVCal has multiple times.
+    ant_check : bool
+        Option to check that all antennas with data on the UVData
+        object have calibration solutions in the UVCal object. If this option is
+        set to False, uvcalibrate will proceed without erroring and data for
+        antennas without calibrations will be flagged.
+
+    Returns
+    -------
+    UVData, optional
+        The calibrated UVData object, returned only if `inplace` is False.
+
+    """
+    if uvcal.cal_type == "gain" and uvcal.wide_band:
+        raise ValueError(
+            "uvcalibrate currently does not support wide-band calibrations"
+        )
+    if uvcal.cal_type == "delay" and uvcal.Nspws > 1:
+        # To fix this, need to make UVCal.convert_to_gain support multiple spws
+        raise ValueError(
+            "uvcalibrate currently does not support multi spectral window delay "
+            "calibrations"
+        )
+
+    if not inplace:
+        uvdata = uvdata.copy()
+
+    # check both objects
+    uvdata.check()
+    uvcal.check()
+
+    # Check whether the UVData antennas *that have data associated with them*
+    # have associated data in the UVCal object
+    uvdata_unique_nums = np.unique(np.append(uvdata.ant_1_array, uvdata.ant_2_array))
+    uvdata.telescope.antenna_names = np.asarray(uvdata.telescope.antenna_names)
+    uvdata_used_antnames = np.array(
+        [
+            uvdata.telescope.antenna_names[
+                np.where(uvdata.telescope.antenna_numbers == antnum)
+            ][0]
+            for antnum in uvdata_unique_nums
+        ]
+    )
+    uvcal_unique_nums = np.unique(uvcal.ant_array)
+    uvcal.telescope.antenna_names = np.asarray(uvcal.telescope.antenna_names)
+    uvcal_used_antnames = np.array(
+        [
+            uvcal.telescope.antenna_names[
+                np.where(uvcal.telescope.antenna_numbers == antnum)
+            ][0]
+            for antnum in uvcal_unique_nums
+        ]
+    )
+
+    ant_arr_match = uvcal_used_antnames.tolist() == uvdata_used_antnames.tolist()
+
+    if not ant_arr_match:
+        # check more carefully
+        name_missing = []
+        for this_ant_name in uvdata_used_antnames:
+            wh_ant_match = np.nonzero(uvcal_used_antnames == this_ant_name)
+            if wh_ant_match[0].size == 0:
+                name_missing.append(this_ant_name)
+
+        if len(name_missing) > 0:
+            if len(name_missing) == uvdata_used_antnames.size:
+                # all antenna_names with data on UVData are missing on UVCal.
+                if not ant_check:
+                    warnings.warn(
+                        "All antenna names with data on UVData are missing "
+                        "on UVCal. Since ant_check is False, calibration will "
+                        "proceed but all data will be flagged."
+                    )
+                else:
+                    raise ValueError(
+                        "All antenna names with data on UVData are missing "
+                        "on UVCal. To continue with calibration "
+                        "(and flag all the data), set ant_check=False."
+                    )
+            else:
+                # Only some antenna_names with data on UVData are missing on UVCal
+                if not ant_check:
+                    warnings.warn(
+                        f"Antennas {name_missing} have data on UVData but are missing "
+                        "on UVCal. Since ant_check is False, calibration will "
+                        "proceed and the data for these antennas will be flagged."
+                    )
+                else:
+                    raise ValueError(
+                        f"Antennas {name_missing} have data on UVData but "
+                        "are missing on UVCal. To continue calibration and "
+                        "flag the data from missing antennas, set ant_check=False."
+                    )
+
+    uvdata_times, uvd_time_ri = np.unique(uvdata.time_array, return_inverse=True)
+    downselect_cal_times = False
+    # time_range supersedes time_array.
+    if uvcal.time_range is not None:
+        if np.min(uvdata_times) < np.min(uvcal.time_range[:, 0]) or np.max(
+            uvdata_times
+        ) > np.max(uvcal.time_range[:, 1]):
+            if not time_check and uvcal.Ntimes == 1:
+                warnings.warn(
+                    "Time_range on UVCal does not cover all UVData times "
+                    "but time_check is False, so calibration "
+                    "will be applied anyway."
+                )
+            else:
+                if uvcal.Ntimes == 1:
+                    msg = (
+                        "Time_range on UVCal does not cover all UVData times. "
+                        "Set time_check=False to apply calibration anyway."
+                    )
+                else:
+                    msg = "Time_ranges on UVCal do not cover all UVData times."
+                raise ValueError(msg)
+
+        # now check in detail that all UVData times fall in a UVCal time range.
+        # also create the indexing array to match UVData blts to UVCal time inds
+        if uvcal.Ntimes > 1:
+            trange_ind_arr = np.full_like(uvdata.time_array, -1, dtype=int)
+            for tr_ind, trange in enumerate(uvcal.time_range):
+                time_inds = np.nonzero(
+                    (uvdata_times >= trange[0]) & (uvdata_times <= trange[1])
+                )[0]
+                for tind in time_inds:
+                    trange_ind_arr[np.nonzero(uvd_time_ri == tind)[0]] = tr_ind
+            if np.any(trange_ind_arr < 0):
+                raise ValueError("Time_ranges on UVCal do not cover all UVData times.")
+    else:
+        if uvcal.Ntimes > 1 and uvcal.Ntimes < uvdata.Ntimes:
+            raise ValueError(
+                "The uvcal object has more than one time but fewer than the "
+                "number of unique times on the uvdata object."
+            )
+        uvcal_times = np.unique(uvcal.time_array)
+        try:
+            time_arr_match = np.allclose(
+                uvcal_times,
+                uvdata_times,
+                atol=uvdata._time_array.tols[1],
+                rtol=uvdata._time_array.tols[0],
+            )
+        except ValueError:
+            time_arr_match = False
+
+        if not time_arr_match:
+            if uvcal.Ntimes == 1:
+                if not time_check:
+                    warnings.warn(
+                        "Times do not match between UVData and UVCal "
+                        "but time_check is False, so calibration "
+                        "will be applied anyway."
+                    )
+                else:
+                    raise ValueError(
+                        "Times do not match between UVData and UVCal. "
+                        "Set time_check=False to apply calibration anyway."
+                    )
+            else:
+                # check more carefully
+                uvcal_times_to_keep = []
+                for this_time in uvdata_times:
+                    wh_time_match = np.nonzero(
+                        np.isclose(
+                            uvcal.time_array - this_time,
+                            0,
+                            atol=uvdata._time_array.tols[1],
+                            rtol=uvdata._time_array.tols[0],
+                        )
+                    )
+                    if wh_time_match[0].size > 0:
+                        uvcal_times_to_keep.append(uvcal.time_array[wh_time_match][0])
+                    else:
+                        raise ValueError(
+                            f"Time {this_time} exists on UVData but not on UVCal."
+                        )
+                if len(uvcal_times_to_keep) < uvcal.Ntimes:
+                    downselect_cal_times = True
+
+    downselect_cal_freq = False
+    if uvcal.freq_array is not None:
+        uvdata_freq_arr_use = uvdata.freq_array
+        uvcal_freq_arr_use = uvcal.freq_array
+        try:
+            freq_arr_match = np.allclose(
+                np.sort(uvcal_freq_arr_use),
+                np.sort(uvdata_freq_arr_use),
+                atol=uvdata._freq_array.tols[1],
+                rtol=uvdata._freq_array.tols[0],
+            )
+        except ValueError:
+            freq_arr_match = False
+
+        if not freq_arr_match:
+            # check more carefully
+            uvcal_freqs_to_keep = []
+            for this_freq in uvdata_freq_arr_use:
+                wh_freq_match = np.nonzero(
+                    np.isclose(
+                        uvcal.freq_array - this_freq,
+                        0,
+                        atol=uvdata._freq_array.tols[1],
+                        rtol=uvdata._freq_array.tols[0],
+                    )
+                )
+                if wh_freq_match[0].size > 0:
+                    uvcal_freqs_to_keep.append(uvcal.freq_array[wh_freq_match][0])
+                else:
+                    raise ValueError(
+                        f"Frequency {this_freq} exists on UVData but not on UVCal."
+                    )
+            if len(uvcal_freqs_to_keep) < uvcal.Nfreqs:
+                downselect_cal_freq = True
+
+    # check if uvdata.telescope.x_orientation isn't set (it's required for uvcal)
+    uvd_x = uvdata.telescope.x_orientation
+    if uvd_x is None:
+        # use the uvcal x_orientation throughout
+        uvd_x = uvcal.telescope.x_orientation
+        warnings.warn(
+            "UVData object does not have `x_orientation` specified but UVCal does. "
+            "Matching based on `x` and `y` only."
+        )
+
+    uvdata_pol_strs = polnum2str(uvdata.polarization_array, x_orientation=uvd_x)
+    uvcal_pol_strs = jnum2str(
+        uvcal.jones_array, x_orientation=uvcal.telescope.x_orientation
+    )
+    uvdata_feed_pols = {
+        feed for pol in uvdata_pol_strs for feed in POL_TO_FEED_DICT[pol]
+    }
+    for feed in uvdata_feed_pols:
+        # get diagonal jones str
+        jones_str = parse_jpolstr(feed, x_orientation=uvcal.telescope.x_orientation)
+        if jones_str not in uvcal_pol_strs:
+            raise ValueError(
+                f"Feed polarization {feed} exists on UVData but not on UVCal."
+            )
+
+    # downselect UVCal times, frequencies
+    if downselect_cal_freq or downselect_cal_times:
+        if not downselect_cal_times:
+            uvcal_times_to_keep = None
+        elif not downselect_cal_freq:
+            uvcal_freqs_to_keep = None
+
+        uvcal_use = uvcal.select(
+            times=uvcal_times_to_keep, frequencies=uvcal_freqs_to_keep, inplace=False
+        )
+
+        new_uvcal = True
+    else:
+        uvcal_use = uvcal
+        new_uvcal = False
+
+    # input checks
+    if uvcal_use.cal_type == "delay":
+        if not new_uvcal:
+            # make a copy to convert to gain
+            uvcal_use = uvcal_use.copy()
+            new_uvcal = True
+        freq_array_use = uvdata.freq_array
+        channel_width = uvdata.channel_width
+        uvcal_use.convert_to_gain(
+            delay_convention=delay_convention,
+            freq_array=freq_array_use,
+            channel_width=channel_width,
+        )
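+
+    # Note on the convention applied in the calibration loop below (antenna
+    # numbers and feeds here are illustrative): for a baseline (1, 2) with
+    # polarization "xx", the gain applied to the data is
+    #     gain = g(1, "x") * np.conj(g(2, "x"))
+    # (conjugated the other way if flip_gain_conj=True). The data are divided
+    # by this gain when gain_convention is "divide" and multiplied by it when
+    # "multiply", with the operation flipped if undo=True.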
+
+    # D-term calibration
+    if d_term_cal:
+        # check for D-terms
+        if -7 not in uvcal_use.jones_array and -8 not in uvcal_use.jones_array:
+            raise ValueError(
+                "Cannot apply D-term calibration without -7 or -8 "
+                "Jones polarization in uvcal object."
+            )
+        raise NotImplementedError("D-term calibration is not yet implemented.")
+
+    # No D-term calibration
+    else:
+        # key is number, value is name
+        uvdata_ant_dict = dict(
+            zip(uvdata.telescope.antenna_numbers, uvdata.telescope.antenna_names)
+        )
+        # opposite: key is name, value is number
+        uvcal_ant_dict = dict(
+            zip(uvcal.telescope.antenna_names, uvcal.telescope.antenna_numbers)
+        )
+
+        # iterate over keys
+        for key in uvdata.get_antpairpols():
+            # get indices for this key
+            blt_inds = uvdata.antpair2ind(key)
+            pol_ind = np.argmin(
+                np.abs(uvdata.polarization_array - polstr2num(key[2], uvd_x))
+            )
+
+            # try to get gains for each antenna
+            ant1_num = key[0]
+            ant2_num = key[1]
+
+            feed1, feed2 = POL_TO_FEED_DICT[key[2]]
+            try:
+                uvcal_ant1_num = uvcal_ant_dict[uvdata_ant_dict[ant1_num]]
+            except KeyError:
+                uvcal_ant1_num = None
+            try:
+                uvcal_ant2_num = uvcal_ant_dict[uvdata_ant_dict[ant2_num]]
+            except KeyError:
+                uvcal_ant2_num = None
+
+            if (uvcal_ant1_num is None or uvcal_ant2_num is None) or not (
+                uvcal_use._key_exists(antnum=uvcal_ant1_num, jpol=feed1)
+                and uvcal_use._key_exists(antnum=uvcal_ant2_num, jpol=feed2)
+            ):
+                uvdata.flag_array[blt_inds, :, pol_ind] = True
+                continue
+
+            uvcal_key1 = (uvcal_ant1_num, feed1)
+            uvcal_key2 = (uvcal_ant2_num, feed2)
+            if flip_gain_conj:
+                gain = (
+                    np.conj(uvcal_use.get_gains(uvcal_key1))
+                    * uvcal_use.get_gains(uvcal_key2)
+                ).T  # transpose to match uvdata shape
+            else:
+                gain = (
+                    uvcal_use.get_gains(uvcal_key1)
+                    * np.conj(uvcal_use.get_gains(uvcal_key2))
+                ).T  # transpose to match uvdata shape
+            flag = (uvcal_use.get_flags(uvcal_key1) | uvcal_use.get_flags(uvcal_key2)).T
+
+            if uvcal.time_range is not None and uvcal.Ntimes > 1:
+                gain = gain[trange_ind_arr[blt_inds], :]
+                flag = flag[trange_ind_arr[blt_inds], :]
+
+            # propagate flags
+            if prop_flags:
+                mask = np.isclose(gain, 0.0) | flag
+                gain[mask] = 1.0
+                uvdata.flag_array[blt_inds, :, pol_ind] += mask
+
+            # apply to data
+            mult_gains = uvcal_use.gain_convention == "multiply"
+            if undo:
+                mult_gains = not mult_gains
+            if mult_gains:
+                uvdata.data_array[blt_inds, :, pol_ind] *= gain
+            else:
+                uvdata.data_array[blt_inds, :, pol_ind] /= gain
+
+    # update attributes
+    uvdata.history += "\nCalibrated with pyuvdata.uvcalibrate."
+    if undo:
+        uvdata.vis_units = "uncalib"
+    else:
+        if uvcal_use.gain_scale is not None:
+            uvdata.vis_units = uvcal_use.gain_scale
+
+    if not inplace:
+        return uvdata
diff --git a/src/pyuvdata/uvdata/fhd.py b/src/pyuvdata/uvdata/fhd.py
index b653e7582e..0d0d140bb7 100644
--- a/src/pyuvdata/uvdata/fhd.py
+++ b/src/pyuvdata/uvdata/fhd.py
@@ -585,7 +585,7 @@ def read_fhd(
         # because they depend on the phasing of the visibilities)
         # the values in bl_info.JDATE are the JD for each integration.
         # We need to expand up to Nblts.
- int_times = list(utils._get_iterable(bl_info["JDATE"][0])) + int_times = list(utils.helpers._get_iterable(bl_info["JDATE"][0])) bin_offset = bl_info["BIN_OFFSET"][0] if self.Ntimes != len(int_times): warnings.warn( @@ -772,7 +772,9 @@ def read_fhd( else: self.history = "" - if not utils._check_history_version(self.history, self.pyuvdata_version_str): + if not utils.helpers._check_history_version( + self.history, self.pyuvdata_version_str + ): self.history += self.pyuvdata_version_str if read_data: diff --git a/src/pyuvdata/uvdata/initializers.py b/src/pyuvdata/uvdata/initializers.py index 135fae03df..b8b07a6abd 100644 --- a/src/pyuvdata/uvdata/initializers.py +++ b/src/pyuvdata/uvdata/initializers.py @@ -211,7 +211,7 @@ def configure_blt_rectangularity( ) (blts_are_rectangular, time_axis_faster_than_bls) = ( - utils.determine_rectangularity( + utils.helpers.determine_rectangularity( time_array=times, baseline_array=baselines, nbls=nbl, ntimes=nt ) ) diff --git a/src/pyuvdata/uvdata/mir.py b/src/pyuvdata/uvdata/mir.py index bf7a1fea69..ca2806e6af 100644 --- a/src/pyuvdata/uvdata/mir.py +++ b/src/pyuvdata/uvdata/mir.py @@ -706,7 +706,7 @@ def _init_from_mir_parser( time_arr = Time( mir_data.in_data["mjd"][source_mask], scale="tt", format="mjd" ).utc.jd - source_ra, source_dec = utils.transform_app_to_icrs( + source_ra, source_dec = utils.phasing.transform_app_to_icrs( time_array=time_arr, app_ra=mir_data.in_data["ara"][source_mask], app_dec=mir_data.in_data["adec"][source_mask], @@ -749,7 +749,7 @@ def _init_from_mir_parser( # frame (ICRS) and applying the rotation below (via `calc_uvw`). self._set_app_coords_helper(pa_only=True) - self.uvw_array = utils.calc_uvw( + self.uvw_array = utils.phasing.calc_uvw( uvw_array=self.uvw_array, old_frame_pa=0.0, frame_pa=self.phase_center_frame_pa, diff --git a/src/pyuvdata/uvdata/mir_parser.py b/src/pyuvdata/uvdata/mir_parser.py index 8b80f7f5d3..c4bc9cc71b 100644 --- a/src/pyuvdata/uvdata/mir_parser.py +++ b/src/pyuvdata/uvdata/mir_parser.py @@ -4130,7 +4130,7 @@ def _make_v3_compliant(self): from astropy.time import Time - from .. import utils as uvutils + from .. import utils from ..telescopes import known_telescope_location # First thing -- we only want modern (i.e., SWARM) data, since the older (ASIC) @@ -4162,12 +4162,12 @@ def _make_v3_compliant(self): mjd_arr = Time(mjd_arr, format="mjd", scale="utc").tt.mjd # Calculate the LST at the time of obs - lst_arr = (12.0 / np.pi) * uvutils.get_lst_for_time( + lst_arr = (12.0 / np.pi) * utils.get_lst_for_time( jd_array=jd_arr, telescope_loc=telescope_location ) # Finally, calculate the apparent coordinates based on what we have in the data - app_ra, app_dec = uvutils.calc_app_coords( + app_ra, app_dec = utils.phasing.calc_app_coords( lon_coord=self.in_data["rar"], lat_coord=self.in_data["decr"], time_array=jd_arr, diff --git a/src/pyuvdata/uvdata/miriad.py b/src/pyuvdata/uvdata/miriad.py index 306e6b61f1..f1fcc4083e 100644 --- a/src/pyuvdata/uvdata/miriad.py +++ b/src/pyuvdata/uvdata/miriad.py @@ -18,6 +18,7 @@ from .. import known_telescope_location, utils from ..docstrings import copy_replace_short_description +from ..utils import helpers from . 
import UVData from .uvdata import reporting_request @@ -211,7 +212,7 @@ def _load_miriad_variables(self, uv): self.spw_array = np.arange(self.Nspws) self.history = uv["history"] - if not utils._check_history_version(self.history, self.pyuvdata_version_str): + if not helpers._check_history_version(self.history, self.pyuvdata_version_str): self.history += self.pyuvdata_version_str # check for pyuvdata variables that are not recognized miriad variables @@ -429,7 +430,7 @@ def _load_antpos(self, uv, *, sorted_unique_ants=None, correct_lat_lon=True): self.telescope.location = EarthLocation.from_geocentric( *np.mean(ecef_antpos[good_antpos, :], axis=0) * units.m ) - valid_location = utils.check_surface_based_positions( + valid_location = helpers.check_surface_based_positions( telescope_loc=self.telescope.location, raise_error=False, raise_warning=False, @@ -1408,7 +1409,7 @@ def read_miriad( # which do not test as matching, so also test for all nans if not np.all( np.isnan(epoch_list[select_mask]) - ) and not utils._test_array_constant( + ) and not helpers._test_array_constant( epoch_list[select_mask], tols=(1e-05, 1e-08) ): # This is unusual but allowed within Miriad. @@ -1440,10 +1441,10 @@ def read_miriad( cat_frame = "fk5" radian_tols = self._phase_center_app_ra.tols - this_single_ra = utils._test_array_constant( + this_single_ra = helpers._test_array_constant( ra_list[select_mask], tols=radian_tols ) - this_single_dec = utils._test_array_constant( + this_single_dec = helpers._test_array_constant( dec_list[select_mask], tols=radian_tols ) if not cat_type == "unprojected" and ( @@ -1463,7 +1464,7 @@ def read_miriad( ) if np.max(counts) > 1: for t_ind in np.arange(unique_times.size): - if not utils._test_array_constant( + if not helpers._test_array_constant( lon_use[inverse == t_ind], tols=radian_tols ): raise ValueError( @@ -1471,7 +1472,7 @@ def read_miriad( "different baselines at the same time." + reporting_request ) - if not utils._test_array_constant( + if not helpers._test_array_constant( lat_use[inverse == t_ind], tols=radian_tols ): raise ValueError( diff --git a/src/pyuvdata/uvdata/ms.py b/src/pyuvdata/uvdata/ms.py index 01f493d3d3..90356fb224 100644 --- a/src/pyuvdata/uvdata/ms.py +++ b/src/pyuvdata/uvdata/ms.py @@ -14,8 +14,10 @@ from astropy.time import Time from docstring_parser import DocstringStyle -from .. import ms_utils, utils +from .. import utils from ..docstrings import copy_replace_short_description +from ..utils import helpers +from ..utils.file_io import ms as ms_utils from . import UVData __all__ = ["MS"] @@ -122,8 +124,8 @@ def write_ms( # Determine polarization order for writing out in CASA standard order, check # if this order can be represented by a single slice. - pol_order = utils.determine_pol_order(self.polarization_array, order="CASA") - [pol_order], _ = utils._convert_to_slices( + pol_order = utils.pol.determine_pol_order(self.polarization_array, order="CASA") + [pol_order], _ = helpers._convert_to_slices( pol_order, max_nslice=1, return_index_on_fail=True ) @@ -243,7 +245,7 @@ def write_ms( # See if we can represent scan_screen with a single slice, which # reduces overhead of copying a new array. 
- [scan_slice], _ = utils._convert_to_slices( + [scan_slice], _ = helpers._convert_to_slices( scan_screen, max_nslice=1, return_index_on_fail=True ) diff --git a/src/pyuvdata/uvdata/mwa_corr_fits.py b/src/pyuvdata/uvdata/mwa_corr_fits.py index 94d767fd48..e2890e3116 100644 --- a/src/pyuvdata/uvdata/mwa_corr_fits.py +++ b/src/pyuvdata/uvdata/mwa_corr_fits.py @@ -20,6 +20,8 @@ from .. import Telescope, _corr_fits, utils from ..data import DATA_PATH from ..docstrings import copy_replace_short_description +from ..utils import helpers +from ..utils.file_io import fits as fits_utils from . import UVData __all__ = ["input_output_mapping", "MWACorrFITS"] @@ -187,7 +189,7 @@ def read_metafits( "CALIBDEL", ] # store remaining keys in extra keywords - meta_extra_keywords = utils._get_fits_extra_keywords( + meta_extra_keywords = fits_utils._get_extra_keywords( meta_hdr, keywords_to_skip=["DATE-OBS"] + mwax_keys_to_skip ) @@ -1361,7 +1363,7 @@ def read_mwa_corr_fits( for filename in filelist: # update filename attribute basename = os.path.basename(filename) - self.filename = utils._combine_filenames(self.filename, [basename]) + self.filename = helpers._combine_filenames(self.filename, [basename]) self._filename.form = (len(self.filename),) if filename.lower().endswith(".metafits"): @@ -1371,11 +1373,11 @@ def read_mwa_corr_fits( metafits_file = filename elif filename.lower().endswith(".fits"): with fits.open(filename, memmap=True) as hdu_list: - hdunames = utils._fits_indexhdus(hdu_list) + hdunames = fits_utils._indexhdus(hdu_list) if "PPDS" in hdunames.keys(): ppds_file = filename ppd_meta_header = hdu_list[0].header - ppd_extra_keywords = utils._get_fits_extra_keywords( + ppd_extra_keywords = fits_utils._get_extra_keywords( ppd_meta_header, keywords_to_skip=["DATE-OBS", "TELESCOP", "INSTRUME"], ) @@ -1523,7 +1525,7 @@ def read_mwa_corr_fits( self.telescope.antenna_names = meta_dict["antenna_names"] self.telescope.antenna_positions = meta_dict["antenna_positions"] self.history = meta_dict["history"] - if not utils._check_history_version(self.history, self.pyuvdata_version_str): + if not helpers._check_history_version(self.history, self.pyuvdata_version_str): self.history += self.pyuvdata_version_str for key, value in meta_dict["extra_keywords"].items(): self.extra_keywords[key] = value diff --git a/src/pyuvdata/uvdata/uvdata.py b/src/pyuvdata/uvdata/uvdata.py index 9b5efa8905..2a2765be16 100644 --- a/src/pyuvdata/uvdata/uvdata.py +++ b/src/pyuvdata/uvdata/uvdata.py @@ -12,10 +12,10 @@ from collections.abc import Iterable from typing import Literal -import astropy.units as units import numpy as np from astropy import constants as const from astropy import coordinates as coord +from astropy import units from astropy.coordinates import Angle, SkyCoord from astropy.time import Time from docstring_parser import DocstringStyle @@ -25,6 +25,9 @@ from .. import parameter as uvp from .. import utils from ..docstrings import combine_docstrings, copy_replace_short_description +from ..utils import helpers +from ..utils import phasing as phs_utils +from ..utils.file_io import hdf5 as hdf5_utils from ..uvbase import UVBase from .initializers import new_uvdata @@ -33,8 +36,6 @@ logger = logging.getLogger(__name__) -allowed_cat_types = ["sidereal", "ephem", "unprojected", "driftscan"] - reporting_request = ( " Please report this in our issue log, we have not been able to find a file with " "this feature, we would like to investigate this more." @@ -771,7 +772,7 @@ def _add_phase_center( source without coordinates. 
""" - cat_entry = utils.generate_phase_center_cat_entry( + cat_entry = utils.ps_cat.generate_phase_center_cat_entry( cat_name=cat_name, cat_type=cat_type, cat_lon=cat_lon, @@ -790,7 +791,7 @@ def _add_phase_center( # The logic below ensures that we pick the lowest positive integer that is # not currently being used by another source if cat_id is None or not force_update: - cat_id = utils.generate_new_phase_center_id( + cat_id = utils.ps_cat.generate_new_phase_center_id( phase_center_catalog=self.phase_center_catalog, cat_id=cat_id ) @@ -799,7 +800,7 @@ def _add_phase_center( self.phase_center_catalog = {} else: # Let's warn if this entry has the same name as an existing one - temp_id, cat_diffs = utils.look_in_catalog( + temp_id, cat_diffs = utils.ps_cat.look_in_catalog( self.phase_center_catalog, phase_dict=cat_entry ) @@ -1198,7 +1199,7 @@ def merge_phase_centers( # First, let's check and see if the dict entries are identical for cat_id in cat_id_list[1:]: - pc_id, pc_diffs = utils.look_in_catalog( + pc_id, pc_diffs = utils.ps_cat.look_in_catalog( self.phase_center_catalog, phase_dict=self.phase_center_catalog[cat_id], ignore_name=ignore_name, @@ -1269,7 +1270,7 @@ def print_phase_center_info( ValueError If `cat_name` matches no keys in `phase_center_catalog`. """ - return utils.print_phase_center_info( + return utils.ps_cat.print_phase_center_info( self.phase_center_catalog, catalog_identifier=catalog_identifier, hms_format=hms_format, @@ -1301,7 +1302,7 @@ def _update_phase_center_id(self, cat_id, *, new_id=None, reserved_ids=None): If not using the method on a multi-phase-ctr data set, if there's no entry that matches `cat_name`, or of the value `new_id` is already taken. """ - new_id = utils.generate_new_phase_center_id( + new_id = utils.ps_cat.generate_new_phase_center_id( phase_center_catalog=self.phase_center_catalog, cat_id=new_id, old_id=cat_id, @@ -1370,7 +1371,7 @@ def _consolidate_phase_center_catalogs( # testing it's sometimes convenient to use self.phase_center_catalog as # the ref catalog, which causes a RunTime error due to updates to the dict. cat_entry = reference_catalog[cat_id] - match_id, match_diffs = utils.look_in_catalog( + match_id, match_diffs = utils.ps_cat.look_in_catalog( self.phase_center_catalog, phase_dict=cat_entry, ignore_name=ignore_name ) if match_id is None or match_diffs != 0: @@ -1555,7 +1556,7 @@ def _set_app_coords_helper(self, *, pa_only=False): vrad = temp_dict.get("vrad") dist = temp_dict.get("cat_dist") - app_ra[select_mask], app_dec[select_mask] = utils.calc_app_coords( + app_ra[select_mask], app_dec[select_mask] = phs_utils.calc_app_coords( lon_coord=lon_val, lat_coord=lat_val, coord_frame=frame, @@ -1582,7 +1583,7 @@ def _set_app_coords_helper(self, *, pa_only=False): frame = temp_dict.get("cat_frame") epoch = temp_dict.get("cat_epoch") if not frame == "altaz": - frame_pa[select_mask] = utils.calc_frame_pos_angle( + frame_pa[select_mask] = phs_utils.calc_frame_pos_angle( time_array=self.time_array[select_mask], app_ra=app_ra[select_mask], app_dec=app_dec[select_mask], @@ -1637,7 +1638,7 @@ def _check_flex_spw_contiguous(self): UVH5 and UVData objects can handle this, but MIRIAD, MIR, UVFITS, and MS file formats cannot, so we just consider it forbidden. """ - utils._check_flex_spw_contiguous( + helpers._check_flex_spw_contiguous( spw_array=self.spw_array, flex_spw_id_array=self.flex_spw_id_array ) @@ -1660,7 +1661,7 @@ def _check_freq_spacing(self, *, raise_errors=True): Flag that channel spacing does not match channel width. 
""" - return utils._check_freq_spacing( + return helpers._check_freq_spacing( freq_array=self.freq_array, freq_tols=self._freq_array.tols, channel_width=self.channel_width, @@ -2307,14 +2308,14 @@ def check( if run_check_acceptability: # Check antenna positions - utils.check_surface_based_positions( + helpers.check_surface_based_positions( antenna_positions=self.telescope.antenna_positions, telescope_loc=self.telescope.location, raise_error=False, ) # Check the LSTs against what we expect given up-to-date IERS data - utils.check_lsts_against_times( + helpers.check_lsts_against_times( jd_array=self.time_array, lst_array=self.lst_array, lst_tols=self._lst_array.tols if lst_tol is None else [0, lst_tol], @@ -2647,7 +2648,7 @@ def antpair2ind( if inds.size == 0: inds = None - inds = utils.slicify(inds) + inds = helpers.slicify(inds) self.__antpair2ind_cache[(ant1, ant2, ordered)] = inds return inds @@ -2694,7 +2695,7 @@ def _key2inds(self, key: str | tuple[int] | tuple[int, int] | tuple[int, int, st """ orig_key = key - key = utils._get_iterable(key) + key = helpers._get_iterable(key) if not isinstance(key, str): key = tuple(key) @@ -2772,7 +2773,7 @@ def _key2inds(self, key: str | tuple[int] | tuple[int, int] | tuple[int, int, st else: if len(key) == 2: try: - pol_ind2 = utils.reorder_conj_pols(self.polarization_array) + pol_ind2 = utils.pol.reorder_conj_pols(self.polarization_array) except ValueError as err: if blt_ind1 is None: if isinstance(orig_key, int): @@ -2805,7 +2806,7 @@ def _key2inds(self, key: str | tuple[int] | tuple[int, int] | tuple[int, int, st raise KeyError(f"Polarization {key_print} not found in data.") # Convert to slices if possible - pol_ind = (utils.slicify(pol_ind[0]), utils.slicify(pol_ind[1])) + pol_ind = (helpers.slicify(pol_ind[0]), helpers.slicify(pol_ind[1])) self.__key2ind_cache[key] = (blt_ind1, blt_ind2, pol_ind) return (blt_ind1, blt_ind2, pol_ind) @@ -3020,7 +3021,7 @@ def get_data( if isinstance(val, str): key.append(val) elif val is not None: - key += list(utils._get_iterable(val)) + key += list(helpers._get_iterable(val)) if len(key) > 3: raise ValueError("no more than 3 key values can be passed") ind1, ind2, indp = self._key2inds(key) @@ -3072,7 +3073,7 @@ def get_flags( if isinstance(val, str): key.append(val) elif val is not None: - key += list(utils._get_iterable(val)) + key += list(helpers._get_iterable(val)) if len(key) > 3: raise ValueError("no more than 3 key values can be passed") ind1, ind2, indp = self._key2inds(key) @@ -3132,7 +3133,7 @@ def get_nsamples( if isinstance(val, str): key.append(val) elif val is not None: - key += list(utils._get_iterable(val)) + key += list(helpers._get_iterable(val)) if len(key) > 3: raise ValueError("no more than 3 key values can be passed") ind1, ind2, indp = self._key2inds(key) @@ -3176,7 +3177,7 @@ def get_times(self, key1, key2=None, key3=None): if isinstance(val, str): key.append(val) elif val is not None: - key += list(utils._get_iterable(val)) + key += list(helpers._get_iterable(val)) if len(key) > 3: raise ValueError("no more than 3 key values can be passed") inds1, inds2, indp = self._key2inds(key) @@ -3221,7 +3222,7 @@ def get_lsts(self, key1, key2=None, key3=None): if isinstance(val, str): key.append(val) elif val is not None: - key += list(utils._get_iterable(val)) + key += list(helpers._get_iterable(val)) if len(key) > 3: raise ValueError("no more than 3 key values can be passed") inds1, inds2, indp = self._key2inds(key) @@ -3325,7 +3326,7 @@ def _set_method_helper(self, dshape, key1, key2=None, 
key3=None): if isinstance(val, str): key.append(val) elif val is not None: - key += list(utils._get_iterable(val)) + key += list(helpers._get_iterable(val)) if len(key) > 3: raise ValueError("no more than 3 key values can be passed") ind1, ind2, indp = self._key2inds(key) @@ -3344,8 +3345,10 @@ def _set_method_helper(self, dshape, key1, key2=None, key3=None): f"Input array shape is {dshape}, expected shape is {expected_shape}." ) - blt_slices, blt_sliceable = utils._convert_to_slices(ind1, max_nslice_frac=0.1) - pol_slices, pol_sliceable = utils._convert_to_slices( + blt_slices, blt_sliceable = helpers._convert_to_slices( + ind1, max_nslice_frac=0.1 + ) + pol_slices, pol_sliceable = helpers._convert_to_slices( indp[0], max_nslice_frac=0.5 ) @@ -3398,7 +3401,7 @@ def set_data(self, data, key1, key2=None, key3=None): """ dshape = data.shape inds = self._set_method_helper(dshape, key1, key2, key3) - utils._index_dset(self.data_array, inds, input_array=data) + hdf5_utils._index_dset(self.data_array, inds, input_array=data) return @@ -3443,7 +3446,7 @@ def set_flags(self, flags, key1, key2=None, key3=None): """ dshape = flags.shape inds = self._set_method_helper(dshape, key1, key2, key3) - utils._index_dset(self.flag_array, inds, input_array=flags) + hdf5_utils._index_dset(self.flag_array, inds, input_array=flags) return @@ -3490,7 +3493,7 @@ def set_nsamples(self, nsamples, key1, key2=None, key3=None): """ dshape = nsamples.shape inds = self._set_method_helper(dshape, key1, key2, key3) - utils._index_dset(self.nsample_array, inds, input_array=nsamples) + hdf5_utils._index_dset(self.nsample_array, inds, input_array=nsamples) return @@ -3625,7 +3628,7 @@ def conjugate_bls(self, convention="ant1 0: - new_pol_inds = utils.reorder_conj_pols(self.polarization_array) + new_pol_inds = utils.pol.reorder_conj_pols(self.polarization_array) self.uvw_array[index_array] *= -1 @@ -3699,7 +3702,7 @@ def reorder_pols( ) index_array = order elif (order == "AIPS") or (order == "CASA"): - index_array = utils.determine_pol_order( + index_array = utils.pol.determine_pol_order( self.polarization_array, order=order ) else: @@ -3741,7 +3744,7 @@ def set_rectangularity(self, *, force: bool = False) -> None: if self.blts_are_rectangular is not None and not force: return - rect, time = utils.determine_rectangularity( + rect, time = helpers.determine_rectangularity( time_array=self.time_array, baseline_array=self.baseline_array, nbls=self.Nbls, @@ -3756,7 +3759,7 @@ def determine_blt_order(self) -> tuple[str] | tuple[str, str] | None: if self.blt_order is not None: return self.blt_order - order = utils.determine_blt_order( + order = helpers.determine_blt_order( time_array=self.time_array, baseline_array=self.baseline_array, ant_1_array=self.ant_1_array, @@ -4067,7 +4070,7 @@ def reorder_freqs( is not the same length as freq_array. 
""" - index_array = utils._sort_freq_helper( + index_array = helpers._sort_freq_helper( Nfreqs=self.Nfreqs, freq_array=self.freq_array, Nspws=self.Nspws, @@ -4299,7 +4302,7 @@ def unproject_phase( if np.all(~select_mask_use): warnings.warn("No selected baselines are projected, doing nothing") - new_uvw = utils.calc_uvw( + new_uvw = phs_utils.calc_uvw( lst_array=self.lst_array, use_ant_pos=use_ant_pos, uvw_array=self.uvw_array, @@ -4321,7 +4324,7 @@ def unproject_phase( self.uvw_array = new_uvw # remove/update phase center - match_id, match_diffs = utils.look_in_catalog( + match_id, match_diffs = utils.ps_cat.look_in_catalog( self.phase_center_catalog, cat_name=cat_name, cat_type="unprojected" ) if match_diffs == 0: @@ -4373,7 +4376,7 @@ def _phase_dict_helper( } if lookup_name: - if len(utils.look_for_name(self.phase_center_catalog, cat_name)) > 1: + if len(utils.ps_cat.look_for_name(self.phase_center_catalog, cat_name)) > 1: raise ValueError( "Name of object has multiple matches in phase center catalog. " "Set lookup_name=False in order to continue." @@ -4382,7 +4385,7 @@ def _phase_dict_helper( if lookup_name and (cat_name not in name_dict): if (cat_type is None) or (cat_type == "ephem"): [cat_times, cat_lon, cat_lat, cat_dist, cat_vrad] = ( - utils.lookup_jplhorizons( + phs_utils.lookup_jplhorizons( cat_name, time_array, telescope_loc=self.telescope.location ) ) @@ -4405,7 +4408,7 @@ def _phase_dict_helper( cat_id = name_dict[cat_name] cat_diffs = 0 else: - cat_id, cat_diffs = utils.look_in_catalog( + cat_id, cat_diffs = utils.ps_cat.look_in_catalog( self.phase_center_catalog, cat_name=cat_name, cat_type=cat_type, @@ -4489,7 +4492,7 @@ def _phase_dict_helper( # Concat the two time ranges to make sure that we cover both the # requested time range _and_ the original time range. [cat_times, cat_lon, cat_lat, cat_dist, cat_vrad] = ( - utils.lookup_jplhorizons( + phs_utils.lookup_jplhorizons( cat_name, np.concatenate((np.reshape(time_array, -1), cat_times)), telescope_loc=self.telescope.location, @@ -4710,7 +4713,7 @@ def phase( # We got the meta-data, now handle calculating the apparent coordinates. # First, check if we need to look up the phase center in question - new_app_ra, new_app_dec = utils.calc_app_coords( + new_app_ra, new_app_dec = phs_utils.calc_app_coords( lon_coord=phase_dict["cat_lon"], lat_coord=phase_dict["cat_lat"], coord_frame=phase_dict["cat_frame"], @@ -4728,7 +4731,7 @@ def phase( # Now calculate position angles. 
if not phase_frame == "altaz": - new_frame_pa = utils.calc_frame_pos_angle( + new_frame_pa = phs_utils.calc_frame_pos_angle( time_array=time_array, app_ra=new_app_ra, app_dec=new_app_dec, @@ -4740,7 +4743,7 @@ def phase( new_frame_pa = np.zeros(time_array.shape, dtype=float) # Now its time to do some rotations and calculate the new coordinates - new_uvw = utils.calc_uvw( + new_uvw = phs_utils.calc_uvw( app_ra=new_app_ra, app_dec=new_app_dec, frame_pa=new_frame_pa, @@ -4842,8 +4845,10 @@ def phase_to_time( # Generate ra/dec of zenith at time in the phase_frame coordinate # system to use for phasing - if utils.hasmoon and isinstance(self.telescope.location, utils.MoonLocation): - zenith_coord = utils.LunarSkyCoord( + if phs_utils.hasmoon and isinstance( + self.telescope.location, phs_utils.MoonLocation + ): + zenith_coord = phs_utils.LunarSkyCoord( alt=Angle(90 * units.deg), az=Angle(0 * units.deg), obstime=time, @@ -4888,7 +4893,7 @@ def set_uvws_from_antenna_positions(self, *, update_vis=True): """ unprojected_blts = self._check_for_cat_type("unprojected") - new_uvw = utils.calc_uvw( + new_uvw = phs_utils.calc_uvw( app_ra=self.phase_center_app_ra, app_dec=self.phase_center_app_dec, frame_pa=self.phase_center_frame_pa, @@ -4967,7 +4972,7 @@ def update_antenna_positions( # upated antenna positions, and B is the old positions. I.e., this is the # same as independently calculating uvws from old and new and subtracting # one from the other. - delta_uvw = utils.calc_uvw( + delta_uvw = phs_utils.calc_uvw( app_ra=self.phase_center_app_ra, app_dec=self.phase_center_app_dec, frame_pa=self.phase_center_frame_pa, @@ -5117,7 +5122,7 @@ def fix_phase(self, *, use_ant_pos=True): uvws_use = self.uvw_array[inds, :] - uvw_rel_positions = utils.undo_old_uvw_calc( + uvw_rel_positions = phs_utils.undo_old_uvw_calc( frame_phase_center.ra.rad, frame_phase_center.dec.rad, uvws_use ) @@ -5696,7 +5701,7 @@ def __add__( this.Nants_data = this._calc_nants_data() # Update filename parameter - this.filename = utils._combine_filenames(this.filename, other.filename) + this.filename = helpers._combine_filenames(this.filename, other.filename) if this.filename is not None: this._filename.form = (len(this.filename),) @@ -5722,14 +5727,14 @@ def __add__( if n_axes > 0: history_update_string += " axis using pyuvdata." - histories_match = utils._check_histories(this.history, other.history) + histories_match = helpers._check_histories(this.history, other.history) this.history += history_update_string if not histories_match: if verbose_history: this.history += " Next object history follows. " + other.history else: - extra_history = utils._combine_history_addition( + extra_history = helpers._combine_history_addition( this.history, other.history ) if extra_history is not None: @@ -5973,7 +5978,7 @@ def fast_concat( histories_match = [] for obj in other: - histories_match.append(utils._check_histories(this.history, obj.history)) + histories_match.append(helpers._check_histories(this.history, obj.history)) this.history += history_update_string for obj_num, obj in enumerate(other): @@ -5981,7 +5986,7 @@ def fast_concat( if verbose_history: this.history += " Next object history follows. 
" + obj.history else: - extra_history = utils._combine_history_addition( + extra_history = helpers._combine_history_addition( this.history, obj.history ) if extra_history is not None: @@ -6057,7 +6062,7 @@ def fast_concat( ) this.Npols = sum([this.Npols] + [obj.Npols for obj in other]) - if not utils._test_array_constant_spacing(this._polarization_array): + if not helpers._test_array_constant_spacing(this._polarization_array): warnings.warn( "Combined polarizations are not evenly spaced. This will " "make it impossible to write this data out to some file types." @@ -6127,7 +6132,7 @@ def fast_concat( # update filename attribute for obj in other: - this.filename = utils._combine_filenames(this.filename, obj.filename) + this.filename = helpers._combine_filenames(this.filename, obj.filename) if this.filename is not None: this._filename.form = len(this.filename) @@ -6290,14 +6295,14 @@ def sum_vis( this.data_array = this.data_array + other.data_array history_update_string = " Visibilities summed using pyuvdata." - histories_match = utils._check_histories(this.history, other.history) + histories_match = helpers._check_histories(this.history, other.history) this.history += history_update_string if not histories_match: if verbose_history: this.history += " Second object history follows. " + other.history else: - extra_history = utils._combine_history_addition( + extra_history = helpers._combine_history_addition( this.history, other.history ) if extra_history is not None: @@ -6307,7 +6312,7 @@ def sum_vis( ) # merge file names - this.filename = utils._combine_filenames(this.filename, other.filename) + this.filename = helpers._combine_filenames(this.filename, other.filename) # Check final object is self-consistent if run_check: @@ -6430,7 +6435,7 @@ def parse_ants(self, ant_str, *, print_toggle=False): polarization specification. 
""" - return utils.parse_ants( + return utils.bls.parse_ants( uv=self, ant_str=ant_str, print_toggle=print_toggle, @@ -6570,7 +6575,7 @@ def _select_preprocess( # test for blt_inds presence before adding inds from antennas & times if blt_inds is not None: - blt_inds = utils._get_iterable(blt_inds) + blt_inds = helpers._get_iterable(blt_inds) if np.array(blt_inds).ndim > 1: blt_inds = np.array(blt_inds).flatten() history_update_string += "baseline-times" @@ -6580,12 +6585,12 @@ def _select_preprocess( raise ValueError("Cannot set both phase_center_ids and catalog_names.") if catalog_names is not None: - phase_center_ids = utils.look_for_name( + phase_center_ids = utils.ps_cat.look_for_name( self.phase_center_catalog, catalog_names ) if phase_center_ids is not None: - phase_center_ids = np.array(utils._get_iterable(phase_center_ids)) + phase_center_ids = np.array(helpers._get_iterable(phase_center_ids)) pc_blt_inds = np.nonzero( np.isin(self.phase_center_id_array, phase_center_ids) )[0] @@ -6631,7 +6636,7 @@ def _select_preprocess( ) if antenna_nums is not None: - antenna_nums = utils._get_iterable(antenna_nums) + antenna_nums = helpers._get_iterable(antenna_nums) antenna_nums = np.asarray(antenna_nums) if antenna_nums.ndim > 1: antenna_nums = antenna_nums.flatten() @@ -6751,7 +6756,7 @@ def _select_preprocess( else: blt_inds = ant_blt_inds - time_blt_inds = utils._select_times_helper( + time_blt_inds = helpers._select_times_helper( times=times, time_range=time_range, lsts=lsts, @@ -6807,19 +6812,19 @@ def _select_preprocess( blt_inds = sorted(set(blt_inds)) if freq_chans is not None: - freq_chans = utils._get_iterable(freq_chans) + freq_chans = helpers._get_iterable(freq_chans) if np.array(freq_chans).ndim > 1: freq_chans = np.array(freq_chans).flatten() if frequencies is None: frequencies = self.freq_array[freq_chans] else: - frequencies = utils._get_iterable(frequencies) + frequencies = helpers._get_iterable(frequencies) frequencies = np.sort( list(set(frequencies) | set(self.freq_array[freq_chans])) ) if frequencies is not None: - frequencies = utils._get_iterable(frequencies) + frequencies = helpers._get_iterable(frequencies) if np.array(frequencies).ndim > 1: frequencies = np.array(frequencies).flatten() if n_selects > 0: @@ -6843,7 +6848,7 @@ def _select_preprocess( freq_ind_separation = freq_ind_separation[ np.diff(self.flex_spw_id_array[freq_inds]) == 0 ] - if not utils._test_array_constant(freq_ind_separation): + if not helpers._test_array_constant(freq_ind_separation): warnings.warn( "Selected frequencies are not evenly spaced. This " "will make it impossible to write this data out to " @@ -6861,7 +6866,7 @@ def _select_preprocess( freq_inds = None if polarizations is not None: - polarizations = utils._get_iterable(polarizations) + polarizations = helpers._get_iterable(polarizations) if np.array(polarizations).ndim > 1: polarizations = np.array(polarizations).flatten() if n_selects > 0: @@ -6922,7 +6927,7 @@ def _select_preprocess( "No data matching this polarization and frequency selection " "in this UVData object." ) - if not utils._test_array_constant_spacing( + if not helpers._test_array_constant_spacing( np.unique(self.flex_spw_polarization_array[spw_inds]) ): warnings.warn( @@ -6933,7 +6938,7 @@ def _select_preprocess( else: pol_inds = np.unique(pol_inds) if len(pol_inds) > 2: - if not utils._test_array_constant_spacing(pol_inds): + if not helpers._test_array_constant_spacing(pol_inds): warnings.warn( "Selected polarization values are not evenly spaced. 
This " "will make it impossible to write this data out to " @@ -7744,7 +7749,7 @@ def downsample_in_time( int_times = int_times if len(np.unique(int_times)) == 1: # this baseline has all the same integration times - if len(np.unique(dtime)) > 1 and not utils._test_array_constant( + if len(np.unique(dtime)) > 1 and not helpers._test_array_constant( dtime, tols=self._integration_time.tols ): warnings.warn( @@ -8528,7 +8533,7 @@ def get_redundancies( if use_antpos: antpos = self.telescope.get_enu_antpos() - result = utils.get_antenna_redundancies( + result = utils.redundancy.get_antenna_redundancies( self.telescope.antenna_numbers, antpos, tol=tol, @@ -8568,7 +8573,7 @@ def get_redundancies( antpos, ant1_inds, axis=0 ) - return utils.get_baseline_redundancies( + return utils.redundancy.get_baseline_redundancies( baselines, baseline_vecs, tol=tol, @@ -8675,7 +8680,7 @@ def compress_by_redundancy( # now we have to figure out which times are the same to a tolerance # so we can average over them. time_inds = np.arange(len(group_times + conj_group_times)) - time_gps = utils.find_clusters( + time_gps = utils.redundancy.find_clusters( location_ids=time_inds, location_vectors=np.array(group_times + conj_group_times), tol=self._time_array.tols[1], @@ -11848,7 +11853,7 @@ def normalize_by_autos(self, *, skip_autos=True, invert=False): pol_list = list(self.polarization_array) for pol in pol_list: try: - feed_pols = utils.POL_TO_FEED_DICT[utils.POL_NUM2STR_DICT[pol]] + feed_pols = utils.pol.POL_TO_FEED_DICT[utils.POL_NUM2STR_DICT[pol]] pol_groups.append( [ pol_list.index(utils.POL_STR2NUM_DICT[item + item]) diff --git a/src/pyuvdata/uvdata/uvfits.py b/src/pyuvdata/uvdata/uvfits.py index 089492052b..d02f6cf28b 100644 --- a/src/pyuvdata/uvdata/uvfits.py +++ b/src/pyuvdata/uvdata/uvfits.py @@ -24,6 +24,8 @@ from .. import utils from ..docstrings import copy_replace_short_description +from ..utils import helpers +from ..utils.file_io import fits as fits_utils from . import UVData __all__ = ["UVFITS"] @@ -69,7 +71,7 @@ def _get_parameter_data( # angles in uvfits files are stored in degrees, so convert to radians self.lst_array = np.deg2rad(vis_hdu.data.par("lst")) if run_check_acceptability: - utils.check_lsts_against_times( + helpers.check_lsts_against_times( jd_array=self.time_array, lst_array=self.lst_array, telescope_loc=self.telescope.location, @@ -392,7 +394,7 @@ def read_uvfits( with fits.open(filename, memmap=True) as hdu_list: vis_hdu = hdu_list[0] # assumes the visibilities are in the primary hdu vis_hdr = vis_hdu.header.copy() - hdunames = utils._fits_indexhdus(hdu_list) # find the rest of the tables + hdunames = fits_utils._indexhdus(hdu_list) # find the rest of the tables # First get everything we can out of the header. @@ -412,7 +414,7 @@ def read_uvfits( # check if we have an spw dimension if vis_hdr["NAXIS"] == 7: self.Nspws = vis_hdr.pop("NAXIS5") - self.spw_array = utils._fits_gethduaxis(vis_hdu, 5).astype(np.int64) - 1 + self.spw_array = fits_utils._gethduaxis(vis_hdu, 5).astype(np.int64) - 1 ra_axis = 6 dec_axis = 7 else: @@ -440,7 +442,7 @@ def read_uvfits( # the AIPS SU table. 
# Get rest freq value - ref_freq = utils._fits_gethduaxis(vis_hdu, 4)[0] + ref_freq = fits_utils._gethduaxis(vis_hdu, 4)[0] self.channel_width = np.transpose( np.tile(abs(fq_hdu.data["CH WIDTH"]), (uvfits_nchan, 1)) ).flatten() @@ -455,11 +457,11 @@ def read_uvfits( # If there's only one window, then the UVFITS file may not have an # FQ table, in which case pull the info from the main table self.Nfreqs = vis_hdr.pop("NAXIS4") - self.freq_array = utils._fits_gethduaxis(vis_hdu, 4) + self.freq_array = fits_utils._gethduaxis(vis_hdu, 4) self.channel_width = np.full(self.Nfreqs, vis_hdr.pop("CDELT4")) self.flex_spw_id_array = np.zeros(self.Nfreqs, dtype=int) - self.polarization_array = np.int32(utils._fits_gethduaxis(vis_hdu, 3)) + self.polarization_array = np.int32(fits_utils._gethduaxis(vis_hdu, 3)) # other info -- not required but frequently used self.telescope.name = vis_hdr.pop("TELESCOP", None) self.telescope.instrument = vis_hdr.pop("INSTRUME", None) @@ -473,7 +475,7 @@ def read_uvfits( if self.blt_order == ("bda",): self._blt_order.form = (1,) self.history = str(vis_hdr.get("HISTORY", "")) - if not utils._check_history_version( + if not helpers._check_history_version( self.history, self.pyuvdata_version_str ): self.history += self.pyuvdata_version_str @@ -529,7 +531,7 @@ def read_uvfits( ) self.phase_center_id_array = np.zeros(self.Nblts, dtype=int) + cat_id - self.extra_keywords = utils._get_fits_extra_keywords( + self.extra_keywords = fits_utils._get_extra_keywords( vis_hdr, keywords_to_skip=[ "DATE-OBS", @@ -765,7 +767,7 @@ def read_uvfits( # fix up the uvws if in the NCP baseline coordinate frame. # Must be done here because it requires the phase_center_app_dec if "UU---NCP" in vis_hdu.data.parnames: - self.uvw_array = utils._rotate_one_axis( + self.uvw_array = utils.phasing._rotate_one_axis( xyz_array=self.uvw_array[:, :, None], rot_amount=self.phase_center_app_dec - np.pi / 2, rot_axis=0, @@ -950,7 +952,7 @@ def write_uvfits( if self.Npols > 1: pol_indexing = np.argsort(np.abs(self.polarization_array)) polarization_array = self.polarization_array[pol_indexing] - if not utils._test_array_constant_spacing(polarization_array): + if not helpers._test_array_constant_spacing(polarization_array): raise ValueError( "The polarization values are not evenly spaced (probably " "because of a select operation). The uvfits format " @@ -1478,7 +1480,7 @@ def write_uvfits( # coordinate frame, although nothing in phase_center_catalog forces # objects to share the same frame. So we want to make sure that # everything lines up with the coordinate frame listed. - new_ra, new_dec = utils.transform_sidereal_coords( + new_ra, new_dec = utils.phasing.transform_sidereal_coords( longitude=phase_dict["cat_lon"], latitude=phase_dict["cat_lat"], in_coord_frame=phase_dict["cat_frame"], diff --git a/src/pyuvdata/uvdata/uvh5.py b/src/pyuvdata/uvdata/uvh5.py index d352d1a7ba..7ff2e7a1bb 100644 --- a/src/pyuvdata/uvdata/uvh5.py +++ b/src/pyuvdata/uvdata/uvh5.py @@ -16,8 +16,10 @@ import numpy as np from docstring_parser import DocstringStyle -from .. import Telescope, hdf5_utils, utils +from .. import Telescope, utils from ..docstrings import copy_replace_short_description +from ..utils import helpers +from ..utils.file_io import hdf5 as hdf5_utils from . 
import UVData __all__ = ["UVH5", "FastUVH5Meta"] @@ -203,7 +205,7 @@ def Nbls(self) -> int: # noqa: N802 def get_blt_order(self) -> tuple[str]: """Get the blt order from analysing metadata.""" - return utils.determine_blt_order( + return helpers.determine_blt_order( time_array=self.time_array, ant_1_array=self.ant_1_array, ant_2_array=self.ant_2_array, @@ -250,7 +252,7 @@ def blts_are_rectangular(self) -> bool: ): return True - is_rect, self.__time_first = utils.determine_rectangularity( + is_rect, self.__time_first = helpers.determine_rectangularity( time_array=self.time_array, baseline_array=self.baseline_array, nbls=self.Nbls, @@ -512,7 +514,7 @@ def _read_header_with_fast_meta( proc = None if run_check_acceptability: - utils.check_lsts_against_times( + helpers.check_lsts_against_times( jd_array=self.time_array, lst_array=self.lst_array, telescope_loc=self.telescope.location, @@ -586,7 +588,7 @@ def _read_header_with_fast_meta( if "time_axis_faster_than_bls" in obj.header: self.time_axis_faster_than_bls = obj.time_axis_faster_than_bls - if not utils._check_history_version(self.history, self.pyuvdata_version_str): + if not helpers._check_history_version(self.history, self.pyuvdata_version_str): self.history += self.pyuvdata_version_str # Optional parameters @@ -799,9 +801,9 @@ def _get_data( dgrp["visdata"], inds, data_array_dtype ) else: - self.data_array = utils._index_dset(dgrp["visdata"], inds) - self.flag_array = utils._index_dset(dgrp["flags"], inds) - self.nsample_array = utils._index_dset(dgrp["nsamples"], inds) + self.data_array = hdf5_utils._index_dset(dgrp["visdata"], inds) + self.flag_array = hdf5_utils._index_dset(dgrp["flags"], inds) + self.nsample_array = hdf5_utils._index_dset(dgrp["nsamples"], inds) else: # do select operations on everything except data_array, flag_array # and nsample_array @@ -817,7 +819,7 @@ def _get_data( # max_nslice_frac of 0.1 yields slice speedup over fancy index for HERA data # See pyuvdata PR #805 if blt_inds is not None: - blt_slices, blt_sliceable = utils._convert_to_slices( + blt_slices, blt_sliceable = helpers._convert_to_slices( blt_inds, max_nslice_frac=0.1 ) else: @@ -825,7 +827,7 @@ def _get_data( blt_sliceable = True if freq_inds is not None: - freq_slices, freq_sliceable = utils._convert_to_slices( + freq_slices, freq_sliceable = helpers._convert_to_slices( freq_inds, max_nslice_frac=0.1 ) else: @@ -833,7 +835,7 @@ def _get_data( freq_sliceable = True if pol_inds is not None: - pol_slices, pol_sliceable = utils._convert_to_slices( + pol_slices, pol_sliceable = helpers._convert_to_slices( pol_inds, max_nslice_frac=0.5 ) else: @@ -876,9 +878,9 @@ def _get_data( visdata_dset, inds, data_array_dtype ) else: - visdata = utils._index_dset(visdata_dset, inds) - flags = utils._index_dset(flags_dset, inds) - nsamples = utils._index_dset(nsamples_dset, inds) + visdata = hdf5_utils._index_dset(visdata_dset, inds) + flags = hdf5_utils._index_dset(flags_dset, inds) + nsamples = hdf5_utils._index_dset(nsamples_dset, inds) # down select on other dimensions if necessary # use indices not slices here: generally not the bottleneck if not multidim_index and freq_frac < 1: @@ -914,9 +916,9 @@ def _get_data( visdata_dset, inds, data_array_dtype ) else: - visdata = utils._index_dset(visdata_dset, inds) - flags = utils._index_dset(flags_dset, inds) - nsamples = utils._index_dset(nsamples_dset, inds) + visdata = hdf5_utils._index_dset(visdata_dset, inds) + flags = hdf5_utils._index_dset(flags_dset, inds) + nsamples = hdf5_utils._index_dset(nsamples_dset, 
inds) # down select on other dimensions if necessary # use indices not slices here: generally not the bottleneck @@ -952,9 +954,9 @@ def _get_data( visdata_dset, inds, data_array_dtype ) else: - visdata = utils._index_dset(visdata_dset, inds) - flags = utils._index_dset(flags_dset, inds) - nsamples = utils._index_dset(nsamples_dset, inds) + visdata = hdf5_utils._index_dset(visdata_dset, inds) + flags = hdf5_utils._index_dset(flags_dset, inds) + nsamples = hdf5_utils._index_dset(nsamples_dset, inds) # down select on other dimensions if necessary # use indices not slices here: generally not the bottleneck diff --git a/src/pyuvdata/uvflag/uvflag.py b/src/pyuvdata/uvflag/uvflag.py index 7398e39d8e..1a1725fbcc 100644 --- a/src/pyuvdata/uvflag/uvflag.py +++ b/src/pyuvdata/uvflag/uvflag.py @@ -16,6 +16,7 @@ from .. import Telescope, UVCal, UVData from .. import parameter as uvp from .. import utils +from ..utils import helpers from ..uvbase import UVBase __all__ = ["UVFlag", "flags2waterfall", "and_rows_cols"] @@ -825,13 +826,13 @@ def check( if run_check_acceptability: # Check antenna positions - utils.check_surface_based_positions( + helpers.check_surface_based_positions( antenna_positions=self.telescope.antenna_positions, telescope_loc=self.telescope.location, raise_error=False, ) - utils.check_lsts_against_times( + helpers.check_lsts_against_times( jd_array=self.time_array, lst_array=self.lst_array, telescope_loc=self.telescope.location, @@ -1127,7 +1128,7 @@ def parse_ants(self, ant_str, *, print_toggle=False): "UVFlag objects can only call 'parse_ants' function " "if type is 'baseline'." ) - return utils.parse_ants( + return utils.bls.parse_ants( self, ant_str=ant_str, print_toggle=print_toggle, @@ -1200,7 +1201,7 @@ def collapse_pol( self.clear_unused_attributes() self.history += "Pol axis collapse. " - if not utils._check_history_version(self.history, self.pyuvdata_version_str): + if not helpers._check_history_version(self.history, self.pyuvdata_version_str): self.history += self.pyuvdata_version_str if run_check: @@ -1322,7 +1323,7 @@ def to_waterfall( self._set_type_waterfall() self.history += 'Collapsed to type "waterfall". ' # + self.pyuvdata_version_str - if not utils._check_history_version(self.history, self.pyuvdata_version_str): + if not helpers._check_history_version(self.history, self.pyuvdata_version_str): self.history += self.pyuvdata_version_str self.clear_unused_attributes() @@ -1392,7 +1393,7 @@ def to_baseline( """Convert a UVFlag object of type "waterfall" or "antenna" to type "baseline". Broadcasts the flag array to all baselines. - This function does NOT apply flags to uv (see utils.apply_uvflag for that). + This function does NOT apply flags to uv (see pyuvdata.apply_uvflag for that). Note that the antenna metadata arrays (`antenna_names`, `antenna_numbers` and `antenna_positions`) may be reordered to match the ordering on `uv`. @@ -1595,7 +1596,7 @@ def to_baseline( self.history += 'Broadcast to type "baseline". ' - if not utils._check_history_version(self.history, self.pyuvdata_version_str): + if not helpers._check_history_version(self.history, self.pyuvdata_version_str): self.history += self.pyuvdata_version_str if run_check: @@ -1615,7 +1616,7 @@ def to_antenna( """Convert a UVFlag object of type "waterfall" to type "antenna". Broadcasts the flag array to all antennas. - This function does NOT apply flags to uv (see utils.apply_uvflag for that). + This function does NOT apply flags to uv (see pyuvdata.apply_uvflag for that). 
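# A minimal sketch of the workflow these docstrings describe, using the sample
# file read by the new conftest fixtures below. Every call here is existing
# pyuvdata API, but the sequence itself is illustrative, not taken from this
# patch: to_waterfall/to_baseline/to_antenna only reshape and broadcast a
# UVFlag's flags, while OR-ing them into a UVData object is the separate,
# top-level pyuvdata.apply_uvflag step.
import os

from pyuvdata import UVData, UVFlag, apply_uvflag
from pyuvdata.data import DATA_PATH

uvd = UVData()
uvd.read(os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcAA.uvh5"))

uvf = UVFlag(uvd)              # metric-mode object defined on uvd
uvf.to_flag()                  # convert to flag mode
uvf.to_waterfall(method="or")  # collapse to a time/frequency waterfall
uvf.to_baseline(uvd)           # broadcast the waterfall flags to all baselines
uvd = apply_uvflag(uvd, uvf, inplace=False)  # OR the flags into uvd.flag_array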
Note that the antenna metadata arrays (`antenna_names`, `antenna_numbers` and `antenna_positions`) may be reordered to match the ordering on `uv`. @@ -1760,7 +1761,7 @@ def to_antenna( self._set_type_antenna() self.history += 'Broadcast to type "antenna". ' - if not utils._check_history_version(self.history, self.pyuvdata_version_str): + if not helpers._check_history_version(self.history, self.pyuvdata_version_str): self.history += self.pyuvdata_version_str if run_check: @@ -1806,7 +1807,7 @@ def to_flag( "Unknown UVFlag mode: " + self.mode + ". Cannot convert to flag." ) self.history += 'Converted to mode "flag". ' - if not utils._check_history_version(self.history, self.pyuvdata_version_str): + if not helpers._check_history_version(self.history, self.pyuvdata_version_str): self.history += self.pyuvdata_version_str self.clear_unused_attributes() @@ -1879,7 +1880,7 @@ def to_metric( ) self.history += 'Converted to mode "metric". ' - if not utils._check_history_version(self.history, self.pyuvdata_version_str): + if not helpers._check_history_version(self.history, self.pyuvdata_version_str): self.history += self.pyuvdata_version_str self.clear_unused_attributes() @@ -1945,7 +1946,7 @@ def __add__( ) # Update filename parameter - this.filename = utils._combine_filenames(this.filename, other.filename) + this.filename = helpers._combine_filenames(this.filename, other.filename) if this.filename is not None: this._filename.form = (len(this.filename),) @@ -2147,7 +2148,7 @@ def __add__( ) this.history += "Data combined along " + axis + " axis. " - if not utils._check_history_version(this.history, this.pyuvdata_version_str): + if not helpers._check_history_version(this.history, this.pyuvdata_version_str): this.history += this.pyuvdata_version_str this.Ntimes = np.unique(this.time_array).size @@ -2243,7 +2244,7 @@ def __or__( if other.history not in this.history: this.history += "Flags OR'd with: " + other.history - if not utils._check_history_version(this.history, this.pyuvdata_version_str): + if not helpers._check_history_version(this.history, this.pyuvdata_version_str): this.history += this.pyuvdata_version_str if run_check: @@ -2309,7 +2310,7 @@ def combine_metrics( """ # Ensure others is iterable (in case of single UVFlag object) - # cannot use utils._get_iterable because the object itself is iterable + # cannot use helpers._get_iterable because the object itself is iterable if not isinstance(others, (list, tuple, np.ndarray)): others = [others] @@ -2341,7 +2342,7 @@ def combine_metrics( this.weights_array = warray this.history += "Combined metric arrays. 
" - if not utils._check_history_version(this.history, this.pyuvdata_version_str): + if not helpers._check_history_version(this.history, this.pyuvdata_version_str): this.history += this.pyuvdata_version_str if run_check: @@ -2459,7 +2460,7 @@ def _select_preprocess( # test for blt_inds presence before adding inds from antennas & times if blt_inds is not None: - blt_inds = utils._get_iterable(blt_inds) + blt_inds = helpers._get_iterable(blt_inds) if np.array(blt_inds).ndim > 1: blt_inds = np.array(blt_inds).flatten() if self.type == "baseline": @@ -2469,7 +2470,7 @@ def _select_preprocess( n_selects += 1 if antenna_nums is not None: - antenna_nums = utils._get_iterable(antenna_nums) + antenna_nums = helpers._get_iterable(antenna_nums) if np.array(antenna_nums).ndim > 1: antenna_nums = np.array(antenna_nums).flatten() if n_selects > 0: @@ -2600,7 +2601,7 @@ def _select_preprocess( blt_inds = ant_blt_inds if times is not None: - times = utils._get_iterable(times) + times = helpers._get_iterable(times) if np.array(times).ndim > 1: times = np.array(times).flatten() @@ -2649,14 +2650,14 @@ def _select_preprocess( blt_inds = sorted(set(blt_inds)) if freq_chans is not None: - freq_chans = utils._get_iterable(freq_chans) + freq_chans = helpers._get_iterable(freq_chans) if np.array(freq_chans).ndim > 1: freq_chans = np.array(freq_chans).flatten() if frequencies is None: frequencies = np.squeeze(self.freq_array)[freq_chans] else: - frequencies = utils._get_iterable(frequencies) + frequencies = helpers._get_iterable(frequencies) frequencies = np.sort( list( set(frequencies) | set(np.squeeze(self.freq_array)[freq_chans]) @@ -2664,7 +2665,7 @@ def _select_preprocess( ) if frequencies is not None: - frequencies = utils._get_iterable(frequencies) + frequencies = helpers._get_iterable(frequencies) if np.array(frequencies).ndim > 1: frequencies = np.array(frequencies).flatten() if n_selects > 0: @@ -2688,7 +2689,7 @@ def _select_preprocess( freq_inds = None if polarizations is not None: - polarizations = utils._get_iterable(polarizations) + polarizations = helpers._get_iterable(polarizations) if np.array(polarizations).ndim > 1: polarizations = np.array(polarizations).flatten() if n_selects > 0: @@ -3129,7 +3130,7 @@ def read( "freq_array spacing." ) freq_delta = np.diff(np.squeeze(self.freq_array)) - if utils._test_array_constant_spacing( + if helpers._test_array_constant_spacing( self.freq_array, tols=self._freq_array.tols ): self.channel_width = np.full(self.Nfreqs, freq_delta[0]) @@ -3225,7 +3226,7 @@ def read( self.history += history - if not utils._check_history_version( + if not helpers._check_history_version( self.history, self.pyuvdata_version_str ): self.history += self.pyuvdata_version_str @@ -3449,7 +3450,7 @@ def write(self, filename, *, clobber=False, data_compression="lzf"): polarization_array = self.polarization_array header["polarization_array"] = polarization_array - if not utils._check_history_version( + if not helpers._check_history_version( self.history, self.pyuvdata_version_str ): self.history += self.pyuvdata_version_str @@ -3586,7 +3587,7 @@ def from_uvdata( if waterfall: self._set_type_waterfall() self.history += 'Flag object with type "waterfall" created. ' - if not utils._check_history_version( + if not helpers._check_history_version( self.history, self.pyuvdata_version_str ): self.history += self.pyuvdata_version_str @@ -3610,7 +3611,7 @@ def from_uvdata( else: self._set_type_baseline() self.history += 'Flag object with type "baseline" created. 
'
-            if not utils._check_history_version(
+            if not helpers._check_history_version(
                 self.history, self.pyuvdata_version_str
             ):
                 self.history += self.pyuvdata_version_str
@@ -3749,7 +3750,7 @@ def from_uvcal(
         if waterfall:
             self._set_type_waterfall()
             self.history += 'Flag object with type "waterfall" created. '
-            if not utils._check_history_version(
+            if not helpers._check_history_version(
                 self.history, self.pyuvdata_version_str
             ):
                 self.history += self.pyuvdata_version_str
@@ -3772,7 +3773,7 @@ def from_uvcal(
         else:
             self._set_type_antenna()
             self.history += 'Flag object with type "antenna" created. '
-            if not utils._check_history_version(
+            if not helpers._check_history_version(
                 self.history, self.pyuvdata_version_str
             ):
                 self.history += self.pyuvdata_version_str
diff --git a/tests/conftest.py b/tests/conftest.py
index 7d4c47d9bc..f880a7425f 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -105,3 +105,18 @@ def uvcalibrate_data(uvcalibrate_data_main):
     uvcal = uvcal_in.copy()
 
     yield uvdata, uvcal
+
+
+@pytest.fixture(scope="session")
+def uvcalibrate_uvdata_oldfiles_main():
+    uvd = UVData()
+    uvd.read(os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcAA.uvh5"))
+
+    yield uvd
+
+
+@pytest.fixture(scope="function")
+def uvcalibrate_uvdata_oldfiles(uvcalibrate_uvdata_oldfiles_main):
+    uvd = uvcalibrate_uvdata_oldfiles_main.copy()
+
+    yield uvd
diff --git a/tests/test_apply_uvflag.py b/tests/test_apply_uvflag.py
new file mode 100644
index 0000000000..71d516c68a
--- /dev/null
+++ b/tests/test_apply_uvflag.py
@@ -0,0 +1,124 @@
+# -*- mode: python; coding: utf-8 -*-
+# Copyright (c) 2024 Radio Astronomy Software Group
+# Licensed under the 2-clause BSD License
+"""Tests for apply_uvflag function."""
+
+import numpy as np
+import pytest
+
+from pyuvdata import UVFlag, apply_uvflag, utils
+from pyuvdata.testing import check_warnings
+
+
+@pytest.mark.filterwarnings("ignore:The shapes of several attributes will be changing")
+@pytest.mark.filterwarnings("ignore:Fixing auto-correlations to be be real-only,")
+@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
+def test_apply_uvflag(uvcalibrate_uvdata_oldfiles):
+    # load data and insert some flags
+    uvd = uvcalibrate_uvdata_oldfiles
+    uvd.flag_array[uvd.antpair2ind(9, 20)] = True
+
+    # load a UVFlag into flag type
+    uvf = UVFlag(uvd)
+    uvf.to_flag()
+
+    # insert flags for 2 out of 3 times
+    uvf.flag_array[uvf.antpair2ind(9, 10)[:2]] = True
+
+    # apply flags and check for basic flag propagation
+    with check_warnings(
+        DeprecationWarning,
+        match="apply_uvflag has moved, please import it as 'from pyuvdata import "
+        "apply_uvflag'. 
This warnings will become an error in version 3.2", + ): + uvdf = utils.apply_uvflag(uvd, uvf, inplace=False) + assert np.all(uvdf.flag_array[uvdf.antpair2ind(9, 10)][:2]) + + # test inplace + uvdf = uvd.copy() + apply_uvflag(uvdf, uvf, inplace=True) + assert np.all(uvdf.flag_array[uvdf.antpair2ind(9, 10)][:2]) + + # test flag missing + uvf2 = uvf.select(bls=uvf.get_antpairs()[:-1], inplace=False) + uvdf = apply_uvflag(uvd, uvf2, inplace=False, flag_missing=True) + assert np.all(uvdf.flag_array[uvdf.antpair2ind(uvf.get_antpairs()[-1])]) + uvdf = apply_uvflag(uvd, uvf2, inplace=False, flag_missing=False) + assert not np.any(uvdf.flag_array[uvdf.antpair2ind(uvf.get_antpairs()[-1])]) + + # test force polarization + uvdf = uvd.copy() + uvdf2 = uvd.copy() + uvdf2.polarization_array[0] = -6 + uvdf += uvdf2 + uvdf = apply_uvflag(uvdf, uvf, inplace=False, force_pol=True) + assert np.all(uvdf.flag_array[uvdf.antpair2ind(9, 10)][:2]) + with pytest.raises( + ValueError, match="Input uvf and uvd polarizations do not match" + ): + apply_uvflag(uvdf, uvf, inplace=False, force_pol=False) + + # test unflag first + uvdf = apply_uvflag(uvd, uvf, inplace=False, unflag_first=True) + assert np.all(uvdf.flag_array[uvdf.antpair2ind(9, 10)][:2]) + assert not np.any(uvdf.flag_array[uvdf.antpair2ind(9, 20)]) + + # convert uvf to waterfall and test + uvfw = uvf.copy() + uvfw.to_waterfall(method="or") + uvdf = apply_uvflag(uvd, uvfw, inplace=False) + assert np.all(uvdf.flag_array[uvdf.antpair2ind(9, 10)][:2]) + assert np.all(uvdf.flag_array[uvdf.antpair2ind(9, 20)][:2]) + assert np.all(uvdf.flag_array[uvdf.antpair2ind(20, 22)][:2]) + + # test mode exception + uvfm = uvf.copy() + uvfm.mode = "metric" + with pytest.raises(ValueError, match="UVFlag must be flag mode"): + apply_uvflag(uvd, uvfm) + + # test polarization exception + uvd2 = uvd.copy() + uvd2.polarization_array[0] = -6 + uvf2 = UVFlag(uvd) + uvf2.to_flag() + uvd2.polarization_array[0] = -8 + with pytest.raises( + ValueError, match="Input uvf and uvd polarizations do not match" + ): + apply_uvflag(uvd2, uvf2, force_pol=False) + + # test time and frequency mismatch exceptions + uvf2 = uvf.select(frequencies=uvf.freq_array[:2], inplace=False) + with pytest.raises( + ValueError, match="UVFlag and UVData have mismatched frequency arrays" + ): + apply_uvflag(uvd, uvf2) + + uvf2 = uvf.copy() + uvf2.freq_array += 1.0 + with pytest.raises( + ValueError, match="UVFlag and UVData have mismatched frequency arrays" + ): + apply_uvflag(uvd, uvf2) + + uvf2 = uvf.select(times=np.unique(uvf.time_array)[:2], inplace=False) + with pytest.raises( + ValueError, match="UVFlag and UVData have mismatched time arrays" + ): + apply_uvflag(uvd, uvf2) + + uvf2 = uvf.copy() + uvf2.time_array += 1.0 + with pytest.raises( + ValueError, match="UVFlag and UVData have mismatched time arrays" + ): + apply_uvflag(uvd, uvf2) + + # assert implicit broadcasting works + uvf2 = uvf.select(frequencies=uvf.freq_array[:1], inplace=False) + uvd2 = apply_uvflag(uvd, uvf2, inplace=False) + assert np.all(uvd2.get_flags(9, 10)[:2]) + uvf2 = uvf.select(times=np.unique(uvf.time_array)[:1], inplace=False) + uvd2 = apply_uvflag(uvd, uvf2, inplace=False) + assert np.all(uvd2.get_flags(9, 10)) diff --git a/tests/test_parameter.py b/tests/test_parameter.py index b0c8b8bdbc..196a75722d 100644 --- a/tests/test_parameter.py +++ b/tests/test_parameter.py @@ -3,9 +3,9 @@ # Licensed under the 2-clause BSD License import copy -import astropy.units as units import numpy as np import pytest +from astropy import units from 
astropy.coordinates import ( CartesianRepresentation, EarthLocation, @@ -25,7 +25,7 @@ from pyuvdata.parameter import allowed_location_types from pyuvdata.uvbase import UVBase -from .test_utils import ( +from .utils.test_coordinates import ( frame_selenoid, ref_latlonalt, ref_latlonalt_moon, diff --git a/tests/test_utils.py b/tests/test_utils.py deleted file mode 100644 index f31765c36b..0000000000 --- a/tests/test_utils.py +++ /dev/null @@ -1,5045 +0,0 @@ -# -*- mode: python; coding: utf-8 -*- -# Copyright (c) 2018 Radio Astronomy Software Group -# Licensed under the 2-clause BSD License - -"""Tests for common utility functions.""" -import copy -import os -import re - -import numpy as np -import pytest -from astropy import units -from astropy import units as un -from astropy.coordinates import Angle, EarthLocation, SkyCoord -from astropy.time import Time - -import pyuvdata.utils as uvutils -from pyuvdata import UVCal, UVData, UVFlag -from pyuvdata.data import DATA_PATH -from pyuvdata.testing import check_warnings -from pyuvdata.utils import hasmoon - -selenoids = ["SPHERE", "GSFC", "GRAIL23", "CE-1-LAM-GEO"] - -if hasmoon: - from pyuvdata.utils import LTime, MoonLocation - - frame_selenoid = [["itrs", None]] - for snd in selenoids: - frame_selenoid.append(["mcmf", snd]) -else: - frame_selenoid = [["itrs", None]] - - -# Earth -ref_latlonalt = (-26.7 * np.pi / 180.0, 116.7 * np.pi / 180.0, 377.8) -ref_xyz = (-2562123.42683, 5094215.40141, -2848728.58869) - -# Moon -ref_latlonalt_moon = (0.6875 * np.pi / 180.0, 24.433 * np.pi / 180.0, 0.3) -ref_xyz_moon = { - "SPHERE": (1581421.43506347, 718463.12201783, 20843.2071012), - "GSFC": (1582332.08831085, 718876.84524219, 20805.18709001), - "GRAIL23": (1581855.3916402, 718660.27490195, 20836.2107652), - "CE-1-LAM-GEO": (1581905.99108228, 718683.26297605, 20806.77965693), -} - -pytestmark = pytest.mark.filterwarnings( - "ignore:telescope_location is not set. Using known values", - "ignore:antenna_positions are not set or are being overwritten. 
Using known values", -) - - -@pytest.fixture(scope="session") -def astrometry_args(): - default_args = { - "time_array": 2456789.0 + np.array([0.0, 1.25, 10.5, 100.75]), - "icrs_ra": 2.468, - "icrs_dec": 1.234, - "epoch": 2000.0, - "telescope_loc": (0.123, -0.456, 4321.0), - "telescope_frame": "itrs", - "pm_ra": 12.3, - "pm_dec": 45.6, - "vrad": 31.4, - "dist": 73.31, - "library": "erfa", - } - default_args["lst_array"] = uvutils.get_lst_for_time( - jd_array=default_args["time_array"], - latitude=default_args["telescope_loc"][0] * (180.0 / np.pi), - longitude=default_args["telescope_loc"][1] * (180.0 / np.pi), - altitude=default_args["telescope_loc"][2], - frame="itrs", - ) - - default_args["drift_coord"] = SkyCoord( - default_args["lst_array"], - [default_args["telescope_loc"][0]] * len(default_args["lst_array"]), - unit="rad", - ) - - if hasmoon: - default_args["moon_telescope_loc"] = ( - 0.6875 * np.pi / 180.0, - 24.433 * np.pi / 180.0, - 0.3, - ) - default_args["moon_lst_array"] = {} - default_args["moon_drift_coord"] = {} - for selenoid in selenoids: - default_args["moon_lst_array"][selenoid] = uvutils.get_lst_for_time( - jd_array=default_args["time_array"], - latitude=default_args["moon_telescope_loc"][0] * (180.0 / np.pi), - longitude=default_args["moon_telescope_loc"][1] * (180.0 / np.pi), - altitude=default_args["moon_telescope_loc"][2], - frame="mcmf", - ellipsoid=selenoid, - ) - default_args["moon_drift_coord"][selenoid] = SkyCoord( - default_args["moon_lst_array"][selenoid], - [default_args["moon_telescope_loc"][0]] - * len(default_args["moon_lst_array"][selenoid]), - unit="rad", - ) - - default_args["icrs_coord"] = SkyCoord( - default_args["icrs_ra"], default_args["icrs_dec"], unit="rad" - ) - - default_args["fk5_ra"], default_args["fk5_dec"] = uvutils.transform_sidereal_coords( - longitude=default_args["icrs_ra"], - latitude=default_args["icrs_dec"], - in_coord_frame="icrs", - out_coord_frame="fk5", - in_coord_epoch="J2000.0", - out_coord_epoch="J2000.0", - ) - - # These are values calculated w/o the optional arguments, e.g. 
pm, vrad, dist - default_args["app_ra"], default_args["app_dec"] = uvutils.transform_icrs_to_app( - time_array=default_args["time_array"], - ra=default_args["icrs_ra"], - dec=default_args["icrs_dec"], - telescope_loc=default_args["telescope_loc"], - ) - - default_args["app_coord"] = SkyCoord( - default_args["app_ra"], default_args["app_dec"], unit="rad" - ) - - if hasmoon: - default_args["moon_app_ra"] = {} - default_args["moon_app_dec"] = {} - default_args["moon_app_coord"] = {} - for selenoid in selenoids: - ( - default_args["moon_app_ra"][selenoid], - default_args["moon_app_dec"][selenoid], - ) = uvutils.transform_icrs_to_app( - time_array=default_args["time_array"], - ra=default_args["icrs_ra"], - dec=default_args["icrs_dec"], - telescope_loc=default_args["moon_telescope_loc"], - telescope_frame="mcmf", - ellipsoid=selenoid, - ) - - default_args["moon_app_coord"][selenoid] = SkyCoord( - default_args["moon_app_ra"][selenoid], - default_args["moon_app_dec"][selenoid], - unit="rad", - ) - - yield default_args - - -@pytest.fixture -def vector_list(): - x_vecs = np.array([[1, 0, 0], [2, 0, 0]], dtype=float).T - y_vecs = np.array([[0, 1, 0], [0, 2, 0]], dtype=float).T - z_vecs = np.array([[0, 0, 1], [0, 0, 2]], dtype=float).T - test_vecs = np.array([[1, 1, 1], [2, 2, 2]], dtype=float).T - - yield x_vecs, y_vecs, z_vecs, test_vecs - - -@pytest.fixture -def calc_uvw_args(): - default_args = { - "app_ra": np.zeros(3), - "app_dec": np.zeros(3) + 1.0, - "frame_pa": np.zeros(3) + 1e-3, - "lst_array": np.zeros(3) + np.pi, - "use_ant_pos": True, - "uvw_array": np.array([[1, -1, 0], [0, -1, 1], [-1, 0, 1]], dtype=float), - "antenna_positions": np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=float), - "antenna_numbers": [1, 2, 3], - "ant_1_array": np.array([1, 1, 2]), - "ant_2_array": np.array([2, 3, 3]), - "old_app_ra": np.zeros(3) + np.pi, - "old_app_dec": np.zeros(3), - "old_frame_pa": np.zeros(3), - "telescope_lat": 1.0, - "telescope_lon": 0.0, - "to_enu": False, - "from_enu": False, - } - yield default_args - - -@pytest.fixture(scope="session") -def utils_uvdata_main(): - uvd = UVData() - uvd.read(os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcAA.uvh5")) - - yield uvd - - -@pytest.fixture(scope="function") -def utils_uvdata(utils_uvdata_main): - uvd = utils_uvdata_main.copy() - - yield uvd - - -def test_XYZ_from_LatLonAlt(): - """Test conversion from lat/lon/alt to ECEF xyz with reference values.""" - out_xyz = uvutils.XYZ_from_LatLonAlt( - ref_latlonalt[0], ref_latlonalt[1], ref_latlonalt[2] - ) - # Got reference by forcing http://www.oc.nps.edu/oc2902w/coord/llhxyz.htm - # to give additional precision. - np.testing.assert_allclose(ref_xyz, out_xyz, rtol=0, atol=1e-3) - - # test error checking - with pytest.raises( - ValueError, - match="latitude, longitude and altitude must all have the same length", - ): - uvutils.XYZ_from_LatLonAlt( - ref_latlonalt[0], - ref_latlonalt[1], - np.array([ref_latlonalt[2], ref_latlonalt[2]]), - ) - - with pytest.raises( - ValueError, - match="latitude, longitude and altitude must all have the same length", - ): - uvutils.XYZ_from_LatLonAlt( - ref_latlonalt[0], - np.array([ref_latlonalt[1], ref_latlonalt[1]]), - ref_latlonalt[2], - ) - - -def test_LatLonAlt_from_XYZ(): - """Test conversion from ECEF xyz to lat/lon/alt with reference values.""" - out_latlonalt = uvutils.LatLonAlt_from_XYZ(ref_xyz) - # Got reference by forcing http://www.oc.nps.edu/oc2902w/coord/llhxyz.htm - # to give additional precision. 
- np.testing.assert_allclose(ref_latlonalt, out_latlonalt, rtol=0, atol=1e-3) - pytest.raises(ValueError, uvutils.LatLonAlt_from_XYZ, ref_latlonalt) - - # test passing multiple values - xyz_mult = np.stack((np.array(ref_xyz), np.array(ref_xyz))) - lat_vec, lon_vec, alt_vec = uvutils.LatLonAlt_from_XYZ(xyz_mult) - np.testing.assert_allclose( - ref_latlonalt, (lat_vec[1], lon_vec[1], alt_vec[1]), rtol=0, atol=1e-3 - ) - # check error if array transposed - with pytest.raises( - ValueError, - match=re.escape("The expected shape of ECEF xyz array is (Npts, 3)."), - ): - uvutils.LatLonAlt_from_XYZ(xyz_mult.T) - - # check error if only 2 coordinates - with pytest.raises( - ValueError, - match=re.escape("The expected shape of ECEF xyz array is (Npts, 3)."), - ): - uvutils.LatLonAlt_from_XYZ(xyz_mult[:, 0:2]) - - # test error checking - pytest.raises(ValueError, uvutils.LatLonAlt_from_XYZ, ref_xyz[0:1]) - - -@pytest.mark.skipif(not hasmoon, reason="lunarsky not installed") -@pytest.mark.parametrize("selenoid", selenoids) -def test_XYZ_from_LatLonAlt_mcmf(selenoid): - """Test MCMF lat/lon/alt to xyz with reference values.""" - lat, lon, alt = ref_latlonalt_moon - out_xyz = uvutils.XYZ_from_LatLonAlt( - lat, lon, alt, frame="mcmf", ellipsoid=selenoid - ) - np.testing.assert_allclose(ref_xyz_moon[selenoid], out_xyz, rtol=0, atol=1e-3) - - # test default ellipsoid - if selenoid == "SPHERE": - out_xyz = uvutils.XYZ_from_LatLonAlt(lat, lon, alt, frame="mcmf") - np.testing.assert_allclose(ref_xyz_moon[selenoid], out_xyz, rtol=0, atol=1e-3) - - # Test errors with invalid frame - with pytest.raises( - ValueError, match="No cartesian to spherical transform defined for frame" - ): - uvutils.XYZ_from_LatLonAlt(lat, lon, alt, frame="undef") - - -@pytest.mark.skipif(not hasmoon, reason="lunarsky not installed") -@pytest.mark.parametrize("selenoid", selenoids) -def test_LatLonAlt_from_XYZ_mcmf(selenoid): - """Test MCMF xyz to lat/lon/alt with reference values.""" - out_latlonalt = uvutils.LatLonAlt_from_XYZ( - ref_xyz_moon[selenoid], frame="mcmf", ellipsoid=selenoid - ) - np.testing.assert_allclose(ref_latlonalt_moon, out_latlonalt, rtol=0, atol=1e-3) - - # test default ellipsoid - if selenoid == "SPHERE": - out_latlonalt = uvutils.LatLonAlt_from_XYZ(ref_xyz_moon[selenoid], frame="mcmf") - np.testing.assert_allclose(ref_latlonalt_moon, out_latlonalt, rtol=0, atol=1e-3) - - # Test errors with invalid frame - with pytest.raises( - ValueError, match="Cannot check acceptability for unknown frame" - ): - out_latlonalt = uvutils.LatLonAlt_from_XYZ( - ref_xyz_moon[selenoid], frame="undef" - ) - with pytest.raises( - ValueError, match="No spherical to cartesian transform defined for frame" - ): - uvutils.LatLonAlt_from_XYZ( - ref_xyz_moon[selenoid], frame="undef", check_acceptability=False - ) - - -@pytest.mark.skipif(hasmoon, reason="Test only when lunarsky not installed.") -def test_no_moon(): - """Check errors when calling functions with MCMF without lunarsky.""" - msg = "Need to install `lunarsky` package to work with MCMF frame." 
- with pytest.raises(ValueError, match=msg): - uvutils.LatLonAlt_from_XYZ(ref_xyz_moon["SPHERE"], frame="mcmf") - lat, lon, alt = ref_latlonalt_moon - with pytest.raises(ValueError, match=msg): - uvutils.XYZ_from_LatLonAlt(lat, lon, alt, frame="mcmf") - with pytest.raises(ValueError, match=msg): - uvutils.get_lst_for_time( - [2451545.0], latitude=0, longitude=0, altitude=0, frame="mcmf" - ) - with pytest.raises(ValueError, match=msg): - uvutils.ENU_from_ECEF( - None, latitude=0.0, longitude=1.0, altitude=10.0, frame="mcmf" - ) - with pytest.raises(ValueError, match=msg): - uvutils.ECEF_from_ENU( - None, latitude=0.0, longitude=1.0, altitude=10.0, frame="mcmf" - ) - with pytest.raises(ValueError, match=msg): - uvutils.transform_icrs_to_app( - time_array=[2451545.0], - ra=0, - dec=0, - telescope_loc=(0, 0, 0), - telescope_frame="mcmf", - ) - with pytest.raises(ValueError, match=msg): - uvutils.transform_app_to_icrs( - time_array=[2451545.0], - app_ra=0, - app_dec=0, - telescope_loc=(0, 0, 0), - telescope_frame="mcmf", - ) - with pytest.raises(ValueError, match=msg): - uvutils.calc_app_coords(lon_coord=0.0, lat_coord=0.0, telescope_frame="mcmf") - - -def test_lla_xyz_lla_roundtrip(): - """Test roundtripping an array will yield the same values.""" - np.random.seed(0) - lats = -30.721 + np.random.normal(0, 0.0005, size=30) - lons = 21.428 + np.random.normal(0, 0.0005, size=30) - alts = np.random.uniform(1051, 1054, size=30) - lats *= np.pi / 180.0 - lons *= np.pi / 180.0 - xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts) - lats_new, lons_new, alts_new = uvutils.LatLonAlt_from_XYZ(xyz) - np.testing.assert_allclose(lats_new, lats) - np.testing.assert_allclose(lons_new, lons) - np.testing.assert_allclose(alts_new, alts) - - -@pytest.fixture(scope="module") -def enu_ecef_info(): - """Some setup info for ENU/ECEF calculations.""" - center_lat = -30.7215261207 * np.pi / 180.0 - center_lon = 21.4283038269 * np.pi / 180.0 - center_alt = 1051.7 - # fmt: off - lats = (np.array([-30.72218216, -30.72138101, -30.7212785, -30.7210011, - -30.72159853, -30.72206199, -30.72174614, -30.72188775, - -30.72183915, -30.72100138]) - * np.pi / 180.0) - lons = (np.array([21.42728211, 21.42811727, 21.42814544, 21.42795736, - 21.42686739, 21.42918772, 21.42785662, 21.4286408, - 21.42750933, 21.42896567]) - * np.pi / 180.0) - alts = np.array([1052.25, 1051.35, 1051.2, 1051., 1051.45, 1052.04, 1051.68, - 1051.87, 1051.77, 1051.06]) - # used pymap3d, which implements matlab code, as a reference. 
- x = [5109327.46674067, 5109339.76407785, 5109344.06370947, - 5109365.11297147, 5109372.115673, 5109266.94314734, - 5109329.89620962, 5109295.13656657, 5109337.21810468, - 5109329.85680612] - - y = [2005130.57953031, 2005221.35184577, 2005225.93775268, - 2005214.8436201, 2005105.42364036, 2005302.93158317, - 2005190.65566222, 2005257.71335575, 2005157.78980089, - 2005304.7729239] - - z = [-3239991.24516348, -3239914.4185286, -3239904.57048431, - -3239878.02656316, -3239935.20415493, -3239979.68381865, - -3239949.39266985, -3239962.98805772, -3239958.30386264, - -3239878.08403833] - - east = [-97.87631659, -17.87126443, -15.17316938, -33.19049252, -137.60520964, - 84.67346748, -42.84049408, 32.28083937, -76.1094745, 63.40285935] - north = [-72.7437482, 16.09066646, 27.45724573, 58.21544651, -8.02964511, - -59.41961437, -24.39698388, -40.09891961, -34.70965816, 58.18410876] - up = [0.54883333, -0.35004539, -0.50007736, -0.70035299, -0.25148791, 0.33916067, - -0.02019057, 0.16979185, 0.06945155, -0.64058124] - # fmt: on - yield ( - center_lat, - center_lon, - center_alt, - lats, - lons, - alts, - x, - y, - z, - east, - north, - up, - ) - - -@pytest.fixture(scope="module") -def enu_mcmf_info(): - center_lat, center_lon, center_alt = [ - 0.6875 * np.pi / 180.0, - 24.433 * np.pi / 180.0, - 0.3, - ] - - # Creating a test pattern of a circle of antennas, radius 500 m in ENU coordinates. - angs = np.linspace(0, 2 * np.pi, 10, endpoint=False) - enus = 500 * np.array([np.cos(angs), np.sin(angs), [0] * angs.size]) - east = enus[0].tolist() - north = enus[1].tolist() - up = enus[2].tolist() - - # fmt: off - lats = { - "SPHERE": np.deg2rad( - [ - 0.68749997, 0.69719361, 0.70318462, 0.70318462, 0.69719361, - 0.68749997, 0.67780635, 0.67181538, 0.67181538, 0.67780635 - ] - ), - "GSFC": np.deg2rad( - [ - 0.68749997, 0.69721132, 0.70321328, 0.70321328, 0.69721132, - 0.68749997, 0.67778864, 0.67178672, 0.67178672, 0.67778864 - ] - ), - "GRAIL23": np.deg2rad( - [ - 0.68749997, 0.69719686, 0.70318988, 0.70318988, 0.69719686, - 0.68749997, 0.6778031 , 0.67181011, 0.67181011, 0.6778031 - ] - ), - "CE-1-LAM-GEO": np.deg2rad( - [ - 0.68749997, 0.69721058, 0.70321207, 0.70321207, 0.69721058, - 0.68749997, 0.67778938, 0.67178792, 0.67178792, 0.67778938 - ] - ), - } - lons = { - "SPHERE": np.deg2rad( - [ - 24.44949297, 24.44634312, 24.43809663, 24.42790337, 24.41965688, - 24.41650703, 24.41965693, 24.42790341, 24.43809659, 24.44634307 - ] - ), - "GSFC": np.deg2rad( - [ - 24.44948348, 24.44633544, 24.43809369, 24.42790631, 24.41966456, - 24.41651652, 24.41966461, 24.42790634, 24.43809366, 24.44633539 - ] - ), - "GRAIL23": np.deg2rad( - [ - 24.44948845, 24.44633946, 24.43809523, 24.42790477, 24.41966054, - 24.41651155, 24.41966059, 24.42790481, 24.43809519, 24.44633941 - ] - ), - "CE-1-LAM-GEO": np.deg2rad( - [ - 24.44948792, 24.44633904, 24.43809507, 24.42790493, 24.41966096, - 24.41651208, 24.41966102, 24.42790497, 24.43809503, 24.44633898 - ] - ), - } - alts = { - "SPHERE": [ - 0.371959, 0.371959, 0.371959, 0.371959, 0.371959, 0.371959, - 0.371959, 0.371959, 0.371959, 0.371959 - ], - "GSFC": [ - 0.37191758, 0.37197732, 0.37207396, 0.37207396, 0.37197732, - 0.37191758, 0.37197732, 0.37207396, 0.37207396, 0.37197732 - ], - "GRAIL23": [ - 0.37193926, 0.37195442, 0.37197896, 0.37197896, 0.37195442, - 0.37193926, 0.37195442, 0.37197896, 0.37197896, 0.37195442 - ], - "CE-1-LAM-GEO": [ - 0.37193696, 0.37198809, 0.37207083, 0.37207083, 0.37198809, - 0.37193696, 0.37198809, 0.37207083, 0.37207083, 0.37198809 - ], - } - x = 
{ - "SPHERE": [ - 1581214.62062477, 1581250.9080965 , 1581352.33107362, - 1581480.14942611, 1581585.54088769, 1581628.24950218, - 1581591.96203044, 1581490.53905332, 1581362.72070084, - 1581257.32923925 - ], - "GSFC": [ - 1582125.27387214, 1582161.56134388, 1582262.984321, - 1582390.80267348, 1582496.19413507, 1582538.90274956, - 1582502.61527782, 1582401.1923007 , 1582273.37394822, - 1582167.98248663 - ], - "GRAIL23": [ - 1581648.57720149, 1581684.86467323, 1581786.28765035, - 1581914.10600283, 1582019.49746442, 1582062.2060789 , - 1582025.91860717, 1581924.49563005, 1581796.67727756, - 1581691.28581598 - ], - "CE-1-LAM-GEO": [ - 1581699.17664357, 1581735.46411531, 1581836.88709243, - 1581964.70544491, 1582070.0969065 , 1582112.80552098, - 1582076.51804925, 1581975.09507213, 1581847.27671964, - 1581741.88525806 - ] - } - - y = { - "SPHERE": [ - 718918.34480718, 718829.94638063, 718601.4335154 , 718320.09035913, - 718093.38043501, 718007.89922848, 718096.29765503, 718324.81052027, - 718606.15367654, 718832.86360065 - ], - "GSFC": [ - 719332.06803154, 719243.66960499, 719015.15673976, 718733.81358349, - 718507.10365937, 718421.62245284, 718510.02087939, 718738.53374463, - 719019.8769009 , 719246.58682501 - ], - "GRAIL23": [ - 719115.4976913 , 719027.09926475, 718798.58639952, 718517.24324325, - 718290.53331913, 718205.0521126 , 718293.45053915, 718521.96340439, - 718803.30656066, 719030.01648477 - ], - "CE-1-LAM-GEO": [ - 719138.4857654 , 719050.08733885, 718821.57447362, 718540.23131734, - 718313.52139323, 718228.0401867 , 718316.43861325, 718544.95147849, - 718826.29463476, 719053.00455887 - ], - } - z = { - "SPHERE": [ - 20843.2071012 , 21137.07857037, 21318.70112664, 21318.70112664, - 21137.07857037, 20843.2071012 , 20549.33563204, 20367.71307577, - 20367.71307577, 20549.33563204 - ], - "GSFC": [ - 20805.18709001, 21099.05855918, 21280.68111545, 21280.68111545, - 21099.05855918, 20805.18709001, 20511.31562084, 20329.69306457, - 20329.69306457, 20511.31562084 - ], - "GRAIL23": [ - 20836.2107652 , 21130.08223437, 21311.70479064, 21311.70479064, - 21130.08223437, 20836.2107652 , 20542.33929603, 20360.71673976, - 20360.71673976, 20542.33929603 - ], - "CE-1-LAM-GEO": [ - 20806.77965693, 21100.6511261 , 21282.27368237, 21282.27368237, - 21100.6511261 , 20806.77965693, 20512.90818776, 20331.28563149, - 20331.28563149, 20512.90818776 - ], - } - - # fmt: on - yield ( - center_lat, - center_lon, - center_alt, - lats, - lons, - alts, - x, - y, - z, - east, - north, - up, - ) - - -def test_xyz_from_latlonalt(enu_ecef_info): - """Test calculating xyz from lat lot alt.""" - (center_lat, center_lon, center_alt, lats, lons, alts, x, y, z, east, north, up) = ( - enu_ecef_info - ) - xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts) - np.testing.assert_allclose(np.stack((x, y, z), axis=1), xyz, atol=1e-3) - - -def test_enu_from_ecef(enu_ecef_info): - """Test calculating ENU from ECEF coordinates.""" - (center_lat, center_lon, center_alt, lats, lons, alts, x, y, z, east, north, up) = ( - enu_ecef_info - ) - xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts) - - enu = uvutils.ENU_from_ECEF( - xyz, latitude=center_lat, longitude=center_lon, altitude=center_alt - ) - np.testing.assert_allclose(np.stack((east, north, up), axis=1), enu, atol=1e-3) - - enu2 = uvutils.ENU_from_ECEF( - xyz, - center_loc=EarthLocation.from_geodetic( - lat=center_lat * units.rad, - lon=center_lon * units.rad, - height=center_alt * units.m, - ), - ) - np.testing.assert_allclose(enu, enu2) - - -@pytest.mark.skipif(not hasmoon, 
reason="lunarsky not installed") -@pytest.mark.parametrize("selenoid", selenoids) -def test_enu_from_mcmf(enu_mcmf_info, selenoid): - (center_lat, center_lon, center_alt, lats, lons, alts, x, y, z, east, north, up) = ( - enu_mcmf_info - ) - xyz = uvutils.XYZ_from_LatLonAlt( - lats[selenoid], lons[selenoid], alts[selenoid], frame="mcmf", ellipsoid=selenoid - ) - enu = uvutils.ENU_from_ECEF( - xyz, - latitude=center_lat, - longitude=center_lon, - altitude=center_alt, - frame="mcmf", - ellipsoid=selenoid, - ) - - np.testing.assert_allclose(np.stack((east, north, up), axis=1), enu, atol=1e-3) - - enu2 = uvutils.ENU_from_ECEF( - xyz, - center_loc=MoonLocation.from_selenodetic( - lat=center_lat * units.rad, - lon=center_lon * units.rad, - height=center_alt * units.m, - ellipsoid=selenoid, - ), - ) - np.testing.assert_allclose(enu, enu2, atol=1e-3) - - -def test_invalid_frame(): - """Test error is raised when an invalid frame name is passed in.""" - with pytest.raises( - ValueError, match='No ENU_from_ECEF transform defined for frame "UNDEF".' - ): - uvutils.ENU_from_ECEF( - np.zeros((2, 3)), latitude=0.0, longitude=0.0, altitude=0.0, frame="undef" - ) - with pytest.raises( - ValueError, match='No ECEF_from_ENU transform defined for frame "UNDEF".' - ): - uvutils.ECEF_from_ENU( - np.zeros((2, 3)), latitude=0.0, longitude=0.0, altitude=0.0, frame="undef" - ) - - with pytest.raises( - ValueError, match="center_loc is not a supported type. It must be one of " - ): - uvutils.ENU_from_ECEF( - np.zeros((2, 3)), center_loc=units.Quantity(np.array([0, 0, 0]) * units.m) - ) - - with pytest.raises( - ValueError, match="center_loc is not a supported type. It must be one of " - ): - uvutils.ECEF_from_ENU( - np.zeros((2, 3)), center_loc=units.Quantity(np.array([0, 0, 0]) * units.m) - ) - - -@pytest.mark.parametrize("shape_type", ["transpose", "Nblts,2", "Nblts,1"]) -def test_enu_from_ecef_shape_errors(enu_ecef_info, shape_type): - """Test ENU_from_ECEF input shape errors.""" - (center_lat, center_lon, center_alt, lats, lons, alts, x, y, z, east, north, up) = ( - enu_ecef_info - ) - xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts) - if shape_type == "transpose": - xyz = xyz.T.copy() - elif shape_type == "Nblts,2": - xyz = xyz.copy()[:, 0:2] - elif shape_type == "Nblts,1": - xyz = xyz.copy()[:, 0:1] - - # check error if array transposed - with pytest.raises( - ValueError, - match=re.escape("The expected shape of ECEF xyz array is (Npts, 3)."), - ): - uvutils.ENU_from_ECEF( - xyz, longitude=center_lat, latitude=center_lon, altitude=center_alt - ) - - -def test_enu_from_ecef_magnitude_error(enu_ecef_info): - """Test ENU_from_ECEF input magnitude errors.""" - (center_lat, center_lon, center_alt, lats, lons, alts, x, y, z, east, north, up) = ( - enu_ecef_info - ) - xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts) - # error checking - with pytest.raises( - ValueError, - match="ITRS vector magnitudes must be on the order of the radius of the earth", - ): - uvutils.ENU_from_ECEF( - xyz / 2.0, latitude=center_lat, longitude=center_lon, altitude=center_alt - ) - - -def test_enu_from_ecef_error(): - # check error no center location info passed - with pytest.raises( - ValueError, - match="Either center_loc or all of latitude, longitude and altitude " - "must be passed.", - ): - uvutils.ENU_from_ECEF(np.array([0, 0, 0])) - - with pytest.raises( - ValueError, - match="Either center_loc or all of latitude, longitude and altitude " - "must be passed.", - ): - uvutils.ECEF_from_ENU(np.array([0, 0, 0])) - - 
-@pytest.mark.parametrize(["frame", "selenoid"], frame_selenoid) -def test_ecef_from_enu_roundtrip(enu_ecef_info, enu_mcmf_info, frame, selenoid): - """Test ECEF_from_ENU values.""" - (center_lat, center_lon, center_alt, lats, lons, alts, x, y, z, east, north, up) = ( - enu_ecef_info if frame == "itrs" else enu_mcmf_info - ) - if frame == "mcmf": - lats = lats[selenoid] - lons = lons[selenoid] - alts = alts[selenoid] - loc_obj = MoonLocation.from_selenodetic( - lat=center_lat * units.rad, - lon=center_lon * units.rad, - height=center_alt * units.m, - ellipsoid=selenoid, - ) - else: - loc_obj = EarthLocation.from_geodetic( - lat=center_lat * units.rad, - lon=center_lon * units.rad, - height=center_alt * units.m, - ) - - xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts, frame=frame, ellipsoid=selenoid) - enu = uvutils.ENU_from_ECEF( - xyz, - latitude=center_lat, - longitude=center_lon, - altitude=center_alt, - frame=frame, - ellipsoid=selenoid, - ) - # check that a round trip gives the original value. - xyz_from_enu = uvutils.ECEF_from_ENU( - enu, - latitude=center_lat, - longitude=center_lon, - altitude=center_alt, - frame=frame, - ellipsoid=selenoid, - ) - np.testing.assert_allclose(xyz, xyz_from_enu, atol=1e-3) - - xyz_from_enu2 = uvutils.ECEF_from_ENU(enu, center_loc=loc_obj) - np.testing.assert_allclose(xyz_from_enu, xyz_from_enu2, atol=1e-3) - - if selenoid == "SPHERE": - enu = uvutils.ENU_from_ECEF( - xyz, - latitude=center_lat, - longitude=center_lon, - altitude=center_alt, - frame=frame, - ) - # check that a round trip gives the original value. - xyz_from_enu = uvutils.ECEF_from_ENU( - enu, - latitude=center_lat, - longitude=center_lon, - altitude=center_alt, - frame=frame, - ) - np.testing.assert_allclose(xyz, xyz_from_enu, atol=1e-3) - - -@pytest.mark.parametrize("shape_type", ["transpose", "Nblts,2", "Nblts,1"]) -def test_ecef_from_enu_shape_errors(enu_ecef_info, shape_type): - (center_lat, center_lon, center_alt, lats, lons, alts, x, y, z, east, north, up) = ( - enu_ecef_info - ) - xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts) - enu = uvutils.ENU_from_ECEF( - xyz, latitude=center_lat, longitude=center_lon, altitude=center_alt - ) - if shape_type == "transpose": - enu = enu.copy().T - elif shape_type == "Nblts,2": - enu = enu.copy()[:, 0:2] - elif shape_type == "Nblts,1": - enu = enu.copy()[:, 0:1] - - # check error if array transposed - with pytest.raises( - ValueError, match=re.escape("The expected shape of the ENU array is (Npts, 3).") - ): - uvutils.ECEF_from_ENU( - enu, latitude=center_lat, longitude=center_lon, altitude=center_alt - ) - - -def test_ecef_from_enu_single(enu_ecef_info): - """Test single coordinate transform.""" - (center_lat, center_lon, center_alt, lats, lons, alts, x, y, z, east, north, up) = ( - enu_ecef_info - ) - xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts) - # check passing a single value - enu_single = uvutils.ENU_from_ECEF( - xyz[0, :], latitude=center_lat, longitude=center_lon, altitude=center_alt - ) - - np.testing.assert_allclose( - np.array((east[0], north[0], up[0])), enu_single, atol=1e-3 - ) - - -def test_ecef_from_enu_single_roundtrip(enu_ecef_info): - """Test single coordinate roundtrip.""" - (center_lat, center_lon, center_alt, lats, lons, alts, x, y, z, east, north, up) = ( - enu_ecef_info - ) - xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts) - # check passing a single value - enu = uvutils.ENU_from_ECEF( - xyz, latitude=center_lat, longitude=center_lon, altitude=center_alt - ) - - enu_single = uvutils.ENU_from_ECEF( - xyz[0, 
:], latitude=center_lat, longitude=center_lon, altitude=center_alt - ) - np.testing.assert_allclose( - np.array((east[0], north[0], up[0])), enu[0, :], atol=1e-3 - ) - - xyz_from_enu = uvutils.ECEF_from_ENU( - enu_single, latitude=center_lat, longitude=center_lon, altitude=center_alt - ) - np.testing.assert_allclose(xyz[0, :], xyz_from_enu, atol=1e-3) - - -def test_mwa_ecef_conversion(): - """ - Test based on comparing the antenna locations in a Cotter uvfits file to - the antenna locations in MWA_tools. - """ - - test_data_file = os.path.join(DATA_PATH, "mwa128_ant_layouts.npz") - f = np.load(test_data_file) - - # From the STABXYZ table in a cotter-generated uvfits file, obsid = 1066666832 - xyz = f["stabxyz"] - # From the East/North/Height columns in a cotter-generated metafits file, - # obsid = 1066666832 - enh = f["ENH"] - # From a text file antenna_locations.txt in MWA_Tools/scripts - txt_topo = f["txt_topo"] - - # From the unphased uvw coordinates of obsid 1066666832, positions relative - # to antenna 0 - # these aren't used in the current test, but are interesting and might help - # with phasing diagnosis in the future - uvw_topo = f["uvw_topo"] - # Sky coordinates are flipped for uvw derived values - uvw_topo = -uvw_topo - uvw_topo += txt_topo[0] - - # transpose these arrays to get them into the right shape - txt_topo = txt_topo.T - uvw_topo = uvw_topo.T - - # ARRAYX, ARRAYY, ARRAYZ in ECEF frame from Cotter file - arrcent = f["arrcent"] - lat, lon, alt = uvutils.LatLonAlt_from_XYZ(arrcent) - - # The STABXYZ coordinates are defined with X through the local meridian, - # so rotate back to the prime meridian - new_xyz = uvutils.ECEF_from_rotECEF(xyz.T, lon) - # add in array center to get real ECEF - ecef_xyz = new_xyz + arrcent - - enu = uvutils.ENU_from_ECEF(ecef_xyz, latitude=lat, longitude=lon, altitude=alt) - - np.testing.assert_allclose(enu, enh) - - # test other direction of ECEF rotation - rot_xyz = uvutils.rotECEF_from_ECEF(new_xyz, lon) - np.testing.assert_allclose(rot_xyz.T, xyz) - - -@pytest.mark.parametrize( - "lon_array,lat_array,msg", - ( - [0.0, np.array([0.0]), "lon_array and lat_array must either both be floats or"], - [np.array([0.0, 1.0]), np.array([0.0]), "lon_array and lat_array must have "], - ), -) -def test_polar2_to_cart3_arg_errs(lon_array, lat_array, msg): - """ - Test that bad arguments to polar2_to_cart3 throw appropriate errors. - """ - with pytest.raises(ValueError, match=msg): - uvutils.polar2_to_cart3(lon_array=lon_array, lat_array=lat_array) - - -@pytest.mark.parametrize( - "input1,msg", - ( - [0.0, "xyz_array must be an ndarray."], - [np.array(0.0), "xyz_array must have ndim > 0"], - [np.array([0.0]), "xyz_array must be length 3"], - ), -) -def test_cart3_to_polar2_arg_errs(input1, msg): - """ - Test that bad arguments to cart3_to_polar2 throw appropriate errors. - """ - with pytest.raises(ValueError, match=msg): - uvutils.cart3_to_polar2(input1) - - -@pytest.mark.parametrize( - "input1,input2,input3,msg", - ( - [np.zeros((1, 3, 1)), np.zeros((1, 3, 3)), 2, "rot_matrix must be of shape "], - [np.zeros((1, 2, 1)), np.zeros((1, 3, 3)), 1, "Misshaped xyz_array - expected"], - [np.zeros((2, 1)), np.zeros((1, 3, 3)), 1, "Misshaped xyz_array - expected"], - [np.zeros((2)), np.zeros((1, 3, 3)), 1, "Misshaped xyz_array - expected shape"], - ), -) -def test_rotate_matmul_wrapper_arg_errs(input1, input2, input3, msg): - """ - Test that bad arguments to _rotate_matmul_wrapper throw appropriate errors. 
- """ - with pytest.raises(ValueError, match=msg): - uvutils._rotate_matmul_wrapper( - xyz_array=input1, rot_matrix=input2, n_rot=input3 - ) - - -def test_cart_to_polar_roundtrip(): - """ - Test that polar->cart coord transformation is the inverse of cart->polar. - """ - # Basic round trip with vectors - assert uvutils.cart3_to_polar2( - uvutils.polar2_to_cart3(lon_array=0.0, lat_array=0.0) - ) == (0.0, 0.0) - - -def test_rotate_one_axis(vector_list): - """ - Tests some basic vector rotation operations with a single axis rotation. - """ - # These tests are used to verify the basic functionality of the primary - # functions used to perform rotations - x_vecs, y_vecs, z_vecs, test_vecs = vector_list - - # Test no-ops w/ 0 deg rotations - assert np.all( - uvutils._rotate_one_axis(xyz_array=x_vecs, rot_amount=0.0, rot_axis=0) == x_vecs - ) - assert np.all( - uvutils._rotate_one_axis(xyz_array=x_vecs[:, 0], rot_amount=0.0, rot_axis=1) - == x_vecs[np.newaxis, :, 0, np.newaxis] - ) - assert np.all( - uvutils._rotate_one_axis( - xyz_array=x_vecs[:, :, np.newaxis], rot_amount=0.0, rot_axis=2 - ) - == x_vecs[:, :, np.newaxis] - ) - - # Test no-ops w/ None - assert np.all( - uvutils._rotate_one_axis(xyz_array=test_vecs, rot_amount=None, rot_axis=1) - == test_vecs - ) - assert np.all( - uvutils._rotate_one_axis(xyz_array=test_vecs[:, 0], rot_amount=None, rot_axis=2) - == test_vecs[np.newaxis, :, 0, np.newaxis] - ) - assert np.all( - uvutils._rotate_one_axis( - xyz_array=test_vecs[:, :, np.newaxis], rot_amount=None, rot_axis=0 - ) - == test_vecs[:, :, np.newaxis] - ) - - # Test some basic equivalencies to make sure rotations are working correctly - assert np.allclose( - x_vecs, uvutils._rotate_one_axis(xyz_array=x_vecs, rot_amount=1.0, rot_axis=0) - ) - assert np.allclose( - y_vecs, uvutils._rotate_one_axis(xyz_array=y_vecs, rot_amount=2.0, rot_axis=1) - ) - assert np.allclose( - z_vecs, uvutils._rotate_one_axis(xyz_array=z_vecs, rot_amount=3.0, rot_axis=2) - ) - - assert np.allclose( - x_vecs, - uvutils._rotate_one_axis(xyz_array=y_vecs, rot_amount=-np.pi / 2.0, rot_axis=2), - ) - assert np.allclose( - y_vecs, - uvutils._rotate_one_axis(xyz_array=x_vecs, rot_amount=np.pi / 2.0, rot_axis=2), - ) - assert np.allclose( - x_vecs, - uvutils._rotate_one_axis(xyz_array=z_vecs, rot_amount=np.pi / 2.0, rot_axis=1), - ) - assert np.allclose( - z_vecs, - uvutils._rotate_one_axis(xyz_array=x_vecs, rot_amount=-np.pi / 2.0, rot_axis=1), - ) - assert np.allclose( - y_vecs, - uvutils._rotate_one_axis(xyz_array=z_vecs, rot_amount=-np.pi / 2.0, rot_axis=0), - ) - assert np.allclose( - z_vecs, - uvutils._rotate_one_axis(xyz_array=y_vecs, rot_amount=np.pi / 2.0, rot_axis=0), - ) - - assert np.all( - np.equal( - uvutils._rotate_one_axis(xyz_array=test_vecs, rot_amount=1.0, rot_axis=2), - uvutils._rotate_one_axis( - xyz_array=test_vecs, rot_amount=1.0, rot_axis=np.array([2]) - ), - ) - ) - - # Testing a special case, where the xyz_array vectors are reshaped if there - # is only a single rotation matrix used (helps speed things up significantly) - mod_vec = x_vecs.T.reshape((2, 3, 1)) - assert np.all( - uvutils._rotate_one_axis(xyz_array=mod_vec, rot_amount=1.0, rot_axis=0) - == mod_vec - ) - - -def test_rotate_two_axis(vector_list): - """ - Tests some basic vector rotation operations with a double axis rotation. 
- """ - x_vecs, y_vecs, z_vecs, test_vecs = vector_list - - # These tests are used to verify the basic functionality of the primary - # functions used to two-axis rotations - assert np.allclose( - x_vecs, - uvutils._rotate_two_axis( - xyz_array=x_vecs, - rot_amount1=2 * np.pi, - rot_amount2=1.0, - rot_axis1=1, - rot_axis2=0, - ), - ) - assert np.allclose( - y_vecs, - uvutils._rotate_two_axis( - xyz_array=y_vecs, - rot_amount1=2 * np.pi, - rot_amount2=2.0, - rot_axis1=2, - rot_axis2=1, - ), - ) - assert np.allclose( - z_vecs, - uvutils._rotate_two_axis( - xyz_array=z_vecs, - rot_amount1=2 * np.pi, - rot_amount2=3.0, - rot_axis1=0, - rot_axis2=2, - ), - ) - - # Do one more test, which verifies that we can filp our (1,1,1) test vector to - # the postiion at (-1, -1 , -1) - mod_vec = test_vecs.T.reshape((2, 3, 1)) - assert np.allclose( - uvutils._rotate_two_axis( - xyz_array=mod_vec, - rot_amount1=np.pi, - rot_amount2=np.pi / 2.0, - rot_axis1=0, - rot_axis2=1, - ), - -mod_vec, - ) - - -@pytest.mark.parametrize( - "rot1,axis1,rot2,rot3,axis2,axis3", - ( - [2.0, 0, 1.0, 1.0, 0, 0], - [2.0, 0, 2.0, 0.0, 0, 1], - [2.0, 0, None, 2.0, 1, 0], - [0.0, 0, None, 0.0, 1, 2], - ), -) -def test_compare_one_to_two_axis(vector_list, rot1, axis1, rot2, rot3, axis2, axis3): - """ - Check that one-axis and two-axis rotations provide the same values when the - two-axis rotations are fundamentally rotating around a single axis. - """ - x_vecs, y_vecs, z_vecs, test_vecs = vector_list - # If performing two rots on the same axis, that should be identical to using - # a single rot (with the rot angle equal to the sum of the two rot angles) - assert np.all( - np.equal( - uvutils._rotate_one_axis( - xyz_array=test_vecs, rot_amount=rot1, rot_axis=axis1 - ), - uvutils._rotate_two_axis( - xyz_array=test_vecs, - rot_amount1=rot2, - rot_amount2=rot3, - rot_axis1=axis2, - rot_axis2=axis3, - ), - ) - ) - - -@pytest.mark.parametrize( - "arg_dict,err", - ( - [ - {"lst_array": None, "to_enu": True, "use_ant_pos": False}, - (ValueError, "Must include lst_array to calculate baselines in ENU"), - ], - [ - {"lst_array": None, "to_enu": True, "telescope_lat": None}, - (ValueError, "Must include telescope_lat to calculate baselines"), - ], - [ - {"lst_array": None}, - (ValueError, "Must include lst_array if use_ant_pos=True and not"), - ], - [ - {"app_ra": None, "frame_pa": None}, - (ValueError, "Must include both app_ra and app_dec, or frame_pa to"), - ], - [ - {"app_dec": None, "frame_pa": None}, - (ValueError, "Must include both app_ra and app_dec, or frame_pa to"), - ], - [ - {"app_ra": None, "app_dec": None, "frame_pa": None}, - (ValueError, "Must include both app_ra and app_dec, or frame_pa to"), - ], - [ - {"antenna_positions": None}, - (ValueError, "Must include antenna_positions if use_ant_pos=True."), - ], - [ - {"ant_1_array": None}, - (ValueError, "Must include ant_1_array, ant_2_array, and antenna_numbers"), - ], - [ - {"ant_2_array": None}, - (ValueError, "Must include ant_1_array, ant_2_array, and antenna_numbers"), - ], - [ - {"antenna_numbers": None}, - (ValueError, "Must include ant_1_array, ant_2_array, and antenna_numbers"), - ], - [ - {"telescope_lon": None}, - (ValueError, "Must include telescope_lon if use_ant_pos=True."), - ], - [ - {"uvw_array": None, "use_ant_pos": False}, - (ValueError, "Must include uvw_array if use_ant_pos=False."), - ], - [ - {"telescope_lat": None, "use_ant_pos": False, "from_enu": True}, - (ValueError, "Must include telescope_lat if moving "), - ], - [ - {"lst_array": None, 
"use_ant_pos": False, "from_enu": True}, - ( - ValueError, - re.escape("Must include lst_array if moving between ENU (i.e.,"), - ), - ], - [ - {"use_ant_pos": False, "old_app_ra": None}, - (ValueError, "Must include old_app_ra and old_app_dec values when data"), - ], - [ - {"use_ant_pos": False, "old_app_dec": None}, - (ValueError, "Must include old_app_ra and old_app_dec values when data"), - ], - [ - {"use_ant_pos": False, "old_frame_pa": None}, - (ValueError, "Must include old_frame_pa values if data are phased and "), - ], - ), -) -def test_calc_uvw_input_errors(calc_uvw_args, arg_dict, err): - """ - Check for argument errors with calc_uvw. - """ - for key in arg_dict.keys(): - calc_uvw_args[key] = arg_dict[key] - - with pytest.raises(err[0], match=err[1]): - uvutils.calc_uvw( - app_ra=calc_uvw_args["app_ra"], - app_dec=calc_uvw_args["app_dec"], - frame_pa=calc_uvw_args["frame_pa"], - lst_array=calc_uvw_args["lst_array"], - use_ant_pos=calc_uvw_args["use_ant_pos"], - uvw_array=calc_uvw_args["uvw_array"], - antenna_positions=calc_uvw_args["antenna_positions"], - antenna_numbers=calc_uvw_args["antenna_numbers"], - ant_1_array=calc_uvw_args["ant_1_array"], - ant_2_array=calc_uvw_args["ant_2_array"], - old_app_ra=calc_uvw_args["old_app_ra"], - old_app_dec=calc_uvw_args["old_app_dec"], - old_frame_pa=calc_uvw_args["old_frame_pa"], - telescope_lat=calc_uvw_args["telescope_lat"], - telescope_lon=calc_uvw_args["telescope_lon"], - from_enu=calc_uvw_args["from_enu"], - to_enu=calc_uvw_args["to_enu"], - ) - - -def test_calc_uvw_no_op(calc_uvw_args): - """ - Test that transfroming ENU -> ENU gives you an output identical to the input. - """ - # This should be a no-op, check for equality - uvw_check = uvutils.calc_uvw( - lst_array=calc_uvw_args["lst_array"], - use_ant_pos=False, - uvw_array=calc_uvw_args["uvw_array"], - telescope_lat=calc_uvw_args["telescope_lat"], - telescope_lon=calc_uvw_args["telescope_lon"], - to_enu=True, - from_enu=True, - ) - assert np.all(np.equal(calc_uvw_args["uvw_array"], uvw_check)) - - -def test_calc_uvw_same_place(calc_uvw_args): - """ - Check and see that the uvw calculator derives the same values derived by hand - (i.e, that calculating for the same position returns the same answer). - """ - # Check ant make sure that when we plug in the original values, we recover the - # exact same values that we calculated above. 
- uvw_ant_check = uvutils.calc_uvw( - app_ra=calc_uvw_args["old_app_ra"], - app_dec=calc_uvw_args["old_app_dec"], - frame_pa=calc_uvw_args["old_frame_pa"], - lst_array=calc_uvw_args["lst_array"], - use_ant_pos=True, - antenna_positions=calc_uvw_args["antenna_positions"], - antenna_numbers=calc_uvw_args["antenna_numbers"], - ant_1_array=calc_uvw_args["ant_1_array"], - ant_2_array=calc_uvw_args["ant_2_array"], - telescope_lat=calc_uvw_args["telescope_lat"], - telescope_lon=calc_uvw_args["telescope_lon"], - ) - - uvw_base_check = uvutils.calc_uvw( - app_ra=calc_uvw_args["old_app_ra"], - app_dec=calc_uvw_args["old_app_dec"], - frame_pa=calc_uvw_args["old_frame_pa"], - lst_array=calc_uvw_args["lst_array"], - use_ant_pos=False, - uvw_array=calc_uvw_args["uvw_array"], - old_app_ra=calc_uvw_args["old_app_ra"], - old_app_dec=calc_uvw_args["old_app_dec"], - old_frame_pa=calc_uvw_args["old_frame_pa"], - ) - - np.testing.assert_allclose(uvw_ant_check, calc_uvw_args["uvw_array"]) - np.testing.assert_allclose(uvw_base_check, calc_uvw_args["uvw_array"]) - - -@pytest.mark.parametrize("to_enu", [False, True]) -def test_calc_uvw_base_vs_ants(calc_uvw_args, to_enu): - """ - Check to see that we get the same values for uvw coordinates whether we calculate - them using antenna positions or the previously calculated uvw's. - """ - - # Now change position, and make sure that whether we used ant positions or rotated - # uvw vectors, we derived the same uvw-coordinates at the end - uvw_ant_check = uvutils.calc_uvw( - app_ra=calc_uvw_args["app_ra"], - app_dec=calc_uvw_args["app_dec"], - frame_pa=calc_uvw_args["frame_pa"], - lst_array=calc_uvw_args["lst_array"], - use_ant_pos=True, - antenna_positions=calc_uvw_args["antenna_positions"], - antenna_numbers=calc_uvw_args["antenna_numbers"], - ant_1_array=calc_uvw_args["ant_1_array"], - ant_2_array=calc_uvw_args["ant_2_array"], - telescope_lat=calc_uvw_args["telescope_lat"], - telescope_lon=calc_uvw_args["telescope_lon"], - to_enu=to_enu, - ) - - uvw_base_check = uvutils.calc_uvw( - app_ra=calc_uvw_args["app_ra"], - app_dec=calc_uvw_args["app_dec"], - frame_pa=calc_uvw_args["frame_pa"], - lst_array=calc_uvw_args["lst_array"], - use_ant_pos=False, - uvw_array=calc_uvw_args["uvw_array"], - old_app_ra=calc_uvw_args["old_app_ra"], - old_app_dec=calc_uvw_args["old_app_dec"], - old_frame_pa=calc_uvw_args["old_frame_pa"], - telescope_lat=calc_uvw_args["telescope_lat"], - telescope_lon=calc_uvw_args["telescope_lon"], - to_enu=to_enu, - ) - - np.testing.assert_allclose(uvw_ant_check, uvw_base_check) - - -def test_calc_uvw_enu_roundtrip(calc_uvw_args): - """ - Check and see that we can go from uvw to ENU and back to uvw using the `uvw_array` - argument alone (i.e., without antenna positions). - """ - # Now attempt to round trip from projected to ENU back to projected -- that should - # give us the original set of uvw-coordinates.
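
# The ENU legs of calc_uvw lean on the ECEF/ENU helpers; as a quick standalone
# reference, a round trip through those helpers directly. The keyword form
# mirrors how ECEF_from_ENU is called in the phasing test further down; the
# site values, and the assumption that ENU_from_ECEF takes the same keywords,
# are illustrative.
import numpy as np
from pyuvdata import utils as uvutils

lat, lon, alt = -0.536, 0.374, 1051.7  # radians, radians, meters
enu = np.array([[-101.94, 156.41, 1.24]])
ecef = uvutils.ECEF_from_ENU(enu, latitude=lat, longitude=lon, altitude=alt)
enu_back = uvutils.ENU_from_ECEF(ecef, latitude=lat, longitude=lon, altitude=alt)
np.testing.assert_allclose(enu, enu_back, atol=1e-6)
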
- temp_uvw = uvutils.calc_uvw( - lst_array=calc_uvw_args["lst_array"], - use_ant_pos=False, - uvw_array=calc_uvw_args["uvw_array"], - old_app_ra=calc_uvw_args["old_app_ra"], - old_app_dec=calc_uvw_args["old_app_dec"], - old_frame_pa=calc_uvw_args["old_frame_pa"], - telescope_lat=calc_uvw_args["telescope_lat"], - telescope_lon=calc_uvw_args["telescope_lon"], - to_enu=True, - ) - - uvw_base_enu_check = uvutils.calc_uvw( - app_ra=calc_uvw_args["old_app_ra"], - app_dec=calc_uvw_args["old_app_dec"], - frame_pa=calc_uvw_args["old_frame_pa"], - lst_array=calc_uvw_args["lst_array"], - use_ant_pos=False, - uvw_array=temp_uvw, - telescope_lat=calc_uvw_args["telescope_lat"], - telescope_lon=calc_uvw_args["telescope_lon"], - from_enu=True, - ) - - np.testing.assert_allclose( - calc_uvw_args["uvw_array"], uvw_base_enu_check, atol=1e-15, rtol=0 - ) - - -def test_calc_uvw_pa_ex_post_facto(calc_uvw_args): - """ - Check and see that one can apply the frame position angle rotation after-the-fact - and still get out the same answer you get if you were doing it during the initial - uvw coordinate calculation. - """ - # Finally, check and see what happens if you do the PA rotation as part of the - # first uvw calculation, and make sure it agrees with what you get if you decide - # to apply the PA rotation after-the-fact. - uvw_base_check = uvutils.calc_uvw( - app_ra=calc_uvw_args["app_ra"], - app_dec=calc_uvw_args["app_dec"], - frame_pa=calc_uvw_args["frame_pa"], - lst_array=calc_uvw_args["lst_array"], - use_ant_pos=False, - uvw_array=calc_uvw_args["uvw_array"], - old_app_ra=calc_uvw_args["old_app_ra"], - old_app_dec=calc_uvw_args["old_app_dec"], - old_frame_pa=calc_uvw_args["old_frame_pa"], - ) - - temp_uvw = uvutils.calc_uvw( - app_ra=calc_uvw_args["app_ra"], - app_dec=calc_uvw_args["app_dec"], - lst_array=calc_uvw_args["lst_array"], - use_ant_pos=False, - uvw_array=calc_uvw_args["uvw_array"], - old_app_ra=calc_uvw_args["old_app_ra"], - old_app_dec=calc_uvw_args["old_app_dec"], - old_frame_pa=calc_uvw_args["old_frame_pa"], - ) - - uvw_base_late_pa_check = uvutils.calc_uvw( - frame_pa=calc_uvw_args["frame_pa"], - use_ant_pos=False, - uvw_array=temp_uvw, - old_frame_pa=calc_uvw_args["old_frame_pa"], - ) - - np.testing.assert_allclose(uvw_base_check, uvw_base_late_pa_check) - - -@pytest.mark.filterwarnings('ignore:ERFA function "pmsafe" yielded') -@pytest.mark.filterwarnings('ignore:ERFA function "dtdtf" yielded') -@pytest.mark.filterwarnings('ignore:ERFA function "utcut1" yielded') -@pytest.mark.filterwarnings('ignore:ERFA function "utctai" yielded') -@pytest.mark.parametrize( - "arg_dict,msg", - ( - [{"library": "xyz"}, "Requested coordinate transformation library is not"], - [{"icrs_ra": np.arange(10)}, "ra and dec must be the same shape."], - [{"icrs_dec": np.arange(10)}, "ra and dec must be the same shape."], - [{"pm_ra": np.arange(10)}, "pm_ra must be the same shape as ra and dec."], - [{"pm_dec": np.arange(10)}, "pm_dec must be the same shape as ra and dec."], - [{"dist": np.arange(10)}, "dist must be the same shape as ra and dec."], - [{"vrad": np.arange(10)}, "vrad must be the same shape as ra and dec."], - [ - { - "icrs_ra": [0, 0], - "icrs_dec": [0, 0], - "pm_ra": None, - "pm_dec": None, - "dist": None, - "vrad": None, - }, - "time_array must be of either of", - ], - [{"time_array": 0.0, "library": "novas"}, "No current support for JPL ephems"], - ), -) -def test_transform_icrs_to_app_arg_errs(astrometry_args, arg_dict, msg): - """ - Check for argument errors with transform_icrs_to_app - """ -
pytest.importorskip("novas") - default_args = astrometry_args.copy() - for key in arg_dict.keys(): - default_args[key] = arg_dict[key] - - # Start w/ the transform_icrs_to_app block - with pytest.raises(ValueError, match=msg): - uvutils.transform_icrs_to_app( - time_array=default_args["time_array"], - ra=default_args["icrs_ra"], - dec=default_args["icrs_dec"], - telescope_loc=default_args["telescope_loc"], - telescope_frame=default_args["telescope_frame"], - pm_ra=default_args["pm_ra"], - pm_dec=default_args["pm_dec"], - dist=default_args["dist"], - vrad=default_args["vrad"], - epoch=default_args["epoch"], - astrometry_library=default_args["library"], - ) - - -@pytest.mark.parametrize( - "arg_dict,msg", - ( - [{"library": "xyz"}, "Requested coordinate transformation library is not"], - [{"app_ra": np.arange(10)}, "app_ra and app_dec must be the same shape."], - [{"app_dec": np.arange(10)}, "app_ra and app_dec must be the same shape."], - [{"time_array": np.arange(10)}, "time_array must be of either of length 1"], - ), -) -def test_transform_app_to_icrs_arg_errs(astrometry_args, arg_dict, msg): - """ - Check for argument errors with transform_app_to_icrs - """ - default_args = astrometry_args.copy() - for key in arg_dict.keys(): - default_args[key] = arg_dict[key] - - with pytest.raises(ValueError, match=msg): - uvutils.transform_app_to_icrs( - time_array=default_args["time_array"], - app_ra=default_args["app_ra"], - app_dec=default_args["app_dec"], - telescope_loc=default_args["telescope_loc"], - telescope_frame=default_args["telescope_frame"], - astrometry_library=default_args["library"], - ) - - -def test_transform_sidereal_coords_arg_errs(): - """ - Check for argument errors with transform_sidereal_coords - """ - # Next on to sidereal to sidereal - with pytest.raises(ValueError, match="lon and lat must be the same shape."): - uvutils.transform_sidereal_coords( - longitude=[0.0], - latitude=[0.0, 1.0], - in_coord_frame="fk5", - out_coord_frame="icrs", - in_coord_epoch="J2000.0", - time_array=[0.0, 1.0, 2.0], - ) - - with pytest.raises(ValueError, match="Shape of time_array must be either that of "): - uvutils.transform_sidereal_coords( - longitude=[0.0, 1.0], - latitude=[0.0, 1.0], - in_coord_frame="fk4", - out_coord_frame="fk4", - in_coord_epoch=1950.0, - out_coord_epoch=1984.0, - time_array=[0.0, 1.0, 2.0], - ) - - -@pytest.mark.filterwarnings('ignore:ERFA function "d2dtf" yielded') -@pytest.mark.parametrize( - ["arg_dict", "msg"], - [ - [ - {"force_lookup": True, "time_array": np.arange(100000)}, - "Requesting too many individual ephem points from JPL-Horizons.", - ], - [{"force_lookup": False, "high_cadence": True}, "Too many ephem points"], - [{"time_array": np.arange(10)}, "No current support for JPL ephems outside"], - [{"targ_name": "whoami"}, "Target ID is not recognized in either the small"], - ], -) -def test_lookup_jplhorizons_arg_errs(arg_dict, msg): - """ - Check for argument errors with lookup_jplhorizons. 
- """ - # Don't do this test if we don't have astroquery loaded - pytest.importorskip("astroquery") - - from ssl import SSLError - - from requests import RequestException - - default_args = { - "targ_name": "Mars", - "time_array": np.array([0.0, 1000.0]) + 2456789.0, - "telescope_loc": EarthLocation.from_geodetic(0, 0, height=0.0), - "high_cadence": False, - "force_lookup": None, - } - - for key in arg_dict.keys(): - default_args[key] = arg_dict[key] - - # We have to handle this piece a bit carefully, since some queries fail due to - # intermittent failures connecting to the JPL-Horizons service. - with pytest.raises(Exception) as cm: - uvutils.lookup_jplhorizons( - default_args["targ_name"], - default_args["time_array"], - telescope_loc=default_args["telescope_loc"], - high_cadence=default_args["high_cadence"], - force_indv_lookup=default_args["force_lookup"], - ) - - if issubclass(cm.type, RequestException) or issubclass(cm.type, SSLError): - pytest.skip("SSL/Connection error w/ JPL Horizons") - - assert issubclass(cm.type, ValueError) - assert str(cm.value).startswith(msg) - - -@pytest.mark.skipif(not hasmoon, reason="lunarsky not installed") -def test_lookup_jplhorizons_moon_err(): - """ - Check for argument errors with lookup_jplhorizons. - """ - # Don't do this test if we don't have astroquery loaded - pytest.importorskip("astroquery") - - from ssl import SSLError - - from requests import RequestException - - default_args = { - "targ_name": "Mars", - "time_array": np.array([0.0, 1000.0]) + 2456789.0, - "telescope_loc": MoonLocation.from_selenodetic(0.6875, 24.433, 0), - "high_cadence": False, - "force_lookup": None, - } - - # We have to handle this piece a bit carefully, since some queries fail due to - # intermittent failures connecting to the JPL-Horizons service. 
- with pytest.raises(Exception) as cm: - uvutils.lookup_jplhorizons( - default_args["targ_name"], - default_args["time_array"], - telescope_loc=default_args["telescope_loc"], - high_cadence=default_args["high_cadence"], - force_indv_lookup=default_args["force_lookup"], - ) - - if issubclass(cm.type, RequestException) or issubclass(cm.type, SSLError): - pytest.skip("SSL/Connection error w/ JPL Horizons") - - assert issubclass(cm.type, NotImplementedError) - assert str(cm.value).startswith( - "Cannot lookup JPL positions for telescopes with a MoonLocation" - ) - - -@pytest.mark.parametrize( - "bad_arg,msg", - [ - ["etimes", "ephem_ra must have the same shape as ephem_times."], - ["ra", "ephem_ra must have the same shape as ephem_times."], - ["dec", "ephem_dec must have the same shape as ephem_times."], - ["dist", "ephem_dist must have the same shape as ephem_times."], - ["vel", "ephem_vel must have the same shape as ephem_times."], - ], -) -def test_interpolate_ephem_arg_errs(bad_arg, msg): - """ - Check for argument errors with interpolate_ephem - """ - # Now moving on to the interpolation scheme - with pytest.raises(ValueError, match=msg): - uvutils.interpolate_ephem( - time_array=0.0, - ephem_times=0.0 if ("etimes" == bad_arg) else [0.0, 1.0], - ephem_ra=0.0 if ("ra" == bad_arg) else [0.0, 1.0], - ephem_dec=0.0 if ("dec" == bad_arg) else [0.0, 1.0], - ephem_dist=0.0 if ("dist" == bad_arg) else [0.0, 1.0], - ephem_vel=0.0 if ("vel" == bad_arg) else [0.0, 1.0], - ) - - -def test_calc_app_coords_arg_errs(): - """ - Check for argument errors with calc_app_coords - """ - # Now on to app_coords - with pytest.raises(ValueError, match="Object type whoknows is not recognized."): - uvutils.calc_app_coords( - lon_coord=0.0, lat_coord=0.0, telescope_loc=(0, 1, 2), coord_type="whoknows" - ) - - -def test_transform_multi_sidereal_coords(astrometry_args): - """ - Perform some basic tests to verify that we can transform between sidereal frames - with multiple coordinates. - """ - # Check and make sure that we can deal with non-singleton times or coords with - # singleton coords and times, respectively. - check_ra, check_dec = uvutils.transform_sidereal_coords( - longitude=astrometry_args["icrs_ra"] * np.ones(2), - latitude=astrometry_args["icrs_dec"] * np.ones(2), - in_coord_frame="icrs", - out_coord_frame="fk5", - in_coord_epoch=2000.0, - out_coord_epoch=2000.0, - time_array=astrometry_args["time_array"][0] * np.ones(2), - ) - assert np.all(np.equal(astrometry_args["fk5_ra"], check_ra)) - assert np.all(np.equal(astrometry_args["fk5_dec"], check_dec)) - - -def test_transform_fk5_fk4_icrs_loop(astrometry_args): - """ - Do a roundtrip test between ICRS, FK5, FK4 and back to ICRS to verify that we can - handle transformation between different sidereal frames correctly. - """ - # Now do a triangle between ICRS -> FK5 -> FK4 -> ICRS. If all is working well, - # then we should recover the same position we started with. 
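
# For reference, the same ICRS -> FK5 -> FK4 -> ICRS triangle written directly
# against astropy, which is what the helper under test is ultimately compared
# to; the starting coordinates here are arbitrary.
from astropy.coordinates import FK4, FK5, SkyCoord

start = SkyCoord(ra=2.0, dec=0.5, unit="rad", frame="icrs")
fk4 = start.transform_to(FK5(equinox="J2000")).transform_to(FK4(equinox="B1950"))
back = fk4.transform_to("icrs")
assert start.separation(back).uarcsec < 0.1
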
- fk5_ra, fk5_dec = uvutils.transform_sidereal_coords( - longitude=astrometry_args["icrs_ra"], - latitude=astrometry_args["icrs_dec"], - in_coord_frame="icrs", - out_coord_frame="fk5", - in_coord_epoch=2000.0, - out_coord_epoch=2000.0, - time_array=astrometry_args["time_array"][0], - ) - - fk4_ra, fk4_dec = uvutils.transform_sidereal_coords( - longitude=fk5_ra, - latitude=fk5_dec, - in_coord_frame="fk5", - out_coord_frame="fk4", - in_coord_epoch="J2000.0", - out_coord_epoch="B1950.0", - ) - - check_ra, check_dec = uvutils.transform_sidereal_coords( - longitude=fk4_ra, - latitude=fk4_dec, - in_coord_frame="fk4", - out_coord_frame="icrs", - in_coord_epoch="B1950.0", - out_coord_epoch="J2000.0", - ) - - check_coord = SkyCoord(check_ra, check_dec, unit="rad") - assert np.all(check_coord.separation(astrometry_args["icrs_coord"]).uarcsec < 0.1) - - -@pytest.mark.parametrize(["telescope_frame", "selenoid"], frame_selenoid) -@pytest.mark.parametrize("in_lib", ["erfa", "astropy"]) -@pytest.mark.parametrize("out_lib", ["erfa", "astropy"]) -def test_roundtrip_icrs(astrometry_args, telescope_frame, selenoid, in_lib, out_lib): - """ - Performs a roundtrip test to verify that one can transform between - ICRS <-> topocentric to the precision limit, without running into - issues. - """ - if telescope_frame == "itrs": - telescope_loc = astrometry_args["telescope_loc"] - else: - telescope_loc = astrometry_args["moon_telescope_loc"] - - if telescope_frame == "mcmf" and in_lib != "astropy": - with pytest.raises( - NotImplementedError, - match="MoonLocation telescopes are only supported with the 'astropy' " - "astrometry library", - ): - app_ra, app_dec = uvutils.transform_icrs_to_app( - time_array=astrometry_args["time_array"], - ra=astrometry_args["icrs_ra"], - dec=astrometry_args["icrs_dec"], - telescope_loc=telescope_loc, - telescope_frame=telescope_frame, - ellipsoid=selenoid, - epoch=astrometry_args["epoch"], - astrometry_library=in_lib, - ) - return - - if telescope_frame == "mcmf" and out_lib == "astropy": - kwargs = {"telescope_frame": telescope_frame, "ellipsoid": selenoid} - else: - # don't pass telescope frame here so something still happens if frame and - # astrometry lib conflict - kwargs = {} - - app_ra, app_dec = uvutils.transform_icrs_to_app( - time_array=astrometry_args["time_array"], - ra=astrometry_args["icrs_ra"], - dec=astrometry_args["icrs_dec"], - telescope_loc=telescope_loc, - epoch=astrometry_args["epoch"], - astrometry_library=in_lib, - **kwargs, - ) - - if telescope_frame == "mcmf" and out_lib != "astropy": - with pytest.raises( - NotImplementedError, - match="MoonLocation telescopes are only supported with the 'astropy' " - "astrometry library", - ): - check_ra, check_dec = uvutils.transform_app_to_icrs( - time_array=astrometry_args["time_array"], - app_ra=app_ra, - app_dec=app_dec, - telescope_loc=telescope_loc, - telescope_frame=telescope_frame, - ellipsoid=selenoid, - astrometry_library=out_lib, - ) - return - - if telescope_frame == "mcmf": - from spiceypy.utils.exceptions import SpiceUNKNOWNFRAME - - try: - check_ra, check_dec = uvutils.transform_app_to_icrs( - time_array=astrometry_args["time_array"], - app_ra=app_ra, - app_dec=app_dec, - telescope_loc=telescope_loc, - astrometry_library=out_lib, - **kwargs, - ) - except SpiceUNKNOWNFRAME as err: - pytest.skip("SpiceUNKNOWNFRAME error: " + str(err)) - else: - check_ra, check_dec = uvutils.transform_app_to_icrs( - time_array=astrometry_args["time_array"], - app_ra=app_ra, - app_dec=app_dec, - telescope_loc=telescope_loc, - 
astrometry_library=out_lib, - **kwargs, - ) - - check_coord = SkyCoord(check_ra, check_dec, unit="rad", frame="icrs") - # Verify that everything agrees to better than µas-level accuracy if the - # libraries are the same, otherwise to 100 µas if cross-comparing libraries - if in_lib == out_lib: - assert np.all( - astrometry_args["icrs_coord"].separation(check_coord).uarcsec < 1.0 - ) - else: - assert np.all( - astrometry_args["icrs_coord"].separation(check_coord).uarcsec < 100.0 - ) - - if selenoid == "SPHERE": - # check defaults - app_ra, app_dec = uvutils.transform_icrs_to_app( - time_array=astrometry_args["time_array"], - ra=astrometry_args["icrs_ra"], - dec=astrometry_args["icrs_dec"], - telescope_loc=telescope_loc, - epoch=astrometry_args["epoch"], - astrometry_library=in_lib, - telescope_frame=telescope_frame, - ) - check_ra, check_dec = uvutils.transform_app_to_icrs( - time_array=astrometry_args["time_array"], - app_ra=app_ra, - app_dec=app_dec, - telescope_loc=telescope_loc, - astrometry_library=out_lib, - telescope_frame=telescope_frame, - ) - check_coord = SkyCoord(check_ra, check_dec, unit="rad", frame="icrs") - # Verify that everything agrees to better than µas-level accuracy if the - # libraries are the same, otherwise to 100 µas if cross-comparing libraries - assert np.all( - astrometry_args["icrs_coord"].separation(check_coord).uarcsec < 1.0 - ) - - -def test_calc_parallactic_angle(): - """ - A relatively straightforward test to verify that we recover the parallactic - angles we expect given some known inputs - """ - expected_vals = np.array([1.0754290375762232, 0.0, -0.6518070715011698]) - meas_vals = uvutils.calc_parallactic_angle( - app_ra=[0.0, 1.0, 2.0], - app_dec=[-1.0, 0.0, 1.0], - lst_array=[2.0, 1.0, 0], - telescope_lat=1.0, - ) - # Make sure things agree to better than ~0.1 uas (as it definitely should) - np.testing.assert_allclose(expected_vals, meas_vals, 0.0, 1e-12) - - -def test_calc_frame_pos_angle(): - """ - Verify that we recover frame position angles correctly - """ - # First test -- plug in "topo" for the frame, which should always produce an - # array of all zeros (the topo frame is what the apparent coords are in) - frame_pa = uvutils.calc_frame_pos_angle( - time_array=np.array([2456789.0] * 100), - app_ra=np.arange(100) * (np.pi / 50), - app_dec=np.zeros(100), - telescope_loc=(0, 0, 0), - ref_frame="topo", - ) - assert len(frame_pa) == 100 - assert np.all(frame_pa == 0.0) - # PA of zero degrees (they're always aligned) - # Next test -- plug in J2000 and see that we actually get back a frame PA - # of basically 0 degrees. - j2000_jd = Time(2000.0, format="jyear").utc.jd - frame_pa = uvutils.calc_frame_pos_angle( - time_array=np.array([j2000_jd] * 100), - app_ra=np.arange(100) * (np.pi / 50), - app_dec=np.zeros(100), - telescope_loc=(0, 0, 0), - ref_frame="fk5", - ref_epoch=2000.0, - ) - # At J2000, the only frame PA terms come from aberration, which basically max out - # at ~< 1e-4 rad. Check to make sure that lines up with what we measure. - assert np.all(np.abs(frame_pa) < 1e-4) - - # JD 2458849.5 is Jan-01-2020, so 20 years of precession ought to have accumulated - # (with about 1 arcmin/yr of precession).
Make sure these values are sensible - frame_pa = uvutils.calc_frame_pos_angle( - time_array=np.array([2458849.5] * 100), - app_ra=np.arange(100) * (np.pi / 50), - app_dec=np.zeros(100), - telescope_loc=(0, 0, 0), - ref_frame="fk5", - ref_epoch=2000.0, - ) - assert np.all(np.abs(frame_pa) < 20 * (50.3 / 3600) * (np.pi / 180.0)) - # Check the PA at a couple of chosen points, which just so happen to be very close - # in magnitude (as they're basically in the same plane as the motion of the Earth) - assert np.isclose(frame_pa[25], 0.001909957544309159) - assert np.isclose(frame_pa[-25], -0.0019098101664715339) - - -def test_jphl_lookup(astrometry_args): - """ - A very simple lookup query to verify that the astroquery tools for accessing - JPL-Horizons are working. This test is very limited, on account of not wanting to - slam JPL w/ coordinate requests. - """ - pytest.importorskip("astroquery") - - from ssl import SSLError - - from requests import RequestException - - # If we can't connect to JPL-Horizons, then skip this test and don't outright fail. - try: - [ephem_times, ephem_ra, ephem_dec, ephem_dist, ephem_vel] = ( - uvutils.lookup_jplhorizons("Sun", 2456789.0) - ) - except (SSLError, RequestException) as err: - pytest.skip("SSL/Connection error w/ JPL Horizons: " + str(err)) - - assert np.all(np.equal(ephem_times, 2456789.0)) - np.testing.assert_allclose(ephem_ra, 0.8393066751804976) - np.testing.assert_allclose(ephem_dec, 0.3120687480116649) - np.testing.assert_allclose(ephem_dist, 1.00996185750717) - np.testing.assert_allclose(ephem_vel, 0.386914) - - # check calling lookup_jplhorizons with EarthLocation vs lat/lon/alt passed - try: - ephem_info_latlon = uvutils.lookup_jplhorizons( - "Sun", 2456789.0, telescope_loc=astrometry_args["telescope_loc"] - ) - ephem_info_el = uvutils.lookup_jplhorizons( - "Sun", - 2456789.0, - telescope_loc=EarthLocation.from_geodetic( - lat=astrometry_args["telescope_loc"][0] * units.rad, - lon=astrometry_args["telescope_loc"][1] * units.rad, - height=astrometry_args["telescope_loc"][2] * units.m, - ), - ) - except (SSLError, RequestException) as err: - pytest.skip("SSL/Connection error w/ JPL Horizons: " + str(err)) - - for ind, item in enumerate(ephem_info_latlon): - assert item == ephem_info_el[ind] - - -def test_ephem_interp_one_point(): - """ - These tests do some simple checks to verify that the interpolator behaves properly - when only being provided singleton values. - """ - # First test the case where there is only one ephem point, and thus everything - # takes on that value - time_array = np.arange(100) * 0.01 - ephem_times = np.array([0]) - ephem_ra = np.array([1.0]) - ephem_dec = np.array([2.0]) - ephem_dist = np.array([3.0]) - ephem_vel = np.array([4.0]) - - ra_vals0, dec_vals0, dist_vals0, vel_vals0 = uvutils.interpolate_ephem( - time_array=time_array, - ephem_times=ephem_times, - ephem_ra=ephem_ra, - ephem_dec=ephem_dec, - ephem_dist=ephem_dist, - ephem_vel=ephem_vel, - ) - - assert np.all(ra_vals0 == 1.0) - assert np.all(dec_vals0 == 2.0) - assert np.all(dist_vals0 == 3.0) - assert np.all(vel_vals0 == 4.0) - - -def test_ephem_interp_multi_point(): - """ - Test that ephem coords are interpolated correctly when supplying more than a - singleton value for the various arrays. - """ - # Next test the case where the ephem only has a couple of points, in which case the - # code will default to using a simple, linear interpolation scheme. 
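
# Why the point count matters, in miniature: a two-point ephem only supports
# linear interpolation, while many points allow a cubic spline; on data that is
# itself linear the two agree to numerical precision. A pure-scipy sketch.
import numpy as np
from scipy.interpolate import CubicSpline, interp1d

t = np.arange(100) * 0.01
linear = interp1d([0.0, 1.0], [1.0, 2.0])(t)
spline = CubicSpline(np.arange(11) * 0.1, np.arange(11) * 0.1 + 1.0)(t)
np.testing.assert_allclose(linear, spline, rtol=0, atol=1e-12)
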
- time_array = np.arange(100) * 0.01 - ephem_times = np.array([0, 1]) - ephem_ra = np.array([0, 1]) + 1.0 - ephem_dec = np.array([0, 1]) + 2.0 - ephem_dist = np.array([0, 1]) + 3.0 - ephem_vel = np.array([0, 1]) + 4.0 - - ra_vals1, dec_vals1, dist_vals1, vel_vals1 = uvutils.interpolate_ephem( - time_array=time_array, - ephem_times=ephem_times, - ephem_ra=ephem_ra, - ephem_dec=ephem_dec, - ephem_dist=ephem_dist, - ephem_vel=ephem_vel, - ) - - # When there are lots more data points, the interpolator will default to using a - # cubic spline, which _should_ be very close (to numerical precision limits) to what - # we get with the method above. - ephem_times = np.arange(11) * 0.1 - ephem_ra = (np.arange(11) * 0.1) + 1.0 - ephem_dec = (np.arange(11) * 0.1) + 2.0 - ephem_dist = (np.arange(11) * 0.1) + 3.0 - ephem_vel = (np.arange(11) * 0.1) + 4.0 - - ra_vals2, dec_vals2, dist_vals2, vel_vals2 = uvutils.interpolate_ephem( - time_array=time_array, - ephem_times=ephem_times, - ephem_ra=ephem_ra, - ephem_dec=ephem_dec, - ephem_dist=ephem_dist, - ephem_vel=ephem_vel, - ) - - # Make sure that everything is consistent to floating point precision - np.testing.assert_allclose(ra_vals1, ra_vals2, 1e-15, 0.0) - np.testing.assert_allclose(dec_vals1, dec_vals2, 1e-15, 0.0) - np.testing.assert_allclose(dist_vals1, dist_vals2, 1e-15, 0.0) - np.testing.assert_allclose(vel_vals1, vel_vals2, 1e-15, 0.0) - np.testing.assert_allclose(time_array + 1.0, ra_vals2, 1e-15, 0.0) - np.testing.assert_allclose(time_array + 2.0, dec_vals2, 1e-15, 0.0) - np.testing.assert_allclose(time_array + 3.0, dist_vals2, 1e-15, 0.0) - np.testing.assert_allclose(time_array + 4.0, vel_vals2, 1e-15, 0.0) - - -@pytest.mark.parametrize("frame", ["icrs", "fk5"]) -@pytest.mark.parametrize(["telescope_frame", "selenoid"], frame_selenoid) -def test_calc_app_sidereal(astrometry_args, frame, telescope_frame, selenoid): - """ - Tests that we can calculate app coords for sidereal objects - """ - # First step is to check and make sure we can do sidereal coords. This is the most - # basic thing to check, so this really _should_ work. - if telescope_frame == "itrs": - telescope_loc = astrometry_args["telescope_loc"] - else: - from spiceypy.utils.exceptions import SpiceUNKNOWNFRAME - - telescope_loc = astrometry_args["moon_telescope_loc"] - - try: - check_ra, check_dec = uvutils.calc_app_coords( - lon_coord=( - astrometry_args["fk5_ra"] - if (frame == "fk5") - else astrometry_args["icrs_ra"] - ), - lat_coord=( - astrometry_args["fk5_dec"] - if (frame == "fk5") - else astrometry_args["icrs_dec"] - ), - coord_type="sidereal", - telescope_loc=telescope_loc, - telescope_frame=telescope_frame, - ellipsoid=selenoid, - time_array=astrometry_args["time_array"], - coord_frame=frame, - coord_epoch=astrometry_args["epoch"], - ) - except SpiceUNKNOWNFRAME as err: - pytest.skip("SpiceUNKNOWNFRAME error: " + str(err)) - - check_coord = SkyCoord(check_ra, check_dec, unit="rad") - - if telescope_frame == "itrs": - app_coord = astrometry_args["app_coord"] - else: - app_coord = astrometry_args["moon_app_coord"][selenoid] - - assert np.all(app_coord.separation(check_coord).uarcsec < 1.0) - - -@pytest.mark.parametrize("frame", ["icrs", "fk5"]) -@pytest.mark.parametrize(["telescope_frame", "selenoid"], frame_selenoid) -def test_calc_app_ephem(astrometry_args, frame, telescope_frame, selenoid): - """ - Tests that we can calculate app coords for ephem objects - """ - # Next, see what happens when we pass an ephem.
Note that this is just a single - # point ephem, so it's not testing any of the fancy interpolation, but we have other - # tests for poking at that. The two tests here are to check both the ICRS and FK5 - # paths through the ephem. - if telescope_frame == "itrs": - telescope_loc = astrometry_args["telescope_loc"] - else: - telescope_loc = astrometry_args["moon_telescope_loc"] - - if frame == "fk5": - ephem_ra = astrometry_args["fk5_ra"] - ephem_dec = astrometry_args["fk5_dec"] - else: - ephem_ra = np.array([astrometry_args["icrs_ra"]]) - ephem_dec = np.array([astrometry_args["icrs_dec"]]) - - ephem_times = np.array([astrometry_args["time_array"][0]]) - check_ra, check_dec = uvutils.calc_app_coords( - lon_coord=ephem_ra, - lat_coord=ephem_dec, - coord_times=ephem_times, - coord_type="ephem", - telescope_loc=telescope_loc, - telescope_frame=telescope_frame, - ellipsoid=selenoid, - time_array=astrometry_args["time_array"], - coord_epoch=astrometry_args["epoch"], - coord_frame=frame, - ) - check_coord = SkyCoord(check_ra, check_dec, unit="rad") - - if telescope_frame == "itrs": - app_coord = astrometry_args["app_coord"] - else: - app_coord = astrometry_args["moon_app_coord"][selenoid] - assert np.all(app_coord.separation(check_coord).uarcsec < 1.0) - - -@pytest.mark.parametrize(["telescope_frame", "selenoid"], frame_selenoid) -def test_calc_app_driftscan(astrometry_args, telescope_frame, selenoid): - """ - Tests that we can calculate app coords for driftscan objects - """ - # Now on to the driftscan, which takes in arguments in terms of az and el (and - # the values we've given below should also be for zenith) - if telescope_frame == "itrs": - telescope_loc = astrometry_args["telescope_loc"] - else: - telescope_loc = astrometry_args["moon_telescope_loc"] - - check_ra, check_dec = uvutils.calc_app_coords( - lon_coord=0.0, - lat_coord=np.pi / 2.0, - coord_type="driftscan", - telescope_loc=telescope_loc, - telescope_frame=telescope_frame, - ellipsoid=selenoid, - time_array=astrometry_args["time_array"], - ) - check_coord = SkyCoord(check_ra, check_dec, unit="rad") - if telescope_frame == "itrs": - drift_coord = astrometry_args["drift_coord"] - else: - drift_coord = astrometry_args["moon_drift_coord"][selenoid] - - assert np.all(drift_coord.separation(check_coord).uarcsec < 1.0) - - -@pytest.mark.parametrize(["telescope_frame", "selenoid"], frame_selenoid) -def test_calc_app_unprojected(astrometry_args, telescope_frame, selenoid): - """ - Tests that we can calculate app coords for unprojected objects - """ - # Finally, check unprojected, which is forced to point toward zenith (unlike - # driftscan, which is allowed to point at any az/el position) - if telescope_frame == "itrs": - telescope_loc = astrometry_args["telescope_loc"] - lst_array = astrometry_args["lst_array"] - else: - telescope_loc = astrometry_args["moon_telescope_loc"] - lst_array = astrometry_args["moon_lst_array"][selenoid] - - check_ra, check_dec = uvutils.calc_app_coords( - lon_coord=None, - lat_coord=None, - coord_type="unprojected", - telescope_loc=telescope_loc, - telescope_frame=telescope_frame, - ellipsoid=selenoid, - time_array=astrometry_args["time_array"], - lst_array=lst_array, - ) - - check_coord = SkyCoord(check_ra, check_dec, unit="rad") - - if telescope_frame == "itrs": - drift_coord = astrometry_args["drift_coord"] - else: - drift_coord = astrometry_args["moon_drift_coord"][selenoid] - assert np.all(drift_coord.separation(check_coord).uarcsec < 1.0) - - 
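
# What the driftscan/unprojected comparisons reduce to: zenith at a given time
# and site, expressed as an ICRS position via plain astropy. The site and time
# below are illustrative, not the fixture values.
from astropy import units
from astropy.coordinates import AltAz, EarthLocation, SkyCoord
from astropy.time import Time

site = EarthLocation.from_geodetic(lon=21.4283, lat=-30.7215, height=1051.69)
zenith_icrs = SkyCoord(
    alt=90 * units.deg,
    az=0 * units.deg,
    frame=AltAz(obstime=Time(2456789.0, format="jd"), location=site),
).transform_to("icrs")
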
-@pytest.mark.parametrize(["telescope_frame", "selenoid"], frame_selenoid) -def test_calc_app_fk5_roundtrip(astrometry_args, telescope_frame, selenoid): - # Do a round-trip with the two top-level functions and make sure they agree to - # better than 1 µas, first in FK5 - if telescope_frame == "itrs": - telescope_loc = astrometry_args["telescope_loc"] - else: - telescope_loc = astrometry_args["moon_telescope_loc"] - - app_ra, app_dec = uvutils.calc_app_coords( - lon_coord=0.0, - lat_coord=0.0, - coord_type="sidereal", - telescope_loc=telescope_loc, - telescope_frame=telescope_frame, - ellipsoid=selenoid, - time_array=astrometry_args["time_array"], - coord_frame="fk5", - coord_epoch="J2000.0", - ) - - if telescope_frame == "mcmf": - from spiceypy.utils.exceptions import SpiceUNKNOWNFRAME - - try: - check_ra, check_dec = uvutils.calc_sidereal_coords( - time_array=astrometry_args["time_array"], - app_ra=app_ra, - app_dec=app_dec, - telescope_loc=telescope_loc, - coord_frame="fk5", - telescope_frame=telescope_frame, - ellipsoid=selenoid, - coord_epoch=2000.0, - ) - except SpiceUNKNOWNFRAME as err: - pytest.skip("SpiceUNKNOWNFRAME error: " + str(err)) - else: - check_ra, check_dec = uvutils.calc_sidereal_coords( - time_array=astrometry_args["time_array"], - app_ra=app_ra, - app_dec=app_dec, - telescope_loc=telescope_loc, - coord_frame="fk5", - telescope_frame=telescope_frame, - ellipsoid=selenoid, - coord_epoch=2000.0, - ) - - check_coord = SkyCoord(check_ra, check_dec, unit="rad") - assert np.all(SkyCoord(0, 0, unit="rad").separation(check_coord).uarcsec < 1.0) - - if selenoid == "SPHERE": - # check defaults - - app_ra, app_dec = uvutils.calc_app_coords( - lon_coord=0.0, - lat_coord=0.0, - coord_type="sidereal", - telescope_loc=telescope_loc, - telescope_frame=telescope_frame, - time_array=astrometry_args["time_array"], - coord_frame="fk5", - coord_epoch="J2000.0", - ) - - check_ra, check_dec = uvutils.calc_sidereal_coords( - time_array=astrometry_args["time_array"], - app_ra=app_ra, - app_dec=app_dec, - telescope_loc=telescope_loc, - coord_frame="fk5", - telescope_frame=telescope_frame, - coord_epoch=2000.0, - ) - check_coord = SkyCoord(check_ra, check_dec, unit="rad") - assert np.all(SkyCoord(0, 0, unit="rad").separation(check_coord).uarcsec < 1.0) - - -@pytest.mark.parametrize(["telescope_frame", "selenoid"], frame_selenoid) -def test_calc_app_fk4_roundtrip(astrometry_args, telescope_frame, selenoid): - # Finally, check and make sure that FK4 performs similarly - if telescope_frame == "itrs": - telescope_loc = astrometry_args["telescope_loc"] - else: - telescope_loc = astrometry_args["moon_telescope_loc"] - - app_ra, app_dec = uvutils.calc_app_coords( - lon_coord=0.0, - lat_coord=0.0, - coord_type="sidereal", - telescope_loc=telescope_loc, - telescope_frame=telescope_frame, - ellipsoid=selenoid, - time_array=astrometry_args["time_array"], - coord_frame="fk4", - coord_epoch=1950.0, - ) - - check_ra, check_dec = uvutils.calc_sidereal_coords( - time_array=astrometry_args["time_array"], - app_ra=app_ra, - app_dec=app_dec, - telescope_loc=telescope_loc, - coord_frame="fk4", - telescope_frame=telescope_frame, - ellipsoid=selenoid, - coord_epoch=1950.0, - ) - - check_coord = SkyCoord(check_ra, check_dec, unit="rad") - assert np.all(SkyCoord(0, 0, unit="rad").separation(check_coord).uarcsec < 1.0) - - -@pytest.mark.filterwarnings('ignore:ERFA function "pmsafe" yielded 4 of') -@pytest.mark.filterwarnings('ignore:ERFA function "utcut1" yielded 2 of') -@pytest.mark.filterwarnings('ignore:ERFA function 
"d2dtf" yielded 1 of') -@pytest.mark.parametrize("use_extra", [True, False]) -def test_astrometry_icrs_to_app(astrometry_args, use_extra): - """ - Check for consistency beteen astrometry libraries when converting ICRS -> TOPP - - This test checks for consistency in apparent coordinate calculations using the - three different libraries that are available to pyuvdata, namely: astropy, pyERFA, - and python-novas. Between these three, we expect agreement within 100 µas in - most instances, although for pyuvdata we tolerate differences of up to 1 mas since - we don't expect to need astrometry better than this. - """ - pytest.importorskip("novas") - pytest.importorskip("novas_de405") - # Do some basic cross-checking between the different astrometry libraries - # to see if they all line up correctly. - astrometry_list = ["novas", "erfa", "astropy"] - coord_results = [None, None, None, None] - - # These values were indepedently calculated using erfa v1.7.2, which at the - # time of coding agreed to < 1 mas with astropy v4.2.1 and novas 3.1.1.5. We - # use those values here as a sort of history check to make sure that something - # hasn't changed in the underlying astrometry libraries without being caught - precalc_ra = np.array( - [2.4736400623737507, 2.4736352750862760, 2.4736085367439893, 2.4734781687162820] - ) - precalc_dec = np.array( - [1.2329576409345270, 1.2329556410623417, 1.2329541289890513, 1.2328577308430242] - ) - - coord_results[3] = (precalc_ra, precalc_dec) - - kwargs = {} - extra_args = ["pm_ra", "pm_dec", "vrad", "dist"] - if use_extra: - for key in extra_args: - kwargs[key] = astrometry_args[key] - else: - # don't compare to precalc if not using extra arguments - coord_results = coord_results[:-1] - - for idx, name in enumerate(astrometry_list): - coord_results[idx] = uvutils.transform_icrs_to_app( - time_array=astrometry_args["time_array"], - ra=astrometry_args["icrs_ra"], - dec=astrometry_args["icrs_dec"], - telescope_loc=astrometry_args["telescope_loc"], - epoch=astrometry_args["epoch"], - astrometry_library=name, - **kwargs, - ) - - for idx in range(len(coord_results) - 1): - for jdx in range(idx + 1, len(coord_results)): - alpha_coord = SkyCoord( - coord_results[idx][0], coord_results[idx][1], unit="rad" - ) - beta_coord = SkyCoord( - coord_results[jdx][0], coord_results[jdx][1], unit="rad" - ) - assert np.all(alpha_coord.separation(beta_coord).marcsec < 1.0) - - -def test_astrometry_app_to_icrs(astrometry_args): - """ - Check for consistency beteen astrometry libraries when converting TOPO -> ICRS - - This test checks for consistency between the pyERFA and astropy libraries for - converting apparent coords back to ICRS. Between these two, we expect agreement - within 100 µas in most instances, although for pyuvdata we tolerate differences of - up to 1 mas since we don't expect to need astrometry better than this. - """ - astrometry_list = ["erfa", "astropy"] - coord_results = [None, None, None] - - # These values were indepedently calculated using erfa v1.7.2, which at the - # time of coding agreed to < 1 mas with astropy v4.2.1. 
We again are using - # those values here as a sort of history check to make sure that something - # hasn't changed in the underlying astrometry libraries without being caught - precalc_ra = np.array( - [2.4623360300722170, 2.4623407989706756, 2.4623676572008280, 2.4624965192217900] - ) - precalc_dec = np.array( - [1.2350407132378372, 1.2350427272595987, 1.2350443204758008, 1.2351412288987034] - ) - coord_results[2] = (precalc_ra, precalc_dec) - - for idx, name in enumerate(astrometry_list): - # Note we're using icrs_ra and icrs_dec instead of app_ra and app_dec keys - # because the above pre-calculated values were generated using the ICRS - # coordinate values - coord_results[idx] = uvutils.transform_app_to_icrs( - time_array=astrometry_args["time_array"], - app_ra=astrometry_args["icrs_ra"], - app_dec=astrometry_args["icrs_dec"], - telescope_loc=astrometry_args["telescope_loc"], - astrometry_library=name, - ) - - for idx in range(len(coord_results) - 1): - for jdx in range(idx + 1, len(coord_results)): - alpha_coord = SkyCoord( - coord_results[idx][0], coord_results[idx][1], unit="rad" - ) - beta_coord = SkyCoord( - coord_results[jdx][0], coord_results[jdx][1], unit="rad" - ) - assert np.all(alpha_coord.separation(beta_coord).marcsec < 1.0) - - -def test_sidereal_reptime(astrometry_args): - """ - Check for equality when supplying a singleton time versus an array of identical - values for transform_sidereal_coords - """ - - gcrs_ra, gcrs_dec = uvutils.transform_sidereal_coords( - longitude=astrometry_args["icrs_ra"] * np.ones(2), - latitude=astrometry_args["icrs_dec"] * np.ones(2), - in_coord_frame="icrs", - out_coord_frame="gcrs", - time_array=Time(astrometry_args["time_array"][0], format="jd"), - ) - - check_ra, check_dec = uvutils.transform_sidereal_coords( - longitude=astrometry_args["icrs_ra"] * np.ones(2), - latitude=astrometry_args["icrs_dec"] * np.ones(2), - in_coord_frame="icrs", - out_coord_frame="gcrs", - time_array=Time(astrometry_args["time_array"][0] * np.ones(2), format="jd"), - ) - - assert np.all(gcrs_ra == check_ra) - assert np.all(gcrs_dec == check_dec) - - -@pytest.mark.parametrize(["telescope_frame", "selenoid"], frame_selenoid) -def test_transform_icrs_to_app_time_obj(astrometry_args, telescope_frame, selenoid): - """ - Test that we recover identical values when using Time objects instead of floats - for the various time-related arguments in transform_icrs_to_app. - """ - if telescope_frame == "itrs": - telescope_loc = astrometry_args["telescope_loc"] - else: - telescope_loc = astrometry_args["moon_telescope_loc"] - - check_ra, check_dec = uvutils.transform_icrs_to_app( - time_array=Time(astrometry_args["time_array"], format="jd"), - ra=astrometry_args["icrs_ra"], - dec=astrometry_args["icrs_dec"], - telescope_loc=telescope_loc, - telescope_frame=telescope_frame, - ellipsoid=selenoid, - epoch=Time(astrometry_args["epoch"], format="jyear"), - ) - - if telescope_frame == "itrs": - app_ra = astrometry_args["app_ra"] - app_dec = astrometry_args["app_dec"] - else: - app_ra = astrometry_args["moon_app_ra"][selenoid] - app_dec = astrometry_args["moon_app_dec"][selenoid] - - assert np.all(check_ra == app_ra) - assert np.all(check_dec == app_dec) - - -def test_transform_app_to_icrs_objs(astrometry_args): - """ - Test that we recover identical values when using Time/EarthLocation objects instead - of floats for time_array and telescope_loc, respectively, for transform_app_to_icrs.
- """ - telescope_loc = EarthLocation.from_geodetic( - astrometry_args["telescope_loc"][1] * (180.0 / np.pi), - astrometry_args["telescope_loc"][0] * (180.0 / np.pi), - height=astrometry_args["telescope_loc"][2], - ) - - icrs_ra, icrs_dec = uvutils.transform_app_to_icrs( - time_array=astrometry_args["time_array"][0], - app_ra=astrometry_args["app_ra"][0], - app_dec=astrometry_args["app_dec"][0], - telescope_loc=astrometry_args["telescope_loc"], - ) - - check_ra, check_dec = uvutils.transform_app_to_icrs( - time_array=Time(astrometry_args["time_array"][0], format="jd"), - app_ra=astrometry_args["app_ra"][0], - app_dec=astrometry_args["app_dec"][0], - telescope_loc=telescope_loc, - ) - - assert np.all(check_ra == icrs_ra) - assert np.all(check_dec == icrs_dec) - - -@pytest.mark.parametrize(["telescope_frame", "selenoid"], frame_selenoid) -def test_calc_app_coords_objs(astrometry_args, telescope_frame, selenoid): - """ - Test that we recover identical values when using Time/EarthLocation objects instead - of floats for time_array and telescope_loc, respectively for calc_app_coords. - """ - if telescope_frame == "itrs": - telescope_loc = EarthLocation.from_geodetic( - astrometry_args["telescope_loc"][1] * (180.0 / np.pi), - astrometry_args["telescope_loc"][0] * (180.0 / np.pi), - height=astrometry_args["telescope_loc"][2], - ) - TimeClass = Time - else: - telescope_loc = MoonLocation.from_selenodetic( - astrometry_args["telescope_loc"][1] * (180.0 / np.pi), - astrometry_args["telescope_loc"][0] * (180.0 / np.pi), - height=astrometry_args["telescope_loc"][2], - ellipsoid=selenoid, - ) - TimeClass = LTime - - app_ra, app_dec = uvutils.calc_app_coords( - lon_coord=astrometry_args["icrs_ra"], - lat_coord=astrometry_args["icrs_dec"], - time_array=astrometry_args["time_array"][0], - telescope_loc=astrometry_args["telescope_loc"], - telescope_frame=telescope_frame, - ellipsoid=selenoid, - ) - - check_ra, check_dec = uvutils.calc_app_coords( - lon_coord=astrometry_args["icrs_ra"], - lat_coord=astrometry_args["icrs_dec"], - time_array=TimeClass(astrometry_args["time_array"][0], format="jd"), - telescope_loc=telescope_loc, - telescope_frame=telescope_frame, - ellipsoid=selenoid, - ) - - assert np.all(check_ra == app_ra) - assert np.all(check_dec == app_dec) - - -def test_astrometry_lst(astrometry_args): - """ - Check for consistency beteen astrometry libraries when calculating LAST - - This test evaluates consistency in calculating local apparent sidereal time when - using the different astrometry libraries available in pyuvdata, namely: astropy, - pyERFA, and python-novas. Between these three, we expect agreement within 6 µs in - most instances, although for pyuvdata we tolerate differences of up to ~60 µs - (which translates to 1 mas in sky position error) since we don't expect to need - astrometry better than this. - """ - pytest.importorskip("novas") - pytest.importorskip("novas_de405") - astrometry_list = ["erfa", "astropy", "novas"] - lst_results = [None, None, None, None] - # These values were indepedently calculated using erfa v1.7.2, which at the - # time of coding agreed to < 50 µs with astropy v4.2.1 and novas 3.1.1.5. 
We - # use those values here as a sort of history check to make sure that something - # hasn't changed in the underlying astrometry libraries without being caught - lst_results[3] = np.array( - [0.8506741803481069, 2.442973468758589, 4.1728965710160555, 1.0130589895999587] - ) - - for idx, name in enumerate(astrometry_list): - # Note that the units aren't right here (missing a rad -> deg conversion), but - # the above values were calculated using the arguments below. - lst_results[idx] = uvutils.get_lst_for_time( - jd_array=astrometry_args["time_array"], - latitude=astrometry_args["telescope_loc"][0], - longitude=astrometry_args["telescope_loc"][1], - altitude=astrometry_args["telescope_loc"][2], - astrometry_library=name, - ) - - for idx in range(len(lst_results) - 1): - for jdx in range(idx + 1, len(lst_results)): - alpha_time = lst_results[idx] * units.rad - beta_time = lst_results[jdx] * units.rad - assert np.all(np.abs(alpha_time - beta_time).to_value("mas") < 1.0) - - -@pytest.mark.parametrize("astrometry_lib", ["astropy", "novas", "erfa"]) -def test_lst_for_time_smooth(astrometry_lib): - """ - Test that LSTs are smooth and do not have large discontinuities. - - Inspired by a bug found by the HERA validation team in our original implementation - using the erfa library. - """ - if astrometry_lib == "novas": - pytest.importorskip("novas") - pytest.importorskip("novas_de405") - - hera_loc = EarthLocation.from_geodetic( - lat=-30.72152612068957, lon=21.428303826863015, height=1051.6900000218302 - ) - - start_time = 2458101.5435486115 - n_times = 28728 - integration_time = 1.0 - - daysperhour = 1 / 24.0 - hourspersec = 1 / 60.0**2 - dayspersec = daysperhour * hourspersec - inttime_days = integration_time * dayspersec - duration = inttime_days * n_times - end_time = start_time + duration - inttime_days - times = np.linspace(start_time, end_time + inttime_days, n_times, endpoint=False) - - uv_lsts = uvutils.get_lst_for_time( - times, - latitude=hera_loc.lat.deg, - longitude=hera_loc.lon.deg, - altitude=hera_loc.height.value, - astrometry_library=astrometry_lib, - frame="itrs", - ) - - dtimes = times - int(times[0]) - poly_fit = np.poly1d(np.polyfit(dtimes, uv_lsts, 2)) - diff_poly = uv_lsts - poly_fit(dtimes) - assert np.max(np.abs(diff_poly)) < 1e-10 - - -@pytest.mark.parametrize("astrolib", ["novas", "astropy", "erfa"]) -def test_lst_for_time_float_vs_array(astrometry_args, astrolib): - """ - Test for equality when passing a single float vs an ndarray (of length 1) when - calling get_lst_for_time.
- """ - if astrolib == "novas": - pytest.importorskip("novas") - pytest.importorskip("novas_de405") - - r2d = 180.0 / np.pi - - lst_array = uvutils.get_lst_for_time( - jd_array=np.array(astrometry_args["time_array"][0]), - latitude=astrometry_args["telescope_loc"][0] * r2d, - longitude=astrometry_args["telescope_loc"][1] * r2d, - altitude=astrometry_args["telescope_loc"][2], - astrometry_library=astrolib, - ) - - check_lst = uvutils.get_lst_for_time( - jd_array=astrometry_args["time_array"][0], - telescope_loc=np.multiply(astrometry_args["telescope_loc"], [r2d, r2d, 1]), - astrometry_library=astrolib, - ) - - assert np.all(lst_array == check_lst) - - -def test_get_lst_for_time_errors(astrometry_args): - with pytest.raises( - ValueError, - match="Requested coordinate transformation library is not supported, please " - "select either 'erfa' or 'astropy' for astrometry_library.", - ): - uvutils.get_lst_for_time( - jd_array=np.array(astrometry_args["time_array"][0]), - latitude=astrometry_args["telescope_loc"][0] * (180.0 / np.pi), - longitude=astrometry_args["telescope_loc"][1] * (180.0 / np.pi), - altitude=astrometry_args["telescope_loc"][2], - astrometry_library="foo", - ) - - with pytest.raises( - ValueError, - match="Cannot set both telescope_loc and latitude/longitude/altitude", - ): - uvutils.get_lst_for_time( - np.array(astrometry_args["time_array"][0]), - latitude=astrometry_args["telescope_loc"][0] * (180.0 / np.pi), - telescope_loc=astrometry_args["telescope_loc"][2], - ) - - -@pytest.mark.filterwarnings("ignore:The get_frame_attr_names") -@pytest.mark.skipif(not hasmoon, reason="lunarsky not installed") -@pytest.mark.parametrize("selenoid", selenoids) -def test_lst_for_time_moon(astrometry_args, selenoid): - """Test the get_lst_for_time function with MCMF frame""" - from lunarsky import SkyCoord as LSkyCoord - - lat, lon, alt = (0.6875, 24.433, 0) # Degrees - - # check error if try to use the wrong astrometry library - with pytest.raises( - NotImplementedError, - match="The MCMF frame is only supported with the 'astropy' astrometry library", - ): - lst_array = uvutils.get_lst_for_time( - jd_array=astrometry_args["time_array"], - latitude=lat, - longitude=lon, - altitude=alt, - frame="mcmf", - ellipsoid=selenoid, - astrometry_library="novas", - ) - - lst_array = uvutils.get_lst_for_time( - jd_array=astrometry_args["time_array"], - latitude=lat, - longitude=lon, - altitude=alt, - frame="mcmf", - ellipsoid=selenoid, - ) - - # Verify that lsts are close to local zenith RA - loc = MoonLocation.from_selenodetic(lon, lat, alt, ellipsoid=selenoid) - for ii, tt in enumerate( - LTime(astrometry_args["time_array"], format="jd", scale="utc", location=loc) - ): - src = LSkyCoord(alt="90d", az="0d", frame="lunartopo", obstime=tt, location=loc) - # TODO: would be nice to get this down to uvutils.RADIAN_TOL - # seems like maybe the ellipsoid isn't being used properly? 
- assert np.isclose(lst_array[ii], src.transform_to("icrs").ra.rad, atol=1e-5) - - # test default ellipsoid - if selenoid == "SPHERE": - lst_array_default = uvutils.get_lst_for_time( - jd_array=astrometry_args["time_array"], - latitude=lat, - longitude=lon, - altitude=alt, - frame="mcmf", - ) - np.testing.assert_allclose(lst_array, lst_array_default) - - -def test_phasing_funcs(): - # these tests are based on a notebook where I tested against the mwa_tools - # phasing code - ra_hrs = 12.1 - dec_degs = -42.3 - mjd = 55780.1 - - array_center_xyz = np.array([-2559454.08, 5095372.14, -2849057.18]) - lat_lon_alt = uvutils.LatLonAlt_from_XYZ(array_center_xyz) - - obs_time = Time(mjd, format="mjd", location=(lat_lon_alt[1], lat_lon_alt[0])) - - icrs_coord = SkyCoord( - ra=Angle(ra_hrs, unit="hr"), dec=Angle(dec_degs, unit="deg"), obstime=obs_time - ) - gcrs_coord = icrs_coord.transform_to("gcrs") - - # in east/north/up frame (relative to array center) in meters: (Nants, 3) - ants_enu = np.array([-101.94, 156.41, 1.24]) - - ant_xyz_abs = uvutils.ECEF_from_ENU( - ants_enu, - latitude=lat_lon_alt[0], - longitude=lat_lon_alt[1], - altitude=lat_lon_alt[2], - ) - - array_center_coord = SkyCoord( - x=array_center_xyz[0] * units.m, - y=array_center_xyz[1] * units.m, - z=array_center_xyz[2] * units.m, - frame="itrs", - obstime=obs_time, - ) - - itrs_coord = SkyCoord( - x=ant_xyz_abs[0] * units.m, - y=ant_xyz_abs[1] * units.m, - z=ant_xyz_abs[2] * units.m, - frame="itrs", - obstime=obs_time, - ) - - gcrs_array_center = array_center_coord.transform_to("gcrs") - gcrs_from_itrs_coord = itrs_coord.transform_to("gcrs") - - gcrs_rel = ( - (gcrs_from_itrs_coord.cartesian - gcrs_array_center.cartesian).get_xyz().T - ) - - gcrs_uvw = uvutils.old_uvw_calc( - gcrs_coord.ra.rad, gcrs_coord.dec.rad, gcrs_rel.value - ) - - mwa_tools_calcuvw_u = -97.122828 - mwa_tools_calcuvw_v = 50.388281 - mwa_tools_calcuvw_w = -151.27976 - - np.testing.assert_allclose(gcrs_uvw[0, 0], mwa_tools_calcuvw_u, atol=1e-3) - np.testing.assert_allclose(gcrs_uvw[0, 1], mwa_tools_calcuvw_v, atol=1e-3) - np.testing.assert_allclose(gcrs_uvw[0, 2], mwa_tools_calcuvw_w, atol=1e-3) - - # also test unphasing - temp2 = uvutils.undo_old_uvw_calc( - gcrs_coord.ra.rad, gcrs_coord.dec.rad, np.squeeze(gcrs_uvw) - ) - np.testing.assert_allclose(gcrs_rel.value, np.squeeze(temp2)) - - -def test_pol_funcs(): - """Test utility functions to convert between polarization strings and numbers""" - - pol_nums = [-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4] - pol_str = ["yx", "xy", "yy", "xx", "lr", "rl", "ll", "rr", "pI", "pQ", "pU", "pV"] - assert pol_nums == uvutils.polstr2num(pol_str) - assert pol_str == uvutils.polnum2str(pol_nums) - # Check individuals - assert -6 == uvutils.polstr2num("YY") - assert "pV" == uvutils.polnum2str(4) - # Check errors - pytest.raises(KeyError, uvutils.polstr2num, "foo") - pytest.raises(ValueError, uvutils.polstr2num, 1) - pytest.raises(ValueError, uvutils.polnum2str, 7.3) - # Check parse - assert uvutils.parse_polstr("xX") == "xx" - assert uvutils.parse_polstr("XX") == "xx" - assert uvutils.parse_polstr("i") == "pI" - - -def test_pol_funcs_x_orientation(): - """Test functions to convert between pol strings and numbers with x_orientation.""" - - pol_nums = [-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4] - - x_orient1 = "e" - pol_str = ["ne", "en", "nn", "ee", "lr", "rl", "ll", "rr", "pI", "pQ", "pU", "pV"] - assert pol_nums == uvutils.polstr2num(pol_str, x_orientation=x_orient1) - assert pol_str == uvutils.polnum2str(pol_nums, 
x_orientation=x_orient1) - # Check individuals - assert -6 == uvutils.polstr2num("NN", x_orientation=x_orient1) - assert "pV" == uvutils.polnum2str(4) - # Check errors - pytest.raises(KeyError, uvutils.polstr2num, "foo", x_orientation=x_orient1) - pytest.raises(ValueError, uvutils.polstr2num, 1, x_orientation=x_orient1) - pytest.raises(ValueError, uvutils.polnum2str, 7.3, x_orientation=x_orient1) - # Check parse - assert uvutils.parse_polstr("eE", x_orientation=x_orient1) == "ee" - assert uvutils.parse_polstr("xx", x_orientation=x_orient1) == "ee" - assert uvutils.parse_polstr("NN", x_orientation=x_orient1) == "nn" - assert uvutils.parse_polstr("yy", x_orientation=x_orient1) == "nn" - assert uvutils.parse_polstr("i", x_orientation=x_orient1) == "pI" - - x_orient2 = "n" - pol_str = ["en", "ne", "ee", "nn", "lr", "rl", "ll", "rr", "pI", "pQ", "pU", "pV"] - assert pol_nums == uvutils.polstr2num(pol_str, x_orientation=x_orient2) - assert pol_str == uvutils.polnum2str(pol_nums, x_orientation=x_orient2) - # Check individuals - assert -6 == uvutils.polstr2num("EE", x_orientation=x_orient2) - assert "pV" == uvutils.polnum2str(4) - # Check errors - pytest.raises(KeyError, uvutils.polstr2num, "foo", x_orientation=x_orient2) - pytest.raises(ValueError, uvutils.polstr2num, 1, x_orientation=x_orient2) - pytest.raises(ValueError, uvutils.polnum2str, 7.3, x_orientation=x_orient2) - # Check parse - assert uvutils.parse_polstr("nN", x_orientation=x_orient2) == "nn" - assert uvutils.parse_polstr("xx", x_orientation=x_orient2) == "nn" - assert uvutils.parse_polstr("EE", x_orientation=x_orient2) == "ee" - assert uvutils.parse_polstr("yy", x_orientation=x_orient2) == "ee" - assert uvutils.parse_polstr("i", x_orientation=x_orient2) == "pI" - - # check warnings for non-recognized x_orientation - with check_warnings(UserWarning, "x_orientation not recognized"): - assert uvutils.polstr2num("xx", x_orientation="foo") == -5 - - with check_warnings(UserWarning, "x_orientation not recognized"): - assert uvutils.polnum2str(-6, x_orientation="foo") == "yy" - - -def test_jones_num_funcs(): - """Test functions to convert between jones polarization strings and numbers.""" - - jnums = [-8, -7, -6, -5, -4, -3, -2, -1] - jstr = ["Jyx", "Jxy", "Jyy", "Jxx", "Jlr", "Jrl", "Jll", "Jrr"] - assert jnums == uvutils.jstr2num(jstr) - assert jstr == uvutils.jnum2str(jnums) - # Check shorthands - jstr = ["yx", "xy", "yy", "y", "xx", "x", "lr", "rl", "ll", "l", "rr", "r"] - jnums = [-8, -7, -6, -6, -5, -5, -4, -3, -2, -2, -1, -1] - assert jnums == uvutils.jstr2num(jstr) - # Check individuals - assert -6 == uvutils.jstr2num("jyy") - assert "Jxy" == uvutils.jnum2str(-7) - # Check errors - pytest.raises(KeyError, uvutils.jstr2num, "foo") - pytest.raises(ValueError, uvutils.jstr2num, 1) - pytest.raises(ValueError, uvutils.jnum2str, 7.3) - - # check parse method - assert uvutils.parse_jpolstr("x") == "Jxx" - assert uvutils.parse_jpolstr("xy") == "Jxy" - assert uvutils.parse_jpolstr("XY") == "Jxy" - - -def test_jones_num_funcs_x_orientation(): - """Test functions to convert jones pol strings and numbers with x_orientation.""" - - jnums = [-8, -7, -6, -5, -4, -3, -2, -1] - x_orient1 = "east" - jstr = ["Jne", "Jen", "Jnn", "Jee", "Jlr", "Jrl", "Jll", "Jrr"] - assert jnums == uvutils.jstr2num(jstr, x_orientation=x_orient1) - assert jstr == uvutils.jnum2str(jnums, x_orientation=x_orient1) - # Check shorthands - jstr = ["ne", "en", "nn", "n", "ee", "e", "lr", "rl", "ll", "l", "rr", "r"] - jnums = [-8, -7, -6, -6, -5, -5, -4, -3, -2, -2, -1, -1] - 
assert jnums == uvutils.jstr2num(jstr, x_orientation=x_orient1) - # Check individuals - assert -6 == uvutils.jstr2num("jnn", x_orientation=x_orient1) - assert "Jen" == uvutils.jnum2str(-7, x_orientation=x_orient1) - # Check errors - pytest.raises(KeyError, uvutils.jstr2num, "foo", x_orientation=x_orient1) - pytest.raises(ValueError, uvutils.jstr2num, 1, x_orientation=x_orient1) - pytest.raises(ValueError, uvutils.jnum2str, 7.3, x_orientation=x_orient1) - - # check parse method - assert uvutils.parse_jpolstr("e", x_orientation=x_orient1) == "Jee" - assert uvutils.parse_jpolstr("x", x_orientation=x_orient1) == "Jee" - assert uvutils.parse_jpolstr("y", x_orientation=x_orient1) == "Jnn" - assert uvutils.parse_jpolstr("en", x_orientation=x_orient1) == "Jen" - assert uvutils.parse_jpolstr("NE", x_orientation=x_orient1) == "Jne" - - jnums = [-8, -7, -6, -5, -4, -3, -2, -1] - x_orient2 = "north" - jstr = ["Jen", "Jne", "Jee", "Jnn", "Jlr", "Jrl", "Jll", "Jrr"] - assert jnums == uvutils.jstr2num(jstr, x_orientation=x_orient2) - assert jstr == uvutils.jnum2str(jnums, x_orientation=x_orient2) - # Check shorthands - jstr = ["en", "ne", "ee", "e", "nn", "n", "lr", "rl", "ll", "l", "rr", "r"] - jnums = [-8, -7, -6, -6, -5, -5, -4, -3, -2, -2, -1, -1] - assert jnums == uvutils.jstr2num(jstr, x_orientation=x_orient2) - # Check individuals - assert -6 == uvutils.jstr2num("jee", x_orientation=x_orient2) - assert "Jne" == uvutils.jnum2str(-7, x_orientation=x_orient2) - # Check errors - pytest.raises(KeyError, uvutils.jstr2num, "foo", x_orientation=x_orient2) - pytest.raises(ValueError, uvutils.jstr2num, 1, x_orientation=x_orient2) - pytest.raises(ValueError, uvutils.jnum2str, 7.3, x_orientation=x_orient2) - - # check parse method - assert uvutils.parse_jpolstr("e", x_orientation=x_orient2) == "Jee" - assert uvutils.parse_jpolstr("x", x_orientation=x_orient2) == "Jnn" - assert uvutils.parse_jpolstr("y", x_orientation=x_orient2) == "Jee" - assert uvutils.parse_jpolstr("en", x_orientation=x_orient2) == "Jen" - assert uvutils.parse_jpolstr("NE", x_orientation=x_orient2) == "Jne" - - # check warnings for non-recognized x_orientation - with check_warnings(UserWarning, "x_orientation not recognized"): - assert uvutils.jstr2num("x", x_orientation="foo") == -5 - - with check_warnings(UserWarning, "x_orientation not recognized"): - assert uvutils.jnum2str(-6, x_orientation="foo") == "Jyy" - - -def test_conj_pol(): - """Test function to conjugate pols""" - - pol_nums = [-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4] - cpol_nums = [-7, -8, -6, -5, -3, -4, -2, -1, 1, 2, 3, 4] - assert pol_nums == uvutils.conj_pol(cpol_nums) - assert uvutils.conj_pol(pol_nums) == cpol_nums - # fmt: off - pol_str = ['yx', 'xy', 'yy', 'xx', 'ee', 'nn', 'en', 'ne', 'lr', 'rl', 'll', - 'rr', 'pI', 'pQ', 'pU', 'pV'] - cpol_str = ['xy', 'yx', 'yy', 'xx', 'ee', 'nn', 'ne', 'en', 'rl', 'lr', 'll', - 'rr', 'pI', 'pQ', 'pU', 'pV'] - # fmt: on - assert pol_str == uvutils.conj_pol(cpol_str) - assert uvutils.conj_pol(pol_str) == cpol_str - assert [pol_str, pol_nums] == uvutils.conj_pol([cpol_str, cpol_nums]) - - # Test error with jones - cjstr = ["Jxy", "Jyx", "Jyy", "Jxx", "Jrl", "Jlr", "Jll", "Jrr"] - assert pytest.raises(KeyError, uvutils.conj_pol, cjstr) - - # Test invalid pol - with pytest.raises( - ValueError, match="Polarization not recognized, cannot be conjugated." 
-    ):
-        uvutils.conj_pol(2.3)
-
-
-@pytest.mark.parametrize("grid_alg", [True, False, None])
-def test_redundancy_finder(grid_alg):
-    """
-    Check that get_baseline_redundancies and get_antenna_redundancies return consistent
-    redundant groups for a test file with the HERA19 layout.
-    """
-    uvd = UVData()
-    uvd.read_uvfits(
-        os.path.join(DATA_PATH, "fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits")
-    )
-
-    uvd.select(times=uvd.time_array[0])
-    uvd.unproject_phase(use_ant_pos=True)
-    # uvw_array is now equivalent to baseline positions
-    uvd.conjugate_bls("ant1<ant2")
-@pytest.mark.parametrize("flip_u", [True, False])
-@pytest.mark.parametrize("use_uvw", [True, False])
-@pytest.mark.parametrize("use_earthloc", [True, False])
-@pytest.mark.filterwarnings("ignore:> 25 ms errors detected reading in LST values")
-def test_uvw_track_generator(flip_u, use_uvw, use_earthloc):
-    sma_mir = UVData.from_file(os.path.join(DATA_PATH, "sma_test.mir"))
-    sma_mir.set_lsts_from_time_array()
-    sma_mir._set_app_coords_helper()
-    sma_mir.set_uvws_from_antenna_positions()
-    if not use_uvw:
-        # Just subselect the antennas in the dataset
-        sma_mir.telescope.antenna_positions = sma_mir.telescope.antenna_positions[
-            [0, 3], :
-        ]
-
-    if use_earthloc:
-        telescope_loc = EarthLocation.from_geodetic(
-            lon=sma_mir.telescope.location_lat_lon_alt_degrees[1],
-            lat=sma_mir.telescope.location_lat_lon_alt_degrees[0],
-            height=sma_mir.telescope.location_lat_lon_alt_degrees[2],
-        )
-    else:
-        telescope_loc = sma_mir.telescope.location_lat_lon_alt_degrees
-
-    if use_uvw:
-        sma_copy = sma_mir.copy()
-        sma_copy.unproject_phase()
-        uvw_array = sma_copy.uvw_array
-    else:
-        uvw_array = None
-
-    cat_dict = sma_mir.phase_center_catalog[1]
-    gen_results = uvutils.uvw_track_generator(
-        lon_coord=cat_dict["cat_lon"],
-        lat_coord=cat_dict["cat_lat"],
-        coord_frame=cat_dict["cat_frame"],
-        coord_epoch=cat_dict["cat_epoch"],
-        telescope_loc=telescope_loc,
-        time_array=sma_mir.time_array if use_uvw else sma_mir.time_array[0],
-        antenna_positions=(
-            sma_mir.telescope.antenna_positions if uvw_array is None else None
-        ),
-        force_postive_u=flip_u,
-        uvw_array=uvw_array,
-    )
-
-    assert sma_mir._phase_center_app_ra.compare_value(gen_results["app_ra"])
-    assert sma_mir._phase_center_app_dec.compare_value(gen_results["app_dec"])
-    assert sma_mir._phase_center_frame_pa.compare_value(gen_results["frame_pa"])
-    assert sma_mir._lst_array.compare_value(gen_results["lst"])
-    if flip_u:
-        assert sma_mir._uvw_array.compare_value(-gen_results["uvw"])
-    else:
-        assert sma_mir._uvw_array.compare_value(gen_results["uvw"])
-
-
-@pytest.mark.skipif(not hasmoon, reason="lunarsky not installed")
-@pytest.mark.parametrize("selenoid", ["SPHERE", "GSFC", "GRAIL23", "CE-1-LAM-GEO"])
-def test_uvw_track_generator_moon(selenoid):
-    # Note this isn't a particularly deep test, but it at least exercises the code.
- from spiceypy.utils.exceptions import SpiceUNKNOWNFRAME - - try: - gen_results = uvutils.uvw_track_generator( - lon_coord=0.0, - lat_coord=0.0, - coord_frame="icrs", - telescope_loc=(0, 0, 0), - time_array=2456789.0, - antenna_positions=np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), - telescope_frame="mcmf", - ellipsoid=selenoid, - ) - except SpiceUNKNOWNFRAME as err: - pytest.skip("SpiceUNKNOWNFRAME error: " + str(err)) - - # Check that the total lengths all match 1 - np.testing.assert_allclose((gen_results["uvw"] ** 2.0).sum(1), 2.0) - - if selenoid == "SPHERE": - # check defaults - gen_results = uvutils.uvw_track_generator( - lon_coord=0.0, - lat_coord=0.0, - coord_frame="icrs", - telescope_loc=(0, 0, 0), - time_array=2456789.0, - antenna_positions=np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), - telescope_frame="mcmf", - ) - - # Check that the total lengths all match 1 - np.testing.assert_allclose((gen_results["uvw"] ** 2.0).sum(1), 2.0) - - -@pytest.mark.parametrize("err_state", ["err", "warn", "none"]) -@pytest.mark.parametrize("tel_loc", ["Center", "Moon", "Earth", "Space"]) -@pytest.mark.parametrize("check_frame", ["Moon", "Earth"]) -@pytest.mark.parametrize("del_tel_loc", [False, None, True]) -def test_check_surface_based_positions(err_state, tel_loc, check_frame, del_tel_loc): - tel_loc_dict = { - "Center": np.array([0, 0, 0]), - "Moon": np.array([0, 0, 1.737e6]), - "Earth": np.array([0, 6.37e6, 0]), - "Space": np.array([4.22e7, 0, 0]), - } - tel_frame_dict = {"Moon": "mcmf", "Earth": "itrs"} - - ant_pos = np.array( - [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]] - ) - if del_tel_loc: - ant_pos += tel_loc_dict[tel_loc] - - fail_type = err_msg = err_type = None - err_check = check_warnings - if (tel_loc != check_frame) and (err_state != "none"): - if tel_loc == "Center": - fail_type = "below" - elif tel_loc == "Space": - fail_type = "above" - else: - fail_type = "above" if tel_loc == "Earth" else "below" - - if fail_type is not None: - err_msg = ( - f"{tel_frame_dict[check_frame]} position vector magnitudes must be " - f"on the order of the radius of {check_frame} -- they appear to lie well " - f"{fail_type} this." 
- ) - if err_state == "err": - err_type = ValueError - err_check = pytest.raises - else: - err_type = UserWarning - - with err_check(err_type, match=err_msg): - status = uvutils.check_surface_based_positions( - telescope_loc=None if (del_tel_loc) else tel_loc_dict[tel_loc], - antenna_positions=None if (del_tel_loc is None) else ant_pos, - telescope_frame=tel_frame_dict[check_frame], - raise_error=err_state == "err", - raise_warning=err_state == "warn", - ) - - assert (err_state == "err") or (status == (tel_loc == check_frame)) - - -@pytest.mark.skipif(not hasmoon, reason="lunarsky not installed") -@pytest.mark.parametrize("tel_loc", ["Earth", "Moon"]) -@pytest.mark.parametrize("check_frame", ["Earth", "Moon"]) -def test_check_surface_based_positions_earthmoonloc(tel_loc, check_frame): - frame = "mcmf" if (check_frame == "Moon") else "itrs" - - if tel_loc == "Earth": - loc = EarthLocation.from_geodetic(0, 0, 0) - else: - loc = MoonLocation.from_selenodetic(0, 0, 0) - - if tel_loc == check_frame: - assert uvutils.check_surface_based_positions( - telescope_loc=loc, telescope_frame=frame - ) - else: - with pytest.raises(ValueError, match=(f"{frame} position vector")): - uvutils.check_surface_based_positions( - telescope_loc=[loc.x.value, loc.y.value, loc.z.value], - telescope_frame=frame, - ) - - -def test_determine_pol_order_err(): - with pytest.raises(ValueError, match='order must be either "AIPS" or "CASA".'): - uvutils.determine_pol_order([], order="ABC") - - -@pytest.mark.parametrize( - "pols,aips_order,casa_order", - [ - [[-8, -7, -6, -5], [3, 2, 1, 0], [3, 1, 0, 2]], - [[-5, -6, -7, -8], [0, 1, 2, 3], [0, 2, 3, 1]], - [[1, 2, 3, 4], [0, 1, 2, 3], [0, 1, 2, 3]], - ], -) -@pytest.mark.parametrize("order", ["CASA", "AIPS"]) -def test_pol_order(pols, aips_order, casa_order, order): - check = uvutils.determine_pol_order(pols, order=order) - - if order == "CASA": - assert all(check == casa_order) - if order == "AIPS": - assert all(check == aips_order) - - -def test_slicify(): - assert uvutils.slicify(None) is None - assert uvutils.slicify(slice(None)) == slice(None) - assert uvutils.slicify([]) is None - assert uvutils.slicify([1, 2, 3]) == slice(1, 4, 1) - assert uvutils.slicify([1]) == slice(1, 2, 1) - assert uvutils.slicify([0, 2, 4]) == slice(0, 5, 2) - assert uvutils.slicify([0, 1, 2, 7]) == [0, 1, 2, 7] - - -@pytest.mark.parametrize( - "obj1,obj2,union_result,interset_result,diff_result", - [ - [[1, 2, 3], [3, 4, 5], [1, 2, 3, 4, 5], [3], [1, 2]], # Partial overlap - [[1, 2], [1, 2], [1, 2], [1, 2], []], # Full overlap - [[1, 3, 5], [2, 4, 6], [1, 2, 3, 4, 5, 6], [], [1, 3, 5]], # No overlap - [[1, 2], None, [1, 2], [1, 2], [1, 2]], # Nones - ], -) -def test_sorted_unique_ops(obj1, obj2, union_result, interset_result, diff_result): - assert uvutils._sorted_unique_union(obj1, obj2) == union_result - assert uvutils._sorted_unique_intersection(obj1, obj2) == interset_result - assert uvutils._sorted_unique_difference(obj1, obj2) == diff_result - - -def test_generate_new_phase_center_id_errs(): - with pytest.raises(ValueError, match="Cannot specify old_id if no catalog"): - uvutils.generate_new_phase_center_id(old_id=1) - - with pytest.raises(ValueError, match="Provided cat_id was found in reserved_ids"): - uvutils.generate_new_phase_center_id(cat_id=1, reserved_ids=[1, 2, 3]) diff --git a/tests/test_uvcalibrate.py b/tests/test_uvcalibrate.py new file mode 100644 index 0000000000..d04ca2150c --- /dev/null +++ b/tests/test_uvcalibrate.py @@ -0,0 +1,622 @@ +# -*- mode: python; coding: utf-8 
-*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Tests for uvcalibrate function.""" +import os +import re + +import numpy as np +import pytest + +from pyuvdata import UVCal, utils, uvcalibrate +from pyuvdata.data import DATA_PATH +from pyuvdata.testing import check_warnings + + +@pytest.mark.filterwarnings("ignore:Fixing auto-correlations to be be real-only,") +@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values") +@pytest.mark.filterwarnings("ignore:telescope_location, antenna_positions") +def test_uvcalibrate_apply_gains_oldfiles(uvcalibrate_uvdata_oldfiles): + # read data + uvd = uvcalibrate_uvdata_oldfiles + + # give it an x_orientation + uvd.telescope.x_orientation = "east" + uvc = UVCal() + uvc.read_calfits(os.path.join(DATA_PATH, "zen.2457698.40355.xx.gain.calfits")) + # downselect to match each other in shape (but not in actual values!) + uvd.select(frequencies=uvd.freq_array[:10]) + uvc.select(times=uvc.time_array[:3]) + + with pytest.raises( + ValueError, + match=re.escape( + "All antenna names with data on UVData are missing " + "on UVCal. To continue with calibration " + "(and flag all the data), set ant_check=False." + ), + ): + uvcalibrate(uvd, uvc, prop_flags=True, ant_check=True, inplace=False) + + ants_expected = [ + "The uvw_array does not match the expected values", + "All antenna names with data on UVData are missing " + "on UVCal. Since ant_check is False, calibration will " + "proceed but all data will be flagged.", + ] + missing_times = [2457698.4036761867, 2457698.4038004624] + + time_expected = f"Time {missing_times[0]} exists on UVData but not on UVCal." + + freq_expected = f"Frequency {uvd.freq_array[0]} exists on UVData but not on UVCal." + + with check_warnings(UserWarning, match=ants_expected): + with pytest.raises(ValueError, match=time_expected): + uvcalibrate(uvd, uvc, prop_flags=True, ant_check=False, inplace=False) + + uvc.select(times=uvc.time_array[0]) + + time_expected = [ + "Times do not match between UVData and UVCal but time_check is False, so " + "calibration will be applied anyway." + ] + + with check_warnings(UserWarning, match=ants_expected + time_expected): + with pytest.raises(ValueError, match=freq_expected): + uvcalibrate(uvd, uvc, prop_flags=True, ant_check=False, time_check=False) + + +@pytest.mark.filterwarnings("ignore:Fixing auto-correlations to be be real-only,") +@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values") +@pytest.mark.filterwarnings("ignore:telescope_location, antenna_positions") +def test_uvcalibrate_delay_oldfiles(uvcalibrate_uvdata_oldfiles): + uvd = uvcalibrate_uvdata_oldfiles + + uvc = UVCal() + uvc.read_calfits(os.path.join(DATA_PATH, "zen.2457698.40355.xx.delay.calfits")) + # downselect to match + uvc.select(times=uvc.time_array[3]) + uvc.gain_convention = "multiply" + + freq_array_use = np.squeeze(uvd.freq_array) + chan_with_use = uvd.channel_width + + ant_expected = [ + "The uvw_array does not match the expected values", + "All antenna names with data on UVData are missing " + "on UVCal. 
Since ant_check is False, calibration will " + "proceed but all data will be flagged.", + "Times do not match between UVData and UVCal but time_check is False, so " + "calibration will be applied anyway.", + r"UVData object does not have `x_orientation` specified but UVCal does", + ] + with check_warnings(UserWarning, match=ant_expected): + uvdcal = uvcalibrate( + uvd, uvc, prop_flags=False, ant_check=False, time_check=False, inplace=False + ) + + uvc.convert_to_gain(freq_array=freq_array_use, channel_width=chan_with_use) + with check_warnings(UserWarning, match=ant_expected): + uvdcal2 = uvcalibrate( + uvd, uvc, prop_flags=False, ant_check=False, time_check=False, inplace=False + ) + + assert uvdcal == uvdcal2 + + +@pytest.mark.filterwarnings("ignore:Fixing auto-correlations to be be real-only,") +@pytest.mark.parametrize("flip_gain_conj", [False, True]) +@pytest.mark.parametrize("gain_convention", ["divide", "multiply"]) +@pytest.mark.parametrize("time_range", [None, "Ntimes", 3]) +def test_uvcalibrate(uvcalibrate_data, flip_gain_conj, gain_convention, time_range): + uvd, uvc = uvcalibrate_data + + if time_range is not None: + tstarts = uvc.time_array - uvc.integration_time / (86400 * 2) + tends = uvc.time_array + uvc.integration_time / (86400 * 2) + if time_range == "Ntimes": + uvc.time_range = np.stack((tstarts, tends), axis=1) + else: + nt_per_range = int(np.ceil(uvc.Ntimes / time_range)) + tstart_inds = np.array(np.arange(time_range) * nt_per_range) + tstarts_use = tstarts[tstart_inds] + tend_inds = np.array((np.arange(time_range) + 1) * nt_per_range - 1) + tend_inds[-1] = -1 + tends_use = tends[tend_inds] + uvc.select(times=uvc.time_array[0:time_range]) + uvc.time_range = np.stack((tstarts_use, tends_use), axis=1) + uvc.time_array = None + uvc.lst_array = None + uvc.set_lsts_from_time_array() + + uvc.gain_convention = gain_convention + + if gain_convention == "divide": + assert uvc.gain_scale is None + else: + # set the gain_scale to "Jy" to test that vis units are set properly + uvc.gain_scale = "Jy" + + with check_warnings( + DeprecationWarning, + match="uvcalibrate has moved, please import it as 'from pyuvdata import " + "uvcalibrate'. 
This warnings will become an error in version 3.2", + ): + uvdcal = utils.uvcalibrate( + uvd, uvc, inplace=False, flip_gain_conj=flip_gain_conj + ) + if gain_convention == "divide": + assert uvdcal.vis_units == "uncalib" + else: + assert uvdcal.vis_units == "Jy" + + key = (1, 13, "xx") + ant1 = (1, "Jxx") + ant2 = (13, "Jxx") + + if flip_gain_conj: + gain_product = (uvc.get_gains(ant1).conj() * uvc.get_gains(ant2)).T + else: + gain_product = (uvc.get_gains(ant1) * uvc.get_gains(ant2).conj()).T + + if time_range is not None and time_range != "Ntimes": + gain_product = gain_product[:, np.newaxis] + gain_product = np.repeat(gain_product, nt_per_range, axis=1) + current_shape = gain_product.shape + new_shape = (current_shape[0] * current_shape[1], current_shape[-1]) + gain_product = gain_product.reshape(new_shape) + gain_product = gain_product[: uvd.Ntimes] + + if gain_convention == "divide": + np.testing.assert_array_almost_equal( + uvdcal.get_data(key), uvd.get_data(key) / gain_product + ) + else: + np.testing.assert_array_almost_equal( + uvdcal.get_data(key), uvd.get_data(key) * gain_product + ) + + # test undo + uvdcal = uvcalibrate( + uvdcal, + uvc, + prop_flags=True, + ant_check=False, + inplace=False, + undo=True, + flip_gain_conj=flip_gain_conj, + ) + + np.testing.assert_array_almost_equal(uvd.get_data(key), uvdcal.get_data(key)) + assert uvdcal.vis_units == "uncalib" + + +@pytest.mark.filterwarnings("ignore:Combined frequencies are separated by more than") +def test_uvcalibrate_dterm_handling(uvcalibrate_data): + uvd, uvc = uvcalibrate_data + + # test d-term exception + with pytest.raises( + ValueError, match="Cannot apply D-term calibration without -7 or -8" + ): + uvcalibrate(uvd, uvc, d_term_cal=True) + + # d-term not implemented error + uvcDterm = uvc.copy() + uvcDterm.jones_array = np.array([-7, -8]) + uvcDterm = uvc + uvcDterm + with pytest.raises( + NotImplementedError, match="D-term calibration is not yet implemented." + ): + uvcalibrate(uvd, uvcDterm, d_term_cal=True) + + +@pytest.mark.filterwarnings("ignore:Changing number of antennas, but preserving") +def test_uvcalibrate_flag_propagation(uvcalibrate_data): + uvd, uvc = uvcalibrate_data + + # test flag propagation + uvc.flag_array[0] = True + uvc.gain_array[1] = 0.0 + uvdcal = uvcalibrate(uvd, uvc, prop_flags=True, ant_check=False, inplace=False) + + assert np.all(uvdcal.get_flags(1, 13, "xx")) # assert completely flagged + assert np.all(uvdcal.get_flags(0, 12, "xx")) # assert completely flagged + np.testing.assert_array_almost_equal( + uvd.get_data(1, 13, "xx"), uvdcal.get_data(1, 13, "xx") + ) + np.testing.assert_array_almost_equal( + uvd.get_data(0, 12, "xx"), uvdcal.get_data(0, 12, "xx") + ) + + uvc_sub = uvc.select(antenna_nums=[1, 12], inplace=False) + + uvdata_unique_nums = np.unique(np.append(uvd.ant_1_array, uvd.ant_2_array)) + uvd.telescope.antenna_names = np.array(uvd.telescope.antenna_names) + missing_ants = uvdata_unique_nums.tolist() + missing_ants.remove(1) + missing_ants.remove(12) + missing_ant_names = [ + uvd.telescope.antenna_names[ + np.where(uvd.telescope.antenna_numbers == antnum)[0][0] + ] + for antnum in missing_ants + ] + + exp_err = ( + f"Antennas {missing_ant_names} have data on UVData but " + "are missing on UVCal. To continue calibration and " + "flag the data from missing antennas, set ant_check=False." 
+ ) + + with pytest.raises(ValueError) as errinfo: + uvdcal = uvcalibrate( + uvd, uvc_sub, prop_flags=True, ant_check=True, inplace=False + ) + + assert exp_err == str(errinfo.value) + + with pytest.warns(UserWarning) as warninfo: + uvdcal = uvcalibrate( + uvd, uvc_sub, prop_flags=True, ant_check=False, inplace=False + ) + warns = {warn.message.args[0] for warn in warninfo} + ant_expected = { + f"Antennas {missing_ant_names} have data on UVData but are missing " + "on UVCal. Since ant_check is False, calibration will " + "proceed and the data for these antennas will be flagged." + } + + assert warns == ant_expected + assert np.all(uvdcal.get_flags(13, 24, "xx")) # assert completely flagged + + +@pytest.mark.filterwarnings("ignore:Cannot preserve total_quality_array") +def test_uvcalibrate_flag_propagation_name_mismatch(uvcalibrate_init_data): + uvd, uvc = uvcalibrate_init_data + + # test flag propagation + uvc.flag_array[0] = True + uvc.gain_array[1] = 0.0 + with pytest.raises( + ValueError, + match=re.escape( + "All antenna names with data on UVData are missing " + "on UVCal. To continue with calibration " + "(and flag all the data), set ant_check=False." + ), + ): + uvdcal = uvcalibrate(uvd, uvc, prop_flags=True, ant_check=True, inplace=False) + + with check_warnings( + UserWarning, + match="All antenna names with data on UVData are missing " + "on UVCal. Since ant_check is False, calibration will " + "proceed but all data will be flagged.", + ): + uvdcal = uvcalibrate(uvd, uvc, prop_flags=True, ant_check=False, inplace=False) + + assert np.all(uvdcal.get_flags(1, 13, "xx")) # assert completely flagged + assert np.all(uvdcal.get_flags(0, 12, "xx")) # assert completely flagged + np.testing.assert_array_almost_equal( + uvd.get_data(1, 13, "xx"), uvdcal.get_data(1, 13, "xx") + ) + np.testing.assert_array_almost_equal( + uvd.get_data(0, 12, "xx"), uvdcal.get_data(0, 12, "xx") + ) + + +def test_uvcalibrate_extra_cal_antennas(uvcalibrate_data): + uvd, uvc = uvcalibrate_data + + # remove some antennas from the data + uvd.select(antenna_nums=[0, 1, 12, 13]) + + uvdcal = uvcalibrate(uvd, uvc, inplace=False) + + key = (1, 13, "xx") + ant1 = (1, "Jxx") + ant2 = (13, "Jxx") + + np.testing.assert_array_almost_equal( + uvdcal.get_data(key), + uvd.get_data(key) / (uvc.get_gains(ant1) * uvc.get_gains(ant2).conj()).T, + ) + + +def test_uvcalibrate_antenna_names_mismatch(uvcalibrate_init_data): + uvd, uvc = uvcalibrate_init_data + + with pytest.raises( + ValueError, + match=re.escape( + "All antenna names with data on UVData are missing " + "on UVCal. To continue with calibration " + "(and flag all the data), set ant_check=False." + ), + ): + uvcalibrate(uvd, uvc, inplace=False) + + # now test that they're all flagged if ant_check is False + with check_warnings( + UserWarning, + match="All antenna names with data on UVData are missing " + "on UVCal. 
Since ant_check is False, calibration will " + "proceed but all data will be flagged.", + ): + uvdcal = uvcalibrate(uvd, uvc, ant_check=False, inplace=False) + + assert np.all(uvdcal.flag_array) # assert completely flagged + + +@pytest.mark.parametrize("time_range", [True, False]) +def test_uvcalibrate_time_mismatch(uvcalibrate_data, time_range): + uvd, uvc = uvcalibrate_data + + if time_range: + tstarts = uvc.time_array - uvc.integration_time / (86400 * 2) + tends = uvc.time_array + uvc.integration_time / (86400 * 2) + original_time_range = np.stack((tstarts, tends), axis=1) + uvc.time_range = original_time_range + uvc.time_array = None + uvc.lst_array = None + uvc.set_lsts_from_time_array() + + # change times to get warnings + if time_range: + uvc.time_range = uvc.time_range + 1 + uvc.set_lsts_from_time_array() + expected_err = "Time_ranges on UVCal do not cover all UVData times." + with pytest.raises(ValueError, match=expected_err): + uvcalibrate(uvd, uvc, inplace=False) + else: + uvc.time_array = uvc.time_array + 1 + uvc.set_lsts_from_time_array() + expected_err = { + f"Time {this_time} exists on UVData but not on UVCal." + for this_time in np.unique(uvd.time_array) + } + + with pytest.raises(ValueError) as errinfo: + uvcalibrate(uvd, uvc, inplace=False) + assert str(errinfo.value) in expected_err + + # for time_range, make the time ranges not cover some UVData times + if time_range: + uvc.time_range = original_time_range + uvc.time_range[0, 1] = uvc.time_range[0, 0] + uvc.integration_time[0] / ( + 86400 * 4 + ) + uvc.set_lsts_from_time_array() + with pytest.raises(ValueError, match=expected_err): + uvcalibrate(uvd, uvc, inplace=False) + + uvc.phase_center_id_array = np.arange(uvc.Ntimes) + uvc.phase_center_catalog = {0: None} + uvc.select(phase_center_ids=0) + with check_warnings( + UserWarning, match="Time_range on UVCal does not cover all UVData times" + ): + _ = uvcalibrate(uvd, uvc, inplace=False, time_check=False) + + +def test_uvcalibrate_time_wrong_size(uvcalibrate_data): + uvd, uvc = uvcalibrate_data + + # downselect by one time to get error + uvc.select(times=uvc.time_array[1:]) + with pytest.raises( + ValueError, + match="The uvcal object has more than one time but fewer than the " + "number of unique times on the uvdata object.", + ): + uvcalibrate(uvd, uvc, inplace=False) + + +@pytest.mark.filterwarnings("ignore:The time_array and time_range attributes") +@pytest.mark.filterwarnings("ignore:The lst_array and lst_range attributes") +@pytest.mark.parametrize("time_range", [True, False]) +def test_uvcalibrate_single_time_types(uvcalibrate_data, time_range): + uvd, uvc = uvcalibrate_data + + # only one time + uvc.select(times=uvc.time_array[0]) + if time_range: + # check cal runs fine with a good time range + uvc.time_range = np.reshape( + np.array([np.min(uvd.time_array), np.max(uvd.time_array)]), (1, 2) + ) + uvc.set_lsts_from_time_array() + with pytest.raises( + ValueError, match="The time_array and time_range attributes are both set" + ): + uvdcal = uvcalibrate(uvd, uvc, inplace=False, time_check=False) + uvc.time_array = uvc.lst_array = None + uvdcal = uvcalibrate(uvd, uvc, inplace=False) + + key = (1, 13, "xx") + ant1 = (1, "Jxx") + ant2 = (13, "Jxx") + + np.testing.assert_array_almost_equal( + uvdcal.get_data(key), + uvd.get_data(key) / (uvc.get_gains(ant1) * uvc.get_gains(ant2).conj()).T, + ) + + # then change time_range to get warnings + uvc.time_range = np.array(uvc.time_range) + 1 + uvc.set_lsts_from_time_array() + + if time_range: + msg_start = "Time_range on UVCal 
does not cover all UVData times" + else: + msg_start = "Times do not match between UVData and UVCal" + err_msg = msg_start + ". Set time_check=False to apply calibration anyway." + warn_msg = [ + msg_start + " but time_check is False, so calibration will be applied anyway." + ] + + with pytest.raises(ValueError, match=err_msg): + uvcalibrate(uvd, uvc, inplace=False) + + if not time_range: + with check_warnings(UserWarning, match=warn_msg): + uvdcal = uvcalibrate(uvd, uvc, inplace=False, time_check=False) + + key = (1, 13, "xx") + ant1 = (1, "Jxx") + ant2 = (13, "Jxx") + + np.testing.assert_array_almost_equal( + uvdcal.get_data(key), + uvd.get_data(key) / (uvc.get_gains(ant1) * uvc.get_gains(ant2).conj()).T, + ) + + +@pytest.mark.filterwarnings("ignore:Combined frequencies are separated by more than") +def test_uvcalibrate_extra_cal_times(uvcalibrate_data): + uvd, uvc = uvcalibrate_data + + uvc2 = uvc.copy() + uvc2.time_array = uvc.time_array + 1 + uvc2.set_lsts_from_time_array() + uvc_use = uvc + uvc2 + + uvdcal = uvcalibrate(uvd, uvc_use, inplace=False) + + key = (1, 13, "xx") + ant1 = (1, "Jxx") + ant2 = (13, "Jxx") + + np.testing.assert_array_almost_equal( + uvdcal.get_data(key), + uvd.get_data(key) / (uvc.get_gains(ant1) * uvc.get_gains(ant2).conj()).T, + ) + + +def test_uvcalibrate_freq_mismatch(uvcalibrate_data): + uvd, uvc = uvcalibrate_data + + # change some frequencies to get warnings + maxf = np.max(uvc.freq_array) + uvc.freq_array[uvc.Nfreqs // 2 :] = uvc.freq_array[uvc.Nfreqs // 2 :] + maxf + expected_err = { + f"Frequency {this_freq} exists on UVData but not on UVCal." + for this_freq in uvd.freq_array[uvd.Nfreqs // 2 :] + } + # structured this way rather than using the match parameter because expected_err + # is a set. + with pytest.raises(ValueError) as errinfo: + uvcalibrate(uvd, uvc, inplace=False) + assert str(errinfo.value) in expected_err + + +@pytest.mark.filterwarnings("ignore:Combined frequencies are not evenly spaced.") +@pytest.mark.filterwarnings("ignore:Selected frequencies are not contiguous.") +def test_uvcalibrate_extra_cal_freqs(uvcalibrate_data): + uvd, uvc = uvcalibrate_data + + uvc2 = uvc.copy() + uvc2.freq_array = uvc.freq_array + np.max(uvc.freq_array) + uvc_use = uvc + uvc2 + + uvdcal = uvcalibrate(uvd, uvc_use, inplace=False) + + key = (1, 13, "xx") + ant1 = (1, "Jxx") + ant2 = (13, "Jxx") + + np.testing.assert_array_almost_equal( + uvdcal.get_data(key), + uvd.get_data(key) / (uvc.get_gains(ant1) * uvc.get_gains(ant2).conj()).T, + ) + + +def test_uvcalibrate_feedpol_mismatch(uvcalibrate_data): + uvd, uvc = uvcalibrate_data + + # downselect the feed polarization to get warnings + uvc.select(jones=utils.jstr2num("Jnn", x_orientation=uvc.telescope.x_orientation)) + with pytest.raises( + ValueError, match=("Feed polarization e exists on UVData but not on UVCal.") + ): + uvcalibrate(uvd, uvc, inplace=False) + + +def test_uvcalibrate_x_orientation_mismatch(uvcalibrate_data): + uvd, uvc = uvcalibrate_data + + # next check None uvd_x + uvd.telescope.x_orientation = None + uvc.telescope.x_orientation = "east" + with pytest.warns( + UserWarning, + match=r"UVData object does not have `x_orientation` specified but UVCal does", + ): + uvcalibrate(uvd, uvc, inplace=False) + + +def test_uvcalibrate_wideband_gain(uvcalibrate_data): + uvd, uvc = uvcalibrate_data + + uvc.flex_spw_id_array = None + uvc._set_wide_band() + uvc.spw_array = np.array([1, 2, 3]) + uvc.Nspws = 3 + uvc.gain_array = uvc.gain_array[:, 0:3, :, :] + uvc.flag_array = uvc.flag_array[:, 0:3, :, :] + 
uvc.quality_array = uvc.quality_array[:, 0:3, :, :] + uvc.total_quality_array = uvc.total_quality_array[0:3, :, :] + + uvc.freq_range = np.zeros((uvc.Nspws, 2), dtype=uvc.freq_array.dtype) + uvc.freq_range[0, :] = uvc.freq_array[[0, 2]] + uvc.freq_range[1, :] = uvc.freq_array[[2, 4]] + uvc.freq_range[2, :] = uvc.freq_array[[4, 6]] + + uvc.channel_width = None + uvc.freq_array = None + uvc.Nfreqs = 1 + + uvc.check() + with pytest.raises( + ValueError, + match="uvcalibrate currently does not support wide-band calibrations", + ): + uvcalibrate(uvd, uvc, inplace=False) + + +@pytest.mark.filterwarnings("ignore:Fixing auto-correlations to be be real-only") +@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values") +@pytest.mark.filterwarnings("ignore:Nfreqs will be required to be 1 for wide_band cals") +@pytest.mark.filterwarnings("ignore:telescope_location, antenna_positions") +def test_uvcalibrate_delay_multispw(uvcalibrate_uvdata_oldfiles): + uvd = uvcalibrate_uvdata_oldfiles + + uvc = UVCal() + uvc.read_calfits(os.path.join(DATA_PATH, "zen.2457698.40355.xx.delay.calfits")) + # downselect to match + uvc.select(times=uvc.time_array[3]) + uvc.gain_convention = "multiply" + + uvc.Nspws = 3 + uvc.spw_array = np.array([1, 2, 3]) + + # copy the delay array to the second SPW + uvc.delay_array = np.repeat(uvc.delay_array, uvc.Nspws, axis=1) + uvc.flag_array = np.repeat(uvc.flag_array, uvc.Nspws, axis=1) + uvc.quality_array = np.repeat(uvc.quality_array, uvc.Nspws, axis=1) + + uvc.freq_range = np.repeat(uvc.freq_range, uvc.Nspws, axis=0) + # Make the second & third SPWs be contiguous with a 10 MHz range + uvc.freq_range[1, 0] = uvc.freq_range[0, 1] + uvc.freq_range[1, 1] = uvc.freq_range[1, 0] + 10e6 + uvc.freq_range[2, 0] = uvc.freq_range[1, 1] + uvc.freq_range[2, 1] = uvc.freq_range[1, 1] + 10e6 + + uvc.check() + with pytest.raises( + ValueError, + match="uvcalibrate currently does not support multi spectral window delay " + "calibrations", + ): + uvcalibrate(uvd, uvc, inplace=False) diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py new file mode 100644 index 0000000000..4fb475d6bf --- /dev/null +++ b/tests/utils/__init__.py @@ -0,0 +1,4 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Tests for utility functions.""" diff --git a/tests/utils/conftest.py b/tests/utils/conftest.py new file mode 100644 index 0000000000..0e181c2ffb --- /dev/null +++ b/tests/utils/conftest.py @@ -0,0 +1,119 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""pytest fixtures for utils tests.""" +import numpy as np +import pytest +from astropy.coordinates import SkyCoord + +import pyuvdata.utils.phasing as phs_utils +from pyuvdata import utils +from pyuvdata.utils.phasing import hasmoon + +from .test_coordinates import selenoids + + +@pytest.fixture(scope="session") +def astrometry_args(): + default_args = { + "time_array": 2456789.0 + np.array([0.0, 1.25, 10.5, 100.75]), + "icrs_ra": 2.468, + "icrs_dec": 1.234, + "epoch": 2000.0, + "telescope_loc": (0.123, -0.456, 4321.0), + "telescope_frame": "itrs", + "pm_ra": 12.3, + "pm_dec": 45.6, + "vrad": 31.4, + "dist": 73.31, + "library": "erfa", + } + default_args["lst_array"] = utils.get_lst_for_time( + jd_array=default_args["time_array"], + latitude=default_args["telescope_loc"][0] * (180.0 / np.pi), + longitude=default_args["telescope_loc"][1] * (180.0 / 
np.pi), + altitude=default_args["telescope_loc"][2], + frame="itrs", + ) + + default_args["drift_coord"] = SkyCoord( + default_args["lst_array"], + [default_args["telescope_loc"][0]] * len(default_args["lst_array"]), + unit="rad", + ) + + if hasmoon: + default_args["moon_telescope_loc"] = ( + 0.6875 * np.pi / 180.0, + 24.433 * np.pi / 180.0, + 0.3, + ) + default_args["moon_lst_array"] = {} + default_args["moon_drift_coord"] = {} + for selenoid in selenoids: + default_args["moon_lst_array"][selenoid] = utils.get_lst_for_time( + jd_array=default_args["time_array"], + latitude=default_args["moon_telescope_loc"][0] * (180.0 / np.pi), + longitude=default_args["moon_telescope_loc"][1] * (180.0 / np.pi), + altitude=default_args["moon_telescope_loc"][2], + frame="mcmf", + ellipsoid=selenoid, + ) + default_args["moon_drift_coord"][selenoid] = SkyCoord( + default_args["moon_lst_array"][selenoid], + [default_args["moon_telescope_loc"][0]] + * len(default_args["moon_lst_array"][selenoid]), + unit="rad", + ) + + default_args["icrs_coord"] = SkyCoord( + default_args["icrs_ra"], default_args["icrs_dec"], unit="rad" + ) + + default_args["fk5_ra"], default_args["fk5_dec"] = ( + phs_utils.transform_sidereal_coords( + longitude=default_args["icrs_ra"], + latitude=default_args["icrs_dec"], + in_coord_frame="icrs", + out_coord_frame="fk5", + in_coord_epoch="J2000.0", + out_coord_epoch="J2000.0", + ) + ) + + # These are values calculated w/o the optional arguments, e.g. pm, vrad, dist + default_args["app_ra"], default_args["app_dec"] = phs_utils.transform_icrs_to_app( + time_array=default_args["time_array"], + ra=default_args["icrs_ra"], + dec=default_args["icrs_dec"], + telescope_loc=default_args["telescope_loc"], + ) + + default_args["app_coord"] = SkyCoord( + default_args["app_ra"], default_args["app_dec"], unit="rad" + ) + + if hasmoon: + default_args["moon_app_ra"] = {} + default_args["moon_app_dec"] = {} + default_args["moon_app_coord"] = {} + for selenoid in selenoids: + ( + default_args["moon_app_ra"][selenoid], + default_args["moon_app_dec"][selenoid], + ) = phs_utils.transform_icrs_to_app( + time_array=default_args["time_array"], + ra=default_args["icrs_ra"], + dec=default_args["icrs_dec"], + telescope_loc=default_args["moon_telescope_loc"], + telescope_frame="mcmf", + ellipsoid=selenoid, + ) + + default_args["moon_app_coord"][selenoid] = SkyCoord( + default_args["moon_app_ra"][selenoid], + default_args["moon_app_dec"][selenoid], + unit="rad", + ) + + yield default_args diff --git a/tests/utils/file_io/__init__.py b/tests/utils/file_io/__init__.py new file mode 100644 index 0000000000..b79a0ec501 --- /dev/null +++ b/tests/utils/file_io/__init__.py @@ -0,0 +1,4 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Tests for utility file_io functions.""" diff --git a/tests/utils/file_io/test_fits.py b/tests/utils/file_io/test_fits.py new file mode 100644 index 0000000000..e8c14672b8 --- /dev/null +++ b/tests/utils/file_io/test_fits.py @@ -0,0 +1,37 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Tests for helper utility functions.""" +import os + +from astropy.io import fits + +from pyuvdata import utils +from pyuvdata.data import DATA_PATH +from pyuvdata.testing import check_warnings + +casa_tutorial_uvfits = os.path.join( + DATA_PATH, "day2_TDEM0003_10s_norx_1src_1spw.uvfits" +) + + +def test_deprecated_utils_import(): + + 
with fits.open(casa_tutorial_uvfits, memmap=True) as hdu_list: + vis_hdu = hdu_list[0] + + with check_warnings( + DeprecationWarning, + match="The _fits_indexhdus function has moved, please import it as " + "pyuvdata.utils.file_io.fits._indexhdus. This warnings will become an " + "error in version 3.2", + ): + utils._fits_indexhdus(hdu_list) + + with check_warnings( + DeprecationWarning, + match="The _fits_gethduaxis function has moved, please import it as " + "pyuvdata.utils.file_io.fits._gethduaxis. This warnings will become an " + "error in version 3.2", + ): + utils._fits_gethduaxis(vis_hdu, 5) diff --git a/tests/utils/file_io/test_hdf5.py b/tests/utils/file_io/test_hdf5.py new file mode 100644 index 0000000000..3cd6e57174 --- /dev/null +++ b/tests/utils/file_io/test_hdf5.py @@ -0,0 +1,66 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Tests for hdf5 utility functions.""" + +import numpy as np +import pytest + +import pyuvdata.utils.file_io.hdf5 as hdf5_utils +from pyuvdata.utils import helpers + + +@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values") +def test_read_slicing(): + """Test HDF5 slicing helper functions""" + # check trivial slice representations + slices, _ = helpers._convert_to_slices([]) + assert slices == [slice(0, 0, 0)] + slices, _ = helpers._convert_to_slices(10) + assert slices == [slice(10, 11, 1)] + + # dataset shape checking + # check various kinds of indexing give the right answer + indices = [slice(0, 10), 0, [0, 1, 2], [0]] + dset = np.empty((100, 1, 1024, 2), dtype=np.float64) + shape, _ = hdf5_utils._get_dset_shape(dset, indices) + assert tuple(shape) == (10, 1, 3, 1) + + # dataset indexing + # check various kinds of indexing give the right answer + slices = [helpers._convert_to_slices(ind)[0] for ind in indices] + slices[1] = 0 + data = hdf5_utils._index_dset(dset, slices) + assert data.shape == tuple(shape) + + # Handling bool arrays + bool_arr = np.zeros((10000,), dtype=bool) + index_arr = np.arange(1, 10000, 2) + bool_arr[index_arr] = True + assert helpers._convert_to_slices(bool_arr) == helpers._convert_to_slices(index_arr) + assert helpers._convert_to_slices(bool_arr, return_index_on_fail=True) == ( + helpers._convert_to_slices(index_arr, return_index_on_fail=True) + ) + + # Index return on fail with two slices + index_arr[0] = 0 + bool_arr[0:2] = [True, False] + + for item in [index_arr, bool_arr]: + result, check = helpers._convert_to_slices( + item, max_nslice=1, return_index_on_fail=True + ) + assert not check + assert len(result) == 1 + assert result[0] is item + + # Check a more complicated pattern w/ just the max_slice_frac defined + index_arr = np.arange(0, 100) ** 2 + bool_arr[:] = False + bool_arr[index_arr] = True + + for item in [index_arr, bool_arr]: + result, check = helpers._convert_to_slices(item, return_index_on_fail=True) + assert not check + assert len(result) == 1 + assert result[0] is item diff --git a/tests/test_ms_utils.py b/tests/utils/file_io/test_ms.py similarity index 99% rename from tests/test_ms_utils.py rename to tests/utils/file_io/test_ms.py index 29d29c2aaf..df65d1da8c 100644 --- a/tests/test_ms_utils.py +++ b/tests/utils/file_io/test_ms.py @@ -8,7 +8,7 @@ import numpy as np import pytest -from pyuvdata import ms_utils +import pyuvdata.utils.file_io.ms as ms_utils from pyuvdata.data import DATA_PATH from pyuvdata.testing import check_warnings diff --git a/tests/utils/test_array_collapse.py 
b/tests/utils/test_array_collapse.py new file mode 100644 index 0000000000..26854bc387 --- /dev/null +++ b/tests/utils/test_array_collapse.py @@ -0,0 +1,405 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Testing for collapsing utilities.""" + +import numpy as np +import pytest + +from pyuvdata.testing import check_warnings +from pyuvdata.utils import array_collapse + + +def test_collapse_mean_no_return_no_weights(): + # Fake data + data = np.zeros((50, 25)) + for i in range(data.shape[1]): + data[:, i] = i * np.ones_like(data[:, i]) + out = array_collapse.collapse(data, "mean", axis=0) + out1 = array_collapse.mean_collapse(data, axis=0) + # Actual values are tested in test_mean_no_weights + assert np.array_equal(out, out1) + + +def test_collapse_mean_returned_no_weights(): + # Fake data + data = np.zeros((50, 25)) + for i in range(data.shape[1]): + data[:, i] = i * np.ones_like(data[:, i]) + out, wo = array_collapse.collapse(data, "mean", axis=0, return_weights=True) + out1, wo1 = array_collapse.mean_collapse(data, axis=0, return_weights=True) + # Actual values are tested in test_mean_no_weights + assert np.array_equal(out, out1) + assert np.array_equal(wo, wo1) + + +def test_collapse_mean_returned_with_weights(): + # Fake data + data = np.zeros((50, 25)) + for i in range(data.shape[1]): + data[:, i] = i * np.ones_like(data[:, i]) + 1 + w = 1.0 / data + out, wo = array_collapse.collapse( + data, "mean", weights=w, axis=0, return_weights=True + ) + out1, wo1 = array_collapse.mean_collapse( + data, weights=w, axis=0, return_weights=True + ) + # Actual values are tested in test_mean_weights + assert np.array_equal(out, out1) + assert np.array_equal(wo, wo1) + + +def test_collapse_mean_returned_with_weights_and_weights_square(): + # Fake data + data = np.zeros((50, 25)) + for i in range(data.shape[1]): + data[:, i] = i * np.ones_like(data[:, i]) + 1 + w = 1.0 / data + out, wo, wso = array_collapse.collapse( + data, "mean", weights=w, axis=0, return_weights=True, return_weights_square=True + ) + out1, wo1, wso1 = array_collapse.mean_collapse( + data, weights=w, axis=0, return_weights=True, return_weights_square=True + ) + # Actual values are tested in test_mean_weights + assert np.array_equal(out, out1) + assert np.array_equal(wo, wo1) + assert np.array_equal(wso, wso1) + + +def test_collapse_mean_returned_with_weights_square_no_return_weights(): + # Fake data + data = np.zeros((50, 25)) + for i in range(data.shape[1]): + data[:, i] = i * np.ones_like(data[:, i]) + 1 + w = 1.0 / data + out, wso = array_collapse.collapse( + data, + "mean", + weights=w, + axis=0, + return_weights=False, + return_weights_square=True, + ) + out1, wso1 = array_collapse.mean_collapse( + data, weights=w, axis=0, return_weights=False, return_weights_square=True + ) + # Actual values are tested in test_mean_weights + assert np.array_equal(out, out1) + assert np.array_equal(wso, wso1) + + +def test_collapse_absmean_no_return_no_weights(): + # Fake data + data = np.zeros((50, 25)) + for i in range(data.shape[1]): + data[:, i] = (-1) ** i * np.ones_like(data[:, i]) + out = array_collapse.collapse(data, "absmean", axis=0) + out1 = array_collapse.absmean_collapse(data, axis=0) + # Actual values are tested in test_absmean_no_weights + assert np.array_equal(out, out1) + + +def test_collapse_quadmean_no_return_no_weights(): + # Fake data + data = np.zeros((50, 25)) + for i in range(data.shape[1]): + data[:, i] = i * 
np.ones_like(data[:, i]) + out = array_collapse.collapse(data, "quadmean", axis=0) + out1 = array_collapse.quadmean_collapse(data, axis=0) + # Actual values are tested elsewhere? + assert np.array_equal(out, out1) + + +def test_collapse_quadmean_returned_with_weights_and_weights_square(): + # Fake data + data = np.zeros((50, 25)) + for i in range(data.shape[1]): + data[:, i] = i * np.ones_like(data[:, i]) + 1 + w = 1.0 / data + out, wo, wso = array_collapse.collapse( + data, + "quadmean", + weights=w, + axis=0, + return_weights=True, + return_weights_square=True, + ) + out1, wo1, wso1 = array_collapse.quadmean_collapse( + data, weights=w, axis=0, return_weights=True, return_weights_square=True + ) + # Actual values are tested elsewhere? + assert np.array_equal(out, out1) + assert np.array_equal(wo, wo1) + assert np.array_equal(wso, wso1) + + +def test_collapse_quadmean_returned_with_weights_square_no_return_weights(): + # Fake data + data = np.zeros((50, 25)) + for i in range(data.shape[1]): + data[:, i] = i * np.ones_like(data[:, i]) + 1 + w = 1.0 / data + out, wso = array_collapse.collapse( + data, + "quadmean", + weights=w, + axis=0, + return_weights=False, + return_weights_square=True, + ) + out1, wso1 = array_collapse.quadmean_collapse( + data, weights=w, axis=0, return_weights=False, return_weights_square=True + ) + # Actual values are tested elsewhere? + assert np.array_equal(out, out1) + assert np.array_equal(wso, wso1) + + +def test_collapse_quadmean_returned_without_weights_square_with_return_weights(): + # Fake data + data = np.zeros((50, 25)) + for i in range(data.shape[1]): + data[:, i] = i * np.ones_like(data[:, i]) + 1 + w = 1.0 / data + out, wo = array_collapse.collapse( + data, + "quadmean", + weights=w, + axis=0, + return_weights=True, + return_weights_square=False, + ) + out1, wo1 = array_collapse.quadmean_collapse( + data, weights=w, axis=0, return_weights=True, return_weights_square=False + ) + # Actual values are tested elsewhere? + assert np.array_equal(out, out1) + assert np.array_equal(wo, wo1) + + +def test_collapse_quadmean_returned_with_weights_square_without_weights(): + # Fake data + data = np.zeros((50, 25)) + for i in range(data.shape[1]): + data[:, i] = i * np.ones_like(data[:, i]) + 1 + w = 1.0 / data + out, wo = array_collapse.collapse( + data, + "quadmean", + weights=w, + axis=0, + return_weights=False, + return_weights_square=True, + ) + out1, wo1 = array_collapse.quadmean_collapse( + data, weights=w, axis=0, return_weights=False, return_weights_square=True + ) + # Actual values are tested elsewhere? 
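+    # (Here "quadmean" is the quadratic mean, sqrt(mean(|data|**2)); see
+    # test_quadmean below, which checks quadmean_collapse against that
+    # definition directly. This test only verifies that the collapse()
+    # wrapper and quadmean_collapse() return identical results.)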
+ assert np.array_equal(out, out1) + assert np.array_equal(wo, wo1) + + +def test_collapse_or_no_return_no_weights(): + # Fake data + data = np.zeros((50, 25), np.bool_) + data[0, 8] = True + o = array_collapse.collapse(data, "or", axis=0) + o1 = array_collapse.or_collapse(data, axis=0) + assert np.array_equal(o, o1) + + +def test_collapse_and_no_return_no_weights(): + # Fake data + data = np.zeros((50, 25), np.bool_) + data[0, :] = True + o = array_collapse.collapse(data, "and", axis=0) + o1 = array_collapse.and_collapse(data, axis=0) + assert np.array_equal(o, o1) + + +def test_collapse_error(): + pytest.raises(ValueError, array_collapse.collapse, np.ones((2, 3)), "fooboo") + + +def test_mean_no_weights(): + # Fake data + data = np.zeros((50, 25)) + for i in range(data.shape[1]): + data[:, i] = i * np.ones_like(data[:, i]) + out, wo = array_collapse.mean_collapse(data, axis=0, return_weights=True) + assert np.array_equal(out, np.arange(data.shape[1])) + assert np.array_equal(wo, data.shape[0] * np.ones(data.shape[1])) + out, wo = array_collapse.mean_collapse(data, axis=1, return_weights=True) + assert np.all(out == np.mean(np.arange(data.shape[1]))) + assert len(out) == data.shape[0] + assert np.array_equal(wo, data.shape[1] * np.ones(data.shape[0])) + out, wo = array_collapse.mean_collapse(data, return_weights=True) + assert out == np.mean(np.arange(data.shape[1])) + assert wo == data.size + out = array_collapse.mean_collapse(data) + assert out == np.mean(np.arange(data.shape[1])) + + +def test_mean_weights_and_weights_square(): + # Fake data + data = np.zeros((50, 25)) + for i in range(data.shape[1]): + data[:, i] = i * np.ones_like(data[:, i]) + 1 + w = 1.0 / data + out, wo, wso = array_collapse.mean_collapse( + data, weights=w, axis=0, return_weights=True, return_weights_square=True + ) + np.testing.assert_allclose(out * wo, data.shape[0]) + np.testing.assert_allclose( + wo, float(data.shape[0]) / (np.arange(data.shape[1]) + 1) + ) + np.testing.assert_allclose( + wso, float(data.shape[0]) / (np.arange(data.shape[1]) + 1) ** 2 + ) + out, wo, wso = array_collapse.mean_collapse( + data, weights=w, axis=1, return_weights=True, return_weights_square=True + ) + np.testing.assert_allclose(out * wo, data.shape[1]) + np.testing.assert_allclose(wo, np.sum(1.0 / (np.arange(data.shape[1]) + 1))) + np.testing.assert_allclose(wso, np.sum(1.0 / (np.arange(data.shape[1]) + 1) ** 2)) + + # Zero weights + w = np.ones_like(data) + w[0, :] = 0 + w[:, 0] = 0 + out, wo = array_collapse.mean_collapse(data, weights=w, axis=0, return_weights=True) + ans = np.arange(data.shape[1]).astype(np.float64) + 1 + ans[0] = np.inf + assert np.array_equal(out, ans) + ans = (data.shape[0] - 1) * np.ones(data.shape[1]) + ans[0] = 0 + assert np.all(wo == ans) + out, wo = array_collapse.mean_collapse(data, weights=w, axis=1, return_weights=True) + ans = np.mean(np.arange(data.shape[1])[1:] + 1) * np.ones(data.shape[0]) + ans[0] = np.inf + assert np.all(out == ans) + ans = (data.shape[1] - 1) * np.ones(data.shape[0]) + ans[0] = 0 + assert np.all(wo == ans) + + +def test_mean_infs(): + # Fake data + data = np.zeros((50, 25)) + for i in range(data.shape[1]): + data[:, i] = i * np.ones_like(data[:, i]) + data[:, 0] = np.inf + data[0, :] = np.inf + out, wo = array_collapse.mean_collapse(data, axis=0, return_weights=True) + ans = np.arange(data.shape[1]).astype(np.float64) + ans[0] = np.inf + assert np.array_equal(out, ans) + ans = (data.shape[0] - 1) * np.ones(data.shape[1]) + ans[0] = 0 + assert np.all(wo == ans) + out, wo = 
array_collapse.mean_collapse(data, axis=1, return_weights=True) + ans = np.mean(np.arange(data.shape[1])[1:]) * np.ones(data.shape[0]) + ans[0] = np.inf + assert np.all(out == ans) + ans = (data.shape[1] - 1) * np.ones(data.shape[0]) + ans[0] = 0 + assert np.all(wo == ans) + + +def test_absmean(): + # Fake data + data1 = np.zeros((50, 25)) + for i in range(data1.shape[1]): + data1[:, i] = (-1) ** i * np.ones_like(data1[:, i]) + data2 = np.ones_like(data1) + out1 = array_collapse.absmean_collapse(data1) + out2 = array_collapse.absmean_collapse(data2) + assert out1 == out2 + + +def test_quadmean(): + # Fake data + data = np.zeros((50, 25)) + for i in range(data.shape[1]): + data[:, i] = i * np.ones_like(data[:, i]) + o1, w1 = array_collapse.quadmean_collapse(data, return_weights=True) + o2, w2 = array_collapse.mean_collapse(np.abs(data) ** 2, return_weights=True) + o3 = array_collapse.quadmean_collapse(data) # without return_weights + o2 = np.sqrt(o2) + assert o1 == o2 + assert w1 == w2 + assert o1 == o3 + + +def test_or_collapse(): + # Fake data + data = np.zeros((50, 25), np.bool_) + data[0, 8] = True + o = array_collapse.or_collapse(data, axis=0) + ans = np.zeros(25, np.bool_) + ans[8] = True + assert np.array_equal(o, ans) + o = array_collapse.or_collapse(data, axis=1) + ans = np.zeros(50, np.bool_) + ans[0] = True + assert np.array_equal(o, ans) + o = array_collapse.or_collapse(data) + assert o + + +def test_or_collapse_weights(): + # Fake data + data = np.zeros((50, 25), np.bool_) + data[0, 8] = True + w = np.ones_like(data, np.float64) + o, wo = array_collapse.or_collapse(data, axis=0, weights=w, return_weights=True) + ans = np.zeros(25, np.bool_) + ans[8] = True + assert np.array_equal(o, ans) + assert np.array_equal(wo, np.ones_like(o, dtype=np.float64)) + w[0, 8] = 0.3 + with check_warnings(UserWarning, "Currently weights are"): + o = array_collapse.or_collapse(data, axis=0, weights=w) + assert np.array_equal(o, ans) + + +def test_or_collapse_errors(): + data = np.zeros(5) + pytest.raises(ValueError, array_collapse.or_collapse, data) + + +def test_and_collapse(): + # Fake data + data = np.zeros((50, 25), np.bool_) + data[0, :] = True + o = array_collapse.and_collapse(data, axis=0) + ans = np.zeros(25, np.bool_) + assert np.array_equal(o, ans) + o = array_collapse.and_collapse(data, axis=1) + ans = np.zeros(50, np.bool_) + ans[0] = True + assert np.array_equal(o, ans) + o = array_collapse.and_collapse(data) + assert not o + + +def test_and_collapse_weights(): + # Fake data + data = np.zeros((50, 25), np.bool_) + data[0, :] = True + w = np.ones_like(data, np.float64) + o, wo = array_collapse.and_collapse(data, axis=0, weights=w, return_weights=True) + ans = np.zeros(25, np.bool_) + assert np.array_equal(o, ans) + assert np.array_equal(wo, np.ones_like(o, dtype=np.float64)) + w[0, 8] = 0.3 + with check_warnings(UserWarning, "Currently weights are"): + o = array_collapse.and_collapse(data, axis=0, weights=w) + assert np.array_equal(o, ans) + + +def test_and_collapse_errors(): + data = np.zeros(5) + pytest.raises(ValueError, array_collapse.and_collapse, data) diff --git a/tests/utils/test_bls.py b/tests/utils/test_bls.py new file mode 100644 index 0000000000..ecc18cfcf3 --- /dev/null +++ b/tests/utils/test_bls.py @@ -0,0 +1,38 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Tests for baseline number utility functions.""" + +import numpy as np +import pytest + +import pyuvdata.utils.bls as 
bl_utils + + +class FakeClass: + def __init__(self): + pass + + +def test_parse_ants_error(): + test_obj = FakeClass() + with pytest.raises( + ValueError, + match=( + "UVBased objects must have all the following attributes in order " + "to call 'parse_ants': " + ), + ): + bl_utils.parse_ants(test_obj, ant_str="") + + +def test_antnums_to_baseline_miriad_convention(): + ant1 = np.array([1, 2, 3, 1, 1, 1, 255, 256]) # Ant1 array should be 1-based + ant2 = np.array([1, 2, 3, 254, 255, 256, 1, 2]) # Ant2 array should be 1-based + bl_gold = np.array([257, 514, 771, 510, 511, 67840, 65281, 65538], dtype="uint64") + + n_ant = 256 + bl = bl_utils.antnums_to_baseline( + ant1, ant2, Nants_telescope=n_ant, use_miriad_convention=True + ) + np.testing.assert_allclose(bl, bl_gold) diff --git a/tests/utils/test_coordinates.py b/tests/utils/test_coordinates.py new file mode 100644 index 0000000000..1326f7ae34 --- /dev/null +++ b/tests/utils/test_coordinates.py @@ -0,0 +1,749 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Tests for coordinate utility functions.""" +import os +import re + +import numpy as np +import pytest +from astropy import units +from astropy.coordinates import EarthLocation + +from pyuvdata import utils +from pyuvdata.data import DATA_PATH +from pyuvdata.utils.coordinates import hasmoon + +selenoids = ["SPHERE", "GSFC", "GRAIL23", "CE-1-LAM-GEO"] + +if hasmoon: + from lunarsky import MoonLocation + + frame_selenoid = [["itrs", None]] + for snd in selenoids: + frame_selenoid.append(["mcmf", snd]) +else: + frame_selenoid = [["itrs", None]] + + +# Earth +ref_latlonalt = (-26.7 * np.pi / 180.0, 116.7 * np.pi / 180.0, 377.8) +ref_xyz = (-2562123.42683, 5094215.40141, -2848728.58869) + +# Moon +ref_latlonalt_moon = (0.6875 * np.pi / 180.0, 24.433 * np.pi / 180.0, 0.3) +ref_xyz_moon = { + "SPHERE": (1581421.43506347, 718463.12201783, 20843.2071012), + "GSFC": (1582332.08831085, 718876.84524219, 20805.18709001), + "GRAIL23": (1581855.3916402, 718660.27490195, 20836.2107652), + "CE-1-LAM-GEO": (1581905.99108228, 718683.26297605, 20806.77965693), +} + + +@pytest.fixture(scope="module") +def enu_ecef_info(): + """Some setup info for ENU/ECEF calculations.""" + center_lat = -30.7215261207 * np.pi / 180.0 + center_lon = 21.4283038269 * np.pi / 180.0 + center_alt = 1051.7 + # fmt: off + lats = (np.array([-30.72218216, -30.72138101, -30.7212785, -30.7210011, + -30.72159853, -30.72206199, -30.72174614, -30.72188775, + -30.72183915, -30.72100138]) + * np.pi / 180.0) + lons = (np.array([21.42728211, 21.42811727, 21.42814544, 21.42795736, + 21.42686739, 21.42918772, 21.42785662, 21.4286408, + 21.42750933, 21.42896567]) + * np.pi / 180.0) + alts = np.array([1052.25, 1051.35, 1051.2, 1051., 1051.45, 1052.04, 1051.68, + 1051.87, 1051.77, 1051.06]) + # used pymap3d, which implements matlab code, as a reference. 
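+    # (A sketch of how to regenerate these reference values, assuming pymap3d
+    # is installed and using its default WGS84 ellipsoid with angles passed
+    # in degrees:
+    #     import pymap3d
+    #     x, y, z = pymap3d.geodetic2ecef(
+    #         np.rad2deg(lats), np.rad2deg(lons), alts
+    #     )
+    # )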
+ x = [5109327.46674067, 5109339.76407785, 5109344.06370947, + 5109365.11297147, 5109372.115673, 5109266.94314734, + 5109329.89620962, 5109295.13656657, 5109337.21810468, + 5109329.85680612] + + y = [2005130.57953031, 2005221.35184577, 2005225.93775268, + 2005214.8436201, 2005105.42364036, 2005302.93158317, + 2005190.65566222, 2005257.71335575, 2005157.78980089, + 2005304.7729239] + + z = [-3239991.24516348, -3239914.4185286, -3239904.57048431, + -3239878.02656316, -3239935.20415493, -3239979.68381865, + -3239949.39266985, -3239962.98805772, -3239958.30386264, + -3239878.08403833] + + east = [-97.87631659, -17.87126443, -15.17316938, -33.19049252, -137.60520964, + 84.67346748, -42.84049408, 32.28083937, -76.1094745, 63.40285935] + north = [-72.7437482, 16.09066646, 27.45724573, 58.21544651, -8.02964511, + -59.41961437, -24.39698388, -40.09891961, -34.70965816, 58.18410876] + up = [0.54883333, -0.35004539, -0.50007736, -0.70035299, -0.25148791, 0.33916067, + -0.02019057, 0.16979185, 0.06945155, -0.64058124] + # fmt: on + yield ( + center_lat, + center_lon, + center_alt, + lats, + lons, + alts, + x, + y, + z, + east, + north, + up, + ) + + +@pytest.fixture(scope="module") +def enu_mcmf_info(): + center_lat, center_lon, center_alt = [ + 0.6875 * np.pi / 180.0, + 24.433 * np.pi / 180.0, + 0.3, + ] + + # Creating a test pattern of a circle of antennas, radius 500 m in ENU coordinates. + angs = np.linspace(0, 2 * np.pi, 10, endpoint=False) + enus = 500 * np.array([np.cos(angs), np.sin(angs), [0] * angs.size]) + east = enus[0].tolist() + north = enus[1].tolist() + up = enus[2].tolist() + + # fmt: off + lats = { + "SPHERE": np.deg2rad( + [ + 0.68749997, 0.69719361, 0.70318462, 0.70318462, 0.69719361, + 0.68749997, 0.67780635, 0.67181538, 0.67181538, 0.67780635 + ] + ), + "GSFC": np.deg2rad( + [ + 0.68749997, 0.69721132, 0.70321328, 0.70321328, 0.69721132, + 0.68749997, 0.67778864, 0.67178672, 0.67178672, 0.67778864 + ] + ), + "GRAIL23": np.deg2rad( + [ + 0.68749997, 0.69719686, 0.70318988, 0.70318988, 0.69719686, + 0.68749997, 0.6778031 , 0.67181011, 0.67181011, 0.6778031 + ] + ), + "CE-1-LAM-GEO": np.deg2rad( + [ + 0.68749997, 0.69721058, 0.70321207, 0.70321207, 0.69721058, + 0.68749997, 0.67778938, 0.67178792, 0.67178792, 0.67778938 + ] + ), + } + lons = { + "SPHERE": np.deg2rad( + [ + 24.44949297, 24.44634312, 24.43809663, 24.42790337, 24.41965688, + 24.41650703, 24.41965693, 24.42790341, 24.43809659, 24.44634307 + ] + ), + "GSFC": np.deg2rad( + [ + 24.44948348, 24.44633544, 24.43809369, 24.42790631, 24.41966456, + 24.41651652, 24.41966461, 24.42790634, 24.43809366, 24.44633539 + ] + ), + "GRAIL23": np.deg2rad( + [ + 24.44948845, 24.44633946, 24.43809523, 24.42790477, 24.41966054, + 24.41651155, 24.41966059, 24.42790481, 24.43809519, 24.44633941 + ] + ), + "CE-1-LAM-GEO": np.deg2rad( + [ + 24.44948792, 24.44633904, 24.43809507, 24.42790493, 24.41966096, + 24.41651208, 24.41966102, 24.42790497, 24.43809503, 24.44633898 + ] + ), + } + alts = { + "SPHERE": [ + 0.371959, 0.371959, 0.371959, 0.371959, 0.371959, 0.371959, + 0.371959, 0.371959, 0.371959, 0.371959 + ], + "GSFC": [ + 0.37191758, 0.37197732, 0.37207396, 0.37207396, 0.37197732, + 0.37191758, 0.37197732, 0.37207396, 0.37207396, 0.37197732 + ], + "GRAIL23": [ + 0.37193926, 0.37195442, 0.37197896, 0.37197896, 0.37195442, + 0.37193926, 0.37195442, 0.37197896, 0.37197896, 0.37195442 + ], + "CE-1-LAM-GEO": [ + 0.37193696, 0.37198809, 0.37207083, 0.37207083, 0.37198809, + 0.37193696, 0.37198809, 0.37207083, 0.37207083, 0.37198809 + ], + } + x = 
{ + "SPHERE": [ + 1581214.62062477, 1581250.9080965 , 1581352.33107362, + 1581480.14942611, 1581585.54088769, 1581628.24950218, + 1581591.96203044, 1581490.53905332, 1581362.72070084, + 1581257.32923925 + ], + "GSFC": [ + 1582125.27387214, 1582161.56134388, 1582262.984321, + 1582390.80267348, 1582496.19413507, 1582538.90274956, + 1582502.61527782, 1582401.1923007 , 1582273.37394822, + 1582167.98248663 + ], + "GRAIL23": [ + 1581648.57720149, 1581684.86467323, 1581786.28765035, + 1581914.10600283, 1582019.49746442, 1582062.2060789 , + 1582025.91860717, 1581924.49563005, 1581796.67727756, + 1581691.28581598 + ], + "CE-1-LAM-GEO": [ + 1581699.17664357, 1581735.46411531, 1581836.88709243, + 1581964.70544491, 1582070.0969065 , 1582112.80552098, + 1582076.51804925, 1581975.09507213, 1581847.27671964, + 1581741.88525806 + ] + } + + y = { + "SPHERE": [ + 718918.34480718, 718829.94638063, 718601.4335154 , 718320.09035913, + 718093.38043501, 718007.89922848, 718096.29765503, 718324.81052027, + 718606.15367654, 718832.86360065 + ], + "GSFC": [ + 719332.06803154, 719243.66960499, 719015.15673976, 718733.81358349, + 718507.10365937, 718421.62245284, 718510.02087939, 718738.53374463, + 719019.8769009 , 719246.58682501 + ], + "GRAIL23": [ + 719115.4976913 , 719027.09926475, 718798.58639952, 718517.24324325, + 718290.53331913, 718205.0521126 , 718293.45053915, 718521.96340439, + 718803.30656066, 719030.01648477 + ], + "CE-1-LAM-GEO": [ + 719138.4857654 , 719050.08733885, 718821.57447362, 718540.23131734, + 718313.52139323, 718228.0401867 , 718316.43861325, 718544.95147849, + 718826.29463476, 719053.00455887 + ], + } + z = { + "SPHERE": [ + 20843.2071012 , 21137.07857037, 21318.70112664, 21318.70112664, + 21137.07857037, 20843.2071012 , 20549.33563204, 20367.71307577, + 20367.71307577, 20549.33563204 + ], + "GSFC": [ + 20805.18709001, 21099.05855918, 21280.68111545, 21280.68111545, + 21099.05855918, 20805.18709001, 20511.31562084, 20329.69306457, + 20329.69306457, 20511.31562084 + ], + "GRAIL23": [ + 20836.2107652 , 21130.08223437, 21311.70479064, 21311.70479064, + 21130.08223437, 20836.2107652 , 20542.33929603, 20360.71673976, + 20360.71673976, 20542.33929603 + ], + "CE-1-LAM-GEO": [ + 20806.77965693, 21100.6511261 , 21282.27368237, 21282.27368237, + 21100.6511261 , 20806.77965693, 20512.90818776, 20331.28563149, + 20331.28563149, 20512.90818776 + ], + } + + # fmt: on + yield ( + center_lat, + center_lon, + center_alt, + lats, + lons, + alts, + x, + y, + z, + east, + north, + up, + ) + + +def test_XYZ_from_LatLonAlt(): + """Test conversion from lat/lon/alt to ECEF xyz with reference values.""" + out_xyz = utils.XYZ_from_LatLonAlt( + ref_latlonalt[0], ref_latlonalt[1], ref_latlonalt[2] + ) + # Got reference by forcing http://www.oc.nps.edu/oc2902w/coord/llhxyz.htm + # to give additional precision. 
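+    # For context, the geodetic -> ECEF relations being exercised here are the
+    # standard ellipsoidal ones (for WGS84, a = 6378137.0 m, e^2 ~ 6.69438e-3):
+    #   N(lat) = a / sqrt(1 - e^2 * sin(lat)**2)
+    #   x = (N + alt) * cos(lat) * cos(lon)
+    #   y = (N + alt) * cos(lat) * sin(lon)
+    #   z = (N * (1 - e^2) + alt) * sin(lat)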
+ np.testing.assert_allclose(ref_xyz, out_xyz, rtol=0, atol=1e-3) + + # test error checking + with pytest.raises( + ValueError, + match="latitude, longitude and altitude must all have the same length", + ): + utils.XYZ_from_LatLonAlt( + ref_latlonalt[0], + ref_latlonalt[1], + np.array([ref_latlonalt[2], ref_latlonalt[2]]), + ) + + with pytest.raises( + ValueError, + match="latitude, longitude and altitude must all have the same length", + ): + utils.XYZ_from_LatLonAlt( + ref_latlonalt[0], + np.array([ref_latlonalt[1], ref_latlonalt[1]]), + ref_latlonalt[2], + ) + + +def test_LatLonAlt_from_XYZ(): + """Test conversion from ECEF xyz to lat/lon/alt with reference values.""" + out_latlonalt = utils.LatLonAlt_from_XYZ(ref_xyz) + # Got reference by forcing http://www.oc.nps.edu/oc2902w/coord/llhxyz.htm + # to give additional precision. + np.testing.assert_allclose(ref_latlonalt, out_latlonalt, rtol=0, atol=1e-3) + pytest.raises(ValueError, utils.LatLonAlt_from_XYZ, ref_latlonalt) + + # test passing multiple values + xyz_mult = np.stack((np.array(ref_xyz), np.array(ref_xyz))) + lat_vec, lon_vec, alt_vec = utils.LatLonAlt_from_XYZ(xyz_mult) + np.testing.assert_allclose( + ref_latlonalt, (lat_vec[1], lon_vec[1], alt_vec[1]), rtol=0, atol=1e-3 + ) + # check error if array transposed + with pytest.raises( + ValueError, + match=re.escape("The expected shape of ECEF xyz array is (Npts, 3)."), + ): + utils.LatLonAlt_from_XYZ(xyz_mult.T) + + # check error if only 2 coordinates + with pytest.raises( + ValueError, + match=re.escape("The expected shape of ECEF xyz array is (Npts, 3)."), + ): + utils.LatLonAlt_from_XYZ(xyz_mult[:, 0:2]) + + # test error checking + pytest.raises(ValueError, utils.LatLonAlt_from_XYZ, ref_xyz[0:1]) + + +@pytest.mark.skipif(not hasmoon, reason="lunarsky not installed") +@pytest.mark.parametrize("selenoid", selenoids) +def test_XYZ_from_LatLonAlt_mcmf(selenoid): + """Test MCMF lat/lon/alt to xyz with reference values.""" + lat, lon, alt = ref_latlonalt_moon + out_xyz = utils.XYZ_from_LatLonAlt(lat, lon, alt, frame="mcmf", ellipsoid=selenoid) + np.testing.assert_allclose(ref_xyz_moon[selenoid], out_xyz, rtol=0, atol=1e-3) + + # test default ellipsoid + if selenoid == "SPHERE": + out_xyz = utils.XYZ_from_LatLonAlt(lat, lon, alt, frame="mcmf") + np.testing.assert_allclose(ref_xyz_moon[selenoid], out_xyz, rtol=0, atol=1e-3) + + # Test errors with invalid frame + with pytest.raises( + ValueError, match="No cartesian to spherical transform defined for frame" + ): + utils.XYZ_from_LatLonAlt(lat, lon, alt, frame="undef") + + +@pytest.mark.skipif(not hasmoon, reason="lunarsky not installed") +@pytest.mark.parametrize("selenoid", selenoids) +def test_LatLonAlt_from_XYZ_mcmf(selenoid): + """Test MCMF xyz to lat/lon/alt with reference values.""" + out_latlonalt = utils.LatLonAlt_from_XYZ( + ref_xyz_moon[selenoid], frame="mcmf", ellipsoid=selenoid + ) + np.testing.assert_allclose(ref_latlonalt_moon, out_latlonalt, rtol=0, atol=1e-3) + + # test default ellipsoid + if selenoid == "SPHERE": + out_latlonalt = utils.LatLonAlt_from_XYZ(ref_xyz_moon[selenoid], frame="mcmf") + np.testing.assert_allclose(ref_latlonalt_moon, out_latlonalt, rtol=0, atol=1e-3) + + # Test errors with invalid frame + with pytest.raises( + ValueError, match="Cannot check acceptability for unknown frame" + ): + out_latlonalt = utils.LatLonAlt_from_XYZ(ref_xyz_moon[selenoid], frame="undef") + with pytest.raises( + ValueError, match="No spherical to cartesian transform defined for frame" + ): + utils.LatLonAlt_from_XYZ( + 
ref_xyz_moon[selenoid], frame="undef", check_acceptability=False
+        )


+@pytest.mark.skipif(hasmoon, reason="Test only when lunarsky not installed.")
+def test_no_moon():
+    """Check errors when calling functions with MCMF without lunarsky."""
+    msg = "Need to install `lunarsky` package to work with MCMF frame."
+    with pytest.raises(ValueError, match=msg):
+        utils.LatLonAlt_from_XYZ(ref_xyz_moon["SPHERE"], frame="mcmf")
+    lat, lon, alt = ref_latlonalt_moon
+    with pytest.raises(ValueError, match=msg):
+        utils.XYZ_from_LatLonAlt(lat, lon, alt, frame="mcmf")
+    with pytest.raises(ValueError, match=msg):
+        utils.get_lst_for_time(
+            [2451545.0], latitude=0, longitude=0, altitude=0, frame="mcmf"
+        )
+    with pytest.raises(ValueError, match=msg):
+        utils.ENU_from_ECEF(
+            None, latitude=0.0, longitude=1.0, altitude=10.0, frame="mcmf"
+        )
+    with pytest.raises(ValueError, match=msg):
+        utils.ECEF_from_ENU(
+            None, latitude=0.0, longitude=1.0, altitude=10.0, frame="mcmf"
+        )


+def test_lla_xyz_lla_roundtrip():
+    """Test that roundtripping an array yields the same values."""
+    np.random.seed(0)
+    lats = -30.721 + np.random.normal(0, 0.0005, size=30)
+    lons = 21.428 + np.random.normal(0, 0.0005, size=30)
+    alts = np.random.uniform(1051, 1054, size=30)
+    lats *= np.pi / 180.0
+    lons *= np.pi / 180.0
+    xyz = utils.XYZ_from_LatLonAlt(lats, lons, alts)
+    lats_new, lons_new, alts_new = utils.LatLonAlt_from_XYZ(xyz)
+    np.testing.assert_allclose(lats_new, lats)
+    np.testing.assert_allclose(lons_new, lons)
+    np.testing.assert_allclose(alts_new, alts)


+def test_xyz_from_latlonalt(enu_ecef_info):
+    """Test calculating xyz from lat/lon/alt."""
+    (center_lat, center_lon, center_alt, lats, lons, alts, x, y, z, east, north, up) = (
+        enu_ecef_info
+    )
+    xyz = utils.XYZ_from_LatLonAlt(lats, lons, alts)
+    np.testing.assert_allclose(np.stack((x, y, z), axis=1), xyz, atol=1e-3)


+def test_enu_from_ecef(enu_ecef_info):
+    """Test calculating ENU from ECEF coordinates."""
+    (center_lat, center_lon, center_alt, lats, lons, alts, x, y, z, east, north, up) = (
+        enu_ecef_info
+    )
+    xyz = utils.XYZ_from_LatLonAlt(lats, lons, alts)

+    enu = utils.ENU_from_ECEF(
+        xyz, latitude=center_lat, longitude=center_lon, altitude=center_alt
+    )
+    np.testing.assert_allclose(np.stack((east, north, up), axis=1), enu, atol=1e-3)

+    enu2 = utils.ENU_from_ECEF(
+        xyz,
+        center_loc=EarthLocation.from_geodetic(
+            lat=center_lat * units.rad,
+            lon=center_lon * units.rad,
+            height=center_alt * units.m,
+        ),
+    )
+    np.testing.assert_allclose(enu, enu2)


+@pytest.mark.skipif(not hasmoon, reason="lunarsky not installed")
+@pytest.mark.parametrize("selenoid", selenoids)
+def test_enu_from_mcmf(enu_mcmf_info, selenoid):
+    (center_lat, center_lon, center_alt, lats, lons, alts, x, y, z, east, north, up) = (
+        enu_mcmf_info
+    )
+    xyz = utils.XYZ_from_LatLonAlt(
+        lats[selenoid], lons[selenoid], alts[selenoid], frame="mcmf", ellipsoid=selenoid
+    )
+    enu = utils.ENU_from_ECEF(
+        xyz,
+        latitude=center_lat,
+        longitude=center_lon,
+        altitude=center_alt,
+        frame="mcmf",
+        ellipsoid=selenoid,
+    )

+    np.testing.assert_allclose(np.stack((east, north, up), axis=1), enu, atol=1e-3)

+    enu2 = utils.ENU_from_ECEF(
+        xyz,
+        center_loc=MoonLocation.from_selenodetic(
+            lat=center_lat * units.rad,
+            lon=center_lon * units.rad,
+            height=center_alt * units.m,
+            ellipsoid=selenoid,
+        ),
+    )
+    np.testing.assert_allclose(enu, enu2, atol=1e-3)


+def test_invalid_frame():
+    """Test error is raised when an invalid frame name is passed in."""
+    with pytest.raises(
+ ValueError, match='No ENU_from_ECEF transform defined for frame "UNDEF".' + ): + utils.ENU_from_ECEF( + np.zeros((2, 3)), latitude=0.0, longitude=0.0, altitude=0.0, frame="undef" + ) + with pytest.raises( + ValueError, match='No ECEF_from_ENU transform defined for frame "UNDEF".' + ): + utils.ECEF_from_ENU( + np.zeros((2, 3)), latitude=0.0, longitude=0.0, altitude=0.0, frame="undef" + ) + + with pytest.raises( + ValueError, match="center_loc is not a supported type. It must be one of " + ): + utils.ENU_from_ECEF( + np.zeros((2, 3)), center_loc=units.Quantity(np.array([0, 0, 0]) * units.m) + ) + + with pytest.raises( + ValueError, match="center_loc is not a supported type. It must be one of " + ): + utils.ECEF_from_ENU( + np.zeros((2, 3)), center_loc=units.Quantity(np.array([0, 0, 0]) * units.m) + ) + + +@pytest.mark.parametrize("shape_type", ["transpose", "Nblts,2", "Nblts,1"]) +def test_enu_from_ecef_shape_errors(enu_ecef_info, shape_type): + """Test ENU_from_ECEF input shape errors.""" + (center_lat, center_lon, center_alt, lats, lons, alts, x, y, z, east, north, up) = ( + enu_ecef_info + ) + xyz = utils.XYZ_from_LatLonAlt(lats, lons, alts) + if shape_type == "transpose": + xyz = xyz.T.copy() + elif shape_type == "Nblts,2": + xyz = xyz.copy()[:, 0:2] + elif shape_type == "Nblts,1": + xyz = xyz.copy()[:, 0:1] + + # check error if array transposed + with pytest.raises( + ValueError, + match=re.escape("The expected shape of ECEF xyz array is (Npts, 3)."), + ): + utils.ENU_from_ECEF( + xyz, longitude=center_lat, latitude=center_lon, altitude=center_alt + ) + + +def test_enu_from_ecef_magnitude_error(enu_ecef_info): + """Test ENU_from_ECEF input magnitude errors.""" + (center_lat, center_lon, center_alt, lats, lons, alts, x, y, z, east, north, up) = ( + enu_ecef_info + ) + xyz = utils.XYZ_from_LatLonAlt(lats, lons, alts) + # error checking + with pytest.raises( + ValueError, + match="ITRS vector magnitudes must be on the order of the radius of the earth", + ): + utils.ENU_from_ECEF( + xyz / 2.0, latitude=center_lat, longitude=center_lon, altitude=center_alt + ) + + +def test_enu_from_ecef_error(): + # check error no center location info passed + with pytest.raises( + ValueError, + match="Either center_loc or all of latitude, longitude and altitude " + "must be passed.", + ): + utils.ENU_from_ECEF(np.array([0, 0, 0])) + + with pytest.raises( + ValueError, + match="Either center_loc or all of latitude, longitude and altitude " + "must be passed.", + ): + utils.ECEF_from_ENU(np.array([0, 0, 0])) + + +@pytest.mark.parametrize(["frame", "selenoid"], frame_selenoid) +def test_ecef_from_enu_roundtrip(enu_ecef_info, enu_mcmf_info, frame, selenoid): + """Test ECEF_from_ENU values.""" + (center_lat, center_lon, center_alt, lats, lons, alts, x, y, z, east, north, up) = ( + enu_ecef_info if frame == "itrs" else enu_mcmf_info + ) + if frame == "mcmf": + lats = lats[selenoid] + lons = lons[selenoid] + alts = alts[selenoid] + loc_obj = MoonLocation.from_selenodetic( + lat=center_lat * units.rad, + lon=center_lon * units.rad, + height=center_alt * units.m, + ellipsoid=selenoid, + ) + else: + loc_obj = EarthLocation.from_geodetic( + lat=center_lat * units.rad, + lon=center_lon * units.rad, + height=center_alt * units.m, + ) + + xyz = utils.XYZ_from_LatLonAlt(lats, lons, alts, frame=frame, ellipsoid=selenoid) + enu = utils.ENU_from_ECEF( + xyz, + latitude=center_lat, + longitude=center_lon, + altitude=center_alt, + frame=frame, + ellipsoid=selenoid, + ) + # check that a round trip gives the original value. 
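+    # For context, ENU <-> ECEF is (up to the frame-center offset) a pure rotation:
+    #   enu = R(lat, lon) @ (xyz - xyz_center), with
+    #   R = [[-sin(lon),           cos(lon),           0       ],
+    #        [-sin(lat)*cos(lon), -sin(lat)*sin(lon), cos(lat)],
+    #        [ cos(lat)*cos(lon),  cos(lat)*sin(lon), sin(lat)]]
+    # ECEF_from_ENU applies the transpose and adds the center back, so the round
+    # trip should recover xyz to within floating point error.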
+ xyz_from_enu = utils.ECEF_from_ENU( + enu, + latitude=center_lat, + longitude=center_lon, + altitude=center_alt, + frame=frame, + ellipsoid=selenoid, + ) + np.testing.assert_allclose(xyz, xyz_from_enu, atol=1e-3) + + xyz_from_enu2 = utils.ECEF_from_ENU(enu, center_loc=loc_obj) + np.testing.assert_allclose(xyz_from_enu, xyz_from_enu2, atol=1e-3) + + if selenoid == "SPHERE": + enu = utils.ENU_from_ECEF( + xyz, + latitude=center_lat, + longitude=center_lon, + altitude=center_alt, + frame=frame, + ) + # check that a round trip gives the original value. + xyz_from_enu = utils.ECEF_from_ENU( + enu, + latitude=center_lat, + longitude=center_lon, + altitude=center_alt, + frame=frame, + ) + np.testing.assert_allclose(xyz, xyz_from_enu, atol=1e-3) + + +@pytest.mark.parametrize("shape_type", ["transpose", "Nblts,2", "Nblts,1"]) +def test_ecef_from_enu_shape_errors(enu_ecef_info, shape_type): + (center_lat, center_lon, center_alt, lats, lons, alts, x, y, z, east, north, up) = ( + enu_ecef_info + ) + xyz = utils.XYZ_from_LatLonAlt(lats, lons, alts) + enu = utils.ENU_from_ECEF( + xyz, latitude=center_lat, longitude=center_lon, altitude=center_alt + ) + if shape_type == "transpose": + enu = enu.copy().T + elif shape_type == "Nblts,2": + enu = enu.copy()[:, 0:2] + elif shape_type == "Nblts,1": + enu = enu.copy()[:, 0:1] + + # check error if array transposed + with pytest.raises( + ValueError, match=re.escape("The expected shape of the ENU array is (Npts, 3).") + ): + utils.ECEF_from_ENU( + enu, latitude=center_lat, longitude=center_lon, altitude=center_alt + ) + + +def test_ecef_from_enu_single(enu_ecef_info): + """Test single coordinate transform.""" + (center_lat, center_lon, center_alt, lats, lons, alts, x, y, z, east, north, up) = ( + enu_ecef_info + ) + xyz = utils.XYZ_from_LatLonAlt(lats, lons, alts) + # check passing a single value + enu_single = utils.ENU_from_ECEF( + xyz[0, :], latitude=center_lat, longitude=center_lon, altitude=center_alt + ) + + np.testing.assert_allclose( + np.array((east[0], north[0], up[0])), enu_single, atol=1e-3 + ) + + +def test_ecef_from_enu_single_roundtrip(enu_ecef_info): + """Test single coordinate roundtrip.""" + (center_lat, center_lon, center_alt, lats, lons, alts, x, y, z, east, north, up) = ( + enu_ecef_info + ) + xyz = utils.XYZ_from_LatLonAlt(lats, lons, alts) + # check passing a single value + enu = utils.ENU_from_ECEF( + xyz, latitude=center_lat, longitude=center_lon, altitude=center_alt + ) + + enu_single = utils.ENU_from_ECEF( + xyz[0, :], latitude=center_lat, longitude=center_lon, altitude=center_alt + ) + np.testing.assert_allclose( + np.array((east[0], north[0], up[0])), enu[0, :], atol=1e-3 + ) + + xyz_from_enu = utils.ECEF_from_ENU( + enu_single, latitude=center_lat, longitude=center_lon, altitude=center_alt + ) + np.testing.assert_allclose(xyz[0, :], xyz_from_enu, atol=1e-3) + + +def test_mwa_ecef_conversion(): + """ + Test based on comparing the antenna locations in a Cotter uvfits file to + the antenna locations in MWA_tools. 
+ """ + + test_data_file = os.path.join(DATA_PATH, "mwa128_ant_layouts.npz") + f = np.load(test_data_file) + + # From the STABXYZ table in a cotter-generated uvfits file, obsid = 1066666832 + xyz = f["stabxyz"] + # From the East/North/Height columns in a cotter-generated metafits file, + # obsid = 1066666832 + enh = f["ENH"] + # From a text file antenna_locations.txt in MWA_Tools/scripts + txt_topo = f["txt_topo"] + + # From the unphased uvw coordinates of obsid 1066666832, positions relative + # to antenna 0 + # these aren't used in the current test, but are interesting and might help + # with phasing diagnosis in the future + uvw_topo = f["uvw_topo"] + # Sky coordinates are flipped for uvw derived values + uvw_topo = -uvw_topo + uvw_topo += txt_topo[0] + + # transpose these arrays to get them into the right shape + txt_topo = txt_topo.T + uvw_topo = uvw_topo.T + + # ARRAYX, ARRAYY, ARRAYZ in ECEF frame from Cotter file + arrcent = f["arrcent"] + lat, lon, alt = utils.LatLonAlt_from_XYZ(arrcent) + + # The STABXYZ coordinates are defined with X through the local meridian, + # so rotate back to the prime meridian + new_xyz = utils.ECEF_from_rotECEF(xyz.T, lon) + # add in array center to get real ECEF + ecef_xyz = new_xyz + arrcent + + enu = utils.ENU_from_ECEF(ecef_xyz, latitude=lat, longitude=lon, altitude=alt) + + np.testing.assert_allclose(enu, enh) + + # test other direction of ECEF rotation + rot_xyz = utils.rotECEF_from_ECEF(new_xyz, lon) + np.testing.assert_allclose(rot_xyz.T, xyz) diff --git a/tests/utils/test_helpers.py b/tests/utils/test_helpers.py new file mode 100644 index 0000000000..787d5b5f16 --- /dev/null +++ b/tests/utils/test_helpers.py @@ -0,0 +1,294 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Tests for helper utility functions.""" + +import numpy as np +import pytest +from astropy.coordinates import EarthLocation + +from pyuvdata import utils +from pyuvdata.testing import check_warnings +from pyuvdata.utils import helpers + +from .test_coordinates import hasmoon + +if hasmoon: + from lunarsky import MoonLocation + + +@pytest.mark.parametrize( + "filename1,filename2,answer", + [ + (["foo.uvh5"], ["bar.uvh5"], ["foo.uvh5", "bar.uvh5"]), + (["foo.uvh5", "bar.uvh5"], ["foo.uvh5"], ["foo.uvh5", "bar.uvh5"]), + (["foo.uvh5"], None, ["foo.uvh5"]), + (None, ["bar.uvh5"], ["bar.uvh5"]), + (None, None, None), + ], +) +def test_combine_filenames(filename1, filename2, answer): + combined_filenames = helpers._combine_filenames(filename1, filename2) + if answer is None: + assert combined_filenames is answer + else: + # use sets to test equality so that order doesn't matter + assert set(combined_filenames) == set(answer) + + return + + +def test_deprecated_utils_import(): + with check_warnings( + DeprecationWarning, + match="The _check_histories function has moved, please import it from " + "pyuvdata.utils.helpers. 
This warnings will become an error in version 3.2", + ): + utils._check_histories("foo", "foo") + + +@pytest.mark.parametrize( + "blt_order", + [ + ("time", "baseline"), + ("baseline", "time"), + ("ant1", "time"), + ("ant2", "time"), + ("time", "ant1"), + ("time", "ant2"), + ("baseline",), + ("time",), + ("ant1",), + ("ant2",), + (), + ([0, 2, 6, 4, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17]), + ], +) +def test_determine_blt_order(blt_order): + nant = 3 + ntime = 2 + + def getbl(ant1, ant2): + return utils.antnums_to_baseline(ant1, ant2, Nants_telescope=nant) + + def getantbls(): + # Arrange them backwards so by default they are NOT sorted + ant1 = np.arange(nant, dtype=int)[::-1] + ant2 = ant1.copy() + ANT1, ANT2 = np.meshgrid(ant1, ant2) + + return ANT1.flatten(), ANT2.flatten() + + def gettimebls(blt_order): + ant1, ant2 = getantbls() + time_array = np.linspace( + 2000, 1000, ntime + ) # backwards so not sorted by default + + TIME = np.tile(time_array, len(ant1)) + ANT1 = np.repeat(ant1, len(time_array)) + ANT2 = np.repeat(ant2, len(time_array)) + BASELINE = getbl(ANT1, ANT2) + + lc = locals() + if isinstance(blt_order, list): + inds = np.array(blt_order) + elif blt_order: + inds = np.lexsort(tuple(lc[k.upper()] for k in blt_order[::-1])) + else: + inds = np.arange(len(TIME)) + + return TIME[inds], ANT1[inds], ANT2[inds], BASELINE[inds] + + # time, bl + TIME, ANT1, ANT2, BL = gettimebls(blt_order) + order = helpers.determine_blt_order( + time_array=TIME, + ant_1_array=ANT1, + ant_2_array=ANT2, + baseline_array=BL, + Nbls=nant**2, + Ntimes=ntime, + ) + if isinstance(blt_order, list): + assert order is None + elif blt_order: + assert order == blt_order + else: + assert order is None + + is_rect, time_first = helpers.determine_rectangularity( + time_array=TIME, baseline_array=BL, nbls=nant**2, ntimes=ntime + ) + if blt_order in [("ant1", "time"), ("ant2", "time")]: + # sorting by ant1/ant2 then time means we split the other ant into a + # separate group + assert not is_rect + assert not time_first + elif isinstance(blt_order, list): + assert not is_rect + assert not time_first + else: + assert is_rect + assert time_first == ( + (len(blt_order) == 2 and blt_order[-1] == "time") + or (len(blt_order) == 1 and blt_order[0] != "time") + or not blt_order # we by default move time first (backwards, but still) + ) + + +def test_determine_blt_order_size_1(): + times = np.array([2458119.5]) + ant1 = np.array([0]) + ant2 = np.array([1]) + bl = utils.antnums_to_baseline(ant1, ant2, Nants_telescope=2) + + order = helpers.determine_blt_order( + time_array=times, + ant_1_array=ant1, + ant_2_array=ant2, + baseline_array=bl, + Nbls=1, + Ntimes=1, + ) + assert order == ("baseline", "time") + is_rect, time_first = helpers.determine_rectangularity( + time_array=times, baseline_array=bl, nbls=1, ntimes=1 + ) + assert is_rect + assert time_first + + +def test_determine_rect_time_first(): + times = np.linspace(2458119.5, 2458120.5, 10) + ant1 = np.arange(3) + ant2 = np.arange(3) + ANT1, ANT2 = np.meshgrid(ant1, ant2) + bls = utils.antnums_to_baseline(ANT1.flatten(), ANT2.flatten(), Nants_telescope=3) + + rng = np.random.default_rng(12345) + + TIME = np.tile(times, len(bls)) + BL = np.concatenate([rng.permuted(bls) for i in range(len(times))]) + + is_rect, time_first = helpers.determine_rectangularity( + time_array=TIME, baseline_array=BL, nbls=9, ntimes=10 + ) + assert not is_rect + + # now, permute time instead of bls + TIME = np.concatenate([rng.permuted(times) for i in range(len(bls))]) + BL = 
np.tile(bls, len(times)) + is_rect, time_first = helpers.determine_rectangularity( + time_array=TIME, baseline_array=BL, nbls=9, ntimes=10 + ) + assert not is_rect + + TIME = np.array([1000.0, 1000.0, 2000.0, 1000.0]) + BLS = np.array([0, 0, 1, 0]) + + is_rect, time_first = helpers.determine_rectangularity( + time_array=TIME, baseline_array=BLS, nbls=2, ntimes=2 + ) + assert not is_rect + + +@pytest.mark.parametrize("err_state", ["err", "warn", "none"]) +@pytest.mark.parametrize("tel_loc", ["Center", "Moon", "Earth", "Space"]) +@pytest.mark.parametrize("check_frame", ["Moon", "Earth"]) +@pytest.mark.parametrize("del_tel_loc", [False, None, True]) +def test_check_surface_based_positions(err_state, tel_loc, check_frame, del_tel_loc): + tel_loc_dict = { + "Center": np.array([0, 0, 0]), + "Moon": np.array([0, 0, 1.737e6]), + "Earth": np.array([0, 6.37e6, 0]), + "Space": np.array([4.22e7, 0, 0]), + } + tel_frame_dict = {"Moon": "mcmf", "Earth": "itrs"} + + ant_pos = np.array( + [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]] + ) + if del_tel_loc: + ant_pos += tel_loc_dict[tel_loc] + + fail_type = err_msg = err_type = None + err_check = check_warnings + if (tel_loc != check_frame) and (err_state != "none"): + if tel_loc == "Center": + fail_type = "below" + elif tel_loc == "Space": + fail_type = "above" + else: + fail_type = "above" if tel_loc == "Earth" else "below" + + if fail_type is not None: + err_msg = ( + f"{tel_frame_dict[check_frame]} position vector magnitudes must be " + f"on the order of the radius of {check_frame} -- they appear to lie well " + f"{fail_type} this." + ) + if err_state == "err": + err_type = ValueError + err_check = pytest.raises + else: + err_type = UserWarning + + with err_check(err_type, match=err_msg): + status = helpers.check_surface_based_positions( + telescope_loc=None if (del_tel_loc) else tel_loc_dict[tel_loc], + antenna_positions=None if (del_tel_loc is None) else ant_pos, + telescope_frame=tel_frame_dict[check_frame], + raise_error=err_state == "err", + raise_warning=err_state == "warn", + ) + + assert (err_state == "err") or (status == (tel_loc == check_frame)) + + +@pytest.mark.skipif(not hasmoon, reason="lunarsky not installed") +@pytest.mark.parametrize("tel_loc", ["Earth", "Moon"]) +@pytest.mark.parametrize("check_frame", ["Earth", "Moon"]) +def test_check_surface_based_positions_earthmoonloc(tel_loc, check_frame): + frame = "mcmf" if (check_frame == "Moon") else "itrs" + + if tel_loc == "Earth": + loc = EarthLocation.from_geodetic(0, 0, 0) + else: + loc = MoonLocation.from_selenodetic(0, 0, 0) + + if tel_loc == check_frame: + assert helpers.check_surface_based_positions( + telescope_loc=loc, telescope_frame=frame + ) + else: + with pytest.raises(ValueError, match=(f"{frame} position vector")): + helpers.check_surface_based_positions( + telescope_loc=[loc.x.value, loc.y.value, loc.z.value], + telescope_frame=frame, + ) + + +def test_slicify(): + assert helpers.slicify(None) is None + assert helpers.slicify(slice(None)) == slice(None) + assert helpers.slicify([]) is None + assert helpers.slicify([1, 2, 3]) == slice(1, 4, 1) + assert helpers.slicify([1]) == slice(1, 2, 1) + assert helpers.slicify([0, 2, 4]) == slice(0, 5, 2) + assert helpers.slicify([0, 1, 2, 7]) == [0, 1, 2, 7] + + +@pytest.mark.parametrize( + "obj1,obj2,union_result,interset_result,diff_result", + [ + [[1, 2, 3], [3, 4, 5], [1, 2, 3, 4, 5], [3], [1, 2]], # Partial overlap + [[1, 2], [1, 2], [1, 2], [1, 2], []], # Full overlap + [[1, 3, 5], [2, 4, 6], [1, 2, 3, 4, 5, 
6], [], [1, 3, 5]],  # No overlap
+        [[1, 2], None, [1, 2], [1, 2], [1, 2]],  # Nones
+    ],
+)
+def test_sorted_unique_ops(obj1, obj2, union_result, interset_result, diff_result):
+    assert helpers._sorted_unique_union(obj1, obj2) == union_result
+    assert helpers._sorted_unique_intersection(obj1, obj2) == interset_result
+    assert helpers._sorted_unique_difference(obj1, obj2) == diff_result
diff --git a/tests/utils/test_lst.py b/tests/utils/test_lst.py
new file mode 100644
index 0000000000..447de98550
--- /dev/null
+++ b/tests/utils/test_lst.py
@@ -0,0 +1,211 @@
+# -*- mode: python; coding: utf-8 -*-
+# Copyright (c) 2024 Radio Astronomy Software Group
+# Licensed under the 2-clause BSD License
+"""Tests for LST utility functions."""

+import numpy as np
+import pytest
+from astropy import units
+from astropy.coordinates import EarthLocation

+from pyuvdata import utils
+from pyuvdata.utils.lst import hasmoon

+if hasmoon:
+    from lunarsky import MoonLocation
+    from lunarsky import Time as LTime

+from .test_coordinates import selenoids


+def test_astrometry_lst(astrometry_args):
+    """
+    Check for consistency between astrometry libraries when calculating LAST

+    This test evaluates consistency in calculating local apparent sidereal time when
+    using the different astrometry libraries available in pyuvdata, namely: astropy,
+    pyERFA, and python-novas. Between these three, we expect agreement within 6 µs in
+    most instances, although for pyuvdata we tolerate differences of up to ~60 µs
+    (which translates to 1 mas in sky position error) since we don't expect to need
+    astrometry better than this.
+    """
+    pytest.importorskip("novas")
+    pytest.importorskip("novas_de405")
+    astrometry_list = ["erfa", "astropy", "novas"]
+    lst_results = [None, None, None, None]
+    # These values were independently calculated using erfa v1.7.2, which at the
+    # time of coding agreed to < 50 µs with astropy v4.2.1 and novas 3.1.1.5. We
+    # use those values here as a sort of history check to make sure that something
+    # hasn't changed in the underlying astrometry libraries without being caught
+    lst_results[3] = np.array(
+        [0.8506741803481069, 2.442973468758589, 4.1728965710160555, 1.0130589895999587]
+    )

+    for idx, name in enumerate(astrometry_list):
+        # Note that the units aren't right here (missing a rad-> deg conversion), but
+        # the above values were calculated using the arguments below.
+        lst_results[idx] = utils.get_lst_for_time(
+            jd_array=astrometry_args["time_array"],
+            latitude=astrometry_args["telescope_loc"][0],
+            longitude=astrometry_args["telescope_loc"][1],
+            altitude=astrometry_args["telescope_loc"][2],
+            astrometry_library=name,
+        )

+    for idx in range(len(lst_results) - 1):
+        for jdx in range(idx + 1, len(lst_results)):
+            alpha_time = lst_results[idx] * units.rad
+            beta_time = lst_results[jdx] * units.rad
+            assert np.all(np.abs(alpha_time - beta_time).to_value("mas") < 1.0)


+@pytest.mark.parametrize("astrometry_lib", ["astropy", "novas", "erfa"])
+def test_lst_for_time_smooth(astrometry_lib):
+    """
+    Test that LSTs are smooth and do not have large discontinuities.

+    Inspired by a bug found by the HERA validation team in our original implementation
+    using the erfa library.
+ """ + if astrometry_lib == "novas": + pytest.importorskip("novas") + pytest.importorskip("novas_de405") + + hera_loc = EarthLocation.from_geodetic( + lat=-30.72152612068957, lon=21.428303826863015, height=1051.6900000218302 + ) + + start_time = 2458101.5435486115 + n_times = 28728 + integration_time = 1.0 + + daysperhour = 1 / 24.0 + hourspersec = 1 / 60.0**2 + dayspersec = daysperhour * hourspersec + inttime_days = integration_time * dayspersec + duration = inttime_days * n_times + end_time = start_time + duration - inttime_days + times = np.linspace(start_time, end_time + inttime_days, n_times, endpoint=False) + + uv_lsts = utils.get_lst_for_time( + times, + latitude=hera_loc.lat.deg, + longitude=hera_loc.lon.deg, + altitude=hera_loc.height.value, + astrometry_library=astrometry_lib, + frame="itrs", + ) + + dtimes = times - int(times[0]) + poly_fit = np.poly1d(np.polyfit(dtimes, uv_lsts, 2)) + diff_poly = uv_lsts - poly_fit(dtimes) + assert np.max(np.abs(diff_poly)) < 1e-10 + + +@pytest.mark.parametrize("astrolib", ["novas", "astropy", "erfa"]) +def test_lst_for_time_float_vs_array(astrometry_args, astrolib): + """ + Test for equality when passing a single float vs an ndarray (of length 1) when + calling get_lst_for_time. + """ + if astrolib == "novas": + pytest.importorskip("novas") + pytest.importorskip("novas_de405") + + r2d = 180.0 / np.pi + + lst_array = utils.get_lst_for_time( + jd_array=np.array(astrometry_args["time_array"][0]), + latitude=astrometry_args["telescope_loc"][0] * r2d, + longitude=astrometry_args["telescope_loc"][1] * r2d, + altitude=astrometry_args["telescope_loc"][2], + astrometry_library=astrolib, + ) + + check_lst = utils.get_lst_for_time( + jd_array=astrometry_args["time_array"][0], + telescope_loc=np.multiply(astrometry_args["telescope_loc"], [r2d, r2d, 1]), + astrometry_library=astrolib, + ) + + assert np.all(lst_array == check_lst) + + +def test_get_lst_for_time_errors(astrometry_args): + with pytest.raises( + ValueError, + match="Requested coordinate transformation library is not supported, please " + "select either 'erfa' or 'astropy' for astrometry_library.", + ): + utils.get_lst_for_time( + jd_array=np.array(astrometry_args["time_array"][0]), + latitude=astrometry_args["telescope_loc"][0] * (180.0 / np.pi), + longitude=astrometry_args["telescope_loc"][1] * (180.0 / np.pi), + altitude=astrometry_args["telescope_loc"][2], + astrometry_library="foo", + ) + + with pytest.raises( + ValueError, + match="Cannot set both telescope_loc and latitude/longitude/altitude", + ): + utils.get_lst_for_time( + np.array(astrometry_args["time_array"][0]), + latitude=astrometry_args["telescope_loc"][0] * (180.0 / np.pi), + telescope_loc=astrometry_args["telescope_loc"][2], + ) + + +@pytest.mark.filterwarnings("ignore:The get_frame_attr_names") +@pytest.mark.skipif(not hasmoon, reason="lunarsky not installed") +@pytest.mark.parametrize("selenoid", selenoids) +def test_lst_for_time_moon(astrometry_args, selenoid): + """Test the get_lst_for_time function with MCMF frame""" + from lunarsky import SkyCoord as LSkyCoord + + lat, lon, alt = (0.6875, 24.433, 0) # Degrees + + # check error if try to use the wrong astrometry library + with pytest.raises( + NotImplementedError, + match="The MCMF frame is only supported with the 'astropy' astrometry library", + ): + lst_array = utils.get_lst_for_time( + jd_array=astrometry_args["time_array"], + latitude=lat, + longitude=lon, + altitude=alt, + frame="mcmf", + ellipsoid=selenoid, + astrometry_library="novas", + ) + + lst_array = 
utils.get_lst_for_time( + jd_array=astrometry_args["time_array"], + latitude=lat, + longitude=lon, + altitude=alt, + frame="mcmf", + ellipsoid=selenoid, + ) + + # Verify that lsts are close to local zenith RA + loc = MoonLocation.from_selenodetic(lon, lat, alt, ellipsoid=selenoid) + for ii, tt in enumerate( + LTime(astrometry_args["time_array"], format="jd", scale="utc", location=loc) + ): + src = LSkyCoord(alt="90d", az="0d", frame="lunartopo", obstime=tt, location=loc) + # TODO: would be nice to get this down to utils.RADIAN_TOL + # seems like maybe the ellipsoid isn't being used properly? + assert np.isclose(lst_array[ii], src.transform_to("icrs").ra.rad, atol=1e-5) + + # test default ellipsoid + if selenoid == "SPHERE": + lst_array_default = utils.get_lst_for_time( + jd_array=astrometry_args["time_array"], + latitude=lat, + longitude=lon, + altitude=alt, + frame="mcmf", + ) + np.testing.assert_allclose(lst_array, lst_array_default) diff --git a/tests/utils/test_phasing.py b/tests/utils/test_phasing.py new file mode 100644 index 0000000000..556615b302 --- /dev/null +++ b/tests/utils/test_phasing.py @@ -0,0 +1,1960 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Tests for phasing utility functions.""" +import os +import re + +import numpy as np +import pytest +from astropy import units +from astropy.coordinates import Angle, EarthLocation, SkyCoord +from astropy.time import Time + +import pyuvdata.utils.phasing as phs_utils +from pyuvdata import UVData, utils +from pyuvdata.data import DATA_PATH +from pyuvdata.utils.phasing import hasmoon + +from .test_coordinates import frame_selenoid + +if hasmoon: + from lunarsky import MoonLocation + from lunarsky import Time as LTime + + +@pytest.fixture +def vector_list(): + x_vecs = np.array([[1, 0, 0], [2, 0, 0]], dtype=float).T + y_vecs = np.array([[0, 1, 0], [0, 2, 0]], dtype=float).T + z_vecs = np.array([[0, 0, 1], [0, 0, 2]], dtype=float).T + test_vecs = np.array([[1, 1, 1], [2, 2, 2]], dtype=float).T + + yield x_vecs, y_vecs, z_vecs, test_vecs + + +@pytest.fixture +def calc_uvw_args(): + default_args = { + "app_ra": np.zeros(3), + "app_dec": np.zeros(3) + 1.0, + "frame_pa": np.zeros(3) + 1e-3, + "lst_array": np.zeros(3) + np.pi, + "use_ant_pos": True, + "uvw_array": np.array([[1, -1, 0], [0, -1, 1], [-1, 0, 1]], dtype=float), + "antenna_positions": np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=float), + "antenna_numbers": [1, 2, 3], + "ant_1_array": np.array([1, 1, 2]), + "ant_2_array": np.array([2, 3, 3]), + "old_app_ra": np.zeros(3) + np.pi, + "old_app_dec": np.zeros(3), + "old_frame_pa": np.zeros(3), + "telescope_lat": 1.0, + "telescope_lon": 0.0, + "to_enu": False, + "from_enu": False, + } + yield default_args + + +@pytest.mark.skipif(hasmoon, reason="Test only when lunarsky not installed.") +def test_no_moon(): + """Check errors when calling functions with MCMF without lunarsky.""" + msg = "Need to install `lunarsky` package to work with MCMF frame." 
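+    # A minimal sketch of the guard these calls are expected to hit (the exact
+    # implementation in pyuvdata.utils.phasing is assumed, not shown here):
+    #   if telescope_frame == "mcmf" and not hasmoon:
+    #       raise ValueError(msg)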
+ with pytest.raises(ValueError, match=msg): + phs_utils.transform_icrs_to_app( + time_array=[2451545.0], + ra=0, + dec=0, + telescope_loc=(0, 0, 0), + telescope_frame="mcmf", + ) + with pytest.raises(ValueError, match=msg): + phs_utils.transform_app_to_icrs( + time_array=[2451545.0], + app_ra=0, + app_dec=0, + telescope_loc=(0, 0, 0), + telescope_frame="mcmf", + ) + with pytest.raises(ValueError, match=msg): + phs_utils.calc_app_coords(lon_coord=0.0, lat_coord=0.0, telescope_frame="mcmf") + + +@pytest.mark.parametrize( + "lon_array,lat_array,msg", + ( + [0.0, np.array([0.0]), "lon_array and lat_array must either both be floats or"], + [np.array([0.0, 1.0]), np.array([0.0]), "lon_array and lat_array must have "], + ), +) +def test_polar2_to_cart3_arg_errs(lon_array, lat_array, msg): + """ + Test that bad arguments to polar2_to_cart3 throw appropriate errors. + """ + with pytest.raises(ValueError, match=msg): + phs_utils.polar2_to_cart3(lon_array=lon_array, lat_array=lat_array) + + +@pytest.mark.parametrize( + "input1,msg", + ( + [0.0, "xyz_array must be an ndarray."], + [np.array(0.0), "xyz_array must have ndim > 0"], + [np.array([0.0]), "xyz_array must be length 3"], + ), +) +def test_cart3_to_polar2_arg_errs(input1, msg): + """ + Test that bad arguments to cart3_to_polar2 throw appropriate errors. + """ + with pytest.raises(ValueError, match=msg): + phs_utils.cart3_to_polar2(input1) + + +@pytest.mark.parametrize( + "input1,input2,input3,msg", + ( + [np.zeros((1, 3, 1)), np.zeros((1, 3, 3)), 2, "rot_matrix must be of shape "], + [np.zeros((1, 2, 1)), np.zeros((1, 3, 3)), 1, "Misshaped xyz_array - expected"], + [np.zeros((2, 1)), np.zeros((1, 3, 3)), 1, "Misshaped xyz_array - expected"], + [np.zeros((2)), np.zeros((1, 3, 3)), 1, "Misshaped xyz_array - expected shape"], + ), +) +def test_rotate_matmul_wrapper_arg_errs(input1, input2, input3, msg): + """ + Test that bad arguments to _rotate_matmul_wrapper throw appropriate errors. + """ + with pytest.raises(ValueError, match=msg): + phs_utils._rotate_matmul_wrapper( + xyz_array=input1, rot_matrix=input2, n_rot=input3 + ) + + +def test_cart_to_polar_roundtrip(): + """ + Test that polar->cart coord transformation is the inverse of cart->polar. + """ + # Basic round trip with vectors + assert phs_utils.cart3_to_polar2( + phs_utils.polar2_to_cart3(lon_array=0.0, lat_array=0.0) + ) == (0.0, 0.0) + + +def test_rotate_one_axis(vector_list): + """ + Tests some basic vector rotation operations with a single axis rotation. 
+ """ + # These tests are used to verify the basic functionality of the primary + # functions used to perform rotations + x_vecs, y_vecs, z_vecs, test_vecs = vector_list + + # Test no-ops w/ 0 deg rotations + assert np.all( + phs_utils._rotate_one_axis(xyz_array=x_vecs, rot_amount=0.0, rot_axis=0) + == x_vecs + ) + assert np.all( + phs_utils._rotate_one_axis(xyz_array=x_vecs[:, 0], rot_amount=0.0, rot_axis=1) + == x_vecs[np.newaxis, :, 0, np.newaxis] + ) + assert np.all( + phs_utils._rotate_one_axis( + xyz_array=x_vecs[:, :, np.newaxis], rot_amount=0.0, rot_axis=2 + ) + == x_vecs[:, :, np.newaxis] + ) + + # Test no-ops w/ None + assert np.all( + phs_utils._rotate_one_axis(xyz_array=test_vecs, rot_amount=None, rot_axis=1) + == test_vecs + ) + assert np.all( + phs_utils._rotate_one_axis( + xyz_array=test_vecs[:, 0], rot_amount=None, rot_axis=2 + ) + == test_vecs[np.newaxis, :, 0, np.newaxis] + ) + assert np.all( + phs_utils._rotate_one_axis( + xyz_array=test_vecs[:, :, np.newaxis], rot_amount=None, rot_axis=0 + ) + == test_vecs[:, :, np.newaxis] + ) + + # Test some basic equivalencies to make sure rotations are working correctly + assert np.allclose( + x_vecs, phs_utils._rotate_one_axis(xyz_array=x_vecs, rot_amount=1.0, rot_axis=0) + ) + assert np.allclose( + y_vecs, phs_utils._rotate_one_axis(xyz_array=y_vecs, rot_amount=2.0, rot_axis=1) + ) + assert np.allclose( + z_vecs, phs_utils._rotate_one_axis(xyz_array=z_vecs, rot_amount=3.0, rot_axis=2) + ) + + assert np.allclose( + x_vecs, + phs_utils._rotate_one_axis( + xyz_array=y_vecs, rot_amount=-np.pi / 2.0, rot_axis=2 + ), + ) + assert np.allclose( + y_vecs, + phs_utils._rotate_one_axis( + xyz_array=x_vecs, rot_amount=np.pi / 2.0, rot_axis=2 + ), + ) + assert np.allclose( + x_vecs, + phs_utils._rotate_one_axis( + xyz_array=z_vecs, rot_amount=np.pi / 2.0, rot_axis=1 + ), + ) + assert np.allclose( + z_vecs, + phs_utils._rotate_one_axis( + xyz_array=x_vecs, rot_amount=-np.pi / 2.0, rot_axis=1 + ), + ) + assert np.allclose( + y_vecs, + phs_utils._rotate_one_axis( + xyz_array=z_vecs, rot_amount=-np.pi / 2.0, rot_axis=0 + ), + ) + assert np.allclose( + z_vecs, + phs_utils._rotate_one_axis( + xyz_array=y_vecs, rot_amount=np.pi / 2.0, rot_axis=0 + ), + ) + + assert np.all( + np.equal( + phs_utils._rotate_one_axis(xyz_array=test_vecs, rot_amount=1.0, rot_axis=2), + phs_utils._rotate_one_axis( + xyz_array=test_vecs, rot_amount=1.0, rot_axis=np.array([2]) + ), + ) + ) + + # Testing a special case, where the xyz_array vectors are reshaped if there + # is only a single rotation matrix used (helps speed things up significantly) + mod_vec = x_vecs.T.reshape((2, 3, 1)) + assert np.all( + phs_utils._rotate_one_axis(xyz_array=mod_vec, rot_amount=1.0, rot_axis=0) + == mod_vec + ) + + +def test_rotate_two_axis(vector_list): + """ + Tests some basic vector rotation operations with a double axis rotation. 
+    """
+    x_vecs, y_vecs, z_vecs, test_vecs = vector_list

+    # These tests are used to verify the basic functionality of the primary
+    # functions used to perform two-axis rotations
+    assert np.allclose(
+        x_vecs,
+        phs_utils._rotate_two_axis(
+            xyz_array=x_vecs,
+            rot_amount1=2 * np.pi,
+            rot_amount2=1.0,
+            rot_axis1=1,
+            rot_axis2=0,
+        ),
+    )
+    assert np.allclose(
+        y_vecs,
+        phs_utils._rotate_two_axis(
+            xyz_array=y_vecs,
+            rot_amount1=2 * np.pi,
+            rot_amount2=2.0,
+            rot_axis1=2,
+            rot_axis2=1,
+        ),
+    )
+    assert np.allclose(
+        z_vecs,
+        phs_utils._rotate_two_axis(
+            xyz_array=z_vecs,
+            rot_amount1=2 * np.pi,
+            rot_amount2=3.0,
+            rot_axis1=0,
+            rot_axis2=2,
+        ),
+    )

+    # Do one more test, which verifies that we can flip our (1, 1, 1) test vector to
+    # the position at (-1, -1, -1)
+    mod_vec = test_vecs.T.reshape((2, 3, 1))
+    assert np.allclose(
+        phs_utils._rotate_two_axis(
+            xyz_array=mod_vec,
+            rot_amount1=np.pi,
+            rot_amount2=np.pi / 2.0,
+            rot_axis1=0,
+            rot_axis2=1,
+        ),
+        -mod_vec,
+    )


+@pytest.mark.parametrize(
+    "rot1,axis1,rot2,rot3,axis2,axis3",
+    (
+        [2.0, 0, 1.0, 1.0, 0, 0],
+        [2.0, 0, 2.0, 0.0, 0, 1],
+        [2.0, 0, None, 2.0, 1, 0],
+        [0.0, 0, None, 0.0, 1, 2],
+    ),
+)
+def test_compare_one_to_two_axis(vector_list, rot1, axis1, rot2, rot3, axis2, axis3):
+    """
+    Check that one-axis and two-axis rotations provide the same values when the
+    two-axis rotations are fundamentally rotating around a single axis.
+    """
+    x_vecs, y_vecs, z_vecs, test_vecs = vector_list
+    # If performing two rots on the same axis, that should be identical to using
+    # a single rot (with the rot angle equal to the sum of the two rot angles)
+    assert np.all(
+        np.equal(
+            phs_utils._rotate_one_axis(
+                xyz_array=test_vecs, rot_amount=rot1, rot_axis=axis1
+            ),
+            phs_utils._rotate_two_axis(
+                xyz_array=test_vecs,
+                rot_amount1=rot2,
+                rot_amount2=rot3,
+                rot_axis1=axis2,
+                rot_axis2=axis3,
+            ),
+        )
+    )


+@pytest.mark.parametrize(
+    "arg_dict,err",
+    (
+        [
+            {"lst_array": None, "to_enu": True, "use_ant_pos": False},
+            (ValueError, "Must include lst_array to calculate baselines in ENU"),
+        ],
+        [
+            {"lst_array": None, "to_enu": True, "telescope_lat": None},
+            (ValueError, "Must include telescope_lat to calculate baselines"),
+        ],
+        [
+            {"lst_array": None},
+            (ValueError, "Must include lst_array if use_ant_pos=True and not"),
+        ],
+        [
+            {"app_ra": None, "frame_pa": None},
+            (ValueError, "Must include both app_ra and app_dec, or frame_pa to"),
+        ],
+        [
+            {"app_dec": None, "frame_pa": None},
+            (ValueError, "Must include both app_ra and app_dec, or frame_pa to"),
+        ],
+        [
+            {"app_ra": None, "app_dec": None, "frame_pa": None},
+            (ValueError, "Must include both app_ra and app_dec, or frame_pa to"),
+        ],
+        [
+            {"antenna_positions": None},
+            (ValueError, "Must include antenna_positions if use_ant_pos=True."),
+        ],
+        [
+            {"ant_1_array": None},
+            (ValueError, "Must include ant_1_array, ant_2_array, and antenna_numbers"),
+        ],
+        [
+            {"ant_2_array": None},
+            (ValueError, "Must include ant_1_array, ant_2_array, and antenna_numbers"),
+        ],
+        [
+            {"antenna_numbers": None},
+            (ValueError, "Must include ant_1_array, ant_2_array, and antenna_numbers"),
+        ],
+        [
+            {"telescope_lon": None},
+            (ValueError, "Must include telescope_lon if use_ant_pos=True."),
+        ],
+        [
+            {"uvw_array": None, "use_ant_pos": False},
+            (ValueError, "Must include uvw_array if use_ant_pos=False."),
+        ],
+        [
+            {"telescope_lat": None, "use_ant_pos": False, "from_enu": True},
+            (ValueError, "Must include telescope_lat if moving "),
+        ],
+        [
+            {"lst_array": None, "use_ant_pos": False, "from_enu": True},
+            (
+                ValueError,
+                re.escape("Must include lst_array if moving between ENU (i.e.,"),
+            ),
+        ],
+        [
+            {"use_ant_pos": False, "old_app_ra": None},
+            (ValueError, "Must include old_app_ra and old_app_dec values when data"),
+        ],
+        [
+            {"use_ant_pos": False, "old_app_dec": None},
+            (ValueError, "Must include old_app_ra and old_app_dec values when data"),
+        ],
+        [
+            {"use_ant_pos": False, "old_frame_pa": None},
+            (ValueError, "Must include old_frame_pa values if data are phased and "),
+        ],
+    ),
+)
+def test_calc_uvw_input_errors(calc_uvw_args, arg_dict, err):
+    """
+    Check for argument errors with calc_uvw.
+    """
+    for key in arg_dict.keys():
+        calc_uvw_args[key] = arg_dict[key]

+    with pytest.raises(err[0], match=err[1]):
+        phs_utils.calc_uvw(
+            app_ra=calc_uvw_args["app_ra"],
+            app_dec=calc_uvw_args["app_dec"],
+            frame_pa=calc_uvw_args["frame_pa"],
+            lst_array=calc_uvw_args["lst_array"],
+            use_ant_pos=calc_uvw_args["use_ant_pos"],
+            uvw_array=calc_uvw_args["uvw_array"],
+            antenna_positions=calc_uvw_args["antenna_positions"],
+            antenna_numbers=calc_uvw_args["antenna_numbers"],
+            ant_1_array=calc_uvw_args["ant_1_array"],
+            ant_2_array=calc_uvw_args["ant_2_array"],
+            old_app_ra=calc_uvw_args["old_app_ra"],
+            old_app_dec=calc_uvw_args["old_app_dec"],
+            old_frame_pa=calc_uvw_args["old_frame_pa"],
+            telescope_lat=calc_uvw_args["telescope_lat"],
+            telescope_lon=calc_uvw_args["telescope_lon"],
+            from_enu=calc_uvw_args["from_enu"],
+            to_enu=calc_uvw_args["to_enu"],
+        )


+def test_calc_uvw_no_op(calc_uvw_args):
+    """
+    Test that transforming ENU -> ENU gives you an output identical to the input.
+    """
+    # This should be a no-op, check for equality
+    uvw_check = phs_utils.calc_uvw(
+        lst_array=calc_uvw_args["lst_array"],
+        use_ant_pos=False,
+        uvw_array=calc_uvw_args["uvw_array"],
+        telescope_lat=calc_uvw_args["telescope_lat"],
+        telescope_lon=calc_uvw_args["telescope_lon"],
+        to_enu=True,
+        from_enu=True,
+    )
+    assert np.all(np.equal(calc_uvw_args["uvw_array"], uvw_check))


+def test_calc_uvw_same_place(calc_uvw_args):
+    """
+    Check and see that the uvw calculator derives the same values as those derived
+    by hand (i.e., that calculating for the same position returns the same answer).
+    """
+    # Check and make sure that when we plug in the original values, we recover the
+    # exact same values that we calculated above.
+    uvw_ant_check = phs_utils.calc_uvw(
+        app_ra=calc_uvw_args["old_app_ra"],
+        app_dec=calc_uvw_args["old_app_dec"],
+        frame_pa=calc_uvw_args["old_frame_pa"],
+        lst_array=calc_uvw_args["lst_array"],
+        use_ant_pos=True,
+        antenna_positions=calc_uvw_args["antenna_positions"],
+        antenna_numbers=calc_uvw_args["antenna_numbers"],
+        ant_1_array=calc_uvw_args["ant_1_array"],
+        ant_2_array=calc_uvw_args["ant_2_array"],
+        telescope_lat=calc_uvw_args["telescope_lat"],
+        telescope_lon=calc_uvw_args["telescope_lon"],
+    )

+    uvw_base_check = phs_utils.calc_uvw(
+        app_ra=calc_uvw_args["old_app_ra"],
+        app_dec=calc_uvw_args["old_app_dec"],
+        frame_pa=calc_uvw_args["old_frame_pa"],
+        lst_array=calc_uvw_args["lst_array"],
+        use_ant_pos=False,
+        uvw_array=calc_uvw_args["uvw_array"],
+        old_app_ra=calc_uvw_args["old_app_ra"],
+        old_app_dec=calc_uvw_args["old_app_dec"],
+        old_frame_pa=calc_uvw_args["old_frame_pa"],
+    )

+    np.testing.assert_allclose(uvw_ant_check, calc_uvw_args["uvw_array"])
+    np.testing.assert_allclose(uvw_base_check, calc_uvw_args["uvw_array"])


+@pytest.mark.parametrize("to_enu", [False, True])
+def test_calc_uvw_base_vs_ants(calc_uvw_args, to_enu):
+    """
+    Check to see that we get the same values for uvw coordinates whether we calculate
+    them using antenna positions or the previously calculated uvw's.
+    """

+    # Now change position, and make sure that whether we used ant positions or rotated
+    # uvw vectors, we derived the same uvw-coordinates at the end
+    uvw_ant_check = phs_utils.calc_uvw(
+        app_ra=calc_uvw_args["app_ra"],
+        app_dec=calc_uvw_args["app_dec"],
+        frame_pa=calc_uvw_args["frame_pa"],
+        lst_array=calc_uvw_args["lst_array"],
+        use_ant_pos=True,
+        antenna_positions=calc_uvw_args["antenna_positions"],
+        antenna_numbers=calc_uvw_args["antenna_numbers"],
+        ant_1_array=calc_uvw_args["ant_1_array"],
+        ant_2_array=calc_uvw_args["ant_2_array"],
+        telescope_lat=calc_uvw_args["telescope_lat"],
+        telescope_lon=calc_uvw_args["telescope_lon"],
+        to_enu=to_enu,
+    )

+    uvw_base_check = phs_utils.calc_uvw(
+        app_ra=calc_uvw_args["app_ra"],
+        app_dec=calc_uvw_args["app_dec"],
+        frame_pa=calc_uvw_args["frame_pa"],
+        lst_array=calc_uvw_args["lst_array"],
+        use_ant_pos=False,
+        uvw_array=calc_uvw_args["uvw_array"],
+        old_app_ra=calc_uvw_args["old_app_ra"],
+        old_app_dec=calc_uvw_args["old_app_dec"],
+        old_frame_pa=calc_uvw_args["old_frame_pa"],
+        telescope_lat=calc_uvw_args["telescope_lat"],
+        telescope_lon=calc_uvw_args["telescope_lon"],
+        to_enu=to_enu,
+    )

+    np.testing.assert_allclose(uvw_ant_check, uvw_base_check)


+def test_calc_uvw_enu_roundtrip(calc_uvw_args):
+    """
+    Check and see that we can go from uvw to ENU and back to uvw using the `uvw_array`
+    argument alone (i.e., without antenna positions).
+    """
+    # Now attempt to round trip from projected to ENU back to projected -- that should
+    # give us the original set of uvw-coordinates.
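+    # Because projected uvw <-> ENU is a pure rotation (no scaling), this round
+    # trip should be exact to double precision, hence the tight atol below.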
+    temp_uvw = phs_utils.calc_uvw(
+        lst_array=calc_uvw_args["lst_array"],
+        use_ant_pos=False,
+        uvw_array=calc_uvw_args["uvw_array"],
+        old_app_ra=calc_uvw_args["old_app_ra"],
+        old_app_dec=calc_uvw_args["old_app_dec"],
+        old_frame_pa=calc_uvw_args["old_frame_pa"],
+        telescope_lat=calc_uvw_args["telescope_lat"],
+        telescope_lon=calc_uvw_args["telescope_lon"],
+        to_enu=True,
+    )

+    uvw_base_enu_check = phs_utils.calc_uvw(
+        app_ra=calc_uvw_args["old_app_ra"],
+        app_dec=calc_uvw_args["old_app_dec"],
+        frame_pa=calc_uvw_args["old_frame_pa"],
+        lst_array=calc_uvw_args["lst_array"],
+        use_ant_pos=False,
+        uvw_array=temp_uvw,
+        telescope_lat=calc_uvw_args["telescope_lat"],
+        telescope_lon=calc_uvw_args["telescope_lon"],
+        from_enu=True,
+    )

+    np.testing.assert_allclose(
+        calc_uvw_args["uvw_array"], uvw_base_enu_check, atol=1e-15, rtol=0
+    )


+def test_calc_uvw_pa_ex_post_facto(calc_uvw_args):
+    """
+    Check and see that one can apply the frame position angle rotation after-the-fact
+    and still get out the same answer you get if you were doing it during the initial
+    uvw coordinate calculation.
+    """
+    # Finally, check and see what happens if you do the PA rotation as part of the
+    # first uvw calculation, and make sure it agrees with what you get if you decide
+    # to apply the PA rotation after-the-fact.
+    uvw_base_check = phs_utils.calc_uvw(
+        app_ra=calc_uvw_args["app_ra"],
+        app_dec=calc_uvw_args["app_dec"],
+        frame_pa=calc_uvw_args["frame_pa"],
+        lst_array=calc_uvw_args["lst_array"],
+        use_ant_pos=False,
+        uvw_array=calc_uvw_args["uvw_array"],
+        old_app_ra=calc_uvw_args["old_app_ra"],
+        old_app_dec=calc_uvw_args["old_app_dec"],
+        old_frame_pa=calc_uvw_args["old_frame_pa"],
+    )

+    temp_uvw = phs_utils.calc_uvw(
+        app_ra=calc_uvw_args["app_ra"],
+        app_dec=calc_uvw_args["app_dec"],
+        lst_array=calc_uvw_args["lst_array"],
+        use_ant_pos=False,
+        uvw_array=calc_uvw_args["uvw_array"],
+        old_app_ra=calc_uvw_args["old_app_ra"],
+        old_app_dec=calc_uvw_args["old_app_dec"],
+        old_frame_pa=calc_uvw_args["old_frame_pa"],
+    )

+    uvw_base_late_pa_check = phs_utils.calc_uvw(
+        frame_pa=calc_uvw_args["frame_pa"],
+        use_ant_pos=False,
+        uvw_array=temp_uvw,
+        old_frame_pa=calc_uvw_args["old_frame_pa"],
+    )

+    np.testing.assert_allclose(uvw_base_check, uvw_base_late_pa_check)


+@pytest.mark.filterwarnings('ignore:ERFA function "pmsafe" yielded')
+@pytest.mark.filterwarnings('ignore:ERFA function "dtdtf" yielded')
+@pytest.mark.filterwarnings('ignore:ERFA function "utcut1" yielded')
+@pytest.mark.filterwarnings('ignore:ERFA function "utctai" yielded')
+@pytest.mark.parametrize(
+    "arg_dict,msg",
+    (
+        [{"library": "xyz"}, "Requested coordinate transformation library is not"],
+        [{"icrs_ra": np.arange(10)}, "ra and dec must be the same shape."],
+        [{"icrs_dec": np.arange(10)}, "ra and dec must be the same shape."],
+        [{"pm_ra": np.arange(10)}, "pm_ra must be the same shape as ra and dec."],
+        [{"pm_dec": np.arange(10)}, "pm_dec must be the same shape as ra and dec."],
+        [{"dist": np.arange(10)}, "dist must be the same shape as ra and dec."],
+        [{"vrad": np.arange(10)}, "vrad must be the same shape as ra and dec."],
+        [
+            {
+                "icrs_ra": [0, 0],
+                "icrs_dec": [0, 0],
+                "pm_ra": None,
+                "pm_dec": None,
+                "dist": None,
+                "vrad": None,
+            },
+            "time_array must be of either of",
+        ],
+        [{"time_array": 0.0, "library": "novas"}, "No current support for JPL ephems"],
+    ),
+)
+def test_transform_icrs_to_app_arg_errs(astrometry_args, arg_dict, msg):
+    """
+    Check for argument errors with transform_icrs_to_app
+    """
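+    # The pattern here (and in the error tests that follow): copy the known-good
+    # fixture arguments, override a subset with the bad values from arg_dict, and
+    # check that the expected ValueError is raised with the parametrized message.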
+ pytest.importorskip("novas") + default_args = astrometry_args.copy() + for key in arg_dict.keys(): + default_args[key] = arg_dict[key] + + # Start w/ the transform_icrs_to_app block + with pytest.raises(ValueError, match=msg): + phs_utils.transform_icrs_to_app( + time_array=default_args["time_array"], + ra=default_args["icrs_ra"], + dec=default_args["icrs_dec"], + telescope_loc=default_args["telescope_loc"], + telescope_frame=default_args["telescope_frame"], + pm_ra=default_args["pm_ra"], + pm_dec=default_args["pm_dec"], + dist=default_args["dist"], + vrad=default_args["vrad"], + epoch=default_args["epoch"], + astrometry_library=default_args["library"], + ) + + +@pytest.mark.parametrize( + "arg_dict,msg", + ( + [{"library": "xyz"}, "Requested coordinate transformation library is not"], + [{"app_ra": np.arange(10)}, "app_ra and app_dec must be the same shape."], + [{"app_dec": np.arange(10)}, "app_ra and app_dec must be the same shape."], + [{"time_array": np.arange(10)}, "time_array must be of either of length 1"], + ), +) +def test_transform_app_to_icrs_arg_errs(astrometry_args, arg_dict, msg): + """ + Check for argument errors with transform_app_to_icrs + """ + default_args = astrometry_args.copy() + for key in arg_dict.keys(): + default_args[key] = arg_dict[key] + + with pytest.raises(ValueError, match=msg): + phs_utils.transform_app_to_icrs( + time_array=default_args["time_array"], + app_ra=default_args["app_ra"], + app_dec=default_args["app_dec"], + telescope_loc=default_args["telescope_loc"], + telescope_frame=default_args["telescope_frame"], + astrometry_library=default_args["library"], + ) + + +def test_transform_sidereal_coords_arg_errs(): + """ + Check for argument errors with transform_sidereal_coords + """ + # Next on to sidereal to sidereal + with pytest.raises(ValueError, match="lon and lat must be the same shape."): + phs_utils.transform_sidereal_coords( + longitude=[0.0], + latitude=[0.0, 1.0], + in_coord_frame="fk5", + out_coord_frame="icrs", + in_coord_epoch="J2000.0", + time_array=[0.0, 1.0, 2.0], + ) + + with pytest.raises(ValueError, match="Shape of time_array must be either that of "): + phs_utils.transform_sidereal_coords( + longitude=[0.0, 1.0], + latitude=[0.0, 1.0], + in_coord_frame="fk4", + out_coord_frame="fk4", + in_coord_epoch=1950.0, + out_coord_epoch=1984.0, + time_array=[0.0, 1.0, 2.0], + ) + + +@pytest.mark.filterwarnings('ignore:ERFA function "d2dtf" yielded') +@pytest.mark.parametrize( + ["arg_dict", "msg"], + [ + [ + {"force_lookup": True, "time_array": np.arange(100000)}, + "Requesting too many individual ephem points from JPL-Horizons.", + ], + [{"force_lookup": False, "high_cadence": True}, "Too many ephem points"], + [{"time_array": np.arange(10)}, "No current support for JPL ephems outside"], + [{"targ_name": "whoami"}, "Target ID is not recognized in either the small"], + ], +) +def test_lookup_jplhorizons_arg_errs(arg_dict, msg): + """ + Check for argument errors with lookup_jplhorizons. 
+ """ + # Don't do this test if we don't have astroquery loaded + pytest.importorskip("astroquery") + + from ssl import SSLError + + from requests import RequestException + + default_args = { + "targ_name": "Mars", + "time_array": np.array([0.0, 1000.0]) + 2456789.0, + "telescope_loc": EarthLocation.from_geodetic(0, 0, height=0.0), + "high_cadence": False, + "force_lookup": None, + } + + for key in arg_dict.keys(): + default_args[key] = arg_dict[key] + + # We have to handle this piece a bit carefully, since some queries fail due to + # intermittent failures connecting to the JPL-Horizons service. + with pytest.raises(Exception) as cm: + phs_utils.lookup_jplhorizons( + default_args["targ_name"], + default_args["time_array"], + telescope_loc=default_args["telescope_loc"], + high_cadence=default_args["high_cadence"], + force_indv_lookup=default_args["force_lookup"], + ) + + if issubclass(cm.type, RequestException) or issubclass(cm.type, SSLError): + pytest.skip("SSL/Connection error w/ JPL Horizons") + + assert issubclass(cm.type, ValueError) + assert str(cm.value).startswith(msg) + + +@pytest.mark.skipif(not hasmoon, reason="lunarsky not installed") +def test_lookup_jplhorizons_moon_err(): + """ + Check for argument errors with lookup_jplhorizons. + """ + # Don't do this test if we don't have astroquery loaded + pytest.importorskip("astroquery") + + from ssl import SSLError + + from requests import RequestException + + default_args = { + "targ_name": "Mars", + "time_array": np.array([0.0, 1000.0]) + 2456789.0, + "telescope_loc": MoonLocation.from_selenodetic(0.6875, 24.433, 0), + "high_cadence": False, + "force_lookup": None, + } + + # We have to handle this piece a bit carefully, since some queries fail due to + # intermittent failures connecting to the JPL-Horizons service. 
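+    # Unlike the Earth-based lookups above, no valid query is possible here: a
+    # MoonLocation telescope should be rejected up front, so once connection
+    # failures are ruled out the expected outcome is a NotImplementedError.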
+ with pytest.raises(Exception) as cm: + phs_utils.lookup_jplhorizons( + default_args["targ_name"], + default_args["time_array"], + telescope_loc=default_args["telescope_loc"], + high_cadence=default_args["high_cadence"], + force_indv_lookup=default_args["force_lookup"], + ) + + if issubclass(cm.type, RequestException) or issubclass(cm.type, SSLError): + pytest.skip("SSL/Connection error w/ JPL Horizons") + + assert issubclass(cm.type, NotImplementedError) + assert str(cm.value).startswith( + "Cannot lookup JPL positions for telescopes with a MoonLocation" + ) + + +@pytest.mark.parametrize( + "bad_arg,msg", + [ + ["etimes", "ephem_ra must have the same shape as ephem_times."], + ["ra", "ephem_ra must have the same shape as ephem_times."], + ["dec", "ephem_dec must have the same shape as ephem_times."], + ["dist", "ephem_dist must have the same shape as ephem_times."], + ["vel", "ephem_vel must have the same shape as ephem_times."], + ], +) +def test_interpolate_ephem_arg_errs(bad_arg, msg): + """ + Check for argument errors with interpolate_ephem + """ + # Now moving on to the interpolation scheme + with pytest.raises(ValueError, match=msg): + phs_utils.interpolate_ephem( + time_array=0.0, + ephem_times=0.0 if ("etimes" == bad_arg) else [0.0, 1.0], + ephem_ra=0.0 if ("ra" == bad_arg) else [0.0, 1.0], + ephem_dec=0.0 if ("dec" == bad_arg) else [0.0, 1.0], + ephem_dist=0.0 if ("dist" == bad_arg) else [0.0, 1.0], + ephem_vel=0.0 if ("vel" == bad_arg) else [0.0, 1.0], + ) + + +def test_calc_app_coords_arg_errs(): + """ + Check for argument errors with calc_app_coords + """ + # Now on to app_coords + with pytest.raises(ValueError, match="Object type whoknows is not recognized."): + phs_utils.calc_app_coords( + lon_coord=0.0, lat_coord=0.0, telescope_loc=(0, 1, 2), coord_type="whoknows" + ) + + +def test_transform_multi_sidereal_coords(astrometry_args): + """ + Perform some basic tests to verify that we can transform between sidereal frames + with multiple coordinates. + """ + # Check and make sure that we can deal with non-singleton times or coords with + # singleton coords and times, respectively. + check_ra, check_dec = phs_utils.transform_sidereal_coords( + longitude=astrometry_args["icrs_ra"] * np.ones(2), + latitude=astrometry_args["icrs_dec"] * np.ones(2), + in_coord_frame="icrs", + out_coord_frame="fk5", + in_coord_epoch=2000.0, + out_coord_epoch=2000.0, + time_array=astrometry_args["time_array"][0] * np.ones(2), + ) + assert np.all(np.equal(astrometry_args["fk5_ra"], check_ra)) + assert np.all(np.equal(astrometry_args["fk5_dec"], check_dec)) + + +def test_transform_fk5_fk4_icrs_loop(astrometry_args): + """ + Do a roundtrip test between ICRS, FK5, FK4 and back to ICRS to verify that we can + handle transformation between different sidereal frames correctly. + """ + # Now do a triangle between ICRS -> FK5 -> FK4 -> ICRS. If all is working well, + # then we should recover the same position we started with. 
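+    # Each leg below changes one frame/epoch pair (ICRS -> FK5 at J2000, FK5 -> FK4
+    # going J2000 -> B1950, then FK4 -> ICRS back at J2000); the closing check
+    # requires the accumulated round-trip error to stay below 0.1 µas.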
+ fk5_ra, fk5_dec = phs_utils.transform_sidereal_coords( + longitude=astrometry_args["icrs_ra"], + latitude=astrometry_args["icrs_dec"], + in_coord_frame="icrs", + out_coord_frame="fk5", + in_coord_epoch=2000.0, + out_coord_epoch=2000.0, + time_array=astrometry_args["time_array"][0], + ) + + fk4_ra, fk4_dec = phs_utils.transform_sidereal_coords( + longitude=fk5_ra, + latitude=fk5_dec, + in_coord_frame="fk5", + out_coord_frame="fk4", + in_coord_epoch="J2000.0", + out_coord_epoch="B1950.0", + ) + + check_ra, check_dec = phs_utils.transform_sidereal_coords( + longitude=fk4_ra, + latitude=fk4_dec, + in_coord_frame="fk4", + out_coord_frame="icrs", + in_coord_epoch="B1950.0", + out_coord_epoch="J2000.0", + ) + + check_coord = SkyCoord(check_ra, check_dec, unit="rad") + assert np.all(check_coord.separation(astrometry_args["icrs_coord"]).uarcsec < 0.1) + + +@pytest.mark.parametrize(["telescope_frame", "selenoid"], frame_selenoid) +@pytest.mark.parametrize("in_lib", ["erfa", "astropy"]) +@pytest.mark.parametrize("out_lib", ["erfa", "astropy"]) +def test_roundtrip_icrs(astrometry_args, telescope_frame, selenoid, in_lib, out_lib): + """ + Performs a roundtrip test to verify that one can transform between + ICRS <-> topocentric to the precision limit, without running into + issues. + """ + if telescope_frame == "itrs": + telescope_loc = astrometry_args["telescope_loc"] + else: + telescope_loc = astrometry_args["moon_telescope_loc"] + + if telescope_frame == "mcmf" and in_lib != "astropy": + with pytest.raises( + NotImplementedError, + match="MoonLocation telescopes are only supported with the 'astropy' " + "astrometry library", + ): + app_ra, app_dec = phs_utils.transform_icrs_to_app( + time_array=astrometry_args["time_array"], + ra=astrometry_args["icrs_ra"], + dec=astrometry_args["icrs_dec"], + telescope_loc=telescope_loc, + telescope_frame=telescope_frame, + ellipsoid=selenoid, + epoch=astrometry_args["epoch"], + astrometry_library=in_lib, + ) + return + + if telescope_frame == "mcmf" and out_lib == "astropy": + kwargs = {"telescope_frame": telescope_frame, "ellipsoid": selenoid} + else: + # don't pass telescope frame here so something still happens if frame and + # astrometry lib conflict + kwargs = {} + + app_ra, app_dec = phs_utils.transform_icrs_to_app( + time_array=astrometry_args["time_array"], + ra=astrometry_args["icrs_ra"], + dec=astrometry_args["icrs_dec"], + telescope_loc=telescope_loc, + epoch=astrometry_args["epoch"], + astrometry_library=in_lib, + **kwargs, + ) + + if telescope_frame == "mcmf" and out_lib != "astropy": + with pytest.raises( + NotImplementedError, + match="MoonLocation telescopes are only supported with the 'astropy' " + "astrometry library", + ): + check_ra, check_dec = phs_utils.transform_app_to_icrs( + time_array=astrometry_args["time_array"], + app_ra=app_ra, + app_dec=app_dec, + telescope_loc=telescope_loc, + telescope_frame=telescope_frame, + ellipsoid=selenoid, + astrometry_library=out_lib, + ) + return + + if telescope_frame == "mcmf": + from spiceypy.utils.exceptions import SpiceUNKNOWNFRAME + + try: + check_ra, check_dec = phs_utils.transform_app_to_icrs( + time_array=astrometry_args["time_array"], + app_ra=app_ra, + app_dec=app_dec, + telescope_loc=telescope_loc, + astrometry_library=out_lib, + **kwargs, + ) + except SpiceUNKNOWNFRAME as err: + pytest.skip("SpiceUNKNOWNFRAME error: " + str(err)) + else: + check_ra, check_dec = phs_utils.transform_app_to_icrs( + time_array=astrometry_args["time_array"], + app_ra=app_ra, + app_dec=app_dec, + 
telescope_loc=telescope_loc,
+            astrometry_library=out_lib,
+            **kwargs,
+        )
+
+    check_coord = SkyCoord(check_ra, check_dec, unit="rad", frame="icrs")
+    # Verify that everything agrees to better than µas-level accuracy if the
+    # libraries are the same, otherwise to 100 µas if cross-comparing libraries
+    if in_lib == out_lib:
+        assert np.all(
+            astrometry_args["icrs_coord"].separation(check_coord).uarcsec < 1.0
+        )
+    else:
+        assert np.all(
+            astrometry_args["icrs_coord"].separation(check_coord).uarcsec < 100.0
+        )
+
+    if selenoid == "SPHERE":
+        # check defaults
+        app_ra, app_dec = phs_utils.transform_icrs_to_app(
+            time_array=astrometry_args["time_array"],
+            ra=astrometry_args["icrs_ra"],
+            dec=astrometry_args["icrs_dec"],
+            telescope_loc=telescope_loc,
+            epoch=astrometry_args["epoch"],
+            astrometry_library=in_lib,
+            telescope_frame=telescope_frame,
+        )
+        check_ra, check_dec = phs_utils.transform_app_to_icrs(
+            time_array=astrometry_args["time_array"],
+            app_ra=app_ra,
+            app_dec=app_dec,
+            telescope_loc=telescope_loc,
+            astrometry_library=out_lib,
+            telescope_frame=telescope_frame,
+        )
+        check_coord = SkyCoord(check_ra, check_dec, unit="rad", frame="icrs")
+        # Verify that everything still agrees to better than µas-level accuracy
+        assert np.all(
+            astrometry_args["icrs_coord"].separation(check_coord).uarcsec < 1.0
+        )
+
+
+def test_calc_parallactic_angle():
+    """
+    A relatively straightforward test to verify that we recover the parallactic
+    angles we expect given some known inputs
+    """
+    expected_vals = np.array([1.0754290375762232, 0.0, -0.6518070715011698])
+    meas_vals = phs_utils.calc_parallactic_angle(
+        app_ra=[0.0, 1.0, 2.0],
+        app_dec=[-1.0, 0.0, 1.0],
+        lst_array=[2.0, 1.0, 0],
+        telescope_lat=1.0,
+    )
+    # Make sure things agree to better than ~0.1 uas (as they definitely should)
+    np.testing.assert_allclose(expected_vals, meas_vals, 0.0, 1e-12)
+
+
+def test_calc_frame_pos_angle():
+    """
+    Verify that we recover frame position angles correctly
+    """
+    # First test -- plug in "topo" for the frame, which should always produce an
+    # array of all zeros, since the apparent coords are in the topo frame and the
+    # two frames are therefore always aligned (a PA of zero degrees)
+    frame_pa = phs_utils.calc_frame_pos_angle(
+        time_array=np.array([2456789.0] * 100),
+        app_ra=np.arange(100) * (np.pi / 50),
+        app_dec=np.zeros(100),
+        telescope_loc=(0, 0, 0),
+        ref_frame="topo",
+    )
+    assert len(frame_pa) == 100
+    assert np.all(frame_pa == 0.0)
+    # Next test -- plug in J2000 and see that we actually get back a frame PA
+    # of basically 0 degrees.
+    j2000_jd = Time(2000.0, format="jyear").utc.jd
+    frame_pa = phs_utils.calc_frame_pos_angle(
+        time_array=np.array([j2000_jd] * 100),
+        app_ra=np.arange(100) * (np.pi / 50),
+        app_dec=np.zeros(100),
+        telescope_loc=(0, 0, 0),
+        ref_frame="fk5",
+        ref_epoch=2000.0,
+    )
+    # At J2000, the only frame PA terms come from aberration, which basically maxes
+    # out at ~< 1e-4 rad. Check to make sure that lines up with what we measure.
+    assert np.all(np.abs(frame_pa) < 1e-4)
+
+    # JD 2458849.5 is Jan-01-2020, so 20 years of precession ought to have
+    # accumulated (at roughly 1 arcmin/yr).
Make sure these values are sensible + frame_pa = phs_utils.calc_frame_pos_angle( + time_array=np.array([2458849.5] * 100), + app_ra=np.arange(100) * (np.pi / 50), + app_dec=np.zeros(100), + telescope_loc=(0, 0, 0), + ref_frame="fk5", + ref_epoch=2000.0, + ) + assert np.all(np.abs(frame_pa) < 20 * (50.3 / 3600) * (np.pi / 180.0)) + # Check the PA at a couple of chosen points, which just so happen to be very close + # in magnitude (as they're basically in the same plane as the motion of the Earth) + assert np.isclose(frame_pa[25], 0.001909957544309159) + assert np.isclose(frame_pa[-25], -0.0019098101664715339) + + +def test_jphl_lookup(astrometry_args): + """ + A very simple lookup query to verify that the astroquery tools for accessing + JPL-Horizons are working. This test is very limited, on account of not wanting to + slam JPL w/ coordinate requests. + """ + pytest.importorskip("astroquery") + + from ssl import SSLError + + from requests import RequestException + + # If we can't connect to JPL-Horizons, then skip this test and don't outright fail. + try: + [ephem_times, ephem_ra, ephem_dec, ephem_dist, ephem_vel] = ( + phs_utils.lookup_jplhorizons("Sun", 2456789.0) + ) + except (SSLError, RequestException) as err: + pytest.skip("SSL/Connection error w/ JPL Horizons: " + str(err)) + + assert np.all(np.equal(ephem_times, 2456789.0)) + np.testing.assert_allclose(ephem_ra, 0.8393066751804976) + np.testing.assert_allclose(ephem_dec, 0.3120687480116649) + np.testing.assert_allclose(ephem_dist, 1.00996185750717) + np.testing.assert_allclose(ephem_vel, 0.386914) + + # check calling lookup_jplhorizons with EarthLocation vs lat/lon/alt passed + try: + ephem_info_latlon = phs_utils.lookup_jplhorizons( + "Sun", 2456789.0, telescope_loc=astrometry_args["telescope_loc"] + ) + ephem_info_el = phs_utils.lookup_jplhorizons( + "Sun", + 2456789.0, + telescope_loc=EarthLocation.from_geodetic( + lat=astrometry_args["telescope_loc"][0] * units.rad, + lon=astrometry_args["telescope_loc"][1] * units.rad, + height=astrometry_args["telescope_loc"][2] * units.m, + ), + ) + except (SSLError, RequestException) as err: + pytest.skip("SSL/Connection error w/ JPL Horizons: " + str(err)) + + for ind, item in enumerate(ephem_info_latlon): + assert item == ephem_info_el[ind] + + +def test_ephem_interp_one_point(): + """ + These tests do some simple checks to verify that the interpolator behaves properly + when only being provided singleton values. + """ + # First test the case where there is only one ephem point, and thus everything + # takes on that value + time_array = np.arange(100) * 0.01 + ephem_times = np.array([0]) + ephem_ra = np.array([1.0]) + ephem_dec = np.array([2.0]) + ephem_dist = np.array([3.0]) + ephem_vel = np.array([4.0]) + + ra_vals0, dec_vals0, dist_vals0, vel_vals0 = phs_utils.interpolate_ephem( + time_array=time_array, + ephem_times=ephem_times, + ephem_ra=ephem_ra, + ephem_dec=ephem_dec, + ephem_dist=ephem_dist, + ephem_vel=ephem_vel, + ) + + assert np.all(ra_vals0 == 1.0) + assert np.all(dec_vals0 == 2.0) + assert np.all(dist_vals0 == 3.0) + assert np.all(vel_vals0 == 4.0) + + +def test_ephem_interp_multi_point(): + """ + Test that ephem coords are interpolated correctly when supplying more than a + singleton value for the various arrays. + """ + # Next test the case where the ephem only has a couple of points, in which case the + # code will default to using a simple, linear interpolation scheme. 
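+    # Both ephemerides below sample the same underlying straight line, so the
+    # 2-point (linear) and 11-point (cubic spline) interpolation paths should agree
+    # to the ~1e-15 level checked at the end.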
+    time_array = np.arange(100) * 0.01
+    ephem_times = np.array([0, 1])
+    ephem_ra = np.array([0, 1]) + 1.0
+    ephem_dec = np.array([0, 1]) + 2.0
+    ephem_dist = np.array([0, 1]) + 3.0
+    ephem_vel = np.array([0, 1]) + 4.0
+
+    ra_vals1, dec_vals1, dist_vals1, vel_vals1 = phs_utils.interpolate_ephem(
+        time_array=time_array,
+        ephem_times=ephem_times,
+        ephem_ra=ephem_ra,
+        ephem_dec=ephem_dec,
+        ephem_dist=ephem_dist,
+        ephem_vel=ephem_vel,
+    )
+
+    # When there are lots more data points, the interpolator will default to using a
+    # cubic spline, which _should_ be very close (to numerical precision limits) to
+    # what we get with the method above.
+    ephem_times = np.arange(11) * 0.1
+    ephem_ra = (np.arange(11) * 0.1) + 1.0
+    ephem_dec = (np.arange(11) * 0.1) + 2.0
+    ephem_dist = (np.arange(11) * 0.1) + 3.0
+    ephem_vel = (np.arange(11) * 0.1) + 4.0
+
+    ra_vals2, dec_vals2, dist_vals2, vel_vals2 = phs_utils.interpolate_ephem(
+        time_array=time_array,
+        ephem_times=ephem_times,
+        ephem_ra=ephem_ra,
+        ephem_dec=ephem_dec,
+        ephem_dist=ephem_dist,
+        ephem_vel=ephem_vel,
+    )
+
+    # Make sure that everything is consistent to floating point precision
+    np.testing.assert_allclose(ra_vals1, ra_vals2, 1e-15, 0.0)
+    np.testing.assert_allclose(dec_vals1, dec_vals2, 1e-15, 0.0)
+    np.testing.assert_allclose(dist_vals1, dist_vals2, 1e-15, 0.0)
+    np.testing.assert_allclose(vel_vals1, vel_vals2, 1e-15, 0.0)
+    np.testing.assert_allclose(time_array + 1.0, ra_vals2, 1e-15, 0.0)
+    np.testing.assert_allclose(time_array + 2.0, dec_vals2, 1e-15, 0.0)
+    np.testing.assert_allclose(time_array + 3.0, dist_vals2, 1e-15, 0.0)
+    np.testing.assert_allclose(time_array + 4.0, vel_vals2, 1e-15, 0.0)
+
+
+@pytest.mark.parametrize("frame", ["icrs", "fk5"])
+@pytest.mark.parametrize(["telescope_frame", "selenoid"], frame_selenoid)
+def test_calc_app_sidereal(astrometry_args, frame, telescope_frame, selenoid):
+    """
+    Tests that we can calculate app coords for sidereal objects
+    """
+    # First step is to check and make sure we can do sidereal coords. This is the
+    # most basic thing to check, so this really _should_ work.
+    if telescope_frame == "itrs":
+        telescope_loc = astrometry_args["telescope_loc"]
+    else:
+        from spiceypy.utils.exceptions import SpiceUNKNOWNFRAME
+
+        telescope_loc = astrometry_args["moon_telescope_loc"]
+
+    try:
+        check_ra, check_dec = phs_utils.calc_app_coords(
+            lon_coord=(
+                astrometry_args["fk5_ra"]
+                if (frame == "fk5")
+                else astrometry_args["icrs_ra"]
+            ),
+            lat_coord=(
+                astrometry_args["fk5_dec"]
+                if (frame == "fk5")
+                else astrometry_args["icrs_dec"]
+            ),
+            coord_type="sidereal",
+            telescope_loc=telescope_loc,
+            telescope_frame=telescope_frame,
+            ellipsoid=selenoid,
+            time_array=astrometry_args["time_array"],
+            coord_frame=frame,
+            coord_epoch=astrometry_args["epoch"],
+        )
+    except SpiceUNKNOWNFRAME as err:
+        pytest.skip("SpiceUNKNOWNFRAME error: " + str(err))
+
+    check_coord = SkyCoord(check_ra, check_dec, unit="rad")
+
+    if telescope_frame == "itrs":
+        app_coord = astrometry_args["app_coord"]
+    else:
+        app_coord = astrometry_args["moon_app_coord"][selenoid]
+
+    assert np.all(app_coord.separation(check_coord).uarcsec < 1.0)
+
+
+@pytest.mark.parametrize("frame", ["icrs", "fk5"])
+@pytest.mark.parametrize(["telescope_frame", "selenoid"], frame_selenoid)
+def test_calc_app_ephem(astrometry_args, frame, telescope_frame, selenoid):
+    """
+    Tests that we can calculate app coords for ephem objects
+    """
+    # Next, see what happens when we pass an ephem. Note that this is just a single
+    # point ephem, so it's not testing any of the fancy interpolation, but we have
+    # other tests for poking at that. The two tests here are to check both the ICRS
+    # and FK5 paths through the ephem.
+    if telescope_frame == "itrs":
+        telescope_loc = astrometry_args["telescope_loc"]
+    else:
+        telescope_loc = astrometry_args["moon_telescope_loc"]
+
+    if frame == "fk5":
+        ephem_ra = astrometry_args["fk5_ra"]
+        ephem_dec = astrometry_args["fk5_dec"]
+    else:
+        ephem_ra = np.array([astrometry_args["icrs_ra"]])
+        ephem_dec = np.array([astrometry_args["icrs_dec"]])
+
+    ephem_times = np.array([astrometry_args["time_array"][0]])
+    check_ra, check_dec = phs_utils.calc_app_coords(
+        lon_coord=ephem_ra,
+        lat_coord=ephem_dec,
+        coord_times=ephem_times,
+        coord_type="ephem",
+        telescope_loc=telescope_loc,
+        telescope_frame=telescope_frame,
+        ellipsoid=selenoid,
+        time_array=astrometry_args["time_array"],
+        coord_epoch=astrometry_args["epoch"],
+        coord_frame=frame,
+    )
+    check_coord = SkyCoord(check_ra, check_dec, unit="rad")
+
+    if telescope_frame == "itrs":
+        app_coord = astrometry_args["app_coord"]
+    else:
+        app_coord = astrometry_args["moon_app_coord"][selenoid]
+    assert np.all(app_coord.separation(check_coord).uarcsec < 1.0)
+
+
+@pytest.mark.parametrize(["telescope_frame", "selenoid"], frame_selenoid)
+def test_calc_app_driftscan(astrometry_args, telescope_frame, selenoid):
+    """
+    Tests that we can calculate app coords for driftscan objects
+    """
+    # Now on to the driftscan, which takes in arguments in terms of az and el (and
+    # the values we've given below should also be for zenith)
+    if telescope_frame == "itrs":
+        telescope_loc = astrometry_args["telescope_loc"]
+    else:
+        telescope_loc = astrometry_args["moon_telescope_loc"]
+
+    check_ra, check_dec = phs_utils.calc_app_coords(
+        lon_coord=0.0,
+        lat_coord=np.pi / 2.0,
+        coord_type="driftscan",
+        telescope_loc=telescope_loc,
+        telescope_frame=telescope_frame,
+        ellipsoid=selenoid,
+        time_array=astrometry_args["time_array"],
+    )
+    check_coord = SkyCoord(check_ra, check_dec, unit="rad")
+    if telescope_frame == "itrs":
+        drift_coord = astrometry_args["drift_coord"]
+    else:
+        drift_coord = astrometry_args["moon_drift_coord"][selenoid]
+
+    assert np.all(drift_coord.separation(check_coord).uarcsec < 1.0)
+
+
+@pytest.mark.parametrize(["telescope_frame", "selenoid"], frame_selenoid)
+def test_calc_app_unprojected(astrometry_args, telescope_frame, selenoid):
+    """
+    Tests that we can calculate app coords for unprojected objects
+    """
+    # Finally, check unprojected, which is forced to point toward zenith (unlike
+    # driftscan, which is allowed to point at any az/el position)
+    if telescope_frame == "itrs":
+        telescope_loc = astrometry_args["telescope_loc"]
+        lst_array = astrometry_args["lst_array"]
+    else:
+        telescope_loc = astrometry_args["moon_telescope_loc"]
+        lst_array = astrometry_args["moon_lst_array"][selenoid]
+
+    check_ra, check_dec = phs_utils.calc_app_coords(
+        lon_coord=None,
+        lat_coord=None,
+        coord_type="unprojected",
+        telescope_loc=telescope_loc,
+        telescope_frame=telescope_frame,
+        ellipsoid=selenoid,
+        time_array=astrometry_args["time_array"],
+        lst_array=lst_array,
+    )
+
+    check_coord = SkyCoord(check_ra, check_dec, unit="rad")
+
+    if telescope_frame == "itrs":
+        drift_coord = astrometry_args["drift_coord"]
+    else:
+        drift_coord = astrometry_args["moon_drift_coord"][selenoid]
+    assert np.all(drift_coord.separation(check_coord).uarcsec < 1.0)
+
+
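+# The two round-trip tests below push a fixed sky position through calc_app_coords
+# and back through calc_sidereal_coords; sub-µas closure indicates that the two
+# directions are mutually consistent in each frame.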
+@pytest.mark.parametrize(["telescope_frame", "selenoid"], frame_selenoid) +def test_calc_app_fk5_roundtrip(astrometry_args, telescope_frame, selenoid): + # Do a round-trip with the two top-level functions and make sure they agree to + # better than 1 µas, first in FK5 + if telescope_frame == "itrs": + telescope_loc = astrometry_args["telescope_loc"] + else: + telescope_loc = astrometry_args["moon_telescope_loc"] + + app_ra, app_dec = phs_utils.calc_app_coords( + lon_coord=0.0, + lat_coord=0.0, + coord_type="sidereal", + telescope_loc=telescope_loc, + telescope_frame=telescope_frame, + ellipsoid=selenoid, + time_array=astrometry_args["time_array"], + coord_frame="fk5", + coord_epoch="J2000.0", + ) + + if telescope_frame == "mcmf": + from spiceypy.utils.exceptions import SpiceUNKNOWNFRAME + + try: + check_ra, check_dec = phs_utils.calc_sidereal_coords( + time_array=astrometry_args["time_array"], + app_ra=app_ra, + app_dec=app_dec, + telescope_loc=telescope_loc, + coord_frame="fk5", + telescope_frame=telescope_frame, + ellipsoid=selenoid, + coord_epoch=2000.0, + ) + except SpiceUNKNOWNFRAME as err: + pytest.skip("SpiceUNKNOWNFRAME error: " + str(err)) + else: + check_ra, check_dec = phs_utils.calc_sidereal_coords( + time_array=astrometry_args["time_array"], + app_ra=app_ra, + app_dec=app_dec, + telescope_loc=telescope_loc, + coord_frame="fk5", + telescope_frame=telescope_frame, + ellipsoid=selenoid, + coord_epoch=2000.0, + ) + + check_coord = SkyCoord(check_ra, check_dec, unit="rad") + assert np.all(SkyCoord(0, 0, unit="rad").separation(check_coord).uarcsec < 1.0) + + if selenoid == "SPHERE": + # check defaults + + app_ra, app_dec = phs_utils.calc_app_coords( + lon_coord=0.0, + lat_coord=0.0, + coord_type="sidereal", + telescope_loc=telescope_loc, + telescope_frame=telescope_frame, + time_array=astrometry_args["time_array"], + coord_frame="fk5", + coord_epoch="J2000.0", + ) + + check_ra, check_dec = phs_utils.calc_sidereal_coords( + time_array=astrometry_args["time_array"], + app_ra=app_ra, + app_dec=app_dec, + telescope_loc=telescope_loc, + coord_frame="fk5", + telescope_frame=telescope_frame, + coord_epoch=2000.0, + ) + check_coord = SkyCoord(check_ra, check_dec, unit="rad") + assert np.all(SkyCoord(0, 0, unit="rad").separation(check_coord).uarcsec < 1.0) + + +@pytest.mark.parametrize(["telescope_frame", "selenoid"], frame_selenoid) +def test_calc_app_fk4_roundtrip(astrometry_args, telescope_frame, selenoid): + # Finally, check and make sure that FK4 performs similarly + if telescope_frame == "itrs": + telescope_loc = astrometry_args["telescope_loc"] + else: + telescope_loc = astrometry_args["moon_telescope_loc"] + + app_ra, app_dec = phs_utils.calc_app_coords( + lon_coord=0.0, + lat_coord=0.0, + coord_type="sidereal", + telescope_loc=telescope_loc, + telescope_frame=telescope_frame, + ellipsoid=selenoid, + time_array=astrometry_args["time_array"], + coord_frame="fk4", + coord_epoch=1950.0, + ) + + check_ra, check_dec = phs_utils.calc_sidereal_coords( + time_array=astrometry_args["time_array"], + app_ra=app_ra, + app_dec=app_dec, + telescope_loc=telescope_loc, + coord_frame="fk4", + telescope_frame=telescope_frame, + ellipsoid=selenoid, + coord_epoch=1950.0, + ) + + check_coord = SkyCoord(check_ra, check_dec, unit="rad") + assert np.all(SkyCoord(0, 0, unit="rad").separation(check_coord).uarcsec < 1.0) + + +@pytest.mark.filterwarnings('ignore:ERFA function "pmsafe" yielded 4 of') +@pytest.mark.filterwarnings('ignore:ERFA function "utcut1" yielded 2 of') 
+@pytest.mark.filterwarnings('ignore:ERFA function "d2dtf" yielded 1 of')
+@pytest.mark.parametrize("use_extra", [True, False])
+def test_astrometry_icrs_to_app(astrometry_args, use_extra):
+    """
+    Check for consistency between astrometry libraries when converting ICRS -> TOPO
+
+    This test checks for consistency in apparent coordinate calculations using the
+    three different libraries that are available to pyuvdata, namely: astropy, pyERFA,
+    and python-novas. Between these three, we expect agreement within 100 µas in
+    most instances, although for pyuvdata we tolerate differences of up to 1 mas since
+    we don't expect to need astrometry better than this.
+    """
+    pytest.importorskip("novas")
+    pytest.importorskip("novas_de405")
+    # Do some basic cross-checking between the different astrometry libraries
+    # to see if they all line up correctly.
+    astrometry_list = ["novas", "erfa", "astropy"]
+    coord_results = [None, None, None, None]
+
+    # These values were independently calculated using erfa v1.7.2, which at the
+    # time of coding agreed to < 1 mas with astropy v4.2.1 and novas 3.1.1.5. We
+    # use those values here as a sort of history check to make sure that something
+    # hasn't changed in the underlying astrometry libraries without being caught
+    precalc_ra = np.array(
+        [2.4736400623737507, 2.4736352750862760, 2.4736085367439893, 2.4734781687162820]
+    )
+    precalc_dec = np.array(
+        [1.2329576409345270, 1.2329556410623417, 1.2329541289890513, 1.2328577308430242]
+    )
+
+    coord_results[3] = (precalc_ra, precalc_dec)
+
+    kwargs = {}
+    extra_args = ["pm_ra", "pm_dec", "vrad", "dist"]
+    if use_extra:
+        for key in extra_args:
+            kwargs[key] = astrometry_args[key]
+    else:
+        # don't compare to precalc if not using extra arguments
+        coord_results = coord_results[:-1]
+
+    for idx, name in enumerate(astrometry_list):
+        coord_results[idx] = phs_utils.transform_icrs_to_app(
+            time_array=astrometry_args["time_array"],
+            ra=astrometry_args["icrs_ra"],
+            dec=astrometry_args["icrs_dec"],
+            telescope_loc=astrometry_args["telescope_loc"],
+            epoch=astrometry_args["epoch"],
+            astrometry_library=name,
+            **kwargs,
+        )
+
+    for idx in range(len(coord_results) - 1):
+        for jdx in range(idx + 1, len(coord_results)):
+            alpha_coord = SkyCoord(
+                coord_results[idx][0], coord_results[idx][1], unit="rad"
+            )
+            beta_coord = SkyCoord(
+                coord_results[jdx][0], coord_results[jdx][1], unit="rad"
+            )
+            assert np.all(alpha_coord.separation(beta_coord).marcsec < 1.0)
+
+
+def test_astrometry_app_to_icrs(astrometry_args):
+    """
+    Check for consistency between astrometry libraries when converting TOPO -> ICRS
+
+    This test checks for consistency between the pyERFA and astropy libraries for
+    converting apparent coords back to ICRS. Between these two, we expect agreement
+    within 100 µas in most instances, although for pyuvdata we tolerate differences of
+    up to 1 mas since we don't expect to need astrometry better than this.
+    """
+    astrometry_list = ["erfa", "astropy"]
+    coord_results = [None, None, None]
+
+    # These values were independently calculated using erfa v1.7.2, which at the
+    # time of coding agreed to < 1 mas with astropy v4.2.1. We again are using
+    # those values here as a sort of history check to make sure that something
+    # hasn't changed in the underlying astrometry libraries without being caught
+    precalc_ra = np.array(
+        [2.4623360300722170, 2.4623407989706756, 2.4623676572008280, 2.4624965192217900]
+    )
+    precalc_dec = np.array(
+        [1.2350407132378372, 1.2350427272595987, 1.2350443204758008, 1.2351412288987034]
+    )
+    coord_results[2] = (precalc_ra, precalc_dec)
+
+    for idx, name in enumerate(astrometry_list):
+        # Note we're using icrs_ra and icrs_dec instead of app_ra and app_dec keys
+        # because the above pre-calculated values were generated using the ICRS
+        # coordinate values
+        coord_results[idx] = phs_utils.transform_app_to_icrs(
+            time_array=astrometry_args["time_array"],
+            app_ra=astrometry_args["icrs_ra"],
+            app_dec=astrometry_args["icrs_dec"],
+            telescope_loc=astrometry_args["telescope_loc"],
+            astrometry_library=name,
+        )
+
+    for idx in range(len(coord_results) - 1):
+        for jdx in range(idx + 1, len(coord_results)):
+            alpha_coord = SkyCoord(
+                coord_results[idx][0], coord_results[idx][1], unit="rad"
+            )
+            beta_coord = SkyCoord(
+                coord_results[jdx][0], coord_results[jdx][1], unit="rad"
+            )
+            assert np.all(alpha_coord.separation(beta_coord).marcsec < 1.0)
+
+
+def test_sidereal_reptime(astrometry_args):
+    """
+    Check for equality when supplying a singleton time versus an array of identical
+    values for transform_sidereal_coords
+    """
+
+    gcrs_ra, gcrs_dec = phs_utils.transform_sidereal_coords(
+        longitude=astrometry_args["icrs_ra"] * np.ones(2),
+        latitude=astrometry_args["icrs_dec"] * np.ones(2),
+        in_coord_frame="icrs",
+        out_coord_frame="gcrs",
+        time_array=Time(astrometry_args["time_array"][0], format="jd"),
+    )
+
+    check_ra, check_dec = phs_utils.transform_sidereal_coords(
+        longitude=astrometry_args["icrs_ra"] * np.ones(2),
+        latitude=astrometry_args["icrs_dec"] * np.ones(2),
+        in_coord_frame="icrs",
+        out_coord_frame="gcrs",
+        time_array=Time(astrometry_args["time_array"][0] * np.ones(2), format="jd"),
+    )
+
+    assert np.all(gcrs_ra == check_ra)
+    assert np.all(gcrs_dec == check_dec)
+
+
+@pytest.mark.parametrize(["telescope_frame", "selenoid"], frame_selenoid)
+def test_transform_icrs_to_app_time_obj(astrometry_args, telescope_frame, selenoid):
+    """
+    Test that we recover identical values when using Time objects instead of floats
+    for the various time-related arguments in transform_icrs_to_app.
+    """
+    if telescope_frame == "itrs":
+        telescope_loc = astrometry_args["telescope_loc"]
+    else:
+        telescope_loc = astrometry_args["moon_telescope_loc"]
+
+    check_ra, check_dec = phs_utils.transform_icrs_to_app(
+        time_array=Time(astrometry_args["time_array"], format="jd"),
+        ra=astrometry_args["icrs_ra"],
+        dec=astrometry_args["icrs_dec"],
+        telescope_loc=telescope_loc,
+        telescope_frame=telescope_frame,
+        ellipsoid=selenoid,
+        epoch=Time(astrometry_args["epoch"], format="jyear"),
+    )
+
+    if telescope_frame == "itrs":
+        app_ra = astrometry_args["app_ra"]
+        app_dec = astrometry_args["app_dec"]
+    else:
+        app_ra = astrometry_args["moon_app_ra"][selenoid]
+        app_dec = astrometry_args["moon_app_dec"][selenoid]
+
+    assert np.all(check_ra == app_ra)
+    assert np.all(check_dec == app_dec)
+
+
+def test_transform_app_to_icrs_objs(astrometry_args):
+    """
+    Test that we recover identical values when using Time/EarthLocation objects instead
+    of floats for time_array and telescope_loc, respectively, for transform_app_to_icrs.
+ """ + telescope_loc = EarthLocation.from_geodetic( + astrometry_args["telescope_loc"][1] * (180.0 / np.pi), + astrometry_args["telescope_loc"][0] * (180.0 / np.pi), + height=astrometry_args["telescope_loc"][2], + ) + + icrs_ra, icrs_dec = phs_utils.transform_app_to_icrs( + time_array=astrometry_args["time_array"][0], + app_ra=astrometry_args["app_ra"][0], + app_dec=astrometry_args["app_dec"][0], + telescope_loc=astrometry_args["telescope_loc"], + ) + + check_ra, check_dec = phs_utils.transform_app_to_icrs( + time_array=Time(astrometry_args["time_array"][0], format="jd"), + app_ra=astrometry_args["app_ra"][0], + app_dec=astrometry_args["app_dec"][0], + telescope_loc=telescope_loc, + ) + + assert np.all(check_ra == icrs_ra) + assert np.all(check_dec == icrs_dec) + + +@pytest.mark.parametrize(["telescope_frame", "selenoid"], frame_selenoid) +def test_calc_app_coords_objs(astrometry_args, telescope_frame, selenoid): + """ + Test that we recover identical values when using Time/EarthLocation objects instead + of floats for time_array and telescope_loc, respectively for calc_app_coords. + """ + if telescope_frame == "itrs": + telescope_loc = EarthLocation.from_geodetic( + astrometry_args["telescope_loc"][1] * (180.0 / np.pi), + astrometry_args["telescope_loc"][0] * (180.0 / np.pi), + height=astrometry_args["telescope_loc"][2], + ) + TimeClass = Time + else: + telescope_loc = MoonLocation.from_selenodetic( + astrometry_args["telescope_loc"][1] * (180.0 / np.pi), + astrometry_args["telescope_loc"][0] * (180.0 / np.pi), + height=astrometry_args["telescope_loc"][2], + ellipsoid=selenoid, + ) + TimeClass = LTime + + app_ra, app_dec = phs_utils.calc_app_coords( + lon_coord=astrometry_args["icrs_ra"], + lat_coord=astrometry_args["icrs_dec"], + time_array=astrometry_args["time_array"][0], + telescope_loc=astrometry_args["telescope_loc"], + telescope_frame=telescope_frame, + ellipsoid=selenoid, + ) + + check_ra, check_dec = phs_utils.calc_app_coords( + lon_coord=astrometry_args["icrs_ra"], + lat_coord=astrometry_args["icrs_dec"], + time_array=TimeClass(astrometry_args["time_array"][0], format="jd"), + telescope_loc=telescope_loc, + telescope_frame=telescope_frame, + ellipsoid=selenoid, + ) + + assert np.all(check_ra == app_ra) + assert np.all(check_dec == app_dec) + + +def test_phasing_funcs(): + # these tests are based on a notebook where I tested against the mwa_tools + # phasing code + ra_hrs = 12.1 + dec_degs = -42.3 + mjd = 55780.1 + + array_center_xyz = np.array([-2559454.08, 5095372.14, -2849057.18]) + lat_lon_alt = utils.LatLonAlt_from_XYZ(array_center_xyz) + + obs_time = Time(mjd, format="mjd", location=(lat_lon_alt[1], lat_lon_alt[0])) + + icrs_coord = SkyCoord( + ra=Angle(ra_hrs, unit="hr"), dec=Angle(dec_degs, unit="deg"), obstime=obs_time + ) + gcrs_coord = icrs_coord.transform_to("gcrs") + + # in east/north/up frame (relative to array center) in meters: (Nants, 3) + ants_enu = np.array([-101.94, 156.41, 1.24]) + + ant_xyz_abs = utils.ECEF_from_ENU( + ants_enu, + latitude=lat_lon_alt[0], + longitude=lat_lon_alt[1], + altitude=lat_lon_alt[2], + ) + + array_center_coord = SkyCoord( + x=array_center_xyz[0] * units.m, + y=array_center_xyz[1] * units.m, + z=array_center_xyz[2] * units.m, + frame="itrs", + obstime=obs_time, + ) + + itrs_coord = SkyCoord( + x=ant_xyz_abs[0] * units.m, + y=ant_xyz_abs[1] * units.m, + z=ant_xyz_abs[2] * units.m, + frame="itrs", + obstime=obs_time, + ) + + gcrs_array_center = array_center_coord.transform_to("gcrs") + gcrs_from_itrs_coord = 
itrs_coord.transform_to("gcrs") + + gcrs_rel = ( + (gcrs_from_itrs_coord.cartesian - gcrs_array_center.cartesian).get_xyz().T + ) + + gcrs_uvw = phs_utils.old_uvw_calc( + gcrs_coord.ra.rad, gcrs_coord.dec.rad, gcrs_rel.value + ) + + mwa_tools_calcuvw_u = -97.122828 + mwa_tools_calcuvw_v = 50.388281 + mwa_tools_calcuvw_w = -151.27976 + + np.testing.assert_allclose(gcrs_uvw[0, 0], mwa_tools_calcuvw_u, atol=1e-3) + np.testing.assert_allclose(gcrs_uvw[0, 1], mwa_tools_calcuvw_v, atol=1e-3) + np.testing.assert_allclose(gcrs_uvw[0, 2], mwa_tools_calcuvw_w, atol=1e-3) + + # also test unphasing + temp2 = phs_utils.undo_old_uvw_calc( + gcrs_coord.ra.rad, gcrs_coord.dec.rad, np.squeeze(gcrs_uvw) + ) + np.testing.assert_allclose(gcrs_rel.value, np.squeeze(temp2)) + + +def test_calc_app_coords_time_obj(): + # Generate ra/dec of zenith at time in the phase_frame coordinate system + # to use for phasing + telescope_location = EarthLocation.from_geodetic(lon=0, lat=1 * units.rad) + + # JD is arbitrary + jd = 2454600 + + zenith_coord = SkyCoord( + alt=90 * units.deg, + az=0 * units.deg, + obstime=Time(jd, format="jd"), + frame="altaz", + location=telescope_location, + ) + zenith_coord = zenith_coord.transform_to("icrs") + + obstime = Time(jd + (np.array([-1, 0, 1]) / 24.0), format="jd") + + ra = zenith_coord.ra.to_value("rad") + dec = zenith_coord.dec.to_value("rad") + app_ra_to, app_dec_to = phs_utils.calc_app_coords( + lon_coord=ra, + lat_coord=dec, + time_array=obstime, + telescope_loc=telescope_location, + ) + + app_ra_nto, app_dec_nto = phs_utils.calc_app_coords( + lon_coord=ra, + lat_coord=dec, + time_array=obstime.utc.jd, + telescope_loc=telescope_location, + ) + + np.testing.assert_allclose(app_ra_to, app_ra_nto) + np.testing.assert_allclose(app_dec_to, app_dec_nto) + + +@pytest.mark.skipif(hasmoon, reason="lunarsky installed") +def test_uvw_track_generator_errs(): + with pytest.raises( + ValueError, match="Need to install `lunarsky` package to work with MCMF frame." 
+ ): + utils.uvw_track_generator(telescope_loc=(0, 0, 0), telescope_frame="MCMF") + + +@pytest.mark.parametrize("flip_u", [False, True]) +@pytest.mark.parametrize("use_uvw", [False, True]) +@pytest.mark.parametrize("use_earthloc", [False, True]) +@pytest.mark.filterwarnings("ignore:The lst_array is not self-consistent") +@pytest.mark.filterwarnings("ignore:> 25 ms errors detected reading in LST values") +def test_uvw_track_generator(flip_u, use_uvw, use_earthloc): + sma_mir = UVData.from_file(os.path.join(DATA_PATH, "sma_test.mir")) + sma_mir.set_lsts_from_time_array() + sma_mir._set_app_coords_helper() + sma_mir.set_uvws_from_antenna_positions() + if not use_uvw: + # Just subselect the antennas in the dataset + sma_mir.telescope.antenna_positions = sma_mir.telescope.antenna_positions[ + [0, 3], : + ] + + if use_earthloc: + telescope_loc = EarthLocation.from_geodetic( + lon=sma_mir.telescope.location_lat_lon_alt_degrees[1], + lat=sma_mir.telescope.location_lat_lon_alt_degrees[0], + height=sma_mir.telescope.location_lat_lon_alt_degrees[2], + ) + else: + telescope_loc = sma_mir.telescope.location_lat_lon_alt_degrees + + if use_uvw: + sma_copy = sma_mir.copy() + sma_copy.unproject_phase() + uvw_array = sma_copy.uvw_array + else: + uvw_array = None + + cat_dict = sma_mir.phase_center_catalog[1] + gen_results = utils.uvw_track_generator( + lon_coord=cat_dict["cat_lon"], + lat_coord=cat_dict["cat_lat"], + coord_frame=cat_dict["cat_frame"], + coord_epoch=cat_dict["cat_epoch"], + telescope_loc=telescope_loc, + time_array=sma_mir.time_array if use_uvw else sma_mir.time_array[0], + antenna_positions=( + sma_mir.telescope.antenna_positions if uvw_array is None else None + ), + force_postive_u=flip_u, + uvw_array=uvw_array, + ) + + assert sma_mir._phase_center_app_ra.compare_value(gen_results["app_ra"]) + assert sma_mir._phase_center_app_dec.compare_value(gen_results["app_dec"]) + assert sma_mir._phase_center_frame_pa.compare_value(gen_results["frame_pa"]) + assert sma_mir._lst_array.compare_value(gen_results["lst"]) + if flip_u: + assert sma_mir._uvw_array.compare_value(-gen_results["uvw"]) + else: + assert sma_mir._uvw_array.compare_value(gen_results["uvw"]) + + +@pytest.mark.skipif(not hasmoon, reason="lunarsky not installed") +@pytest.mark.parametrize("selenoid", ["SPHERE", "GSFC", "GRAIL23", "CE-1-LAM-GEO"]) +def test_uvw_track_generator_moon(selenoid): + # Note this isn't a particularly deep test, but it at least exercises the code. 
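+    # The lunar frame transformations rely on SPICE kernels that may be missing in
+    # some environments, so an unknown-frame error skips the test rather than
+    # failing it outright.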
+    from spiceypy.utils.exceptions import SpiceUNKNOWNFRAME
+
+    try:
+        gen_results = utils.uvw_track_generator(
+            lon_coord=0.0,
+            lat_coord=0.0,
+            coord_frame="icrs",
+            telescope_loc=(0, 0, 0),
+            time_array=2456789.0,
+            antenna_positions=np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
+            telescope_frame="mcmf",
+            ellipsoid=selenoid,
+        )
+    except SpiceUNKNOWNFRAME as err:
+        pytest.skip("SpiceUNKNOWNFRAME error: " + str(err))
+
+    # Check that the squared baseline lengths all match 2 (the antennas sit at unit
+    # vectors, so each baseline has length sqrt(2))
+    np.testing.assert_allclose((gen_results["uvw"] ** 2.0).sum(1), 2.0)
+
+    if selenoid == "SPHERE":
+        # check defaults
+        gen_results = utils.uvw_track_generator(
+            lon_coord=0.0,
+            lat_coord=0.0,
+            coord_frame="icrs",
+            telescope_loc=(0, 0, 0),
+            time_array=2456789.0,
+            antenna_positions=np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
+            telescope_frame="mcmf",
+        )
+
+        # Check that the squared baseline lengths all match 2
+        np.testing.assert_allclose((gen_results["uvw"] ** 2.0).sum(1), 2.0)
diff --git a/tests/utils/test_pol.py b/tests/utils/test_pol.py
new file mode 100644
index 0000000000..be9e2431bc
--- /dev/null
+++ b/tests/utils/test_pol.py
@@ -0,0 +1,235 @@
+# -*- mode: python; coding: utf-8 -*-
+# Copyright (c) 2024 Radio Astronomy Software Group
+# Licensed under the 2-clause BSD License
+"""Tests for polarization utility functions."""
+
+import numpy as np
+import pytest
+
+from pyuvdata import utils
+from pyuvdata.testing import check_warnings
+
+
+def test_pol_funcs():
+    """Test utility functions to convert between polarization strings and numbers"""
+
+    pol_nums = [-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4]
+    pol_str = ["yx", "xy", "yy", "xx", "lr", "rl", "ll", "rr", "pI", "pQ", "pU", "pV"]
+    assert pol_nums == utils.polstr2num(pol_str)
+    assert pol_str == utils.polnum2str(pol_nums)
+    # Check individuals
+    assert -6 == utils.polstr2num("YY")
+    assert "pV" == utils.polnum2str(4)
+    # Check errors
+    pytest.raises(KeyError, utils.polstr2num, "foo")
+    pytest.raises(ValueError, utils.polstr2num, 1)
+    pytest.raises(ValueError, utils.polnum2str, 7.3)
+    # Check parse
+    assert utils.parse_polstr("xX") == "xx"
+    assert utils.parse_polstr("XX") == "xx"
+    assert utils.parse_polstr("i") == "pI"
+
+
+def test_pol_funcs_x_orientation():
+    """Test functions to convert between pol strings and numbers with x_orientation."""
+
+    pol_nums = [-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4]
+
+    x_orient1 = "e"
+    pol_str = ["ne", "en", "nn", "ee", "lr", "rl", "ll", "rr", "pI", "pQ", "pU", "pV"]
+    assert pol_nums == utils.polstr2num(pol_str, x_orientation=x_orient1)
+    assert pol_str == utils.polnum2str(pol_nums, x_orientation=x_orient1)
+    # Check individuals
+    assert -6 == utils.polstr2num("NN", x_orientation=x_orient1)
+    assert "pV" == utils.polnum2str(4)
+    # Check errors
+    pytest.raises(KeyError, utils.polstr2num, "foo", x_orientation=x_orient1)
+    pytest.raises(ValueError, utils.polstr2num, 1, x_orientation=x_orient1)
+    pytest.raises(ValueError, utils.polnum2str, 7.3, x_orientation=x_orient1)
+    # Check parse
+    assert utils.parse_polstr("eE", x_orientation=x_orient1) == "ee"
+    assert utils.parse_polstr("xx", x_orientation=x_orient1) == "ee"
+    assert utils.parse_polstr("NN", x_orientation=x_orient1) == "nn"
+    assert utils.parse_polstr("yy", x_orientation=x_orient1) == "nn"
+    assert utils.parse_polstr("i", x_orientation=x_orient1) == "pI"
+
+    x_orient2 = "n"
+    pol_str = ["en", "ne", "ee", "nn", "lr", "rl", "ll", "rr", "pI", "pQ", "pU", "pV"]
+    assert pol_nums == utils.polstr2num(pol_str, x_orientation=x_orient2)
+    assert pol_str == utils.polnum2str(pol_nums, x_orientation=x_orient2)
+    # Check individuals
+    assert -6 == utils.polstr2num("EE", x_orientation=x_orient2)
+    assert "pV" == utils.polnum2str(4)
+    # Check errors
+    pytest.raises(KeyError, utils.polstr2num, "foo", x_orientation=x_orient2)
+    pytest.raises(ValueError, utils.polstr2num, 1, x_orientation=x_orient2)
+    pytest.raises(ValueError, utils.polnum2str, 7.3, x_orientation=x_orient2)
+    # Check parse
+    assert utils.parse_polstr("nN", x_orientation=x_orient2) == "nn"
+    assert utils.parse_polstr("xx", x_orientation=x_orient2) == "nn"
+    assert utils.parse_polstr("EE", x_orientation=x_orient2) == "ee"
+    assert utils.parse_polstr("yy", x_orientation=x_orient2) == "ee"
+    assert utils.parse_polstr("i", x_orientation=x_orient2) == "pI"
+
+    # check warnings for non-recognized x_orientation
+    with check_warnings(UserWarning, "x_orientation not recognized"):
+        assert utils.polstr2num("xx", x_orientation="foo") == -5
+
+    with check_warnings(UserWarning, "x_orientation not recognized"):
+        assert utils.polnum2str(-6, x_orientation="foo") == "yy"
+
+
+def test_jones_num_funcs():
+    """Test functions to convert between Jones polarization strings and numbers."""
+
+    jnums = [-8, -7, -6, -5, -4, -3, -2, -1]
+    jstr = ["Jyx", "Jxy", "Jyy", "Jxx", "Jlr", "Jrl", "Jll", "Jrr"]
+    assert jnums == utils.jstr2num(jstr)
+    assert jstr == utils.jnum2str(jnums)
+    # Check shorthands
+    jstr = ["yx", "xy", "yy", "y", "xx", "x", "lr", "rl", "ll", "l", "rr", "r"]
+    jnums = [-8, -7, -6, -6, -5, -5, -4, -3, -2, -2, -1, -1]
+    assert jnums == utils.jstr2num(jstr)
+    # Check individuals
+    assert -6 == utils.jstr2num("jyy")
+    assert "Jxy" == utils.jnum2str(-7)
+    # Check errors
+    pytest.raises(KeyError, utils.jstr2num, "foo")
+    pytest.raises(ValueError, utils.jstr2num, 1)
+    pytest.raises(ValueError, utils.jnum2str, 7.3)
+
+    # check parse method
+    assert utils.pol.parse_jpolstr("x") == "Jxx"
+    assert utils.pol.parse_jpolstr("xy") == "Jxy"
+    assert utils.pol.parse_jpolstr("XY") == "Jxy"
+
+
+def test_jones_num_funcs_x_orientation():
+    """Test functions to convert Jones pol strings and numbers with x_orientation."""
+
+    jnums = [-8, -7, -6, -5, -4, -3, -2, -1]
+    x_orient1 = "east"
+    jstr = ["Jne", "Jen", "Jnn", "Jee", "Jlr", "Jrl", "Jll", "Jrr"]
+    assert jnums == utils.jstr2num(jstr, x_orientation=x_orient1)
+    assert jstr == utils.jnum2str(jnums, x_orientation=x_orient1)
+    # Check shorthands
+    jstr = ["ne", "en", "nn", "n", "ee", "e", "lr", "rl", "ll", "l", "rr", "r"]
+    jnums = [-8, -7, -6, -6, -5, -5, -4, -3, -2, -2, -1, -1]
+    assert jnums == utils.jstr2num(jstr, x_orientation=x_orient1)
+    # Check individuals
+    assert -6 == utils.jstr2num("jnn", x_orientation=x_orient1)
+    assert "Jen" == utils.jnum2str(-7, x_orientation=x_orient1)
+    # Check errors
+    pytest.raises(KeyError, utils.jstr2num, "foo", x_orientation=x_orient1)
+    pytest.raises(ValueError, utils.jstr2num, 1, x_orientation=x_orient1)
+    pytest.raises(ValueError, utils.jnum2str, 7.3, x_orientation=x_orient1)
+
+    # check parse method
+    assert utils.pol.parse_jpolstr("e", x_orientation=x_orient1) == "Jee"
+    assert utils.pol.parse_jpolstr("x", x_orientation=x_orient1) == "Jee"
+    assert utils.pol.parse_jpolstr("y", x_orientation=x_orient1) == "Jnn"
+    assert utils.pol.parse_jpolstr("en", x_orientation=x_orient1) == "Jen"
+    assert utils.pol.parse_jpolstr("NE", x_orientation=x_orient1) == "Jne"
+
+    jnums = [-8, -7, -6, -5, -4, -3, -2, -1]
+    x_orient2 = "north"
+    jstr = ["Jen", "Jne", "Jee", "Jnn", "Jlr", "Jrl", "Jll", "Jrr"]
+    assert jnums == utils.jstr2num(jstr, x_orientation=x_orient2)
+    assert jstr == utils.jnum2str(jnums, x_orientation=x_orient2)
+    # Check shorthands
+    jstr = ["en", "ne", "ee", "e", "nn", "n", "lr", "rl", "ll", "l", "rr", "r"]
+    jnums = [-8, -7, -6, -6, -5, -5, -4, -3, -2, -2, -1, -1]
+    assert jnums == utils.jstr2num(jstr, x_orientation=x_orient2)
+    # Check individuals
+    assert -6 == utils.jstr2num("jee", x_orientation=x_orient2)
+    assert "Jne" == utils.jnum2str(-7, x_orientation=x_orient2)
+    # Check errors
+    pytest.raises(KeyError, utils.jstr2num, "foo", x_orientation=x_orient2)
+    pytest.raises(ValueError, utils.jstr2num, 1, x_orientation=x_orient2)
+    pytest.raises(ValueError, utils.jnum2str, 7.3, x_orientation=x_orient2)
+
+    # check parse method
+    assert utils.pol.parse_jpolstr("e", x_orientation=x_orient2) == "Jee"
+    assert utils.pol.parse_jpolstr("x", x_orientation=x_orient2) == "Jnn"
+    assert utils.pol.parse_jpolstr("y", x_orientation=x_orient2) == "Jee"
+    assert utils.pol.parse_jpolstr("en", x_orientation=x_orient2) == "Jen"
+    assert utils.pol.parse_jpolstr("NE", x_orientation=x_orient2) == "Jne"
+
+    # check warnings for non-recognized x_orientation
+    with check_warnings(UserWarning, "x_orientation not recognized"):
+        assert utils.jstr2num("x", x_orientation="foo") == -5
+
+    with check_warnings(UserWarning, "x_orientation not recognized"):
+        assert utils.jnum2str(-6, x_orientation="foo") == "Jyy"
+
+
+def test_conj_pol():
+    """Test function to conjugate pols"""
+
+    pol_nums = [-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4]
+    cpol_nums = [-7, -8, -6, -5, -3, -4, -2, -1, 1, 2, 3, 4]
+    assert pol_nums == utils.conj_pol(cpol_nums)
+    assert utils.conj_pol(pol_nums) == cpol_nums
+    # fmt: off
+    pol_str = ['yx', 'xy', 'yy', 'xx', 'ee', 'nn', 'en', 'ne', 'lr', 'rl', 'll',
+               'rr', 'pI', 'pQ', 'pU', 'pV']
+    cpol_str = ['xy', 'yx', 'yy', 'xx', 'ee', 'nn', 'ne', 'en', 'rl', 'lr', 'll',
+                'rr', 'pI', 'pQ', 'pU', 'pV']
+    # fmt: on
+    assert pol_str == utils.conj_pol(cpol_str)
+    assert utils.conj_pol(pol_str) == cpol_str
+    assert [pol_str, pol_nums] == utils.conj_pol([cpol_str, cpol_nums])
+
+    # Test that Jones strings are rejected
+    cjstr = ["Jxy", "Jyx", "Jyy", "Jxx", "Jrl", "Jlr", "Jll", "Jrr"]
+    pytest.raises(KeyError, utils.conj_pol, cjstr)
+
+    # Test invalid pol
+    with pytest.raises(
+        ValueError, match="Polarization not recognized, cannot be conjugated."
+ ): + utils.conj_pol(2.3) + + +def test_reorder_conj_pols_non_list(): + pytest.raises(ValueError, utils.pol.reorder_conj_pols, 4) + + +def test_reorder_conj_pols_strings(): + pols = ["xx", "xy", "yx"] + corder = utils.pol.reorder_conj_pols(pols) + assert np.array_equal(corder, [0, 2, 1]) + + +def test_reorder_conj_pols_ints(): + pols = [-5, -7, -8] # 'xx', 'xy', 'yx' + corder = utils.pol.reorder_conj_pols(pols) + assert np.array_equal(corder, [0, 2, 1]) + + +def test_reorder_conj_pols_missing_conj(): + pols = ["xx", "xy"] # Missing 'yx' + pytest.raises(ValueError, utils.pol.reorder_conj_pols, pols) + + +def test_determine_pol_order_err(): + with pytest.raises(ValueError, match='order must be either "AIPS" or "CASA".'): + utils.pol.determine_pol_order([], order="ABC") + + +@pytest.mark.parametrize( + "pols,aips_order,casa_order", + [ + [[-8, -7, -6, -5], [3, 2, 1, 0], [3, 1, 0, 2]], + [[-5, -6, -7, -8], [0, 1, 2, 3], [0, 2, 3, 1]], + [[1, 2, 3, 4], [0, 1, 2, 3], [0, 1, 2, 3]], + ], +) +@pytest.mark.parametrize("order", ["CASA", "AIPS"]) +def test_pol_order(pols, aips_order, casa_order, order): + check = utils.pol.determine_pol_order(pols, order=order) + + if order == "CASA": + assert all(check == casa_order) + if order == "AIPS": + assert all(check == aips_order) diff --git a/tests/utils/test_ps_cat.py b/tests/utils/test_ps_cat.py new file mode 100644 index 0000000000..7e0c94010f --- /dev/null +++ b/tests/utils/test_ps_cat.py @@ -0,0 +1,16 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Tests for phase center catalog utility functions.""" + +import pytest + +import pyuvdata.utils.ps_cat as ps_cat_utils + + +def test_generate_new_phase_center_id_errs(): + with pytest.raises(ValueError, match="Cannot specify old_id if no catalog"): + ps_cat_utils.generate_new_phase_center_id(old_id=1) + + with pytest.raises(ValueError, match="Provided cat_id was found in reserved_ids"): + ps_cat_utils.generate_new_phase_center_id(cat_id=1, reserved_ids=[1, 2, 3]) diff --git a/tests/utils/test_redundancy.py b/tests/utils/test_redundancy.py new file mode 100644 index 0000000000..a8fb1d382d --- /dev/null +++ b/tests/utils/test_redundancy.py @@ -0,0 +1,372 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Tests for baseline redundancy utility functions.""" +import copy +import os +import re + +import numpy as np +import pytest + +import pyuvdata.utils.redundancy as red_utils +from pyuvdata import UVData, utils +from pyuvdata.data import DATA_PATH +from pyuvdata.testing import check_warnings + + +@pytest.mark.parametrize("grid_alg", [True, False, None]) +def test_redundancy_finder(grid_alg): + """ + Check that get_baseline_redundancies and get_antenna_redundancies return consistent + redundant groups for a test file with the HERA19 layout. 
+ """ + uvd = UVData() + uvd.read_uvfits( + os.path.join(DATA_PATH, "fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits") + ) + + uvd.select(times=uvd.time_array[0]) + uvd.unproject_phase(use_ant_pos=True) + # uvw_array is now equivalent to baseline positions + uvd.conjugate_bls("ant1 dict[str, Any]: @pytest.fixture def lunar_simple_params() -> dict[str, Any]: pytest.importorskip("lunarsky") - from pyuvdata.utils import MoonLocation + from pyuvdata.utils.coordinates import MoonLocation return { "freq_array": np.linspace(1e8, 2e8, 100), diff --git a/tests/uvdata/test_miriad.py b/tests/uvdata/test_miriad.py index 4730edeb2d..e65b65c708 100644 --- a/tests/uvdata/test_miriad.py +++ b/tests/uvdata/test_miriad.py @@ -30,8 +30,7 @@ from astropy.coordinates import Angle from astropy.time import Time, TimeDelta -import pyuvdata.utils as uvutils -from pyuvdata import UVData +from pyuvdata import UVData, utils from pyuvdata.data import DATA_PATH from pyuvdata.testing import check_warnings from pyuvdata.uvdata.miriad import Miriad @@ -567,7 +566,7 @@ def test_miriad_location_handling(paper_miriad_main, tmp_path): antpos_length = np.sqrt(np.sum(np.abs(rel_ecef_antpos) ** 2, axis=1)) ecef_antpos = rel_ecef_antpos + uv_in.telescope._location.xyz() - antpos = uvutils.rotECEF_from_ECEF(ecef_antpos, uv_in.telescope.location.lon.rad) + antpos = utils.rotECEF_from_ECEF(ecef_antpos, uv_in.telescope.location.lon.rad) # zero out bad locations (these are checked on read) antpos[np.where(antpos_length == 0), :] = [0, 0, 0] @@ -726,10 +725,10 @@ def test_miriad_location_handling(paper_miriad_main, tmp_path): good_antpos = np.where(antpos_length > 0)[0] rot_ants = good_antpos[: len(good_antpos) // 2] - rot_antpos = uvutils.rotECEF_from_ECEF( + rot_antpos = utils.rotECEF_from_ECEF( ecef_antpos[rot_ants, :], uv_in.telescope.location.lon.rad + np.pi ) - modified_antpos = uvutils.rotECEF_from_ECEF( + modified_antpos = utils.rotECEF_from_ECEF( ecef_antpos, uv_in.telescope.location.lon.rad ) modified_antpos[rot_ants, :] = rot_antpos @@ -862,7 +861,9 @@ def test_loop_multi_phase(tmp_path, paper_miriad, frame): # without the "phsframe" variable, the unprojected phase center gets interpreted as # an ephem type phase center. - zen_id, _ = uvutils.look_in_catalog(uv3.phase_center_catalog, cat_name="zenith") + zen_id, _ = utils.ps_cat.look_in_catalog( + uv3.phase_center_catalog, cat_name="zenith" + ) new_id = uv3._add_phase_center(cat_name="zenith", cat_type="unprojected") uv3.phase_center_id_array[np.nonzero(uv3.phase_center_id_array == zen_id)] = new_id uv3._clear_unused_phase_centers() @@ -902,7 +903,7 @@ def test_miriad_only_itrs(tmp_path, paper_miriad): uv_in.telescope.location = MoonLocation.from_selenodetic( lat=latitude * units.rad, lon=longitude * units.rad, height=altitude * units.m ) - new_full_antpos = uvutils.ECEF_from_ENU( + new_full_antpos = utils.ECEF_from_ENU( enu=enu_antpos, center_loc=uv_in.telescope.location ) @@ -1934,7 +1935,7 @@ def test_multi_files(casa_uvfits, tmp_path): uv1.read([testfile1, testfile2], file_type="miriad") # Check history is correct, before replacing and doing a full object check - assert uvutils._check_histories( + assert utils.helpers._check_histories( uv_full.history + " Downselected to " "specific frequencies using pyuvdata. 
" "Combined data along frequency axis using" @@ -1960,7 +1961,7 @@ def test_multi_files(casa_uvfits, tmp_path): uv1 = UVData() uv1.read([testfile1, testfile2], axis="freq") # Check history is correct, before replacing and doing a full object check - assert uvutils._check_histories( + assert utils.helpers._check_histories( uv_full.history + " Downselected to " "specific frequencies using pyuvdata. " "Combined data along frequency axis using" @@ -1995,7 +1996,7 @@ def test_antpos_units(casa_uvfits, tmp_path): aantpos = auv["antpos"].reshape(3, -1).T * const.c.to("m/ns").value aantpos = aantpos[uv.telescope.antenna_numbers, :] aantpos = ( - uvutils.ECEF_from_rotECEF(aantpos, uv.telescope.location.lon.rad) + utils.ECEF_from_rotECEF(aantpos, uv.telescope.location.lon.rad) - uv.telescope._location.xyz() ) assert np.allclose(aantpos, uv.telescope.antenna_positions) @@ -2020,7 +2021,7 @@ def test_readmiriad_write_miriad_check_time_format(tmp_path): t1 = Time(uv["time"], format="jd", location=uvd.telescope.location) dt = TimeDelta(uv["inttime"] / 2, format="sec") t2 = t1 + dt - lsts = uvutils.get_lst_for_time( + lsts = utils.get_lst_for_time( np.array([t1.jd, t2.jd]), telescope_loc=uvd.telescope.location ) delta_lst = lsts[1] - lsts[0] diff --git a/tests/uvdata/test_ms.py b/tests/uvdata/test_ms.py index caf90b79d9..6ab1ff7133 100644 --- a/tests/uvdata/test_ms.py +++ b/tests/uvdata/test_ms.py @@ -12,12 +12,11 @@ import pytest from astropy.time import Time -from pyuvdata import UVData -from pyuvdata import utils as uvutils +from pyuvdata import UVData, utils from pyuvdata.data import DATA_PATH from pyuvdata.testing import check_warnings -from ..test_utils import frame_selenoid, hasmoon +from ..utils.test_coordinates import frame_selenoid, hasmoon pytest.importorskip("casacore") @@ -127,7 +126,7 @@ def test_read_nrao_loopback(tmp_path, nrao_uv, telescope_frame, selenoid, del_te height=uvobj.telescope.location.height, ellipsoid=selenoid, ) - new_full_antpos = uvutils.ECEF_from_ENU( + new_full_antpos = utils.ECEF_from_ENU( enu=enu_antpos, center_loc=uvobj.telescope.location ) uvobj.telescope.antenna_positions = ( @@ -679,7 +678,7 @@ def test_ms_scannumber_multiphasecenter(tmp_path, multi_frame): miriad_uv._set_app_coords_helper() if multi_frame: - cat_id = uvutils.look_for_name(miriad_uv.phase_center_catalog, "NOISE") + cat_id = utils.ps_cat.look_for_name(miriad_uv.phase_center_catalog, "NOISE") ra_use = miriad_uv.phase_center_catalog[cat_id[0]]["cat_lon"][0] dec_use = miriad_uv.phase_center_catalog[cat_id[0]]["cat_lat"][0] with pytest.raises( diff --git a/tests/uvdata/test_uvdata.py b/tests/uvdata/test_uvdata.py index f79f8fedbb..2dcb117190 100644 --- a/tests/uvdata/test_uvdata.py +++ b/tests/uvdata/test_uvdata.py @@ -20,12 +20,12 @@ from astropy.time import Time from astropy.utils import iers -import pyuvdata.utils as uvutils -from pyuvdata import UVCal, UVData +from pyuvdata import UVCal, UVData, utils from pyuvdata.data import DATA_PATH from pyuvdata.testing import check_warnings +from pyuvdata.utils import helpers -from ..test_utils import frame_selenoid +from ..utils.test_coordinates import frame_selenoid from .test_mwa_corr_fits import filelist as mwa_corr_files try: @@ -933,7 +933,7 @@ def test_phase_unphase_hera_antpos(hera_uvh5): # check that they match if you phase & unphase using antenna locations # first replace the uvws with the right values lat, lon, alt = uv_raw.telescope.location_lat_lon_alt - antenna_enu = uvutils.ENU_from_ECEF( + antenna_enu = utils.ENU_from_ECEF( 
(uv_raw.telescope.antenna_positions + uv_raw.telescope._location.xyz()), center_loc=uv_raw.telescope.location, ) @@ -1026,7 +1026,7 @@ def test_phase_to_time(casa_uvfits, telescope_frame, selenoid): height=uv_in.telescope.location.height, ellipsoid=selenoid, ) - new_full_antpos = uvutils.ECEF_from_ENU( + new_full_antpos = utils.ECEF_from_ENU( enu=enu_antpos, center_loc=uv_in.telescope.location ) uv_in.telescope.antenna_positions = ( @@ -1035,7 +1035,7 @@ def test_phase_to_time(casa_uvfits, telescope_frame, selenoid): uv_in.set_lsts_from_time_array() uv_in.check() - zenith_coord = uvutils.LunarSkyCoord( + zenith_coord = utils.phasing.LunarSkyCoord( alt=Angle(90 * units.deg), az=Angle(0 * units.deg), obstime=phase_time, @@ -1345,9 +1345,9 @@ def test_select_blts(paper_uvh5): assert len(blt_inds) == uv_object2.Nblts # verify that histories are different - assert not uvutils._check_histories(old_history, uv_object2.history) + assert not helpers._check_histories(old_history, uv_object2.history) - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific baseline-times using pyuvdata.", uv_object2.history, ) @@ -1360,7 +1360,7 @@ def test_select_blts(paper_uvh5): assert len(blt_inds) == uv_object2.Nblts - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific baseline-times using pyuvdata.", uv_object2.history, ) @@ -1399,7 +1399,7 @@ def test_select_phase_center_id(tmp_path, carma_miriad): uv2 = uv_obj.select(phase_center_ids=[1, 2], inplace=False) uv_sum = uv1 + uv2 - assert uvutils._check_histories( + assert helpers._check_histories( uv_obj.history + " Downselected to specific phase center IDs using pyuvdata. " "Combined data along baseline-time axis using pyuvdata.", uv_sum.history, @@ -1433,7 +1433,7 @@ def test_select_phase_center_id_blts(carma_miriad): ) uv_sum = uv1 + uv2 + uv3 - assert uvutils._check_histories( + assert helpers._check_histories( uv_obj.history + " Downselected to specific baseline-times, phase center IDs using pyuvdata. " "Combined data along baseline-time axis using pyuvdata. 
" @@ -1477,7 +1477,7 @@ def test_select_antennas(casa_uvfits): ): assert ant in ants_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific antennas using pyuvdata.", uv_object2.history, ) @@ -1495,7 +1495,7 @@ def test_select_antennas(casa_uvfits): ): assert ant in ants_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific antennas using pyuvdata.", uv_object2.history, ) @@ -1614,7 +1614,7 @@ def test_select_bls(casa_uvfits): for pair in sorted_pairs_object2: assert pair in sorted_pairs_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific baselines using pyuvdata.", uv_object2.history, ) @@ -1643,7 +1643,7 @@ def test_select_bls(casa_uvfits): for pair in sorted_pairs_object3: assert pair in sorted_pairs_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific baselines using pyuvdata.", uv_object3.history, ) @@ -1682,7 +1682,7 @@ def test_select_bls(casa_uvfits): for bl in sorted_pairs_object2: assert bl in sorted_bls_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific baselines, polarizations using pyuvdata.", uv_object2.history, @@ -1711,7 +1711,7 @@ def test_select_bls(casa_uvfits): for pair in sorted_pairs_object2: assert pair in sorted_pairs_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific baselines using pyuvdata.", uv_object2.history, ) @@ -1796,7 +1796,7 @@ def test_select_times(casa_uvfits): for t in np.unique(uv_object2.time_array): assert t in times_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific times using pyuvdata.", uv_object2.history, ) @@ -1811,7 +1811,7 @@ def test_select_times(casa_uvfits): for t in np.unique(uv_object2.time_array): assert t in times_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific times using pyuvdata.", uv_object2.history, ) @@ -1850,7 +1850,7 @@ def test_select_time_range(casa_uvfits): for t in np.unique(uv_object2.time_array): assert t in times_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific times using pyuvdata.", uv_object2.history, ) @@ -1878,7 +1878,7 @@ def test_select_lsts(casa_uvfits, tmp_path): for lst in np.unique(uv_object2.lst_array): assert lst in lsts_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific lsts using pyuvdata.", uv_object2.history, ) @@ -1893,7 +1893,7 @@ def test_select_lsts(casa_uvfits, tmp_path): for lst in np.unique(uv_object2.lst_array): assert lst in lsts_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific lsts using pyuvdata.", uv_object2.history, ) @@ -2037,7 +2037,7 @@ def test_select_lsts_multi_day(casa_uvfits): unique_jds = np.unique(np.asarray(uv_object2.time_array, dtype=np.int_)) assert len(unique_jds) == 2 - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific lsts using pyuvdata.", uv_object2.history, ) @@ -2146,7 +2146,7 @@ def test_select_lst_range(casa_uvfits, tmp_path): for lst in np.unique(uv_object2.lst_array): assert lst 
in lsts_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific lsts using pyuvdata.", uv_object2.history, ) @@ -2195,7 +2195,7 @@ def test_select_lst_range_too_big(casa_uvfits): for lst in np.unique(uv_object2.lst_array): assert lst in lsts_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific lsts using pyuvdata.", uv_object2.history, ) @@ -2230,7 +2230,7 @@ def test_select_lst_range_wrap_around(casa_uvfits): for lst in np.unique(uv_object2.lst_array): assert lst in lsts_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific lsts using pyuvdata.", uv_object2.history, ) @@ -2312,7 +2312,7 @@ def test_select_frequencies_writeerrors(casa_uvfits, tmp_path): for f in np.unique(uv_object2.freq_array): assert f in freqs_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", uv_object2.history, ) @@ -2327,7 +2327,7 @@ def test_select_frequencies_writeerrors(casa_uvfits, tmp_path): for f in np.unique(uv_object2.freq_array): assert f in freqs_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", uv_object2.history, ) @@ -2340,7 +2340,7 @@ def test_select_frequencies_writeerrors(casa_uvfits, tmp_path): for f in uv_object2.freq_array: assert f in [freqs_to_keep[0]] - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", uv_object2.history, ) @@ -2423,7 +2423,7 @@ def test_select_freq_chans(casa_uvfits): for f in np.unique(uv_object2.freq_array): assert f in uv_object.freq_array[chans_to_keep] - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", uv_object2.history, ) @@ -2438,7 +2438,7 @@ def test_select_freq_chans(casa_uvfits): for f in np.unique(uv_object2.freq_array): assert f in uv_object.freq_array[chans_to_keep] - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", uv_object2.history, ) @@ -2479,18 +2479,18 @@ def test_select_polarizations(hera_uvh5, pols_to_keep): assert p in uv_object2.polarization_array else: assert ( - uvutils.polstr2num(p, x_orientation=uv_object2.telescope.x_orientation) + utils.polstr2num(p, x_orientation=uv_object2.telescope.x_orientation) in uv_object2.polarization_array ) for p in np.unique(uv_object2.polarization_array): if isinstance(pols_to_keep[0], int): assert p in pols_to_keep else: - assert p in uvutils.polstr2num( + assert p in utils.polstr2num( pols_to_keep, x_orientation=uv_object2.telescope.x_orientation ) - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific polarizations using pyuvdata.", uv_object2.history, ) @@ -2615,7 +2615,7 @@ def test_select(casa_uvfits): for p in np.unique(uv_object2.polarization_array): assert p in pols_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to " "specific baseline-times, antennas, " "baselines, times, frequencies, " @@ -2711,7 +2711,7 @@ def test_select_with_lst(casa_uvfits): for p in np.unique(uv_object2.polarization_array): assert p in pols_to_keep - assert 
uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to " "specific baseline-times, antennas, " "baselines, lsts, frequencies, " @@ -2735,7 +2735,7 @@ def test_select_not_inplace(casa_uvfits): old_history = uv_object.history uv1 = uv_object.select(freq_chans=np.arange(32), inplace=False) uv1 += uv_object.select(freq_chans=np.arange(32, 64), inplace=False) - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to " "specific frequencies using pyuvdata. " "Combined data along frequency axis " @@ -3236,14 +3236,14 @@ def test_sum_vis(casa_uvfits): uv_summed = uv_half.sum_vis(uv_half_mod) assert np.array_equal(uv_summed.data_array, uv_full.data_array) - assert uvutils._check_histories( + assert helpers._check_histories( uv_half.history + " Visibilities summed using pyuvdata. Unique part of second " "object history follows. testing the history.", uv_summed.history, ) # add a test for full coverage of _combine_history_addition function assert ( - uvutils._combine_history_addition( + helpers._combine_history_addition( uv_half.history + " Visibilities summed using pyuvdata. Unique part of second " "object history follows. testing the history.", @@ -3255,7 +3255,7 @@ def test_sum_vis(casa_uvfits): uv_summed = uv_half.sum_vis(uv_half_mod, verbose_history=True) assert np.array_equal(uv_summed.data_array, uv_full.data_array) - assert uvutils._check_histories( + assert helpers._check_histories( uv_half.history + " Visibilities summed using pyuvdata. Second object history follows. " + uv_half_mod.history, @@ -3266,7 +3266,7 @@ def test_sum_vis(casa_uvfits): uv_diffed = uv_full.diff_vis(uv_half) assert np.array_equal(uv_diffed.data_array, uv_half.data_array) - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Visibilities differenced using pyuvdata.", uv_diffed.history ) @@ -3358,7 +3358,7 @@ def test_add(casa_uvfits, hera_uvh5_xx): uv2.select(freq_chans=np.arange(32, 64)) uv1 += uv2 # Check history is correct, before replacing and doing a full object check - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to " "specific frequencies using pyuvdata. " "Combined data along frequency axis " @@ -3387,7 +3387,7 @@ def test_add(casa_uvfits, hera_uvh5_xx): uv1.select(polarizations=uv1.polarization_array[0:2]) uv2.select(polarizations=uv2.polarization_array[2:4]) uv1 += uv2 - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to " "specific polarizations using pyuvdata. " "Combined data along polarization axis " @@ -3413,7 +3413,7 @@ def test_add(casa_uvfits, hera_uvh5_xx): uv1.select(times=times[0 : len(times) // 2]) uv2.select(times=times[len(times) // 2 :]) uv1 += uv2 - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to " "specific times using pyuvdata. " "Combined data along baseline-time axis " @@ -3433,7 +3433,7 @@ def test_add(casa_uvfits, hera_uvh5_xx): uv1.select(blt_inds=ind1) uv2.select(blt_inds=ind2) uv1 += uv2 - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to " "specific baseline-times using pyuvdata. 
" "Combined data along baseline-time axis " @@ -3470,7 +3470,7 @@ def test_add(casa_uvfits, hera_uvh5_xx): uv3.baseline_array = uv3.baseline_array[-1::-1] uv1 += uv3 uv1 += uv2 - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to " "specific baseline-times using pyuvdata. " "Combined data along baseline-time axis " @@ -3493,7 +3493,7 @@ def test_add(casa_uvfits, hera_uvh5_xx): times=times[len(times) // 2 :], polarizations=uv2.polarization_array[2:4] ) uv1 += uv2 - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to " "specific times, polarizations using " "pyuvdata. Combined data along " @@ -3533,7 +3533,7 @@ def test_add(casa_uvfits, hera_uvh5_xx): uv1.select(times=times[0 : len(times) // 2], freq_chans=np.arange(0, 32)) uv2.select(times=times[len(times) // 2 :], freq_chans=np.arange(32, 64)) uv1 += uv2 - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to " "specific times, frequencies using " "pyuvdata. Combined data along " @@ -3572,7 +3572,7 @@ def test_add(casa_uvfits, hera_uvh5_xx): uv1.select(times=times[0 : len(times) // 2]) uv2.select(times=times[len(times) // 2 :]) uv1 = uv1 + uv2 - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to " "specific times using pyuvdata. " "Combined data along baseline-time " @@ -3685,7 +3685,7 @@ def test_add(casa_uvfits, hera_uvh5_xx): uv2.select(polarizations=uv2.polarization_array[2:4]) uv2.history += " testing the history. AIPS WTSCAL = 1.0" uv_new = uv1 + uv2 - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to specific polarizations using pyuvdata. " "Combined data along polarization axis using pyuvdata. Unique part of next " "object history follows. testing the history.", @@ -3695,7 +3695,7 @@ def test_add(casa_uvfits, hera_uvh5_xx): assert uv_new == uv_full uv_new = uv1.__add__(uv2, verbose_history=True) - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to specific polarizations using pyuvdata. " "Combined data along polarization axis using pyuvdata. Next object history " "follows. " + uv2.history, @@ -3727,7 +3727,7 @@ def test_add_unprojected(casa_uvfits): uv2.select(freq_chans=np.arange(32, 64)) uv1 += uv2 # Check history is correct, before replacing and doing a full object check - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to " "specific frequencies using pyuvdata. " "Combined data along frequency " @@ -3743,7 +3743,7 @@ def test_add_unprojected(casa_uvfits): uv1.select(polarizations=uv1.polarization_array[0:2]) uv2.select(polarizations=uv2.polarization_array[2:4]) uv1 += uv2 - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to " "specific polarizations using pyuvdata. " "Combined data along polarization " @@ -3760,7 +3760,7 @@ def test_add_unprojected(casa_uvfits): uv1.select(times=times[0 : len(times) // 2]) uv2.select(times=times[len(times) // 2 :]) uv1 += uv2 - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to " "specific times using pyuvdata. 
" "Combined data along baseline-time " @@ -3780,7 +3780,7 @@ def test_add_unprojected(casa_uvfits): uv1.select(blt_inds=ind1) uv2.select(blt_inds=ind2) uv1 += uv2 - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to " "specific baseline-times using pyuvdata. " "Combined data along baseline-time " @@ -3802,7 +3802,7 @@ def test_add_unprojected(casa_uvfits): times=times[len(times) // 2 :], polarizations=uv2.polarization_array[2:4] ) uv1 += uv2 - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to " "specific times, polarizations using " "pyuvdata. Combined data along " @@ -3842,7 +3842,7 @@ def test_add_unprojected(casa_uvfits): uv1.select(times=times[0 : len(times) // 2], freq_chans=np.arange(0, 32)) uv2.select(times=times[len(times) // 2 :], freq_chans=np.arange(32, 64)) uv1 += uv2 - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to " "specific times, frequencies using " "pyuvdata. Combined data along " @@ -3881,7 +3881,7 @@ def test_add_unprojected(casa_uvfits): uv1.select(times=times[0 : len(times) // 2]) uv2.select(times=times[len(times) // 2 :]) uv1 = uv1 + uv2 - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to " "specific times using pyuvdata. " "Combined data along baseline-time " @@ -3923,7 +3923,7 @@ def test_add_unprojected(casa_uvfits): uv2.select(polarizations=uv2.polarization_array[2:4]) uv2.history += " testing the history. AIPS WTSCAL = 1.0" uv_new = uv1 + uv2 - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to specific polarizations using pyuvdata. " "Combined data along polarization axis using pyuvdata. Unique part of next " "object history follows. testing the history.", @@ -3933,7 +3933,7 @@ def test_add_unprojected(casa_uvfits): assert uv_new == uv_full uv_new = uv1.__add__(uv2, verbose_history=True) - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to specific polarizations using pyuvdata. " "Combined data along polarization axis using pyuvdata. Next object history " "follows." + uv2.history, @@ -4135,7 +4135,7 @@ def test_fast_concat(casa_uvfits, hera_uvh5_xx): uv3.select(freq_chans=np.arange(40, 64)) uv1.fast_concat([uv2, uv3], "freq", inplace=True) # Check history is correct, before replacing and doing a full object check - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to " "specific frequencies using pyuvdata. " "Combined data along frequency axis " @@ -4186,7 +4186,7 @@ def test_fast_concat(casa_uvfits, hera_uvh5_xx): uv2.select(polarizations=uv2.polarization_array[1:3]) uv3.select(polarizations=uv3.polarization_array[3:4]) uv1.fast_concat([uv2, uv3], "polarization", inplace=True) - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to " "specific polarizations using pyuvdata. " "Combined data along polarization axis " @@ -4231,7 +4231,7 @@ def test_fast_concat(casa_uvfits, hera_uvh5_xx): uv2.select(times=times[len(times) // 3 : (len(times) // 3) * 2]) uv3.select(times=times[(len(times) // 3) * 2 :]) uv1.fast_concat([uv2, uv3], "blt", inplace=True) - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to " "specific times using pyuvdata. 
" "Combined data along baseline-time axis " @@ -4250,7 +4250,7 @@ def test_fast_concat(casa_uvfits, hera_uvh5_xx): uv1.select(blt_inds=ind1) uv2.select(blt_inds=ind2) uv1.fast_concat(uv2, "blt", inplace=True) - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to " "specific baseline-times using pyuvdata. " "Combined data along baseline-time axis " @@ -4302,7 +4302,7 @@ def test_fast_concat(casa_uvfits, hera_uvh5_xx): uv2.select(blt_inds=ind2) uv2.fast_concat(uv1, "blt", inplace=True) - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to " "specific baseline-times using pyuvdata. " "Combined data along baseline-time " @@ -4370,7 +4370,7 @@ def test_fast_concat(casa_uvfits, hera_uvh5_xx): uv1.select(times=times[0 : len(times) // 2]) uv2.select(times=times[len(times) // 2 :]) uv1 = uv1.fast_concat(uv2, "blt", inplace=False) - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to " "specific times using pyuvdata. " "Combined data along baseline-time " @@ -4472,7 +4472,7 @@ def test_fast_concat(casa_uvfits, hera_uvh5_xx): uv2.select(polarizations=uv2.polarization_array[2:4]) uv2.history += " testing the history. AIPS WTSCAL = 1.0" uv_new = uv1.fast_concat(uv2, "polarization") - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to specific polarizations using pyuvdata. " "Combined data along polarization axis using pyuvdata. Unique part of next " "object history follows. testing the history.", @@ -4482,7 +4482,7 @@ def test_fast_concat(casa_uvfits, hera_uvh5_xx): assert uv_new == uv_full uv_new = uv1.fast_concat(uv2, "polarization", verbose_history=True) - assert uvutils._check_histories( + assert helpers._check_histories( uv_full.history + " Downselected to specific polarizations using pyuvdata. " "Combined data along polarization axis using pyuvdata. Next object history " "follows." + uv2.history, @@ -4540,7 +4540,7 @@ def test_key2inds(casa_uvfits, tuplify): assert indp[0] == slice(0, 1, 1) # Combo with pol as string - key = (ant1, ant2, uvutils.polnum2str(pol)) + key = (ant1, ant2, utils.polnum2str(pol)) if tuplify: key = (key,) ind1, ind2, indp = uv._key2inds(key) @@ -4556,7 +4556,7 @@ def test_key2inds(casa_uvfits, tuplify): assert indp[1] == slice(0, 1, 1) # Conjugation with pol as string - key = (ant2, ant1, uvutils.polnum2str(pol)) + key = (ant2, ant1, utils.polnum2str(pol)) if tuplify: key = (key,) ind1, ind2, indp = uv._key2inds(key) @@ -4667,10 +4667,10 @@ def test_key2inds_conj_all_pols_bl_fringe(casa_uvfits): # Mix one instance of this baseline. 
uv.ant_1_array[0] = ant2 uv.ant_2_array[0] = ant1 - uv.baseline_array[0] = uvutils.antnums_to_baseline( + uv.baseline_array[0] = utils.antnums_to_baseline( ant2, ant1, Nants_telescope=uv.telescope.Nants ) - bl = uvutils.antnums_to_baseline(ant1, ant2, Nants_telescope=uv.telescope.Nants) + bl = utils.antnums_to_baseline(ant1, ant2, Nants_telescope=uv.telescope.Nants) bltind = np.where((uv.ant_1_array == ant1) & (uv.ant_2_array == ant2))[0] ind1, ind2, indp = uv._key2inds(bl) @@ -4699,7 +4699,7 @@ def test_key2inds_conj_all_pols_bls(casa_uvfits): ant1 = uv.ant_1_array[0] ant2 = uv.ant_2_array[0] - bl = uvutils.antnums_to_baseline(ant2, ant1, Nants_telescope=uv.telescope.Nants) + bl = utils.antnums_to_baseline(ant2, ant1, Nants_telescope=uv.telescope.Nants) bltind = np.where((uv.ant_1_array == ant1) & (uv.ant_2_array == ant2))[0] ind1, ind2, indp = uv._key2inds(bl) @@ -4717,7 +4717,7 @@ def test_key2inds_conj_all_pols_missing_data_bls(casa_uvfits): uv.select(polarizations=["rl"]) ant1 = uv.ant_1_array[0] ant2 = uv.ant_2_array[0] - bl = uvutils.antnums_to_baseline(ant2, ant1, Nants_telescope=uv.telescope.Nants) + bl = utils.antnums_to_baseline(ant2, ant1, Nants_telescope=uv.telescope.Nants) with pytest.raises( KeyError, match="Baseline 81924 not found for polarization array in data." @@ -4858,7 +4858,7 @@ def test_get_data(casa_uvfits, kind): d = fnc(ant1, ant2, pol) assert np.all(dcheck == d) - d = fnc(ant1, ant2, uvutils.polnum2str(pol)) + d = fnc(ant1, ant2, utils.polnum2str(pol)) assert np.all(dcheck == d) d = fnc((ant1, ant2, pol)) @@ -4959,9 +4959,7 @@ def test_antpair2ind_exceptions(paper_uvh5): def test_antpairpol_iter(casa_uvfits): # Test generator uv = casa_uvfits - pol_dict = { - uvutils.polnum2str(uv.polarization_array[i]): i for i in range(uv.Npols) - } + pol_dict = {utils.polnum2str(uv.polarization_array[i]): i for i in range(uv.Npols)} keys = [] pols = set() bls = set() @@ -5022,7 +5020,7 @@ def test_telescope_loc_xyz_check(paper_uvh5, tmp_path): # test that improper telescope locations can still be read uv = paper_uvh5 uv.telescope.location = EarthLocation.from_geocentric( - *uvutils.XYZ_from_LatLonAlt(*uv.telescope._location.xyz()), unit="m" + *utils.XYZ_from_LatLonAlt(*uv.telescope._location.xyz()), unit="m" ) # fix LST values uv.set_lsts_from_time_array() @@ -5828,11 +5826,13 @@ def test_get_antenna_redundancies(pyuvsim_redundant, grid_alg): apos = uv0.telescope.get_enu_antpos() with check_warnings(warn_type, match=warn_str): - new_red_gps, new_centers, new_lengths = uvutils.get_antenna_redundancies( - uv0.telescope.antenna_numbers, - apos, - include_autos=False, - use_grid_alg=grid_alg, + new_red_gps, new_centers, new_lengths = ( + utils.redundancy.get_antenna_redundancies( + uv0.telescope.antenna_numbers, + apos, + include_autos=False, + use_grid_alg=grid_alg, + ) ) # all redundancy info is the same @@ -6490,7 +6490,7 @@ def test_overlapping_data_add(casa_uvfits, tmp_path): "Combined data along polarization axis using pyuvdata. Combined data along " "baseline-time axis using pyuvdata. Overwrote invalid data using pyuvdata." ) - assert uvutils._check_histories(uvfull.history, uv.history + extra_history) + assert helpers._check_histories(uvfull.history, uv.history + extra_history) uvfull.history = uv.history # make histories match assert uv == uvfull @@ -6505,7 +6505,7 @@ def test_overlapping_data_add(casa_uvfits, tmp_path): "Combined data along polarization axis using pyuvdata. Combined data along " "baseline-time axis using pyuvdata." 
) - assert uvutils._check_histories(uvfull.history, uv.history + extra_history2) + assert helpers._check_histories(uvfull.history, uv.history + extra_history2) uvfull.history = uv.history # make histories match assert uv == uvfull @@ -6547,7 +6547,7 @@ def test_overlapping_data_add(casa_uvfits, tmp_path): uvfull.read(np.array([uv1_out, uv2_out, uv3_out, uv4_out])) uvfull.reorder_blts() uv.reorder_blts() - assert uvutils._check_histories(uvfull.history, uv.history + extra_history2) + assert helpers._check_histories(uvfull.history, uv.history + extra_history2) uvfull.history = uv.history # make histories match # make sure filenames are what we expect @@ -6576,7 +6576,7 @@ def test_lsts_from_time_with_only_unique(paper_uvh5): """ uv = paper_uvh5 # calculate the lsts for all elements in time array - full_lsts = uvutils.get_lst_for_time( + full_lsts = utils.get_lst_for_time( uv.time_array, telescope_loc=uv.telescope.location ) # use `set_lst_from_time_array` to set the uv.lst_array using only unique values @@ -6591,7 +6591,7 @@ def test_lsts_from_time_with_only_unique_background(paper_uvh5): """ uv = paper_uvh5 # calculate the lsts for all elements in time array - full_lsts = uvutils.get_lst_for_time( + full_lsts = utils.get_lst_for_time( uv.time_array, telescope_loc=uv.telescope.location ) # use `set_lst_from_time_array` to set the uv.lst_array using only unique values @@ -9357,7 +9357,7 @@ def test_read_background_lsts(): def test_parse_ants_x_orientation_kwarg(hera_uvh5): uvd = hera_uvh5 # call with x_orientation = None to make parse_ants read from the object - ant_pair, pols = uvutils.parse_ants(uvd, "cross") + ant_pair, pols = utils.bls.parse_ants(uvd, "cross") ant_pair2, pols2 = uvd.parse_ants("cross") assert np.array_equal(ant_pair, ant_pair2) assert np.array_equal(pols, pols2) @@ -9627,7 +9627,7 @@ def test_print_object_multi(carma_miriad): ) def test_look_in_catalog_err(sma_mir, kwargs, err_type, err_msg): with pytest.raises(err_type, match=err_msg): - uvutils.look_in_catalog(sma_mir.phase_center_catalog, **kwargs) + utils.ps_cat.look_in_catalog(sma_mir.phase_center_catalog, **kwargs) @pytest.mark.parametrize( @@ -9656,7 +9656,7 @@ def test_look_in_catalog(hera_uvh5, name, stype, arg_dict, exp_id, exp_diffs): parameters and that recorded in the UVData object. 
""" hera_uvh5.print_phase_center_info() - [cat_id, num_diffs] = uvutils.look_in_catalog( + [cat_id, num_diffs] = utils.ps_cat.look_in_catalog( hera_uvh5.phase_center_catalog, cat_name=name, cat_type=stype, @@ -9684,17 +9684,16 @@ def test_look_in_catalog_phase_dict(sma_mir): behave as expected """ # Now try lookup using a dictionary of properties - assert uvutils.look_in_catalog(sma_mir.phase_center_catalog, cat_name="3c84") == ( - 1, - 5, - ) + assert utils.ps_cat.look_in_catalog( + sma_mir.phase_center_catalog, cat_name="3c84" + ) == (1, 5) phase_dict = sma_mir.phase_center_catalog[1] - assert uvutils.look_in_catalog( + assert utils.ps_cat.look_in_catalog( sma_mir.phase_center_catalog, cat_name="3c84", phase_dict=phase_dict ) == (1, 0) # Make sure that if we set ignore_name, we still get a match - assert uvutils.look_in_catalog( + assert utils.ps_cat.look_in_catalog( sma_mir.phase_center_catalog, cat_name="3c84", phase_dict=phase_dict, @@ -9702,7 +9701,7 @@ def test_look_in_catalog_phase_dict(sma_mir): ) == (1, 0) # Match w/ a mis-capitalization - assert uvutils.look_in_catalog( + assert utils.ps_cat.look_in_catalog( sma_mir.phase_center_catalog, cat_name="3C84", phase_dict=phase_dict, @@ -10010,7 +10009,7 @@ def test_add_clear_phase_center(sma_mir): # Check to see that the catalog actually changed assert sma_mir.phase_center_catalog != check_dict # And ake sure we can ID by name, but find diffs if attributes dont match - assert uvutils.look_in_catalog( + assert utils.ps_cat.look_in_catalog( sma_mir.phase_center_catalog, cat_name="Mars", cat_lon=[0], cat_lat=[0] ) == (0, 7) @@ -10089,14 +10088,14 @@ def test_split_phase_center(hera_uvh5): select_mask = np.isin(hera_uvh5.time_array, np.unique(hera_uvh5.time_array)[::2]) hera_uvh5.split_phase_center("3c84", new_name="3c84_2", select_mask=select_mask) - cat_id1 = uvutils.look_for_name(hera_uvh5.phase_center_catalog, "3c84") - cat_id2 = uvutils.look_for_name(hera_uvh5.phase_center_catalog, "3c84_2") + cat_id1 = utils.ps_cat.look_for_name(hera_uvh5.phase_center_catalog, "3c84") + cat_id2 = utils.ps_cat.look_for_name(hera_uvh5.phase_center_catalog, "3c84_2") # Check that the catalog IDs also line up w/ what we expect assert np.all(hera_uvh5.phase_center_id_array[~select_mask] == cat_id1) assert np.all(hera_uvh5.phase_center_id_array[select_mask] == cat_id2) assert hera_uvh5.Nphase == 2 - cat_id_all = uvutils.look_for_name( + cat_id_all = utils.ps_cat.look_for_name( hera_uvh5.phase_center_catalog, ["3c84", "3c84_2"] ) assert np.all(np.isin(hera_uvh5.phase_center_id_array, cat_id_all)) @@ -10132,8 +10131,8 @@ def test_split_phase_center_downselect(hera_uvh5): downselect=True, ) - cat_id1 = uvutils.look_for_name(hera_uvh5.phase_center_catalog, "3c84") - cat_id3 = uvutils.look_for_name(hera_uvh5.phase_center_catalog, "3c84_3") + cat_id1 = utils.ps_cat.look_for_name(hera_uvh5.phase_center_catalog, "3c84") + cat_id3 = utils.ps_cat.look_for_name(hera_uvh5.phase_center_catalog, "3c84_3") assert np.all(hera_uvh5.phase_center_id_array[~select_mask] == cat_id1) assert np.all(hera_uvh5.phase_center_id_array[select_mask] == cat_id3) @@ -10153,7 +10152,7 @@ def test_split_phase_center_downselect(hera_uvh5): assert hera_uvh5.phase_center_catalog == catalog_copy assert np.all( hera_uvh5.phase_center_id_array - == uvutils.look_for_name(hera_uvh5.phase_center_catalog, "3c84") + == utils.ps_cat.look_for_name(hera_uvh5.phase_center_catalog, "3c84") ) @@ -10327,12 +10326,12 @@ def test_phase_dict_helper_sidereal_lookup(sma_mir, dummy_phase_dict): ) assert ( 
phase_dict.pop("cat_id") - == uvutils.look_for_name(sma_mir.phase_center_catalog, "3c84")[0] + == utils.ps_cat.look_for_name(sma_mir.phase_center_catalog, "3c84")[0] ) assert ( phase_dict == sma_mir.phase_center_catalog[ - uvutils.look_for_name(sma_mir.phase_center_catalog, "3c84")[0] + utils.ps_cat.look_for_name(sma_mir.phase_center_catalog, "3c84")[0] ] ) @@ -10344,7 +10343,7 @@ def test_phase_dict_helper_jpl_lookup_existing(sma_mir): """ # Finally, check that we get a good result if feeding the same values, even if not # actually performing a lookup - cat_id = uvutils.look_for_name(sma_mir.phase_center_catalog, "3c84")[0] + cat_id = utils.ps_cat.look_for_name(sma_mir.phase_center_catalog, "3c84")[0] phase_dict = sma_mir._phase_dict_helper( lon=sma_mir.phase_center_catalog[cat_id].get("cat_lon"), lat=sma_mir.phase_center_catalog[cat_id].get("cat_lat"), @@ -10364,7 +10363,7 @@ def test_phase_dict_helper_jpl_lookup_existing(sma_mir): assert ( phase_dict == sma_mir.phase_center_catalog[ - uvutils.look_for_name(sma_mir.phase_center_catalog, "3c84")[0] + utils.ps_cat.look_for_name(sma_mir.phase_center_catalog, "3c84")[0] ] ) diff --git a/tests/uvdata/test_uvfits.py b/tests/uvdata/test_uvfits.py index 21fd4ea197..ca0a024bce 100644 --- a/tests/uvdata/test_uvfits.py +++ b/tests/uvdata/test_uvfits.py @@ -12,12 +12,12 @@ import pytest from astropy.io import fits -import pyuvdata.utils as uvutils -from pyuvdata import UVData +import pyuvdata.utils.file_io.fits as fits_utils +from pyuvdata import UVData, utils from pyuvdata.data import DATA_PATH from pyuvdata.testing import check_warnings -from ..test_utils import frame_selenoid, hasmoon +from ..utils.test_coordinates import frame_selenoid, hasmoon casa_tutorial_uvfits = os.path.join( DATA_PATH, "day2_TDEM0003_10s_norx_1src_1spw.uvfits" @@ -157,7 +157,7 @@ def test_time_precision(tmp_path): uvd2.read(testfile) unique_times, inverse_inds = np.unique(uvd2.time_array, return_inverse=True) - unique_lst_array = uvutils.get_lst_for_time( + unique_lst_array = utils.get_lst_for_time( unique_times, telescope_loc=uvd.telescope.location ) @@ -195,7 +195,7 @@ def test_break_read_uvfits(tmp_path): file1 = os.path.join(DATA_PATH, "1061316296.uvfits") write_file = os.path.join(tmp_path, "multi_subarray.uvfits") with fits.open(file1, memmap=True) as hdu_list: - hdunames = uvutils._fits_indexhdus(hdu_list) + hdunames = fits_utils._indexhdus(hdu_list) vis_hdu = hdu_list[0] ant_hdu = hdu_list[hdunames["AIPS AN"]] vis_data = vis_hdu.data.copy() @@ -213,7 +213,7 @@ def test_break_read_uvfits(tmp_path): write_file = os.path.join(tmp_path, "bad_frame.uvfits") with fits.open(file1, memmap=True) as hdu_list: - hdunames = uvutils._fits_indexhdus(hdu_list) + hdunames = fits_utils._indexhdus(hdu_list) vis_hdu = hdu_list[0] ant_hdu = hdu_list[hdunames["AIPS AN"]] ant_hdr = ant_hdu.header.copy() @@ -245,7 +245,7 @@ def test_source_group_params(casa_uvfits, tmp_path): uv_in.write_uvfits(write_file) with fits.open(write_file, memmap=True) as hdu_list: - hdunames = uvutils._fits_indexhdus(hdu_list) + hdunames = fits_utils._indexhdus(hdu_list) vis_hdu = hdu_list[0] vis_hdr = vis_hdu.header.copy() raw_data_array = vis_hdu.data.data @@ -341,7 +341,7 @@ def test_source_frame_defaults(casa_uvfits, tmp_path, frame): uv_in.write_uvfits(write_file) with fits.open(write_file, memmap=True) as hdu_list: - hdunames = uvutils._fits_indexhdus(hdu_list) + hdunames = fits_utils._indexhdus(hdu_list) vis_hdu = hdu_list[0] vis_hdr = vis_hdu.header.copy() raw_data_array = vis_hdu.data.data @@ -392,7 
+392,7 @@ def test_multi_source_frame_defaults(casa_uvfits, tmp_path, frame_list): uv_in.write_uvfits(write_file) with fits.open(write_file, memmap=True) as hdu_list: - hdunames = uvutils._fits_indexhdus(hdu_list) + hdunames = fits_utils._indexhdus(hdu_list) vis_hdu = hdu_list[0] vis_hdr = vis_hdu.header.copy() raw_data_array = vis_hdu.data.data @@ -441,7 +441,7 @@ def test_missing_aips_su_table(casa_uvfits, tmp_path): uv_in.write_uvfits(write_file) with fits.open(write_file, memmap=True) as hdu_list: - hdunames = uvutils._fits_indexhdus(hdu_list) + hdunames = fits_utils._indexhdus(hdu_list) vis_hdu = hdu_list[0] vis_hdr = vis_hdu.header.copy() raw_data_array = vis_hdu.data.data @@ -528,7 +528,7 @@ def test_readwriteread(tmp_path, casa_uvfits, telescope_frame, selenoid): height=uv_in.telescope.location.height, ellipsoid=selenoid, ) - new_full_antpos = uvutils.ECEF_from_ENU( + new_full_antpos = utils.ECEF_from_ENU( enu=enu_antpos, center_loc=uv_in.telescope.location ) uv_in.telescope.antenna_positions = ( @@ -552,7 +552,7 @@ def test_readwriteread(tmp_path, casa_uvfits, telescope_frame, selenoid): # it is properly defaulted to SPHERE if telescope_frame == "mcmf" and selenoid == "SPHERE": with fits.open(write_file, memmap=True) as hdu_list: - hdunames = uvutils._fits_indexhdus(hdu_list) + hdunames = fits_utils._indexhdus(hdu_list) ant_hdu = hdu_list[hdunames["AIPS AN"]] ant_hdr = ant_hdu.header.copy() @@ -588,7 +588,7 @@ def test_uvw_coordinate_suffixes(casa_uvfits, tmp_path, uvw_suffix): uv_in.write_uvfits(write_file) with fits.open(write_file, memmap=True) as hdu_list: - hdunames = uvutils._fits_indexhdus(hdu_list) + hdunames = fits_utils._indexhdus(hdu_list) vis_hdu = hdu_list[0] vis_hdr = vis_hdu.header.copy() raw_data_array = vis_hdu.data.data @@ -624,7 +624,7 @@ def test_uvw_coordinate_suffixes(casa_uvfits, tmp_path, uvw_suffix): ], ): uv2 = UVData.from_file(write_file2) - uv2.uvw_array = uvutils._rotate_one_axis( + uv2.uvw_array = utils.phasing._rotate_one_axis( xyz_array=uv2.uvw_array[:, :, None], rot_amount=-1 * (uv2.phase_center_app_dec - np.pi / 2), rot_axis=0, @@ -647,7 +647,7 @@ def test_uvw_coordinate_suffix_errors(casa_uvfits, tmp_path, uvw_suffix): uv_in.write_uvfits(write_file) with fits.open(write_file, memmap=True) as hdu_list: - hdunames = uvutils._fits_indexhdus(hdu_list) + hdunames = fits_utils._indexhdus(hdu_list) vis_hdu = hdu_list[0] vis_hdr = vis_hdu.header.copy() raw_data_array = vis_hdu.data.data @@ -805,7 +805,7 @@ def test_readwriteread_missing_info(tmp_path, casa_uvfits, lat_lon_alt): # check missing telescope_name, timesys vs timsys spelling, xyz_telescope_frame=???? 
uv_in.write_uvfits(write_file) with fits.open(write_file, memmap=True) as hdu_list: - hdunames = uvutils._fits_indexhdus(hdu_list) + hdunames = fits_utils._indexhdus(hdu_list) vis_hdu = hdu_list[0] vis_hdr = vis_hdu.header.copy() @@ -882,7 +882,7 @@ def test_readwriteread_error_single_time(tmp_path, casa_uvfits): uv_singlet.write_uvfits(write_file) with fits.open(write_file, memmap=True) as hdu_list: - hdunames = uvutils._fits_indexhdus(hdu_list) + hdunames = fits_utils._indexhdus(hdu_list) vis_hdu = hdu_list[0] vis_hdr = vis_hdu.header.copy() raw_data_array = vis_hdu.data.data @@ -936,7 +936,7 @@ def test_uvfits_no_moon(casa_uvfits, tmp_path): uv_out = UVData() with fits.open(write_file, memmap=True) as hdu_list: - hdunames = uvutils._fits_indexhdus(hdu_list) + hdunames = fits_utils._indexhdus(hdu_list) ant_hdu = hdu_list[hdunames["AIPS AN"]] ant_hdr = ant_hdu.header.copy() @@ -1181,7 +1181,7 @@ def test_select_read_nospw_pol(casa_uvfits, tmp_path): # this requires writing a new file because the no spw file we have has only 1 pol with fits.open(casa_tutorial_uvfits, memmap=True) as hdu_list: - hdunames = uvutils._fits_indexhdus(hdu_list) + hdunames = fits_utils._indexhdus(hdu_list) vis_hdu = hdu_list[0] vis_hdr = vis_hdu.header.copy() raw_data_array = vis_hdu.data.data @@ -1329,7 +1329,7 @@ def test_multi_files(casa_uvfits, tmp_path): uv1.read(np.array([testfile1, testfile2]), file_type="uvfits") # Check history is correct, before replacing and doing a full object check - assert uvutils._check_histories( + assert utils.helpers._check_histories( uv_full.history + " Downselected to " "specific frequencies using pyuvdata. " "Combined data along frequency axis " @@ -1371,7 +1371,7 @@ def test_multi_files_axis(casa_uvfits, tmp_path): uv1.read([testfile1, testfile2], axis="freq") # Check history is correct, before replacing and doing a full object check - assert uvutils._check_histories( + assert utils.helpers._check_histories( uv_full.history + " Downselected to " "specific frequencies using pyuvdata. " "Combined data along frequency axis " @@ -1414,7 +1414,7 @@ def test_multi_files_metadata_only(casa_uvfits, tmp_path): uv1.read([testfile1, testfile2], read_data=False) # Check history is correct, before replacing and doing a full object check - assert uvutils._check_histories( + assert utils.helpers._check_histories( uv_full.history + " Downselected to " "specific frequencies using pyuvdata. 
" "Combined data along frequency axis " @@ -1479,7 +1479,7 @@ def test_cotter_telescope_frame(tmp_path): uvd1 = UVData() with fits.open(file1, memmap=True) as hdu_list: - hdunames = uvutils._fits_indexhdus(hdu_list) + hdunames = fits_utils._indexhdus(hdu_list) vis_hdu = hdu_list[0] ant_hdu = hdu_list[hdunames["AIPS AN"]] ant_hdu.header.pop("FRAME") @@ -1718,7 +1718,7 @@ def test_miriad_convention(tmp_path): expected_vals = {"ANTENNA1_0": 4, "ANTENNA2_0": 8, "NOSTA_0": 1} # Check baselines match MIRIAD convention - bl_miriad_expected = uvutils.antnums_to_baseline( + bl_miriad_expected = utils.antnums_to_baseline( uv.ant_1_array, uv.ant_2_array, Nants_telescope=512, use_miriad_convention=True ) with fits.open(testfile1) as hdu: diff --git a/tests/uvdata/test_uvh5.py b/tests/uvdata/test_uvh5.py index 0183a88bb6..365963b2b3 100644 --- a/tests/uvdata/test_uvh5.py +++ b/tests/uvdata/test_uvh5.py @@ -18,14 +18,13 @@ from astropy.time import Time from packaging import version -import pyuvdata.hdf5_utils as hdf5_utils -import pyuvdata.utils as uvutils -from pyuvdata import UVData +import pyuvdata.utils.file_io.hdf5 as hdf5_utils +from pyuvdata import UVData, utils from pyuvdata.data import DATA_PATH from pyuvdata.testing import check_warnings from pyuvdata.uvdata import uvh5 -from ..test_utils import frame_selenoid +from ..utils.test_coordinates import frame_selenoid # ignore common file-read warnings pytestmark = [ @@ -53,7 +52,7 @@ def uv_partial_write(casa_uvfits, tmp_path): # convert a uvfits file to uvh5, cutting down the amount of data uv_uvfits = casa_uvfits uv_uvfits.select(antenna_nums=[3, 7, 24]) - uv_uvfits.lst_array = uvutils.get_lst_for_time( + uv_uvfits.lst_array = utils.get_lst_for_time( uv_uvfits.time_array, telescope_loc=uv_uvfits.telescope.location ) @@ -201,7 +200,7 @@ def test_read_uvfits_write_uvh5_read_uvh5( height=uv_in.telescope.location.height, ellipsoid=selenoid, ) - new_full_antpos = uvutils.ECEF_from_ENU( + new_full_antpos = utils.ECEF_from_ENU( enu=enu_antpos, center_loc=uv_in.telescope.location ) uv_in.telescope.antenna_positions = ( @@ -432,7 +431,7 @@ def test_uvh5_read_multiple_files(casa_uvfits, tmp_path): uv1.read(np.array([testfile1, testfile2]), file_type="uvh5") # Check history is correct, before replacing and doing a full object check - assert uvutils._check_histories( + assert utils.helpers._check_histories( uv_in.history + " Downselected to " "specific frequencies using pyuvdata. " "Combined data along frequency axis using" @@ -476,7 +475,7 @@ def test_uvh5_read_multiple_files_metadata_only(casa_uvfits, tmp_path): uv_full.read_uvfits(uvfits_filename, read_data=False) uv1.read([testfile1, testfile2], read_data=False) # Check history is correct, before replacing and doing a full object check - assert uvutils._check_histories( + assert utils.helpers._check_histories( uv_full.history + " Downselected to " "specific frequencies using pyuvdata. " "Combined data along frequency axis using" @@ -516,7 +515,7 @@ def test_uvh5_read_multiple_files_axis(casa_uvfits, tmp_path): uv2.write_uvh5(testfile2, clobber=True) uv1.read([testfile1, testfile2], axis="freq") # Check history is correct, before replacing and doing a full object check - assert uvutils._check_histories( + assert utils.helpers._check_histories( uv_in.history + " Downselected to " "specific frequencies using pyuvdata. 
" "Combined data along frequency axis using" diff --git a/tests/uvflag/test_uvflag.py b/tests/uvflag/test_uvflag.py index 5d4883fcd8..1a9f328791 100644 --- a/tests/uvflag/test_uvflag.py +++ b/tests/uvflag/test_uvflag.py @@ -9,20 +9,21 @@ import shutil import warnings -import astropy.units as units import h5py import numpy as np import pytest from _pytest.outcomes import Skipped +from astropy import units -from pyuvdata import UVCal, UVData, UVFlag, __version__, hdf5_utils -from pyuvdata import utils as uvutils +from pyuvdata import UVCal, UVData, UVFlag, __version__, utils from pyuvdata.data import DATA_PATH from pyuvdata.testing import check_warnings +from pyuvdata.utils import helpers +from pyuvdata.utils.file_io import hdf5 as hdf5_utils from pyuvdata.uvbase import old_telescope_metadata_attrs from pyuvdata.uvflag import and_rows_cols, flags2waterfall -from ..test_utils import frame_selenoid, hasmoon +from ..utils.test_coordinates import frame_selenoid, hasmoon test_d_file = os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcAA.uvh5") test_c_file = os.path.join(DATA_PATH, "zen.2457555.42443.HH.uvcA.omni.calfits") @@ -758,7 +759,7 @@ def test_read_write_loop_spw(uvdata_obj, test_outfile, telescope_frame, selenoid height=uv.telescope.location.height, ellipsoid=selenoid, ) - new_full_antpos = uvutils.ECEF_from_ENU( + new_full_antpos = utils.ECEF_from_ENU( enu=enu_antpos, center_loc=uv.telescope.location ) uv.telescope.antenna_positions = new_full_antpos - uv.telescope._location.xyz() @@ -3261,13 +3262,13 @@ def test_select_blt_inds(input_uvf, uvf_mode, dimension): assert uvf1.Ntimes == new_nblts # verify that histories are different - assert not uvutils._check_histories(uvf.history, uvf1.history) + assert not helpers._check_histories(uvf.history, uvf1.history) if uvf.type == "baseline": addition_str = "baseline-times" else: addition_str = "times" - assert uvutils._check_histories( + assert helpers._check_histories( uvf.history + f" Downselected to specific {addition_str} using pyuvdata.", uvf1.history, ) @@ -3346,7 +3347,7 @@ def test_select_antenna_nums(input_uvf, uvf_mode, dimension): for ant in np.unique(uvf2.ant_array): assert ant in ants_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific antennas using pyuvdata.", uvf2.history, ) @@ -3426,7 +3427,7 @@ def test_select_bls(input_uvf, uvf_mode): for pair in sorted_pairs_object2: assert pair in sorted_pairs_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific baselines using pyuvdata.", uvf2.history, ) @@ -3469,7 +3470,7 @@ def test_select_bls(input_uvf, uvf_mode): for pair in sorted_pairs_object2: assert pair in sorted_pairs_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to " "specific baselines, polarizations using pyuvdata.", uvf2.history, @@ -3557,7 +3558,7 @@ def test_select_times(input_uvf, uvf_mode): for t in np.unique(uvf2.time_array): assert t in times_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific times using pyuvdata.", uvf2.history ) # check that it also works with higher dimension array @@ -3571,7 +3572,7 @@ def test_select_times(input_uvf, uvf_mode): for t in np.unique(uvf2.time_array): assert t in times_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific times using pyuvdata.", uvf2.history ) # 
check for errors associated with times not included in data @@ -3606,7 +3607,7 @@ def test_select_frequencies(input_uvf, uvf_mode): for f in np.unique(uvf2.freq_array): assert f in freqs_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", uvf2.history, ) @@ -3621,7 +3622,7 @@ def test_select_frequencies(input_uvf, uvf_mode): for f in np.unique(uvf2.freq_array): assert f in freqs_to_keep - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", uvf2.history, ) @@ -3634,7 +3635,7 @@ def test_select_frequencies(input_uvf, uvf_mode): for f in uvf2.freq_array: assert f in [freqs_to_keep[0]] - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", uvf2.history, ) @@ -3672,7 +3673,7 @@ def test_select_freq_chans(input_uvf, uvf_mode): for f in np.unique(uvf2.freq_array): assert f in uvf.freq_array[chans_to_keep] - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", uvf2.history, ) @@ -3688,7 +3689,7 @@ def test_select_freq_chans(input_uvf, uvf_mode): for f in np.unique(uvf2.freq_array): assert f in uvf.freq_array[chans_to_keep] - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", uvf2.history, ) @@ -3737,18 +3738,18 @@ def test_select_polarizations(uvf_mode, pols_to_keep, input_uvf): assert p in uvf2.polarization_array else: assert ( - uvutils.polstr2num(p, x_orientation=uvf2.telescope.x_orientation) + utils.polstr2num(p, x_orientation=uvf2.telescope.x_orientation) in uvf2.polarization_array ) for p in np.unique(uvf2.polarization_array): if isinstance(pols_to_keep[0], int): assert p in pols_to_keep else: - assert p in uvutils.polstr2num( + assert p in utils.polstr2num( pols_to_keep, x_orientation=uvf2.telescope.x_orientation ) - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to specific polarizations using pyuvdata.", uvf2.history, ) @@ -3887,7 +3888,7 @@ def test_select(input_uvf, uvf_mode): assert p in pols_to_keep if uvf.type == "baseline": - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to " "specific baseline-times, antennas, " "baselines, times, frequencies, " @@ -3895,7 +3896,7 @@ def test_select(input_uvf, uvf_mode): uvf2.history, ) elif uvf.type == "antenna": - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to " "specific times, antennas, " "frequencies, " @@ -3903,7 +3904,7 @@ def test_select(input_uvf, uvf_mode): uvf2.history, ) else: - assert uvutils._check_histories( + assert helpers._check_histories( old_history + " Downselected to " "specific times, " "frequencies, " @@ -3965,7 +3966,7 @@ def test_select_parse_ants(uvf_from_data, uvf_mode): assert uvf.Nbls == 3 assert np.array_equiv( np.unique(uvf.baseline_array), - uvutils.antnums_to_baseline( + utils.antnums_to_baseline( *np.transpose([(88, 97), (97, 104), (97, 105)]), Nants_telescope=uvf.telescope.Nants, ), From c013e20dd17187e84599ada9795dc7806f99fb7a Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Thu, 20 Jun 2024 15:46:44 -0700 Subject: [PATCH 02/12] put utils extensions inside utils namespace --- setup.py | 6 +++--- src/pyuvdata/utils/bls.py | 
2 +- src/pyuvdata/utils/coordinates.py | 2 +- src/pyuvdata/utils/phasing.py | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/setup.py b/setup.py index 60eafc411d..53cebdf7a0 100644 --- a/setup.py +++ b/setup.py @@ -88,7 +88,7 @@ def is_platform_windows(): ) bls_extension = Extension( - "pyuvdata._bls", + "pyuvdata.utils._bls", sources=["src/pyuvdata/utils/bls.pyx"], define_macros=global_c_macros, include_dirs=[numpy.get_include()], @@ -96,7 +96,7 @@ def is_platform_windows(): ) coordinates_extension = Extension( - "pyuvdata._coordinates", + "pyuvdata.utils._coordinates", sources=["src/pyuvdata/utils/coordinates.pyx"], define_macros=global_c_macros, include_dirs=[numpy.get_include()], @@ -104,7 +104,7 @@ def is_platform_windows(): ) phasing_extension = Extension( - "pyuvdata._phasing", + "pyuvdata.utils._phasing", sources=["src/pyuvdata/utils/phasing.pyx"], define_macros=global_c_macros, include_dirs=[numpy.get_include()], diff --git a/src/pyuvdata/utils/bls.py b/src/pyuvdata/utils/bls.py index 8c1472a0fa..e5ba647ced 100644 --- a/src/pyuvdata/utils/bls.py +++ b/src/pyuvdata/utils/bls.py @@ -7,7 +7,7 @@ import numpy as np -from .. import _bls +from . import _bls from .pol import polnum2str, polstr2num __all__ = ["baseline_to_antnums", "antnums_to_baseline"] diff --git a/src/pyuvdata/utils/coordinates.py b/src/pyuvdata/utils/coordinates.py index c00916892e..df34f069f2 100644 --- a/src/pyuvdata/utils/coordinates.py +++ b/src/pyuvdata/utils/coordinates.py @@ -5,7 +5,7 @@ import numpy as np from astropy.coordinates import EarthLocation -from .. import _coordinates +from . import _coordinates try: from lunarsky import MoonLocation diff --git a/src/pyuvdata/utils/phasing.py b/src/pyuvdata/utils/phasing.py index 0ad36dae0b..c781217552 100644 --- a/src/pyuvdata/utils/phasing.py +++ b/src/pyuvdata/utils/phasing.py @@ -11,7 +11,7 @@ from astropy.time import Time from astropy.utils import iers -from .. import _phasing +from . 
import _phasing from .lst import get_lst_for_time try: From 9cfb33f75da5e3f3caf543ae4768d0b1cb356cd2 Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Fri, 21 Jun 2024 12:11:02 -0700 Subject: [PATCH 03/12] update organization to eliminate helpers catch-all --- src/pyuvdata/telescopes.py | 2 +- src/pyuvdata/utils/__init__.py | 20 +- src/pyuvdata/utils/bltaxis.py | 230 +++ src/pyuvdata/utils/coordinates.py | 95 ++ src/pyuvdata/utils/file_io/ms.py | 2 +- src/pyuvdata/utils/frequency.py | 307 ++++ src/pyuvdata/utils/helpers.py | 1340 ----------------- src/pyuvdata/utils/history.py | 65 + src/pyuvdata/utils/lst.py | 245 --- .../{ps_cat.py => phase_center_catalog.py} | 0 src/pyuvdata/utils/phasing.py | 2 +- src/pyuvdata/utils/times.py | 534 +++++++ src/pyuvdata/utils/tools.py | 371 +++++ src/pyuvdata/uvbase.py | 2 +- src/pyuvdata/uvbeam/beamfits.py | 14 +- src/pyuvdata/uvbeam/cst_beam.py | 9 +- src/pyuvdata/uvbeam/mwa_beam.py | 2 +- src/pyuvdata/uvbeam/uvbeam.py | 37 +- src/pyuvdata/uvcal/calfits.py | 10 +- src/pyuvdata/uvcal/calh5.py | 19 +- src/pyuvdata/uvcal/fhd_cal.py | 2 +- src/pyuvdata/uvcal/ms_cal.py | 5 +- src/pyuvdata/uvcal/uvcal.py | 81 +- src/pyuvdata/uvdata/fhd.py | 4 +- src/pyuvdata/uvdata/initializers.py | 2 +- src/pyuvdata/uvdata/miriad.py | 17 +- src/pyuvdata/uvdata/ms.py | 5 +- src/pyuvdata/uvdata/mwa_corr_fits.py | 7 +- src/pyuvdata/uvdata/uvdata.py | 114 +- src/pyuvdata/uvdata/uvfits.py | 7 +- src/pyuvdata/uvdata/uvh5.py | 17 +- src/pyuvdata/uvflag/uvflag.py | 73 +- tests/utils/file_io/test_hdf5.py | 20 +- tests/utils/test_bltaxis.py | 155 ++ tests/utils/test_coordinates.py | 77 + tests/utils/test_helpers.py | 294 ---- ...ps_cat.py => test_phase_center_catalog.py} | 2 +- tests/utils/{test_lst.py => test_times.py} | 4 +- tests/utils/test_tools.py | 64 + tests/uvbeam/test_beamfits.py | 2 +- tests/uvbeam/test_mwa_beam.py | 2 +- tests/uvbeam/test_uvbeam.py | 45 +- tests/uvcal/test_uvcal.py | 71 +- tests/uvdata/test_fhd.py | 2 +- tests/uvdata/test_miriad.py | 6 +- tests/uvdata/test_ms.py | 4 +- tests/uvdata/test_uvdata.py | 181 ++- tests/uvdata/test_uvfits.py | 6 +- tests/uvdata/test_uvh5.py | 6 +- tests/uvflag/test_uvflag.py | 33 +- 50 files changed, 2350 insertions(+), 2264 deletions(-) create mode 100644 src/pyuvdata/utils/bltaxis.py create mode 100644 src/pyuvdata/utils/frequency.py delete mode 100644 src/pyuvdata/utils/helpers.py create mode 100644 src/pyuvdata/utils/history.py delete mode 100644 src/pyuvdata/utils/lst.py rename src/pyuvdata/utils/{ps_cat.py => phase_center_catalog.py} (100%) create mode 100644 src/pyuvdata/utils/times.py create mode 100644 src/pyuvdata/utils/tools.py create mode 100644 tests/utils/test_bltaxis.py delete mode 100644 tests/utils/test_helpers.py rename tests/utils/{test_ps_cat.py => test_phase_center_catalog.py} (90%) rename tests/utils/{test_lst.py => test_times.py} (98%) create mode 100644 tests/utils/test_tools.py diff --git a/src/pyuvdata/telescopes.py b/src/pyuvdata/telescopes.py index d6be12461d..a03b364b47 100644 --- a/src/pyuvdata/telescopes.py +++ b/src/pyuvdata/telescopes.py @@ -466,7 +466,7 @@ def check(self, *, check_extra=True, run_check_acceptability=True): if run_check_acceptability: # Check antenna positions - utils.helpers.check_surface_based_positions( + utils.coordinates.check_surface_based_positions( antenna_positions=self.antenna_positions, telescope_loc=self.location, raise_error=False, diff --git a/src/pyuvdata/utils/__init__.py b/src/pyuvdata/utils/__init__.py index a9ae4800e4..1769bb9f97 100644 --- 
a/src/pyuvdata/utils/__init__.py +++ b/src/pyuvdata/utils/__init__.py @@ -20,33 +20,39 @@ # these seem to be necessary for the installed package to access these submodules from . import array_collapse # noqa from . import bls # noqa +from . import bltaxis # noqa from . import coordinates # noqa from . import file_io # noqa -from . import helpers # noqa +from . import frequency # noqa +from . import history # noqa +from . import phase_center_catalog # noqa from . import phasing # noqa from . import pol # noqa -from . import ps_cat # noqa from . import redundancy # noqa +from . import times # noqa +from . import tools # noqa # Add things to the utils namespace used by outside packages from .array_collapse import collapse # noqa from .bls import * # noqa from .coordinates import * # noqa -from .lst import get_lst_for_time # noqa from .phasing import uvw_track_generator # noqa from .pol import * # noqa +from .times import get_lst_for_time # noqa + +# deprecated imports def _check_histories(history1, history2): """Check if two histories are the same. - Deprecated. Use pyuvdata.utils.helpers._check_histories + Deprecated. Use pyuvdata.utils.history._check_histories """ - from .helpers import _check_histories + from .history import _check_histories warnings.warn( "The _check_histories function has moved, please import it from " - "pyuvdata.utils.helpers. This warnings will become an error in version 3.2", + "pyuvdata.utils.history. This warning will become an error in version 3.2", DeprecationWarning, ) @@ -88,6 +94,8 @@ def _fits_indexhdus(hdulist): """ Get a dict of table names and HDU numbers from a FITS HDU list. + Deprecated. Use pyuvdata.utils.file_io.fits._indexhdus. + Parameters ---------- hdulist : list of astropy.io.fits HDU objects diff --git a/src/pyuvdata/utils/bltaxis.py b/src/pyuvdata/utils/bltaxis.py new file mode 100644 index 0000000000..e67c28f4fb --- /dev/null +++ b/src/pyuvdata/utils/bltaxis.py @@ -0,0 +1,230 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Utilities for the baseline-time axis.""" + +import numpy as np + + +def determine_blt_order( + *, time_array, ant_1_array, ant_2_array, baseline_array, Nbls, Ntimes # noqa: N803 +) -> tuple[str] | None: + """Get the blt order from analysing metadata.""" + times = time_array + ant1 = ant_1_array + ant2 = ant_2_array + bls = baseline_array + + time_bl = True + time_a = True + time_b = True + bl_time = True + a_time = True + b_time = True + bl_order = True + a_order = True + b_order = True + time_order = True + + if Nbls == 1 and Ntimes == 1: + return ("baseline", "time") # w.l.o.g.
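+
+    # For example (illustrative values): given
+    #     time_array     = [t0, t0, t1, t1]  with t0 < t1,
+    #     baseline_array = [b0, b1, b0, b1]  with b0 < b1,
+    # the scan below rules out ("baseline", "time") when the baseline
+    # number drops at the second time, leaving ("time", "baseline") as
+    # the first surviving candidate to return.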
+ + for i, (t, a, b, bl) in enumerate( + zip(times[1:], ant1[1:], ant2[1:], bls[1:]), start=1 + ): + on_bl_boundary = i % Nbls == 0 + on_time_boundary = i % Ntimes == 0 + + if t < times[i - 1]: + time_bl = False + time_a = False + time_b = False + time_order = False + + if not on_time_boundary: + bl_time = False + a_time = False + b_time = False + + if bl == bls[i - 1]: + bl_time = False + if a == ant1[i - 1]: + a_time = False + if b == ant2[i - 1]: + b_time = False + + elif t == times[i - 1]: + if bl < bls[i - 1]: + time_bl = False + if a < ant1[i - 1]: + time_a = False + if b < ant2[i - 1]: + time_b = False + + if bl < bls[i - 1]: + bl_time = False + bl_order = False + if not on_bl_boundary: + time_bl = False + if a < ant1[i - 1]: + a_time = False + a_order = False + if not on_bl_boundary: + time_a = False + if b < ant2[i - 1]: + b_time = False + b_order = False + if not on_bl_boundary: + time_b = False + + if not any( + ( + time_bl, + time_a, + time_b, + bl_time, + a_time, + b_time, + bl_order, + a_order, + b_order, + time_order, + ) + ): + break + + if Nbls > 1 and Ntimes > 1: + assert not ( + (time_bl and bl_time) + or (time_a and a_time) + or (time_b and b_time) + or (time_order and a_order) + or (time_order and b_order) + or (a_order and b_order) + or (time_order and bl_order) + ), ( + "Something went wrong when trying to determine the order of the blts axis. " + "Please raise an issue on github, as this is not meant to happen. " + "None of the following should ever be True: \n" + f"\ttime_bl and bl_time: {time_bl and bl_time}\n" + f"\ttime_a and a_time: {time_a and a_time}\n" + f"\ttime_b and b_time: {time_b and b_time}\n" + f"\ttime_order and a_order: {time_order and a_order}\n" + f"\ttime_order and b_order: {time_order and b_order}\n" + f"\ta_order and b_order: {a_order and b_order}\n" + f"\ttime_order and bl_order: {time_order and bl_order}\n\n" + "Please include the following information in your issue:\n" + f"Nbls: {Nbls}\n" + f"Ntimes: {Ntimes}\n" + f"TIMES: {times}\n" + f"ANT1: {ant1}\n" + f"ANT2: {ant2}\n" + f"BASELINES: {bls}\n" + ) + + if time_bl: + return ("time", "baseline") + if bl_time: + return ("baseline", "time") + if time_a: + return ("time", "ant1") + if a_time: + return ("ant1", "time") + if time_b: + return ("time", "ant2") + if b_time: + return ("ant2", "time") + if bl_order: + return ("baseline",) + if a_order: + return ("ant1",) + if b_order: + return ("ant2",) + if time_order: + return ("time",) + + return None + + +def determine_rectangularity( + *, + time_array: np.ndarray, + baseline_array: np.ndarray, + nbls: int, + ntimes: int, + blt_order: str | tuple[str] | None = None, +): + """Determine if the data is rectangular or not. + + Parameters + ---------- + time_array : array_like + Array of times in JD. + baseline_array : array_like + Array of baseline integers. + nbls : int + Number of baselines. + ntimes : int + Number of times. + blt_order : str or tuple of str, optional + If known, pass the blt_order, which can short-circuit the determination + of rectangularity. + + Returns + ------- + is_rect : bool + True if the data is rectangular, False otherwise. + time_axis_faster_than_bls : bool + True if the data is rectangular and the time axis is the last axis (i.e. times + change first, then bls). False either if baselines change first, OR if it is + not rectangular.
+ + Notes + ----- + Rectangular data is defined as data for which using regular slicing of size Ntimes + or Nbls will give you either all the same time and all different baselines, or + vice versa. This does NOT require that the baselines and times are sorted within + that structure. + """ + # check if the data is rectangular + time_first = True + bl_first = True + + if time_array.size != nbls * ntimes: + return False, False + elif nbls * ntimes == 1: + return True, True + elif nbls == 1: + return True, True + elif ntimes == 1: + return True, False + elif blt_order == ("baseline", "time"): + return True, True + elif blt_order == ("time", "baseline"): + return True, False + + # That's all the easiest checks. + if time_array[1] == time_array[0]: + time_first = False + if baseline_array[1] == baseline_array[0]: + bl_first = False + if not time_first and not bl_first: + return False, False + + if time_first: + time_array = time_array.reshape((nbls, ntimes)) + baseline_array = baseline_array.reshape((nbls, ntimes)) + if np.sum(np.abs(np.diff(time_array, axis=0))) != 0: + return False, False + if (np.diff(baseline_array, axis=1) != 0).any(): + return False, False + return True, True + elif bl_first: + time_array = time_array.reshape((ntimes, nbls)) + baseline_array = baseline_array.reshape((ntimes, nbls)) + if np.sum(np.abs(np.diff(time_array, axis=1))) != 0: + return False, False + if (np.diff(baseline_array, axis=0) != 0).any(): + return False, False + return True, False diff --git a/src/pyuvdata/utils/coordinates.py b/src/pyuvdata/utils/coordinates.py index df34f069f2..1ddf35e121 100644 --- a/src/pyuvdata/utils/coordinates.py +++ b/src/pyuvdata/utils/coordinates.py @@ -2,6 +2,8 @@ # Copyright (c) 2024 Radio Astronomy Software Group # Licensed under the 2-clause BSD License """Utilities for coordinate transforms.""" +import warnings + import numpy as np from astropy.coordinates import EarthLocation @@ -34,6 +36,12 @@ allowed_location_types.append(MoonLocation) +_range_dict = { + "itrs": (6.35e6, 6.39e6, "Earth"), + "mcmf": (1717100.0, 1757100.0, "Moon"), +} + + def LatLonAlt_from_XYZ(xyz, *, frame="ITRS", ellipsoid=None, check_acceptability=True): """ Calculate lat/lon/alt from ECEF x,y,z. @@ -472,3 +480,90 @@ def ECEF_from_ENU( xyz = np.squeeze(xyz) return xyz + + +def check_surface_based_positions( + *, + telescope_loc=None, + telescope_frame="itrs", + antenna_positions=None, + raise_error=True, + raise_warning=True, +): + """ + Check that antenna positions are consistent with ground-based values. + + Check that the antenna positions, telescope location, or combination of both produces + locations that are consistent with surface-based positions. If supplying both + antenna positions and telescope location, the check will be run against the sum total + of both. For the Earth, the permitted range of values is between 6350 and 6390 km, + whereas for the Moon the range is 1717.1 to 1757.1 km. + + Parameters + ---------- + telescope_loc : tuple or EarthLocation or MoonLocation + Telescope location, specified as a 3-element tuple (specifying geo/selenocentric + position in meters) or as an astropy EarthLocation (or lunarsky MoonLocation). + telescope_frame : str, optional + Reference frame for latitude/longitude/altitude. Options are itrs (default) or + mcmf. Only used if telescope_loc is not an EarthLocation or MoonLocation. + antenna_positions : ndarray of float + List of antenna positions relative to array center in ECEF coordinates, + required if not providing `uvw_array`. Shape is (Nants, 3).
If no telescope_loc + is specified, these values will be assumed to be relative to geocenter. + raise_error : bool + If True, an error is raised if antenna_positions and/or telescope_loc do not conform + to expectations for a surface-based telescope. Default is True. + raise_warning : bool + If True, a warning is raised if antenna_positions and/or telescope_loc do not + conform to expectations for a surface-based telescope. Default is True, only + used if `raise_error` is set to False. + + Returns + ------- + valid : bool + If True, the antenna_positions and/or telescope_loc conform to expectations for + a surface-based telescope. Otherwise returns False. + + """ + if antenna_positions is None: + antenna_positions = np.zeros((1, 3)) + + if isinstance(telescope_loc, EarthLocation) or ( + hasmoon and isinstance(telescope_loc, MoonLocation) + ): + antenna_positions = antenna_positions + ( + telescope_loc.x.to("m").value, + telescope_loc.y.to("m").value, + telescope_loc.z.to("m").value, + ) + if isinstance(telescope_loc, EarthLocation): + telescope_frame = "itrs" + else: + telescope_frame = "mcmf" + elif telescope_loc is not None: + antenna_positions = antenna_positions + telescope_loc + + low_lim, hi_lim, world = _range_dict[telescope_frame] + + err_type = None + if np.any(np.sum(antenna_positions**2.0, axis=1) < low_lim**2.0): + err_type = "below" + elif np.any(np.sum(antenna_positions**2.0, axis=1) > hi_lim**2.0): + err_type = "above" + + if err_type is None: + return True + + err_msg = ( + f"{telescope_frame} position vector magnitudes must be on the order of " + f"the radius of {world} -- they appear to lie well {err_type} this." + ) + + # If desired, raise an error + if raise_error: + raise ValueError(err_msg) + + # Otherwise, if desired, raise a warning instead + if raise_warning: + warnings.warn(err_msg) + + return False diff --git a/src/pyuvdata/utils/file_io/ms.py b/src/pyuvdata/utils/file_io/ms.py index 96e841e9b7..286c4f4738 100644 --- a/src/pyuvdata/utils/file_io/ms.py +++ b/src/pyuvdata/utils/file_io/ms.py @@ -940,7 +940,7 @@ def read_ms_history(filepath, pyuvdata_version_str, check_origin=False, raise_er history_str += message[idx] + "\n" # Check and make sure the pyuvdata version is in the history if it's not already - if not utils.helpers._check_history_version(history_str, pyuvdata_version_str): + if not utils.history._check_history_version(history_str, pyuvdata_version_str): history_str += pyuvdata_version_str # Finally, return the completed string diff --git a/src/pyuvdata/utils/frequency.py b/src/pyuvdata/utils/frequency.py new file mode 100644 index 0000000000..78be2954b4 --- /dev/null +++ b/src/pyuvdata/utils/frequency.py @@ -0,0 +1,307 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Frequency related utilities.""" +import warnings + +import numpy as np + +from .tools import _test_array_constant + + +def _check_flex_spw_contiguous(*, spw_array, flex_spw_id_array): + """ + Check if the spectral windows are contiguous for multi-spw datasets. + + This checks the flex_spw_id_array to make sure that all channels for each + spectral window are together in one block, versus being interspersed (e.g., + channels #1 and #3 are in spw #1, channels #2 and #4 are in spw #2). In theory, + UVH5 and UVData objects can handle this, but MIRIAD, MIR, UVFITS, and MS file + formats cannot, so we just consider it forbidden.
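+
+    For example (illustrative values), a `flex_spw_id_array` of [1, 1, 2, 2]
+    is contiguous, while [1, 2, 1, 2] is interspersed and will raise a
+    ValueError.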
+ + Parameters + ---------- + spw_array : array of integers + Array of spectral window numbers, shape (Nspws,). + flex_spw_id_array : array of integers + Array of spectral window numbers per frequency channel, shape (Nfreqs,). + + """ + exp_spw_ids = np.unique(spw_array) + # This is an internal consistency check to make sure that the indexes match + # up as expected -- this shouldn't error unless someone is mucking with + # settings they shouldn't be. + assert np.all(np.unique(flex_spw_id_array) == exp_spw_ids), ( + "There are some entries in flex_spw_id_array that are not in spw_array. " + "This is a bug, please report it in an issue." + ) + + n_breaks = np.sum(flex_spw_id_array[1:] != flex_spw_id_array[:-1]) + if (n_breaks + 1) != spw_array.size: + raise ValueError( + "Channels from different spectral windows are interspersed with " + "one another, rather than being grouped together along the " + "frequency axis. Most file formats do not support such " + "non-grouping of data." + ) + + +def _check_freq_spacing( + *, + freq_array, + freq_tols, + channel_width, + channel_width_tols, + spw_array, + flex_spw_id_array, + raise_errors=True, +): + """ + Check if frequencies are evenly spaced and separated by their channel width. + + This is a requirement for writing uvfits & miriad files. + + Parameters + ---------- + freq_array : array of float + Array of frequencies, shape (Nfreqs,). + freq_tols : tuple of float + freq_array tolerances (from uvobj._freq_array.tols). + channel_width : array of float + Channel widths, either a scalar or an array of shape (Nfreqs,). + channel_width_tols : tuple of float + channel_width tolerances (from uvobj._channel_width.tols). + spw_array : array of integers or None + Array of spectral window numbers, shape (Nspws,). + flex_spw_id_array : array of integers or None + Array of spectral window numbers per frequency channel, shape (Nfreqs,). + raise_errors : bool + Option to raise errors if the various checks do not pass. + + Returns + ------- + spacing_error : bool + Flag that channel spacings or channel widths are not equal. + chanwidth_error : bool + Flag that channel spacing does not match channel width. + + """ + spacing_error = False + chanwidth_error = False + + # Check to make sure that the flexible spectral window has indices set up + # correctly (grouped together) for this check + _check_flex_spw_contiguous(spw_array=spw_array, flex_spw_id_array=flex_spw_id_array) + + for spw_id in spw_array: + mask = flex_spw_id_array == spw_id + if sum(mask) > 1: + freq_spacing = np.diff(freq_array[mask]) + freq_dir = -1.0 if all(freq_spacing < 0) else 1.0 + if not _test_array_constant(freq_spacing, tols=freq_tols): + spacing_error = True + if not _test_array_constant(channel_width[mask], tols=channel_width_tols): + spacing_error = True + elif not np.allclose( + freq_spacing, + np.mean(channel_width[mask]) * freq_dir, + rtol=channel_width_tols[0], + atol=channel_width_tols[1], + ): + chanwidth_error = True + + if raise_errors and spacing_error: + raise ValueError( + "The frequencies are not evenly spaced (probably because of a select " + "operation) or have differing values of channel widths. Some file formats " + "(e.g.
 uvfits, miriad) " + "do not support unevenly spaced frequencies." + ) + if raise_errors and chanwidth_error: + raise ValueError( + "The frequencies are separated by more than their channel width (probably " + "because of a select operation). Some file formats (e.g. uvfits, miriad) " + "do not support frequencies that are spaced by more than their channel " + "width." + ) + + return spacing_error, chanwidth_error + + +def _sort_freq_helper( + *, + Nfreqs, # noqa: N803 + freq_array, + Nspws, # noqa: N803 + spw_array, + flex_spw_id_array, + spw_order, + channel_order, + select_spw, +): + """ + Figure out the frequency sorting order for object based frequency sorting. + + Parameters + ---------- + Nfreqs : int + Number of frequencies, taken directly from the object parameter. + freq_array : array_like of float + Frequency array, taken directly from the object parameter. + Nspws : int + Number of spectral windows, taken directly from the object parameter. + spw_array : array_like of int + Spectral window array, taken directly from the object parameter. + flex_spw_id_array : array_like of int + Array of SPW IDs for each channel, taken directly from the object parameter. + spw_order : str or array_like of int + A string describing the desired order of spectral windows along the + frequency axis. Allowed strings include `number` (sort on spectral window + number) and `freq` (sort on median frequency). A '-' can be prepended + to signify descending order instead of the default ascending order, + e.g., if you have SPW #1 and 2, and wanted them ordered as [2, 1], + you would specify `-number`. Alternatively, one can supply an index array + of length Nspws that specifies how to shuffle the spws (this is not the desired + final spw order). Default is to apply no sorting of spectral windows. + channel_order : str or array_like of int + A string describing the desired order of frequency channels within a + spectral window. Allowed strings include `freq`, which will sort channels + within a spectral window by frequency. A '-' can be optionally prepended + to signify descending order instead of the default ascending order. + Alternatively, one can supply an index array of length Nfreqs that + specifies the new order. Default is to apply no sorting of channels + within a single spectral window. Note that providing an array_like of ints + will cause the values given to `spw_order` and `select_spw` to be ignored. + select_spw : int or array_like of int + An int or array_like of ints which specifies which spectral windows to + apply sorting to. Note that setting this argument will cause the value + given to `spw_order` to be ignored. + + Returns + ------- + index_array : ndarray of int + Array giving the desired order of the channels to be used for sorting along the + frequency axis + + Raises + ------ + UserWarning + Raised if providing arguments to select_spw and channel_order (the latter + overrides the former). + ValueError + Raised if select_spw contains values not in spw_array, or if channel_order + is not the same length as freq_array. + + """ + if (spw_order is None) and (channel_order is None): + warnings.warn( + "Not specifying either spw_order or channel_order causes " + "no sorting actions to be applied. Returning object unchanged."
+ ) + return + + # Check to see if there are arguments we should be ignoring + if isinstance(channel_order, (np.ndarray, list, tuple)): + if select_spw is not None: + warnings.warn( + "The select_spw argument is ignored when providing an " + "array_like of int for channel_order" + ) + if spw_order is not None: + warnings.warn( + "The spw_order argument is ignored when providing an " + "array_like of int for channel_order" + ) + channel_order = np.asarray(channel_order) + if not channel_order.size == Nfreqs or not np.all( + np.sort(channel_order) == np.arange(Nfreqs) + ): + raise ValueError( + "Index array for channel_order must contain all indices for " + "the frequency axis, without duplicates." + ) + index_array = channel_order + else: + index_array = np.arange(Nfreqs) + # Multiply by 1.0 here to make a cheap copy of the array to manipulate + temp_freqs = 1.0 * freq_array + # Same trick for ints -- add 0 to make a cheap copy + temp_spws = 0 + flex_spw_id_array + + # Check whether or not we need to sort the channels in individual windows + sort_spw = {idx: channel_order is not None for idx in spw_array} + if select_spw is not None: + if spw_order is not None: + warnings.warn( + "The spw_order argument is ignored when providing an " + "argument for select_spw" + ) + if channel_order is None: + warnings.warn( + "Specifying select_spw without providing channel_order causes " + "no sorting actions to be applied. Returning object unchanged." + ) + return + if isinstance(select_spw, (np.ndarray, list, tuple)): + sort_spw = {idx: idx in select_spw for idx in spw_array} + else: + sort_spw = {idx: idx == select_spw for idx in spw_array} + elif spw_order is not None: + if isinstance(spw_order, (np.ndarray, list, tuple)): + spw_order = np.asarray(spw_order) + if not spw_order.size == Nspws or not np.all( + np.sort(spw_order) == np.arange(Nspws) + ): + raise ValueError( + "Index array for spw_order must contain all indices for " + "the spw_array, without duplicates." + ) + elif spw_order not in ["number", "freq", "-number", "-freq", None]: + raise ValueError( + "spw_order can only be one of 'number', '-number', " + "'freq', '-freq', None or an index array of length Nspws" + ) + elif Nspws > 1: + # Only need to do this step if we actually have multiple spws.
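+
+            # For example (illustrative values): spw_order="-number" with
+            # spw_array=[1, 2, 3] yields the shuffle [2, 1, 0] below, which
+            # places spw 3 first along the frequency axis.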
+ + # If the string starts with a '-', then we will flip the order at + # the end of the operation + flip_spws = spw_order[0] == "-" + + if "number" in spw_order: + spw_order = np.argsort(spw_array) + elif "freq" in spw_order: + spw_order = np.argsort( + [np.median(temp_freqs[temp_spws == idx]) for idx in spw_array] + ) + if flip_spws: + spw_order = np.flip(spw_order) + else: + spw_order = np.arange(Nspws) + # Now that we know the spw order, we can apply the first sort + index_array = np.concatenate( + [index_array[temp_spws == spw] for spw in spw_array[spw_order]] + ) + temp_freqs = temp_freqs[index_array] + temp_spws = temp_spws[index_array] + # Spectral windows are assumed sorted at this point + if channel_order is not None: + if channel_order not in ["freq", "-freq"]: + raise ValueError( + "channel_order can only be one of 'freq' or '-freq' or an index " + "array of length Nfreqs" + ) + for idx in spw_array: + if sort_spw[idx]: + select_mask = temp_spws == idx + subsort_order = index_array[select_mask] + subsort_order = subsort_order[np.argsort(temp_freqs[select_mask])] + index_array[select_mask] = ( + np.flip(subsort_order) + if channel_order[0] == "-" + else subsort_order + ) + if np.all(index_array[1:] > index_array[:-1]): + # Nothing to do - the data are already sorted! + return + + return index_array diff --git a/src/pyuvdata/utils/helpers.py b/src/pyuvdata/utils/helpers.py deleted file mode 100644 index 0838c27e30..0000000000 --- a/src/pyuvdata/utils/helpers.py +++ /dev/null @@ -1,1340 +0,0 @@ -# -*- mode: python; coding: utf-8 -*- -# Copyright (c) 2024 Radio Astronomy Software Group -# Licensed under the 2-clause BSD License -"""Helper utilities.""" - -import warnings -from collections.abc import Iterable -from typing import Iterable as IterableType - -import numpy as np -from astropy.coordinates import EarthLocation - -from .coordinates import hasmoon -from .lst import get_lst_for_time - -if hasmoon: - from lunarsky import MoonLocation - -_range_dict = { - "itrs": (6.35e6, 6.39e6, "Earth"), - "mcmf": (1717100.0, 1757100.0, "Moon"), -} - - -def _check_history_version(history, version_string): - """Check if version_string is present in history string.""" - if version_string.replace(" ", "") in history.replace("\n", "").replace(" ", ""): - return True - else: - return False - - -def _check_histories(history1, history2): - """Check if two histories are the same.""" - if history1.replace("\n", "").replace(" ", "") == history2.replace( - "\n", "" - ).replace(" ", ""): - return True - else: - return False - - -def _combine_history_addition(history1, history2): - """ - Find extra history to add to have minimal repeats. - - Parameters - ---------- - history1 : str - First history. - history2 : str - Second history - - Returns - ------- - str - Extra history to add to first history. - - """ - # first check if they're the same to avoid more complicated processing. 
- if _check_histories(history1, history2): - return None - - hist2_words = history2.split(" ") - add_hist = "" - test_hist1 = " " + history1 + " " - for i, word in enumerate(hist2_words): - if " " + word + " " not in test_hist1: - add_hist += " " + word - keep_going = i + 1 < len(hist2_words) - while keep_going: - if (hist2_words[i + 1] == " ") or ( - " " + hist2_words[i + 1] + " " not in test_hist1 - ): - add_hist += " " + hist2_words[i + 1] - del hist2_words[i + 1] - keep_going = i + 1 < len(hist2_words) - else: - keep_going = False - - if add_hist == "": - add_hist = None - return add_hist - - -def _test_array_constant(array, *, tols=None): - """ - Check if an array contains constant values to some tolerance. - - Uses np.isclose on the min & max of the arrays with the given tolerances. - - Parameters - ---------- - array : np.ndarray or UVParameter - UVParameter or array to check for constant values. - tols : tuple of float, optional - length 2 tuple giving (rtol, atol) to pass to np.isclose, defaults to (0, 0) if - passing an array, otherwise defaults to using the tolerance on the UVParameter. - - Returns - ------- - bool - True if the array is constant to the given tolerances, False otherwise. - """ - # Import UVParameter here rather than at the top to avoid circular imports - from pyuvdata.parameter import UVParameter - - if isinstance(array, UVParameter): - array_to_test = array.value - if tols is None: - tols = array.tols - else: - array_to_test = array - if tols is None: - tols = (0, 0) - assert isinstance(tols, tuple), "tols must be a length-2 tuple" - assert len(tols) == 2, "tols must be a length-2 tuple" - - if array_to_test.size == 1: - # arrays with 1 element are constant by definition - return True - - # if min and max are equal don't bother with tolerance checking - if np.min(array_to_test) == np.max(array_to_test): - return True - - return np.isclose( - np.min(array_to_test), np.max(array_to_test), rtol=tols[0], atol=tols[1] - ) - - -def _test_array_constant_spacing(array, *, tols=None): - """ - Check if an array is constantly spaced to some tolerance. - - Calls _test_array_constant on the np.diff of the array. - - Parameters - ---------- - array : np.ndarray or UVParameter - UVParameter or array to check for constant spacing. - tols : tuple of float, optional - length 2 tuple giving (rtol, atol) to pass to np.isclose, defaults to (0, 0) if - passing an array, otherwise defaults to using the tolerance on the UVParameter. - - Returns - ------- - bool - True if the array spacing is constant to the given tolerances, False otherwise. - """ - # Import UVParameter here rather than at the top to avoid circular imports - from pyuvdata.parameter import UVParameter - - if isinstance(array, UVParameter): - array_to_test = array.value - if tols is None: - tols = array.tols - else: - array_to_test = array - if tols is None: - tols = (0, 0) - assert isinstance(tols, tuple), "tols must be a length-2 tuple" - assert len(tols) == 2, "tols must be a length-2 tuple" - - if array_to_test.size <= 2: - # arrays with 1 or 2 elements are constantly spaced by definition - return True - - array_diff = np.diff(array_to_test) - return _test_array_constant(array_diff, tols=tols) - - -def _check_flex_spw_contiguous(*, spw_array, flex_spw_id_array): - """ - Check if the spectral windows are contiguous for multi-spw datasets. 
- - This checks the flex_spw_id_array to make sure that all channels for each - spectral window are together in one block, versus being interspersed (e.g., - channel #1 and #3 is in spw #1, channels #2 and #4 are in spw #2). In theory, - UVH5 and UVData objects can handle this, but MIRIAD, MIR, UVFITS, and MS file - formats cannot, so we just consider it forbidden. - - Parameters - ---------- - spw_array : array of integers - Array of spectral window numbers, shape (Nspws,). - flex_spw_id_array : array of integers - Array of spectral window numbers per frequency channel, shape (Nfreqs,). - - """ - exp_spw_ids = np.unique(spw_array) - # This is an internal consistency check to make sure that the indexes match - # up as expected -- this shouldn't error unless someone is mucking with - # settings they shouldn't be. - assert np.all(np.unique(flex_spw_id_array) == exp_spw_ids), ( - "There are some entries in flex_spw_id_array that are not in spw_array. " - "This is a bug, please report it in an issue." - ) - - n_breaks = np.sum(flex_spw_id_array[1:] != flex_spw_id_array[:-1]) - if (n_breaks + 1) != spw_array.size: - raise ValueError( - "Channels from different spectral windows are interspersed with " - "one another, rather than being grouped together along the " - "frequency axis. Most file formats do not support such " - "non-grouping of data." - ) - - -def _check_freq_spacing( - *, - freq_array, - freq_tols, - channel_width, - channel_width_tols, - spw_array, - flex_spw_id_array, - raise_errors=True, -): - """ - Check if frequencies are evenly spaced and separated by their channel width. - - This is a requirement for writing uvfits & miriad files. - - Parameters - ---------- - freq_array : array of float - Array of frequencies, shape (Nfreqs,). - freq_tols : tuple of float - freq_array tolerances (from uvobj._freq_array.tols). - channel_width : array of float - Channel widths, either a scalar or an array of shape (Nfreqs,). - channel_width_tols : tuple of float - channel_width tolerances (from uvobj._channel_width.tols). - spw_array : array of integers or None - Array of spectral window numbers, shape (Nspws,). - flex_spw_id_array : array of integers or None - Array of spectral window numbers per frequency channel, shape (Nfreqs,). - raise_errors : bool - Option to raise errors if the various checks do not pass. - - Returns - ------- - spacing_error : bool - Flag that channel spacings or channel widths are not equal. - chanwidth_error : bool - Flag that channel spacing does not match channel width. - - """ - spacing_error = False - chanwidth_error = False - - # Check to make sure that the flexible spectral window has indicies set up - # correctly (grouped together) for this check - _check_flex_spw_contiguous(spw_array=spw_array, flex_spw_id_array=flex_spw_id_array) - - for spw_id in spw_array: - mask = flex_spw_id_array == spw_id - if sum(mask) > 1: - freq_spacing = np.diff(freq_array[mask]) - freq_dir = -1.0 if all(freq_spacing < 0) else 1.0 - if not _test_array_constant(freq_spacing, tols=freq_tols): - spacing_error = True - if not _test_array_constant(channel_width[mask], tols=channel_width_tols): - spacing_error = True - elif not np.allclose( - freq_spacing, - np.mean(channel_width[mask]) * freq_dir, - rtol=channel_width_tols[0], - atol=channel_width_tols[1], - ): - chanwidth_error = True - - if raise_errors and spacing_error: - raise ValueError( - "The frequencies are not evenly spaced (probably because of a select " - "operation) or has differing values of channel widths. 
Some file formats " - "(e.g. uvfits, miriad) do not support unevenly spaced frequencies." - ) - if raise_errors and chanwidth_error: - raise ValueError( - "The frequencies are separated by more than their channel width (probably " - "because of a select operation). Some file formats (e.g. uvfits, miriad) " - "do not support frequencies that are spaced by more than their channel " - "width." - ) - - return spacing_error, chanwidth_error - - -def _get_iterable(x): - """Return iterable version of input.""" - if isinstance(x, Iterable): - return x - else: - return (x,) - - -def _sort_freq_helper( - *, - Nfreqs, # noqa: N803 - freq_array, - Nspws, - spw_array, - flex_spw_id_array, - spw_order, - channel_order, - select_spw, -): - """ - Figure out the frequency sorting order for object based frequency sorting. - - Parameters - ---------- - Nfreqs : int - Number of frequencies, taken directly from the object parameter. - freq_array : array_like of float - Frequency array, taken directly from the object parameter. - Nfreqs : int - Number of spectral windows, taken directly from the object parameter. - spw_array : array_like of int - Spectral window array, taken directly from the object parameter. - flex_spw_id_array : array_like of int - Array of SPW IDs for each channel, taken directly from the object parameter. - spw_order : str or array_like of int - A string describing the desired order of spectral windows along the - frequency axis. Allowed strings include `number` (sort on spectral window - number) and `freq` (sort on median frequency). A '-' can be prepended - to signify descending order instead of the default ascending order, - e.g., if you have SPW #1 and 2, and wanted them ordered as [2, 1], - you would specify `-number`. Alternatively, one can supply an index array - of length Nspws that specifies how to shuffle the spws (this is not the desired - final spw order). Default is to apply no sorting of spectral windows. - channel_order : str or array_like of int - A string describing the desired order of frequency channels within a - spectral window. Allowed strings include `freq`, which will sort channels - within a spectral window by frequency. A '-' can be optionally prepended - to signify descending order instead of the default ascending order. - Alternatively, one can supply an index array of length Nfreqs that - specifies the new order. Default is to apply no sorting of channels - within a single spectral window. Note that proving an array_like of ints - will cause the values given to `spw_order` and `select_spw` to be ignored. - select_spw : int or array_like of int - An int or array_like of ints which specifies which spectral windows to - apply sorting. Note that setting this argument will cause the value - given to `spw_order` to be ignored. - - Returns - ------- - index_array : ndarray of int - Array giving the desired order of the channels to be used for sorting along the - frequency axis - - Raises - ------ - UserWarning - Raised if providing arguments to select_spw and channel_order (the latter - overrides the former). - ValueError - Raised if select_spw contains values not in spw_array, or if channel_order - is not the same length as freq_array. - - """ - if (spw_order is None) and (channel_order is None): - warnings.warn( - "Not specifying either spw_order or channel_order causes " - "no sorting actions to be applied. Returning object unchanged." 
- ) - return - - # Check to see if there are arguments we should be ignoring - if isinstance(channel_order, (np.ndarray, list, tuple)): - if select_spw is not None: - warnings.warn( - "The select_spw argument is ignored when providing an " - "array_like of int for channel_order" - ) - if spw_order is not None: - warnings.warn( - "The spw_order argument is ignored when providing an " - "array_like of int for channel_order" - ) - channel_order = np.asarray(channel_order) - if not channel_order.size == Nfreqs or not np.all( - np.sort(channel_order) == np.arange(Nfreqs) - ): - raise ValueError( - "Index array for channel_order must contain all indicies for " - "the frequency axis, without duplicates." - ) - index_array = channel_order - else: - index_array = np.arange(Nfreqs) - # Multipy by 1.0 here to make a cheap copy of the array to manipulate - temp_freqs = 1.0 * freq_array - # Same trick for ints -- add 0 to make a cheap copy - temp_spws = 0 + flex_spw_id_array - - # Check whether or not we need to sort the channels in individual windows - sort_spw = {idx: channel_order is not None for idx in spw_array} - if select_spw is not None: - if spw_order is not None: - warnings.warn( - "The spw_order argument is ignored when providing an " - "argument for select_spw" - ) - if channel_order is None: - warnings.warn( - "Specifying select_spw without providing channel_order causes " - "no sorting actions to be applied. Returning object unchanged." - ) - return - if isinstance(select_spw, (np.ndarray, list, tuple)): - sort_spw = {idx: idx in select_spw for idx in spw_array} - else: - sort_spw = {idx: idx == select_spw for idx in spw_array} - elif spw_order is not None: - if isinstance(spw_order, (np.ndarray, list, tuple)): - spw_order = np.asarray(spw_order) - if not spw_order.size == Nspws or not np.all( - np.sort(spw_order) == np.arange(Nspws) - ): - raise ValueError( - "Index array for spw_order must contain all indicies for " - "the spw_array, without duplicates." - ) - elif spw_order not in ["number", "freq", "-number", "-freq", None]: - raise ValueError( - "spw_order can only be one of 'number', '-number', " - "'freq', '-freq', None or an index array of length Nspws" - ) - elif Nspws > 1: - # Only need to do this step if we actually have multiple spws. 
- - # If the string starts with a '-', then we will flip the order at - # the end of the operation - flip_spws = spw_order[0] == "-" - - if "number" in spw_order: - spw_order = np.argsort(spw_array) - elif "freq" in spw_order: - spw_order = np.argsort( - [np.median(temp_freqs[temp_spws == idx]) for idx in spw_array] - ) - if flip_spws: - spw_order = np.flip(spw_order) - else: - spw_order = np.arange(Nspws) - # Now that we know the spw order, we can apply the first sort - index_array = np.concatenate( - [index_array[temp_spws == spw] for spw in spw_array[spw_order]] - ) - temp_freqs = temp_freqs[index_array] - temp_spws = temp_spws[index_array] - # Spectral windows are assumed sorted at this point - if channel_order is not None: - if channel_order not in ["freq", "-freq"]: - raise ValueError( - "channel_order can only be one of 'freq' or '-freq' or an index " - "array of length Nfreqs" - ) - for idx in spw_array: - if sort_spw[idx]: - select_mask = temp_spws == idx - subsort_order = index_array[select_mask] - subsort_order = subsort_order[np.argsort(temp_freqs[select_mask])] - index_array[select_mask] = ( - np.flip(subsort_order) - if channel_order[0] == "-" - else subsort_order - ) - if np.all(index_array[1:] > index_array[:-1]): - # Nothing to do - the data are already sorted! - return - - return index_array - - -def _sorted_unique_union(obj1, obj2=None): - """ - Determine the union of unique elements from two lists. - - Convenience function for handling various actions with indices. - - Parameters - ---------- - obj1 : list or tuple or set or 1D ndarray - First list from which to determine unique entries. - obj2 : list or tuple or set or 1D ndarray - Second list from which to determine unique entries, which is joined with the - first list. If None, the method will simply return the sorted list of unique - elements in obj1. - - Returns - ------- - sorted_unique : list - List containing the union of unique entries between obj1 and obj2. - """ - return sorted(set(obj1)) if obj2 is None else sorted(set(obj1).union(obj2)) - - -def _sorted_unique_intersection(obj1, obj2=None): - """ - Determine the intersection of unique elements from two lists. - - Convenience function for handling various actions with indices. - - Parameters - ---------- - obj1 : list or tuple or set or 1D ndarray - First list from which to determine unique entries. - obj2 : list or tuple or set or 1D ndarray - Second list from which to determine unique entries, which is intersected with - the first list. If None, the method will simply return the sorted list of unique - elements in obj1. - - Returns - ------- - sorted_unique : list - List containing the intersection of unique entries between obj1 and obj2. - """ - return sorted(set(obj1)) if obj2 is None else sorted(set(obj1).intersection(obj2)) - - -def _sorted_unique_difference(obj1, obj2=None): - """ - Determine the difference of unique elements from two lists. - - Convenience function for handling various actions with indices. - - Parameters - ---------- - obj1 : list or tuple or set or 1D ndarray - First list from which to determine unique entries. - obj2 : list or tuple or set or 1D ndarray - Second list from which to determine unique entries, which is differenced with - the first list. If None, the method will simply return the sorted list of unique - elements in obj1. - - Returns - ------- - sorted_unique : list - List containing the difference in unique entries between obj1 and obj2. 
- """ - return sorted(set(obj1)) if obj2 is None else sorted(set(obj1).difference(obj2)) - - -def _combine_filenames(filename1, filename2): - """Combine the filename attribute from multiple UVBase objects. - - The 4 cases are: - 1. `filename1` has been set, `filename2` has not - 2. `filename1` has not been set, `filename2` has - 3. `filename1` and `filename2` both have been set - 4. `filename1` and `filename2` both have not been set - In case (1), we do not want to update the attribute, because it is - already set correctly. In case (2), we want to replace `filename1` - with the value from `filename2. In case (3), we want to take the union of - the sets of the filenames. In case (4), we want the filename attribute - to still be `None`. - - Parameters - ---------- - filename1 : list of str or None - The list of filenames for the first UVBase object. If it is not set, it - should be `None`. - filename2 : list of str or None - The list of filenames for the second UVData object. If it is not set, it - should be `None`. - - Returns - ------- - combined_filenames : list of str or None - The combined list, with potentially duplicate entries removed. - """ - combined_filenames = filename1 - if filename1 is not None: - if filename2 is not None: - combined_filenames = sorted(set(filename1).union(set(filename2))) - elif filename2 is not None: - combined_filenames = filename2 - - return combined_filenames - - -def _convert_to_slices( - indices, *, max_nslice_frac=0.1, max_nslice=None, return_index_on_fail=False -): - """ - Convert list of indices to a list of slices. - - Parameters - ---------- - indices : list - A 1D list of integers for array indexing (boolean ndarrays are also supported). - max_nslice_frac : float - A float from 0 -- 1. If the number of slices - needed to represent input 'indices' divided by len(indices) - exceeds this fraction, then we determine that we cannot - easily represent 'indices' with a list of slices. - max_nslice : int - Optional argument, defines the maximum number of slices for determining if - `indices` can be easily represented with a list of slices. If set, then - the argument supplied to `max_nslice_frac` is ignored. - return_index_on_fail : bool - If set to True and the list of input indexes cannot easily be respresented by - a list of slices (as defined by `max_nslice` or `max_nslice_frac`), then return - the input list of index values instead of a list of suboptimal slices. - - Returns - ------- - slice_list : list - Nominally the list of slice objects used to represent indices. However, if - `return_index_on_fail=True` and input indexes cannot easily be respresented, - return a 1-element list containing the input for `indices`. - check : bool - If True, indices is easily represented by slices - (`max_nslice_frac` or `max_nslice` conditions met), otherwise False. 
- - Notes - ----- - Example: - if: indices = [1, 2, 3, 4, 10, 11, 12, 13, 14] - then: slices = [slice(1, 5, 1), slice(11, 15, 1)] - """ - # check for already a slice or a single index position - if isinstance(indices, slice): - return [indices], True - if isinstance(indices, (int, np.integer)): - return [slice(indices, indices + 1, 1)], True - - # check for boolean index - if isinstance(indices, np.ndarray) and (indices.dtype == bool): - eval_ind = np.where(indices)[0] - else: - eval_ind = indices - # assert indices is longer than 2, or return trivial solutions - if len(eval_ind) == 0: - return [slice(0, 0, 0)], False - if len(eval_ind) <= 2: - return [ - slice(eval_ind[0], eval_ind[-1] + 1, max(eval_ind[-1] - eval_ind[0], 1)) - ], True - - # Catch the simplest case of "give me a single slice or exit" - if (max_nslice == 1) and return_index_on_fail: - step = eval_ind[1] - eval_ind[0] - if all(np.diff(eval_ind) == step): - return [slice(eval_ind[0], eval_ind[-1] + 1, step)], True - return [indices], False - - # setup empty slices list - Ninds = len(eval_ind) - slices = [] - - # iterate over indices - start = last_step = None - for ind in eval_ind: - if last_step is None: - # Check if this is the first slice, in which case start is None - if start is None: - start = ind - continue - last_step = ind - start - last_ind = ind - continue - - # calculate step from previous index - step = ind - last_ind - - # if step != last_step, this ends the slice - if step != last_step: - # append to list - slices.append(slice(start, last_ind + 1, last_step)) - - # setup next step - start = ind - last_step = None - - last_ind = ind - - # Append the last slice - slices.append(slice(start, ind + 1, last_step)) - - # determine whether slices are a reasonable representation, and determine max_nslice - # if only max_nslice_frac was supplied. - if max_nslice is None: - max_nslice = max_nslice_frac * Ninds - check = len(slices) <= max_nslice - - if return_index_on_fail and not check: - return [indices], check - else: - return slices, check - - -def slicify(ind: slice | None | IterableType[int]) -> slice | None | IterableType[int]: - """Convert an iterable of integers into a slice object if possible.""" - if ind is None or isinstance(ind, slice): - return ind - if len(ind) == 0: - return None - - if len(set(np.ediff1d(ind))) <= 1: - return slice(ind[0], ind[-1] + 1, ind[1] - ind[0] if len(ind) > 1 else 1) - else: - # can't slicify - return ind - - -def _check_range_overlap(val_range, range_type="time"): - """ - Detect if any val_range in an array overlap. - - Parameters - ---------- - val_range : np.array of float - Array of ranges, shape (Nranges, 2). - range_type : str - Type of range (for good error messages) - - Returns - ------- - bool - True if any range overlaps. - """ - # first check that time ranges are well formed (stop is >= than start) - if np.any((val_range[:, 1] - val_range[:, 0]) < 0): - raise ValueError( - f"The {range_type} ranges are not well-formed, some stop {range_type}s " - f"are after start {range_type}s." 
- ) - - # Sort by start time - sorted_ranges = val_range[np.argsort(val_range[:, 0]), :] - - # then check if adjacent pairs overlap - for ind in range(sorted_ranges.shape[0] - 1): - range1 = sorted_ranges[ind] - range2 = sorted_ranges[ind + 1] - if range2[0] < range1[1]: - return True - - -def _select_times_helper( - *, - times, - time_range, - lsts, - lst_range, - obj_time_array, - obj_time_range, - obj_lst_array, - obj_lst_range, - time_tols, - lst_tols, -): - """ - Get time indices in a select. - - Parameters - ---------- - times : array_like of float - The times to keep in the object, each value passed here should exist in the - time_array. Can be None, cannot be set with `time_range`, `lsts` or `lst_array`. - time_range : array_like of float - The time range in Julian Date to keep in the object, must be length 2. Some of - the times in the object should fall between the first and last elements. Can be - None, cannot be set with `times`, `lsts` or `lst_array`. - lsts : array_like of float - The local sidereal times (LSTs) to keep in the object, each value passed here - should exist in the lst_array. Can be None, cannot be set with `times`, - `time_range`, or `lst_range`. - lst_range : array_like of float - The local sidereal time (LST) range in radians to keep in the - object, must be of length 2. Some of the LSTs in the object should - fall between the first and last elements. If the second value is - smaller than the first, the LSTs are treated as having phase-wrapped - around LST = 2*pi = 0, and the LSTs kept on the object will run from - the larger value, through 0, and end at the smaller value. Can be None, cannot - be set with `times`, `time_range`, or `lsts`. - obj_time_array : array_like of float - Time array on object. Can be None if `object_time_range` is set. - obj_time_range : array_like of float - Time range on object. Can be None if `object_time_array` is set. - obj_lst_array : array_like of float - LST array on object. Can be None if `object_lst_range` is set. - obj_lst_range : array_like of float - LST range on object. Can be None if `object_lst_array` is set. - time_tols : tuple of float - Length 2 tuple giving (rtol, atol) to use for time matching. - lst_tols : tuple of float - Length 2 tuple giving (rtol, atol) to use for lst matching. - - """ - have_times = times is not None - have_time_range = time_range is not None - have_lsts = lsts is not None - have_lst_range = lst_range is not None - n_time_params = np.count_nonzero( - [have_times, have_time_range, have_lsts, have_lst_range] - ) - if n_time_params > 1: - raise ValueError( - "Only one of [times, time_range, lsts, lst_range] may be " - "specified per selection operation." 
- ) - if n_time_params == 0: - return None - - time_inds = np.zeros(0, dtype=np.int64) - if times is not None: - times = _get_iterable(times) - if np.array(times).ndim > 1: - times = np.array(times).flatten() - - if obj_time_range is not None: - for jd in times: - this_ind = np.nonzero( - np.logical_and( - (obj_time_range[:, 0] <= jd), (obj_time_range[:, 1] >= jd) - ) - )[0] - if this_ind.size > 0: - time_inds = np.append(time_inds, this_ind) - else: - raise ValueError(f"Time {jd} does not fall in any time_range.") - else: - for jd in times: - if np.any( - np.isclose(obj_time_array, jd, rtol=time_tols[0], atol=time_tols[1]) - ): - time_inds = np.append( - time_inds, - np.where( - np.isclose( - obj_time_array, jd, rtol=time_tols[0], atol=time_tols[1] - ) - )[0], - ) - else: - raise ValueError(f"Time {jd} is not present in the time_array.") - - if time_range is not None: - if np.size(time_range) != 2: - raise ValueError("time_range must be length 2.") - - if obj_time_range is not None: - for tind, trange in enumerate(obj_time_range): - if _check_range_overlap(np.stack((trange, time_range), axis=0)): - time_inds = np.append(time_inds, tind) - attr_str = "time_range" - else: - time_inds = np.nonzero( - (obj_time_array <= time_range[1]) & (obj_time_array >= time_range[0]) - )[0] - attr_str = "time_array" - if time_inds.size == 0: - raise ValueError( - f"No elements in {attr_str} between {time_range[0]} and " - f"{time_range[1]}." - ) - - if (lsts is not None or lst_range is not None) and obj_lst_range is not None: - # check for lsts wrapping around zero - lst_range_wrap = obj_lst_range[:, 0] > obj_lst_range[:, 1] - - if lsts is not None: - if np.any(np.asarray(lsts) > 2 * np.pi): - warnings.warn( - "The lsts parameter contained a value greater than 2*pi. " - "LST values are assumed to be in radians, not hours." - ) - lsts = _get_iterable(lsts) - if np.array(lsts).ndim > 1: - lsts = np.array(lsts).flatten() - - if obj_lst_range is not None: - for lst in lsts: - lst_ind = np.nonzero( - np.logical_and( - (obj_lst_range[:, 0] <= lst), (obj_lst_range[:, 1] >= lst) - ) - )[0] - if lst_ind.size == 0 and np.any(lst_range_wrap): - for lr_ind in np.nonzero(lst_range_wrap)[0]: - if (obj_lst_range[lr_ind, 0] <= lst and lst <= 2 * np.pi) or ( - lst >= 0 and lst <= obj_lst_range[lr_ind, 1] - ): - lst_ind = np.array([lr_ind]) - if lst_ind.size > 0: - time_inds = np.append(time_inds, lst_ind) - else: - raise ValueError(f"LST {lst} does not fall in any lst_range") - else: - for lst in lsts: - if np.any( - np.isclose(obj_lst_array, lst, rtol=lst_tols[0], atol=lst_tols[1]) - ): - time_inds = np.append( - time_inds, - np.where( - np.isclose( - obj_lst_array, lst, rtol=lst_tols[0], atol=lst_tols[1] - ) - )[0], - ) - else: - raise ValueError(f"LST {lst} is not present in the lst_array") - - if lst_range is not None: - if np.size(lst_range) != 2: - raise ValueError("lst_range must be length 2.") - if np.any(np.asarray(lst_range) > 2 * np.pi): - warnings.warn( - "The lst_range contained a value greater than 2*pi. " - "LST values are assumed to be in radians, not hours." 
- ) - if obj_lst_range is not None: - for lind, lrange in enumerate(obj_lst_range): - if not lst_range_wrap[lind] and lst_range[0] < lst_range[1]: - if _check_range_overlap(np.stack((lrange, lst_range), axis=0)): - time_inds = np.append(time_inds, lind) - else: - if (lst_range[0] >= lrange[0] and lst_range[0] <= 2 * np.pi) or ( - lst_range[1] <= lrange[1] and lst_range[1] >= 0 - ): - time_inds = np.append(time_inds, lind) - attr_str = "lst_range" - else: - if lst_range[1] < lst_range[0]: - # we're wrapping around LST = 2*pi = 0 - lst_range_1 = [lst_range[0], 2 * np.pi] - lst_range_2 = [0, lst_range[1]] - time_inds1 = np.nonzero( - (obj_lst_array <= lst_range_1[1]) - & (obj_lst_array >= lst_range_1[0]) - )[0] - time_inds2 = np.nonzero( - (obj_lst_array <= lst_range_2[1]) - & (obj_lst_array >= lst_range_2[0]) - )[0] - time_inds = np.union1d(time_inds1, time_inds2) - else: - time_inds = np.nonzero( - (obj_lst_array <= lst_range[1]) & (obj_lst_array >= lst_range[0]) - )[0] - attr_str = "lst_array" - - if time_inds.size == 0: - raise ValueError( - f"No elements in {attr_str} between {lst_range[0]} and " - f"{lst_range[1]}." - ) - return time_inds - - -def check_lsts_against_times( - *, - jd_array, - lst_array, - lst_tols, - latitude=None, - longitude=None, - altitude=None, - frame="itrs", - ellipsoid=None, - telescope_loc=None, -): - """ - Check that LSTs are consistent with the time_array and telescope location. - - This just calls `get_lst_for_time`, compares that result to the `lst_array` - and warns if they are not within the tolerances specified by `lst_tols`. - - Parameters - ---------- - jd_array : ndarray of float - JD times to get lsts for. - lst_array : ndarray of float - LSTs to check to see if they match the jd_array at the location. - latitude : float - Latitude of location to check the lst for in degrees. - longitude : float - Longitude of location to check the lst for in degrees. - altitude : float - Altitude of location to check the lst for in meters. - lst_tops : tuple of float - A length 2 tuple giving the (relative, absolute) tolerances to check the - LST agreement to. These are passed directly to numpy.allclose. - frame : str - Reference frame for latitude/longitude/altitude. - Options are itrs (default) or mcmf. - ellipsoid : str - Ellipsoid to use for lunar coordinates. Must be one of "SPHERE", "GSFC", - "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default - is "SPHERE". Only used if frame is mcmf. - telescope_loc : tuple or EarthLocation or MoonLocation - Alternative way of specifying telescope lat/lon/alt, either as a 3-element tuple - or as an astropy EarthLocation (or lunarsky MoonLocation). Cannot supply both - `telescope_loc` and `latitute`, `longitude`, or `altitude`. - - Returns - ------- - None - - Warns - ----- - If the `lst_array` does not match the calculated LSTs to the lst_tols. - - """ - # Don't worry about passing the astrometry library because we test that they agree - # to better than our standard lst tolerances. - lsts = get_lst_for_time( - jd_array=jd_array, - telescope_loc=telescope_loc, - latitude=latitude, - longitude=longitude, - altitude=altitude, - frame=frame, - ellipsoid=ellipsoid, - ) - - if not np.allclose(lst_array, lsts, rtol=lst_tols[0], atol=lst_tols[1]): - warnings.warn( - "The lst_array is not self-consistent with the time_array and " - "telescope location. Consider recomputing with the " - "`set_lsts_from_time_array` method." 
- ) - - -def check_surface_based_positions( - *, - telescope_loc=None, - telescope_frame="itrs", - antenna_positions=None, - raise_error=True, - raise_warning=True, -): - """ - Check that antenna positions are consistent with ground-based values. - - Check that the antenna position, telescope location, or combination of both produces - locations that are consistent with surface-based positions. If supplying both - antenna position and telescope location, the check will be run against the sum total - of both. For the Earth, the permitted range of values is betwen 6350 and 6390 km, - whereas for theMoon the range is 1717.1 to 1757.1 km. - - telescope_loc : tuple or EarthLocation or MoonLocation - Telescope location, specified as a 3-element tuple (specifying geo/selenocentric - position in meters) or as an astropy EarthLocation (or lunarsky MoonLocation). - telescope_frame : str, optional - Reference frame for latitude/longitude/altitude. Options are itrs (default) or - mcmf. Only used if telescope_loc is not an EarthLocation or MoonLocation. - antenna_positions : ndarray of float - List of antenna positions relative to array center in ECEF coordinates, - required if not providing `uvw_array`. Shape is (Nants, 3). If no telescope_loc - is specified, these values will be assumed to be relative to geocenter. - raise_error : bool - If True, an error is raised if telescope_loc and/or telescope_loc do not conform - to expectations for a surface-based telescope. Default is True. - raise_warning : bool - If True, a warning is raised if telescope_loc and/or telescope_loc do not - conform to expectations for a surface-based telescope. Default is True, only - used if `raise_error` is set to False. - - Returns - ------- - valid : bool - If True, the antenna_positions and/or telescope_loc conform to expectations for - a surface-based telescope. Otherwise returns false. - - """ - if antenna_positions is None: - antenna_positions = np.zeros((1, 3)) - - if isinstance(telescope_loc, EarthLocation) or ( - hasmoon and isinstance(telescope_loc, MoonLocation) - ): - antenna_positions = antenna_positions + ( - telescope_loc.x.to("m").value, - telescope_loc.y.to("m").value, - telescope_loc.z.to("m").value, - ) - if isinstance(telescope_loc, EarthLocation): - telescope_frame = "itrs" - else: - telescope_frame = "mcmf" - elif telescope_loc is not None: - antenna_positions = antenna_positions + telescope_loc - - low_lim, hi_lim, world = _range_dict[telescope_frame] - - err_type = None - if np.any(np.sum(antenna_positions**2.0, axis=1) < low_lim**2.0): - err_type = "below" - elif np.any(np.sum(antenna_positions**2.0, axis=1) > hi_lim**2.0): - err_type = "above" - - if err_type is None: - return True - - err_msg = ( - f"{telescope_frame} position vector magnitudes must be on the order of " - f"the radius of {world} -- they appear to lie well {err_type} this." 
- ) - - # If desired, raise an error - if raise_error: - raise ValueError(err_msg) - - # Otherwise, if desired, raise a warning instead - if raise_warning: - warnings.warn(err_msg) - - return False - - -def determine_blt_order( - *, time_array, ant_1_array, ant_2_array, baseline_array, Nbls, Ntimes # noqa: N803 -) -> tuple[str] | None: - """Get the blt order from analysing metadata.""" - times = time_array - ant1 = ant_1_array - ant2 = ant_2_array - bls = baseline_array - - time_bl = True - time_a = True - time_b = True - bl_time = True - a_time = True - b_time = True - bl_order = True - a_order = True - b_order = True - time_order = True - - if Nbls == 1 and Ntimes == 1: - return ("baseline", "time") # w.l.o.g. - - for i, (t, a, b, bl) in enumerate( - zip(times[1:], ant1[1:], ant2[1:], bls[1:]), start=1 - ): - on_bl_boundary = i % Nbls == 0 - on_time_boundary = i % Ntimes == 0 - - if t < times[i - 1]: - time_bl = False - time_a = False - time_b = False - time_order = False - - if not on_time_boundary: - bl_time = False - a_time = False - b_time = False - - if bl == bls[i - 1]: - bl_time = False - if a == ant1[i - 1]: - a_time = False - if b == ant2[i - 1]: - b_time = False - - elif t == times[i - 1]: - if bl < bls[i - 1]: - time_bl = False - if a < ant1[i - 1]: - time_a = False - if b < ant2[i - 1]: - time_b = False - - if bl < bls[i - 1]: - bl_time = False - bl_order = False - if not on_bl_boundary: - time_bl = False - if a < ant1[i - 1]: - a_time = False - a_order = False - if not on_bl_boundary: - time_a = False - if b < ant2[i - 1]: - b_time = False - b_order = False - if not on_bl_boundary: - time_b = False - - if not any( - ( - time_bl, - time_a, - time_b, - time_bl, - bl_time, - a_time, - b_time, - bl_order, - a_order, - b_order, - time_order, - ) - ): - break - - if Nbls > 1 and Ntimes > 1: - assert not ( - (time_bl and bl_time) - or (time_a and a_time) - or (time_b and b_time) - or (time_order and a_order) - or (time_order and b_order) - or (a_order and b_order) - or (time_order and bl_order) - ), ( - "Something went wrong when trying to determine the order of the blts axis. " - "Please raise an issue on github, as this is not meant to happen." - "None of the following should ever be True: \n" - f"\ttime_bl and bl_time: {time_bl and bl_time}\n" - f"\ttime_a and a_time: {time_a and a_time}\n" - f"\ttime_b and b_time: {time_b and b_time}\n" - f"\ttime_order and a_order: {time_order and a_order}\n" - f"\ttime_order and b_order: {time_order and b_order}\n" - f"\ta_order and b_order: {a_order and b_order}\n" - f"\ttime_order and bl_order: {time_order and bl_order}\n\n" - "Please include the following information in your issue:\n" - f"Nbls: {Nbls}\n" - f"Ntimes: {Ntimes}\n" - f"TIMES: {times}\n" - f"ANT1: {ant1}\n" - f"ANT2: {ant2}\n" - f"BASELINES: {bls}\n" - ) - - if time_bl: - return ("time", "baseline") - if bl_time: - return ("baseline", "time") - if time_a: - return ("time", "ant1") - if a_time: - return ("ant1", "time") - if time_b: - return ("time", "ant2") - if b_time: - return ("ant2", "time") - if bl_order: - return ("baseline",) - if a_order: - return ("ant1",) - if b_order: - return ("ant2",) - if time_order: - return ("time",) - - return None - - -def determine_rectangularity( - *, - time_array: np.ndarray, - baseline_array: np.ndarray, - nbls: int, - ntimes: int, - blt_order: str | tuple[str] | None = None, -): - """Determine if the data is rectangular or not. - - Parameters - ---------- - time_array : array_like - Array of times in JD. 
- baseline_array : array_like - Array of baseline integers. - nbls : int - Number of baselines. - ntimes : int - Number of times. - blt_order : str or tuple of str, optional - If known, pass the blt_order, which can short-circuit the determination - of rectangularity. - - Returns - ------- - is_rect : bool - True if the data is rectangular, False otherwise. - time_axis_faster_than_bls : bool - True if the data is rectangular and the time axis is the last axis (i.e. times - change first, then bls). False either if baselines change first, OR if it is - not rectangular. - - Notes - ----- - Rectangular data is defined as data for which using regular slicing of size Ntimes - or Nbls will give you either all the same time and all different baselines, or - vice versa. This does NOT require that the baselines and times are sorted within - that structure. - """ - # check if the data is rectangular - time_first = True - bl_first = True - - if time_array.size != nbls * ntimes: - return False, False - elif nbls * ntimes == 1: - return True, True - elif nbls == 1: - return True, True - elif ntimes == 1: - return True, False - elif blt_order == ("baseline", "time"): - return True, True - elif blt_order == ("time", "baseline"): - return True, False - - # That's all the easiest checks. - if time_array[1] == time_array[0]: - time_first = False - if baseline_array[1] == baseline_array[0]: - bl_first = False - if not time_first and not bl_first: - return False, False - - if time_first: - time_array = time_array.reshape((nbls, ntimes)) - baseline_array = baseline_array.reshape((nbls, ntimes)) - if np.sum(np.abs(np.diff(time_array, axis=0))) != 0: - return False, False - if (np.diff(baseline_array, axis=1) != 0).any(): - return False, False - return True, True - elif bl_first: - time_array = time_array.reshape((ntimes, nbls)) - baseline_array = baseline_array.reshape((ntimes, nbls)) - if np.sum(np.abs(np.diff(time_array, axis=1))) != 0: - return False, False - if (np.diff(baseline_array, axis=0) != 0).any(): - return False, False - return True, False diff --git a/src/pyuvdata/utils/history.py b/src/pyuvdata/utils/history.py new file mode 100644 index 0000000000..d6d93c27a8 --- /dev/null +++ b/src/pyuvdata/utils/history.py @@ -0,0 +1,65 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""History management utilities.""" + + +def _check_history_version(history, version_string): + """Check if version_string is present in history string.""" + if version_string.replace(" ", "") in history.replace("\n", "").replace(" ", ""): + return True + else: + return False + + +def _check_histories(history1, history2): + """Check if two histories are the same.""" + if history1.replace("\n", "").replace(" ", "") == history2.replace( + "\n", "" + ).replace(" ", ""): + return True + else: + return False + + +def _combine_history_addition(history1, history2): + """ + Find extra history to add to have minimal repeats. + + Parameters + ---------- + history1 : str + First history. + history2 : str + Second history + + Returns + ------- + str + Extra history to add to first history. + + """ + # first check if they're the same to avoid more complicated processing. 
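+    # Worked example (illustrative, not from the original code): with
+    # history1 = "A B C" and history2 = "A B C D E", the loop below finds
+    # "D" and "E" missing from history1 and returns " D E".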
+ if _check_histories(history1, history2): + return None + + hist2_words = history2.split(" ") + add_hist = "" + test_hist1 = " " + history1 + " " + for i, word in enumerate(hist2_words): + if " " + word + " " not in test_hist1: + add_hist += " " + word + keep_going = i + 1 < len(hist2_words) + while keep_going: + if (hist2_words[i + 1] == " ") or ( + " " + hist2_words[i + 1] + " " not in test_hist1 + ): + add_hist += " " + hist2_words[i + 1] + del hist2_words[i + 1] + keep_going = i + 1 < len(hist2_words) + else: + keep_going = False + + if add_hist == "": + add_hist = None + return add_hist diff --git a/src/pyuvdata/utils/lst.py b/src/pyuvdata/utils/lst.py deleted file mode 100644 index cb09fb95c7..0000000000 --- a/src/pyuvdata/utils/lst.py +++ /dev/null @@ -1,245 +0,0 @@ -# -*- mode: python; coding: utf-8 -*- -# Copyright (c) 2024 Radio Astronomy Software Group -# Licensed under the 2-clause BSD License -"""Utilities for working LSTs.""" -import warnings - -import erfa -import numpy as np -from astropy.coordinates import Angle, EarthLocation -from astropy.time import Time -from astropy.utils import iers - -try: - from lunarsky import MoonLocation - from lunarsky import Time as LTime - - hasmoon = True -except ImportError: - hasmoon = False - - -def get_lst_for_time( - jd_array=None, - *, - telescope_loc=None, - latitude=None, - longitude=None, - altitude=None, - astrometry_library=None, - frame="itrs", - ellipsoid=None, -): - """ - Get the local apparent sidereal time for a set of jd times at an earth location. - - This function calculates the local apparent sidereal time (LAST), given a UTC time - and a position on the Earth, using either the astropy or NOVAS libraries. It - is important to note that there is an apporoximate 20 microsecond difference - between the two methods, presumably due to small differences in the apparent - reference frame. These differences will cancel out when calculating coordinates - in the TOPO frame, so long as apparent coordinates are calculated using the - same library (i.e., astropy or NOVAS). Failing to do so can introduce errors - up to ~1 mas in the horizontal coordinate system (i.e., AltAz). - - Parameters - ---------- - jd_array : ndarray of float - JD times to get lsts for. - telescope_loc : tuple or EarthLocation or MoonLocation - Alternative way of specifying telescope lat/lon/alt, either as a 3-element tuple - or as an astropy EarthLocation (or lunarsky MoonLocation). Cannot supply both - `telescope_loc` and `latitute`, `longitude`, or `altitude`. - latitude : float - Latitude of location to get lst for in degrees. Cannot specify both `latitute` - and `telescope_loc`. - longitude : float - Longitude of location to get lst for in degrees. Cannot specify both `longitude` - and `telescope_loc`. - altitude : float - Altitude of location to get lst for in meters. Cannot specify both `altitude` - and `telescope_loc`. - astrometry_library : str - Library used for running the LST calculations. Allowed options are 'erfa' - (which uses the pyERFA), 'novas' (which uses the python-novas library), - and 'astropy' (which uses the astropy utilities). Default is erfa unless - the telescope_location is a MoonLocation object, in which case the default is - astropy. - frame : str - Reference frame for latitude/longitude/altitude. Options are itrs (default) - or mcmf. Not used if telescope_loc is an EarthLocation or MoonLocation object. - ellipsoid : str - Ellipsoid to use for lunar coordinates. 
Must be one of "SPHERE", - "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default - is "SPHERE". Only used if frame is mcmf. Not used if telescope_loc is - an EarthLocation or MoonLocation object. - - Returns - ------- - ndarray of float - LASTs in radians corresponding to the jd_array. - - """ - site_loc = None - if telescope_loc is not None: - if not all(item is None for item in [latitude, longitude, altitude]): - raise ValueError( - "Cannot set both telescope_loc and latitude/longitude/altitude" - ) - if isinstance(telescope_loc, EarthLocation) or ( - hasmoon and isinstance(telescope_loc, MoonLocation) - ): - site_loc = telescope_loc - if isinstance(telescope_loc, EarthLocation): - frame = "ITRS" - else: - frame = "MCMF" - else: - latitude, longitude, altitude = telescope_loc - - if site_loc is None: - if frame.upper() == "MCMF": - if not hasmoon: - raise ValueError( - "Need to install `lunarsky` package to work with MCMF frame." - ) - if ellipsoid is None: - ellipsoid = "SPHERE" - - site_loc = MoonLocation.from_selenodetic( - Angle(longitude, unit="deg"), - Angle(latitude, unit="deg"), - altitude, - ellipsoid=ellipsoid, - ) - else: - site_loc = EarthLocation.from_geodetic( - Angle(longitude, unit="deg"), - Angle(latitude, unit="deg"), - height=altitude, - ) - if astrometry_library is None: - if frame == "itrs": - astrometry_library = "erfa" - else: - astrometry_library = "astropy" - - if astrometry_library not in ["erfa", "astropy", "novas"]: - raise ValueError( - "Requested coordinate transformation library is not supported, please " - "select either 'erfa' or 'astropy' for astrometry_library." - ) - - if isinstance(jd_array, np.ndarray): - lst_array = np.zeros_like(jd_array) - if lst_array.ndim == 0: - lst_array = lst_array.reshape(1) - else: - lst_array = np.zeros(1) - - jd, reverse_inds = np.unique(jd_array, return_inverse=True) - - if isinstance(site_loc, EarthLocation): - TimeClass = Time - else: - if not astrometry_library == "astropy": - raise NotImplementedError( - "The MCMF frame is only supported with the 'astropy' astrometry library" - ) - TimeClass = LTime - - times = TimeClass(jd, format="jd", scale="utc", location=site_loc) - - if iers.conf.auto_max_age is None: # pragma: no cover - delta, status = times.get_delta_ut1_utc(return_status=True) - if np.any( - np.isin(status, (iers.TIME_BEFORE_IERS_RANGE, iers.TIME_BEYOND_IERS_RANGE)) - ): - warnings.warn( - "time is out of IERS range, setting delta ut1 utc to extrapolated value" - ) - times.delta_ut1_utc = delta - if astrometry_library == "erfa": - # This appears to be what astropy is using under the hood, - # so it _should_ be totally consistent. - gast_array = erfa.gst06a( - times.ut1.jd1, times.ut1.jd2, times.tt.jd1, times.tt.jd2 - ) - - # Technically one should correct for the polar wobble here, but the differences - # along the equitorial are miniscule -- of order 10s of nanoradians, well below - # the promised accuracy of IERS -- and rotation matricies can be expensive. - # We do want to correct though for for secular polar drift (s'/TIO locator), - # which nudges the Earth rotation angle of order 47 uas per century. 
- sp = erfa.sp00(times.tt.jd1, times.tt.jd2) - - lst_array = np.mod(gast_array + sp + site_loc.lon.rad, 2.0 * np.pi)[ - reverse_inds - ] - elif astrometry_library == "astropy": - lst_array = times.sidereal_time("apparent").radian - if lst_array.ndim == 0: - lst_array = lst_array.reshape(1) - lst_array = lst_array[reverse_inds] - elif astrometry_library == "novas": - # Import the NOVAS library only if it's needed/available. - try: - import novas_de405 # noqa - from novas import compat as novas - from novas.compat import eph_manager - except ImportError as e: # pragma: no cover - raise ImportError( - "novas and/or novas_de405 are not installed but is required for " - "NOVAS functionality" - ) from e - - jd_start, jd_end, number = eph_manager.ephem_open() - - tt_time_array = times.tt.value - ut1_high_time_array = times.ut1.jd1 - ut1_low_time_array = times.ut1.jd2 - full_ut1_time_array = ut1_high_time_array + ut1_low_time_array - polar_motion_data = iers.earth_orientation_table.get() - - delta_x_array = np.interp( - times.mjd, - polar_motion_data["MJD"].value, - polar_motion_data["dX_2000A_B"].value, - left=0.0, - right=0.0, - ) - - delta_y_array = np.interp( - times.mjd, - polar_motion_data["MJD"].value, - polar_motion_data["dY_2000A_B"].value, - left=0.0, - right=0.0, - ) - - # Catch the case where we don't have CIP delta values yet (they don't typically - # have predictive values like the polar motion does) - delta_x_array[np.isnan(delta_x_array)] = 0.0 - delta_y_array[np.isnan(delta_y_array)] = 0.0 - - for idx in range(len(times)): - novas.cel_pole( - tt_time_array[idx], 2, delta_x_array[idx], delta_y_array[idx] - ) - # The NOVAS routine will return Greenwich Apparent Sidereal Time (GAST), - # in units of hours - lst_array[reverse_inds == idx] = novas.sidereal_time( - ut1_high_time_array[idx], - ut1_low_time_array[idx], - (tt_time_array[idx] - full_ut1_time_array[idx]) * 86400.0, - ) - - # Add the telescope lon to convert from GAST to LAST (local) - lst_array = np.mod(lst_array + (longitude / 15.0), 24.0) - - # Convert from hours back to rad - lst_array *= np.pi / 12.0 - - lst_array = np.reshape(lst_array, jd_array.shape) - - return lst_array diff --git a/src/pyuvdata/utils/ps_cat.py b/src/pyuvdata/utils/phase_center_catalog.py similarity index 100% rename from src/pyuvdata/utils/ps_cat.py rename to src/pyuvdata/utils/phase_center_catalog.py diff --git a/src/pyuvdata/utils/phasing.py b/src/pyuvdata/utils/phasing.py index c781217552..994c165c7b 100644 --- a/src/pyuvdata/utils/phasing.py +++ b/src/pyuvdata/utils/phasing.py @@ -12,7 +12,7 @@ from astropy.utils import iers from . 
import _phasing
-from .lst import get_lst_for_time
+from .times import get_lst_for_time
 
 try:
     from lunarsky import MoonLocation
diff --git a/src/pyuvdata/utils/times.py b/src/pyuvdata/utils/times.py
new file mode 100644
index 0000000000..b5d6d39617
--- /dev/null
+++ b/src/pyuvdata/utils/times.py
@@ -0,0 +1,534 @@
+# -*- mode: python; coding: utf-8 -*-
+# Copyright (c) 2024 Radio Astronomy Software Group
+# Licensed under the 2-clause BSD License
+"""Utilities for working with LSTs."""
+import warnings
+
+import erfa
+import numpy as np
+from astropy.coordinates import Angle, EarthLocation
+from astropy.time import Time
+from astropy.utils import iers
+
+try:
+    from lunarsky import MoonLocation
+    from lunarsky import Time as LTime
+
+    hasmoon = True
+except ImportError:
+    hasmoon = False
+
+from .tools import _check_range_overlap, _get_iterable
+
+
+def get_lst_for_time(
+    jd_array=None,
+    *,
+    telescope_loc=None,
+    latitude=None,
+    longitude=None,
+    altitude=None,
+    astrometry_library=None,
+    frame="itrs",
+    ellipsoid=None,
+):
+    """
+    Get the local apparent sidereal time for a set of jd times at an Earth location.
+
+    This function calculates the local apparent sidereal time (LAST), given a UTC time
+    and a position on the Earth, using either the astropy or NOVAS libraries. It
+    is important to note that there is an approximate 20 microsecond difference
+    between the two methods, presumably due to small differences in the apparent
+    reference frame. These differences will cancel out when calculating coordinates
+    in the TOPO frame, so long as apparent coordinates are calculated using the
+    same library (i.e., astropy or NOVAS). Failing to do so can introduce errors
+    up to ~1 mas in the horizontal coordinate system (i.e., AltAz).
+
+    Parameters
+    ----------
+    jd_array : ndarray of float
+        JD times to get lsts for.
+    telescope_loc : tuple or EarthLocation or MoonLocation
+        Alternative way of specifying telescope lat/lon/alt, either as a 3-element tuple
+        or as an astropy EarthLocation (or lunarsky MoonLocation). Cannot supply both
+        `telescope_loc` and `latitude`, `longitude`, or `altitude`.
+    latitude : float
+        Latitude of location to get lst for in degrees. Cannot specify both `latitude`
+        and `telescope_loc`.
+    longitude : float
+        Longitude of location to get lst for in degrees. Cannot specify both `longitude`
+        and `telescope_loc`.
+    altitude : float
+        Altitude of location to get lst for in meters. Cannot specify both `altitude`
+        and `telescope_loc`.
+    astrometry_library : str
+        Library used for running the LST calculations. Allowed options are 'erfa'
+        (which uses pyERFA), 'novas' (which uses the python-novas library),
+        and 'astropy' (which uses the astropy utilities). Default is erfa unless
+        `telescope_loc` is a MoonLocation object, in which case the default is
+        astropy.
+    frame : str
+        Reference frame for latitude/longitude/altitude. Options are itrs (default)
+        or mcmf. Not used if telescope_loc is an EarthLocation or MoonLocation object.
+    ellipsoid : str
+        Ellipsoid to use for lunar coordinates. Must be one of "SPHERE",
+        "GSFC", "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default
+        is "SPHERE". Only used if frame is mcmf. Not used if telescope_loc is
+        an EarthLocation or MoonLocation object.
+
+    Returns
+    -------
+    ndarray of float
+        LASTs in radians corresponding to the jd_array.
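+
+    Notes
+    -----
+    Minimal usage sketch (the JD and location values here are illustrative,
+    not part of this patch):
+    ``get_lst_for_time(np.array([2457458.5]), latitude=-26.7, longitude=116.7,
+    altitude=377.8)`` returns a length-1 array giving the LAST in radians.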
+
+    """
+    site_loc = None
+    if telescope_loc is not None:
+        if not all(item is None for item in [latitude, longitude, altitude]):
+            raise ValueError(
+                "Cannot set both telescope_loc and latitude/longitude/altitude"
+            )
+        if isinstance(telescope_loc, EarthLocation) or (
+            hasmoon and isinstance(telescope_loc, MoonLocation)
+        ):
+            site_loc = telescope_loc
+            if isinstance(telescope_loc, EarthLocation):
+                frame = "ITRS"
+            else:
+                frame = "MCMF"
+        else:
+            latitude, longitude, altitude = telescope_loc
+
+    if site_loc is None:
+        if frame.upper() == "MCMF":
+            if not hasmoon:
+                raise ValueError(
+                    "Need to install `lunarsky` package to work with MCMF frame."
+                )
+            if ellipsoid is None:
+                ellipsoid = "SPHERE"
+
+            site_loc = MoonLocation.from_selenodetic(
+                Angle(longitude, unit="deg"),
+                Angle(latitude, unit="deg"),
+                altitude,
+                ellipsoid=ellipsoid,
+            )
+        else:
+            site_loc = EarthLocation.from_geodetic(
+                Angle(longitude, unit="deg"),
+                Angle(latitude, unit="deg"),
+                height=altitude,
+            )
+    if astrometry_library is None:
+        if frame == "itrs":
+            astrometry_library = "erfa"
+        else:
+            astrometry_library = "astropy"
+
+    if astrometry_library not in ["erfa", "astropy", "novas"]:
+        raise ValueError(
+            "Requested coordinate transformation library is not supported, please "
+            "select 'erfa', 'novas' or 'astropy' for astrometry_library."
+        )
+
+    if isinstance(jd_array, np.ndarray):
+        lst_array = np.zeros_like(jd_array)
+        if lst_array.ndim == 0:
+            lst_array = lst_array.reshape(1)
+    else:
+        lst_array = np.zeros(1)
+
+    jd, reverse_inds = np.unique(jd_array, return_inverse=True)
+
+    if isinstance(site_loc, EarthLocation):
+        TimeClass = Time
+    else:
+        if not astrometry_library == "astropy":
+            raise NotImplementedError(
+                "The MCMF frame is only supported with the 'astropy' astrometry library"
+            )
+        TimeClass = LTime
+
+    times = TimeClass(jd, format="jd", scale="utc", location=site_loc)
+
+    if iers.conf.auto_max_age is None:  # pragma: no cover
+        delta, status = times.get_delta_ut1_utc(return_status=True)
+        if np.any(
+            np.isin(status, (iers.TIME_BEFORE_IERS_RANGE, iers.TIME_BEYOND_IERS_RANGE))
+        ):
+            warnings.warn(
+                "time is out of IERS range, setting delta ut1 utc to extrapolated value"
+            )
+            times.delta_ut1_utc = delta
+    if astrometry_library == "erfa":
+        # This appears to be what astropy is using under the hood,
+        # so it _should_ be totally consistent.
+        gast_array = erfa.gst06a(
+            times.ut1.jd1, times.ut1.jd2, times.tt.jd1, times.tt.jd2
+        )
+
+        # Technically one should correct for the polar wobble here, but the differences
+        # along the equatorial are minuscule -- of order 10s of nanoradians, well below
+        # the promised accuracy of IERS -- and rotation matrices can be expensive.
+        # We do want to correct, though, for secular polar drift (s'/TIO locator),
+        # which nudges the Earth rotation angle of order 47 uas per century.
+        sp = erfa.sp00(times.tt.jd1, times.tt.jd2)
+
+        lst_array = np.mod(gast_array + sp + site_loc.lon.rad, 2.0 * np.pi)[
+            reverse_inds
+        ]
+    elif astrometry_library == "astropy":
+        lst_array = times.sidereal_time("apparent").radian
+        if lst_array.ndim == 0:
+            lst_array = lst_array.reshape(1)
+        lst_array = lst_array[reverse_inds]
+    elif astrometry_library == "novas":
+        # Import the NOVAS library only if it's needed/available.
+        try:
+            import novas_de405  # noqa
+            from novas import compat as novas
+            from novas.compat import eph_manager
+        except ImportError as e:  # pragma: no cover
+            raise ImportError(
+                "novas and/or novas_de405 are not installed but are required for "
+                "NOVAS functionality"
+            ) from e
+
+        jd_start, jd_end, number = eph_manager.ephem_open()
+
+        tt_time_array = times.tt.value
+        ut1_high_time_array = times.ut1.jd1
+        ut1_low_time_array = times.ut1.jd2
+        full_ut1_time_array = ut1_high_time_array + ut1_low_time_array
+        polar_motion_data = iers.earth_orientation_table.get()
+
+        delta_x_array = np.interp(
+            times.mjd,
+            polar_motion_data["MJD"].value,
+            polar_motion_data["dX_2000A_B"].value,
+            left=0.0,
+            right=0.0,
+        )
+
+        delta_y_array = np.interp(
+            times.mjd,
+            polar_motion_data["MJD"].value,
+            polar_motion_data["dY_2000A_B"].value,
+            left=0.0,
+            right=0.0,
+        )
+
+        # Catch the case where we don't have CIP delta values yet (they don't typically
+        # have predictive values like the polar motion does)
+        delta_x_array[np.isnan(delta_x_array)] = 0.0
+        delta_y_array[np.isnan(delta_y_array)] = 0.0
+
+        for idx in range(len(times)):
+            novas.cel_pole(
+                tt_time_array[idx], 2, delta_x_array[idx], delta_y_array[idx]
+            )
+            # The NOVAS routine will return Greenwich Apparent Sidereal Time (GAST),
+            # in units of hours
+            lst_array[reverse_inds == idx] = novas.sidereal_time(
+                ut1_high_time_array[idx],
+                ut1_low_time_array[idx],
+                (tt_time_array[idx] - full_ut1_time_array[idx]) * 86400.0,
+            )
+
+        # Add the telescope lon to convert from GAST to LAST (local)
+        lst_array = np.mod(lst_array + (longitude / 15.0), 24.0)
+
+        # Convert from hours back to rad
+        lst_array *= np.pi / 12.0
+
+    lst_array = np.reshape(lst_array, jd_array.shape)
+
+    return lst_array
+
+
+def check_lsts_against_times(
+    *,
+    jd_array,
+    lst_array,
+    lst_tols,
+    latitude=None,
+    longitude=None,
+    altitude=None,
+    frame="itrs",
+    ellipsoid=None,
+    telescope_loc=None,
+):
+    """
+    Check that LSTs are consistent with the time_array and telescope location.
+
+    This just calls `get_lst_for_time`, compares that result to the `lst_array`
+    and warns if they are not within the tolerances specified by `lst_tols`.
+
+    Parameters
+    ----------
+    jd_array : ndarray of float
+        JD times to get lsts for.
+    lst_array : ndarray of float
+        LSTs to check to see if they match the jd_array at the location.
+    latitude : float
+        Latitude of location to check the lst for in degrees.
+    longitude : float
+        Longitude of location to check the lst for in degrees.
+    altitude : float
+        Altitude of location to check the lst for in meters.
+    lst_tols : tuple of float
+        A length 2 tuple giving the (relative, absolute) tolerances to check the
+        LST agreement to. These are passed directly to numpy.allclose.
+    frame : str
+        Reference frame for latitude/longitude/altitude.
+        Options are itrs (default) or mcmf.
+    ellipsoid : str
+        Ellipsoid to use for lunar coordinates. Must be one of "SPHERE", "GSFC",
+        "GRAIL23", "CE-1-LAM-GEO" (see lunarsky package for details). Default
+        is "SPHERE". Only used if frame is mcmf.
+    telescope_loc : tuple or EarthLocation or MoonLocation
+        Alternative way of specifying telescope lat/lon/alt, either as a 3-element tuple
+        or as an astropy EarthLocation (or lunarsky MoonLocation). Cannot supply both
+        `telescope_loc` and `latitude`, `longitude`, or `altitude`.
+
+    Returns
+    -------
+    None
+
+    Warns
+    -----
+    If the `lst_array` does not match the calculated LSTs to the lst_tols.
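+
+    Notes
+    -----
+    Typical call pattern (this mirrors how the file readers in this patch use
+    the check):
+    ``check_lsts_against_times(jd_array=time_array, lst_array=lst_array,
+    telescope_loc=telescope.location, lst_tols=(0, utils.LST_RAD_TOL))``.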
+
+    """
+    # Don't worry about passing the astrometry library because we test that they agree
+    # to better than our standard lst tolerances.
+    lsts = get_lst_for_time(
+        jd_array=jd_array,
+        telescope_loc=telescope_loc,
+        latitude=latitude,
+        longitude=longitude,
+        altitude=altitude,
+        frame=frame,
+        ellipsoid=ellipsoid,
+    )
+
+    if not np.allclose(lst_array, lsts, rtol=lst_tols[0], atol=lst_tols[1]):
+        warnings.warn(
+            "The lst_array is not self-consistent with the time_array and "
+            "telescope location. Consider recomputing with the "
+            "`set_lsts_from_time_array` method."
+        )
+
+
+def _select_times_helper(
+    *,
+    times,
+    time_range,
+    lsts,
+    lst_range,
+    obj_time_array,
+    obj_time_range,
+    obj_lst_array,
+    obj_lst_range,
+    time_tols,
+    lst_tols,
+):
+    """
+    Get time indices in a select.
+
+    Parameters
+    ----------
+    times : array_like of float
+        The times to keep in the object, each value passed here should exist in the
+        time_array. Can be None, cannot be set with `time_range`, `lsts` or `lst_range`.
+    time_range : array_like of float
+        The time range in Julian Date to keep in the object, must be length 2. Some of
+        the times in the object should fall between the first and last elements. Can be
+        None, cannot be set with `times`, `lsts` or `lst_range`.
+    lsts : array_like of float
+        The local sidereal times (LSTs) to keep in the object, each value passed here
+        should exist in the lst_array. Can be None, cannot be set with `times`,
+        `time_range`, or `lst_range`.
+    lst_range : array_like of float
+        The local sidereal time (LST) range in radians to keep in the
+        object, must be of length 2. Some of the LSTs in the object should
+        fall between the first and last elements. If the second value is
+        smaller than the first, the LSTs are treated as having phase-wrapped
+        around LST = 2*pi = 0, and the LSTs kept on the object will run from
+        the larger value, through 0, and end at the smaller value. Can be None, cannot
+        be set with `times`, `time_range`, or `lsts`.
+    obj_time_array : array_like of float
+        Time array on object. Can be None if `obj_time_range` is set.
+    obj_time_range : array_like of float
+        Time range on object. Can be None if `obj_time_array` is set.
+    obj_lst_array : array_like of float
+        LST array on object. Can be None if `obj_lst_range` is set.
+    obj_lst_range : array_like of float
+        LST range on object. Can be None if `obj_lst_array` is set.
+    time_tols : tuple of float
+        Length 2 tuple giving (rtol, atol) to use for time matching.
+    lst_tols : tuple of float
+        Length 2 tuple giving (rtol, atol) to use for lst matching.
+
+    Returns
+    -------
+    time_inds : ndarray of int or None
+        Indices along the time axis to keep, or None if no time-based selection
+        criteria were provided.
+
+    """
+    have_times = times is not None
+    have_time_range = time_range is not None
+    have_lsts = lsts is not None
+    have_lst_range = lst_range is not None
+    n_time_params = np.count_nonzero(
+        [have_times, have_time_range, have_lsts, have_lst_range]
+    )
+    if n_time_params > 1:
+        raise ValueError(
+            "Only one of [times, time_range, lsts, lst_range] may be "
+            "specified per selection operation."
+ ) + if n_time_params == 0: + return None + + time_inds = np.zeros(0, dtype=np.int64) + if times is not None: + times = _get_iterable(times) + if np.array(times).ndim > 1: + times = np.array(times).flatten() + + if obj_time_range is not None: + for jd in times: + this_ind = np.nonzero( + np.logical_and( + (obj_time_range[:, 0] <= jd), (obj_time_range[:, 1] >= jd) + ) + )[0] + if this_ind.size > 0: + time_inds = np.append(time_inds, this_ind) + else: + raise ValueError(f"Time {jd} does not fall in any time_range.") + else: + for jd in times: + if np.any( + np.isclose(obj_time_array, jd, rtol=time_tols[0], atol=time_tols[1]) + ): + time_inds = np.append( + time_inds, + np.where( + np.isclose( + obj_time_array, jd, rtol=time_tols[0], atol=time_tols[1] + ) + )[0], + ) + else: + raise ValueError(f"Time {jd} is not present in the time_array.") + + if time_range is not None: + if np.size(time_range) != 2: + raise ValueError("time_range must be length 2.") + + if obj_time_range is not None: + for tind, trange in enumerate(obj_time_range): + if _check_range_overlap(np.stack((trange, time_range), axis=0)): + time_inds = np.append(time_inds, tind) + attr_str = "time_range" + else: + time_inds = np.nonzero( + (obj_time_array <= time_range[1]) & (obj_time_array >= time_range[0]) + )[0] + attr_str = "time_array" + if time_inds.size == 0: + raise ValueError( + f"No elements in {attr_str} between {time_range[0]} and " + f"{time_range[1]}." + ) + + if (lsts is not None or lst_range is not None) and obj_lst_range is not None: + # check for lsts wrapping around zero + lst_range_wrap = obj_lst_range[:, 0] > obj_lst_range[:, 1] + + if lsts is not None: + if np.any(np.asarray(lsts) > 2 * np.pi): + warnings.warn( + "The lsts parameter contained a value greater than 2*pi. " + "LST values are assumed to be in radians, not hours." + ) + lsts = _get_iterable(lsts) + if np.array(lsts).ndim > 1: + lsts = np.array(lsts).flatten() + + if obj_lst_range is not None: + for lst in lsts: + lst_ind = np.nonzero( + np.logical_and( + (obj_lst_range[:, 0] <= lst), (obj_lst_range[:, 1] >= lst) + ) + )[0] + if lst_ind.size == 0 and np.any(lst_range_wrap): + for lr_ind in np.nonzero(lst_range_wrap)[0]: + if (obj_lst_range[lr_ind, 0] <= lst and lst <= 2 * np.pi) or ( + lst >= 0 and lst <= obj_lst_range[lr_ind, 1] + ): + lst_ind = np.array([lr_ind]) + if lst_ind.size > 0: + time_inds = np.append(time_inds, lst_ind) + else: + raise ValueError(f"LST {lst} does not fall in any lst_range") + else: + for lst in lsts: + if np.any( + np.isclose(obj_lst_array, lst, rtol=lst_tols[0], atol=lst_tols[1]) + ): + time_inds = np.append( + time_inds, + np.where( + np.isclose( + obj_lst_array, lst, rtol=lst_tols[0], atol=lst_tols[1] + ) + )[0], + ) + else: + raise ValueError(f"LST {lst} is not present in the lst_array") + + if lst_range is not None: + if np.size(lst_range) != 2: + raise ValueError("lst_range must be length 2.") + if np.any(np.asarray(lst_range) > 2 * np.pi): + warnings.warn( + "The lst_range contained a value greater than 2*pi. " + "LST values are assumed to be in radians, not hours." 
+            )
+        if obj_lst_range is not None:
+            for lind, lrange in enumerate(obj_lst_range):
+                if not lst_range_wrap[lind] and lst_range[0] < lst_range[1]:
+                    if _check_range_overlap(np.stack((lrange, lst_range), axis=0)):
+                        time_inds = np.append(time_inds, lind)
+                else:
+                    if (lst_range[0] >= lrange[0] and lst_range[0] <= 2 * np.pi) or (
+                        lst_range[1] <= lrange[1] and lst_range[1] >= 0
+                    ):
+                        time_inds = np.append(time_inds, lind)
+            attr_str = "lst_range"
+        else:
+            if lst_range[1] < lst_range[0]:
+                # we're wrapping around LST = 2*pi = 0
+                lst_range_1 = [lst_range[0], 2 * np.pi]
+                lst_range_2 = [0, lst_range[1]]
+                time_inds1 = np.nonzero(
+                    (obj_lst_array <= lst_range_1[1])
+                    & (obj_lst_array >= lst_range_1[0])
+                )[0]
+                time_inds2 = np.nonzero(
+                    (obj_lst_array <= lst_range_2[1])
+                    & (obj_lst_array >= lst_range_2[0])
+                )[0]
+                time_inds = np.union1d(time_inds1, time_inds2)
+            else:
+                time_inds = np.nonzero(
+                    (obj_lst_array <= lst_range[1]) & (obj_lst_array >= lst_range[0])
+                )[0]
+            attr_str = "lst_array"
+
+        if time_inds.size == 0:
+            raise ValueError(
+                f"No elements in {attr_str} between {lst_range[0]} and "
+                f"{lst_range[1]}."
+            )
+    return time_inds
diff --git a/src/pyuvdata/utils/tools.py b/src/pyuvdata/utils/tools.py
new file mode 100644
index 0000000000..b45b67da86
--- /dev/null
+++ b/src/pyuvdata/utils/tools.py
@@ -0,0 +1,371 @@
+# -*- mode: python; coding: utf-8 -*-
+# Copyright (c) 2024 Radio Astronomy Software Group
+# Licensed under the 2-clause BSD License
+"""Basic utility functions."""
+from __future__ import annotations
+
+from collections.abc import Iterable
+from typing import Iterable as IterableType
+
+import numpy as np
+
+
+def _get_iterable(x):
+    """Return iterable version of input."""
+    if isinstance(x, Iterable):
+        return x
+    else:
+        return (x,)
+
+
+def _combine_filenames(filename1, filename2):
+    """Combine the filename attribute from multiple UVBase objects.
+
+    The 4 cases are:
+    1. `filename1` has been set, `filename2` has not
+    2. `filename1` has not been set, `filename2` has
+    3. `filename1` and `filename2` both have been set
+    4. `filename1` and `filename2` both have not been set
+    In case (1), we do not want to update the attribute, because it is
+    already set correctly. In case (2), we want to replace `filename1`
+    with the value from `filename2`. In case (3), we want to take the union of
+    the sets of the filenames. In case (4), we want the filename attribute
+    to still be `None`.
+
+    Parameters
+    ----------
+    filename1 : list of str or None
+        The list of filenames for the first UVBase object. If it is not set, it
+        should be `None`.
+    filename2 : list of str or None
+        The list of filenames for the second UVBase object. If it is not set, it
+        should be `None`.
+
+    Returns
+    -------
+    combined_filenames : list of str or None
+        The combined list, with potentially duplicate entries removed.
+    """
+    combined_filenames = filename1
+    if filename1 is not None:
+        if filename2 is not None:
+            combined_filenames = sorted(set(filename1).union(set(filename2)))
+    elif filename2 is not None:
+        combined_filenames = filename2
+
+    return combined_filenames
+
+
+def _convert_to_slices(
+    indices, *, max_nslice_frac=0.1, max_nslice=None, return_index_on_fail=False
+):
+    """
+    Convert list of indices to a list of slices.
+
+    Parameters
+    ----------
+    indices : list
+        A 1D list of integers for array indexing (boolean ndarrays are also supported).
+    max_nslice_frac : float
+        A float from 0 -- 1.
If the number of slices
+        needed to represent input 'indices' divided by len(indices)
+        exceeds this fraction, then we determine that we cannot
+        easily represent 'indices' with a list of slices.
+    max_nslice : int
+        Optional argument, defines the maximum number of slices for determining if
+        `indices` can be easily represented with a list of slices. If set, then
+        the argument supplied to `max_nslice_frac` is ignored.
+    return_index_on_fail : bool
+        If set to True and the list of input indexes cannot easily be represented by
+        a list of slices (as defined by `max_nslice` or `max_nslice_frac`), then return
+        the input list of index values instead of a list of suboptimal slices.
+
+    Returns
+    -------
+    slice_list : list
+        Nominally the list of slice objects used to represent indices. However, if
+        `return_index_on_fail=True` and input indexes cannot easily be represented,
+        return a 1-element list containing the input for `indices`.
+    check : bool
+        If True, indices is easily represented by slices
+        (`max_nslice_frac` or `max_nslice` conditions met), otherwise False.
+
+    Notes
+    -----
+    Example:
+        if: indices = [1, 2, 3, 4, 10, 11, 12, 13, 14]
+        then: slices = [slice(1, 5, 1), slice(10, 15, 1)]
+    """
+    # check for already a slice or a single index position
+    if isinstance(indices, slice):
+        return [indices], True
+    if isinstance(indices, (int, np.integer)):
+        return [slice(indices, indices + 1, 1)], True
+
+    # check for boolean index
+    if isinstance(indices, np.ndarray) and (indices.dtype == bool):
+        eval_ind = np.where(indices)[0]
+    else:
+        eval_ind = indices
+    # assert indices is longer than 2, or return trivial solutions
+    if len(eval_ind) == 0:
+        return [slice(0, 0, 0)], False
+    if len(eval_ind) <= 2:
+        return [
+            slice(eval_ind[0], eval_ind[-1] + 1, max(eval_ind[-1] - eval_ind[0], 1))
+        ], True
+
+    # Catch the simplest case of "give me a single slice or exit"
+    if (max_nslice == 1) and return_index_on_fail:
+        step = eval_ind[1] - eval_ind[0]
+        if all(np.diff(eval_ind) == step):
+            return [slice(eval_ind[0], eval_ind[-1] + 1, step)], True
+        return [indices], False
+
+    # setup empty slices list
+    Ninds = len(eval_ind)
+    slices = []
+
+    # iterate over indices
+    start = last_step = None
+    for ind in eval_ind:
+        if last_step is None:
+            # Check if this is the first slice, in which case start is None
+            if start is None:
+                start = ind
+                continue
+            last_step = ind - start
+            last_ind = ind
+            continue
+
+        # calculate step from previous index
+        step = ind - last_ind
+
+        # if step != last_step, this ends the slice
+        if step != last_step:
+            # append to list
+            slices.append(slice(start, last_ind + 1, last_step))
+
+            # setup next step
+            start = ind
+            last_step = None
+
+        last_ind = ind
+
+    # Append the last slice
+    slices.append(slice(start, ind + 1, last_step))
+
+    # determine whether slices are a reasonable representation, and determine max_nslice
+    # if only max_nslice_frac was supplied.
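+    # Worked example (illustrative, restating the docstring): for
+    # indices = [1, 2, 3, 4, 10, 11, 12, 13, 14] the loop above builds
+    # slices = [slice(1, 5, 1), slice(10, 15, 1)] -- 2 slices for 9 indices,
+    # so the check below passes only if max_nslice >= 2 (max_nslice_frac >= 2/9).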
+ if max_nslice is None: + max_nslice = max_nslice_frac * Ninds + check = len(slices) <= max_nslice + + if return_index_on_fail and not check: + return [indices], check + else: + return slices, check + + +def slicify(ind: slice | None | IterableType[int]) -> slice | None | IterableType[int]: + """Convert an iterable of integers into a slice object if possible.""" + if ind is None or isinstance(ind, slice): + return ind + if len(ind) == 0: + return None + + if len(set(np.ediff1d(ind))) <= 1: + return slice(ind[0], ind[-1] + 1, ind[1] - ind[0] if len(ind) > 1 else 1) + else: + # can't slicify + return ind + + +def _test_array_constant(array, *, tols=None): + """ + Check if an array contains constant values to some tolerance. + + Uses np.isclose on the min & max of the arrays with the given tolerances. + + Parameters + ---------- + array : np.ndarray or UVParameter + UVParameter or array to check for constant values. + tols : tuple of float, optional + length 2 tuple giving (rtol, atol) to pass to np.isclose, defaults to (0, 0) if + passing an array, otherwise defaults to using the tolerance on the UVParameter. + + Returns + ------- + bool + True if the array is constant to the given tolerances, False otherwise. + """ + # Import UVParameter here rather than at the top to avoid circular imports + from pyuvdata.parameter import UVParameter + + if isinstance(array, UVParameter): + array_to_test = array.value + if tols is None: + tols = array.tols + else: + array_to_test = array + if tols is None: + tols = (0, 0) + assert isinstance(tols, tuple), "tols must be a length-2 tuple" + assert len(tols) == 2, "tols must be a length-2 tuple" + + if array_to_test.size == 1: + # arrays with 1 element are constant by definition + return True + + # if min and max are equal don't bother with tolerance checking + if np.min(array_to_test) == np.max(array_to_test): + return True + + return np.isclose( + np.min(array_to_test), np.max(array_to_test), rtol=tols[0], atol=tols[1] + ) + + +def _test_array_constant_spacing(array, *, tols=None): + """ + Check if an array is constantly spaced to some tolerance. + + Calls _test_array_constant on the np.diff of the array. + + Parameters + ---------- + array : np.ndarray or UVParameter + UVParameter or array to check for constant spacing. + tols : tuple of float, optional + length 2 tuple giving (rtol, atol) to pass to np.isclose, defaults to (0, 0) if + passing an array, otherwise defaults to using the tolerance on the UVParameter. + + Returns + ------- + bool + True if the array spacing is constant to the given tolerances, False otherwise. + """ + # Import UVParameter here rather than at the top to avoid circular imports + from pyuvdata.parameter import UVParameter + + if isinstance(array, UVParameter): + array_to_test = array.value + if tols is None: + tols = array.tols + else: + array_to_test = array + if tols is None: + tols = (0, 0) + assert isinstance(tols, tuple), "tols must be a length-2 tuple" + assert len(tols) == 2, "tols must be a length-2 tuple" + + if array_to_test.size <= 2: + # arrays with 1 or 2 elements are constantly spaced by definition + return True + + array_diff = np.diff(array_to_test) + return _test_array_constant(array_diff, tols=tols) + + +def _check_range_overlap(val_range, range_type="time"): + """ + Detect if any val_range in an array overlap. + + Parameters + ---------- + val_range : np.array of float + Array of ranges, shape (Nranges, 2). 
+    range_type : str
+        Type of range (for good error messages)
+
+    Returns
+    -------
+    bool
+        True if any range overlaps.
+    """
+    # first check that time ranges are well formed (stop is >= start)
+    if np.any((val_range[:, 1] - val_range[:, 0]) < 0):
+        raise ValueError(
+            f"The {range_type} ranges are not well-formed, some stop {range_type}s "
+            f"are after start {range_type}s."
+        )
+
+    # Sort by start time
+    sorted_ranges = val_range[np.argsort(val_range[:, 0]), :]
+
+    # then check if adjacent pairs overlap
+    for ind in range(sorted_ranges.shape[0] - 1):
+        range1 = sorted_ranges[ind]
+        range2 = sorted_ranges[ind + 1]
+        if range2[0] < range1[1]:
+            return True
+
+    # no adjacent pair overlapped, so return False explicitly (rather than
+    # implicitly returning None) to match the documented bool return
+    return False
+
+
+def _sorted_unique_union(obj1, obj2=None):
+    """
+    Determine the union of unique elements from two lists.
+
+    Convenience function for handling various actions with indices.
+
+    Parameters
+    ----------
+    obj1 : list or tuple or set or 1D ndarray
+        First list from which to determine unique entries.
+    obj2 : list or tuple or set or 1D ndarray
+        Second list from which to determine unique entries, which is joined with the
+        first list. If None, the method will simply return the sorted list of unique
+        elements in obj1.
+
+    Returns
+    -------
+    sorted_unique : list
+        List containing the union of unique entries between obj1 and obj2.
+    """
+    return sorted(set(obj1)) if obj2 is None else sorted(set(obj1).union(obj2))
+
+
+def _sorted_unique_intersection(obj1, obj2=None):
+    """
+    Determine the intersection of unique elements from two lists.
+
+    Convenience function for handling various actions with indices.
+
+    Parameters
+    ----------
+    obj1 : list or tuple or set or 1D ndarray
+        First list from which to determine unique entries.
+    obj2 : list or tuple or set or 1D ndarray
+        Second list from which to determine unique entries, which is intersected with
+        the first list. If None, the method will simply return the sorted list of unique
+        elements in obj1.
+
+    Returns
+    -------
+    sorted_unique : list
+        List containing the intersection of unique entries between obj1 and obj2.
+    """
+    return sorted(set(obj1)) if obj2 is None else sorted(set(obj1).intersection(obj2))
+
+
+def _sorted_unique_difference(obj1, obj2=None):
+    """
+    Determine the difference of unique elements from two lists.
+
+    Convenience function for handling various actions with indices.
+
+    Parameters
+    ----------
+    obj1 : list or tuple or set or 1D ndarray
+        First list from which to determine unique entries.
+    obj2 : list or tuple or set or 1D ndarray
+        Second list from which to determine unique entries, which is differenced with
+        the first list. If None, the method will simply return the sorted list of unique
+        elements in obj1.
+
+    Returns
+    -------
+    sorted_unique : list
+        List containing the difference in unique entries between obj1 and obj2.
+    """
+    return sorted(set(obj1)) if obj2 is None else sorted(set(obj1).difference(obj2))
diff --git a/src/pyuvdata/uvbase.py b/src/pyuvdata/uvbase.py
index 8e460c0db1..7abe34eaa4 100644
--- a/src/pyuvdata/uvbase.py
+++ b/src/pyuvdata/uvbase.py
@@ -16,7 +16,7 @@
 from . import __version__
 from . import parameter as uvp
-from .utils.helpers import _get_iterable
+from .utils.tools import _get_iterable
 
 __all__ = ["UVBase"]
 
diff --git a/src/pyuvdata/uvbeam/beamfits.py b/src/pyuvdata/uvbeam/beamfits.py
index ba1f82e5ab..ee40b4dde6 100644
--- a/src/pyuvdata/uvbeam/beamfits.py
+++ b/src/pyuvdata/uvbeam/beamfits.py
@@ -10,8 +10,8 @@
 from astropy.io import fits
 from docstring_parser import DocstringStyle
 
+from .. 
import utils from ..docstrings import copy_replace_short_description -from ..utils import helpers from ..utils.file_io import fits as fits_utils from . import UVBeam @@ -331,7 +331,7 @@ def read_beamfits( self.x_orientation = primary_header.pop("XORIENT", None) self.history = str(primary_header.get("HISTORY", "")) - if not helpers._check_history_version( + if not utils.history._check_history_version( self.history, self.pyuvdata_version_str ): self.history += self.pyuvdata_version_str @@ -534,7 +534,7 @@ def write_beamfits( if self.Nfreqs > 1: freq_spacing = self.freq_array[1:] - self.freq_array[:-1] - if not helpers._test_array_constant( + if not utils.tools._test_array_constant( freq_spacing, tols=self._freq_array.tols ): raise ValueError( @@ -551,7 +551,7 @@ def write_beamfits( else: ax_nums = reg_primary_ax_nums if self.Naxes1 > 1: - if not helpers._test_array_constant_spacing(self._axis1_array): + if not utils.tools._test_array_constant_spacing(self._axis1_array): raise ValueError( "The pixels are not evenly spaced along first axis. " "The beam fits format does not support " @@ -562,7 +562,7 @@ def write_beamfits( axis1_spacing = 1 if self.Naxes2 > 1: - if not helpers._test_array_constant_spacing(self._axis2_array): + if not utils.tools._test_array_constant_spacing(self._axis2_array): raise ValueError( "The pixels are not evenly spaced along second axis. " "The beam fits format does not support " @@ -642,7 +642,9 @@ def write_beamfits( # set up feed or pol axis if self.beam_type == "power": if self.Npols > 1: - if not helpers._test_array_constant_spacing(self._polarization_array): + if not utils.tools._test_array_constant_spacing( + self._polarization_array + ): raise ValueError( "The polarization values are not evenly " "spaced (probably because of a select operation). " diff --git a/src/pyuvdata/uvbeam/cst_beam.py b/src/pyuvdata/uvbeam/cst_beam.py index 4b297dc013..b736e57d46 100644 --- a/src/pyuvdata/uvbeam/cst_beam.py +++ b/src/pyuvdata/uvbeam/cst_beam.py @@ -9,7 +9,6 @@ import numpy as np from .. import utils -from ..utils import helpers from . 
import UVBeam __all__ = ["CSTBeam"] @@ -153,7 +152,9 @@ def read_cst_beam( self.model_name = model_name self.model_version = model_version self.history = history - if not helpers._check_history_version(self.history, self.pyuvdata_version_str): + if not utils.history._check_history_version( + self.history, self.pyuvdata_version_str + ): self.history += self.pyuvdata_version_str if x_orientation is not None: @@ -242,14 +243,14 @@ def read_cst_beam( theta_data = theta_data.reshape((theta_axis.size, phi_axis.size), order="F") phi_data = phi_data.reshape((theta_axis.size, phi_axis.size), order="F") - if not helpers._test_array_constant_spacing( + if not utils.tools._test_array_constant_spacing( theta_axis, tols=self._axis2_array.tols ): raise ValueError( "Data does not appear to be regularly gridded in zenith angle" ) - if not helpers._test_array_constant_spacing( + if not utils.tools._test_array_constant_spacing( phi_axis, tols=self._axis1_array.tols ): raise ValueError( diff --git a/src/pyuvdata/uvbeam/mwa_beam.py b/src/pyuvdata/uvbeam/mwa_beam.py index 3791335847..15a653e8bc 100644 --- a/src/pyuvdata/uvbeam/mwa_beam.py +++ b/src/pyuvdata/uvbeam/mwa_beam.py @@ -611,7 +611,7 @@ def read_mwa_beam( gain_str = "[" + ", ".join(gain_str_list) + "]" self.history += " delays set to " + delay_str + " gains set to " + gain_str - if not utils.helpers._check_history_version( + if not utils.history._check_history_version( self.history, self.pyuvdata_version_str ): self.history += self.pyuvdata_version_str diff --git a/src/pyuvdata/uvbeam/uvbeam.py b/src/pyuvdata/uvbeam/uvbeam.py index b23cab94c9..6789ca9145 100644 --- a/src/pyuvdata/uvbeam/uvbeam.py +++ b/src/pyuvdata/uvbeam/uvbeam.py @@ -18,7 +18,6 @@ from .. import parameter as uvp from .. import utils from ..docstrings import combine_docstrings, copy_replace_short_description -from ..utils import helpers from ..uvbase import UVBase from . import initializers @@ -2489,7 +2488,7 @@ def __add__( ) # Update filename parameter - this.filename = helpers._combine_filenames(this.filename, other.filename) + this.filename = utils.tools._combine_filenames(this.filename, other.filename) if this.filename is not None: this._filename.form = (len(this.filename),) @@ -2833,7 +2832,7 @@ def __add__( # Check specific requirements if this.Nfreqs > 1: - if not helpers._test_array_constant_spacing( + if not utils.tools._test_array_constant_spacing( this.freq_array, tols=this._freq_array.tols ): warnings.warn( @@ -2842,7 +2841,7 @@ def __add__( ) if self.beam_type == "power" and this.Npols > 2: - if not helpers._test_array_constant_spacing(this._polarization_array): + if not utils.tools._test_array_constant_spacing(this._polarization_array): warnings.warn( "Combined polarizations are not evenly spaced. This will " "make it impossible to write this data out to some file types." @@ -2850,14 +2849,16 @@ def __add__( if n_axes > 0: history_update_string += " axis using pyuvdata." - histories_match = helpers._check_histories(this.history, other.history) + histories_match = utils.history._check_histories( + this.history, other.history + ) this.history += history_update_string if not histories_match: if verbose_history: this.history += " Next object history follows. 
" + other.history else: - extra_history = helpers._combine_history_addition( + extra_history = utils.history._combine_history_addition( this.history, other.history ) if extra_history is not None: @@ -2974,7 +2975,9 @@ def select( beam_object.axis1_array = beam_object.axis1_array[axis1_inds] if beam_object.Naxes1 > 1: - if not helpers._test_array_constant_spacing(beam_object._axis1_array): + if not utils.tools._test_array_constant_spacing( + beam_object._axis1_array + ): warnings.warn( "Selected values along first image axis are " "not evenly spaced. This is not supported by " @@ -3006,7 +3009,9 @@ def select( beam_object.axis2_array = beam_object.axis2_array[axis2_inds] if beam_object.Naxes2 > 1: - if not helpers._test_array_constant_spacing(beam_object._axis2_array): + if not utils.tools._test_array_constant_spacing( + beam_object._axis2_array + ): warnings.warn( "Selected values along second image axis are " "not evenly spaced. This is not supported by " @@ -3050,17 +3055,17 @@ def select( ] if freq_chans is not None: - freq_chans = helpers._get_iterable(freq_chans) + freq_chans = utils.tools._get_iterable(freq_chans) if frequencies is None: frequencies = beam_object.freq_array[freq_chans] else: - frequencies = helpers._get_iterable(frequencies) + frequencies = utils.tools._get_iterable(frequencies) frequencies = np.sort( list(set(frequencies) | set(beam_object.freq_array[freq_chans])) ) if frequencies is not None: - frequencies = helpers._get_iterable(frequencies) + frequencies = utils.tools._get_iterable(frequencies) if n_selects > 0: history_update_string += ", frequencies" else: @@ -3086,7 +3091,7 @@ def select( freq_separation = ( beam_object.freq_array[1:] - beam_object.freq_array[:-1] ) - if not helpers._test_array_constant( + if not utils.tools._test_array_constant( freq_separation, tols=beam_object._freq_array.tols ): warnings.warn( @@ -3135,7 +3140,7 @@ def select( if key in beam_object.feed_array: x_orient_dict[value] = key - feeds = helpers._get_iterable(feeds) + feeds = utils.tools._get_iterable(feeds) feeds = [f.lower() for f in feeds] if n_selects > 0: history_update_string += ", feeds" @@ -3181,7 +3186,7 @@ def select( if beam_object.beam_type == "efield": raise ValueError("polarizations cannot be used with efield beams") - polarizations = helpers._get_iterable(polarizations) + polarizations = utils.tools._get_iterable(polarizations) if np.array(polarizations).ndim > 1: polarizations = np.array(polarizations).flatten() @@ -3219,7 +3224,7 @@ def select( beam_object.polarization_array[1:] - beam_object.polarization_array[:-1] ) - if not helpers._test_array_constant(pol_separation): + if not utils.tools._test_array_constant(pol_separation): warnings.warn( "Selected polarizations are not evenly spaced. This " "is not supported by the regularly gridded beam fits format" @@ -3764,7 +3769,7 @@ def read_cst_beam( if not isinstance(filename, (list, tuple)) and filename.endswith("yaml"): # update filelist basename = os.path.basename(filename) - self.filename = helpers._combine_filenames(self.filename, [basename]) + self.filename = utils.tools._combine_filenames(self.filename, [basename]) self._filename.form = (len(self.filename),) def read_mwa_beam(self, h5filepath, **kwargs): diff --git a/src/pyuvdata/uvcal/calfits.py b/src/pyuvdata/uvcal/calfits.py index c5fd3569ea..496e8b0d9f 100644 --- a/src/pyuvdata/uvcal/calfits.py +++ b/src/pyuvdata/uvcal/calfits.py @@ -18,8 +18,8 @@ except ImportError: hasmoon = False +from .. 
import utils from ..docstrings import copy_replace_short_description -from ..utils import helpers from ..utils.file_io import fits as fits_utils from . import UVCal @@ -127,14 +127,14 @@ def write_calfits( "The calfits file format does not support time_range when there is " "more than one time." ) - if not helpers._test_array_constant_spacing(self._time_array): + if not utils.tools._test_array_constant_spacing(self._time_array): raise ValueError( "The times are not evenly spaced (probably " "because of a select operation). The calfits format " "does not support unevenly spaced times." ) time_spacing = np.diff(self.time_array) - if not helpers._test_array_constant(self._integration_time): + if not utils.tools._test_array_constant(self._integration_time): raise ValueError( "The integration times are variable. The calfits format " "does not support variable integration times." @@ -159,7 +159,7 @@ def write_calfits( time_zero = self.time_array[0] if self.Njones > 1: - if not helpers._test_array_constant_spacing(self._jones_array): + if not utils.tools._test_array_constant_spacing(self._jones_array): raise ValueError( "The jones values are not evenly spaced." "The calibration fits file format does not" @@ -589,7 +589,7 @@ def read_calfits( self.history = str(hdr.get("HISTORY", "")) - if not helpers._check_history_version( + if not utils.history._check_history_version( self.history, self.pyuvdata_version_str ): if not self.history.endswith("\n"): diff --git a/src/pyuvdata/uvcal/calh5.py b/src/pyuvdata/uvcal/calh5.py index 24f4b36ee3..f5908532f2 100644 --- a/src/pyuvdata/uvcal/calh5.py +++ b/src/pyuvdata/uvcal/calh5.py @@ -15,7 +15,6 @@ from .. import utils from ..docstrings import copy_replace_short_description from ..telescopes import Telescope -from ..utils import helpers from ..utils.file_io import hdf5 as hdf5_utils from .uvcal import UVCal @@ -268,7 +267,9 @@ def _read_header( # versions allowed one to store this even if it wasn't actually being used optional_parameters.remove("flex_spw_id_array") - if not helpers._check_history_version(self.history, self.pyuvdata_version_str): + if not utils.history._check_history_version( + self.history, self.pyuvdata_version_str + ): self.history += self.pyuvdata_version_str # Optional parameters @@ -287,14 +288,14 @@ def _read_header( if run_check_acceptability: if self.time_array is not None: - helpers.check_lsts_against_times( + utils.times.check_lsts_against_times( jd_array=self.time_array, lst_array=self.lst_array, telescope_loc=self.telescope.location, lst_tols=(0, utils.LST_RAD_TOL), ) if self.time_range is not None: - helpers.check_lsts_against_times( + utils.times.check_lsts_against_times( jd_array=self.time_range, lst_array=self.lst_range, telescope_loc=self.telescope.location, @@ -446,7 +447,7 @@ def _get_data( # TODO: this logic is similar to what is in uvh5. See if an abstracted # version can be pulled out into a util function. 
if ant_inds is not None: - ant_slices, ant_sliceable = helpers._convert_to_slices( + ant_slices, ant_sliceable = utils.tools._convert_to_slices( ant_inds, max_nslice_frac=0.1 ) else: @@ -454,7 +455,7 @@ def _get_data( ant_sliceable = True if time_inds is not None: - time_slices, time_sliceable = helpers._convert_to_slices( + time_slices, time_sliceable = utils.tools._convert_to_slices( time_inds, max_nslice_frac=0.1 ) else: @@ -462,7 +463,7 @@ def _get_data( time_sliceable = True if freq_inds is not None: - freq_slices, freq_sliceable = helpers._convert_to_slices( + freq_slices, freq_sliceable = utils.tools._convert_to_slices( freq_inds, max_nslice_frac=0.1 ) else: @@ -470,7 +471,7 @@ def _get_data( freq_sliceable = True if spw_inds is not None: - spw_slices, spw_sliceable = helpers._convert_to_slices( + spw_slices, spw_sliceable = utils.tools._convert_to_slices( spw_inds, max_nslice_frac=0.1 ) else: @@ -478,7 +479,7 @@ def _get_data( spw_sliceable = True if jones_inds is not None: - jones_slices, jones_sliceable = helpers._convert_to_slices( + jones_slices, jones_sliceable = utils.tools._convert_to_slices( jones_inds, max_nslice_frac=0.5 ) else: diff --git a/src/pyuvdata/uvcal/fhd_cal.py b/src/pyuvdata/uvcal/fhd_cal.py index 475693c4c1..664fad98d8 100644 --- a/src/pyuvdata/uvcal/fhd_cal.py +++ b/src/pyuvdata/uvcal/fhd_cal.py @@ -259,7 +259,7 @@ def read_fhd_cal( else: self.history += "\n" + extra_history - if not utils.helpers._check_history_version( + if not utils.history._check_history_version( self.history, self.pyuvdata_version_str ): if self.history.endswith("\n"): diff --git a/src/pyuvdata/uvcal/ms_cal.py b/src/pyuvdata/uvcal/ms_cal.py index 9a2b74bc5e..844d396e24 100644 --- a/src/pyuvdata/uvcal/ms_cal.py +++ b/src/pyuvdata/uvcal/ms_cal.py @@ -13,7 +13,6 @@ from .. import utils from ..docstrings import copy_replace_short_description -from ..utils import helpers from ..utils.file_io import ms as ms_utils from . import UVCal @@ -534,7 +533,7 @@ def write_ms_cal(self, filename, clobber=False): else: spw_selection = np.equal(self.flex_spw_id_array, spw_id) spw_nchan = sum(spw_selection) - [spw_selection], _ = helpers._convert_to_slices( + [spw_selection], _ = utils.tools._convert_to_slices( spw_selection, max_nslice=1, return_index_on_fail=True ) spw_sel_dict[spw_id] = (spw_selection, spw_nchan) @@ -613,7 +612,7 @@ def write_ms_cal(self, filename, clobber=False): # Determine polarization order for writing out in CASA standard order, check # if this order can be represented by a single slice. pol_order = utils.pol.determine_pol_order(self.jones_array, order="CASA") - [pol_order], _ = helpers._convert_to_slices( + [pol_order], _ = utils.tools._convert_to_slices( pol_order, max_nslice=1, return_index_on_fail=True ) diff --git a/src/pyuvdata/uvcal/uvcal.py b/src/pyuvdata/uvcal/uvcal.py index 0ed2b2ce95..31502777d3 100644 --- a/src/pyuvdata/uvcal/uvcal.py +++ b/src/pyuvdata/uvcal/uvcal.py @@ -15,7 +15,6 @@ from .. import parameter as uvp from .. import utils from ..docstrings import combine_docstrings, copy_replace_short_description -from ..utils import helpers from ..uvbase import UVBase from . 
import initializers @@ -1167,7 +1166,7 @@ def _check_flex_spw_contiguous(self): """ if not self.wide_band: - helpers._check_flex_spw_contiguous( + utils.frequency._check_flex_spw_contiguous( spw_array=self.spw_array, flex_spw_id_array=self.flex_spw_id_array ) @@ -1192,7 +1191,7 @@ def _check_freq_spacing(self, *, raise_errors=True): """ if (self.freq_array is None) or (self.Nfreqs == 1): return False, False - return helpers._check_freq_spacing( + return utils.frequency._check_freq_spacing( freq_array=self.freq_array, freq_tols=self._freq_array.tols, channel_width=self.channel_width, @@ -1309,7 +1308,7 @@ def _add_phase_center( source without coordinates. """ - cat_entry = utils.ps_cat.generate_phase_center_cat_entry( + cat_entry = utils.phase_center_catalog.generate_phase_center_cat_entry( cat_name=cat_name, cat_type=cat_type, cat_lon=cat_lon, @@ -1328,7 +1327,7 @@ def _add_phase_center( # The logic below ensures that we pick the lowest positive integer that is # not currently being used by another source if cat_id is None or not force_update: - cat_id = utils.ps_cat.generate_new_phase_center_id( + cat_id = utils.phase_center_catalog.generate_new_phase_center_id( phase_center_catalog=self.phase_center_catalog, cat_id=cat_id ) @@ -1337,7 +1336,7 @@ def _add_phase_center( self.phase_center_catalog = {} else: # Let's warn if this entry has the same name as an existing one - temp_id, cat_diffs = utils.ps_cat.look_in_catalog( + temp_id, cat_diffs = utils.phase_center_catalog.look_in_catalog( self.phase_center_catalog, phase_dict=cat_entry ) @@ -1452,7 +1451,7 @@ def print_phase_center_info( ValueError If `cat_name` matches no keys in `phase_center_catalog`. """ - return utils.ps_cat.print_phase_center_info( + return utils.phase_center_catalog.print_phase_center_info( self.phase_center_catalog, catalog_identifier=catalog_identifier, hms_format=hms_format, @@ -1484,7 +1483,7 @@ def _update_phase_center_id(self, cat_id, *, new_id=None, reserved_ids=None): If not using the method on a multi-phase-ctr data set, if there's no entry that matches `cat_name`, or of the value `new_id` is already taken. """ - new_id = utils.ps_cat.generate_new_phase_center_id( + new_id = utils.phase_center_catalog.generate_new_phase_center_id( phase_center_catalog=self.phase_center_catalog, cat_id=new_id, old_id=cat_id, @@ -1554,7 +1553,7 @@ def _consolidate_phase_center_catalogs( # testing it's sometimes convenient to use self.phase_center_catalog as # the ref catalog, which causes a RunTime error due to updates to the dict. cat_entry = reference_catalog[cat_id] - match_id, match_diffs = utils.ps_cat.look_in_catalog( + match_id, match_diffs = utils.phase_center_catalog.look_in_catalog( self.phase_center_catalog, phase_dict=cat_entry, ignore_name=ignore_name ) if match_id is None or match_diffs != 0: @@ -1667,7 +1666,7 @@ def check( # check that time ranges are well formed and do not overlap if self.time_range is not None: - if helpers._check_range_overlap(self.time_range): + if utils.tools._check_range_overlap(self.time_range): raise ValueError("Some time_ranges overlap.") # note: do not check lst range overlap because of branch cut. # Assume they are ok if time_ranges are ok. 
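The hunk above leans on the relocated `utils.tools._check_range_overlap` helper. As a reviewer aid, here is a minimal sketch of the overlap test such a helper performs on an (N, 2) array of [start, stop] pairs; this is an illustration only, not pyuvdata's implementation, and the name `ranges_overlap` is hypothetical:

    import numpy as np

    def ranges_overlap(ranges):
        """Return True if any [start, stop] rows of an (N, 2) array overlap."""
        ranges = np.asarray(ranges, dtype=float)
        order = np.argsort(ranges[:, 0])
        starts, stops = ranges[order, 0], ranges[order, 1]
        # After sorting by start time, an overlap exists iff some range
        # starts before the previous range has ended.
        return bool(np.any(starts[1:] < stops[:-1]))

    # ranges_overlap([[2458119.5, 2458119.6], [2458119.55, 2458119.7]]) -> True

The `check()` hunk errors out when this condition holds for `time_range`; LST ranges are assumed consistent whenever the time ranges are, because of the branch cut noted in the comment above.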
@@ -1724,21 +1723,21 @@ def check( if run_check_acceptability: # Check antenna positions - helpers.check_surface_based_positions( + utils.coordinates.check_surface_based_positions( antenna_positions=self.telescope.antenna_positions, telescope_loc=self.telescope.location, raise_error=False, ) if self.time_array is not None: - helpers.check_lsts_against_times( + utils.times.check_lsts_against_times( jd_array=self.time_array, lst_array=self.lst_array, telescope_loc=self.telescope.location, lst_tols=self._lst_array.tols if lst_tol is None else [0, lst_tol], ) if self.time_range is not None: - helpers.check_lsts_against_times( + utils.times.check_lsts_against_times( jd_array=self.time_range, lst_array=self.lst_range, telescope_loc=self.telescope.location, @@ -1867,7 +1866,7 @@ def _slice_array(self, key, data_array, *, squeeze_pol=True): :class: numpy ndarray Slice of the data_array for the key. """ - key = helpers._get_iterable(key) + key = utils.tools._get_iterable(key) if len(key) == 1: # interpret as a single antenna output = data_array[self.ant2ind(key[0]), :, :, :] @@ -2213,7 +2212,7 @@ def reorder_freqs( index_array = np.flip(index_array) else: - index_array = helpers._sort_freq_helper( + index_array = utils.frequency._sort_freq_helper( Nfreqs=self.Nfreqs, freq_array=self.freq_array, Nspws=self.Nspws, @@ -2693,7 +2692,7 @@ def __add__( ) if this.time_range is not None: - if helpers._check_range_overlap( + if utils.tools._check_range_overlap( np.concatenate((this.time_range, other.time_range), axis=0) ): raise ValueError("A time_range overlaps in the two objects.") @@ -2829,7 +2828,7 @@ def __add__( this.reorder_jones(temp_ind) # Update filename parameter - this.filename = helpers._combine_filenames(this.filename, other.filename) + this.filename = utils.tools._combine_filenames(this.filename, other.filename) if this.filename is not None: this._filename.form = (len(this.filename),) @@ -3340,7 +3339,7 @@ def __add__( ) if this.Njones > 2: - if not helpers._test_array_constant_spacing(this._jones_array): + if not utils.tools._test_array_constant_spacing(this._jones_array): warnings.warn( "Combined Jones elements are not evenly spaced. This will " "make it impossible to write this data out to calfits files." @@ -3349,14 +3348,16 @@ def __add__( if n_axes > 0: history_update_string += " axis using pyuvdata." - histories_match = helpers._check_histories(this.history, other.history) + histories_match = utils.history._check_histories( + this.history, other.history + ) this.history += history_update_string if not histories_match: if verbose_history: this.history += " Next object history follows. " + other.history else: - extra_history = helpers._combine_history_addition( + extra_history = utils.history._combine_history_addition( this.history, other.history ) if extra_history is not None: @@ -3609,7 +3610,9 @@ def fast_concat( history_update_string += " axis using pyuvdata." histories_match = [] for obj in other: - histories_match.append(helpers._check_histories(this.history, obj.history)) + histories_match.append( + utils.history._check_histories(this.history, obj.history) + ) this.history += history_update_string for obj_num, obj in enumerate(other): @@ -3617,7 +3620,7 @@ def fast_concat( if verbose_history: this.history += " Next object history follows. 
" + obj.history else: - extra_history = helpers._combine_history_addition( + extra_history = utils.history._combine_history_addition( this.history, obj.history ) if extra_history is not None: @@ -3854,7 +3857,7 @@ def fast_concat( # update filename attribute for obj in other: - this.filename = helpers._combine_filenames(this.filename, obj.filename) + this.filename = utils.tools._combine_filenames(this.filename, obj.filename) if this.filename is not None: this._filename.form = len(this.filename) @@ -3970,7 +3973,7 @@ def _select_preprocess( "Only one of antenna_nums and antenna_names can be provided." ) - antenna_names = helpers._get_iterable(antenna_names) + antenna_names = utils.tools._get_iterable(antenna_names) antenna_nums = [] for s in antenna_names: if s not in self.telescope.antenna_names: @@ -3981,7 +3984,7 @@ def _select_preprocess( antenna_nums.append(self.telescope.antenna_numbers[ind]) if antenna_nums is not None: - antenna_nums = helpers._get_iterable(antenna_nums) + antenna_nums = utils.tools._get_iterable(antenna_nums) history_update_string += "antennas" n_selects += 1 @@ -4010,11 +4013,11 @@ def _select_preprocess( ) if catalog_names is not None: - phase_center_ids = utils.ps_cat.look_for_name( + phase_center_ids = utils.phase_center_catalog.look_for_name( self.phase_center_catalog, catalog_names ) - time_inds = helpers._select_times_helper( + time_inds = utils.times._select_times_helper( times=times, time_range=time_range, lsts=lsts, @@ -4045,7 +4048,7 @@ def _select_preprocess( if phase_center_ids is not None: pc_check = np.isin(self.phase_center_id_array, phase_center_ids) - time_inds = helpers._sorted_unique_intersection( + time_inds = utils.tools._sorted_unique_intersection( np.where(pc_check)[0], time_inds ) @@ -4064,7 +4067,7 @@ def _select_preprocess( time_inds_arr = np.array(time_inds) if time_inds_arr.size > 1: time_ind_separation = time_inds_arr[1:] - time_inds_arr[:-1] - if not helpers._test_array_constant(time_ind_separation): + if not utils.tools._test_array_constant(time_ind_separation): warnings.warn( "Selected times are not evenly spaced. This " "is not supported by the calfits format." @@ -4079,7 +4082,7 @@ def _select_preprocess( else: if not self.wide_band: # Translate the spws into frequencies - freq_chans = helpers._sorted_unique_union( + freq_chans = utils.tools._sorted_unique_union( np.where(np.isin(self.flex_spw_id_array, spws))[0], freq_chans ) spw_inds = None @@ -4111,7 +4114,7 @@ def _select_preprocess( ) if frequencies is not None: - frequencies = helpers._get_iterable(frequencies) + frequencies = utils.tools._get_iterable(frequencies) freq_arr_use = self.freq_array freq_check = np.isin(frequencies, freq_arr_use) @@ -4121,7 +4124,7 @@ def _select_preprocess( "present in the freq_array" ) - freq_chans = helpers._sorted_unique_union( + freq_chans = utils.tools._sorted_unique_union( np.where(np.isin(freq_arr_use, frequencies))[0], freq_chans ) @@ -4136,7 +4139,7 @@ def _select_preprocess( if frequencies is not None: pass - freq_inds = np.array(sorted(helpers._get_iterable(freq_chans))) + freq_inds = np.array(sorted(utils.tools._get_iterable(freq_chans))) if len(freq_inds) > 1: freq_ind_separation = freq_inds[1:] - freq_inds[:-1] @@ -4144,7 +4147,7 @@ def _select_preprocess( freq_ind_separation = freq_ind_separation[ np.diff(self.flex_spw_id_array[freq_inds]) == 0 ] - if not helpers._test_array_constant(freq_ind_separation): + if not utils.tools._test_array_constant(freq_ind_separation): warnings.warn( "Selected frequencies are not evenly spaced. 
This " "will make it impossible to write this data out to " @@ -4162,7 +4165,7 @@ def _select_preprocess( freq_inds = None if jones is not None: - jones = helpers._get_iterable(jones) + jones = utils.tools._get_iterable(jones) if np.array(jones).ndim > 1: jones = np.array(jones).flatten() if n_selects > 0: @@ -4203,10 +4206,10 @@ def _select_preprocess( jones_chans = np.where( np.isin(self.flex_spw_id_array, self.spw_array[jones_spws]) )[0] - freq_inds = helpers._sorted_unique_intersection( + freq_inds = utils.tools._sorted_unique_intersection( jones_chans, freq_inds ) - spw_inds = helpers._sorted_unique_intersection(jones_spws, spw_inds) + spw_inds = utils.tools._sorted_unique_intersection(jones_spws, spw_inds) # Trap a corner case here where the frequency and polarization selects # on a flex-pol data set end up with no actual data being selected. @@ -4215,12 +4218,12 @@ def _select_preprocess( "No data matching this Jones selection in this flex-Jones " " UVCal object." ) - spacing_check = helpers._test_array_constant_spacing( + spacing_check = utils.tools._test_array_constant_spacing( np.unique(self.flex_jones_array[spw_inds]) ) else: jones_inds = sorted(set(jones_inds)) - spacing_check = helpers._test_array_constant_spacing( + spacing_check = utils.tools._test_array_constant_spacing( self.jones_array[jones_inds] ) if not spacing_check: diff --git a/src/pyuvdata/uvdata/fhd.py b/src/pyuvdata/uvdata/fhd.py index 0d0d140bb7..1f82d9bee1 100644 --- a/src/pyuvdata/uvdata/fhd.py +++ b/src/pyuvdata/uvdata/fhd.py @@ -585,7 +585,7 @@ def read_fhd( # because they depend on the phasing of the visibilities) # the values in bl_info.JDATE are the JD for each integration. # We need to expand up to Nblts. - int_times = list(utils.helpers._get_iterable(bl_info["JDATE"][0])) + int_times = list(utils.tools._get_iterable(bl_info["JDATE"][0])) bin_offset = bl_info["BIN_OFFSET"][0] if self.Ntimes != len(int_times): warnings.warn( @@ -772,7 +772,7 @@ def read_fhd( else: self.history = "" - if not utils.helpers._check_history_version( + if not utils.history._check_history_version( self.history, self.pyuvdata_version_str ): self.history += self.pyuvdata_version_str diff --git a/src/pyuvdata/uvdata/initializers.py b/src/pyuvdata/uvdata/initializers.py index b8b07a6abd..dcc4ec9dae 100644 --- a/src/pyuvdata/uvdata/initializers.py +++ b/src/pyuvdata/uvdata/initializers.py @@ -211,7 +211,7 @@ def configure_blt_rectangularity( ) (blts_are_rectangular, time_axis_faster_than_bls) = ( - utils.helpers.determine_rectangularity( + utils.bltaxis.determine_rectangularity( time_array=times, baseline_array=baselines, nbls=nbl, ntimes=nt ) ) diff --git a/src/pyuvdata/uvdata/miriad.py b/src/pyuvdata/uvdata/miriad.py index f1fcc4083e..d4be90c7e4 100644 --- a/src/pyuvdata/uvdata/miriad.py +++ b/src/pyuvdata/uvdata/miriad.py @@ -18,7 +18,6 @@ from .. import known_telescope_location, utils from ..docstrings import copy_replace_short_description -from ..utils import helpers from . 
import UVData from .uvdata import reporting_request @@ -212,7 +211,9 @@ def _load_miriad_variables(self, uv): self.spw_array = np.arange(self.Nspws) self.history = uv["history"] - if not helpers._check_history_version(self.history, self.pyuvdata_version_str): + if not utils.history._check_history_version( + self.history, self.pyuvdata_version_str + ): self.history += self.pyuvdata_version_str # check for pyuvdata variables that are not recognized miriad variables @@ -430,7 +431,7 @@ def _load_antpos(self, uv, *, sorted_unique_ants=None, correct_lat_lon=True): self.telescope.location = EarthLocation.from_geocentric( *np.mean(ecef_antpos[good_antpos, :], axis=0) * units.m ) - valid_location = helpers.check_surface_based_positions( + valid_location = utils.coordinates.check_surface_based_positions( telescope_loc=self.telescope.location, raise_error=False, raise_warning=False, @@ -1409,7 +1410,7 @@ def read_miriad( # which do not test as matching, so also test for all nans if not np.all( np.isnan(epoch_list[select_mask]) - ) and not helpers._test_array_constant( + ) and not utils.tools._test_array_constant( epoch_list[select_mask], tols=(1e-05, 1e-08) ): # This is unusual but allowed within Miriad. @@ -1441,10 +1442,10 @@ def read_miriad( cat_frame = "fk5" radian_tols = self._phase_center_app_ra.tols - this_single_ra = helpers._test_array_constant( + this_single_ra = utils.tools._test_array_constant( ra_list[select_mask], tols=radian_tols ) - this_single_dec = helpers._test_array_constant( + this_single_dec = utils.tools._test_array_constant( dec_list[select_mask], tols=radian_tols ) if not cat_type == "unprojected" and ( @@ -1464,7 +1465,7 @@ def read_miriad( ) if np.max(counts) > 1: for t_ind in np.arange(unique_times.size): - if not helpers._test_array_constant( + if not utils.tools._test_array_constant( lon_use[inverse == t_ind], tols=radian_tols ): raise ValueError( @@ -1472,7 +1473,7 @@ def read_miriad( "different baselines at the same time." + reporting_request ) - if not helpers._test_array_constant( + if not utils.tools._test_array_constant( lat_use[inverse == t_ind], tols=radian_tols ): raise ValueError( diff --git a/src/pyuvdata/uvdata/ms.py b/src/pyuvdata/uvdata/ms.py index 90356fb224..def94a725c 100644 --- a/src/pyuvdata/uvdata/ms.py +++ b/src/pyuvdata/uvdata/ms.py @@ -16,7 +16,6 @@ from .. import utils from ..docstrings import copy_replace_short_description -from ..utils import helpers from ..utils.file_io import ms as ms_utils from . import UVData @@ -125,7 +124,7 @@ def write_ms( # Determine polarization order for writing out in CASA standard order, check # if this order can be represented by a single slice. pol_order = utils.pol.determine_pol_order(self.polarization_array, order="CASA") - [pol_order], _ = helpers._convert_to_slices( + [pol_order], _ = utils.tools._convert_to_slices( pol_order, max_nslice=1, return_index_on_fail=True ) @@ -245,7 +244,7 @@ def write_ms( # See if we can represent scan_screen with a single slice, which # reduces overhead of copying a new array. - [scan_slice], _ = helpers._convert_to_slices( + [scan_slice], _ = utils.tools._convert_to_slices( scan_screen, max_nslice=1, return_index_on_fail=True ) diff --git a/src/pyuvdata/uvdata/mwa_corr_fits.py b/src/pyuvdata/uvdata/mwa_corr_fits.py index e2890e3116..fcd6d7dac3 100644 --- a/src/pyuvdata/uvdata/mwa_corr_fits.py +++ b/src/pyuvdata/uvdata/mwa_corr_fits.py @@ -20,7 +20,6 @@ from .. 
import Telescope, _corr_fits, utils from ..data import DATA_PATH from ..docstrings import copy_replace_short_description -from ..utils import helpers from ..utils.file_io import fits as fits_utils from . import UVData @@ -1363,7 +1362,7 @@ def read_mwa_corr_fits( for filename in filelist: # update filename attribute basename = os.path.basename(filename) - self.filename = helpers._combine_filenames(self.filename, [basename]) + self.filename = utils.tools._combine_filenames(self.filename, [basename]) self._filename.form = (len(self.filename),) if filename.lower().endswith(".metafits"): @@ -1525,7 +1524,9 @@ def read_mwa_corr_fits( self.telescope.antenna_names = meta_dict["antenna_names"] self.telescope.antenna_positions = meta_dict["antenna_positions"] self.history = meta_dict["history"] - if not helpers._check_history_version(self.history, self.pyuvdata_version_str): + if not utils.history._check_history_version( + self.history, self.pyuvdata_version_str + ): self.history += self.pyuvdata_version_str for key, value in meta_dict["extra_keywords"].items(): self.extra_keywords[key] = value diff --git a/src/pyuvdata/uvdata/uvdata.py b/src/pyuvdata/uvdata/uvdata.py index 2a2765be16..d10bb32b0c 100644 --- a/src/pyuvdata/uvdata/uvdata.py +++ b/src/pyuvdata/uvdata/uvdata.py @@ -25,7 +25,6 @@ from .. import parameter as uvp from .. import utils from ..docstrings import combine_docstrings, copy_replace_short_description -from ..utils import helpers from ..utils import phasing as phs_utils from ..utils.file_io import hdf5 as hdf5_utils from ..uvbase import UVBase @@ -772,7 +771,7 @@ def _add_phase_center( source without coordinates. """ - cat_entry = utils.ps_cat.generate_phase_center_cat_entry( + cat_entry = utils.phase_center_catalog.generate_phase_center_cat_entry( cat_name=cat_name, cat_type=cat_type, cat_lon=cat_lon, @@ -791,7 +790,7 @@ def _add_phase_center( # The logic below ensures that we pick the lowest positive integer that is # not currently being used by another source if cat_id is None or not force_update: - cat_id = utils.ps_cat.generate_new_phase_center_id( + cat_id = utils.phase_center_catalog.generate_new_phase_center_id( phase_center_catalog=self.phase_center_catalog, cat_id=cat_id ) @@ -800,7 +799,7 @@ def _add_phase_center( self.phase_center_catalog = {} else: # Let's warn if this entry has the same name as an existing one - temp_id, cat_diffs = utils.ps_cat.look_in_catalog( + temp_id, cat_diffs = utils.phase_center_catalog.look_in_catalog( self.phase_center_catalog, phase_dict=cat_entry ) @@ -1199,7 +1198,7 @@ def merge_phase_centers( # First, let's check and see if the dict entries are identical for cat_id in cat_id_list[1:]: - pc_id, pc_diffs = utils.ps_cat.look_in_catalog( + pc_id, pc_diffs = utils.phase_center_catalog.look_in_catalog( self.phase_center_catalog, phase_dict=self.phase_center_catalog[cat_id], ignore_name=ignore_name, @@ -1270,7 +1269,7 @@ def print_phase_center_info( ValueError If `cat_name` matches no keys in `phase_center_catalog`. """ - return utils.ps_cat.print_phase_center_info( + return utils.phase_center_catalog.print_phase_center_info( self.phase_center_catalog, catalog_identifier=catalog_identifier, hms_format=hms_format, @@ -1302,7 +1301,7 @@ def _update_phase_center_id(self, cat_id, *, new_id=None, reserved_ids=None): If not using the method on a multi-phase-ctr data set, if there's no entry that matches `cat_name`, or of the value `new_id` is already taken. 
""" - new_id = utils.ps_cat.generate_new_phase_center_id( + new_id = utils.phase_center_catalog.generate_new_phase_center_id( phase_center_catalog=self.phase_center_catalog, cat_id=new_id, old_id=cat_id, @@ -1371,7 +1370,7 @@ def _consolidate_phase_center_catalogs( # testing it's sometimes convenient to use self.phase_center_catalog as # the ref catalog, which causes a RunTime error due to updates to the dict. cat_entry = reference_catalog[cat_id] - match_id, match_diffs = utils.ps_cat.look_in_catalog( + match_id, match_diffs = utils.phase_center_catalog.look_in_catalog( self.phase_center_catalog, phase_dict=cat_entry, ignore_name=ignore_name ) if match_id is None or match_diffs != 0: @@ -1638,7 +1637,7 @@ def _check_flex_spw_contiguous(self): UVH5 and UVData objects can handle this, but MIRIAD, MIR, UVFITS, and MS file formats cannot, so we just consider it forbidden. """ - helpers._check_flex_spw_contiguous( + utils.frequency._check_flex_spw_contiguous( spw_array=self.spw_array, flex_spw_id_array=self.flex_spw_id_array ) @@ -1661,7 +1660,7 @@ def _check_freq_spacing(self, *, raise_errors=True): Flag that channel spacing does not match channel width. """ - return helpers._check_freq_spacing( + return utils.frequency._check_freq_spacing( freq_array=self.freq_array, freq_tols=self._freq_array.tols, channel_width=self.channel_width, @@ -2308,14 +2307,14 @@ def check( if run_check_acceptability: # Check antenna positions - helpers.check_surface_based_positions( + utils.coordinates.check_surface_based_positions( antenna_positions=self.telescope.antenna_positions, telescope_loc=self.telescope.location, raise_error=False, ) # Check the LSTs against what we expect given up-to-date IERS data - helpers.check_lsts_against_times( + utils.times.check_lsts_against_times( jd_array=self.time_array, lst_array=self.lst_array, lst_tols=self._lst_array.tols if lst_tol is None else [0, lst_tol], @@ -2648,7 +2647,7 @@ def antpair2ind( if inds.size == 0: inds = None - inds = helpers.slicify(inds) + inds = utils.tools.slicify(inds) self.__antpair2ind_cache[(ant1, ant2, ordered)] = inds return inds @@ -2695,7 +2694,7 @@ def _key2inds(self, key: str | tuple[int] | tuple[int, int] | tuple[int, int, st """ orig_key = key - key = helpers._get_iterable(key) + key = utils.tools._get_iterable(key) if not isinstance(key, str): key = tuple(key) @@ -2806,7 +2805,7 @@ def _key2inds(self, key: str | tuple[int] | tuple[int, int] | tuple[int, int, st raise KeyError(f"Polarization {key_print} not found in data.") # Convert to slices if possible - pol_ind = (helpers.slicify(pol_ind[0]), helpers.slicify(pol_ind[1])) + pol_ind = (utils.tools.slicify(pol_ind[0]), utils.tools.slicify(pol_ind[1])) self.__key2ind_cache[key] = (blt_ind1, blt_ind2, pol_ind) return (blt_ind1, blt_ind2, pol_ind) @@ -3021,7 +3020,7 @@ def get_data( if isinstance(val, str): key.append(val) elif val is not None: - key += list(helpers._get_iterable(val)) + key += list(utils.tools._get_iterable(val)) if len(key) > 3: raise ValueError("no more than 3 key values can be passed") ind1, ind2, indp = self._key2inds(key) @@ -3073,7 +3072,7 @@ def get_flags( if isinstance(val, str): key.append(val) elif val is not None: - key += list(helpers._get_iterable(val)) + key += list(utils.tools._get_iterable(val)) if len(key) > 3: raise ValueError("no more than 3 key values can be passed") ind1, ind2, indp = self._key2inds(key) @@ -3133,7 +3132,7 @@ def get_nsamples( if isinstance(val, str): key.append(val) elif val is not None: - key += list(helpers._get_iterable(val)) + key 
+= list(utils.tools._get_iterable(val)) if len(key) > 3: raise ValueError("no more than 3 key values can be passed") ind1, ind2, indp = self._key2inds(key) @@ -3177,7 +3176,7 @@ def get_times(self, key1, key2=None, key3=None): if isinstance(val, str): key.append(val) elif val is not None: - key += list(helpers._get_iterable(val)) + key += list(utils.tools._get_iterable(val)) if len(key) > 3: raise ValueError("no more than 3 key values can be passed") inds1, inds2, indp = self._key2inds(key) @@ -3222,7 +3221,7 @@ def get_lsts(self, key1, key2=None, key3=None): if isinstance(val, str): key.append(val) elif val is not None: - key += list(helpers._get_iterable(val)) + key += list(utils.tools._get_iterable(val)) if len(key) > 3: raise ValueError("no more than 3 key values can be passed") inds1, inds2, indp = self._key2inds(key) @@ -3326,7 +3325,7 @@ def _set_method_helper(self, dshape, key1, key2=None, key3=None): if isinstance(val, str): key.append(val) elif val is not None: - key += list(helpers._get_iterable(val)) + key += list(utils.tools._get_iterable(val)) if len(key) > 3: raise ValueError("no more than 3 key values can be passed") ind1, ind2, indp = self._key2inds(key) @@ -3345,10 +3344,10 @@ def _set_method_helper(self, dshape, key1, key2=None, key3=None): f"Input array shape is {dshape}, expected shape is {expected_shape}." ) - blt_slices, blt_sliceable = helpers._convert_to_slices( + blt_slices, blt_sliceable = utils.tools._convert_to_slices( ind1, max_nslice_frac=0.1 ) - pol_slices, pol_sliceable = helpers._convert_to_slices( + pol_slices, pol_sliceable = utils.tools._convert_to_slices( indp[0], max_nslice_frac=0.5 ) @@ -3744,7 +3743,7 @@ def set_rectangularity(self, *, force: bool = False) -> None: if self.blts_are_rectangular is not None and not force: return - rect, time = helpers.determine_rectangularity( + rect, time = utils.bltaxis.determine_rectangularity( time_array=self.time_array, baseline_array=self.baseline_array, nbls=self.Nbls, @@ -3759,7 +3758,7 @@ def determine_blt_order(self) -> tuple[str] | tuple[str, str] | None: if self.blt_order is not None: return self.blt_order - order = helpers.determine_blt_order( + order = utils.bltaxis.determine_blt_order( time_array=self.time_array, baseline_array=self.baseline_array, ant_1_array=self.ant_1_array, @@ -4070,7 +4069,7 @@ def reorder_freqs( is not the same length as freq_array. """ - index_array = helpers._sort_freq_helper( + index_array = utils.frequency._sort_freq_helper( Nfreqs=self.Nfreqs, freq_array=self.freq_array, Nspws=self.Nspws, @@ -4324,7 +4323,7 @@ def unproject_phase( self.uvw_array = new_uvw # remove/update phase center - match_id, match_diffs = utils.ps_cat.look_in_catalog( + match_id, match_diffs = utils.phase_center_catalog.look_in_catalog( self.phase_center_catalog, cat_name=cat_name, cat_type="unprojected" ) if match_diffs == 0: @@ -4376,7 +4375,14 @@ def _phase_dict_helper( } if lookup_name: - if len(utils.ps_cat.look_for_name(self.phase_center_catalog, cat_name)) > 1: + if ( + len( + utils.phase_center_catalog.look_for_name( + self.phase_center_catalog, cat_name + ) + ) + > 1 + ): raise ValueError( "Name of object has multiple matches in phase center catalog. " "Set lookup_name=False in order to continue." 
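The `lookup_name` guard above depends on `utils.phase_center_catalog.look_for_name` returning every catalog ID that carries the requested name. A rough sketch of that lookup, assuming the catalog is a dict keyed by integer ID whose entries hold a "cat_name" field (the function name `look_for_name_sketch` is illustrative, not the real API):

    def look_for_name_sketch(phase_center_catalog, cat_name):
        """Return the IDs of all catalog entries whose name matches cat_name."""
        return [
            pc_id
            for pc_id, pc_entry in phase_center_catalog.items()
            if pc_entry["cat_name"] == cat_name
        ]

When more than one ID comes back, the name alone cannot identify a unique phase center, which is why the code raises and directs the caller to set `lookup_name=False`.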
@@ -4408,7 +4414,7 @@ def _phase_dict_helper( cat_id = name_dict[cat_name] cat_diffs = 0 else: - cat_id, cat_diffs = utils.ps_cat.look_in_catalog( + cat_id, cat_diffs = utils.phase_center_catalog.look_in_catalog( self.phase_center_catalog, cat_name=cat_name, cat_type=cat_type, @@ -5701,7 +5707,7 @@ def __add__( this.Nants_data = this._calc_nants_data() # Update filename parameter - this.filename = helpers._combine_filenames(this.filename, other.filename) + this.filename = utils.tools._combine_filenames(this.filename, other.filename) if this.filename is not None: this._filename.form = (len(this.filename),) @@ -5727,14 +5733,16 @@ def __add__( if n_axes > 0: history_update_string += " axis using pyuvdata." - histories_match = helpers._check_histories(this.history, other.history) + histories_match = utils.history._check_histories( + this.history, other.history + ) this.history += history_update_string if not histories_match: if verbose_history: this.history += " Next object history follows. " + other.history else: - extra_history = helpers._combine_history_addition( + extra_history = utils.history._combine_history_addition( this.history, other.history ) if extra_history is not None: @@ -5978,7 +5986,9 @@ def fast_concat( histories_match = [] for obj in other: - histories_match.append(helpers._check_histories(this.history, obj.history)) + histories_match.append( + utils.history._check_histories(this.history, obj.history) + ) this.history += history_update_string for obj_num, obj in enumerate(other): @@ -5986,7 +5996,7 @@ def fast_concat( if verbose_history: this.history += " Next object history follows. " + obj.history else: - extra_history = helpers._combine_history_addition( + extra_history = utils.history._combine_history_addition( this.history, obj.history ) if extra_history is not None: @@ -6062,7 +6072,7 @@ def fast_concat( ) this.Npols = sum([this.Npols] + [obj.Npols for obj in other]) - if not helpers._test_array_constant_spacing(this._polarization_array): + if not utils.tools._test_array_constant_spacing(this._polarization_array): warnings.warn( "Combined polarizations are not evenly spaced. This will " "make it impossible to write this data out to some file types." @@ -6132,7 +6142,7 @@ def fast_concat( # update filename attribute for obj in other: - this.filename = helpers._combine_filenames(this.filename, obj.filename) + this.filename = utils.tools._combine_filenames(this.filename, obj.filename) if this.filename is not None: this._filename.form = len(this.filename) @@ -6295,14 +6305,14 @@ def sum_vis( this.data_array = this.data_array + other.data_array history_update_string = " Visibilities summed using pyuvdata." - histories_match = helpers._check_histories(this.history, other.history) + histories_match = utils.history._check_histories(this.history, other.history) this.history += history_update_string if not histories_match: if verbose_history: this.history += " Second object history follows. 
" + other.history else: - extra_history = helpers._combine_history_addition( + extra_history = utils.history._combine_history_addition( this.history, other.history ) if extra_history is not None: @@ -6312,7 +6322,7 @@ def sum_vis( ) # merge file names - this.filename = helpers._combine_filenames(this.filename, other.filename) + this.filename = utils.tools._combine_filenames(this.filename, other.filename) # Check final object is self-consistent if run_check: @@ -6575,7 +6585,7 @@ def _select_preprocess( # test for blt_inds presence before adding inds from antennas & times if blt_inds is not None: - blt_inds = helpers._get_iterable(blt_inds) + blt_inds = utils.tools._get_iterable(blt_inds) if np.array(blt_inds).ndim > 1: blt_inds = np.array(blt_inds).flatten() history_update_string += "baseline-times" @@ -6585,12 +6595,12 @@ def _select_preprocess( raise ValueError("Cannot set both phase_center_ids and catalog_names.") if catalog_names is not None: - phase_center_ids = utils.ps_cat.look_for_name( + phase_center_ids = utils.phase_center_catalog.look_for_name( self.phase_center_catalog, catalog_names ) if phase_center_ids is not None: - phase_center_ids = np.array(helpers._get_iterable(phase_center_ids)) + phase_center_ids = np.array(utils.tools._get_iterable(phase_center_ids)) pc_blt_inds = np.nonzero( np.isin(self.phase_center_id_array, phase_center_ids) )[0] @@ -6636,7 +6646,7 @@ def _select_preprocess( ) if antenna_nums is not None: - antenna_nums = helpers._get_iterable(antenna_nums) + antenna_nums = utils.tools._get_iterable(antenna_nums) antenna_nums = np.asarray(antenna_nums) if antenna_nums.ndim > 1: antenna_nums = antenna_nums.flatten() @@ -6756,7 +6766,7 @@ def _select_preprocess( else: blt_inds = ant_blt_inds - time_blt_inds = helpers._select_times_helper( + time_blt_inds = utils.times._select_times_helper( times=times, time_range=time_range, lsts=lsts, @@ -6812,19 +6822,19 @@ def _select_preprocess( blt_inds = sorted(set(blt_inds)) if freq_chans is not None: - freq_chans = helpers._get_iterable(freq_chans) + freq_chans = utils.tools._get_iterable(freq_chans) if np.array(freq_chans).ndim > 1: freq_chans = np.array(freq_chans).flatten() if frequencies is None: frequencies = self.freq_array[freq_chans] else: - frequencies = helpers._get_iterable(frequencies) + frequencies = utils.tools._get_iterable(frequencies) frequencies = np.sort( list(set(frequencies) | set(self.freq_array[freq_chans])) ) if frequencies is not None: - frequencies = helpers._get_iterable(frequencies) + frequencies = utils.tools._get_iterable(frequencies) if np.array(frequencies).ndim > 1: frequencies = np.array(frequencies).flatten() if n_selects > 0: @@ -6848,7 +6858,7 @@ def _select_preprocess( freq_ind_separation = freq_ind_separation[ np.diff(self.flex_spw_id_array[freq_inds]) == 0 ] - if not helpers._test_array_constant(freq_ind_separation): + if not utils.tools._test_array_constant(freq_ind_separation): warnings.warn( "Selected frequencies are not evenly spaced. This " "will make it impossible to write this data out to " @@ -6866,7 +6876,7 @@ def _select_preprocess( freq_inds = None if polarizations is not None: - polarizations = helpers._get_iterable(polarizations) + polarizations = utils.tools._get_iterable(polarizations) if np.array(polarizations).ndim > 1: polarizations = np.array(polarizations).flatten() if n_selects > 0: @@ -6927,7 +6937,7 @@ def _select_preprocess( "No data matching this polarization and frequency selection " "in this UVData object." 
) - if not helpers._test_array_constant_spacing( + if not utils.tools._test_array_constant_spacing( np.unique(self.flex_spw_polarization_array[spw_inds]) ): warnings.warn( @@ -6938,7 +6948,7 @@ def _select_preprocess( else: pol_inds = np.unique(pol_inds) if len(pol_inds) > 2: - if not helpers._test_array_constant_spacing(pol_inds): + if not utils.tools._test_array_constant_spacing(pol_inds): warnings.warn( "Selected polarization values are not evenly spaced. This " "will make it impossible to write this data out to " @@ -7749,7 +7759,7 @@ def downsample_in_time( int_times = int_times if len(np.unique(int_times)) == 1: # this baseline has all the same integration times - if len(np.unique(dtime)) > 1 and not helpers._test_array_constant( + if len(np.unique(dtime)) > 1 and not utils.tools._test_array_constant( dtime, tols=self._integration_time.tols ): warnings.warn( diff --git a/src/pyuvdata/uvdata/uvfits.py b/src/pyuvdata/uvdata/uvfits.py index d02f6cf28b..e0639bba57 100644 --- a/src/pyuvdata/uvdata/uvfits.py +++ b/src/pyuvdata/uvdata/uvfits.py @@ -24,7 +24,6 @@ from .. import utils from ..docstrings import copy_replace_short_description -from ..utils import helpers from ..utils.file_io import fits as fits_utils from . import UVData @@ -71,7 +70,7 @@ def _get_parameter_data( # angles in uvfits files are stored in degrees, so convert to radians self.lst_array = np.deg2rad(vis_hdu.data.par("lst")) if run_check_acceptability: - helpers.check_lsts_against_times( + utils.times.check_lsts_against_times( jd_array=self.time_array, lst_array=self.lst_array, telescope_loc=self.telescope.location, @@ -475,7 +474,7 @@ def read_uvfits( if self.blt_order == ("bda",): self._blt_order.form = (1,) self.history = str(vis_hdr.get("HISTORY", "")) - if not helpers._check_history_version( + if not utils.history._check_history_version( self.history, self.pyuvdata_version_str ): self.history += self.pyuvdata_version_str @@ -952,7 +951,7 @@ def write_uvfits( if self.Npols > 1: pol_indexing = np.argsort(np.abs(self.polarization_array)) polarization_array = self.polarization_array[pol_indexing] - if not helpers._test_array_constant_spacing(polarization_array): + if not utils.tools._test_array_constant_spacing(polarization_array): raise ValueError( "The polarization values are not evenly spaced (probably " "because of a select operation). The uvfits format " diff --git a/src/pyuvdata/uvdata/uvh5.py b/src/pyuvdata/uvdata/uvh5.py index 7ff2e7a1bb..b43daa5dfe 100644 --- a/src/pyuvdata/uvdata/uvh5.py +++ b/src/pyuvdata/uvdata/uvh5.py @@ -18,7 +18,6 @@ from .. import Telescope, utils from ..docstrings import copy_replace_short_description -from ..utils import helpers from ..utils.file_io import hdf5 as hdf5_utils from . 
import UVData @@ -205,7 +204,7 @@ def Nbls(self) -> int: # noqa: N802 def get_blt_order(self) -> tuple[str]: """Get the blt order from analysing metadata.""" - return helpers.determine_blt_order( + return utils.bltaxis.determine_blt_order( time_array=self.time_array, ant_1_array=self.ant_1_array, ant_2_array=self.ant_2_array, @@ -252,7 +251,7 @@ def blts_are_rectangular(self) -> bool: ): return True - is_rect, self.__time_first = helpers.determine_rectangularity( + is_rect, self.__time_first = utils.bltaxis.determine_rectangularity( time_array=self.time_array, baseline_array=self.baseline_array, nbls=self.Nbls, @@ -514,7 +513,7 @@ def _read_header_with_fast_meta( proc = None if run_check_acceptability: - helpers.check_lsts_against_times( + utils.times.check_lsts_against_times( jd_array=self.time_array, lst_array=self.lst_array, telescope_loc=self.telescope.location, @@ -588,7 +587,9 @@ def _read_header_with_fast_meta( if "time_axis_faster_than_bls" in obj.header: self.time_axis_faster_than_bls = obj.time_axis_faster_than_bls - if not helpers._check_history_version(self.history, self.pyuvdata_version_str): + if not utils.history._check_history_version( + self.history, self.pyuvdata_version_str + ): self.history += self.pyuvdata_version_str # Optional parameters @@ -819,7 +820,7 @@ def _get_data( # max_nslice_frac of 0.1 yields slice speedup over fancy index for HERA data # See pyuvdata PR #805 if blt_inds is not None: - blt_slices, blt_sliceable = helpers._convert_to_slices( + blt_slices, blt_sliceable = utils.tools._convert_to_slices( blt_inds, max_nslice_frac=0.1 ) else: @@ -827,7 +828,7 @@ def _get_data( blt_sliceable = True if freq_inds is not None: - freq_slices, freq_sliceable = helpers._convert_to_slices( + freq_slices, freq_sliceable = utils.tools._convert_to_slices( freq_inds, max_nslice_frac=0.1 ) else: @@ -835,7 +836,7 @@ def _get_data( freq_sliceable = True if pol_inds is not None: - pol_slices, pol_sliceable = helpers._convert_to_slices( + pol_slices, pol_sliceable = utils.tools._convert_to_slices( pol_inds, max_nslice_frac=0.5 ) else: diff --git a/src/pyuvdata/uvflag/uvflag.py b/src/pyuvdata/uvflag/uvflag.py index 1a1725fbcc..d683bc825e 100644 --- a/src/pyuvdata/uvflag/uvflag.py +++ b/src/pyuvdata/uvflag/uvflag.py @@ -16,7 +16,6 @@ from .. import Telescope, UVCal, UVData from .. import parameter as uvp from .. import utils -from ..utils import helpers from ..uvbase import UVBase __all__ = ["UVFlag", "flags2waterfall", "and_rows_cols"] @@ -826,13 +825,13 @@ def check( if run_check_acceptability: # Check antenna positions - helpers.check_surface_based_positions( + utils.coordinates.check_surface_based_positions( antenna_positions=self.telescope.antenna_positions, telescope_loc=self.telescope.location, raise_error=False, ) - helpers.check_lsts_against_times( + utils.times.check_lsts_against_times( jd_array=self.time_array, lst_array=self.lst_array, telescope_loc=self.telescope.location, @@ -1201,7 +1200,9 @@ def collapse_pol( self.clear_unused_attributes() self.history += "Pol axis collapse. " - if not helpers._check_history_version(self.history, self.pyuvdata_version_str): + if not utils.history._check_history_version( + self.history, self.pyuvdata_version_str + ): self.history += self.pyuvdata_version_str if run_check: @@ -1323,7 +1324,9 @@ def to_waterfall( self._set_type_waterfall() self.history += 'Collapsed to type "waterfall". 
' # + self.pyuvdata_version_str - if not helpers._check_history_version(self.history, self.pyuvdata_version_str): + if not utils.history._check_history_version( + self.history, self.pyuvdata_version_str + ): self.history += self.pyuvdata_version_str self.clear_unused_attributes() @@ -1596,7 +1599,9 @@ def to_baseline( self.history += 'Broadcast to type "baseline". ' - if not helpers._check_history_version(self.history, self.pyuvdata_version_str): + if not utils.history._check_history_version( + self.history, self.pyuvdata_version_str + ): self.history += self.pyuvdata_version_str if run_check: @@ -1761,7 +1766,9 @@ def to_antenna( self._set_type_antenna() self.history += 'Broadcast to type "antenna". ' - if not helpers._check_history_version(self.history, self.pyuvdata_version_str): + if not utils.history._check_history_version( + self.history, self.pyuvdata_version_str + ): self.history += self.pyuvdata_version_str if run_check: @@ -1807,7 +1814,9 @@ def to_flag( "Unknown UVFlag mode: " + self.mode + ". Cannot convert to flag." ) self.history += 'Converted to mode "flag". ' - if not helpers._check_history_version(self.history, self.pyuvdata_version_str): + if not utils.history._check_history_version( + self.history, self.pyuvdata_version_str + ): self.history += self.pyuvdata_version_str self.clear_unused_attributes() @@ -1880,7 +1889,9 @@ def to_metric( ) self.history += 'Converted to mode "metric". ' - if not helpers._check_history_version(self.history, self.pyuvdata_version_str): + if not utils.history._check_history_version( + self.history, self.pyuvdata_version_str + ): self.history += self.pyuvdata_version_str self.clear_unused_attributes() @@ -1946,7 +1957,7 @@ def __add__( ) # Update filename parameter - this.filename = helpers._combine_filenames(this.filename, other.filename) + this.filename = utils.tools._combine_filenames(this.filename, other.filename) if this.filename is not None: this._filename.form = (len(this.filename),) @@ -2148,7 +2159,9 @@ def __add__( ) this.history += "Data combined along " + axis + " axis. " - if not helpers._check_history_version(this.history, this.pyuvdata_version_str): + if not utils.history._check_history_version( + this.history, this.pyuvdata_version_str + ): this.history += this.pyuvdata_version_str this.Ntimes = np.unique(this.time_array).size @@ -2244,7 +2257,9 @@ def __or__( if other.history not in this.history: this.history += "Flags OR'd with: " + other.history - if not helpers._check_history_version(this.history, this.pyuvdata_version_str): + if not utils.history._check_history_version( + this.history, this.pyuvdata_version_str + ): this.history += this.pyuvdata_version_str if run_check: @@ -2310,7 +2325,7 @@ def combine_metrics( """ # Ensure others is iterable (in case of single UVFlag object) - # cannot use helpers._get_iterable because the object itself is iterable + # cannot use utils.tools._get_iterable because the object itself is iterable if not isinstance(others, (list, tuple, np.ndarray)): others = [others] @@ -2342,7 +2357,9 @@ def combine_metrics( this.weights_array = warray this.history += "Combined metric arrays. 
" - if not helpers._check_history_version(this.history, this.pyuvdata_version_str): + if not utils.history._check_history_version( + this.history, this.pyuvdata_version_str + ): this.history += this.pyuvdata_version_str if run_check: @@ -2460,7 +2477,7 @@ def _select_preprocess( # test for blt_inds presence before adding inds from antennas & times if blt_inds is not None: - blt_inds = helpers._get_iterable(blt_inds) + blt_inds = utils.tools._get_iterable(blt_inds) if np.array(blt_inds).ndim > 1: blt_inds = np.array(blt_inds).flatten() if self.type == "baseline": @@ -2470,7 +2487,7 @@ def _select_preprocess( n_selects += 1 if antenna_nums is not None: - antenna_nums = helpers._get_iterable(antenna_nums) + antenna_nums = utils.tools._get_iterable(antenna_nums) if np.array(antenna_nums).ndim > 1: antenna_nums = np.array(antenna_nums).flatten() if n_selects > 0: @@ -2601,7 +2618,7 @@ def _select_preprocess( blt_inds = ant_blt_inds if times is not None: - times = helpers._get_iterable(times) + times = utils.tools._get_iterable(times) if np.array(times).ndim > 1: times = np.array(times).flatten() @@ -2650,14 +2667,14 @@ def _select_preprocess( blt_inds = sorted(set(blt_inds)) if freq_chans is not None: - freq_chans = helpers._get_iterable(freq_chans) + freq_chans = utils.tools._get_iterable(freq_chans) if np.array(freq_chans).ndim > 1: freq_chans = np.array(freq_chans).flatten() if frequencies is None: frequencies = np.squeeze(self.freq_array)[freq_chans] else: - frequencies = helpers._get_iterable(frequencies) + frequencies = utils.tools._get_iterable(frequencies) frequencies = np.sort( list( set(frequencies) | set(np.squeeze(self.freq_array)[freq_chans]) @@ -2665,7 +2682,7 @@ def _select_preprocess( ) if frequencies is not None: - frequencies = helpers._get_iterable(frequencies) + frequencies = utils.tools._get_iterable(frequencies) if np.array(frequencies).ndim > 1: frequencies = np.array(frequencies).flatten() if n_selects > 0: @@ -2689,7 +2706,7 @@ def _select_preprocess( freq_inds = None if polarizations is not None: - polarizations = helpers._get_iterable(polarizations) + polarizations = utils.tools._get_iterable(polarizations) if np.array(polarizations).ndim > 1: polarizations = np.array(polarizations).flatten() if n_selects > 0: @@ -3130,7 +3147,7 @@ def read( "freq_array spacing." ) freq_delta = np.diff(np.squeeze(self.freq_array)) - if helpers._test_array_constant_spacing( + if utils.tools._test_array_constant_spacing( self.freq_array, tols=self._freq_array.tols ): self.channel_width = np.full(self.Nfreqs, freq_delta[0]) @@ -3226,7 +3243,7 @@ def read( self.history += history - if not helpers._check_history_version( + if not utils.history._check_history_version( self.history, self.pyuvdata_version_str ): self.history += self.pyuvdata_version_str @@ -3450,7 +3467,7 @@ def write(self, filename, *, clobber=False, data_compression="lzf"): polarization_array = self.polarization_array header["polarization_array"] = polarization_array - if not helpers._check_history_version( + if not utils.history._check_history_version( self.history, self.pyuvdata_version_str ): self.history += self.pyuvdata_version_str @@ -3587,7 +3604,7 @@ def from_uvdata( if waterfall: self._set_type_waterfall() self.history += 'Flag object with type "waterfall" created. 
' - if not helpers._check_history_version( + if not utils.history._check_history_version( self.history, self.pyuvdata_version_str ): self.history += self.pyuvdata_version_str @@ -3611,7 +3628,7 @@ def from_uvdata( else: self._set_type_baseline() self.history += 'Flag object with type "baseline" created. ' - if not helpers._check_history_version( + if not utils.history._check_history_version( self.history, self.pyuvdata_version_str ): self.history += self.pyuvdata_version_str @@ -3750,7 +3767,7 @@ def from_uvcal( if waterfall: self._set_type_waterfall() self.history += 'Flag object with type "waterfall" created. ' - if not helpers._check_history_version( + if not utils.history._check_history_version( self.history, self.pyuvdata_version_str ): self.history += self.pyuvdata_version_str @@ -3773,7 +3790,7 @@ def from_uvcal( else: self._set_type_antenna() self.history += 'Flag object with type "antenna" created. ' - if not helpers._check_history_version( + if not utils.history._check_history_version( self.history, self.pyuvdata_version_str ): self.history += self.pyuvdata_version_str diff --git a/tests/utils/file_io/test_hdf5.py b/tests/utils/file_io/test_hdf5.py index 3cd6e57174..c4b8fd573f 100644 --- a/tests/utils/file_io/test_hdf5.py +++ b/tests/utils/file_io/test_hdf5.py @@ -7,16 +7,16 @@ import pytest import pyuvdata.utils.file_io.hdf5 as hdf5_utils -from pyuvdata.utils import helpers +from pyuvdata import utils @pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values") def test_read_slicing(): """Test HDF5 slicing helper functions""" # check trivial slice representations - slices, _ = helpers._convert_to_slices([]) + slices, _ = utils.tools._convert_to_slices([]) assert slices == [slice(0, 0, 0)] - slices, _ = helpers._convert_to_slices(10) + slices, _ = utils.tools._convert_to_slices(10) assert slices == [slice(10, 11, 1)] # dataset shape checking @@ -28,7 +28,7 @@ def test_read_slicing(): # dataset indexing # check various kinds of indexing give the right answer - slices = [helpers._convert_to_slices(ind)[0] for ind in indices] + slices = [utils.tools._convert_to_slices(ind)[0] for ind in indices] slices[1] = 0 data = hdf5_utils._index_dset(dset, slices) assert data.shape == tuple(shape) @@ -37,9 +37,11 @@ def test_read_slicing(): bool_arr = np.zeros((10000,), dtype=bool) index_arr = np.arange(1, 10000, 2) bool_arr[index_arr] = True - assert helpers._convert_to_slices(bool_arr) == helpers._convert_to_slices(index_arr) - assert helpers._convert_to_slices(bool_arr, return_index_on_fail=True) == ( - helpers._convert_to_slices(index_arr, return_index_on_fail=True) + assert utils.tools._convert_to_slices(bool_arr) == utils.tools._convert_to_slices( + index_arr + ) + assert utils.tools._convert_to_slices(bool_arr, return_index_on_fail=True) == ( + utils.tools._convert_to_slices(index_arr, return_index_on_fail=True) ) # Index return on fail with two slices @@ -47,7 +49,7 @@ def test_read_slicing(): bool_arr[0:2] = [True, False] for item in [index_arr, bool_arr]: - result, check = helpers._convert_to_slices( + result, check = utils.tools._convert_to_slices( item, max_nslice=1, return_index_on_fail=True ) assert not check @@ -60,7 +62,7 @@ def test_read_slicing(): bool_arr[index_arr] = True for item in [index_arr, bool_arr]: - result, check = helpers._convert_to_slices(item, return_index_on_fail=True) + result, check = utils.tools._convert_to_slices(item, return_index_on_fail=True) assert not check assert len(result) == 1 assert result[0] is item diff --git 
a/tests/utils/test_bltaxis.py b/tests/utils/test_bltaxis.py new file mode 100644 index 0000000000..1a10dd1965 --- /dev/null +++ b/tests/utils/test_bltaxis.py @@ -0,0 +1,155 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Tests for baseline-time axis utility functions.""" + +import numpy as np +import pytest + +from pyuvdata import utils + + +@pytest.mark.parametrize( + "blt_order", + [ + ("time", "baseline"), + ("baseline", "time"), + ("ant1", "time"), + ("ant2", "time"), + ("time", "ant1"), + ("time", "ant2"), + ("baseline",), + ("time",), + ("ant1",), + ("ant2",), + (), + ([0, 2, 6, 4, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17]), + ], +) +def test_determine_blt_order(blt_order): + nant = 3 + ntime = 2 + + def getbl(ant1, ant2): + return utils.antnums_to_baseline(ant1, ant2, Nants_telescope=nant) + + def getantbls(): + # Arrange them backwards so by default they are NOT sorted + ant1 = np.arange(nant, dtype=int)[::-1] + ant2 = ant1.copy() + ANT1, ANT2 = np.meshgrid(ant1, ant2) + + return ANT1.flatten(), ANT2.flatten() + + def gettimebls(blt_order): + ant1, ant2 = getantbls() + time_array = np.linspace( + 2000, 1000, ntime + ) # backwards so not sorted by default + + TIME = np.tile(time_array, len(ant1)) + ANT1 = np.repeat(ant1, len(time_array)) + ANT2 = np.repeat(ant2, len(time_array)) + BASELINE = getbl(ANT1, ANT2) + + lc = locals() + if isinstance(blt_order, list): + inds = np.array(blt_order) + elif blt_order: + inds = np.lexsort(tuple(lc[k.upper()] for k in blt_order[::-1])) + else: + inds = np.arange(len(TIME)) + + return TIME[inds], ANT1[inds], ANT2[inds], BASELINE[inds] + + # time, bl + TIME, ANT1, ANT2, BL = gettimebls(blt_order) + order = utils.bltaxis.determine_blt_order( + time_array=TIME, + ant_1_array=ANT1, + ant_2_array=ANT2, + baseline_array=BL, + Nbls=nant**2, + Ntimes=ntime, + ) + if isinstance(blt_order, list): + assert order is None + elif blt_order: + assert order == blt_order + else: + assert order is None + + is_rect, time_first = utils.bltaxis.determine_rectangularity( + time_array=TIME, baseline_array=BL, nbls=nant**2, ntimes=ntime + ) + if blt_order in [("ant1", "time"), ("ant2", "time")]: + # sorting by ant1/ant2 then time means we split the other ant into a + # separate group + assert not is_rect + assert not time_first + elif isinstance(blt_order, list): + assert not is_rect + assert not time_first + else: + assert is_rect + assert time_first == ( + (len(blt_order) == 2 and blt_order[-1] == "time") + or (len(blt_order) == 1 and blt_order[0] != "time") + or not blt_order # we by default move time first (backwards, but still) + ) + + +def test_determine_blt_order_size_1(): + times = np.array([2458119.5]) + ant1 = np.array([0]) + ant2 = np.array([1]) + bl = utils.antnums_to_baseline(ant1, ant2, Nants_telescope=2) + + order = utils.bltaxis.determine_blt_order( + time_array=times, + ant_1_array=ant1, + ant_2_array=ant2, + baseline_array=bl, + Nbls=1, + Ntimes=1, + ) + assert order == ("baseline", "time") + is_rect, time_first = utils.bltaxis.determine_rectangularity( + time_array=times, baseline_array=bl, nbls=1, ntimes=1 + ) + assert is_rect + assert time_first + + +def test_determine_rect_time_first(): + times = np.linspace(2458119.5, 2458120.5, 10) + ant1 = np.arange(3) + ant2 = np.arange(3) + ANT1, ANT2 = np.meshgrid(ant1, ant2) + bls = utils.antnums_to_baseline(ANT1.flatten(), ANT2.flatten(), Nants_telescope=3) + + rng = np.random.default_rng(12345) + + TIME = 
np.tile(times, len(bls)) + BL = np.concatenate([rng.permuted(bls) for i in range(len(times))]) + + is_rect, time_first = utils.bltaxis.determine_rectangularity( + time_array=TIME, baseline_array=BL, nbls=9, ntimes=10 + ) + assert not is_rect + + # now, permute time instead of bls + TIME = np.concatenate([rng.permuted(times) for i in range(len(bls))]) + BL = np.tile(bls, len(times)) + is_rect, time_first = utils.bltaxis.determine_rectangularity( + time_array=TIME, baseline_array=BL, nbls=9, ntimes=10 + ) + assert not is_rect + + TIME = np.array([1000.0, 1000.0, 2000.0, 1000.0]) + BLS = np.array([0, 0, 1, 0]) + + is_rect, time_first = utils.bltaxis.determine_rectangularity( + time_array=TIME, baseline_array=BLS, nbls=2, ntimes=2 + ) + assert not is_rect diff --git a/tests/utils/test_coordinates.py b/tests/utils/test_coordinates.py index 1326f7ae34..09d9dae1bc 100644 --- a/tests/utils/test_coordinates.py +++ b/tests/utils/test_coordinates.py @@ -12,6 +12,7 @@ from pyuvdata import utils from pyuvdata.data import DATA_PATH +from pyuvdata.testing import check_warnings from pyuvdata.utils.coordinates import hasmoon selenoids = ["SPHERE", "GSFC", "GRAIL23", "CE-1-LAM-GEO"] @@ -747,3 +748,79 @@ def test_mwa_ecef_conversion(): # test other direction of ECEF rotation rot_xyz = utils.rotECEF_from_ECEF(new_xyz, lon) np.testing.assert_allclose(rot_xyz.T, xyz) + + +@pytest.mark.parametrize("err_state", ["err", "warn", "none"]) +@pytest.mark.parametrize("tel_loc", ["Center", "Moon", "Earth", "Space"]) +@pytest.mark.parametrize("check_frame", ["Moon", "Earth"]) +@pytest.mark.parametrize("del_tel_loc", [False, None, True]) +def test_check_surface_based_positions(err_state, tel_loc, check_frame, del_tel_loc): + tel_loc_dict = { + "Center": np.array([0, 0, 0]), + "Moon": np.array([0, 0, 1.737e6]), + "Earth": np.array([0, 6.37e6, 0]), + "Space": np.array([4.22e7, 0, 0]), + } + tel_frame_dict = {"Moon": "mcmf", "Earth": "itrs"} + + ant_pos = np.array( + [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]] + ) + if del_tel_loc: + ant_pos += tel_loc_dict[tel_loc] + + fail_type = err_msg = err_type = None + err_check = check_warnings + if (tel_loc != check_frame) and (err_state != "none"): + if tel_loc == "Center": + fail_type = "below" + elif tel_loc == "Space": + fail_type = "above" + else: + fail_type = "above" if tel_loc == "Earth" else "below" + + if fail_type is not None: + err_msg = ( + f"{tel_frame_dict[check_frame]} position vector magnitudes must be " + f"on the order of the radius of {check_frame} -- they appear to lie well " + f"{fail_type} this." 
+ ) + if err_state == "err": + err_type = ValueError + err_check = pytest.raises + else: + err_type = UserWarning + + with err_check(err_type, match=err_msg): + status = utils.coordinates.check_surface_based_positions( + telescope_loc=None if (del_tel_loc) else tel_loc_dict[tel_loc], + antenna_positions=None if (del_tel_loc is None) else ant_pos, + telescope_frame=tel_frame_dict[check_frame], + raise_error=err_state == "err", + raise_warning=err_state == "warn", + ) + + assert (err_state == "err") or (status == (tel_loc == check_frame)) + + +@pytest.mark.skipif(not hasmoon, reason="lunarsky not installed") +@pytest.mark.parametrize("tel_loc", ["Earth", "Moon"]) +@pytest.mark.parametrize("check_frame", ["Earth", "Moon"]) +def test_check_surface_based_positions_earthmoonloc(tel_loc, check_frame): + frame = "mcmf" if (check_frame == "Moon") else "itrs" + + if tel_loc == "Earth": + loc = EarthLocation.from_geodetic(0, 0, 0) + else: + loc = MoonLocation.from_selenodetic(0, 0, 0) + + if tel_loc == check_frame: + assert utils.coordinates.check_surface_based_positions( + telescope_loc=loc, telescope_frame=frame + ) + else: + with pytest.raises(ValueError, match=(f"{frame} position vector")): + utils.coordinates.check_surface_based_positions( + telescope_loc=[loc.x.value, loc.y.value, loc.z.value], + telescope_frame=frame, + ) diff --git a/tests/utils/test_helpers.py b/tests/utils/test_helpers.py deleted file mode 100644 index 787d5b5f16..0000000000 --- a/tests/utils/test_helpers.py +++ /dev/null @@ -1,294 +0,0 @@ -# -*- mode: python; coding: utf-8 -*- -# Copyright (c) 2024 Radio Astronomy Software Group -# Licensed under the 2-clause BSD License -"""Tests for helper utility functions.""" - -import numpy as np -import pytest -from astropy.coordinates import EarthLocation - -from pyuvdata import utils -from pyuvdata.testing import check_warnings -from pyuvdata.utils import helpers - -from .test_coordinates import hasmoon - -if hasmoon: - from lunarsky import MoonLocation - - -@pytest.mark.parametrize( - "filename1,filename2,answer", - [ - (["foo.uvh5"], ["bar.uvh5"], ["foo.uvh5", "bar.uvh5"]), - (["foo.uvh5", "bar.uvh5"], ["foo.uvh5"], ["foo.uvh5", "bar.uvh5"]), - (["foo.uvh5"], None, ["foo.uvh5"]), - (None, ["bar.uvh5"], ["bar.uvh5"]), - (None, None, None), - ], -) -def test_combine_filenames(filename1, filename2, answer): - combined_filenames = helpers._combine_filenames(filename1, filename2) - if answer is None: - assert combined_filenames is answer - else: - # use sets to test equality so that order doesn't matter - assert set(combined_filenames) == set(answer) - - return - - -def test_deprecated_utils_import(): - with check_warnings( - DeprecationWarning, - match="The _check_histories function has moved, please import it from " - "pyuvdata.utils.helpers. 
This warnings will become an error in version 3.2", - ): - utils._check_histories("foo", "foo") - - -@pytest.mark.parametrize( - "blt_order", - [ - ("time", "baseline"), - ("baseline", "time"), - ("ant1", "time"), - ("ant2", "time"), - ("time", "ant1"), - ("time", "ant2"), - ("baseline",), - ("time",), - ("ant1",), - ("ant2",), - (), - ([0, 2, 6, 4, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17]), - ], -) -def test_determine_blt_order(blt_order): - nant = 3 - ntime = 2 - - def getbl(ant1, ant2): - return utils.antnums_to_baseline(ant1, ant2, Nants_telescope=nant) - - def getantbls(): - # Arrange them backwards so by default they are NOT sorted - ant1 = np.arange(nant, dtype=int)[::-1] - ant2 = ant1.copy() - ANT1, ANT2 = np.meshgrid(ant1, ant2) - - return ANT1.flatten(), ANT2.flatten() - - def gettimebls(blt_order): - ant1, ant2 = getantbls() - time_array = np.linspace( - 2000, 1000, ntime - ) # backwards so not sorted by default - - TIME = np.tile(time_array, len(ant1)) - ANT1 = np.repeat(ant1, len(time_array)) - ANT2 = np.repeat(ant2, len(time_array)) - BASELINE = getbl(ANT1, ANT2) - - lc = locals() - if isinstance(blt_order, list): - inds = np.array(blt_order) - elif blt_order: - inds = np.lexsort(tuple(lc[k.upper()] for k in blt_order[::-1])) - else: - inds = np.arange(len(TIME)) - - return TIME[inds], ANT1[inds], ANT2[inds], BASELINE[inds] - - # time, bl - TIME, ANT1, ANT2, BL = gettimebls(blt_order) - order = helpers.determine_blt_order( - time_array=TIME, - ant_1_array=ANT1, - ant_2_array=ANT2, - baseline_array=BL, - Nbls=nant**2, - Ntimes=ntime, - ) - if isinstance(blt_order, list): - assert order is None - elif blt_order: - assert order == blt_order - else: - assert order is None - - is_rect, time_first = helpers.determine_rectangularity( - time_array=TIME, baseline_array=BL, nbls=nant**2, ntimes=ntime - ) - if blt_order in [("ant1", "time"), ("ant2", "time")]: - # sorting by ant1/ant2 then time means we split the other ant into a - # separate group - assert not is_rect - assert not time_first - elif isinstance(blt_order, list): - assert not is_rect - assert not time_first - else: - assert is_rect - assert time_first == ( - (len(blt_order) == 2 and blt_order[-1] == "time") - or (len(blt_order) == 1 and blt_order[0] != "time") - or not blt_order # we by default move time first (backwards, but still) - ) - - -def test_determine_blt_order_size_1(): - times = np.array([2458119.5]) - ant1 = np.array([0]) - ant2 = np.array([1]) - bl = utils.antnums_to_baseline(ant1, ant2, Nants_telescope=2) - - order = helpers.determine_blt_order( - time_array=times, - ant_1_array=ant1, - ant_2_array=ant2, - baseline_array=bl, - Nbls=1, - Ntimes=1, - ) - assert order == ("baseline", "time") - is_rect, time_first = helpers.determine_rectangularity( - time_array=times, baseline_array=bl, nbls=1, ntimes=1 - ) - assert is_rect - assert time_first - - -def test_determine_rect_time_first(): - times = np.linspace(2458119.5, 2458120.5, 10) - ant1 = np.arange(3) - ant2 = np.arange(3) - ANT1, ANT2 = np.meshgrid(ant1, ant2) - bls = utils.antnums_to_baseline(ANT1.flatten(), ANT2.flatten(), Nants_telescope=3) - - rng = np.random.default_rng(12345) - - TIME = np.tile(times, len(bls)) - BL = np.concatenate([rng.permuted(bls) for i in range(len(times))]) - - is_rect, time_first = helpers.determine_rectangularity( - time_array=TIME, baseline_array=BL, nbls=9, ntimes=10 - ) - assert not is_rect - - # now, permute time instead of bls - TIME = np.concatenate([rng.permuted(times) for i in range(len(bls))]) - BL = 
np.tile(bls, len(times)) - is_rect, time_first = helpers.determine_rectangularity( - time_array=TIME, baseline_array=BL, nbls=9, ntimes=10 - ) - assert not is_rect - - TIME = np.array([1000.0, 1000.0, 2000.0, 1000.0]) - BLS = np.array([0, 0, 1, 0]) - - is_rect, time_first = helpers.determine_rectangularity( - time_array=TIME, baseline_array=BLS, nbls=2, ntimes=2 - ) - assert not is_rect - - -@pytest.mark.parametrize("err_state", ["err", "warn", "none"]) -@pytest.mark.parametrize("tel_loc", ["Center", "Moon", "Earth", "Space"]) -@pytest.mark.parametrize("check_frame", ["Moon", "Earth"]) -@pytest.mark.parametrize("del_tel_loc", [False, None, True]) -def test_check_surface_based_positions(err_state, tel_loc, check_frame, del_tel_loc): - tel_loc_dict = { - "Center": np.array([0, 0, 0]), - "Moon": np.array([0, 0, 1.737e6]), - "Earth": np.array([0, 6.37e6, 0]), - "Space": np.array([4.22e7, 0, 0]), - } - tel_frame_dict = {"Moon": "mcmf", "Earth": "itrs"} - - ant_pos = np.array( - [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]] - ) - if del_tel_loc: - ant_pos += tel_loc_dict[tel_loc] - - fail_type = err_msg = err_type = None - err_check = check_warnings - if (tel_loc != check_frame) and (err_state != "none"): - if tel_loc == "Center": - fail_type = "below" - elif tel_loc == "Space": - fail_type = "above" - else: - fail_type = "above" if tel_loc == "Earth" else "below" - - if fail_type is not None: - err_msg = ( - f"{tel_frame_dict[check_frame]} position vector magnitudes must be " - f"on the order of the radius of {check_frame} -- they appear to lie well " - f"{fail_type} this." - ) - if err_state == "err": - err_type = ValueError - err_check = pytest.raises - else: - err_type = UserWarning - - with err_check(err_type, match=err_msg): - status = helpers.check_surface_based_positions( - telescope_loc=None if (del_tel_loc) else tel_loc_dict[tel_loc], - antenna_positions=None if (del_tel_loc is None) else ant_pos, - telescope_frame=tel_frame_dict[check_frame], - raise_error=err_state == "err", - raise_warning=err_state == "warn", - ) - - assert (err_state == "err") or (status == (tel_loc == check_frame)) - - -@pytest.mark.skipif(not hasmoon, reason="lunarsky not installed") -@pytest.mark.parametrize("tel_loc", ["Earth", "Moon"]) -@pytest.mark.parametrize("check_frame", ["Earth", "Moon"]) -def test_check_surface_based_positions_earthmoonloc(tel_loc, check_frame): - frame = "mcmf" if (check_frame == "Moon") else "itrs" - - if tel_loc == "Earth": - loc = EarthLocation.from_geodetic(0, 0, 0) - else: - loc = MoonLocation.from_selenodetic(0, 0, 0) - - if tel_loc == check_frame: - assert helpers.check_surface_based_positions( - telescope_loc=loc, telescope_frame=frame - ) - else: - with pytest.raises(ValueError, match=(f"{frame} position vector")): - helpers.check_surface_based_positions( - telescope_loc=[loc.x.value, loc.y.value, loc.z.value], - telescope_frame=frame, - ) - - -def test_slicify(): - assert helpers.slicify(None) is None - assert helpers.slicify(slice(None)) == slice(None) - assert helpers.slicify([]) is None - assert helpers.slicify([1, 2, 3]) == slice(1, 4, 1) - assert helpers.slicify([1]) == slice(1, 2, 1) - assert helpers.slicify([0, 2, 4]) == slice(0, 5, 2) - assert helpers.slicify([0, 1, 2, 7]) == [0, 1, 2, 7] - - -@pytest.mark.parametrize( - "obj1,obj2,union_result,interset_result,diff_result", - [ - [[1, 2, 3], [3, 4, 5], [1, 2, 3, 4, 5], [3], [1, 2]], # Partial overlap - [[1, 2], [1, 2], [1, 2], [1, 2], []], # Full overlap - [[1, 3, 5], [2, 4, 6], [1, 2, 3, 4, 5, 
6], [], [1, 3, 5]], # No overlap - [[1, 2], None, [1, 2], [1, 2], [1, 2]], # Nones - ], -) -def test_sorted_unique_ops(obj1, obj2, union_result, interset_result, diff_result): - assert helpers._sorted_unique_union(obj1, obj2) == union_result - assert helpers._sorted_unique_intersection(obj1, obj2) == interset_result - assert helpers._sorted_unique_difference(obj1, obj2) == diff_result
diff --git a/tests/utils/test_ps_cat.py b/tests/utils/test_phase_center_catalog.py similarity index 90% rename from tests/utils/test_ps_cat.py rename to tests/utils/test_phase_center_catalog.py index 7e0c94010f..a8dc084970 100644 --- a/tests/utils/test_ps_cat.py +++ b/tests/utils/test_phase_center_catalog.py @@ -5,7 +5,7 @@ import pytest -import pyuvdata.utils.ps_cat as ps_cat_utils +import pyuvdata.utils.phase_center_catalog as ps_cat_utils def test_generate_new_phase_center_id_errs():
diff --git a/tests/utils/test_lst.py b/tests/utils/test_times.py similarity index 98% rename from tests/utils/test_lst.py rename to tests/utils/test_times.py index 447de98550..e1f826e826 100644 --- a/tests/utils/test_lst.py +++ b/tests/utils/test_times.py @@ -1,7 +1,7 @@ # -*- mode: python; coding: utf-8 -*- # Copyright (c) 2024 Radio Astronomy Software Group # Licensed under the 2-clause BSD License -"""Tests for LST utility functions.""" +"""Tests for time related utility functions.""" import numpy as np import pytest @@ -9,7 +9,7 @@ from astropy.coordinates import EarthLocation from pyuvdata import utils -from pyuvdata.utils.lst import hasmoon +from pyuvdata.utils.times import hasmoon if hasmoon: from lunarsky import MoonLocation
diff --git a/tests/utils/test_tools.py b/tests/utils/test_tools.py new file mode 100644 index 0000000000..2969dd0af0 --- /dev/null +++ b/tests/utils/test_tools.py @@ -0,0 +1,64 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Tests for tools utility functions.""" + +import pytest + +from pyuvdata import utils +from pyuvdata.testing import check_warnings + + +@pytest.mark.parametrize( + "filename1,filename2,answer", + [ + (["foo.uvh5"], ["bar.uvh5"], ["foo.uvh5", "bar.uvh5"]), + (["foo.uvh5", "bar.uvh5"], ["foo.uvh5"], ["foo.uvh5", "bar.uvh5"]), + (["foo.uvh5"], None, ["foo.uvh5"]), + (None, ["bar.uvh5"], ["bar.uvh5"]), + (None, None, None), + ], +) +def test_combine_filenames(filename1, filename2, answer): + combined_filenames = utils.tools._combine_filenames(filename1, filename2) + if answer is None: + assert combined_filenames is answer + else: + # use sets to test equality so that order doesn't matter + assert set(combined_filenames) == set(answer) + + return + + +def test_slicify(): + assert utils.tools.slicify(None) is None + assert utils.tools.slicify(slice(None)) == slice(None) + assert utils.tools.slicify([]) is None + assert utils.tools.slicify([1, 2, 3]) == slice(1, 4, 1) + assert utils.tools.slicify([1]) == slice(1, 2, 1) + assert utils.tools.slicify([0, 2, 4]) == slice(0, 5, 2) + assert utils.tools.slicify([0, 1, 2, 7]) == [0, 1, 2, 7] + + +@pytest.mark.parametrize( + "obj1,obj2,union_result,intersect_result,diff_result", + [ + [[1, 2, 3], [3, 4, 5], [1, 2, 3, 4, 5], [3], [1, 2]], # Partial overlap + [[1, 2], [1, 2], [1, 2], [1, 2], []], # Full overlap + [[1, 3, 5], [2, 4, 6], [1, 2, 3, 4, 5, 6], [], [1, 3, 5]], # No overlap + [[1, 2], None, [1, 2], [1, 2], [1, 2]], # Nones + ], +) +def test_sorted_unique_ops(obj1, obj2, union_result, intersect_result, diff_result): + assert utils.tools._sorted_unique_union(obj1, obj2) == union_result + assert utils.tools._sorted_unique_intersection(obj1, obj2) == intersect_result + assert utils.tools._sorted_unique_difference(obj1, obj2) == diff_result + + +def test_deprecated_utils_import(): + with check_warnings( + DeprecationWarning, + match="The _check_histories function has moved, please import it from " + "pyuvdata.utils.history. This warnings will become an error in version 3.2", + ): + utils._check_histories("foo", "foo")
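[Editor's aside, not part of the patch: a minimal sketch of the relocated tools helpers that the new test_tools.py above exercises. Every call and expected value below is taken directly from the test's assertions; the only assumption is this branch's pyuvdata.utils.tools module path.]

from pyuvdata import utils

# slicify collapses a sorted, evenly spaced index list into an equivalent slice,
# and hands back the original list when no single slice can represent it.
assert utils.tools.slicify([0, 2, 4]) == slice(0, 5, 2)
assert utils.tools.slicify([0, 1, 2, 7]) == [0, 1, 2, 7]

# The sorted-unique set operations take two lists (either may be None) and
# return sorted lists for the union, intersection and difference.
assert utils.tools._sorted_unique_union([1, 2, 3], [3, 4, 5]) == [1, 2, 3, 4, 5]
assert utils.tools._sorted_unique_intersection([1, 2, 3], [3, 4, 5]) == [3]
assert utils.tools._sorted_unique_difference([1, 2, 3], [3, 4, 5]) == [1, 2]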
diff --git a/tests/uvbeam/test_beamfits.py b/tests/uvbeam/test_beamfits.py index b527c569b9..343c4f0758 100644 --- a/tests/uvbeam/test_beamfits.py +++ b/tests/uvbeam/test_beamfits.py @@ -748,7 +748,7 @@ def test_multi_files(cst_efield_2freq, tmp_path): beam2.write_beamfits(testfile2, clobber=True) beam1.read_beamfits([testfile1, testfile2]) # Check history is correct, before replacing and doing a full object check - assert utils.helpers._check_histories( + assert utils.history._check_histories( beam_full.history + " Downselected " "to specific frequencies using pyuvdata. " "Combined data along frequency axis using"
diff --git a/tests/uvbeam/test_mwa_beam.py b/tests/uvbeam/test_mwa_beam.py index 96534ca69f..4f08286534 100644 --- a/tests/uvbeam/test_mwa_beam.py +++ b/tests/uvbeam/test_mwa_beam.py @@ -159,4 +159,4 @@ def test_dead_dipoles(): + gain_str + beam1.pyuvdata_version_str ) - assert utils.helpers._check_histories(history_str, beam1.history) + assert utils.history._check_histories(history_str, beam1.history)
diff --git a/tests/uvbeam/test_uvbeam.py b/tests/uvbeam/test_uvbeam.py index cc6779db8f..04e24f15b1 100644 --- a/tests/uvbeam/test_uvbeam.py +++ b/tests/uvbeam/test_uvbeam.py @@ -20,7 +20,6 @@ from pyuvdata import UVBeam, _uvbeam, utils from pyuvdata.data import DATA_PATH from pyuvdata.testing import check_warnings -from pyuvdata.utils import helpers from .test_cst_beam import cst_files, cst_yaml_file from .test_mwa_beam import filename as mwa_beam_file @@ -1602,7 +1601,7 @@ def test_select_axis(cst_power_1freq, tmp_path): for i in np.unique(power_beam2.axis1_array): assert i in power_beam.axis1_array - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to " "specific parts of first image axis " "using pyuvdata.", @@ -1644,7 +1643,7 @@ def test_select_axis(cst_power_1freq, tmp_path): for i in np.unique(power_beam2.axis2_array): assert i in power_beam.axis2_array - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to " "specific parts of second image axis " "using pyuvdata.", @@ -1702,7 +1701,7 @@ def test_select_frequencies( for f in np.unique(beam2.freq_array): assert f in freqs_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", beam2.history, ) @@ -1745,7 +1744,7 @@ def test_select_frequencies( for f in np.unique(beam2.freq_array): assert f in beam.freq_array[chans_to_keep] - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", beam2.history, ) @@ -1794,7 +1793,7 @@ def test_select_feeds(antenna_type, cst_efield_1freq, phased_array_beam_2freq): for f in np.unique(efield_beam2.feed_array): assert f in feeds_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific feeds using pyuvdata.", efield_beam2.history, ) @@ -1870,7 +1869,7 
@@ def test_select_polarizations(pols_to_keep, cst_efield_1freq): pols_to_keep, x_orientation=power_beam2.x_orientation ) - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific polarizations using pyuvdata.", power_beam2.history, ) @@ -1975,7 +1974,7 @@ def test_select(beam_type, cst_power_1freq, cst_efield_1freq): for f in np.unique(beam2.feed_array): assert f in feeds_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to " "specific parts of first image axis, " "parts of second image axis, " @@ -1989,7 +1988,7 @@ def test_select(beam_type, cst_power_1freq, cst_efield_1freq): for p in np.unique(beam2.polarization_array): assert p in pols_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to " "specific parts of first image axis, " "parts of second image axis, " @@ -2006,7 +2005,7 @@ def test_add_axis1(power_beam_for_adding): beam2 = power_beam.select(axis1_inds=np.arange(180, 360), inplace=False) beam1 += beam2 # Check history is correct, before replacing and doing a full object check - assert helpers._check_histories( + assert utils.history._check_histories( power_beam.history + " Downselected to specific parts of " "first image axis using pyuvdata. " "Combined data along first image axis " @@ -2032,7 +2031,7 @@ def test_add_axis2(power_beam_for_adding): beam2 = power_beam.select(axis2_inds=np.arange(90, 181), inplace=False) beam1 += beam2 # Check history is correct, before replacing and doing a full object check - assert helpers._check_histories( + assert utils.history._check_histories( power_beam.history + " Downselected to specific parts of " "second image axis using pyuvdata. " "Combined data along second image axis " @@ -2058,7 +2057,7 @@ def test_add_frequencies(power_beam_for_adding): beam2 = power_beam.select(freq_chans=1, inplace=False) beam1 += beam2 # Check history is correct, before replacing and doing a full object check - assert helpers._check_histories( + assert utils.history._check_histories( power_beam.history + " Downselected to specific frequencies " "using pyuvdata. Combined data along " "frequency axis using pyuvdata.", @@ -2082,7 +2081,7 @@ def test_add_pols(power_beam_for_adding): beam1 = power_beam.select(polarizations=-5, inplace=False) beam2 = power_beam.select(polarizations=-6, inplace=False) beam1 += beam2 - assert helpers._check_histories( + assert utils.history._check_histories( power_beam.history + " Downselected to specific polarizations " "using pyuvdata. Combined data along " "polarization axis using pyuvdata.", @@ -2120,7 +2119,7 @@ def test_add_feeds(antenna_type, efield_beam_for_adding, phased_array_beam_2freq with check_warnings(expected_warning, match=warn_msg): beam2 = efield_beam.select(feeds=efield_beam.feed_array[1], inplace=False) beam1 += beam2 - assert helpers._check_histories( + assert utils.history._check_histories( efield_beam.history + " Downselected to specific feeds " "using pyuvdata. Combined data along " "feed axis using pyuvdata.", @@ -2166,7 +2165,7 @@ def test_add_multi_power(power_beam_for_adding): inplace=False, ) beam1 += beam2 - assert helpers._check_histories( + assert utils.history._check_histories( power_beam.history + " Downselected to specific parts of " "first image axis, polarizations using " "pyuvdata. 
Combined data along first " @@ -2196,7 +2195,7 @@ def test_add_multi_efield(efield_beam_for_adding): inplace=False, ) beam1 += beam2 - assert helpers._check_histories( + assert utils.history._check_histories( efield_beam.history + " Downselected to specific parts of " "first image axis, parts of second " "image axis using pyuvdata. Combined " @@ -2273,7 +2272,7 @@ def test_add_cross_power(cross_power_beam_for_adding, use_double): beam2.history += " testing the history. Read/written with pyuvdata" new_beam = beam1 + beam2 - assert helpers._check_histories( + assert utils.history._check_histories( power_beam.history + " Downselected to specific polarizations using pyuvdata. " "Combined data along polarization axis using pyuvdata. Unique part of next " "object history follows. testing the history.", @@ -2283,7 +2282,7 @@ def test_add_cross_power(cross_power_beam_for_adding, use_double): assert new_beam == power_beam new_beam = beam1.__add__(beam2, verbose_history=True) - assert helpers._check_histories( + assert utils.history._check_histories( power_beam.history + " Downselected to specific polarizations using pyuvdata. " "Combined data along polarization axis using pyuvdata. Next object history " "follows. " + beam2.history, @@ -2363,7 +2362,7 @@ def test_select_healpix_pixels( for pi in np.unique(beam_healpix2.pixel_array): assert pi in pixels_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific healpix pixels using pyuvdata.", beam_healpix2.history, ) @@ -2453,7 +2452,7 @@ def test_select_healpix_pixels( else: history_add = "polarizations" - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to " "specific healpix pixels, frequencies, " f"{history_add} using pyuvdata.", @@ -2498,7 +2497,7 @@ def test_add_healpix( inplace=False, ) beam1 += beam2 - assert helpers._check_histories( + assert utils.history._check_histories( beam_healpix.history + " Downselected to specific healpix " "pixels, frequencies using pyuvdata. " "Combined data along healpix pixel, " @@ -2521,7 +2520,7 @@ def test_add_healpix( freq_chans=1, feeds=beam_healpix.feed_array[1], inplace=False ) beam1 += beam2 - assert helpers._check_histories( + assert utils.history._check_histories( beam_healpix.history + " Downselected to specific frequencies, " "feeds using pyuvdata. Combined data " "along frequency, feed axis using pyuvdata.", @@ -2541,7 +2540,7 @@ def test_add_healpix( pixels=beam_healpix.pixel_array[beam_healpix.Npixels // 2 :], inplace=False ) beam1 = beam1 + beam2 - assert helpers._check_histories( + assert utils.history._check_histories( beam_healpix.history + " Downselected to specific healpix pixels " "using pyuvdata. Combined data " "along healpix pixel axis using pyuvdata.", diff --git a/tests/uvcal/test_uvcal.py b/tests/uvcal/test_uvcal.py index 621df235ed..feb8043dfe 100644 --- a/tests/uvcal/test_uvcal.py +++ b/tests/uvcal/test_uvcal.py @@ -19,7 +19,6 @@ from pyuvdata import UVCal, utils from pyuvdata.data import DATA_PATH from pyuvdata.testing import check_warnings -from pyuvdata.utils import helpers from . 
import extend_jones_axis, time_array_to_time_range @@ -728,7 +727,7 @@ def test_select_antennas(caltype, gain_data, delay_data, tmp_path): for ant in calobj2.ant_array: assert ant in ants_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific antennas using pyuvdata.", calobj2.history, ) @@ -825,7 +824,7 @@ def test_select_times(caltype, time_range, gain_data, delay_data, tmp_path): for t in np.unique(calobj2.time_array): assert t in times_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific times using pyuvdata.", calobj2.history, ) @@ -919,7 +918,7 @@ def test_select_frequencies(gain_data, tmp_path): for f in np.unique(calobj2.freq_array): assert f in freqs_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", calobj2.history, ) @@ -984,7 +983,7 @@ def test_select_frequencies_multispw(multi_spw_gain, tmp_path): for f in np.unique(calobj2.freq_array): assert f in freqs_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", calobj2.history, ) @@ -1056,7 +1055,7 @@ def test_select_freq_chans(gain_data): for f in np.unique(calobj2.freq_array): assert f in obj_freqs - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", calobj2.history, ) @@ -1104,7 +1103,7 @@ def test_select_spws_wideband(caltype, multi_spw_delay, wideband_gain, tmp_path) for spw in calobj2.spw_array: assert spw in spws_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific spectral windows using pyuvdata.", calobj2.history, ) @@ -1157,7 +1156,7 @@ def test_select_polarizations(caltype, jones_to_keep, gain_data, delay_data, tmp jones_to_keep, x_orientation=calobj2.telescope.x_orientation ) - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to " "specific jones polarization terms " "using pyuvdata.", @@ -1268,7 +1267,7 @@ def test_select(caltype, gain_data, delay_data): assert j in calobj2.jones_array for j in np.unique(calobj2.jones_array): assert j in jones_to_keep - assert helpers._check_histories(expected_history, calobj2.history) + assert utils.history._check_histories(expected_history, calobj2.history) @pytest.mark.parametrize("caltype", ["gain", "delay"]) @@ -1318,7 +1317,7 @@ def test_select_wideband(caltype, multi_spw_delay, wideband_gain): for j in np.unique(calobj2.jones_array): assert j in jones_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to " "specific antennas, times, " "spectral windows, jones polarization terms " @@ -1350,7 +1349,7 @@ def test_add_antennas(caltype, gain_data, method, delay_data): getattr(calobj, method)(calobj2, **kwargs) # Check history is correct, before replacing and doing a full object check - assert helpers._check_histories( + assert utils.history._check_histories( calobj_full.history + " Downselected to specific " "antennas using pyuvdata. 
Combined " "data along antenna axis using pyuvdata.", @@ -1889,7 +1888,7 @@ def test_add_antennas_multispw(multi_spw_gain, quality, method): getattr(calobj, method)(calobj2, **kwargs) # Check history is correct, before replacing and doing a full object check - assert helpers._check_histories( + assert utils.history._check_histories( calobj_full.history + " Downselected to specific " "antennas using pyuvdata. Combined " "data along antenna axis using pyuvdata.", @@ -1920,7 +1919,7 @@ def test_add_frequencies(gain_data, method): getattr(calobj, method)(calobj2, **kwargs) # Check history is correct, before replacing and doing a full object check - assert helpers._check_histories( + assert utils.history._check_histories( calobj_full.history + " Downselected to specific " "frequencies using pyuvdata. Combined " "data along frequency axis using pyuvdata.", @@ -2059,7 +2058,7 @@ def test_add_frequencies_multispw(split_f_ind, method, multi_spw_gain): calobj_sum = getattr(calobj, method)(calobj2, **kwargs) # Check history is correct, before replacing and doing a full object check - assert helpers._check_histories( + assert utils.history._check_histories( calobj_full.history + " Downselected to specific " "frequencies using pyuvdata. Combined " "data along frequency axis using pyuvdata.", @@ -2086,7 +2085,7 @@ def test_add_frequencies_multispw(split_f_ind, method, multi_spw_gain): calobj_sum = calobj2 + calobj # Check history is correct, before replacing and doing a full object check - assert helpers._check_histories( + assert utils.history._check_histories( calobj_full.history + " Downselected to specific " "frequencies using pyuvdata. Combined " "data along frequency axis using pyuvdata.", @@ -2160,20 +2159,20 @@ def test_add_spw_wideband(axis, caltype, method, multi_spw_delay, wideband_gain) # Check history is correct, before replacing and doing a full object check if axis == "multi": - assert helpers._check_histories( + assert utils.history._check_histories( calobj_full.history + " Downselected to specific antennas, spectral " "windows using pyuvdata. Combined data along antenna, spectral window axis " "using pyuvdata.", calobj3.history, ) elif axis == "spw": - assert helpers._check_histories( + assert utils.history._check_histories( calobj_full.history + " Downselected to specific spectral windows using " "pyuvdata. Combined data along spectral window axis using pyuvdata.", calobj3.history, ) elif axis == "antenna": - assert helpers._check_histories( + assert utils.history._check_histories( calobj_full.history + " Downselected to specific antennas using pyuvdata. " "Combined data along antenna axis using pyuvdata.", calobj3.history, @@ -2191,20 +2190,20 @@ def test_add_spw_wideband(axis, caltype, method, multi_spw_delay, wideband_gain) # Check history is correct, before replacing and doing a full object check if axis == "multi": - assert helpers._check_histories( + assert utils.history._check_histories( calobj_full.history + " Downselected to specific antennas, spectral " "windows using pyuvdata. Combined data along antenna, spectral window axis " "using pyuvdata.", calobj3.history, ) elif axis == "spw": - assert helpers._check_histories( + assert utils.history._check_histories( calobj_full.history + " Downselected to specific spectral windows using " "pyuvdata. 
Combined data along spectral window axis using pyuvdata.", calobj3.history, ) elif axis == "antenna": - assert helpers._check_histories( + assert utils.history._check_histories( calobj_full.history + " Downselected to specific antennas using pyuvdata. " "Combined data along antenna axis using pyuvdata.", calobj3.history, @@ -2261,7 +2260,7 @@ def test_add_times(caltype, time_range, method, gain_data, delay_data): with check_warnings(add_warn, match=add_msg): getattr(calobj, method)(calobj2, **kwargs) # Check history is correct, before replacing and doing a full object check - assert helpers._check_histories( + assert utils.history._check_histories( calobj_full.history + " Downselected to specific " "times using pyuvdata. Combined " "data along time axis using pyuvdata.", @@ -2390,7 +2389,7 @@ def test_add_times_multispw(method, multi_spw_gain, quality): kwargs = {} getattr(calobj, method)(calobj2, **kwargs) # Check history is correct, before replacing and doing a full object check - assert helpers._check_histories( + assert utils.history._check_histories( calobj_full.history + " Downselected to specific " "times using pyuvdata. Combined " "data along time axis using pyuvdata.", @@ -2560,7 +2559,7 @@ def test_add(caltype, method, gain_data, delay_data): calobj_add = getattr(calobj, method)(calobj2, **kwargs) # Check history is correct, before replacing and doing a full object check - assert helpers._check_histories( + assert utils.history._check_histories( calobj_original.history + " Downselected to specific " "antennas using pyuvdata. Combined " "data along antenna axis using pyuvdata.", @@ -2578,7 +2577,7 @@ def test_add(caltype, method, gain_data, delay_data): new_cal = getattr(calobj, method)(calobj2, **kwargs) additional_history = "Some random history string" - assert helpers._check_histories( + assert utils.history._check_histories( calobj_original.history + " Combined data along antenna axis " "using pyuvdata. Unique part of next object history follows. " + additional_history, @@ -2587,7 +2586,7 @@ def test_add(caltype, method, gain_data, delay_data): kwargs["verbose_history"] = True new_cal = getattr(calobj, method)(calobj2, **kwargs) - assert helpers._check_histories( + assert utils.history._check_histories( calobj_original.history + " Combined data along antenna axis " "using pyuvdata. Next object history follows. " + calobj2.history, new_cal.history, @@ -3060,7 +3059,7 @@ def test_multi_files(caltype, method, gain_data, delay_data, tmp_path, file_type calobj.read([f1, f2]) - assert helpers._check_histories( + assert utils.history._check_histories( calobj_full.history + " Downselected to specific times" " using pyuvdata. Combined data " "along time axis using pyuvdata.", @@ -3349,7 +3348,7 @@ def test_init_from_uvdata(multi_spw, uvcalibrate_data): ) uvc_new.telescope.antenna_positions = uvc2.telescope.antenna_positions - assert helpers._check_histories( + assert utils.history._check_histories( uvc_new.history[:200], ( "Initialized from a UVData object with pyuvdata." @@ -3419,7 +3418,7 @@ def test_init_from_uvdata_setfreqs(multi_spw, uvcalibrate_data): ) uvc_new.telescope.antenna_positions = uvc2.telescope.antenna_positions - assert helpers._check_histories( + assert utils.history._check_histories( uvc_new.history[:200], ( "Initialized from a UVData object with pyuvdata." 
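[Editor's aside, not part of the patch: the recurring mechanical change in these hunks is the move from utils.helpers._check_histories to utils.history._check_histories. A minimal sketch of the new and the deprecated access patterns, grounded in the hunks here and in test_deprecated_utils_import above; only the trivial identical-string comparison is assumed to pass.]

import warnings

from pyuvdata import utils

hist = "Input data. Downselected to specific times using pyuvdata."
# New home after this patch: the history submodule.
assert utils.history._check_histories(hist, hist)
# The old top-level access still works, but now emits a DeprecationWarning.
with warnings.catch_warnings(record=True) as rec:
    warnings.simplefilter("always")
    utils._check_histories(hist, hist)
assert any(issubclass(w.category, DeprecationWarning) for w in rec)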
@@ -3481,7 +3480,7 @@ def test_init_from_uvdata_settimes(metadata_only, uvcalibrate_data): ) uvc_new.telescope.antenna_positions = uvc2.telescope.antenna_positions - assert helpers._check_histories( + assert utils.history._check_histories( uvc_new.history[:200], ( "Initialized from a UVData object with pyuvdata." @@ -3536,7 +3535,7 @@ def test_init_from_uvdata_setjones(uvcalibrate_data): ) uvc_new.telescope.antenna_positions = uvc2.telescope.antenna_positions - assert helpers._check_histories( + assert utils.history._check_histories( uvc_new.history[:200], ( "Initialized from a UVData object with pyuvdata." @@ -3592,7 +3591,7 @@ def test_init_single_pol(uvcalibrate_data, pol): ) uvc_new.telescope.antenna_positions = uvc2.telescope.antenna_positions - assert helpers._check_histories( + assert utils.history._check_histories( uvc_new.history[:200], ( "Initialized from a UVData object with pyuvdata." @@ -3642,7 +3641,7 @@ def test_init_from_uvdata_circular_pol(uvcalibrate_data): ) uvc_new.telescope.antenna_positions = uvc2.telescope.antenna_positions - assert helpers._check_histories( + assert utils.history._check_histories( uvc_new.history[:200], ( "Initialized from a UVData object with pyuvdata." @@ -3721,7 +3720,7 @@ def test_init_from_uvdata_sky(uvcalibrate_data, fhd_cal_raw): ) uvc_new.telescope.antenna_positions = uvc2.telescope.antenna_positions - assert helpers._check_histories( + assert utils.history._check_histories( uvc_new.history[:200], ( "Initialized from a UVData object with pyuvdata." @@ -3816,7 +3815,7 @@ def test_init_from_uvdata_delay(multi_spw, set_frange, uvcalibrate_data): ) uvc_new.telescope.antenna_positions = uvc2.telescope.antenna_positions - assert helpers._check_histories( + assert utils.history._check_histories( uvc_new.history[:200], ( "Initialized from a UVData object with pyuvdata." @@ -3909,7 +3908,7 @@ def test_init_from_uvdata_wideband(multi_spw, set_frange, uvcalibrate_data): ) uvc_new.telescope.antenna_positions = uvc2.telescope.antenna_positions - assert helpers._check_histories( + assert utils.history._check_histories( uvc_new.history[:200], ( "Initialized from a UVData object with pyuvdata." diff --git a/tests/uvdata/test_fhd.py b/tests/uvdata/test_fhd.py index 5ff01a5c2c..73c090a08c 100644 --- a/tests/uvdata/test_fhd.py +++ b/tests/uvdata/test_fhd.py @@ -713,7 +713,7 @@ def test_multi_files(fhd_model, axis, fhd_model_files): fhd_uv2 = fhd_model - assert utils.helpers._check_histories( + assert utils.history._check_histories( fhd_uv2.history + " Combined data along polarization axis using pyuvdata.", fhd_uv1.history, ) diff --git a/tests/uvdata/test_miriad.py b/tests/uvdata/test_miriad.py index e65b65c708..bb20a61252 100644 --- a/tests/uvdata/test_miriad.py +++ b/tests/uvdata/test_miriad.py @@ -861,7 +861,7 @@ def test_loop_multi_phase(tmp_path, paper_miriad, frame): # without the "phsframe" variable, the unprojected phase center gets interpreted as # an ephem type phase center. - zen_id, _ = utils.ps_cat.look_in_catalog( + zen_id, _ = utils.phase_center_catalog.look_in_catalog( uv3.phase_center_catalog, cat_name="zenith" ) new_id = uv3._add_phase_center(cat_name="zenith", cat_type="unprojected") @@ -1935,7 +1935,7 @@ def test_multi_files(casa_uvfits, tmp_path): uv1.read([testfile1, testfile2], file_type="miriad") # Check history is correct, before replacing and doing a full object check - assert utils.helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific frequencies using pyuvdata. 
" "Combined data along frequency axis using" @@ -1961,7 +1961,7 @@ def test_multi_files(casa_uvfits, tmp_path): uv1 = UVData() uv1.read([testfile1, testfile2], axis="freq") # Check history is correct, before replacing and doing a full object check - assert utils.helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific frequencies using pyuvdata. " "Combined data along frequency axis using" diff --git a/tests/uvdata/test_ms.py b/tests/uvdata/test_ms.py index 6ab1ff7133..e2bb0add36 100644 --- a/tests/uvdata/test_ms.py +++ b/tests/uvdata/test_ms.py @@ -678,7 +678,9 @@ def test_ms_scannumber_multiphasecenter(tmp_path, multi_frame): miriad_uv._set_app_coords_helper() if multi_frame: - cat_id = utils.ps_cat.look_for_name(miriad_uv.phase_center_catalog, "NOISE") + cat_id = utils.phase_center_catalog.look_for_name( + miriad_uv.phase_center_catalog, "NOISE" + ) ra_use = miriad_uv.phase_center_catalog[cat_id[0]]["cat_lon"][0] dec_use = miriad_uv.phase_center_catalog[cat_id[0]]["cat_lat"][0] with pytest.raises( diff --git a/tests/uvdata/test_uvdata.py b/tests/uvdata/test_uvdata.py index 2dcb117190..f519249c94 100644 --- a/tests/uvdata/test_uvdata.py +++ b/tests/uvdata/test_uvdata.py @@ -23,7 +23,6 @@ from pyuvdata import UVCal, UVData, utils from pyuvdata.data import DATA_PATH from pyuvdata.testing import check_warnings -from pyuvdata.utils import helpers from ..utils.test_coordinates import frame_selenoid from .test_mwa_corr_fits import filelist as mwa_corr_files @@ -1345,9 +1344,9 @@ def test_select_blts(paper_uvh5): assert len(blt_inds) == uv_object2.Nblts # verify that histories are different - assert not helpers._check_histories(old_history, uv_object2.history) + assert not utils.history._check_histories(old_history, uv_object2.history) - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific baseline-times using pyuvdata.", uv_object2.history, ) @@ -1360,7 +1359,7 @@ def test_select_blts(paper_uvh5): assert len(blt_inds) == uv_object2.Nblts - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific baseline-times using pyuvdata.", uv_object2.history, ) @@ -1399,7 +1398,7 @@ def test_select_phase_center_id(tmp_path, carma_miriad): uv2 = uv_obj.select(phase_center_ids=[1, 2], inplace=False) uv_sum = uv1 + uv2 - assert helpers._check_histories( + assert utils.history._check_histories( uv_obj.history + " Downselected to specific phase center IDs using pyuvdata. " "Combined data along baseline-time axis using pyuvdata.", uv_sum.history, @@ -1433,7 +1432,7 @@ def test_select_phase_center_id_blts(carma_miriad): ) uv_sum = uv1 + uv2 + uv3 - assert helpers._check_histories( + assert utils.history._check_histories( uv_obj.history + " Downselected to specific baseline-times, phase center IDs using pyuvdata. " "Combined data along baseline-time axis using pyuvdata. 
" @@ -1477,7 +1476,7 @@ def test_select_antennas(casa_uvfits): ): assert ant in ants_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific antennas using pyuvdata.", uv_object2.history, ) @@ -1495,7 +1494,7 @@ def test_select_antennas(casa_uvfits): ): assert ant in ants_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific antennas using pyuvdata.", uv_object2.history, ) @@ -1614,7 +1613,7 @@ def test_select_bls(casa_uvfits): for pair in sorted_pairs_object2: assert pair in sorted_pairs_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific baselines using pyuvdata.", uv_object2.history, ) @@ -1643,7 +1642,7 @@ def test_select_bls(casa_uvfits): for pair in sorted_pairs_object3: assert pair in sorted_pairs_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific baselines using pyuvdata.", uv_object3.history, ) @@ -1682,7 +1681,7 @@ def test_select_bls(casa_uvfits): for bl in sorted_pairs_object2: assert bl in sorted_bls_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific baselines, polarizations using pyuvdata.", uv_object2.history, @@ -1711,7 +1710,7 @@ def test_select_bls(casa_uvfits): for pair in sorted_pairs_object2: assert pair in sorted_pairs_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific baselines using pyuvdata.", uv_object2.history, ) @@ -1796,7 +1795,7 @@ def test_select_times(casa_uvfits): for t in np.unique(uv_object2.time_array): assert t in times_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific times using pyuvdata.", uv_object2.history, ) @@ -1811,7 +1810,7 @@ def test_select_times(casa_uvfits): for t in np.unique(uv_object2.time_array): assert t in times_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific times using pyuvdata.", uv_object2.history, ) @@ -1850,7 +1849,7 @@ def test_select_time_range(casa_uvfits): for t in np.unique(uv_object2.time_array): assert t in times_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific times using pyuvdata.", uv_object2.history, ) @@ -1878,7 +1877,7 @@ def test_select_lsts(casa_uvfits, tmp_path): for lst in np.unique(uv_object2.lst_array): assert lst in lsts_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific lsts using pyuvdata.", uv_object2.history, ) @@ -1893,7 +1892,7 @@ def test_select_lsts(casa_uvfits, tmp_path): for lst in np.unique(uv_object2.lst_array): assert lst in lsts_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific lsts using pyuvdata.", uv_object2.history, ) @@ -2037,7 +2036,7 @@ def test_select_lsts_multi_day(casa_uvfits): unique_jds = np.unique(np.asarray(uv_object2.time_array, dtype=np.int_)) assert len(unique_jds) == 2 - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific lsts using pyuvdata.", uv_object2.history, ) @@ -2146,7 +2145,7 @@ def 
test_select_lst_range(casa_uvfits, tmp_path): for lst in np.unique(uv_object2.lst_array): assert lst in lsts_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific lsts using pyuvdata.", uv_object2.history, ) @@ -2195,7 +2194,7 @@ def test_select_lst_range_too_big(casa_uvfits): for lst in np.unique(uv_object2.lst_array): assert lst in lsts_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific lsts using pyuvdata.", uv_object2.history, ) @@ -2230,7 +2229,7 @@ def test_select_lst_range_wrap_around(casa_uvfits): for lst in np.unique(uv_object2.lst_array): assert lst in lsts_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific lsts using pyuvdata.", uv_object2.history, ) @@ -2312,7 +2311,7 @@ def test_select_frequencies_writeerrors(casa_uvfits, tmp_path): for f in np.unique(uv_object2.freq_array): assert f in freqs_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", uv_object2.history, ) @@ -2327,7 +2326,7 @@ def test_select_frequencies_writeerrors(casa_uvfits, tmp_path): for f in np.unique(uv_object2.freq_array): assert f in freqs_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", uv_object2.history, ) @@ -2340,7 +2339,7 @@ def test_select_frequencies_writeerrors(casa_uvfits, tmp_path): for f in uv_object2.freq_array: assert f in [freqs_to_keep[0]] - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", uv_object2.history, ) @@ -2423,7 +2422,7 @@ def test_select_freq_chans(casa_uvfits): for f in np.unique(uv_object2.freq_array): assert f in uv_object.freq_array[chans_to_keep] - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", uv_object2.history, ) @@ -2438,7 +2437,7 @@ def test_select_freq_chans(casa_uvfits): for f in np.unique(uv_object2.freq_array): assert f in uv_object.freq_array[chans_to_keep] - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", uv_object2.history, ) @@ -2490,7 +2489,7 @@ def test_select_polarizations(hera_uvh5, pols_to_keep): pols_to_keep, x_orientation=uv_object2.telescope.x_orientation ) - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific polarizations using pyuvdata.", uv_object2.history, ) @@ -2615,7 +2614,7 @@ def test_select(casa_uvfits): for p in np.unique(uv_object2.polarization_array): assert p in pols_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to " "specific baseline-times, antennas, " "baselines, times, frequencies, " @@ -2711,7 +2710,7 @@ def test_select_with_lst(casa_uvfits): for p in np.unique(uv_object2.polarization_array): assert p in pols_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to " "specific baseline-times, antennas, " "baselines, lsts, frequencies, " @@ -2735,7 +2734,7 @@ def test_select_not_inplace(casa_uvfits): old_history = uv_object.history uv1 = 
uv_object.select(freq_chans=np.arange(32), inplace=False) uv1 += uv_object.select(freq_chans=np.arange(32, 64), inplace=False) - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to " "specific frequencies using pyuvdata. " "Combined data along frequency axis " @@ -3236,14 +3235,14 @@ def test_sum_vis(casa_uvfits): uv_summed = uv_half.sum_vis(uv_half_mod) assert np.array_equal(uv_summed.data_array, uv_full.data_array) - assert helpers._check_histories( + assert utils.history._check_histories( uv_half.history + " Visibilities summed using pyuvdata. Unique part of second " "object history follows. testing the history.", uv_summed.history, ) # add a test for full coverage of _combine_history_addition function assert ( - helpers._combine_history_addition( + utils.history._combine_history_addition( uv_half.history + " Visibilities summed using pyuvdata. Unique part of second " "object history follows. testing the history.", @@ -3255,7 +3254,7 @@ def test_sum_vis(casa_uvfits): uv_summed = uv_half.sum_vis(uv_half_mod, verbose_history=True) assert np.array_equal(uv_summed.data_array, uv_full.data_array) - assert helpers._check_histories( + assert utils.history._check_histories( uv_half.history + " Visibilities summed using pyuvdata. Second object history follows. " + uv_half_mod.history, @@ -3266,7 +3265,7 @@ def test_sum_vis(casa_uvfits): uv_diffed = uv_full.diff_vis(uv_half) assert np.array_equal(uv_diffed.data_array, uv_half.data_array) - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Visibilities differenced using pyuvdata.", uv_diffed.history ) @@ -3358,7 +3357,7 @@ def test_add(casa_uvfits, hera_uvh5_xx): uv2.select(freq_chans=np.arange(32, 64)) uv1 += uv2 # Check history is correct, before replacing and doing a full object check - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific frequencies using pyuvdata. " "Combined data along frequency axis " @@ -3387,7 +3386,7 @@ def test_add(casa_uvfits, hera_uvh5_xx): uv1.select(polarizations=uv1.polarization_array[0:2]) uv2.select(polarizations=uv2.polarization_array[2:4]) uv1 += uv2 - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific polarizations using pyuvdata. " "Combined data along polarization axis " @@ -3413,7 +3412,7 @@ def test_add(casa_uvfits, hera_uvh5_xx): uv1.select(times=times[0 : len(times) // 2]) uv2.select(times=times[len(times) // 2 :]) uv1 += uv2 - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific times using pyuvdata. " "Combined data along baseline-time axis " @@ -3433,7 +3432,7 @@ def test_add(casa_uvfits, hera_uvh5_xx): uv1.select(blt_inds=ind1) uv2.select(blt_inds=ind2) uv1 += uv2 - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific baseline-times using pyuvdata. " "Combined data along baseline-time axis " @@ -3470,7 +3469,7 @@ def test_add(casa_uvfits, hera_uvh5_xx): uv3.baseline_array = uv3.baseline_array[-1::-1] uv1 += uv3 uv1 += uv2 - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific baseline-times using pyuvdata. 
" "Combined data along baseline-time axis " @@ -3493,7 +3492,7 @@ def test_add(casa_uvfits, hera_uvh5_xx): times=times[len(times) // 2 :], polarizations=uv2.polarization_array[2:4] ) uv1 += uv2 - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific times, polarizations using " "pyuvdata. Combined data along " @@ -3533,7 +3532,7 @@ def test_add(casa_uvfits, hera_uvh5_xx): uv1.select(times=times[0 : len(times) // 2], freq_chans=np.arange(0, 32)) uv2.select(times=times[len(times) // 2 :], freq_chans=np.arange(32, 64)) uv1 += uv2 - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific times, frequencies using " "pyuvdata. Combined data along " @@ -3572,7 +3571,7 @@ def test_add(casa_uvfits, hera_uvh5_xx): uv1.select(times=times[0 : len(times) // 2]) uv2.select(times=times[len(times) // 2 :]) uv1 = uv1 + uv2 - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific times using pyuvdata. " "Combined data along baseline-time " @@ -3685,7 +3684,7 @@ def test_add(casa_uvfits, hera_uvh5_xx): uv2.select(polarizations=uv2.polarization_array[2:4]) uv2.history += " testing the history. AIPS WTSCAL = 1.0" uv_new = uv1 + uv2 - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to specific polarizations using pyuvdata. " "Combined data along polarization axis using pyuvdata. Unique part of next " "object history follows. testing the history.", @@ -3695,7 +3694,7 @@ def test_add(casa_uvfits, hera_uvh5_xx): assert uv_new == uv_full uv_new = uv1.__add__(uv2, verbose_history=True) - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to specific polarizations using pyuvdata. " "Combined data along polarization axis using pyuvdata. Next object history " "follows. " + uv2.history, @@ -3727,7 +3726,7 @@ def test_add_unprojected(casa_uvfits): uv2.select(freq_chans=np.arange(32, 64)) uv1 += uv2 # Check history is correct, before replacing and doing a full object check - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific frequencies using pyuvdata. " "Combined data along frequency " @@ -3743,7 +3742,7 @@ def test_add_unprojected(casa_uvfits): uv1.select(polarizations=uv1.polarization_array[0:2]) uv2.select(polarizations=uv2.polarization_array[2:4]) uv1 += uv2 - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific polarizations using pyuvdata. " "Combined data along polarization " @@ -3760,7 +3759,7 @@ def test_add_unprojected(casa_uvfits): uv1.select(times=times[0 : len(times) // 2]) uv2.select(times=times[len(times) // 2 :]) uv1 += uv2 - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific times using pyuvdata. " "Combined data along baseline-time " @@ -3780,7 +3779,7 @@ def test_add_unprojected(casa_uvfits): uv1.select(blt_inds=ind1) uv2.select(blt_inds=ind2) uv1 += uv2 - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific baseline-times using pyuvdata. 
" "Combined data along baseline-time " @@ -3802,7 +3801,7 @@ def test_add_unprojected(casa_uvfits): times=times[len(times) // 2 :], polarizations=uv2.polarization_array[2:4] ) uv1 += uv2 - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific times, polarizations using " "pyuvdata. Combined data along " @@ -3842,7 +3841,7 @@ def test_add_unprojected(casa_uvfits): uv1.select(times=times[0 : len(times) // 2], freq_chans=np.arange(0, 32)) uv2.select(times=times[len(times) // 2 :], freq_chans=np.arange(32, 64)) uv1 += uv2 - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific times, frequencies using " "pyuvdata. Combined data along " @@ -3881,7 +3880,7 @@ def test_add_unprojected(casa_uvfits): uv1.select(times=times[0 : len(times) // 2]) uv2.select(times=times[len(times) // 2 :]) uv1 = uv1 + uv2 - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific times using pyuvdata. " "Combined data along baseline-time " @@ -3923,7 +3922,7 @@ def test_add_unprojected(casa_uvfits): uv2.select(polarizations=uv2.polarization_array[2:4]) uv2.history += " testing the history. AIPS WTSCAL = 1.0" uv_new = uv1 + uv2 - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to specific polarizations using pyuvdata. " "Combined data along polarization axis using pyuvdata. Unique part of next " "object history follows. testing the history.", @@ -3933,7 +3932,7 @@ def test_add_unprojected(casa_uvfits): assert uv_new == uv_full uv_new = uv1.__add__(uv2, verbose_history=True) - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to specific polarizations using pyuvdata. " "Combined data along polarization axis using pyuvdata. Next object history " "follows." + uv2.history, @@ -4135,7 +4134,7 @@ def test_fast_concat(casa_uvfits, hera_uvh5_xx): uv3.select(freq_chans=np.arange(40, 64)) uv1.fast_concat([uv2, uv3], "freq", inplace=True) # Check history is correct, before replacing and doing a full object check - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific frequencies using pyuvdata. " "Combined data along frequency axis " @@ -4186,7 +4185,7 @@ def test_fast_concat(casa_uvfits, hera_uvh5_xx): uv2.select(polarizations=uv2.polarization_array[1:3]) uv3.select(polarizations=uv3.polarization_array[3:4]) uv1.fast_concat([uv2, uv3], "polarization", inplace=True) - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific polarizations using pyuvdata. " "Combined data along polarization axis " @@ -4231,7 +4230,7 @@ def test_fast_concat(casa_uvfits, hera_uvh5_xx): uv2.select(times=times[len(times) // 3 : (len(times) // 3) * 2]) uv3.select(times=times[(len(times) // 3) * 2 :]) uv1.fast_concat([uv2, uv3], "blt", inplace=True) - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific times using pyuvdata. 
" "Combined data along baseline-time axis " @@ -4250,7 +4249,7 @@ def test_fast_concat(casa_uvfits, hera_uvh5_xx): uv1.select(blt_inds=ind1) uv2.select(blt_inds=ind2) uv1.fast_concat(uv2, "blt", inplace=True) - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific baseline-times using pyuvdata. " "Combined data along baseline-time axis " @@ -4302,7 +4301,7 @@ def test_fast_concat(casa_uvfits, hera_uvh5_xx): uv2.select(blt_inds=ind2) uv2.fast_concat(uv1, "blt", inplace=True) - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific baseline-times using pyuvdata. " "Combined data along baseline-time " @@ -4370,7 +4369,7 @@ def test_fast_concat(casa_uvfits, hera_uvh5_xx): uv1.select(times=times[0 : len(times) // 2]) uv2.select(times=times[len(times) // 2 :]) uv1 = uv1.fast_concat(uv2, "blt", inplace=False) - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific times using pyuvdata. " "Combined data along baseline-time " @@ -4472,7 +4471,7 @@ def test_fast_concat(casa_uvfits, hera_uvh5_xx): uv2.select(polarizations=uv2.polarization_array[2:4]) uv2.history += " testing the history. AIPS WTSCAL = 1.0" uv_new = uv1.fast_concat(uv2, "polarization") - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to specific polarizations using pyuvdata. " "Combined data along polarization axis using pyuvdata. Unique part of next " "object history follows. testing the history.", @@ -4482,7 +4481,7 @@ def test_fast_concat(casa_uvfits, hera_uvh5_xx): assert uv_new == uv_full uv_new = uv1.fast_concat(uv2, "polarization", verbose_history=True) - assert helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to specific polarizations using pyuvdata. " "Combined data along polarization axis using pyuvdata. Next object history " "follows." + uv2.history, @@ -6490,7 +6489,7 @@ def test_overlapping_data_add(casa_uvfits, tmp_path): "Combined data along polarization axis using pyuvdata. Combined data along " "baseline-time axis using pyuvdata. Overwrote invalid data using pyuvdata." ) - assert helpers._check_histories(uvfull.history, uv.history + extra_history) + assert utils.history._check_histories(uvfull.history, uv.history + extra_history) uvfull.history = uv.history # make histories match assert uv == uvfull @@ -6505,7 +6504,7 @@ def test_overlapping_data_add(casa_uvfits, tmp_path): "Combined data along polarization axis using pyuvdata. Combined data along " "baseline-time axis using pyuvdata." 
) - assert helpers._check_histories(uvfull.history, uv.history + extra_history2) + assert utils.history._check_histories(uvfull.history, uv.history + extra_history2) uvfull.history = uv.history # make histories match assert uv == uvfull @@ -6547,7 +6546,7 @@ def test_overlapping_data_add(casa_uvfits, tmp_path): uvfull.read(np.array([uv1_out, uv2_out, uv3_out, uv4_out])) uvfull.reorder_blts() uv.reorder_blts() - assert helpers._check_histories(uvfull.history, uv.history + extra_history2) + assert utils.history._check_histories(uvfull.history, uv.history + extra_history2) uvfull.history = uv.history # make histories match # make sure filenames are what we expect @@ -9627,7 +9626,9 @@ def test_print_object_multi(carma_miriad): ) def test_look_in_catalog_err(sma_mir, kwargs, err_type, err_msg): with pytest.raises(err_type, match=err_msg): - utils.ps_cat.look_in_catalog(sma_mir.phase_center_catalog, **kwargs) + utils.phase_center_catalog.look_in_catalog( + sma_mir.phase_center_catalog, **kwargs + ) @pytest.mark.parametrize( @@ -9656,7 +9657,7 @@ def test_look_in_catalog(hera_uvh5, name, stype, arg_dict, exp_id, exp_diffs): parameters and that recorded in the UVData object. """ hera_uvh5.print_phase_center_info() - [cat_id, num_diffs] = utils.ps_cat.look_in_catalog( + [cat_id, num_diffs] = utils.phase_center_catalog.look_in_catalog( hera_uvh5.phase_center_catalog, cat_name=name, cat_type=stype, @@ -9684,16 +9685,16 @@ def test_look_in_catalog_phase_dict(sma_mir): behave as expected """ # Now try lookup using a dictionary of properties - assert utils.ps_cat.look_in_catalog( + assert utils.phase_center_catalog.look_in_catalog( sma_mir.phase_center_catalog, cat_name="3c84" ) == (1, 5) phase_dict = sma_mir.phase_center_catalog[1] - assert utils.ps_cat.look_in_catalog( + assert utils.phase_center_catalog.look_in_catalog( sma_mir.phase_center_catalog, cat_name="3c84", phase_dict=phase_dict ) == (1, 0) # Make sure that if we set ignore_name, we still get a match - assert utils.ps_cat.look_in_catalog( + assert utils.phase_center_catalog.look_in_catalog( sma_mir.phase_center_catalog, cat_name="3c84", phase_dict=phase_dict, @@ -9701,7 +9702,7 @@ def test_look_in_catalog_phase_dict(sma_mir): ) == (1, 0) # Match w/ a mis-capitalization - assert utils.ps_cat.look_in_catalog( + assert utils.phase_center_catalog.look_in_catalog( sma_mir.phase_center_catalog, cat_name="3C84", phase_dict=phase_dict, @@ -10009,7 +10010,7 @@ def test_add_clear_phase_center(sma_mir): # Check to see that the catalog actually changed assert sma_mir.phase_center_catalog != check_dict # And ake sure we can ID by name, but find diffs if attributes dont match - assert utils.ps_cat.look_in_catalog( + assert utils.phase_center_catalog.look_in_catalog( sma_mir.phase_center_catalog, cat_name="Mars", cat_lon=[0], cat_lat=[0] ) == (0, 7) @@ -10088,14 +10089,18 @@ def test_split_phase_center(hera_uvh5): select_mask = np.isin(hera_uvh5.time_array, np.unique(hera_uvh5.time_array)[::2]) hera_uvh5.split_phase_center("3c84", new_name="3c84_2", select_mask=select_mask) - cat_id1 = utils.ps_cat.look_for_name(hera_uvh5.phase_center_catalog, "3c84") - cat_id2 = utils.ps_cat.look_for_name(hera_uvh5.phase_center_catalog, "3c84_2") + cat_id1 = utils.phase_center_catalog.look_for_name( + hera_uvh5.phase_center_catalog, "3c84" + ) + cat_id2 = utils.phase_center_catalog.look_for_name( + hera_uvh5.phase_center_catalog, "3c84_2" + ) # Check that the catalog IDs also line up w/ what we expect assert np.all(hera_uvh5.phase_center_id_array[~select_mask] == 
cat_id1) assert np.all(hera_uvh5.phase_center_id_array[select_mask] == cat_id2) assert hera_uvh5.Nphase == 2 - cat_id_all = utils.ps_cat.look_for_name( + cat_id_all = utils.phase_center_catalog.look_for_name( hera_uvh5.phase_center_catalog, ["3c84", "3c84_2"] ) assert np.all(np.isin(hera_uvh5.phase_center_id_array, cat_id_all)) @@ -10131,8 +10136,12 @@ def test_split_phase_center_downselect(hera_uvh5): downselect=True, ) - cat_id1 = utils.ps_cat.look_for_name(hera_uvh5.phase_center_catalog, "3c84") - cat_id3 = utils.ps_cat.look_for_name(hera_uvh5.phase_center_catalog, "3c84_3") + cat_id1 = utils.phase_center_catalog.look_for_name( + hera_uvh5.phase_center_catalog, "3c84" + ) + cat_id3 = utils.phase_center_catalog.look_for_name( + hera_uvh5.phase_center_catalog, "3c84_3" + ) assert np.all(hera_uvh5.phase_center_id_array[~select_mask] == cat_id1) assert np.all(hera_uvh5.phase_center_id_array[select_mask] == cat_id3) @@ -10152,7 +10161,9 @@ def test_split_phase_center_downselect(hera_uvh5): assert hera_uvh5.phase_center_catalog == catalog_copy assert np.all( hera_uvh5.phase_center_id_array - == utils.ps_cat.look_for_name(hera_uvh5.phase_center_catalog, "3c84") + == utils.phase_center_catalog.look_for_name( + hera_uvh5.phase_center_catalog, "3c84" + ) ) @@ -10326,12 +10337,16 @@ def test_phase_dict_helper_sidereal_lookup(sma_mir, dummy_phase_dict): ) assert ( phase_dict.pop("cat_id") - == utils.ps_cat.look_for_name(sma_mir.phase_center_catalog, "3c84")[0] + == utils.phase_center_catalog.look_for_name( + sma_mir.phase_center_catalog, "3c84" + )[0] ) assert ( phase_dict == sma_mir.phase_center_catalog[ - utils.ps_cat.look_for_name(sma_mir.phase_center_catalog, "3c84")[0] + utils.phase_center_catalog.look_for_name( + sma_mir.phase_center_catalog, "3c84" + )[0] ] ) @@ -10343,7 +10358,9 @@ def test_phase_dict_helper_jpl_lookup_existing(sma_mir): """ # Finally, check that we get a good result if feeding the same values, even if not # actually performing a lookup - cat_id = utils.ps_cat.look_for_name(sma_mir.phase_center_catalog, "3c84")[0] + cat_id = utils.phase_center_catalog.look_for_name( + sma_mir.phase_center_catalog, "3c84" + )[0] phase_dict = sma_mir._phase_dict_helper( lon=sma_mir.phase_center_catalog[cat_id].get("cat_lon"), lat=sma_mir.phase_center_catalog[cat_id].get("cat_lat"), @@ -10363,7 +10380,9 @@ def test_phase_dict_helper_jpl_lookup_existing(sma_mir): assert ( phase_dict == sma_mir.phase_center_catalog[ - utils.ps_cat.look_for_name(sma_mir.phase_center_catalog, "3c84")[0] + utils.phase_center_catalog.look_for_name( + sma_mir.phase_center_catalog, "3c84" + )[0] ] ) diff --git a/tests/uvdata/test_uvfits.py b/tests/uvdata/test_uvfits.py index ca0a024bce..9d346fd3fe 100644 --- a/tests/uvdata/test_uvfits.py +++ b/tests/uvdata/test_uvfits.py @@ -1329,7 +1329,7 @@ def test_multi_files(casa_uvfits, tmp_path): uv1.read(np.array([testfile1, testfile2]), file_type="uvfits") # Check history is correct, before replacing and doing a full object check - assert utils.helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific frequencies using pyuvdata. " "Combined data along frequency axis " @@ -1371,7 +1371,7 @@ def test_multi_files_axis(casa_uvfits, tmp_path): uv1.read([testfile1, testfile2], axis="freq") # Check history is correct, before replacing and doing a full object check - assert utils.helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific frequencies using pyuvdata. 
" "Combined data along frequency axis " @@ -1414,7 +1414,7 @@ def test_multi_files_metadata_only(casa_uvfits, tmp_path): uv1.read([testfile1, testfile2], read_data=False) # Check history is correct, before replacing and doing a full object check - assert utils.helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific frequencies using pyuvdata. " "Combined data along frequency axis " diff --git a/tests/uvdata/test_uvh5.py b/tests/uvdata/test_uvh5.py index 365963b2b3..6beb528e61 100644 --- a/tests/uvdata/test_uvh5.py +++ b/tests/uvdata/test_uvh5.py @@ -431,7 +431,7 @@ def test_uvh5_read_multiple_files(casa_uvfits, tmp_path): uv1.read(np.array([testfile1, testfile2]), file_type="uvh5") # Check history is correct, before replacing and doing a full object check - assert utils.helpers._check_histories( + assert utils.history._check_histories( uv_in.history + " Downselected to " "specific frequencies using pyuvdata. " "Combined data along frequency axis using" @@ -475,7 +475,7 @@ def test_uvh5_read_multiple_files_metadata_only(casa_uvfits, tmp_path): uv_full.read_uvfits(uvfits_filename, read_data=False) uv1.read([testfile1, testfile2], read_data=False) # Check history is correct, before replacing and doing a full object check - assert utils.helpers._check_histories( + assert utils.history._check_histories( uv_full.history + " Downselected to " "specific frequencies using pyuvdata. " "Combined data along frequency axis using" @@ -515,7 +515,7 @@ def test_uvh5_read_multiple_files_axis(casa_uvfits, tmp_path): uv2.write_uvh5(testfile2, clobber=True) uv1.read([testfile1, testfile2], axis="freq") # Check history is correct, before replacing and doing a full object check - assert utils.helpers._check_histories( + assert utils.history._check_histories( uv_in.history + " Downselected to " "specific frequencies using pyuvdata. 
" "Combined data along frequency axis using" diff --git a/tests/uvflag/test_uvflag.py b/tests/uvflag/test_uvflag.py index 1a9f328791..b54a21451b 100644 --- a/tests/uvflag/test_uvflag.py +++ b/tests/uvflag/test_uvflag.py @@ -18,7 +18,6 @@ from pyuvdata import UVCal, UVData, UVFlag, __version__, utils from pyuvdata.data import DATA_PATH from pyuvdata.testing import check_warnings -from pyuvdata.utils import helpers from pyuvdata.utils.file_io import hdf5 as hdf5_utils from pyuvdata.uvbase import old_telescope_metadata_attrs from pyuvdata.uvflag import and_rows_cols, flags2waterfall @@ -3262,13 +3261,13 @@ def test_select_blt_inds(input_uvf, uvf_mode, dimension): assert uvf1.Ntimes == new_nblts # verify that histories are different - assert not helpers._check_histories(uvf.history, uvf1.history) + assert not utils.history._check_histories(uvf.history, uvf1.history) if uvf.type == "baseline": addition_str = "baseline-times" else: addition_str = "times" - assert helpers._check_histories( + assert utils.history._check_histories( uvf.history + f" Downselected to specific {addition_str} using pyuvdata.", uvf1.history, ) @@ -3347,7 +3346,7 @@ def test_select_antenna_nums(input_uvf, uvf_mode, dimension): for ant in np.unique(uvf2.ant_array): assert ant in ants_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific antennas using pyuvdata.", uvf2.history, ) @@ -3427,7 +3426,7 @@ def test_select_bls(input_uvf, uvf_mode): for pair in sorted_pairs_object2: assert pair in sorted_pairs_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific baselines using pyuvdata.", uvf2.history, ) @@ -3470,7 +3469,7 @@ def test_select_bls(input_uvf, uvf_mode): for pair in sorted_pairs_object2: assert pair in sorted_pairs_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to " "specific baselines, polarizations using pyuvdata.", uvf2.history, @@ -3558,7 +3557,7 @@ def test_select_times(input_uvf, uvf_mode): for t in np.unique(uvf2.time_array): assert t in times_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific times using pyuvdata.", uvf2.history ) # check that it also works with higher dimension array @@ -3572,7 +3571,7 @@ def test_select_times(input_uvf, uvf_mode): for t in np.unique(uvf2.time_array): assert t in times_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific times using pyuvdata.", uvf2.history ) # check for errors associated with times not included in data @@ -3607,7 +3606,7 @@ def test_select_frequencies(input_uvf, uvf_mode): for f in np.unique(uvf2.freq_array): assert f in freqs_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", uvf2.history, ) @@ -3622,7 +3621,7 @@ def test_select_frequencies(input_uvf, uvf_mode): for f in np.unique(uvf2.freq_array): assert f in freqs_to_keep - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", uvf2.history, ) @@ -3635,7 +3634,7 @@ def test_select_frequencies(input_uvf, uvf_mode): for f in uvf2.freq_array: assert f in [freqs_to_keep[0]] - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " 
Downselected to specific frequencies using pyuvdata.", uvf2.history, ) @@ -3673,7 +3672,7 @@ def test_select_freq_chans(input_uvf, uvf_mode): for f in np.unique(uvf2.freq_array): assert f in uvf.freq_array[chans_to_keep] - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", uvf2.history, ) @@ -3689,7 +3688,7 @@ def test_select_freq_chans(input_uvf, uvf_mode): for f in np.unique(uvf2.freq_array): assert f in uvf.freq_array[chans_to_keep] - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific frequencies using pyuvdata.", uvf2.history, ) @@ -3749,7 +3748,7 @@ def test_select_polarizations(uvf_mode, pols_to_keep, input_uvf): pols_to_keep, x_orientation=uvf2.telescope.x_orientation ) - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to specific polarizations using pyuvdata.", uvf2.history, ) @@ -3888,7 +3887,7 @@ def test_select(input_uvf, uvf_mode): assert p in pols_to_keep if uvf.type == "baseline": - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to " "specific baseline-times, antennas, " "baselines, times, frequencies, " @@ -3896,7 +3895,7 @@ def test_select(input_uvf, uvf_mode): uvf2.history, ) elif uvf.type == "antenna": - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to " "specific times, antennas, " "frequencies, " @@ -3904,7 +3903,7 @@ def test_select(input_uvf, uvf_mode): uvf2.history, ) else: - assert helpers._check_histories( + assert utils.history._check_histories( old_history + " Downselected to " "specific times, " "frequencies, " From 61880b0f9cd0e2e7fd286781aeb5d1c87ad57d3f Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Fri, 21 Jun 2024 13:44:04 -0700 Subject: [PATCH 04/12] move extensions under the related subpackages --- setup.py | 6 +++--- src/pyuvdata/uvbeam/uvbeam.py | 3 +-- src/pyuvdata/uvdata/aipy_extracts.py | 2 +- src/pyuvdata/uvdata/mwa_corr_fits.py | 4 ++-- 4 files changed, 7 insertions(+), 8 deletions(-) diff --git a/setup.py b/setup.py index 53cebdf7a0..4e1498e8af 100644 --- a/setup.py +++ b/setup.py @@ -63,7 +63,7 @@ def is_platform_windows(): global_c_macros = [("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")] miriad_extension = Extension( - "pyuvdata._miriad", + "pyuvdata.uvdata._miriad", sources=[ "src/pyuvdata/uvdata/src/miriad_wrap.pyx", "src/pyuvdata/uvdata/src/uvio.c", @@ -79,7 +79,7 @@ def is_platform_windows(): ) corr_fits_extension = Extension( - "pyuvdata._corr_fits", + "pyuvdata.uvdata._corr_fits", sources=["src/pyuvdata/uvdata/corr_fits.pyx"], define_macros=global_c_macros, include_dirs=[numpy.get_include()], @@ -112,7 +112,7 @@ def is_platform_windows(): ) uvbeam_extension = Extension( - "pyuvdata._uvbeam", + "pyuvdata.uvbeam._uvbeam", sources=["src/pyuvdata/uvbeam/uvbeam.pyx"], define_macros=global_c_macros, include_dirs=[numpy.get_include()], diff --git a/src/pyuvdata/uvbeam/uvbeam.py b/src/pyuvdata/uvbeam/uvbeam.py index 6789ca9145..39ad4ce6d3 100644 --- a/src/pyuvdata/uvbeam/uvbeam.py +++ b/src/pyuvdata/uvbeam/uvbeam.py @@ -14,12 +14,11 @@ from docstring_parser import DocstringStyle from scipy import interpolate -from .. import _uvbeam from .. import parameter as uvp from .. import utils from ..docstrings import combine_docstrings, copy_replace_short_description from ..uvbase import UVBase -from . 
import initializers +from . import _uvbeam, initializers __all__ = ["UVBeam"] diff --git a/src/pyuvdata/uvdata/aipy_extracts.py b/src/pyuvdata/uvdata/aipy_extracts.py index 9c3d46e4ef..7031345284 100644 --- a/src/pyuvdata/uvdata/aipy_extracts.py +++ b/src/pyuvdata/uvdata/aipy_extracts.py @@ -17,7 +17,7 @@ import numpy as np try: - from pyuvdata import _miriad + from . import _miriad except ImportError as e: # pragma: no cover raise ImportError( "The miriad extension is not built but is required for reading miriad " diff --git a/src/pyuvdata/uvdata/mwa_corr_fits.py b/src/pyuvdata/uvdata/mwa_corr_fits.py index fcd6d7dac3..83dcf40a30 100644 --- a/src/pyuvdata/uvdata/mwa_corr_fits.py +++ b/src/pyuvdata/uvdata/mwa_corr_fits.py @@ -17,11 +17,11 @@ from scipy.integrate import simpson from scipy.special import erf -from .. import Telescope, _corr_fits, utils +from .. import Telescope, utils from ..data import DATA_PATH from ..docstrings import copy_replace_short_description from ..utils.file_io import fits as fits_utils -from . import UVData +from . import UVData, _corr_fits __all__ = ["input_output_mapping", "MWACorrFITS"] From e52955ce81a3dac3cb63aad4e98b8a474c641dcc Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Mon, 24 Jun 2024 12:34:52 -0700 Subject: [PATCH 05/12] A little more utils restructuring, update docs for new structure --- .flake8 | 2 + docs/developer_docs.rst | 175 +++++++- docs/functions.rst | 50 +++ docs/make_index.py | 2 +- docs/utility_functions.rst | 5 - src/pyuvdata/parameter.py | 13 +- src/pyuvdata/telescopes.py | 4 +- src/pyuvdata/utils/__init__.py | 14 +- .../utils/{file_io => io}/__init__.py | 0 src/pyuvdata/utils/{file_io => io}/antpos.py | 0 src/pyuvdata/utils/io/fhd.py | 415 ++++++++++++++++++ src/pyuvdata/utils/{file_io => io}/fits.py | 0 src/pyuvdata/utils/{file_io => io}/hdf5.py | 0 src/pyuvdata/utils/{file_io => io}/ms.py | 0 src/pyuvdata/utils/pol.py | 40 +- src/pyuvdata/uvbeam/beamfits.py | 2 +- src/pyuvdata/uvbeam/uvbeam.py | 2 +- src/pyuvdata/uvcal/calfits.py | 2 +- src/pyuvdata/uvcal/calh5.py | 2 +- src/pyuvdata/uvcal/fhd_cal.py | 8 +- src/pyuvdata/uvcal/ms_cal.py | 2 +- src/pyuvdata/uvdata/fhd.py | 411 +---------------- src/pyuvdata/uvdata/ms.py | 2 +- src/pyuvdata/uvdata/mwa_corr_fits.py | 2 +- src/pyuvdata/uvdata/uvdata.py | 2 +- src/pyuvdata/uvdata/uvfits.py | 2 +- src/pyuvdata/uvdata/uvh5.py | 2 +- tests/utils/{file_io => io}/__init__.py | 2 +- tests/utils/{file_io => io}/test_fits.py | 4 +- tests/utils/{file_io => io}/test_hdf5.py | 2 +- tests/utils/{file_io => io}/test_ms.py | 2 +- tests/utils/test_phasing.py | 21 +- tests/utils/test_pol.py | 11 + tests/uvbeam/test_beamfits.py | 2 +- tests/uvcal/test_calfits.py | 2 +- tests/uvcal/test_uvcal.py | 2 +- tests/uvdata/test_uvfits.py | 2 +- tests/uvdata/test_uvh5.py | 2 +- tests/uvflag/test_uvflag.py | 2 +- 39 files changed, 749 insertions(+), 464 deletions(-) create mode 100644 docs/functions.rst delete mode 100644 docs/utility_functions.rst rename src/pyuvdata/utils/{file_io => io}/__init__.py (100%) rename src/pyuvdata/utils/{file_io => io}/antpos.py (100%) create mode 100644 src/pyuvdata/utils/io/fhd.py rename src/pyuvdata/utils/{file_io => io}/fits.py (100%) rename src/pyuvdata/utils/{file_io => io}/hdf5.py (100%) rename src/pyuvdata/utils/{file_io => io}/ms.py (100%) rename tests/utils/{file_io => io}/__init__.py (75%) rename tests/utils/{file_io => io}/test_fits.py (85%) rename tests/utils/{file_io => io}/test_hdf5.py (97%) rename tests/utils/{file_io => io}/test_ms.py (99%) diff --git a/.flake8 
b/.flake8 index 5964dd59e6..cf31b96a6c 100644 --- a/.flake8 +++ b/.flake8 @@ -15,8 +15,10 @@ per-file-ignores = src/pyuvdata/uvdata/uvdata.py: N802 src/pyuvdata/uvbeam/mwa_beam.py: N802 src/pyuvdata/utils/coordinates.py: N802, N803 + src/pyuvdata/utils/io/__init__.py: A005 tests/utils/test_coordinates.py: D,N802 tests/__init__.py: D,N802 + tests/utils/io/__init__.py: A005 docstring-convention = numpy select = C,E,W,T4,B9,F,D,A,N,RST,B rst-roles = diff --git a/docs/developer_docs.rst b/docs/developer_docs.rst index a925e32219..620c09ff50 100644 --- a/docs/developer_docs.rst +++ b/docs/developer_docs.rst @@ -26,6 +26,9 @@ attribute shapes and values. .. autoclass:: pyuvdata.parameter.LocationParameter :members: +.. autoclass:: pyuvdata.parameter.SkyCoordParameter + :members: + .. autoclass:: pyuvdata.uvbase.UVBase :members: @@ -36,6 +39,10 @@ specific code. The read and write methods on the user classes convert between the user classes and the file-specific classes automatically as needed, so users generally do not need to interact with these classes, but developers may need to. + +UVData +****** + .. autoclass:: pyuvdata.uvdata.fhd.FHD :members: @@ -57,12 +64,21 @@ generally do not need to interact with these classes, but developers may need to .. autoclass:: pyuvdata.uvdata.uvh5.UVH5 :members: +UVCal +***** + .. autoclass:: pyuvdata.uvcal.calfits.CALFITS :members: +.. autoclass:: pyuvdata.uvcal.calh5.CalH5 + :members: + .. autoclass:: pyuvdata.uvcal.fhd_cal.FHDCal :members: +UVBeam +****** + .. autoclass:: pyuvdata.uvbeam.beamfits.BeamFITS :members: @@ -73,6 +89,159 @@ generally do not need to interact with these classes, but developers may need to :members: +.. _Developer Docs Utility Functions: + +Utility Functions +----------------- +Note that we are also listing private functions here (functions that start with +an underscore). While they are listed here, **they are not considered part of the +public API, so they can change without notice**. If you find that you need to rely +on one of them, let us know in a GitHub issue and we can consider making it part of +the public API. + + +File I/O Utility Functions +************************** + +Antenna position files +++++++++++++++++++++++ + +.. automodule:: pyuvdata.utils.io.antpos + :members: + :private-members: + :undoc-members: + +FHD files ++++++++++ + +.. automodule:: pyuvdata.utils.io.fhd + :members: + :private-members: + :undoc-members: + +FITS files +++++++++++ + +.. automodule:: pyuvdata.utils.io.fits + :members: + :private-members: + :undoc-members: + +HDF5 files +++++++++++ + +.. automodule:: pyuvdata.utils.io.hdf5 + :members: + :private-members: + :undoc-members: + +Measurement Set files ++++++++++++++++++++++ + +.. automodule:: pyuvdata.utils.io.ms + :members: + :private-members: + :undoc-members: + +Array collapse functions for flags +********************************** + +.. automodule:: pyuvdata.utils.array_collapse + :members: + :private-members: + :undoc-members: + +Functions for working with baseline numbers +******************************************* + +.. automodule:: pyuvdata.utils.bls + :members: + :private-members: + :undoc-members: + :ignore-module-all: + +Functions for working with the baseline-time axis +************************************************* + +.. automodule:: pyuvdata.utils.bltaxis + :members: + :private-members: + :undoc-members: + +Functions for working with telescope coordinates +************************************************ +
+.. automodule:: pyuvdata.utils.coordinates + :members: + :private-members: + :undoc-members: + :ignore-module-all: + +Functions for working with the frequency axis +********************************************* + +.. automodule:: pyuvdata.utils.frequency + :members: + :private-members: + :undoc-members: + +Functions for working with history +********************************** + +.. automodule:: pyuvdata.utils.history + :members: + :private-members: + :undoc-members: + +Functions for working with phase center catalogs +************************************************ + +.. automodule:: pyuvdata.utils.phase_center_catalog + :members: + :private-members: + :undoc-members: + +Functions for working with phasing +********************************** + +.. automodule:: pyuvdata.utils.phasing + :members: + :private-members: + :undoc-members: + +Functions for working with polarizations +**************************************** + +.. automodule:: pyuvdata.utils.pol + :members: + :private-members: + :undoc-members: + :ignore-module-all: + +Functions for working with baseline redundancies +************************************************ + +.. automodule:: pyuvdata.utils.redundancy + :members: + :private-members: + :undoc-members: + +Functions for working with times and LSTs +***************************************** + +.. automodule:: pyuvdata.utils.times + :members: + :private-members: + :undoc-members: + +General utility functions +************************* + +.. automodule:: pyuvdata.utils.tools + :members: + :private-members: + :undoc-members: + Mir Parser ---------- .. automodule:: pyuvdata.uvdata.mir_parser @@ -82,10 +251,8 @@ Mir Parser :members: -Functions ---------- - -.. autofunction:: pyuvdata.uvdata.fhd.get_fhd_history +Other Functions +--------------- .. autofunction:: pyuvdata.uvbeam.mwa_beam.P1sin diff --git a/docs/functions.rst b/docs/functions.rst new file mode 100644 index 0000000000..3e187d8427 --- /dev/null +++ b/docs/functions.rst @@ -0,0 +1,50 @@ +Useful Functions +================ +There are some functions that interact with multiple types of objects to apply +calibration solutions and flagging to other objects. + +.. autofunction:: pyuvdata.uvcalibrate + +.. autofunction:: pyuvdata.apply_uvflag
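+
+As a quick usage sketch, ``uvcalibrate`` applies the solutions in a UVCal
+object to a UVData object (the file names below are illustrative only):
+
+.. code-block:: python
+
+    from pyuvdata import UVCal, UVData, uvcalibrate
+
+    uvd = UVData.from_file("data.uvh5")
+    uvc = UVCal.from_file("solutions.calfits")
+
+    # return a new calibrated object, leaving the input unchanged
+    uvd_cal = uvcalibrate(uvd, uvc, inplace=False)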
+ +Utility Functions +----------------- +Some of our utility functions are widely used. The most commonly used ones are +noted here, for others see the developer docs: :ref:`developer docs utility functions`. + +.. autofunction:: pyuvdata.utils.baseline_to_antnums +.. autofunction:: pyuvdata.utils.antnums_to_baseline + +.. autofunction:: pyuvdata.utils.LatLonAlt_from_XYZ +.. autofunction:: pyuvdata.utils.XYZ_from_LatLonAlt +.. autofunction:: pyuvdata.utils.rotECEF_from_ECEF +.. autofunction:: pyuvdata.utils.ECEF_from_rotECEF +.. autofunction:: pyuvdata.utils.ENU_from_ECEF +.. autofunction:: pyuvdata.utils.ECEF_from_ENU + +.. autofunction:: pyuvdata.utils.polstr2num +.. autofunction:: pyuvdata.utils.polnum2str +.. autofunction:: pyuvdata.utils.jstr2num +.. autofunction:: pyuvdata.utils.jnum2str +.. autofunction:: pyuvdata.utils.conj_pol +.. autofunction:: pyuvdata.utils.x_orientation_pol_map +.. autofunction:: pyuvdata.utils.parse_polstr +.. autofunction:: pyuvdata.utils.parse_jpolstr + +.. autofunction:: pyuvdata.utils.get_lst_for_time + +.. autofunction:: pyuvdata.utils.uvw_track_generator + +.. autofunction:: pyuvdata.utils.collapse + +Polarization Dictionaries +------------------------- +We also define some useful dictionaries for mapping polarizations: + + * ``pyuvdata.utils.POL_STR2NUM_DICT``: maps visibility polarization strings to polarization integers + * ``pyuvdata.utils.POL_NUM2STR_DICT``: maps visibility polarization integers to polarization strings + * ``pyuvdata.utils.JONES_STR2NUM_DICT``: maps calibration polarization strings to polarization integers + * ``pyuvdata.utils.JONES_NUM2STR_DICT``: maps calibration polarization integers to polarization strings + * ``pyuvdata.utils.CONJ_POL_DICT``: maps how visibility polarizations change when antennas are swapped (visibilities are conjugated) + * ``pyuvdata.utils.XORIENTMAP``: maps x_orientation strings to canonical names
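+
+A minimal sketch of the polarization helpers (the integers follow the
+AIPS/UVFITS polarization convention used throughout pyuvdata):
+
+.. code-block:: python
+
+    from pyuvdata import utils
+
+    assert utils.polstr2num("xx") == -5
+    assert utils.polnum2str(-5) == "xx"
+    # conjugating a baseline swaps the cross-polarization labels
+    assert utils.conj_pol("xy") == "yx"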
diff --git a/docs/make_index.py b/docs/make_index.py index a80bbdb673..64f44cf366 100644 --- a/docs/make_index.py +++ b/docs/make_index.py @@ -52,7 +52,7 @@ def write_index_rst(readme_file=None, write_file=None): " telescope\n" " fast_uvh5_meta\n" " fast_calh5_meta\n" - " utility_functions\n" + " functions\n" " developer_docs\n" ) diff --git a/docs/utility_functions.rst b/docs/utility_functions.rst deleted file mode 100644 index 2de1554868..0000000000 --- a/docs/utility_functions.rst +++ /dev/null @@ -1,5 +0,0 @@ -Utility Functions -================= - -.. automodule:: pyuvdata.utils - :members: diff --git a/src/pyuvdata/parameter.py b/src/pyuvdata/parameter.py index 57a0ce328b..1dcc2ec59e 100644 --- a/src/pyuvdata/parameter.py +++ b/src/pyuvdata/parameter.py @@ -31,7 +31,7 @@ hasmoon = False -__all__ = ["UVParameter", "AngleParameter", "LocationParameter"] +__all__ = ["UVParameter", "AngleParameter", "LocationParameter", "SkyCoordParameter"] def _get_generic_type(expected_type, strict_type_check=False): @@ -1228,6 +1228,17 @@ def __init__( ) def __eq__(self, other, *, silent=False): + """ + Test if classes match and values are within tolerances. + + Parameters + ---------- + other : UVParameter or subclass + The other UVParameter to compare with this one. + silent : bool + When set to False (default), descriptive text is printed out when parameters + do not match. If set to True, this text is not printed. + """ if not issubclass(self.value.__class__, SkyCoord) or not issubclass( other.value.__class__, SkyCoord ): diff --git a/src/pyuvdata/telescopes.py b/src/pyuvdata/telescopes.py index a03b364b47..8c6ce424e3 100644 --- a/src/pyuvdata/telescopes.py +++ b/src/pyuvdata/telescopes.py @@ -20,8 +20,8 @@ from . import parameter as uvp from . import utils from .data import DATA_PATH -from .utils.file_io import antpos -from .utils.file_io import hdf5 as hdf5_utils +from .utils.io import antpos +from .utils.io import hdf5 as hdf5_utils from .uvbase import UVBase __all__ = ["Telescope", "known_telescopes", "known_telescope_location", "get_telescope"] diff --git a/src/pyuvdata/utils/__init__.py b/src/pyuvdata/utils/__init__.py index 1769bb9f97..b477b06be5 100644 --- a/src/pyuvdata/utils/__init__.py +++ b/src/pyuvdata/utils/__init__.py @@ -22,9 +22,9 @@ from . import bls # noqa from . import bltaxis # noqa from . import coordinates # noqa -from . import file_io # noqa from . import frequency # noqa from . import history # noqa +from . import io # noqa from . import phase_center_catalog # noqa from . import phasing # noqa from . import pol # noqa @@ -63,7 +63,7 @@ def _fits_gethduaxis(hdu, axis): """ Make axis arrays for fits files. - Deprecated. Use pyuvdata.utils.file_io.fits._gethduaxis. + Deprecated. Use pyuvdata.utils.io.fits._gethduaxis. Parameters ---------- hdu : astropy.io.fits HDU object The HDU to make an axis array for. axis : int The axis number of interest (1-based). Returns ------- ndarray of float Array of values for the specified axis. """ - from .file_io.fits import _gethduaxis + from .io.fits import _gethduaxis warnings.warn( "The _fits_gethduaxis function has moved, please import it as " - "pyuvdata.utils.file_io.fits._gethduaxis. This warnings will become an " + "pyuvdata.utils.io.fits._gethduaxis. This warning will become an " "error in version 3.2", DeprecationWarning, ) @@ -94,7 +94,7 @@ def _fits_indexhdus(hdulist): """ Get a dict of table names and HDU numbers from a FITS HDU list. - Deprecated. Use pyuvdata.utils.file_io.fits._indexhdus. + Deprecated. Use pyuvdata.utils.io.fits._indexhdus. Parameters ---------- hdulist : list of astropy.io.fits HDU objects List of HDUs to search through. Returns ------- hdu_names : dict dictionary with table names as keys and HDU number as values. """ - from .file_io.fits import _indexhdus + from .io.fits import _indexhdus warnings.warn( "The _fits_indexhdus function has moved, please import it as " - "pyuvdata.utils.file_io.fits._indexhdus. This warnings will become an " + "pyuvdata.utils.io.fits._indexhdus. This warning will become an " "error in version 3.2", DeprecationWarning, ) diff --git a/src/pyuvdata/utils/file_io/__init__.py b/src/pyuvdata/utils/io/__init__.py similarity index 100% rename from src/pyuvdata/utils/file_io/__init__.py rename to src/pyuvdata/utils/io/__init__.py diff --git a/src/pyuvdata/utils/file_io/antpos.py b/src/pyuvdata/utils/io/antpos.py similarity index 100% rename from src/pyuvdata/utils/file_io/antpos.py rename to src/pyuvdata/utils/io/antpos.py diff --git a/src/pyuvdata/utils/io/fhd.py b/src/pyuvdata/utils/io/fhd.py new file mode 100644 index 0000000000..235c862e5f --- /dev/null +++ b/src/pyuvdata/utils/io/fhd.py @@ -0,0 +1,415 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (c) 2024 Radio Astronomy Software Group +# Licensed under the 2-clause BSD License +"""Utilities for working with FHD files.""" +import os +import warnings + +import numpy as np +from astropy import units +from astropy.coordinates import EarthLocation +from scipy.io import readsav + +from ... import Telescope +from .. import coordinates + + +def fhd_filenames( + *, + vis_files: list[str] | np.ndarray | str | None = None, + params_file: str | None = None, + obs_file: str | None = None, + flags_file: str | None = None, + layout_file: str | None = None, + settings_file: str | None = None, + cal_file: str | None = None, +): + """ + Check the FHD input files for matching prefixes and folders. + + Parameters + ---------- + vis_files : str or array-like of str, optional + FHD visibility save file names, can be data or model visibilities. + params_file : str + FHD params save file name. + obs_file : str + FHD obs save file name. + flags_file : str + FHD flag save file name. + layout_file : str + FHD layout save file name. + settings_file : str + FHD settings text file name. + cal_file : str + FHD cal save file name. + + Returns + ------- + list of str + A list of file basenames to be used in the object `filename` attribute.
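+
+    Examples
+    --------
+    A hypothetical call with the standard FHD folder layout (the paths here
+    are illustrative only)::
+
+        basenames = fhd_filenames(
+            vis_files=["run1/vis_data/obs1_vis_XX.sav"],
+            params_file="run1/metadata/obs1_params.sav",
+            settings_file="run1/metadata/obs1_settings.txt",
+        )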
+ + """ + file_types = { + "vis": {"files": vis_files, "suffix": "_vis", "sub_folder": "vis_data"}, + "cal": {"files": cal_file, "suffix": "_cal", "sub_folder": "calibration"}, + "flags": {"files": flags_file, "suffix": "_flags", "sub_folder": "vis_data"}, + "layout": {"files": layout_file, "suffix": "_layout", "sub_folder": "metadata"}, + "obs": {"files": obs_file, "suffix": "_obs", "sub_folder": "metadata"}, + "params": {"files": params_file, "suffix": "_params", "sub_folder": "metadata"}, + "settings": { + "files": settings_file, + "suffix": "_settings", + "sub_folder": "metadata", + }, + } + + basename_list = [] + prefix_list = [] + folder_list = [] + missing_suffix = [] + missing_subfolder = [] + for ftype, fdict in file_types.items(): + if fdict["files"] is None: + continue + if isinstance(fdict["files"], (list, np.ndarray)): + these_files = fdict["files"] + else: + these_files = [fdict["files"]] + + for fname in these_files: + dirname, basename = os.path.split(fname) + basename_list.append(basename) + if fdict["suffix"] in basename: + suffix_loc = basename.find(fdict["suffix"]) + prefix_list.append(basename[:suffix_loc]) + else: + missing_suffix.append(ftype) + fhd_folder, subfolder = os.path.split(dirname) + if subfolder == fdict["sub_folder"]: + folder_list.append(fhd_folder) + else: + missing_subfolder.append(ftype) + + if len(missing_suffix) > 0: + warnings.warn( + "Some FHD input files do not have the expected suffix so prefix " + f"matching could not be done. The affected file types are: {missing_suffix}" + ) + if len(missing_subfolder) > 0: + warnings.warn( + "Some FHD input files do not have the expected subfolder so FHD " + "folder matching could not be done. The affected file types are: " + f"{missing_subfolder}" + ) + + if np.unique(prefix_list).size > 1: + warnings.warn( + "The FHD input files do not all have matching prefixes, so they " + "may not be for the same data." + ) + if np.unique(folder_list).size > 1: + warnings.warn( + "The FHD input files do not all have the same parent folder, so " + "they may not be for the same FHD run." + ) + + return basename_list + + +def get_fhd_history(settings_file, *, return_user=False): + """ + Small function to get the important history from an FHD settings text file. 
+ + Includes information about the command line call, the user, machine name and date + + Parameters + ---------- + settings_file : str + FHD settings file name + return_user : bool + optionally return the username who ran FHD + + Returns + ------- + history : str + string of history extracted from the settings file + user : str + Only returned if return_user is True + + """ + with open(settings_file, "r") as f: + settings_lines = f.readlines() + main_loc = None + command_loc = None + obs_loc = None + user_line = None + for ind, line in enumerate(settings_lines): + if line.startswith("##MAIN"): + main_loc = ind + if line.startswith("##COMMAND_LINE"): + command_loc = ind + if line.startswith("##OBS"): + obs_loc = ind + if line.startswith("User"): + user_line = ind + if ( + main_loc is not None + and command_loc is not None + and obs_loc is not None + and user_line is not None + ): + break + + main_lines = settings_lines[main_loc + 1 : command_loc] + command_lines = settings_lines[command_loc + 1 : obs_loc] + history_lines = ["FHD history\n"] + main_lines + command_lines + for ind, line in enumerate(history_lines): + history_lines[ind] = line.rstrip().replace("\t", " ") + history = "\n".join(history_lines) + user = settings_lines[user_line].split()[1] + + if return_user: + return history, user + else: + return history + + +def _xyz_close(xyz1, xyz2, loc_tols): + return np.allclose(xyz1, xyz2, rtol=loc_tols[0], atol=loc_tols[1]) + + +def _latlonalt_close(latlonalt1, latlonalt2, radian_tol, loc_tols): + latlon_close = np.allclose( + np.array(latlonalt1[0:2]), np.array(latlonalt2[0:2]), rtol=0, atol=radian_tol + ) + alt_close = np.isclose( + latlonalt1[2], latlonalt2[2], rtol=loc_tols[0], atol=loc_tols[1] + ) + return latlon_close and alt_close + + +def get_fhd_layout_info( + *, + layout_file, + telescope_name, + latitude, + longitude, + altitude, + obs_tile_names, + run_check_acceptability=True, +): + """ + Get the telescope and antenna positions from an FHD layout file. + + Parameters + ---------- + layout_file : str + FHD layout file name + telescope_name : str + Telescope name + latitude : float + telescope latitude in radians + longitude : float + telescope longitude in radians + altitude : float + telescope altitude in meters + obs_tile_names : array-like of str + Tile names from the bl_info structure inside the obs structure. + Only used if telescope_name is "mwa". + run_check_acceptability : bool + Option to check acceptable range of the telescope locations. + + Returns + ------- + dict + A dictionary of parameters from the layout file to assign to the object. The + keys are: + + * telescope_location : Telescope location as an astropy EarthLocation object + * Nants_telescope : Number of antennas in the telescope (int) + * antenna_positions : Antenna positions in relative ECEF, + shape (Nants_telescope, 3) (float) + * antenna_names : Antenna names, length Nants_telescope (list of str) + * antenna_numbers : Antenna numbers, shape (Nants_telescope,) (array of int) + * gst0 : Greenwich sidereal time at midnight on reference date. (float) + * rdate : Reference date for gst0. (str) + * earth_omega : Earth's rotation rate in degrees per day. (float) + * dut1 : DUT1 (google it) AIPS 117 calls it UT1UTC. (float) + * timesys : Time system (should only ever be UTC). (str) + * diameters : Antenna diameters in meters. shape (Nants_telescope,) (float) + * extra_keywords : Dictionary of extra keywords to preserve on the object.
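+
+    Examples
+    --------
+    A hypothetical call for an MWA run (the file name and coordinate values
+    are illustrative only)::
+
+        layout_info = get_fhd_layout_info(
+            layout_file="run1/metadata/obs1_layout.sav",
+            telescope_name="mwa",
+            latitude=-0.4661,  # radians
+            longitude=2.0362,  # radians
+            altitude=377.8,  # meters
+            obs_tile_names=["Tile011", "Tile012"],
+        )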
+ + """ + layout_dict = readsav(layout_file, python_dict=True) + layout = layout_dict["layout"] + + layout_fields = [name.lower() for name in layout.dtype.names] + # Try to get the telescope location from the layout file & + # compare it to the position from the obs structure. + arr_center = layout["array_center"][0] + layout_fields.remove("array_center") + + xyz_telescope_frame = layout["coordinate_frame"][0].decode("utf8").lower() + layout_fields.remove("coordinate_frame") + + if xyz_telescope_frame.strip() == "itrf": + # compare to lat/lon/alt + location_latlonalt = coordinates.XYZ_from_LatLonAlt( + latitude, longitude, altitude + ) + latlonalt_arr_center = coordinates.LatLonAlt_from_XYZ( + arr_center, check_acceptability=run_check_acceptability + ) + # tolerances are limited by the fact that lat/lon/alt are only saved + # as floats in the obs structure + loc_tols = (0, 0.1) # in meters + radian_tol = 10.0 * 2 * np.pi * 1e-3 / (60.0 * 60.0 * 360.0) # 10mas + # check both lat/lon/alt and xyz because of subtle differences + # in tolerances + if _xyz_close(location_latlonalt, arr_center, loc_tols) or _latlonalt_close( + (latitude, longitude, altitude), latlonalt_arr_center, radian_tol, loc_tols + ): + telescope_location = EarthLocation.from_geocentric( + *location_latlonalt, unit="m" + ) + else: + # values do not agree with each other to within the tolerances. + # this is a known issue with FHD runs on cotter uvfits + # files for the MWA + # compare with the known_telescopes values + try: + telescope_obj = Telescope.from_known_telescopes(telescope_name) + except ValueError: + telescope_obj = None + # start warning message + message = ( + "Telescope location derived from obs lat/lon/alt " + "values does not match the location in the layout file." + ) + + if telescope_obj is not None: + message += " Using the value from known_telescopes." + telescope_location = telescope_obj.location + else: + message += ( + " Telescope is not in known_telescopes. " + "Defaulting to using the obs derived values." + ) + telescope_location = EarthLocation.from_geocentric( + *location_latlonalt, unit="m" + ) + # issue warning + warnings.warn(message) + else: + telescope_location = EarthLocation.from_geodetic( + lat=latitude * units.rad, + lon=longitude * units.rad, + height=altitude * units.m, + ) + + # The FHD positions derive directly from uvfits, so they are in the rotated + # ECEF frame and must be converted to ECEF + rot_ecef_positions = layout["antenna_coords"][0] + layout_fields.remove("antenna_coords") + # use the longitude from the layout file because that's how the antenna + # positions were calculated + latitude, longitude, altitude = coordinates.LatLonAlt_from_XYZ( + arr_center, check_acceptability=run_check_acceptability + ) + antenna_positions = coordinates.ECEF_from_rotECEF(rot_ecef_positions, longitude) + + antenna_names = [ant.decode("utf8") for ant in layout["antenna_names"][0].tolist()] + layout_fields.remove("antenna_names") + + # make these 0-indexed (rather than one indexed) + antenna_numbers = layout["antenna_numbers"][0] + layout_fields.remove("antenna_numbers") + + Nants_telescope = int(layout["n_antenna"][0]) + layout_fields.remove("n_antenna") + + if telescope_name.lower() == "mwa": + # check that obs.baseline_info.tile_names match the antenna names + # (accounting for possible differences in white space) + # this only applies for MWA because the tile_names come from + # metafits files. 
layout["antenna_names"] comes from the antenna table + # in the uvfits file and will be used if no metafits was submitted + if [ant.strip() for ant in obs_tile_names] != [ + ant.strip() for ant in antenna_names + ]: + warnings.warn( + "tile_names from obs structure does not match " + "antenna_names from layout" + ) + + gst0 = float(layout["gst0"][0]) + layout_fields.remove("gst0") + + if layout["ref_date"][0] != "": + rdate = layout["ref_date"][0].decode("utf8").lower() + else: + rdate = None + layout_fields.remove("ref_date") + + earth_omega = float(layout["earth_degpd"][0]) + layout_fields.remove("earth_degpd") + + dut1 = float(layout["dut1"][0]) + layout_fields.remove("dut1") + + timesys = layout["time_system"][0].decode("utf8").upper().strip() + layout_fields.remove("time_system") + + if "diameters" in layout_fields: + diameters = np.asarray(layout["diameters"][0]) + layout_fields.remove("diameters") + else: + diameters = None + + extra_keywords = {} + # ignore some fields, put everything else in extra_keywords + layout_fields_ignore = [ + "diff_utc", + "pol_type", + "n_pol_cal_params", + "mount_type", + "axis_offset", + "pola", + "pola_orientation", + "pola_cal_params", + "polb", + "polb_orientation", + "polb_cal_params", + "beam_fwhm", + ] + for field in layout_fields_ignore: + if field in layout_fields: + layout_fields.remove(field) + for field in layout_fields: + keyword = field + if len(keyword) > 8: + keyword = field.replace("_", "") + + value = layout[field][0] + if isinstance(value, bytes): + value = value.decode("utf8") + + extra_keywords[keyword.upper()] = value + + layout_param_dict = { + "telescope_location": telescope_location, + "Nants_telescope": Nants_telescope, + "antenna_positions": antenna_positions, + "antenna_names": antenna_names, + "antenna_numbers": antenna_numbers, + "gst0": gst0, + "rdate": rdate, + "earth_omega": earth_omega, + "dut1": dut1, + "timesys": timesys, + "diameters": diameters, + "extra_keywords": extra_keywords, + } + + return layout_param_dict diff --git a/src/pyuvdata/utils/file_io/fits.py b/src/pyuvdata/utils/io/fits.py similarity index 100% rename from src/pyuvdata/utils/file_io/fits.py rename to src/pyuvdata/utils/io/fits.py diff --git a/src/pyuvdata/utils/file_io/hdf5.py b/src/pyuvdata/utils/io/hdf5.py similarity index 100% rename from src/pyuvdata/utils/file_io/hdf5.py rename to src/pyuvdata/utils/io/hdf5.py diff --git a/src/pyuvdata/utils/file_io/ms.py b/src/pyuvdata/utils/io/ms.py similarity index 100% rename from src/pyuvdata/utils/file_io/ms.py rename to src/pyuvdata/utils/io/ms.py diff --git a/src/pyuvdata/utils/pol.py b/src/pyuvdata/utils/pol.py index d70250e0b8..3c01de1e5e 100644 --- a/src/pyuvdata/utils/pol.py +++ b/src/pyuvdata/utils/pol.py @@ -23,6 +23,7 @@ "jnum2str", "conj_pol", "_x_orientation_rep_dict", + "x_orientation_pol_map", "parse_polstr", "parse_jpolstr", ] @@ -84,7 +85,36 @@ def _x_orientation_rep_dict(x_orientation): - """Create replacement dict based on x_orientation.""" + """ + Create replacement dict based on x_orientation. + + Deprecated. Use x_orientation_pol_map instead. + + """ + warnings.warn( + "This function (_x_orientation_rep_dict) is deprecated, use " + "pyuvdata.utils.pol.x_orientation_pol_map instead.", + DeprecationWarning, + ) + + return x_orientation_pol_map(x_orientation) + + +def x_orientation_pol_map(x_orientation: str) -> dict: + """ + Return map from "x" and "y" pols to "e" and "n" based on x_orientation. 
+ + Parameters + ---------- + x_orientation : str + String giving the x_orientation, one of "east" or "north". + + Returns + ------- + dict + Dictionary mapping "x" and "y" pols to "e" and "n" based on x_orientation. + + """ try: if XORIENTMAP[x_orientation.lower()] == "east": return {"x": "e", "y": "n"} @@ -151,7 +181,7 @@ def polstr2num(pol: str | IterableType[str], *, x_orientation: str | None = None dict_use = deepcopy(POL_STR2NUM_DICT) if x_orientation is not None: try: - rep_dict = _x_orientation_rep_dict(x_orientation) + rep_dict = x_orientation_pol_map(x_orientation) for key, value in POL_STR2NUM_DICT.items(): new_key = key.replace("x", rep_dict["x"]).replace("y", rep_dict["y"]) dict_use[new_key] = value @@ -206,7 +236,7 @@ def polnum2str(num, *, x_orientation=None): dict_use = deepcopy(POL_NUM2STR_DICT) if x_orientation is not None: try: - rep_dict = _x_orientation_rep_dict(x_orientation) + rep_dict = x_orientation_pol_map(x_orientation) for key, value in POL_NUM2STR_DICT.items(): new_val = value.replace("x", rep_dict["x"]).replace("y", rep_dict["y"]) dict_use[key] = new_val @@ -256,7 +286,7 @@ def jstr2num(jstr, *, x_orientation=None): dict_use = deepcopy(JONES_STR2NUM_DICT) if x_orientation is not None: try: - rep_dict = _x_orientation_rep_dict(x_orientation) + rep_dict = x_orientation_pol_map(x_orientation) for key, value in JONES_STR2NUM_DICT.items(): new_key = key.replace("x", rep_dict["x"]).replace("y", rep_dict["y"]) dict_use[new_key] = value @@ -307,7 +337,7 @@ def jnum2str(jnum, *, x_orientation=None): dict_use = deepcopy(JONES_NUM2STR_DICT) if x_orientation is not None: try: - rep_dict = _x_orientation_rep_dict(x_orientation) + rep_dict = x_orientation_pol_map(x_orientation) for key, value in JONES_NUM2STR_DICT.items(): new_val = value.replace("x", rep_dict["x"]).replace("y", rep_dict["y"]) dict_use[key] = new_val diff --git a/src/pyuvdata/uvbeam/beamfits.py b/src/pyuvdata/uvbeam/beamfits.py index ee40b4dde6..cfd155584e 100644 --- a/src/pyuvdata/uvbeam/beamfits.py +++ b/src/pyuvdata/uvbeam/beamfits.py @@ -12,7 +12,7 @@ from .. import utils from ..docstrings import copy_replace_short_description -from ..utils.file_io import fits as fits_utils +from ..utils.io import fits as fits_utils from . import UVBeam __all__ = ["BeamFITS"] diff --git a/src/pyuvdata/uvbeam/uvbeam.py b/src/pyuvdata/uvbeam/uvbeam.py index 39ad4ce6d3..1e48a493b1 100644 --- a/src/pyuvdata/uvbeam/uvbeam.py +++ b/src/pyuvdata/uvbeam/uvbeam.py @@ -3133,7 +3133,7 @@ def select( ) x_orient_dict = {} if beam_object.x_orientation is not None: - for key, value in utils._x_orientation_rep_dict( + for key, value in utils.x_orientation_pol_map( beam_object.x_orientation ).items(): if key in beam_object.feed_array: diff --git a/src/pyuvdata/uvcal/calfits.py b/src/pyuvdata/uvcal/calfits.py index 496e8b0d9f..251a275812 100644 --- a/src/pyuvdata/uvcal/calfits.py +++ b/src/pyuvdata/uvcal/calfits.py @@ -20,7 +20,7 @@ from .. import utils from ..docstrings import copy_replace_short_description -from ..utils.file_io import fits as fits_utils +from ..utils.io import fits as fits_utils from . import UVCal __all__ = ["CALFITS"] diff --git a/src/pyuvdata/uvcal/calh5.py b/src/pyuvdata/uvcal/calh5.py index f5908532f2..df81922b33 100644 --- a/src/pyuvdata/uvcal/calh5.py +++ b/src/pyuvdata/uvcal/calh5.py @@ -15,7 +15,7 @@ from .. 
import utils from ..docstrings import copy_replace_short_description from ..telescopes import Telescope -from ..utils.file_io import hdf5 as hdf5_utils +from ..utils.io import hdf5 as hdf5_utils from .uvcal import UVCal hdf5plugin_present = True diff --git a/src/pyuvdata/uvcal/fhd_cal.py b/src/pyuvdata/uvcal/fhd_cal.py index 664fad98d8..894ff24130 100644 --- a/src/pyuvdata/uvcal/fhd_cal.py +++ b/src/pyuvdata/uvcal/fhd_cal.py @@ -14,7 +14,7 @@ from .. import utils from ..docstrings import copy_replace_short_description -from ..uvdata.fhd import fhd_filenames, get_fhd_history, get_fhd_layout_info +from ..utils.io import fhd as fhd_utils from . import UVCal __all__ = ["FHDCal"] @@ -53,7 +53,7 @@ def read_fhd_cal( if not read_data and settings_file is None: raise ValueError("A settings_file must be provided if read_data is False.") - filenames = fhd_filenames( + filenames = fhd_utils.fhd_filenames( obs_file=obs_file, layout_file=layout_file, settings_file=settings_file, @@ -160,7 +160,7 @@ def read_fhd_cal( for ant in obs_tile_names ] - layout_param_dict = get_fhd_layout_info( + layout_param_dict = fhd_utils.get_fhd_layout_info( layout_file=layout_file, telescope_name=self.telescope.name, latitude=latitude, @@ -246,7 +246,7 @@ def read_fhd_cal( "[" + ", ".join(str(int(d)) for d in obs_data["delays"][0]) + "]" ) if settings_file is not None: - self.history, self.observer = get_fhd_history( + self.history, self.observer = fhd_utils.get_fhd_history( settings_file, return_user=True ) else: diff --git a/src/pyuvdata/uvcal/ms_cal.py b/src/pyuvdata/uvcal/ms_cal.py index 844d396e24..eea7deb33a 100644 --- a/src/pyuvdata/uvcal/ms_cal.py +++ b/src/pyuvdata/uvcal/ms_cal.py @@ -13,7 +13,7 @@ from .. import utils from ..docstrings import copy_replace_short_description -from ..utils.file_io import ms as ms_utils +from ..utils.io import ms as ms_utils from . import UVCal __all__ = ["MSCal"] diff --git a/src/pyuvdata/uvdata/fhd.py b/src/pyuvdata/uvdata/fhd.py index 1f82d9bee1..8947f06c21 100644 --- a/src/pyuvdata/uvdata/fhd.py +++ b/src/pyuvdata/uvdata/fhd.py @@ -5,7 +5,6 @@ """Class for reading FHD save files.""" from __future__ import annotations -import os import warnings import numpy as np @@ -15,410 +14,12 @@ from docstring_parser import DocstringStyle from scipy.io import readsav -from .. import Telescope, utils +from .. import utils from ..docstrings import copy_replace_short_description +from ..utils.io import fhd as fhd_utils from . import UVData -__all__ = ["get_fhd_history", "get_fhd_layout_info", "FHD"] - - -def fhd_filenames( - *, - vis_files: list[str] | np.ndarray | str | None = None, - params_file: str | None = None, - obs_file: str | None = None, - flags_file: str | None = None, - layout_file: str | None = None, - settings_file: str | None = None, - cal_file: str | None = None, -): - """ - Check the FHD input files for matching prefixes and folders. - - Parameters - ---------- - vis_files : str or array-like of str, optional - FHD visibility save file names, can be data or model visibilities. - params_file : str - FHD params save file name. - obs_file : str - FHD obs save file name. - flags_file : str - FHD flag save file name. - layout_file : str - FHD layout save file name. - layout_file : str - FHD layout save file name. - settings_file : str - FHD settings text file name. - cal_file : str - FHD cal save file name. - - Returns - ------- - A list of file basenames to be used in the object `filename` attribute. 
- - """ - file_types = { - "vis": {"files": vis_files, "suffix": "_vis", "sub_folder": "vis_data"}, - "cal": {"files": cal_file, "suffix": "_cal", "sub_folder": "calibration"}, - "flags": {"files": flags_file, "suffix": "_flags", "sub_folder": "vis_data"}, - "layout": {"files": layout_file, "suffix": "_layout", "sub_folder": "metadata"}, - "obs": {"files": obs_file, "suffix": "_obs", "sub_folder": "metadata"}, - "params": {"files": params_file, "suffix": "_params", "sub_folder": "metadata"}, - "settings": { - "files": settings_file, - "suffix": "_settings", - "sub_folder": "metadata", - }, - } - - basename_list = [] - prefix_list = [] - folder_list = [] - missing_suffix = [] - missing_subfolder = [] - for ftype, fdict in file_types.items(): - if fdict["files"] is None: - continue - if isinstance(fdict["files"], (list, np.ndarray)): - these_files = fdict["files"] - else: - these_files = [fdict["files"]] - - for fname in these_files: - dirname, basename = os.path.split(fname) - basename_list.append(basename) - if fdict["suffix"] in basename: - suffix_loc = basename.find(fdict["suffix"]) - prefix_list.append(basename[:suffix_loc]) - else: - missing_suffix.append(ftype) - fhd_folder, subfolder = os.path.split(dirname) - if subfolder == fdict["sub_folder"]: - folder_list.append(fhd_folder) - else: - missing_subfolder.append(ftype) - - if len(missing_suffix) > 0: - warnings.warn( - "Some FHD input files do not have the expected suffix so prefix " - f"matching could not be done. The affected file types are: {missing_suffix}" - ) - if len(missing_subfolder) > 0: - warnings.warn( - "Some FHD input files do not have the expected subfolder so FHD " - "folder matching could not be done. The affected file types are: " - f"{missing_subfolder}" - ) - - if np.unique(prefix_list).size > 1: - warnings.warn( - "The FHD input files do not all have matching prefixes, so they " - "may not be for the same data." - ) - if np.unique(folder_list).size > 1: - warnings.warn( - "The FHD input files do not all have the same parent folder, so " - "they may not be for the same FHD run." - ) - - return basename_list - - -def get_fhd_history(settings_file, *, return_user=False): - """ - Small function to get the important history from an FHD settings text file. 
- - Includes information about the command line call, the user, machine name and date - - Parameters - ---------- - settings_file : str - FHD settings file name - return_user : bool - optionally return the username who ran FHD - - Returns - ------- - history : str - string of history extracted from the settings file - user : str - Only returned if return_user is True - - """ - with open(settings_file, "r") as f: - settings_lines = f.readlines() - main_loc = None - command_loc = None - obs_loc = None - user_line = None - for ind, line in enumerate(settings_lines): - if line.startswith("##MAIN"): - main_loc = ind - if line.startswith("##COMMAND_LINE"): - command_loc = ind - if line.startswith("##OBS"): - obs_loc = ind - if line.startswith("User"): - user_line = ind - if ( - main_loc is not None - and command_loc is not None - and obs_loc is not None - and user_line is not None - ): - break - - main_lines = settings_lines[main_loc + 1 : command_loc] - command_lines = settings_lines[command_loc + 1 : obs_loc] - history_lines = ["FHD history\n"] + main_lines + command_lines - for ind, line in enumerate(history_lines): - history_lines[ind] = line.rstrip().replace("\t", " ") - history = "\n".join(history_lines) - user = settings_lines[user_line].split()[1] - - if return_user: - return history, user - else: - return history - - -def _xyz_close(xyz1, xyz2, loc_tols): - return np.allclose(xyz1, xyz2, rtol=loc_tols[0], atol=loc_tols[1]) - - -def _latlonalt_close(latlonalt1, latlonalt2, radian_tol, loc_tols): - latlon_close = np.allclose( - np.array(latlonalt1[0:2]), np.array(latlonalt2[0:2]), rtol=0, atol=radian_tol - ) - alt_close = np.isclose( - latlonalt1[2], latlonalt2[2], rtol=loc_tols[0], atol=loc_tols[1] - ) - return latlon_close and alt_close - - -def get_fhd_layout_info( - *, - layout_file, - telescope_name, - latitude, - longitude, - altitude, - obs_tile_names, - run_check_acceptability=True, -): - """ - Get the telescope and antenna positions from an FHD layout file. - - Parameters - ---------- - layout_file : str - FHD layout file name - telescope_name : str - Telescope name - latitude : float - telescope latitude in radians - longitude : float - telescope longitude in radians - altitude : float - telescope altitude in meters - obs_tile_names : array-like of str - Tile names from the bl_info structure inside the obs structure. - Only used if telescope_name is "mwa". - run_check_acceptability : bool - Option to check acceptable range of the telescope locations. - - Returns - ------- - dict - A dictionary of parameters from the layout file to assign to the object. The - keys are: - - * telescope_xyz : Telescope location in ECEF, shape (3,) (float) - * Nants_telescope : Number of antennas in the telescope (int) - * antenna_postions : Antenna positions in relative ECEF, - shape (Nants_telescope, 3) (float) - * antenna_names : Antenna names, length Nants_telescope (list of str) - * antenna_numbers : Antenna numbers, shape (Nants_telescope,) (array of int) - * gst0 : Greenwich sidereal time at midnight on reference date. (float) - * earth_omega : Earth's rotation rate in degrees per day. (float) - * dut1 : DUT1 (google it) AIPS 117 calls it UT1UTC. (float) - * timesys : Time system (should only ever be UTC). (str) - * diameters : Antenna diameters in meters. shape (Nants_telescope,) (float) - * extra_keywords : Dictionary of extra keywords to preserve on the object. 
- - """ - layout_dict = readsav(layout_file, python_dict=True) - layout = layout_dict["layout"] - - layout_fields = [name.lower() for name in layout.dtype.names] - # Try to get the telescope location from the layout file & - # compare it to the position from the obs structure. - arr_center = layout["array_center"][0] - layout_fields.remove("array_center") - - xyz_telescope_frame = layout["coordinate_frame"][0].decode("utf8").lower() - layout_fields.remove("coordinate_frame") - - if xyz_telescope_frame.strip() == "itrf": - # compare to lat/lon/alt - location_latlonalt = utils.XYZ_from_LatLonAlt(latitude, longitude, altitude) - latlonalt_arr_center = utils.LatLonAlt_from_XYZ( - arr_center, check_acceptability=run_check_acceptability - ) - # tolerances are limited by the fact that lat/lon/alt are only saved - # as floats in the obs structure - loc_tols = (0, 0.1) # in meters - radian_tol = 10.0 * 2 * np.pi * 1e-3 / (60.0 * 60.0 * 360.0) # 10mas - # check both lat/lon/alt and xyz because of subtle differences - # in tolerances - if _xyz_close(location_latlonalt, arr_center, loc_tols) or _latlonalt_close( - (latitude, longitude, altitude), latlonalt_arr_center, radian_tol, loc_tols - ): - telescope_location = EarthLocation.from_geocentric( - *location_latlonalt, unit="m" - ) - else: - # values do not agree with each other to within the tolerances. - # this is a known issue with FHD runs on cotter uvfits - # files for the MWA - # compare with the known_telescopes values - try: - telescope_obj = Telescope.from_known_telescopes(telescope_name) - except ValueError: - telescope_obj = None - # start warning message - message = ( - "Telescope location derived from obs lat/lon/alt " - "values does not match the location in the layout file." - ) - - if telescope_obj is not None: - message += " Using the value from known_telescopes." - telescope_location = telescope_obj.location - else: - message += ( - " Telescope is not in known_telescopes. " - "Defaulting to using the obs derived values." - ) - telescope_location = EarthLocation.from_geocentric( - *location_latlonalt, unit="m" - ) - # issue warning - warnings.warn(message) - else: - telescope_location = EarthLocation.from_geodetic( - lat=latitude * units.rad, - lon=longitude * units.rad, - height=altitude * units.m, - ) - - # The FHD positions derive directly from uvfits, so they are in the rotated - # ECEF frame and must be converted to ECEF - rot_ecef_positions = layout["antenna_coords"][0] - layout_fields.remove("antenna_coords") - # use the longitude from the layout file because that's how the antenna - # positions were calculated - latitude, longitude, altitude = utils.LatLonAlt_from_XYZ( - arr_center, check_acceptability=run_check_acceptability - ) - antenna_positions = utils.ECEF_from_rotECEF(rot_ecef_positions, longitude) - - antenna_names = [ant.decode("utf8") for ant in layout["antenna_names"][0].tolist()] - layout_fields.remove("antenna_names") - - # make these 0-indexed (rather than one indexed) - antenna_numbers = layout["antenna_numbers"][0] - layout_fields.remove("antenna_numbers") - - Nants_telescope = int(layout["n_antenna"][0]) - layout_fields.remove("n_antenna") - - if telescope_name.lower() == "mwa": - # check that obs.baseline_info.tile_names match the antenna names - # (accounting for possible differences in white space) - # this only applies for MWA because the tile_names come from - # metafits files. 
layout["antenna_names"] comes from the antenna table - # in the uvfits file and will be used if no metafits was submitted - if [ant.strip() for ant in obs_tile_names] != [ - ant.strip() for ant in antenna_names - ]: - warnings.warn( - "tile_names from obs structure does not match " - "antenna_names from layout" - ) - - gst0 = float(layout["gst0"][0]) - layout_fields.remove("gst0") - - if layout["ref_date"][0] != "": - rdate = layout["ref_date"][0].decode("utf8").lower() - else: - rdate = None - layout_fields.remove("ref_date") - - earth_omega = float(layout["earth_degpd"][0]) - layout_fields.remove("earth_degpd") - - dut1 = float(layout["dut1"][0]) - layout_fields.remove("dut1") - - timesys = layout["time_system"][0].decode("utf8").upper().strip() - layout_fields.remove("time_system") - - if "diameters" in layout_fields: - diameters = np.asarray(layout["diameters"][0]) - layout_fields.remove("diameters") - else: - diameters = None - - extra_keywords = {} - # ignore some fields, put everything else in extra_keywords - layout_fields_ignore = [ - "diff_utc", - "pol_type", - "n_pol_cal_params", - "mount_type", - "axis_offset", - "pola", - "pola_orientation", - "pola_cal_params", - "polb", - "polb_orientation", - "polb_cal_params", - "beam_fwhm", - ] - for field in layout_fields_ignore: - if field in layout_fields: - layout_fields.remove(field) - for field in layout_fields: - keyword = field - if len(keyword) > 8: - keyword = field.replace("_", "") - - value = layout[field][0] - if isinstance(value, bytes): - value = value.decode("utf8") - - extra_keywords[keyword.upper()] = value - - layout_param_dict = { - "telescope_location": telescope_location, - "Nants_telescope": Nants_telescope, - "antenna_positions": antenna_positions, - "antenna_names": antenna_names, - "antenna_numbers": antenna_numbers, - "gst0": gst0, - "rdate": rdate, - "earth_omega": earth_omega, - "dut1": dut1, - "timesys": timesys, - "diameters": diameters, - "extra_keywords": extra_keywords, - } - - return layout_param_dict +__all__ = ["FHD"] class FHD(UVData): @@ -516,7 +117,7 @@ def read_fhd( "information will be missing." ) - filenames = fhd_filenames( + filenames = fhd_utils.fhd_filenames( vis_files=vis_files, params_file=params_file, obs_file=obs_file, @@ -649,7 +250,7 @@ def read_fhd( "Tile" + "0" * (3 - len(ant.strip())) + ant.strip() for ant in obs_tile_names ] - layout_param_dict = get_fhd_layout_info( + layout_param_dict = fhd_utils.get_fhd_layout_info( layout_file=layout_file, telescope_name=self.telescope.name, latitude=latitude, @@ -768,7 +369,7 @@ def read_fhd( # history: add the first few lines from the settings file if settings_file is not None: - self.history = get_fhd_history(settings_file) + self.history = fhd_utils.get_fhd_history(settings_file) else: self.history = "" diff --git a/src/pyuvdata/uvdata/ms.py b/src/pyuvdata/uvdata/ms.py index def94a725c..c7b0f404b7 100644 --- a/src/pyuvdata/uvdata/ms.py +++ b/src/pyuvdata/uvdata/ms.py @@ -16,7 +16,7 @@ from .. import utils from ..docstrings import copy_replace_short_description -from ..utils.file_io import ms as ms_utils +from ..utils.io import ms as ms_utils from . import UVData __all__ = ["MS"] diff --git a/src/pyuvdata/uvdata/mwa_corr_fits.py b/src/pyuvdata/uvdata/mwa_corr_fits.py index 83dcf40a30..a00f292b76 100644 --- a/src/pyuvdata/uvdata/mwa_corr_fits.py +++ b/src/pyuvdata/uvdata/mwa_corr_fits.py @@ -20,7 +20,7 @@ from .. 
import Telescope, utils from ..data import DATA_PATH from ..docstrings import copy_replace_short_description -from ..utils.file_io import fits as fits_utils +from ..utils.io import fits as fits_utils from . import UVData, _corr_fits __all__ = ["input_output_mapping", "MWACorrFITS"] diff --git a/src/pyuvdata/uvdata/uvdata.py b/src/pyuvdata/uvdata/uvdata.py index d10bb32b0c..3325f8ac9b 100644 --- a/src/pyuvdata/uvdata/uvdata.py +++ b/src/pyuvdata/uvdata/uvdata.py @@ -26,7 +26,7 @@ from .. import utils from ..docstrings import combine_docstrings, copy_replace_short_description from ..utils import phasing as phs_utils -from ..utils.file_io import hdf5 as hdf5_utils +from ..utils.io import hdf5 as hdf5_utils from ..uvbase import UVBase from .initializers import new_uvdata diff --git a/src/pyuvdata/uvdata/uvfits.py b/src/pyuvdata/uvdata/uvfits.py index e0639bba57..5e7f49cfc6 100644 --- a/src/pyuvdata/uvdata/uvfits.py +++ b/src/pyuvdata/uvdata/uvfits.py @@ -24,7 +24,7 @@ from .. import utils from ..docstrings import copy_replace_short_description -from ..utils.file_io import fits as fits_utils +from ..utils.io import fits as fits_utils from . import UVData __all__ = ["UVFITS"] diff --git a/src/pyuvdata/uvdata/uvh5.py b/src/pyuvdata/uvdata/uvh5.py index b43daa5dfe..4f76184ba2 100644 --- a/src/pyuvdata/uvdata/uvh5.py +++ b/src/pyuvdata/uvdata/uvh5.py @@ -18,7 +18,7 @@ from .. import Telescope, utils from ..docstrings import copy_replace_short_description -from ..utils.file_io import hdf5 as hdf5_utils +from ..utils.io import hdf5 as hdf5_utils from . import UVData __all__ = ["UVH5", "FastUVH5Meta"] diff --git a/tests/utils/file_io/__init__.py b/tests/utils/io/__init__.py similarity index 75% rename from tests/utils/file_io/__init__.py rename to tests/utils/io/__init__.py index b79a0ec501..ec5d480e53 100644 --- a/tests/utils/file_io/__init__.py +++ b/tests/utils/io/__init__.py @@ -1,4 +1,4 @@ # -*- mode: python; coding: utf-8 -*- # Copyright (c) 2024 Radio Astronomy Software Group # Licensed under the 2-clause BSD License -"""Tests for utility file_io functions.""" +"""Tests for utility file io functions.""" diff --git a/tests/utils/file_io/test_fits.py b/tests/utils/io/test_fits.py similarity index 85% rename from tests/utils/file_io/test_fits.py rename to tests/utils/io/test_fits.py index e8c14672b8..468eca0cae 100644 --- a/tests/utils/file_io/test_fits.py +++ b/tests/utils/io/test_fits.py @@ -23,7 +23,7 @@ def test_deprecated_utils_import(): with check_warnings( DeprecationWarning, match="The _fits_indexhdus function has moved, please import it as " - "pyuvdata.utils.file_io.fits._indexhdus. This warnings will become an " + "pyuvdata.utils.io.fits._indexhdus. This warnings will become an " "error in version 3.2", ): utils._fits_indexhdus(hdu_list) @@ -31,7 +31,7 @@ def test_deprecated_utils_import(): with check_warnings( DeprecationWarning, match="The _fits_gethduaxis function has moved, please import it as " - "pyuvdata.utils.file_io.fits._gethduaxis. This warnings will become an " + "pyuvdata.utils.io.fits._gethduaxis. 
This warnings will become an " "error in version 3.2", ): utils._fits_gethduaxis(vis_hdu, 5) diff --git a/tests/utils/file_io/test_hdf5.py b/tests/utils/io/test_hdf5.py similarity index 97% rename from tests/utils/file_io/test_hdf5.py rename to tests/utils/io/test_hdf5.py index c4b8fd573f..6baa537a8c 100644 --- a/tests/utils/file_io/test_hdf5.py +++ b/tests/utils/io/test_hdf5.py @@ -6,7 +6,7 @@ import numpy as np import pytest -import pyuvdata.utils.file_io.hdf5 as hdf5_utils +import pyuvdata.utils.io.hdf5 as hdf5_utils from pyuvdata import utils diff --git a/tests/utils/file_io/test_ms.py b/tests/utils/io/test_ms.py similarity index 99% rename from tests/utils/file_io/test_ms.py rename to tests/utils/io/test_ms.py index df65d1da8c..c1e04f2fb1 100644 --- a/tests/utils/file_io/test_ms.py +++ b/tests/utils/io/test_ms.py @@ -8,7 +8,7 @@ import numpy as np import pytest -import pyuvdata.utils.file_io.ms as ms_utils +import pyuvdata.utils.io.ms as ms_utils from pyuvdata.data import DATA_PATH from pyuvdata.testing import check_warnings diff --git a/tests/utils/test_phasing.py b/tests/utils/test_phasing.py index 556615b302..ea9a234ffa 100644 --- a/tests/utils/test_phasing.py +++ b/tests/utils/test_phasing.py @@ -1946,15 +1946,18 @@ def test_uvw_track_generator_moon(selenoid): if selenoid == "SPHERE": # check defaults - gen_results = utils.uvw_track_generator( - lon_coord=0.0, - lat_coord=0.0, - coord_frame="icrs", - telescope_loc=(0, 0, 0), - time_array=2456789.0, - antenna_positions=np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), - telescope_frame="mcmf", - ) + try: + gen_results = utils.uvw_track_generator( + lon_coord=0.0, + lat_coord=0.0, + coord_frame="icrs", + telescope_loc=(0, 0, 0), + time_array=2456789.0, + antenna_positions=np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), + telescope_frame="mcmf", + ) + except SpiceUNKNOWNFRAME as err: + pytest.skip("SpiceUNKNOWNFRAME error: " + str(err)) # Check that the total lengths all match 1 np.testing.assert_allclose((gen_results["uvw"] ** 2.0).sum(1), 2.0) diff --git a/tests/utils/test_pol.py b/tests/utils/test_pol.py index be9e2431bc..277d18a4c0 100644 --- a/tests/utils/test_pol.py +++ b/tests/utils/test_pol.py @@ -233,3 +233,14 @@ def test_pol_order(pols, aips_order, casa_order, order): assert all(check == casa_order) if order == "AIPS": assert all(check == aips_order) + + +def test_x_orientation_pol_map(): + with check_warnings( + DeprecationWarning, + match="This function (_x_orientation_rep_dict) is deprecated, use " + "pyuvdata.utils.pol.x_orientation_pol_map instead.", + ): + assert utils._x_orientation_rep_dict("east") == {"x": "e", "y": "n"} + + assert utils.x_orientation_pol_map("north") == {"x": "n", "y": "e"} diff --git a/tests/uvbeam/test_beamfits.py b/tests/uvbeam/test_beamfits.py index 343c4f0758..94e989373a 100644 --- a/tests/uvbeam/test_beamfits.py +++ b/tests/uvbeam/test_beamfits.py @@ -11,7 +11,7 @@ import pytest from astropy.io import fits -import pyuvdata.utils.file_io.fits as fits_utils +import pyuvdata.utils.io.fits as fits_utils from pyuvdata import UVBeam, utils from pyuvdata.data import DATA_PATH from pyuvdata.testing import check_warnings diff --git a/tests/uvcal/test_calfits.py b/tests/uvcal/test_calfits.py index 8771545c9e..3fe3c3313a 100644 --- a/tests/uvcal/test_calfits.py +++ b/tests/uvcal/test_calfits.py @@ -11,7 +11,7 @@ import pytest from astropy.io import fits -import pyuvdata.utils.file_io.fits as fits_utils +import pyuvdata.utils.io.fits as fits_utils from pyuvdata import UVCal, utils from pyuvdata.data import 
DATA_PATH from pyuvdata.testing import check_warnings diff --git a/tests/uvcal/test_uvcal.py b/tests/uvcal/test_uvcal.py index feb8043dfe..f55de147c4 100644 --- a/tests/uvcal/test_uvcal.py +++ b/tests/uvcal/test_uvcal.py @@ -15,7 +15,7 @@ from astropy.io import fits from astropy.table import Table -import pyuvdata.utils.file_io.fits as fits_utils +import pyuvdata.utils.io.fits as fits_utils from pyuvdata import UVCal, utils from pyuvdata.data import DATA_PATH from pyuvdata.testing import check_warnings diff --git a/tests/uvdata/test_uvfits.py b/tests/uvdata/test_uvfits.py index 9d346fd3fe..063a9efb31 100644 --- a/tests/uvdata/test_uvfits.py +++ b/tests/uvdata/test_uvfits.py @@ -12,7 +12,7 @@ import pytest from astropy.io import fits -import pyuvdata.utils.file_io.fits as fits_utils +import pyuvdata.utils.io.fits as fits_utils from pyuvdata import UVData, utils from pyuvdata.data import DATA_PATH from pyuvdata.testing import check_warnings diff --git a/tests/uvdata/test_uvh5.py b/tests/uvdata/test_uvh5.py index 6beb528e61..4405f15ccc 100644 --- a/tests/uvdata/test_uvh5.py +++ b/tests/uvdata/test_uvh5.py @@ -18,7 +18,7 @@ from astropy.time import Time from packaging import version -import pyuvdata.utils.file_io.hdf5 as hdf5_utils +import pyuvdata.utils.io.hdf5 as hdf5_utils from pyuvdata import UVData, utils from pyuvdata.data import DATA_PATH from pyuvdata.testing import check_warnings diff --git a/tests/uvflag/test_uvflag.py b/tests/uvflag/test_uvflag.py index b54a21451b..9fcadc27de 100644 --- a/tests/uvflag/test_uvflag.py +++ b/tests/uvflag/test_uvflag.py @@ -18,7 +18,7 @@ from pyuvdata import UVCal, UVData, UVFlag, __version__, utils from pyuvdata.data import DATA_PATH from pyuvdata.testing import check_warnings -from pyuvdata.utils.file_io import hdf5 as hdf5_utils +from pyuvdata.utils.io import hdf5 as hdf5_utils from pyuvdata.uvbase import old_telescope_metadata_attrs from pyuvdata.uvflag import and_rows_cols, flags2waterfall From 6494d0b5ff84d85ddc3644449ebdcfc01b1803c5 Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Mon, 24 Jun 2024 13:20:30 -0700 Subject: [PATCH 06/12] fix an import for new extension structure --- tests/uvbeam/test_uvbeam.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/uvbeam/test_uvbeam.py b/tests/uvbeam/test_uvbeam.py index 04e24f15b1..e349884bc8 100644 --- a/tests/uvbeam/test_uvbeam.py +++ b/tests/uvbeam/test_uvbeam.py @@ -17,9 +17,10 @@ from astropy.coordinates import Angle from astropy.io import fits -from pyuvdata import UVBeam, _uvbeam, utils +from pyuvdata import UVBeam, utils from pyuvdata.data import DATA_PATH from pyuvdata.testing import check_warnings +from pyuvdata.uvbeam import _uvbeam from .test_cst_beam import cst_files, cst_yaml_file from .test_mwa_beam import filename as mwa_beam_file From 97c6e203791aba17ba07910c95943a6ba152a64c Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Wed, 26 Jun 2024 16:42:52 -0700 Subject: [PATCH 07/12] Move uvcalibrate and apply_uvflag into utils subpackage --- CHANGELOG.md | 2 - docs/functions.rst | 16 ++-- docs/uvcal_tutorial.rst | 8 +- src/pyuvdata/__init__.py | 4 - src/pyuvdata/utils/__init__.py | 108 +---------------------- src/pyuvdata/{ => utils}/apply_uvflag.py | 2 +- src/pyuvdata/{ => utils}/uvcalibrate.py | 4 +- src/pyuvdata/uvflag/uvflag.py | 14 +-- tests/{ => utils}/test_apply_uvflag.py | 11 +-- tests/{ => utils}/test_uvcalibrate.py | 12 +-- 10 files changed, 30 insertions(+), 151 deletions(-) rename src/pyuvdata/{ => utils}/apply_uvflag.py (98%) rename src/pyuvdata/{ => 
utils}/uvcalibrate.py (99%) rename tests/{ => utils}/test_apply_uvflag.py (91%) rename tests/{ => utils}/test_uvcalibrate.py (98%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4fab458c82..954f9ca61d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,8 +35,6 @@ time for each time range or the time_array (if there's a time_array and no time_ - Restructured `utils.py` into a new submodule `utils` with functions split among several submodules. Utility functions used widely are still available from pyuvdata.utils, although this access pattern is deprecated for some of them. -- Moved `uvcalibrate` and `apply_uvflag` to their own modules at the top level. -Accessing them via `utils` is still available but is deprecated. - Modified `UVBeam.interp` to speed up processing when `check_azza_domain=True`. - Updated minimum dependencies: setuptools>=64, setuptools_scm>=8.0 - Restructured to a `src` layout. This should not affect most users, but the diff --git a/docs/functions.rst b/docs/functions.rst index 3e187d8427..69e32edd2e 100644 --- a/docs/functions.rst +++ b/docs/functions.rst @@ -1,18 +1,12 @@ -Useful Functions -================ -There are some functions that interact with multiple types of objects to apply -calibrations solutions and flagging to other objects. - -.. autofunction:: pyuvdata.uvcalibrate - -.. autofunction:: pyuvdata.apply_uvflag - - Utility Functions ------------------ +================ Some of our utility functions are widely used. The most commonly used ones are noted here, for others see the developer docs: :ref:`developer docs utility functions`. +.. autofunction:: pyuvdata.utils.uvcalibrate + +.. autofunction:: pyuvdata.utils.apply_uvflag + .. autofunction:: pyuvdata.utils.baseline_to_antnums .. autofunction:: pyuvdata.utils.antnums_to_baseline diff --git a/docs/uvcal_tutorial.rst b/docs/uvcal_tutorial.rst index acc64ea9ba..c4f79764ae 100644 --- a/docs/uvcal_tutorial.rst +++ b/docs/uvcal_tutorial.rst @@ -258,7 +258,7 @@ a) Data for a single antenna and instrumental polarization UVCal: Calibrating UVData ------------------------- Calibration solutions in a :class:`pyuvdata.UVCal` object can be applied to a -:class:`pyuvdata.UVData` object using the :func:`pyuvdata.uvcalibrate` function. +:class:`pyuvdata.UVData` object using the :func:`pyuvdata.utils.uvcalibrate` function. a) Calibration of UVData by UVCal @@ -267,7 +267,7 @@ a) Calibration of UVData by UVCal >>> # We can calibrate directly using a UVCal object >>> import os - >>> from pyuvdata import UVData, UVCal, uvcalibrate + >>> from pyuvdata import UVData, UVCal, utils >>> from pyuvdata.data import DATA_PATH >>> uvd = UVData.from_file( ... os.path.join(DATA_PATH, "zen.2458098.45361.HH.uvh5_downselected"), @@ -281,10 +281,10 @@ a) Calibration of UVData by UVCal >>> uvc.telescope.antenna_names = np.array( ... [name.replace("ant", "HH") for name in uvc.telescope.antenna_names] ... 
) - >>> uvd_calibrated = uvcalibrate(uvd, uvc, inplace=False) + >>> uvd_calibrated = utils.uvcalibrate(uvd, uvc, inplace=False) >>> # We can also un-calibrate using the same UVCal - >>> uvd_uncalibrated = uvcalibrate(uvd_calibrated, uvc, inplace=False, undo=True) + >>> uvd_uncalibrated = utils.uvcalibrate(uvd_calibrated, uvc, inplace=False, undo=True) UVCal: Selecting data diff --git a/src/pyuvdata/__init__.py b/src/pyuvdata/__init__.py index d60cc9ebd2..93835f4d39 100644 --- a/src/pyuvdata/__init__.py +++ b/src/pyuvdata/__init__.py @@ -30,7 +30,6 @@ warnings.filterwarnings("ignore", message="numpy.dtype size changed") warnings.filterwarnings("ignore", message="numpy.ufunc size changed") -from .apply_uvflag import apply_uvflag # noqa from .telescopes import ( # noqa Telescope, get_telescope, @@ -39,7 +38,6 @@ ) from .uvbeam import UVBeam # noqa from .uvcal import UVCal # noqa -from .uvcalibrate import uvcalibrate # noqa from .uvdata import FastUVH5Meta # noqa from .uvdata import UVData # noqa from .uvflag import UVFlag # noqa @@ -54,8 +52,6 @@ "known_telescopes", "known_telescope_location", "get_telescope", - "uvcalibrate", - "apply_uvflag", ] diff --git a/src/pyuvdata/utils/__init__.py b/src/pyuvdata/utils/__init__.py index b477b06be5..727bf72a85 100644 --- a/src/pyuvdata/utils/__init__.py +++ b/src/pyuvdata/utils/__init__.py @@ -18,6 +18,7 @@ LST_RAD_TOL = 2 * np.pi * 5e-3 / (86400.0) # these seem to be necessary for the installed package to access these submodules +from . import apply_uvflag # noqa from . import array_collapse # noqa from . import bls # noqa from . import bltaxis # noqa @@ -31,14 +32,17 @@ from . import redundancy # noqa from . import times # noqa from . import tools # noqa +from . import uvcalibrate # noqa # Add things to the utils namespace used by outside packages +from .apply_uvflag import apply_uvflag # noqa from .array_collapse import collapse # noqa from .bls import * # noqa from .coordinates import * # noqa from .phasing import uvw_track_generator # noqa from .pol import * # noqa from .times import get_lst_for_time # noqa +from .uvcalibrate import uvcalibrate # noqa # deprecated imports @@ -117,107 +121,3 @@ def _fits_indexhdus(hdulist): ) return _indexhdus(hdulist) - - -def uvcalibrate(uvdata, uvcal, **kwargs): - """ - Calibrate a UVData object with a UVCal object. - - Deprecated, use pyuvdata.uvcalibrate - - Parameters - ---------- - uvdata : UVData object - UVData object to calibrate. - uvcal : UVCal object - UVCal object containing the calibration. - inplace : bool, optional - if True edit uvdata in place, else return a calibrated copy - prop_flags : bool, optional - if True, propagate calibration flags to data flags - and doesn't use flagged gains. Otherwise, uses flagged gains and - does not propagate calibration flags to data flags. - Dterm_cal : bool, optional - Calibrate the off-diagonal terms in the Jones matrix if present - in uvcal. Default is False. Currently not implemented. - flip_gain_conj : bool, optional - This function uses the UVData ant_1_array and ant_2_array to specify the - antennas in the UVCal object. By default, the conjugation convention, which - follows the UVData convention (i.e. ant2 - ant1), is that the applied - gain = ant1_gain * conjugate(ant2_gain). If the other convention is required, - set flip_gain_conj=True. - delay_convention : str, optional - Exponent sign to use in conversion of 'delay' to 'gain' cal_type - if the input uvcal is not inherently 'gain' cal_type. Default to 'minus'. 
- undo : bool, optional - If True, undo the provided calibration. i.e. apply the calibration with - flipped gain_convention. Flag propagation rules apply the same. - time_check : bool - Option to check that times match between the UVCal and UVData - objects if UVCal has a single time or time range. Times are always - checked if UVCal has multiple times. - ant_check : bool - Option to check that all antennas with data on the UVData - object have calibration solutions in the UVCal object. If this option is - set to False, uvcalibrate will proceed without erroring and data for - antennas without calibrations will be flagged. - - Returns - ------- - UVData, optional - Returns if not inplace - - """ - from ..uvcalibrate import uvcalibrate - - warnings.warn( - "uvcalibrate has moved, please import it as 'from pyuvdata import " - "uvcalibrate'. This warnings will become an error in version 3.2", - DeprecationWarning, - ) - - return uvcalibrate(uvdata, uvcal, **kwargs) - - -def apply_uvflag(uvd, uvf, **kwargs): - """ - Apply flags from a UVFlag to a UVData instantiation. - - Deprecated, use pyuvdata.apply_uvflag - - Note that if uvf.Nfreqs or uvf.Ntimes is 1, it will broadcast flags across - that axis. - - Parameters - ---------- - uvd : UVData object - UVData object to add flags to. - uvf : UVFlag object - A UVFlag object in flag mode. - inplace : bool - If True overwrite flags in uvd, otherwise return new object - unflag_first : bool - If True, completely unflag the UVData before applying flags. - Else, OR the inherent uvd flags with uvf flags. - flag_missing : bool - If input uvf is a baseline type and antpairs in uvd do not exist in uvf, - flag them in uvd. Otherwise leave them untouched. - force_pol : bool - If True, broadcast flags to all polarizations if they do not match. - Only works if uvf.Npols == 1. - - Returns - ------- - UVData - If not inplace, returns new UVData object with flags applied - - """ - from ..apply_uvflag import apply_uvflag - - warnings.warn( - "uvcalibrate has moved, please import it as 'from pyuvdata import " - "uvcalibrate'. This warnings will become an error in version 3.2", - DeprecationWarning, - ) - - return apply_uvflag(uvd, uvf, **kwargs) diff --git a/src/pyuvdata/apply_uvflag.py b/src/pyuvdata/utils/apply_uvflag.py similarity index 98% rename from src/pyuvdata/apply_uvflag.py rename to src/pyuvdata/utils/apply_uvflag.py index 5ded0d5620..a43f92b759 100644 --- a/src/pyuvdata/apply_uvflag.py +++ b/src/pyuvdata/utils/apply_uvflag.py @@ -114,7 +114,7 @@ def apply_uvflag( # addition of boolean is OR uvd.flag_array[uvd_ap_inds] += uvf.flag_array[uvf_ap_inds] - uvd.history += "\nFlagged with pyuvdata.apply_uvflags." + uvd.history += "\nFlagged with pyuvdata.utils.apply_uvflags." if not inplace: return uvd diff --git a/src/pyuvdata/uvcalibrate.py b/src/pyuvdata/utils/uvcalibrate.py similarity index 99% rename from src/pyuvdata/uvcalibrate.py rename to src/pyuvdata/utils/uvcalibrate.py index 19b54edc03..50ac958084 100644 --- a/src/pyuvdata/uvcalibrate.py +++ b/src/pyuvdata/utils/uvcalibrate.py @@ -6,7 +6,7 @@ import numpy as np -from .utils.pol import POL_TO_FEED_DICT, jnum2str, parse_jpolstr, polnum2str, polstr2num +from .pol import POL_TO_FEED_DICT, jnum2str, parse_jpolstr, polnum2str, polstr2num def uvcalibrate( @@ -410,7 +410,7 @@ def uvcalibrate( uvdata.data_array[blt_inds, :, pol_ind] /= gain # update attributes - uvdata.history += "\nCalibrated with pyuvdata.uvcalibrate." + uvdata.history += "\nCalibrated with pyuvdata.utils.uvcalibrate." 
if undo: uvdata.vis_units = "uncalib" else: diff --git a/src/pyuvdata/uvflag/uvflag.py b/src/pyuvdata/uvflag/uvflag.py index d683bc825e..a111c235a0 100644 --- a/src/pyuvdata/uvflag/uvflag.py +++ b/src/pyuvdata/uvflag/uvflag.py @@ -1396,9 +1396,10 @@ def to_baseline( """Convert a UVFlag object of type "waterfall" or "antenna" to type "baseline". Broadcasts the flag array to all baselines. - This function does NOT apply flags to uv (see pyuvdata.apply_uvflag for that). - Note that the antenna metadata arrays (`antenna_names`, `antenna_numbers` - and `antenna_positions`) may be reordered to match the ordering on `uv`. + This function does NOT apply flags to uv (see pyuvdata.utils.apply_uvflag + for that). Note that the antenna metadata arrays (`antenna_names`, + `antenna_numbers` and `antenna_positions`) may be reordered to match the + ordering on `uv`. Parameters ---------- @@ -1621,9 +1622,10 @@ def to_antenna( """Convert a UVFlag object of type "waterfall" to type "antenna". Broadcasts the flag array to all antennas. - This function does NOT apply flags to uv (see pyuvdata.apply_uvflag for that). - Note that the antenna metadata arrays (`antenna_names`, `antenna_numbers` - and `antenna_positions`) may be reordered to match the ordering on `uv`. + This function does NOT apply flags to uv (see pyuvdata.utils.apply_uvflag + for that). Note that the antenna metadata arrays (`antenna_names`, + `antenna_numbers` and `antenna_positions`) may be reordered to match the + ordering on `uv`. Parameters ---------- diff --git a/tests/test_apply_uvflag.py b/tests/utils/test_apply_uvflag.py similarity index 91% rename from tests/test_apply_uvflag.py rename to tests/utils/test_apply_uvflag.py index 71d516c68a..ac37d89304 100644 --- a/tests/test_apply_uvflag.py +++ b/tests/utils/test_apply_uvflag.py @@ -6,8 +6,8 @@ import numpy as np import pytest -from pyuvdata import UVFlag, apply_uvflag, utils -from pyuvdata.testing import check_warnings +from pyuvdata import UVFlag +from pyuvdata.utils import apply_uvflag @pytest.mark.filterwarnings("ignore:The shapes of several attributes will be changing") @@ -26,12 +26,7 @@ def test_apply_uvflag(uvcalibrate_uvdata_oldfiles): uvf.flag_array[uvf.antpair2ind(9, 10)[:2]] = True # apply flags and check for basic flag propagation - with check_warnings( - DeprecationWarning, - match="uvcalibrate has moved, please import it as 'from pyuvdata import " - "uvcalibrate'. 
This warnings will become an error in version 3.2", - ): - uvdf = utils.apply_uvflag(uvd, uvf, inplace=False) + uvdf = apply_uvflag(uvd, uvf, inplace=False) assert np.all(uvdf.flag_array[uvdf.antpair2ind(9, 10)][:2]) # test inplace diff --git a/tests/test_uvcalibrate.py b/tests/utils/test_uvcalibrate.py similarity index 98% rename from tests/test_uvcalibrate.py rename to tests/utils/test_uvcalibrate.py index d04ca2150c..528ba63b14 100644 --- a/tests/test_uvcalibrate.py +++ b/tests/utils/test_uvcalibrate.py @@ -8,9 +8,10 @@ import numpy as np import pytest -from pyuvdata import UVCal, utils, uvcalibrate +from pyuvdata import UVCal, utils from pyuvdata.data import DATA_PATH from pyuvdata.testing import check_warnings +from pyuvdata.utils import uvcalibrate @pytest.mark.filterwarnings("ignore:Fixing auto-correlations to be be real-only,") @@ -137,14 +138,7 @@ def test_uvcalibrate(uvcalibrate_data, flip_gain_conj, gain_convention, time_ran # set the gain_scale to "Jy" to test that vis units are set properly uvc.gain_scale = "Jy" - with check_warnings( - DeprecationWarning, - match="uvcalibrate has moved, please import it as 'from pyuvdata import " - "uvcalibrate'. This warnings will become an error in version 3.2", - ): - uvdcal = utils.uvcalibrate( - uvd, uvc, inplace=False, flip_gain_conj=flip_gain_conj - ) + uvdcal = uvcalibrate(uvd, uvc, inplace=False, flip_gain_conj=flip_gain_conj) if gain_convention == "divide": assert uvdcal.vis_units == "uncalib" else: From c65bea9637bfb10266894a4d459e91ed09cfdfb1 Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Wed, 26 Jun 2024 16:47:19 -0700 Subject: [PATCH 08/12] Docs updates based on PR comments Other docs and import cleanups --- docs/developer_docs.rst | 191 +++++------------------------ docs/functions.rst | 44 ------- docs/make_index.py | 2 +- docs/make_telescope.py | 12 +- docs/make_uvcal.py | 5 - docs/make_uvdata.py | 10 +- docs/utility_functions.rst | 219 ++++++++++++++++++++++++++++++++++ src/pyuvdata/__init__.py | 10 +- src/pyuvdata/utils/io/ms.py | 3 +- src/pyuvdata/uvcal/uvcal.py | 6 +- src/pyuvdata/uvdata/mir.py | 3 +- src/pyuvdata/uvdata/miriad.py | 3 +- src/pyuvdata/uvdata/uvdata.py | 9 +- tests/test_telescopes.py | 6 +- 14 files changed, 274 insertions(+), 249 deletions(-) delete mode 100644 docs/functions.rst create mode 100644 docs/utility_functions.rst diff --git a/docs/developer_docs.rst b/docs/developer_docs.rst index 620c09ff50..b07d96d1bc 100644 --- a/docs/developer_docs.rst +++ b/docs/developer_docs.rst @@ -40,8 +40,9 @@ the user classes and the file-specific classes automatically as needed, so users generally do not need to interact with these classes, but developers may need to. -UVData -****** +UVData Classes +************** + .. autoclass:: pyuvdata.uvdata.fhd.FHD :members: @@ -64,8 +65,9 @@ UVData .. autoclass:: pyuvdata.uvdata.uvh5.UVH5 :members: -UVCal -***** + +UVCal Classes +************* .. autoclass:: pyuvdata.uvcal.calfits.CALFITS :members: @@ -76,8 +78,8 @@ UVCal .. autoclass:: pyuvdata.uvcal.fhd_cal.FHDCal :members: -UVBeam -****** +UVBeam Classes +************** .. autoclass:: pyuvdata.uvbeam.beamfits.BeamFITS :members: @@ -89,182 +91,43 @@ UVBeam :members: -.. _Developer Docs Utility Functions: - -Utility Functions ------------------ -Note that we are also listing private functions here (functions that start with -an underscore). While they are listed here, **they are not considered part of the -public API, so they can change without notice**. 
If you find that you need to rely -one of them let us know in a github issue and we can consider making it part of -the public API. - - -File I/O Utility Functions -************************** - -Antenna position files -++++++++++++++++++++++ - -.. automodule:: pyuvdata.utils.io.antpos - :members: - :private-members: - :undoc-members: - -FHD files -+++++++++ - -.. automodule:: pyuvdata.utils.io.fhd - :members: - :private-members: - :undoc-members: - -FITS files -++++++++++ - -.. automodule:: pyuvdata.utils.io.fits - :members: - :private-members: - :undoc-members: - -HDF5 files -++++++++++ - -.. automodule:: pyuvdata.utils.io.hdf5 - :members: - :private-members: - :undoc-members: - -Measurement Set files -+++++++++++++++++++++ - -.. automodule:: pyuvdata.utils.io.ms - :members: - :private-members: - :undoc-members: - -Array collapse functions for flags -********************************** - -.. automodule:: pyuvdata.utils.array_collapse - :members: - :private-members: - :undoc-members: - -Functions for working with baseline numbers -******************************************* - -.. automodule:: pyuvdata.utils.bls - :members: - :private-members: - :undoc-members: - :ignore-module-all: - -Functions for working with the baseline-time axis -************************************************* - -.. automodule:: pyuvdata.utils.bltaxis - :members: - :private-members: - :undoc-members: - -Functions for working with telescope coordinates -************************************************ - -.. automodule:: pyuvdata.utils.coordinates - :members: - :private-members: - :undoc-members: - :ignore-module-all: - -Functions for working with the frequency axis -********************************************* - -.. automodule:: pyuvdata.utils.frequency - :members: - :private-members: - :undoc-members: - -Functions for working with history -********************************** - -.. automodule:: pyuvdata.utils.history - :members: - :private-members: - :undoc-members: - -Functions for working with phase center catalogs -************************************************ +Other Modules and Functions +--------------------------- -.. automodule:: pyuvdata.utils.phase_center_catalog - :members: - :private-members: - :undoc-members: - -Functions for working with phasing -********************************** - -.. automodule:: pyuvdata.utils.phasing - :members: - :private-members: - :undoc-members: +MWA Beam Functions +****************** +Functions related to constructing the MWA beam from the input files which are in +a harmonic space. -Functions for working with polarizations -**************************************** +.. autofunction:: pyuvdata.uvbeam.mwa_beam.P1sin -.. automodule:: pyuvdata.utils.pol - :members: - :private-members: - :undoc-members: - :ignore-module-all: +.. autofunction:: pyuvdata.uvbeam.mwa_beam.P1sin_array -Functions for working with baseline redundancies -************************************************ -.. automodule:: pyuvdata.utils.redundancy - :members: - :private-members: - :undoc-members: - -Functions for working with times and LSTs -***************************************** +aipy extracts +************* -.. automodule:: pyuvdata.utils.times +.. automodule:: pyuvdata.uvdata.aipy_extracts :members: - :private-members: - :undoc-members: -General utility functions -************************* -.. automodule:: pyuvdata.utils.tools - :members: - :private-members: - :undoc-members: +MIR parser +********** -Mir Parser ----------- .. 
automodule:: pyuvdata.uvdata.mir_parser :members: +MIR metadata +************ + .. automodule:: pyuvdata.uvdata.mir_meta_data :members: -Other Functions ---------------- - -.. autofunction:: pyuvdata.uvbeam.mwa_beam.P1sin - -.. autofunction:: pyuvdata.uvbeam.mwa_beam.P1sin_array +UVFlag Functions +**************** +Flag handling functions. .. autofunction:: pyuvdata.uvflag.uvflag.and_rows_cols .. autofunction:: pyuvdata.uvflag.uvflag.flags2waterfall - - -aipy extracts -------------- - -.. automodule:: pyuvdata.uvdata.aipy_extracts - :members: diff --git a/docs/functions.rst b/docs/functions.rst deleted file mode 100644 index 69e32edd2e..0000000000 --- a/docs/functions.rst +++ /dev/null @@ -1,44 +0,0 @@ -Utility Functions -================ -Some of our utility functions are widely used. The most commonly used ones are -noted here, for others see the developer docs: :ref:`developer docs utility functions`. - -.. autofunction:: pyuvdata.utils.uvcalibrate - -.. autofunction:: pyuvdata.utils.apply_uvflag - -.. autofunction:: pyuvdata.utils.baseline_to_antnums -.. autofunction:: pyuvdata.utils.antnums_to_baseline - -.. autofunction:: pyuvdata.utils.LatLonAlt_from_XYZ -.. autofunction:: pyuvdata.utils.XYZ_from_LatLonAlt -.. autofunction:: pyuvdata.utils.rotECEF_from_ECEF -.. autofunction:: pyuvdata.utils.ECEF_from_rotECEF -.. autofunction:: pyuvdata.utils.ENU_from_ECEF -.. autofunction:: pyuvdata.utils.ECEF_from_ENU - -.. autofunction:: pyuvdata.utils.polstr2num -.. autofunction:: pyuvdata.utils.polnum2str -.. autofunction:: pyuvdata.utils.jstr2num -.. autofunction:: pyuvdata.utils.jnum2str -.. autofunction:: pyuvdata.utils.conj_pol -.. autofunction:: pyuvdata.utils.x_orientation_pol_map -.. autofunction:: pyuvdata.utils.parse_polstr -.. autofunction:: pyuvdata.utils.parse_jpolstr - -.. autofunction:: pyuvdata.utils.get_lst_for_time - -.. autofunction:: pyuvdata.utils.uvw_track_generator - -.. autofunction:: pyuvdata.utils.collapse - -Polarization Dictionaries -------------------------- -We also define some useful dictionaries for mapping polarizations: - - * ``pyuvdata.utils.POL_STR2NUM_DICT``: maps visibility polarization strings to polarization integers - * ``pyuvdata.utils.POL_NUM2STR_DICT``: maps visibility polarization integers to polarization strings - * ``pyuvdata.utils.JONES_STR2NUM_DICT``: maps calibration polarization strings to polarization integers - * ``pyuvdata.utils.JONES_NUM2STR_DICT``: maps calibration polarization strings to polarization integers - * ``pyuvdata.utils.CONJ_POL_DICT``: maps how visibility polarizations change when antennas are swapped (visibilities are conjugated) - * ``pyuvdata.utils.XORIENTMAP``: maps x_orientation strings to cannonical names diff --git a/docs/make_index.py b/docs/make_index.py index 64f44cf366..a80bbdb673 100644 --- a/docs/make_index.py +++ b/docs/make_index.py @@ -52,7 +52,7 @@ def write_index_rst(readme_file=None, write_file=None): " telescope\n" " fast_uvh5_meta\n" " fast_calh5_meta\n" - " functions\n" + " utility_functions\n" " developer_docs\n" ) diff --git a/docs/make_telescope.py b/docs/make_telescope.py index 40b8cacb0c..3f18033176 100644 --- a/docs/make_telescope.py +++ b/docs/make_telescope.py @@ -44,13 +44,6 @@ def write_telescope_rst(write_file=None): "`optional`_. 
The :meth:`pyuvdata.Telescope.check` method can be called\n" "on the object to verify that all of the required attributes have been\n" "set in a consistent way.\n\n" - "Note that angle type attributes also have convenience properties named the\n" - "same thing with ``_degrees`` appended through which you can get or set the\n" - "value in degrees. Similarly location type attributes (which are given in\n" - "geocentric xyz coordinates) have convenience properties named the\n" - "same thing with ``_lat_lon_alt`` and ``_lat_lon_alt_degrees`` appended\n" - "through which you can get or set the values using latitude, longitude and\n" - "altitude values in radians or degrees and meters.\n\n" ) out += "Required\n********\n" out += ( @@ -90,7 +83,8 @@ def write_telescope_rst(write_file=None): ":class:`astropy.coordinates.EarthLocation` object, which\n" "is shown here using the Geodetic representation. Also note that for\n" "some telescopes we store csv files giving antenna layout information\n" - "which can be used if data files are missing that information.\n\n" + "which can be used if data files are missing that information.\n" + "We also provide a convenience function to get known telescope locations.\n\n" ) known_tel_use = copy.deepcopy(_KNOWN_TELESCOPES) @@ -104,6 +98,8 @@ def write_telescope_rst(write_file=None): json_obj = json_obj[:-1] + " }" out += ".. code-block:: JavaScript\n\n {json_str}\n\n".format(json_str=json_obj) + out += ".. autofunction:: pyuvdata.telescopes.known_telescope_location\n\n" + t = Time.now() t.format = "iso" t.out_subfmt = "date" diff --git a/docs/make_uvcal.py b/docs/make_uvcal.py index fb2554580e..c2f4b51c29 100644 --- a/docs/make_uvcal.py +++ b/docs/make_uvcal.py @@ -53,11 +53,6 @@ def write_uvcal_rst(write_file=None): "``delay_array``, ``flag_array``, ``quality_array``) are not. The\n" ":meth:`pyuvdata.UVCal.check` method will still pass for metadata only\n" "objects.\n\n" - "Note location type attributes (which are given in topocentric xyz\n" - "coordinates) have convenience properties named the same thing with\n" - "``_lat_lon_alt`` and ``_lat_lon_alt_degrees`` appended through which you can\n" - "get or set the values using latitude, longitude and altitude values in\n" - "radians or degrees and meters.\n\n" ) out += "Required\n********\n" out += ( diff --git a/docs/make_uvdata.py b/docs/make_uvdata.py index 65b2b452bd..7d84f8c27e 100644 --- a/docs/make_uvdata.py +++ b/docs/make_uvdata.py @@ -49,13 +49,9 @@ def write_uvdata_rst(write_file=None): "``flag_array``, ``nsample_array``) are not. The\n" ":meth:`pyuvdata.UVData.check` method will still pass for metadata only\n" "objects.\n\n" - "Note that angle type attributes also have convenience properties named the\n" - "same thing with ``_degrees`` appended through which you can get or set the\n" - "value in degrees. 
Similarly location type attributes (which are given in\n" - "geocentric xyz coordinates) have convenience properties named the\n" - "same thing with ``_lat_lon_alt`` and ``_lat_lon_alt_degrees`` appended\n" - "through which you can get or set the values using latitude, longitude and\n" - "altitude values in radians or degrees and meters.\n\n" + "Note that angle type attributes also have convenience properties named\n" + "the same thing with ``_degrees`` appended through which you can get or\n" + "set the value in degrees.\n\n" ) out += "Required\n********\n" out += ( diff --git a/docs/utility_functions.rst b/docs/utility_functions.rst new file mode 100644 index 0000000000..67a4366699 --- /dev/null +++ b/docs/utility_functions.rst @@ -0,0 +1,219 @@ +Utility Functions +================= +Some of our utility functions are widely used and so are available to be imported +from the `pyuvdata.utils` namespace. These are shown here, for the full list of +all utility functions see: :ref:`utility subpackage`. + +.. autofunction:: pyuvdata.utils.uvcalibrate + +.. autofunction:: pyuvdata.utils.apply_uvflag + +.. autofunction:: pyuvdata.utils.baseline_to_antnums +.. autofunction:: pyuvdata.utils.antnums_to_baseline + +.. autofunction:: pyuvdata.utils.LatLonAlt_from_XYZ +.. autofunction:: pyuvdata.utils.XYZ_from_LatLonAlt +.. autofunction:: pyuvdata.utils.rotECEF_from_ECEF +.. autofunction:: pyuvdata.utils.ECEF_from_rotECEF +.. autofunction:: pyuvdata.utils.ENU_from_ECEF +.. autofunction:: pyuvdata.utils.ECEF_from_ENU + +.. autofunction:: pyuvdata.utils.polstr2num +.. autofunction:: pyuvdata.utils.polnum2str +.. autofunction:: pyuvdata.utils.jstr2num +.. autofunction:: pyuvdata.utils.jnum2str +.. autofunction:: pyuvdata.utils.conj_pol +.. autofunction:: pyuvdata.utils.x_orientation_pol_map +.. autofunction:: pyuvdata.utils.parse_polstr +.. autofunction:: pyuvdata.utils.parse_jpolstr + +.. autofunction:: pyuvdata.utils.get_lst_for_time + +.. autofunction:: pyuvdata.utils.uvw_track_generator + +.. autofunction:: pyuvdata.utils.collapse + +Polarization Dictionaries +------------------------- +We also define some useful dictionaries for mapping polarizations: + + * ``pyuvdata.utils.POL_STR2NUM_DICT``: maps visibility polarization strings to polarization integers + * ``pyuvdata.utils.POL_NUM2STR_DICT``: maps visibility polarization integers to polarization strings + * ``pyuvdata.utils.JONES_STR2NUM_DICT``: maps calibration polarization strings to polarization integers + * ``pyuvdata.utils.JONES_NUM2STR_DICT``: maps calibration polarization integers to polarization strings + * ``pyuvdata.utils.CONJ_POL_DICT``: maps how visibility polarizations change when antennas are swapped (visibilities are conjugated) + * ``pyuvdata.utils.XORIENTMAP``: maps x_orientation strings to canonical names
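+
+For example, a quick sketch of the string/number mappings listed above (the
+integer values follow the AIPS convention pyuvdata uses, so instrumental
+linear polarizations map to negative integers):
+
+.. code-block:: python
+
+  from pyuvdata import utils
+
+  assert utils.polstr2num("xx") == -5  # AIPS integer for instrumental "xx"
+  assert utils.polnum2str(-5) == "xx"  # the inverse mapping
+  assert utils.conj_pol("xy") == "yx"  # polarization when a baseline is conjugated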
+ + +.. _Utility subpackage: + +Utils subpackage +---------------- +This gives the full documentation of all functions inside the utils subpackage. +Unless they are also listed above, these functions must be imported using their +full subpackage/submodule path. + +Note that we are also listing private functions here (functions that start with +an underscore). While they are listed here, **they are not considered part of the +public API, so they can change without notice**. If you find that you need to rely +on one of them let us know in a github issue and we can consider making it part of +the public API. + + +File I/O Utility Functions +************************** + +Antenna position files +~~~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: pyuvdata.utils.io.antpos + :members: + :private-members: + :undoc-members: + +FHD files +~~~~~~~~~ + +.. automodule:: pyuvdata.utils.io.fhd + :members: + :private-members: + :undoc-members: + +FITS files +~~~~~~~~~~ + +.. automodule:: pyuvdata.utils.io.fits + :members: + :private-members: + :undoc-members: + +HDF5 files +~~~~~~~~~~ + +.. automodule:: pyuvdata.utils.io.hdf5 + :members: + :private-members: + :undoc-members: + +Measurement Set files +~~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: pyuvdata.utils.io.ms + :members: + :private-members: + :undoc-members: + +Applying UVFlags to other objects +********************************* + +.. automodule:: pyuvdata.utils.apply_uvflag + :members: + :private-members: + :undoc-members: + +Array collapse functions for flags +********************************** + +.. automodule:: pyuvdata.utils.array_collapse + :members: + :private-members: + :undoc-members: + +Functions for working with baseline numbers +******************************************* + +.. automodule:: pyuvdata.utils.bls + :members: + :private-members: + :undoc-members: + :ignore-module-all: + +Functions for working with the baseline-time axis +************************************************* + +.. automodule:: pyuvdata.utils.bltaxis + :members: + :private-members: + :undoc-members: + +Functions for working with telescope coordinates +************************************************ + +.. automodule:: pyuvdata.utils.coordinates + :members: + :private-members: + :undoc-members: + :ignore-module-all: + +Functions for working with the frequency axis +********************************************* + +.. automodule:: pyuvdata.utils.frequency + :members: + :private-members: + :undoc-members: + +Functions for working with history +********************************** + +.. automodule:: pyuvdata.utils.history + :members: + :private-members: + :undoc-members: + +Functions for working with phase center catalogs +************************************************ + +.. automodule:: pyuvdata.utils.phase_center_catalog + :members: + :private-members: + :undoc-members: + +Functions for working with phasing +********************************** + +.. automodule:: pyuvdata.utils.phasing + :members: + :private-members: + :undoc-members: + +Functions for working with polarizations +**************************************** + +.. automodule:: pyuvdata.utils.pol + :members: + :private-members: + :undoc-members: + :ignore-module-all: + +Functions for working with baseline redundancies +************************************************ + +.. automodule:: pyuvdata.utils.redundancy + :members: + :private-members: + :undoc-members: + +Functions for working with times and LSTs +***************************************** + +.. automodule:: pyuvdata.utils.times + :members: + :private-members: + :undoc-members: + +General utility functions +************************* + +.. automodule:: pyuvdata.utils.tools + :members: + :private-members: + :undoc-members: + +Applying calibration solutions to data +************************************** + +.. automodule:: pyuvdata.utils.uvcalibrate :members: :private-members: :undoc-members:
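The deprecated access paths that remain importable from `pyuvdata.utils` (for
example the `_fits_indexhdus` wrapper shown earlier) all share the same
warn-then-delegate shape. A minimal sketch of that pattern, with the warning
message abbreviated rather than copied from the pyuvdata source:

    import warnings

    def _fits_indexhdus(hdulist):
        # Warn on the old access path, then delegate to the new location.
        from .io.fits import _indexhdus

        warnings.warn(
            "The _fits_indexhdus function has moved, please import it as "
            "pyuvdata.utils.io.fits._indexhdus.",
            DeprecationWarning,
        )
        return _indexhdus(hdulist)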
diff --git a/src/pyuvdata/__init__.py b/src/pyuvdata/__init__.py index 93835f4d39..8b541bb534 100644 --- a/src/pyuvdata/__init__.py +++ b/src/pyuvdata/__init__.py @@ -30,12 +30,8 @@ warnings.filterwarnings("ignore", message="numpy.dtype size changed") warnings.filterwarnings("ignore", message="numpy.ufunc size changed") -from .telescopes import ( # noqa - Telescope, - get_telescope, - known_telescope_location, - known_telescopes, -) +from .telescopes import Telescope # noqa +from .telescopes import get_telescope # noqa # NB: get_telescope is deprecated from .uvbeam import UVBeam # noqa from .uvcal import UVCal # noqa from .uvdata import FastUVH5Meta # noqa @@ -49,8 +45,6 @@ "UVFlag", "UVBeam", "Telescope", - "known_telescopes", - "known_telescope_location", "get_telescope", ] diff --git a/src/pyuvdata/utils/io/ms.py b/src/pyuvdata/utils/io/ms.py index 286c4f4738..c065c5947c 100644 --- a/src/pyuvdata/utils/io/ms.py +++ b/src/pyuvdata/utils/io/ms.py @@ -10,7 +10,8 @@ from astropy.coordinates import EarthLocation from astropy.time import Time -from ... import __version__, known_telescope_location, known_telescopes, utils +from ... import __version__, utils +from ...telescopes import known_telescope_location, known_telescopes from ...uvdata.uvdata import reporting_request try: diff --git a/src/pyuvdata/uvcal/uvcal.py b/src/pyuvdata/uvcal/uvcal.py index 31502777d3..4daa1b8518 100644 --- a/src/pyuvdata/uvcal/uvcal.py +++ b/src/pyuvdata/uvcal/uvcal.py @@ -542,16 +542,16 @@ def __init__(self): "Optional parameter, similar to the UVData parameter of the same name. " "Dictionary that acts as a catalog, containing information on individual " "phase centers. Keys are the catalog IDs of the different phase centers in " - "the object (matched to the parameter `phase_center_id_array`). At a " + "the object (matched to the parameter ``phase_center_id_array``). At a " "minimum, each dictionary must contain the keys: " "'cat_name' giving the phase center name (this does not have to be unique, " "non-unique values can be used to indicate sets of phase centers that make " "up a mosaic observation), " "'cat_type', which can be 'sidereal' (fixed position in RA/Dec), 'ephem' " "(position in RA/Dec which moves with time), 'driftscan' (fixed postion in " - "Az/El, NOT the same as the old `phase_type`='drift') or 'unprojected' " + "Az/El, NOT the same as the old ``phase_type`` = 'drift') or 'unprojected' " "(baseline coordinates in ENU, but data are not phased, similar to " - "the old `phase_type`='drift') " + "the old ``phase_type`` = 'drift') " "'cat_lon' (longitude coord, e.g. RA, either a single value or a one " "dimensional array of length Npts --the number of ephemeris data points-- " "for ephem type phase centers), " diff --git a/src/pyuvdata/uvdata/mir.py b/src/pyuvdata/uvdata/mir.py index ca2806e6af..4d4382d396 100644 --- a/src/pyuvdata/uvdata/mir.py +++ b/src/pyuvdata/uvdata/mir.py @@ -11,8 +11,9 @@ from astropy.time import Time from docstring_parser import DocstringStyle -from .. import Telescope, known_telescope_location, utils +from .. import Telescope, utils from ..docstrings import copy_replace_short_description +from ..telescopes import known_telescope_location from . 
import UVData, mir_parser __all__ = ["generate_sma_antpos_dict", "Mir"] diff --git a/src/pyuvdata/uvdata/miriad.py b/src/pyuvdata/uvdata/miriad.py index d4be90c7e4..ceb8e8ffe0 100644 --- a/src/pyuvdata/uvdata/miriad.py +++ b/src/pyuvdata/uvdata/miriad.py @@ -16,8 +16,9 @@ from astropy.time import Time from docstring_parser import DocstringStyle -from .. import known_telescope_location, utils +from .. import utils from ..docstrings import copy_replace_short_description +from ..telescopes import known_telescope_location from . import UVData from .uvdata import reporting_request diff --git a/src/pyuvdata/uvdata/uvdata.py b/src/pyuvdata/uvdata/uvdata.py index 3325f8ac9b..aa3e1eaf0f 100644 --- a/src/pyuvdata/uvdata/uvdata.py +++ b/src/pyuvdata/uvdata/uvdata.py @@ -21,10 +21,11 @@ from docstring_parser import DocstringStyle from scipy import ndimage as nd -from .. import Telescope, known_telescopes +from .. import Telescope from .. import parameter as uvp from .. import utils from ..docstrings import combine_docstrings, copy_replace_short_description +from ..telescopes import known_telescopes from ..utils import phasing as phs_utils from ..utils.io import hdf5 as hdf5_utils from ..uvbase import UVBase @@ -325,16 +326,16 @@ def __init__(self): desc = ( "Dictionary that acts as a catalog, containing information on individual " "phase centers. Keys are the catalog IDs of the different phase centers in " - "the object (matched to the parameter `phase_center_id_array`). At a " + "the object (matched to the parameter ``phase_center_id_array``). At a " "minimum, each dictionary must contain the keys: " "'cat_name' giving the phase center name (this does not have to be unique, " "non-unique values can be used to indicate sets of phase centers that make " "up a mosaic observation), " "'cat_type', which can be 'sidereal' (fixed position in RA/Dec), 'ephem' " "(position in RA/Dec which moves with time), 'driftscan' (fixed postion in " - "Az/El, NOT the same as the old `phase_type`='drift') or 'unprojected' " + "Az/El, NOT the same as the old ``phase_type`` = 'drift') or 'unprojected' " "(baseline coordinates in ENU, but data are not phased, similar to " - "the old `phase_type`='drift') " + "the old ``phase_type`` = 'drift') " "'cat_lon' (longitude coord, e.g. 
diff --git a/tests/test_telescopes.py b/tests/test_telescopes.py
index b8fe18fb52..825c5d61ce 100644
--- a/tests/test_telescopes.py
+++ b/tests/test_telescopes.py
@@ -140,7 +140,9 @@ def test_properties():
 
 def test_known_telescopes():
     """Test known_telescopes function returns expected results."""
-    assert sorted(pyuvdata.known_telescopes()) == sorted(expected_known_telescopes)
+    assert sorted(pyuvdata.telescopes.known_telescopes()) == sorted(
+        expected_known_telescopes
+    )
 
 
 def test_update_params_from_known():
@@ -203,7 +205,7 @@ def test_update_params_from_known():
 
 def test_from_known():
-    for inst in pyuvdata.known_telescopes():
+    for inst in pyuvdata.telescopes.known_telescopes():
         # don't run check b/c some telescopes won't have antenna info defined
         telescope_obj = Telescope.from_known_telescopes(inst, run_check=False)
         assert telescope_obj.name == inst
 
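For downstream users, the net effect of the __init__.py and test_telescopes.py hunks
above is that known_telescopes and known_telescope_location are no longer re-exported
from the package top level. A minimal migration sketch (the "HERA" name is assumed to
be among the known telescopes):

    # Before this series (no longer available at the top level):
    #     from pyuvdata import known_telescopes, known_telescope_location
    # After, import from the telescopes module instead:
    from pyuvdata.telescopes import known_telescope_location, known_telescopes

    print(sorted(known_telescopes()))        # names pyuvdata knows about
    print(known_telescope_location("HERA"))  # location object for a known name
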
From 43fca6efd793fe50a84242be640e7813b7689cbe Mon Sep 17 00:00:00 2001
From: Bryna Hazelton
Date: Wed, 26 Jun 2024 16:53:51 -0700
Subject: [PATCH 09/12] unpin numpy for python-casacore and lunarsky given
 updates

Also update lunarsky min dependency throughout
---
 CHANGELOG.md                       | 1 +
 README.md                          | 2 +-
 ci/pyuvdata_min_versions_tests.yml | 2 +-
 ci/pyuvdata_tests.yml              | 2 +-
 ci/pyuvdata_tests_mac_arm.yml      | 2 +-
 ci/pyuvdata_tests_windows.yml      | 2 +-
 environment.yaml                   | 2 +-
 pyproject.toml                     | 4 ++--
 8 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 954f9ca61d..19f99c38b5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -32,6 +32,7 @@ time for each time range or the time_array (if there's a time_array and no
 time_range).
 - Added new keyword handling for v.6 of the MIR data format within `MirParser`.
 
 ### Changed
+- Updated minimum optional dependency versions: lunarsky>=0.2.4
 - Restructured `utils.py` into a new submodule `utils` with functions split among
 several submodules. Utility functions used widely are still available from
 pyuvdata.utils, although this access pattern is deprecated for some of them.
diff --git a/README.md b/README.md
index aa7836cc8b..247613cae7 100644
--- a/README.md
+++ b/README.md
@@ -157,7 +157,7 @@ Optional:
 * astropy-healpix >= 1.0.2 (for working with beams in HEALPix formats)
 * astroquery >= 0.4.4 (for enabling phasing to ephemeris objects using JPL-Horizons)
 * hdf5plugin >= 3.2.0 (for enabling bitshuffle and other hdf5 compression filters in uvh5 files)
-* lunarsky >=0.2.2 (for working with simulated datasets for lunar telescopes)
+* lunarsky >=0.2.4 (for working with simulated datasets for lunar telescopes)
 * novas and novas_de405 (for using the NOVAS library for astrometry)
 * python-casacore >= 3.5.2 (for working with CASA measurement sets)
 
diff --git a/ci/pyuvdata_min_versions_tests.yml b/ci/pyuvdata_min_versions_tests.yml
index 1cee115d51..500715aa47 100644
--- a/ci/pyuvdata_min_versions_tests.yml
+++ b/ci/pyuvdata_min_versions_tests.yml
@@ -23,6 +23,6 @@ dependencies:
   - setuptools_scm==8.0.*
   - pip
   - pip:
-    - lunarsky==0.2.2
+    - lunarsky==0.2.4
     - novas
     - novas_de405
diff --git a/ci/pyuvdata_tests.yml b/ci/pyuvdata_tests.yml
index bd4491152b..719dae1ace 100644
--- a/ci/pyuvdata_tests.yml
+++ b/ci/pyuvdata_tests.yml
@@ -23,6 +23,6 @@ dependencies:
   - setuptools_scm>=8.0
   - pip
   - pip:
-    - lunarsky>=0.2.2
+    - lunarsky>=0.2.4
     - novas
     - novas_de405
diff --git a/ci/pyuvdata_tests_mac_arm.yml b/ci/pyuvdata_tests_mac_arm.yml
index 6c2e08db25..70e7128d95 100644
--- a/ci/pyuvdata_tests_mac_arm.yml
+++ b/ci/pyuvdata_tests_mac_arm.yml
@@ -23,4 +23,4 @@ dependencies:
   - setuptools_scm>=8.0
   - pip
   - pip:
-    - lunarsky>=0.2.2
+    - lunarsky>=0.2.4
diff --git a/ci/pyuvdata_tests_windows.yml b/ci/pyuvdata_tests_windows.yml
index c6034ad726..4039686e9d 100644
--- a/ci/pyuvdata_tests_windows.yml
+++ b/ci/pyuvdata_tests_windows.yml
@@ -22,4 +22,4 @@ dependencies:
   - setuptools_scm>=8.0
   - pip
   - pip:
-    - lunarsky>=0.2.2
+    - lunarsky>=0.2.4
diff --git a/environment.yaml b/environment.yaml
index 032bea5324..1f818d6241 100644
--- a/environment.yaml
+++ b/environment.yaml
@@ -26,6 +26,6 @@ dependencies:
   - setuptools_scm>=8.0
   - sphinx
   - pip:
-    - lunarsky>=0.2.2
+    - lunarsky>=0.2.4
     - novas
     - novas_de405
diff --git a/pyproject.toml b/pyproject.toml
index 42fcc8f4bd..c257bca984 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -55,10 +55,10 @@ classifiers = [
 
 [project.optional-dependencies]
 astroquery = ["astroquery>=0.4.4"]
-casa = ["python-casacore>=3.5.2", "numpy>=1.23,<2.0"]
+casa = ["python-casacore>=3.5.2"]
 hdf5_compression = ["hdf5plugin>=3.2.0"]
 healpix = ["astropy_healpix>=1.0.2"]
-lunar = ["lunarsky>=0.2.2", "numpy>=1.23,<2.0"]
+lunar = ["lunarsky>=0.2.4"]
 novas = ["novas", "novas_de405"]
 all = ["pyuvdata[astroquery,casa,hdf5_compression,healpix,lunar,novas]"]
 test = [

From e05d3155ca2b9b96ed69f57c6d03353cc1950f0a Mon Sep 17 00:00:00 2001
From: Bryna Hazelton
Date: Thu, 27 Jun 2024 13:09:54 -0700
Subject: [PATCH 10/12] make sure python-casacore isn't installed with numpy
 2.0 from pip

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index c257bca984..480e962d92 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -55,7 +55,7 @@ classifiers = [
 
 [project.optional-dependencies]
 astroquery = ["astroquery>=0.4.4"]
-casa = ["python-casacore>=3.5.2"]
+casa = ["python-casacore>=3.5.2", "numpy>=1.23,<2.0"]
 hdf5_compression = ["hdf5plugin>=3.2.0"]
 healpix = ["astropy_healpix>=1.0.2"]
 lunar = ["lunarsky>=0.2.4"]
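
The pyproject.toml churn across these two patches first drops and then restores the
numpy<2.0 pin on the casa extra: per the PATCH 10 subject, pip-installed
python-casacore still cannot be used with numpy 2.0, while lunarsky 0.2.4 can. Users
pick these pins up through the extras, e.g. pip install "pyuvdata[casa,lunar]". A
minimal sketch for checking an existing environment against the pins (package names as
published on PyPI):

    # Print installed versions of the packages pinned above; optional
    # packages that are absent are reported rather than raising.
    from importlib.metadata import PackageNotFoundError, version

    for pkg in ("numpy", "lunarsky", "python-casacore"):
        try:
            print(pkg, version(pkg))
        except PackageNotFoundError:
            print(pkg, "not installed (optional)")
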
From baf64b48365bd908a8e5f2905c0119f1111fd04b Mon Sep 17 00:00:00 2001
From: Bryna Hazelton
Date: Thu, 27 Jun 2024 13:11:48 -0700
Subject: [PATCH 11/12] Fix tutorials

---
 docs/developer_docs.rst       | 12 +-----------
 docs/uvcal_tutorial.rst       |  4 ++--
 docs/uvdata_tutorial.rst      | 14 ++++++--------
 src/pyuvdata/utils/bls.py     |  4 ++--
 src/pyuvdata/uvdata/uvdata.py |  3 ++-
 5 files changed, 13 insertions(+), 24 deletions(-)

diff --git a/docs/developer_docs.rst b/docs/developer_docs.rst
index b07d96d1bc..8a5d589bc6 100644
--- a/docs/developer_docs.rst
+++ b/docs/developer_docs.rst
@@ -94,16 +94,6 @@ UVBeam Classes
 Other Modules and Functions
 ---------------------------
 
-MWA Beam Functions
-******************
-Functions related to constructing the MWA beam from the input files which are in
-a harmonic space.
-
-.. autofunction:: pyuvdata.uvbeam.mwa_beam.P1sin
-
-.. autofunction:: pyuvdata.uvbeam.mwa_beam.P1sin_array
-
-
 aipy extracts
 *************
 
@@ -126,7 +116,7 @@ MIR metadata
 
 UVFlag Functions
 ****************
-Flag handling functions.
+Some useful flag handling functions.
 
 .. autofunction:: pyuvdata.uvflag.uvflag.and_rows_cols
 
diff --git a/docs/uvcal_tutorial.rst b/docs/uvcal_tutorial.rst
index c4f79764ae..cf4b5e4e01 100644
--- a/docs/uvcal_tutorial.rst
+++ b/docs/uvcal_tutorial.rst
@@ -326,7 +326,7 @@ b) Select antennas to keep using the antenna names, also select frequencies to k
   >>> cal = UVCal.from_file(filename)
 
   >>> # print all the antenna names with data in the original file
-  >>> print([cal.telescope.antenna_names[np.where(cal.telescope.antenna_numbers==a)[0][0]] for a in cal.ant_array])
+  >>> print([str(cal.telescope.antenna_names[np.where(cal.telescope.antenna_numbers==a)[0][0]]) for a in cal.ant_array])
   ['ant0', 'ant1', 'ant11', 'ant12', 'ant13', 'ant23', 'ant24', 'ant25']
 
   >>> # print the first 10 frequencies in the original file
@@ -336,7 +336,7 @@ b) Select antennas to keep using the antenna names, also select frequencies to k
   >>> cal.select(antenna_names=['ant11', 'ant13', 'ant25'], freq_chans=np.arange(0, 4))
 
   >>> # print all the antenna names with data after the select
-  >>> print([cal.telescope.antenna_names[np.where(cal.telescope.antenna_numbers==a)[0][0]] for a in cal.ant_array])
+  >>> print([str(cal.telescope.antenna_names[np.where(cal.telescope.antenna_numbers==a)[0][0]]) for a in cal.ant_array])
   ['ant11', 'ant13', 'ant25']
 
   >>> # print all the frequencies after the select
diff --git a/docs/uvdata_tutorial.rst b/docs/uvdata_tutorial.rst
index 9318040180..76af847d6a 100644
--- a/docs/uvdata_tutorial.rst
+++ b/docs/uvdata_tutorial.rst
@@ -1055,7 +1055,7 @@ c) Select a few antenna pairs to keep
 
   >>> # note that order of the values in the pair does not matter
   >>> # print all the antenna pairs after the select
-  >>> print(sorted(set(zip(uvd.ant_1_array, uvd.ant_2_array))))
+  >>> print(sorted(set(zip(uvd.ant_1_array.tolist(), uvd.ant_2_array.tolist()))))
   [(1, 2), (1, 7), (1, 21)]
 
 d) Select antenna pairs using baseline numbers
@@ -1079,7 +1079,7 @@ d) Select antenna pairs using baseline numbers
   >>> # print unique baselines and antennas after select
   >>> print(np.unique(uvd.baseline_array))
   [73736 73753 81945]
-  >>> print(list(set(zip(uvd.ant_1_array, uvd.ant_2_array))))
+  >>> print(list(set(zip(uvd.ant_1_array.tolist(), uvd.ant_2_array.tolist()))))
   [(8, 25), (4, 25), (4, 8)]
 
 e) Select polarizations
@@ -2045,19 +2045,17 @@ in the full data array based on redundancy.
   >>> uv_backup = uv0.copy()
   >>> uvd2 = uv0.compress_by_redundancy(method="select", tol=tol, inplace=False)
   >>> uv0.compress_by_redundancy(method="select", tol=tol)
-  >>> uvd2 == uv0
-  True
+  >>> assert uvd2 == uv0
 
   >>> # Note -- Compressing and inflating changes the baseline order, reorder before comparing.
   >>> uv0.inflate_by_redundancy(tol=tol)
   >>> uv_backup.reorder_blts(conj_convention="u>0", uvw_tol=tol)
   >>> uv0.reorder_blts()
-  >>> np.all(uv0.baseline_array == uv_backup.baseline_array)
-  True
+  >>> assert np.all(uv0.baseline_array == uv_backup.baseline_array)
 
   >>> uvd2.inflate_by_redundancy(tol=tol)
-  >>> uvd2 == uv0
-  True
+  >>> assert uvd2 == uv0
+
 
 UVData: Normalizing data
 ------------------------
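
The tutorial edits above (str(...) around antenna names, .tolist() before zip, and
assert in place of printed True) all appear to work around the NumPy 2.0 change to
scalar reprs (NEP 51), under which printed output like (8, 25) becomes
(np.int64(8), np.int64(25)) and no longer matches doctests written against numpy 1.x.
A small sketch of the difference:

    import numpy as np

    ants = np.array([4, 8, 25])
    # Under numpy >= 2.0 this prints np.int64(...) wrappers, so doctests
    # comparing printed output verbatim break:
    print(list(zip(ants, ants)))
    # .tolist() yields builtin Python ints, which print identically on
    # every numpy version:
    print(list(zip(ants.tolist(), ants.tolist())))  # [(4, 4), (8, 8), (25, 25)]
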
diff --git a/src/pyuvdata/utils/bls.py b/src/pyuvdata/utils/bls.py
index e5ba647ced..331481b674 100644
--- a/src/pyuvdata/utils/bls.py
+++ b/src/pyuvdata/utils/bls.py
@@ -44,9 +44,9 @@ def baseline_to_antnums(baseline, *, Nants_telescope):  # noqa: N803
         np.ascontiguousarray(baseline, dtype=np.uint64)
     )
     if return_array:
-        return ant1, ant2
+        return np.astype(ant1, int), np.astype(ant2, int)
     else:
-        return ant1.item(0), ant2.item(0)
+        return int(ant1.item(0)), int(ant2.item(0))
 
 
 def antnums_to_baseline(
diff --git a/src/pyuvdata/uvdata/uvdata.py b/src/pyuvdata/uvdata/uvdata.py
index aa3e1eaf0f..efef9f3674 100644
--- a/src/pyuvdata/uvdata/uvdata.py
+++ b/src/pyuvdata/uvdata/uvdata.py
@@ -2926,7 +2926,8 @@ def get_antpairs(self):
         list of tuples of int
             list of unique antpair tuples (ant1, ant2) with data associated with them.
         """
-        return list(zip(*self.baseline_to_antnums(self.get_baseline_nums())))
+        ant1_arr, ant2_arr = self.baseline_to_antnums(self.get_baseline_nums())
+        return list(zip((ant1_arr).tolist(), (ant2_arr).tolist()))
 
     def get_pols(self):
         """

From cc044874b78dbb45135949a2a717efd51b70c460 Mon Sep 17 00:00:00 2001
From: Bryna Hazelton
Date: Thu, 27 Jun 2024 13:51:42 -0700
Subject: [PATCH 12/12] Fix compatibility with older numpy versions

---
 src/pyuvdata/utils/bls.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/pyuvdata/utils/bls.py b/src/pyuvdata/utils/bls.py
index 331481b674..444482f817 100644
--- a/src/pyuvdata/utils/bls.py
+++ b/src/pyuvdata/utils/bls.py
@@ -44,7 +44,7 @@ def baseline_to_antnums(baseline, *, Nants_telescope):  # noqa: N803
         np.ascontiguousarray(baseline, dtype=np.uint64)
     )
     if return_array:
-        return np.astype(ant1, int), np.astype(ant2, int)
+        return ant1.astype(int), ant2.astype(int)
     else:
         return int(ant1.item(0)), int(ant2.item(0))
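
The final one-line patch is needed because np.astype, the function form introduced
for array-API compatibility, only exists in numpy >= 2.0; the spelling introduced in
PATCH 11 would therefore fail on numpy 1.x, while the ndarray.astype method works on
both. A minimal check:

    import numpy as np

    ant1 = np.array([1, 2], dtype=np.uint64)
    print(ant1.astype(int))       # method form: works on numpy 1.x and 2.x
    print(hasattr(np, "astype"))  # the function form only exists on
                                  # numpy >= 2.0, hence this fix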