diff --git a/docs/source/database.rst b/docs/source/database.rst index 28c96b7a3..0469952af 100644 --- a/docs/source/database.rst +++ b/docs/source/database.rst @@ -12,4 +12,4 @@ reset_database.py ----------------- .. automodule:: jwql.database.reset_database :members: - :undoc-members: \ No newline at end of file + :undoc-members: diff --git a/docs/source/website.rst b/docs/source/website.rst index 6f0da9b31..3825fa2ae 100644 --- a/docs/source/website.rst +++ b/docs/source/website.rst @@ -44,6 +44,12 @@ monitor_views.py :members: :undoc-members: +monitor_models +-------------- +.. automodule:: jwql.website.apps.jwql.monitor_models.common + :members: + :undoc-members: + settings.py ----------- .. automodule:: jwql.website.jwql_proj.settings @@ -60,4 +66,4 @@ views.py -------- .. automodule:: jwql.website.apps.jwql.views :members: - :undoc-members: \ No newline at end of file + :undoc-members: diff --git a/environment_python_3.10.yml b/environment_python_3.10.yml index 9f2ed409b..e322ea9ca 100644 --- a/environment_python_3.10.yml +++ b/environment_python_3.10.yml @@ -24,7 +24,7 @@ channels: dependencies: - astropy=5.3.4 - beautifulsoup4=4.12.2 - - bokeh=2.4.3 + - bokeh=3.3.0 - celery=5.3.4 - cryptography=41.0.4 - django=4.2.6 @@ -59,6 +59,7 @@ dependencies: - astroquery==0.4.6 - bandit==1.7.5 - jwst==1.12.3 + - jwst_backgrounds==1.2.0 - pysiaf==0.20.0 - pysqlite3==0.5.2 - pyvo==1.4.2 diff --git a/environment_python_3.9.yml b/environment_python_3.9.yml index 4f4159244..a68f005c5 100644 --- a/environment_python_3.9.yml +++ b/environment_python_3.9.yml @@ -24,7 +24,7 @@ channels: dependencies: - astropy=5.3.3 - beautifulsoup4=4.12.2 - - bokeh=2.4.3 + - bokeh=3.3.0 - celery=5.3.4 - cryptography=41.0.4 - django=4.2.5 @@ -59,6 +59,7 @@ dependencies: - astroquery==0.4.6 - bandit==1.7.5 - jwst==1.12.3 + - jwst_backgrounds==1.2.0 - pysiaf==0.20.0 - pysqlite3==0.5.2 - pyvo==1.4.2 diff --git a/jwql/bokeh_templating/factory.py b/jwql/bokeh_templating/factory.py index 867451e53..7c77bfa5d 100644 --- a/jwql/bokeh_templating/factory.py +++ b/jwql/bokeh_templating/factory.py @@ -36,8 +36,7 @@ # Figures get their own constructor so we remove references to Figures from # the keyword maps. 
-Figure = mappings.pop("Figure") -del sequences["figure"] +Figure = mappings.pop("figure") def mapping_factory(tool, element_type): diff --git a/jwql/database/database_interface.py b/jwql/database/database_interface.py index 73b536eed..d94532ef4 100644 --- a/jwql/database/database_interface.py +++ b/jwql/database/database_interface.py @@ -84,10 +84,9 @@ from jwql.utils.constants import ANOMALIES_PER_INSTRUMENT from jwql.utils.constants import FILE_SUFFIX_TYPES from jwql.utils.constants import JWST_INSTRUMENT_NAMES +from jwql.utils.constants import ON_GITHUB_ACTIONS from jwql.utils.utils import get_config -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - # Monkey patch Query with data_frame method @property diff --git a/jwql/database/monitor_table_definitions/nircam/nircam_claw_stats.txt b/jwql/database/monitor_table_definitions/nircam/nircam_claw_stats.txt index 0730d1e3f..e1396d4a5 100644 --- a/jwql/database/monitor_table_definitions/nircam/nircam_claw_stats.txt +++ b/jwql/database/monitor_table_definitions/nircam/nircam_claw_stats.txt @@ -15,4 +15,6 @@ MEDIAN, float STDDEV, float FRAC_MASKED, float SKYFLAT_FILENAME, string +DOY, float +TOTAL_BKG, float ENTRY_DATE, datetime \ No newline at end of file diff --git a/jwql/edb/engineering_database.py b/jwql/edb/engineering_database.py index fea43aafa..b4812d81b 100644 --- a/jwql/edb/engineering_database.py +++ b/jwql/edb/engineering_database.py @@ -58,20 +58,20 @@ from astroquery.mast import Mast from bokeh.embed import components from bokeh.layouts import column -from bokeh.models import BoxAnnotation, ColumnDataSource, DatetimeTickFormatter, HoverTool, Range1d +from bokeh.models import BoxAnnotation, ColumnDataSource, DatetimeTickFormatter, HoverTool +from bokeh.models import Range1d from bokeh.plotting import figure, output_file, show, save import numpy as np from jwst.lib.engdb_tools import ENGDB_Service from jwql.utils.constants import MIRI_POS_RATIO_VALUES +from jwql.utils.constants import ON_GITHUB_ACTIONS from jwql.utils.credentials import get_mast_base_url, get_mast_token from jwql.utils.utils import get_config MAST_EDB_MNEMONIC_SERVICE = 'Mast.JwstEdb.Mnemonics' MAST_EDB_DICTIONARY_SERVICE = 'Mast.JwstEdb.Dictionary' -# Temporary until JWST operations: switch to test string for MAST request URL -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') if not ON_GITHUB_ACTIONS: Mast._portal_api_connection.MAST_REQUEST_URL = get_config()['mast_request_url'] @@ -283,7 +283,7 @@ def __mul__(self, mnem): before = np.where(common_dates == self.data['dates'][block])[0] if len(before) > 0: - new_blocks.append(before[0]) # + 1) + new_blocks.append(before[0]) # + 1) except IndexError: # The final block value is usually equal to the length of the array, and will # therefore cause an Index Error in the lines above. Ignore that error here. @@ -421,7 +421,7 @@ def block_stats(self, sigma=3, ignore_vals=[], ignore_edges=False, every_change= # calculated, remove those every change values and block values from the EdbMnemonic # instance. 
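Note: the environment pins earlier in this diff move bokeh from 2.4.3 to 3.3.0, which is what drives the factory.py mapping rename and the import reshuffling above, as well as the DatetimeTickFormatter rewrites further down. A minimal sketch of that formatter signature change, assuming bokeh>=3.0 is installed:

# bokeh 2.4.x accepted a list of format strings per time resolution, e.g.
#   DatetimeTickFormatter(hours=["%d %b %H:%M"], days=["%d %b %H:%M"])
# bokeh 3.x expects a single format string per resolution instead, which is
# exactly the shape of the edits applied to bokeh_plot() and
# plot_data_plus_devs() in the hunks below.
from bokeh.models import DatetimeTickFormatter

formatter = DatetimeTickFormatter(hours="%d %b %H:%M", days="%d %b %H:%M")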
if every_change: - if len(remove_change_indexes) > 0: + if len(remove_change_indexes) > 0: self.every_change_values = np.delete(self.every_change_values, remove_change_indexes) self.blocks = np.delete(self.blocks, remove_change_indexes) @@ -439,7 +439,7 @@ def block_stats(self, sigma=3, ignore_vals=[], ignore_edges=False, every_change= stdevs.append(stdevval) maxs.append(meanval) mins.append(meanval) - #if hasattr(self, 'every_change_values'): + # if hasattr(self, 'every_change_values'): # updated_every_change_vals.append(self.every_change_values[i + 1]) self.mean = means self.median = medians @@ -510,9 +510,9 @@ def block_stats_filter_positions(self, sigma=5): self.data["euvalues"].data[index:self.blocks[i + 1]], sigma=sigma) if np.isfinite(meanval): - #this is preventing the nans above from being added. not sure what to do here. - #bokeh cannot deal with nans. but we need entries in order to have the blocks indexes - #remain correct. but maybe we dont care about the block indexes after averaging + # This is preventing the NaNs above from being added; not sure what to do here. + # Bokeh cannot deal with NaNs, but we need entries in order to have the block indexes + # remain correct. But maybe we don't care about the block indexes after averaging. medtimes.append(calc_median_time(self.data["dates"].data[index:self.blocks[i + 1]][good])) means.append(meanval) medians.append(medianval) @@ -523,7 +523,7 @@ def block_stats_filter_positions(self, sigma=5): # If there were blocks composed entirely of bad data, meaning no mean values were # calculated, remove those every change values and block values from the EdbMnemonic # instance. - if len(remove_change_indexes) > 0: + if len(remove_change_indexes) > 0: self.every_change_values = np.delete(self.every_change_values, remove_change_indexes) self.blocks = np.delete(self.blocks, remove_change_indexes) @@ -690,37 +690,33 @@ def bokeh_plot(self, show_plot=False, savefig=False, out_dir='./', nominal_value if plot_mean: source_mean = ColumnDataSource(data={'mean_x': self.median_times, 'mean_y': self.mean}) mean_data = fig.scatter(x='mean_x', y='mean_y', line_width=1, line_color='orange', alpha=0.75, source=source_mean) - mean_hover_tool = HoverTool(tooltips=[('Mean', '@mean_y'), - ('Date', '@mean_x{%d %b %Y %H:%M:%S}') - ], mode='mouse', renderers=[mean_data]) + mean_hover_tool = HoverTool(tooltips=[('Mean', '@mean_y'), ('Date', '@mean_x{%d %b %Y %H:%M:%S}')], + mode='mouse', renderers=[mean_data]) mean_hover_tool.formatters = {'@mean_x': 'datetime'} fig.tools.append(mean_hover_tool) if plot_median: source_median = ColumnDataSource(data={'median_x': self.median_times, 'median_y': self.median}) median_data = fig.scatter(x='median_x', y='median_y', line_width=1, line_color='orangered', alpha=0.75, source=source_median) - median_hover_tool = HoverTool(tooltips=[('Median', '@median_y'), - ('Date', '@median_x{%d %b %Y %H:%M:%S}') - ], mode='mouse', renderers=[median_data]) + median_hover_tool = HoverTool(tooltips=[('Median', '@median_y'), ('Date', '@median_x{%d %b %Y %H:%M:%S}')], + mode='mouse', renderers=[median_data]) median_hover_tool.formatters = {'@median_x': 'datetime'} fig.tools.append(median_hover_tool) - # If the max and min arrays are to be plotted, create columndata sources for them as well + # If the max and min arrays are to be plotted, create columndata sources for them as well if plot_max: source_max = ColumnDataSource(data={'max_x': self.median_times, 'max_y': self.max}) max_data = fig.scatter(x='max_x', y='max_y', line_width=1,
color='black', line_color='black', source=source_max) - max_hover_tool = HoverTool(tooltips=[('Max', '@max_y'), - ('Date', '@max_x{%d %b %Y %H:%M:%S}') - ], mode='mouse', renderers=[max_data]) + max_hover_tool = HoverTool(tooltips=[('Max', '@max_y'), ('Date', '@max_x{%d %b %Y %H:%M:%S}')], + mode='mouse', renderers=[max_data]) max_hover_tool.formatters = {'@max_x': 'datetime'} fig.tools.append(max_hover_tool) if plot_min: source_min = ColumnDataSource(data={'min_x': self.median_times, 'min_y': self.min}) min_data = fig.scatter(x='min_x', y='min_y', line_width=1, color='black', line_color='black', source=source_min) - minn_hover_tool = HoverTool(tooltips=[('Min', '@min_y'), - ('Date', '@min_x{%d %b %Y %H:%M:%S}') - ], mode='mouse', renderers=[min_data]) + min_hover_tool = HoverTool(tooltips=[('Min', '@min_y'), ('Date', '@min_x{%d %b %Y %H:%M:%S}')], + mode='mouse', renderers=[min_data]) min_hover_tool.formatters = {'@min_x': 'datetime'} fig.tools.append(min_hover_tool) @@ -740,12 +736,12 @@ def bokeh_plot(self, show_plot=False, savefig=False, out_dir='./', nominal_value fig = add_limit_boxes(fig, yellow=yellow_limits, red=red_limits) # Make the x axis tick labels look nice - fig.xaxis.formatter = DatetimeTickFormatter(microseconds=["%d %b %H:%M:%S.%3N"], - seconds=["%d %b %H:%M:%S.%3N"], - hours=["%d %b %H:%M"], - days=["%d %b %H:%M"], - months=["%d %b %Y %H:%M"], - years=["%d %b %Y"] + fig.xaxis.formatter = DatetimeTickFormatter(microseconds="%d %b %H:%M:%S.%3N", + seconds="%d %b %H:%M:%S.%3N", + hours="%d %b %H:%M", + days="%d %b %H:%M", + months="%d %b %Y %H:%M", + years="%d %b %Y" ) fig.xaxis.major_label_orientation = np.pi / 4 @@ -1206,12 +1202,12 @@ def plot_data_plus_devs(self, use_median=False, show_plot=False, savefig=False, fig_dev.line(data_dates, dev, color='red') # Make the x axis tick labels look nice - fig_dev.xaxis.formatter = DatetimeTickFormatter(microseconds=["%d %b %H:%M:%S.%3N"], - seconds=["%d %b %H:%M:%S.%3N"], - hours=["%d %b %H:%M"], - days=["%d %b %H:%M"], - months=["%d %b %Y %H:%M"], - years=["%d %b %Y"] + fig_dev.xaxis.formatter = DatetimeTickFormatter(microseconds="%d %b %H:%M:%S.%3N", + seconds="%d %b %H:%M:%S.%3N", + hours="%d %b %H:%M", + days="%d %b %H:%M", + months="%d %b %Y %H:%M", + years="%d %b %Y" ) fig.xaxis.major_label_orientation = np.pi / 4 diff --git a/jwql/example_config.json b/jwql/example_config.json index b6dfe2806..95bc8c8cf 100644 --- a/jwql/example_config.json +++ b/jwql/example_config.json @@ -2,7 +2,7 @@ "admin_account" : "", "auth_mast" : "", "connection_string" : "", - "database" : { + "databases" : { "engine" : "", "name" : "", "user" : "", @@ -10,13 +10,23 @@ "host" : "", "port" : "" }, - "django_database" : { - "ENGINE" : "", - "NAME" : "", - "USER" : "", - "PASSWORD" : "", - "HOST" : "", - "PORT" : "" + "django_databases" : { + "default": { + "ENGINE" : "", + "NAME" : "", + "USER" : "", + "PASSWORD" : "", + "HOST" : "", + "PORT" : "" + }, + "monitors": { + "ENGINE" : "", + "NAME" : "", + "USER" : "", + "PASSWORD" : "", + "HOST" : "", + "PORT" : "" + } }, "jwql_dir" : "", "jwql_version": "", diff --git a/jwql/instrument_monitors/common_monitors/bad_pixel_monitor.py b/jwql/instrument_monitors/common_monitors/bad_pixel_monitor.py index c0366df14..d3ae2e795 100755 --- a/jwql/instrument_monitors/common_monitors/bad_pixel_monitor.py +++ b/jwql/instrument_monitors/common_monitors/bad_pixel_monitor.py @@ -105,19 +105,13 @@ from jwql.shared_tasks.shared_tasks import only_one, run_pipeline, run_parallel_pipeline from jwql.utils import
crds_tools, instrument_properties, monitor_utils from jwql.utils.constants import DARKS_BAD_PIXEL_TYPES, DARK_EXP_TYPES, FLATS_BAD_PIXEL_TYPES, FLAT_EXP_TYPES -from jwql.utils.constants import JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE +from jwql.utils.constants import JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE, ON_GITHUB_ACTIONS +from jwql.utils.constants import ON_READTHEDOCS from jwql.utils.logging_functions import log_info, log_fail from jwql.utils.mast_utils import mast_query from jwql.utils.permissions import set_permissions from jwql.utils.utils import copy_files, create_png_from_fits, ensure_dir_exists, get_config, filesystem_path -# Determine if the code is being run by Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - -# Determine if the code is being run as part of a Readthedocs build -ON_READTHEDOCS = False -if 'READTHEDOCS' in os.environ: # pragma: no cover - ON_READTHEDOCS = os.environ['READTHEDOCS'] if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS: from jwql.website.apps.jwql.monitor_pages.monitor_bad_pixel_bokeh import BadPixelPlots diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py index 00de6b2bf..9a1968921 100755 --- a/jwql/instrument_monitors/common_monitors/dark_monitor.py +++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py @@ -101,8 +101,9 @@ from jwql.instrument_monitors import pipeline_tools from jwql.shared_tasks.shared_tasks import only_one, run_pipeline, run_parallel_pipeline from jwql.utils import calculations, instrument_properties, mast_utils, monitor_utils -from jwql.utils.constants import ASIC_TEMPLATES, DARK_MONITOR_MAX_BADPOINTS_TO_PLOT, JWST_INSTRUMENT_NAMES, FULL_FRAME_APERTURES -from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE, JWST_DATAPRODUCTS, RAPID_READPATTERNS +from jwql.utils.constants import ASIC_TEMPLATES, DARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME, DARK_MONITOR_MAX_BADPOINTS_TO_PLOT +from jwql.utils.constants import JWST_INSTRUMENT_NAMES, FULL_FRAME_APERTURES, JWST_INSTRUMENT_NAMES_MIXEDCASE +from jwql.utils.constants import JWST_DATAPRODUCTS, MINIMUM_DARK_CURRENT_GROUPS, RAPID_READPATTERNS from jwql.utils.logging_functions import log_info, log_fail from jwql.utils.permissions import set_permissions from jwql.utils.utils import copy_files, ensure_dir_exists, get_config, filesystem_path, save_png @@ -233,7 +234,8 @@ def add_bad_pix(self, coordinates, pixel_type, files, mean_filename, baseline_fi with engine.begin() as connection: connection.execute(self.pixel_table.__table__.insert(), entry) - def create_mean_slope_figure(self, image, num_files, hotxy=None, deadxy=None, noisyxy=None, baseline_file=None): + def create_mean_slope_figure(self, image, num_files, hotxy=None, deadxy=None, noisyxy=None, baseline_file=None, + min_time='', max_time=''): """Create and save a png containing the mean dark slope image, to be displayed in the web app @@ -257,10 +259,17 @@ def create_mean_slope_figure(self, image, num_files, hotxy=None, deadxy=None, no baseline_file : str Name of fits file containing the mean slope image to which ``image`` was compared when looking for new hot/dead/noisy pixels + + min_time : str + Earliest observation time, in MJD, used in the creation of ``image``. + + max_time : str + Latest observation time, in MJD, used in the creation of ``image``. 
+ """ output_filename = '{}_{}_{}_to_{}_mean_slope_image.png'.format(self.instrument.lower(), self.aperture.lower(), - self.query_start, self.query_end) + min_time, max_time) mean_slope_dir = os.path.join(get_config()['outputs'], 'dark_monitor', 'mean_slope_images') @@ -274,8 +283,8 @@ def create_mean_slope_figure(self, image, num_files, hotxy=None, deadxy=None, no img_mn, img_med, img_dev = sigma_clipped_stats(image[4: ny - 4, 4: nx - 4]) # Create figure - start_time = Time(float(self.query_start), format='mjd').tt.datetime.strftime("%m/%d/%Y") - end_time = Time(float(self.query_end), format='mjd').tt.datetime.strftime("%m/%d/%Y") + start_time = Time(float(min_time), format='mjd').tt.datetime.strftime("%m/%d/%Y") + end_time = Time(float(max_time), format='mjd').tt.datetime.strftime("%m/%d/%Y") self.plot = figure(title=f'{self.aperture}: {num_files} files. {start_time} to {end_time}', tools='') # tools='pan,box_zoom,reset,wheel_zoom,save') @@ -432,14 +441,30 @@ def exclude_existing_badpix(self, badpix, pixel_type): new_pixels_y.append(y) logging.info("\t\tKeeping {} {} pixels".format(len(new_pixels_x), pixel_type)) -# pixel = (x, y) -# if pixel not in already_found: -# new_pixels_x.append(x) -# new_pixels_y.append(y) session.close() return (new_pixels_x, new_pixels_y) + def exclude_too_few_groups(self, result_list): + """Given a list of MAST query results, go through and exclude + files that have too few groups to be useful + + Parameters + ---------- + result_list : list + List of dictionaries containing a MAST query result + + Returns + ------- + filtered_results : list + List of dictionaries with files containing too few groups excluded + """ + filtered_results = [] + for result in result_list: + if result['ngroups'] >= MINIMUM_DARK_CURRENT_GROUPS: + filtered_results.append(result) + return filtered_results + def find_hot_dead_pixels(self, mean_image, comparison_image, hot_threshold=2., dead_threshold=0.1): """Create the ratio of the slope image to a baseline slope image. Pixels in the ratio image with values above @@ -675,14 +700,19 @@ def process(self, file_list): # Basic metadata that will be needed later self.get_metadata(file_list[0]) + # For MIRI, save the rateints files. For other instruments save the rate files.
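Note: the comment above (and the suffix selection just below) switches MIRI to 'rateints' products so that individual integrations are available and the anomalous first MIRI integration can be skipped. A short sketch of the distinction, using a hypothetical filename and the standard JWST rateints layout (SCI extension of shape (nints, ny, nx)):

from astropy.io import fits
import numpy as np

# Hypothetical filename; a rateints file holds one slope image per integration,
# while a rate file holds a single integration-averaged 2D slope image.
with fits.open('jw00000000000_00000_00000_mirimage_rateints.fits') as hdulist:
    cube = hdulist['SCI'].data           # shape (nints, ny, nx)

usable = cube[1:]                        # skip the anomalous first integration
mean_slope = np.nanmean(usable, axis=0)  # rate-like average of the remaining ints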
+ if self.instrument == 'miri': + output_suffix = 'rateints' + else: + output_suffix = 'rate' + # Run pipeline steps on files, generating slope files pipeline_files = [] slope_files = [] for filename in file_list: + logging.info(f'\tWorking on file: {filename}') - logging.info('\tWorking on file: {}'.format(filename)) - - rate_file = filename.replace("dark", "rate") + rate_file = filename.replace("dark", output_suffix) rate_file_name = os.path.basename(rate_file) local_rate_file = os.path.join(self.working_data_dir, rate_file_name) @@ -697,15 +727,16 @@ def process(self, file_list): step_args = {'dark_current': {'skip': True}} # Call the pipeline - outputs = run_parallel_pipeline(pipeline_files, "dark", ["rate"], self.instrument, step_args=step_args) + outputs = run_parallel_pipeline(pipeline_files, "dark", [output_suffix], self.instrument, step_args=step_args) + for filename in file_list: - processed_file = filename.replace("_dark", "_rate") + processed_file = filename.replace("_dark", f"_{output_suffix}") if processed_file not in slope_files and os.path.isfile(processed_file): slope_files.append(processed_file) os.remove(filename) obs_times = [] - logging.info('\tSlope images to use in the dark monitor for {}, {}:'.format(self.instrument, self.aperture)) + logging.info(f'\tSlope images to use in the dark monitor for {self.instrument}, {self.aperture}:') for item in slope_files: logging.info('\t\t{}'.format(item)) # Get the observation time for each file @@ -719,13 +750,23 @@ def process(self, file_list): mid_time = instrument_properties.mean_time(obs_times) try: - - # Read in all slope images and place into a list - slope_image_stack, slope_exptimes = pipeline_tools.image_stack(slope_files) + # Read in all slope images and create a stack of ints (from rateints files) + # or mean ints (from rate files) + slope_image_stack, slope_exptimes = pipeline_tools.image_stack(slope_files, skipped_initial_ints=self.skipped_initial_ints) + logging.info(f'Shape of slope image stack: {slope_image_stack.shape}') # Calculate a mean slope image from the inputs slope_image, stdev_image = calculations.mean_image(slope_image_stack, sigma_threshold=3) - mean_slope_file = self.save_mean_slope_image(slope_image, stdev_image, slope_files) + + # Use the min and max observation time of the input files to create the slope file name + min_time_str = min_time.strftime('%Y-%m-%dT%H:%M:%S') + min_time_mjd = Time(min_time_str, format='isot', scale='utc').mjd + min_time_mjd_trunc = "{:.4f}".format(min_time_mjd) + max_time_str = max_time.strftime('%Y-%m-%dT%H:%M:%S') + max_time_mjd = Time(max_time_str, format='isot', scale='utc').mjd + max_time_mjd_trunc = "{:.4f}".format(max_time_mjd) + mean_slope_file = self.save_mean_slope_image(slope_image, stdev_image, slope_files, + min_time_mjd_trunc, max_time_mjd_trunc) # Free up memory del slope_image_stack @@ -753,7 +794,15 @@ def process(self, file_list): baseline_stdev = deepcopy(stdev_image) else: logging.info('\tBaseline file is {}'.format(baseline_file)) - baseline_mean, baseline_stdev = self.read_baseline_slope_image(baseline_file) + + if not os.path.isfile(baseline_file): + logging.warning((f'\tBaseline file {baseline_file} does not exist.
Setting ' + 'the current mean slope image to be the new baseline.')) + baseline_file = mean_slope_file + baseline_mean = deepcopy(slope_image) + baseline_stdev = deepcopy(stdev_image) + else: + baseline_mean, baseline_stdev = self.read_baseline_slope_image(baseline_file) # Check the hot/dead pixel population for changes logging.info("\tFinding new hot/dead pixels") @@ -796,10 +845,10 @@ def process(self, file_list): logging.info('\tFound {} new noisy pixels'.format(len(new_noisy_pixels[0]))) self.add_bad_pix(new_noisy_pixels, 'noisy', file_list, mean_slope_file, baseline_file, min_time, mid_time, max_time) - logging.info("Creating Mean Slope Image") # Create png file of mean slope image. Add bad pixels only for full frame apertures self.create_mean_slope_figure(slope_image, len(slope_files), hotxy=new_hot_pix, deadxy=new_dead_pix, - noisyxy=new_noisy_pixels, baseline_file=baseline_file) + noisyxy=new_noisy_pixels, baseline_file=baseline_file, + min_time=min_time_mjd_trunc, max_time=max_time_mjd_trunc) logging.info('\tSigma-clipped mean of the slope images saved to: {}'.format(mean_slope_file)) # ----- Calculate image statistics ----- @@ -825,7 +874,10 @@ def process(self, file_list): # Construct new entry for dark database table source_files = [os.path.basename(item) for item in file_list] for key in amp_mean.keys(): - dark_db_entry = {'aperture': self.aperture, 'amplifier': key, 'mean': amp_mean[key], + dark_db_entry = {'aperture': self.aperture, + 'amplifier': key, + 'readpattern': self.readpatt, + 'mean': amp_mean[key], 'stdev': amp_stdev[key], 'source_files': source_files, 'obs_start_time': min_time, @@ -899,119 +951,138 @@ def run(self): self.query_end = Time.now().mjd # Loop over all instruments - for instrument in JWST_INSTRUMENT_NAMES: + for instrument in ['miri', 'nircam']: # JWST_INSTRUMENT_NAMES: self.instrument = instrument + logging.info(f'\n\nWorking on {instrument}') # Identify which database tables to use self.identify_tables() - # Get a list of all possible apertures from pysiaf - possible_apertures = list(Siaf(instrument).apernames) - possible_apertures = [ap for ap in possible_apertures if ap not in apertures_to_skip] + # Run the monitor only on the apertures listed in the threshold file. Skip all others. + instrument_entries = limits['Instrument'] == instrument + possible_apertures = limits['Aperture'][instrument_entries] # Get a list of all possible readout patterns associated with the aperture possible_readpatts = RAPID_READPATTERNS[instrument] for aperture in possible_apertures: logging.info('') - logging.info('Working on aperture {} in {}'.format(aperture, instrument)) + logging.info(f'Working on aperture {aperture} in {instrument}') # Find appropriate threshold for the number of new files needed match = aperture == limits['Aperture'] - - # If the aperture is not listed in the threshold file, we need - # a default - if not np.any(match): - file_count_threshold = 30 - logging.warning(('\tAperture {} is not present in the threshold file. 
Continuing ' - 'with the default threshold of 30 files.'.format(aperture))) - else: - file_count_threshold = limits['Threshold'][match][0] + integration_count_threshold = limits['Threshold'][match][0] + self.skipped_initial_ints = limits['N_skipped_integs'][match][0] self.aperture = aperture - # We need a separate search for each readout pattern for readpatt in possible_readpatts: self.readpatt = readpatt - logging.info('\tWorking on readout pattern: {}'.format(self.readpatt)) + logging.info(f'\tWorking on readout pattern: {self.readpatt}') # Locate the record of the most recent MAST search self.query_start = self.most_recent_search() - logging.info('\tQuery times: {} {}'.format(self.query_start, self.query_end)) + logging.info(f'\tQuery times: {self.query_start} {self.query_end}') # Query MAST using the aperture and the time of the # most recent previous search as the starting time - new_entries = monitor_utils.mast_query_darks(instrument, aperture, self.query_start, self.query_end, readpatt=self.readpatt) + new_entries = monitor_utils.mast_query_darks(instrument, aperture, self.query_start, + self.query_end, readpatt=self.readpatt) # Exclude ASIC tuning data len_new_darks = len(new_entries) new_entries = monitor_utils.exclude_asic_tuning(new_entries) len_no_asic = len(new_entries) num_asic = len_new_darks - len_no_asic - logging.info("\tFiltering out ASIC tuning files removed {} dark files.".format(num_asic)) - logging.info('\tAperture: {}, Readpattern: {}, new entries: {}'.format(self.aperture, self.readpatt, - len(new_entries))) + # Exclude files that don't have enough groups to be useful + new_entries = self.exclude_too_few_groups(new_entries) + len_new_darks = len(new_entries) - # Check to see if there are enough new files to meet the - # monitor's signal-to-noise requirements - if len(new_entries) >= file_count_threshold: - logging.info('\tMAST query has returned sufficient new dark files for {}, {}, {} to run the dark monitor.' - .format(self.instrument, self.aperture, self.readpatt)) - - # Get full paths to the files - new_filenames = [] - for file_entry in new_entries: - try: - new_filenames.append(filesystem_path(file_entry['filename'])) - except FileNotFoundError: - logging.warning('\t\tUnable to locate {} in filesystem. Not including in processing.' - .format(file_entry['filename'])) - - # In some (unusual) cases, there are files in MAST with the correct aperture name - # but incorrect array sizes. Make sure that the new files all have the expected - # aperture size - temp_filenames = [] - bad_size_filenames = [] - expected_ap = Siaf(instrument)[aperture] - expected_xsize = expected_ap.XSciSize - expected_ysize = expected_ap.YSciSize - for new_file in new_filenames: - with fits.open(new_file) as hdulist: - xsize = hdulist[0].header['SUBSIZE1'] - ysize = hdulist[0].header['SUBSIZE2'] - if xsize == expected_xsize and ysize == expected_ysize: - temp_filenames.append(new_file) - else: - bad_size_filenames.append(new_file) - if len(temp_filenames) != len(new_filenames): - logging.info('\tSome files returned by MAST have unexpected aperture sizes. 
These files will be ignored: ') - for badfile in bad_size_filenames: - logging.info('\t\t{}'.format(badfile)) - new_filenames = deepcopy(temp_filenames) - - # If it turns out that the monitor doesn't find enough - # of the files returned by the MAST query to meet the threshold, - # then the monitor will not be run - if len(new_filenames) < file_count_threshold: - logging.info(("\tFilesystem search for the files identified by MAST has returned {} files. " - "This is less than the required minimum number of files ({}) necessary to run " - "the monitor. Quitting.").format(len(new_filenames), file_count_threshold)) - monitor_run = False + logging.info(f'\tAperture: {self.aperture}, Readpattern: {self.readpatt}, new entries: {len(new_entries)}') + + # Get full paths to the files + new_filenames = [] + for file_entry in new_entries: + try: + new_filenames.append(filesystem_path(file_entry['filename'])) + except FileNotFoundError: + logging.warning((f"\t\tUnable to locate {file_entry['filename']} in filesystem. " + "Not including in processing.")) + + # Generate a count of the total number of integrations across the files. This number will + # be compared to the threshold value to determine if the monitor is run. + # Also, in some (unusual) cases, there are files in MAST with the correct aperture name + # but incorrect array sizes. Make sure that the new files all have the expected + # aperture size + total_integrations = 0 + integrations = [] + starting_times = [] + ending_times = [] + temp_filenames = [] + bad_size_filenames = [] + expected_ap = Siaf(instrument)[aperture] + expected_xsize = expected_ap.XSciSize + expected_ysize = expected_ap.YSciSize + for new_file in new_filenames: + with fits.open(new_file) as hdulist: + xsize = hdulist[0].header['SUBSIZE1'] + ysize = hdulist[0].header['SUBSIZE2'] + nints = hdulist[0].header['NINTS'] + # If the array size matches expectations, or if Siaf doesn't give an expected size, then + # keep the file. Also, make sure there is at least one integration, after ignoring any user-input + # number of integrations. + keep_ints = int(nints) - self.skipped_initial_ints + if ((keep_ints > 0) and ((xsize == expected_xsize and ysize == expected_ysize) + or expected_xsize is None or expected_ysize is None)): + temp_filenames.append(new_file) + total_integrations += int(nints) + integrations.append(int(nints) - self.skipped_initial_ints) + starting_times.append(hdulist[0].header['EXPSTART']) + ending_times.append(hdulist[0].header['EXPEND']) else: - logging.info(("\tFilesystem search for the files identified by MAST has returned {} files.") - .format(len(new_filenames))) - monitor_run = True - - if monitor_run: - # Set up directories for the copied data - ensure_dir_exists(os.path.join(self.working_dir, 'data')) - self.working_data_dir = os.path.join(self.working_dir, - 'data/{}_{}'.format(self.instrument.lower(), - self.aperture.lower())) - ensure_dir_exists(self.working_data_dir) - + bad_size_filenames.append(new_file) + logging.info((f'\t\t{new_file} has unexpected aperture size. Expecting ' + f'{expected_xsize}x{expected_ysize}. Got {xsize}x{ysize}')) + + if len(temp_filenames) != len(new_filenames): + logging.info(('\t\tSome files returned by MAST have unexpected aperture sizes.
These files ' + 'will be ignored: ')) + for badfile in bad_size_filenames: + logging.info('\t\t\t{}'.format(badfile)) + new_filenames = deepcopy(temp_filenames) + + # Check to see if there are enough new integrations to meet the + # monitor's signal-to-noise requirements + if len(new_filenames) > 0: + logging.info((f'\t\tFilesystem search for new dark integrations for {self.instrument}, {self.aperture}, ' + f'{self.readpatt} has found {total_integrations} integrations spread ' + f'across {len(new_filenames)} files.')) + if total_integrations >= integration_count_threshold: + logging.info(f'\tThis meets the threshold of {integration_count_threshold} integrations.') + monitor_run = True + + # Set up directories for the copied data + ensure_dir_exists(os.path.join(self.working_dir, 'data')) + self.working_data_dir = os.path.join(self.working_dir, + 'data/{}_{}'.format(self.instrument.lower(), + self.aperture.lower())) + ensure_dir_exists(self.working_data_dir) + + # Split the list of good files into sub-lists based on the integration + # threshold. The monitor will then be run on each sub-list independently, + # in order to produce results with roughly the same signal-to-noise. This + # also prevents the monitor running on a huge chunk of files in the case + # where it hasn't been run in a while and data have piled up in the meantime. + self.split_files_into_sub_lists(new_filenames, starting_times, ending_times, + integrations, integration_count_threshold) + + # Run the monitor once on each list + for new_file_list, batch_start_time, batch_end_time, batch_integrations in zip(self.file_batches, + self.start_time_batches, + self.end_time_batches, + self.integration_batches): # Copy files from filesystem - dark_files, not_copied = copy_files(new_filenames, self.data_dir) + dark_files, not_copied = copy_files(new_file_list, self.working_data_dir) # Check that there were no problems with the file copying. If any of the copied # files have different sizes between the MAST filesystem and the JWQL filesystem, @@ -1020,43 +1091,73 @@ def run(self): copied_size = os.stat(dark_file).st_size orig_size = os.stat(filesystem_path(os.path.basename(dark_file))).st_size if orig_size != copied_size: - logging.info(f"\tProblem copying {os.path.basename(dark_file)} from the filesystem.") - logging.info(f"Size in filesystem: {orig_size}, size of copy: {copied_size}. Skipping file.") + logging.error(f"\tProblem copying {os.path.basename(dark_file)} from the filesystem!") + logging.error(f"Size in filesystem: {orig_size}, size of copy: {copied_size}. 
Skipping file.") not_copied.append(dark_file) dark_files.remove(dark_file) os.remove(dark_file) - logging.info('\tNew_filenames: {}'.format(new_filenames)) - logging.info('\tData dir: {}'.format(self.data_dir)) - logging.info('\tCopied to working dir: {}'.format(dark_files)) + logging.info('\tNew_filenames: {}'.format(new_file_list)) + logging.info('\tData dir: {}'.format(self.working_data_dir)) + logging.info('\tCopied to data dir: {}'.format(dark_files)) logging.info('\tNot copied: {}'.format(not_copied)) - # Run the dark monitor - self.process(dark_files) + # Get the starting and ending time of the files in this monitor run + batch_start_time = np.min(np.array(batch_start_time)) + batch_end_time = np.max(np.array(batch_end_time)) + + if len(dark_files) > 0: + # Run the dark monitor + logging.info(f'\tRunning process for {instrument}, {aperture}, {readpatt} with:') + for dkfile in dark_files: + logging.info(f'\t{dkfile}') + self.process(dark_files) + else: + logging.info('\tNo files remaining to process. Skipping monitor.') + monitor_run = False + + # Update the query history once for each group of files + new_entry = {'instrument': instrument, + 'aperture': aperture, + 'readpattern': self.readpatt, + 'start_time_mjd': batch_start_time, + 'end_time_mjd': batch_end_time, + 'files_found': len(dark_files), + 'run_monitor': monitor_run, + 'entry_date': datetime.datetime.now()} + + with engine.begin() as connection: + connection.execute( + self.query_table.__table__.insert(), new_entry) + logging.info('\tUpdated the query history table') + logging.info('NEW ENTRY: ') + logging.info(new_entry) else: - logging.info(('\tDark monitor skipped. MAST query has returned {} new dark files for ' - '{}, {}, {}. {} new files are required to run dark current monitor.') - .format(len(new_entries), instrument, aperture, self.readpatt, file_count_threshold)) + logging.info((f'\tThis is below the threshold of {integration_count_threshold} ' + 'integrations. Monitor not run.')) monitor_run = False - # Update the query history - new_entry = {'instrument': instrument, - 'aperture': aperture, - 'readpattern': self.readpatt, - 'start_time_mjd': self.query_start, - 'end_time_mjd': self.query_end, - 'files_found': len(new_entries), - 'run_monitor': monitor_run, - 'entry_date': datetime.datetime.now()} - with engine.begin() as connection: - connection.execute( - self.query_table.__table__.insert(), new_entry) - logging.info('\tUpdated the query history table') + # Update the query history + new_entry = {'instrument': instrument, + 'aperture': aperture, + 'readpattern': self.readpatt, + 'start_time_mjd': self.query_start, + 'end_time_mjd': self.query_end, + 'files_found': len(new_entries), + 'run_monitor': monitor_run, + 'entry_date': datetime.datetime.now()} + + with engine.begin() as connection: + connection.execute( + self.query_table.__table__.insert(), new_entry) + logging.info('\tUpdated the query history table') + logging.info('NEW ENTRY: ') + logging.info(new_entry) logging.info('Dark Monitor completed successfully.') - def save_mean_slope_image(self, slope_img, stdev_img, files): + def save_mean_slope_image(self, slope_img, stdev_img, files, min_time, max_time): """Save the mean slope image and associated stdev image to a file @@ -1072,6 +1173,12 @@ def save_mean_slope_image(self, slope_img, stdev_img, files): files : list List of input files used to construct the mean slope image + min_time : str + Earliest observation time, in MJD, corresponding to ``files``. 
+ + max_time : str + Latest observation time, in MJD, corresponding to ``files``. + Returns ------- output_filename : str @@ -1080,7 +1187,7 @@ def save_mean_slope_image(self, slope_img, stdev_img, files): output_filename = '{}_{}_{}_to_{}_mean_slope_image.fits'.format(self.instrument.lower(), self.aperture.lower(), - self.query_start, self.query_end) + min_time, max_time) mean_slope_dir = os.path.join(get_config()['outputs'], 'dark_monitor', 'mean_slope_images') ensure_dir_exists(mean_slope_dir) @@ -1092,6 +1199,8 @@ def save_mean_slope_image(self, slope_img, stdev_img, files): primary_hdu.header['APERTURE'] = (self.aperture, 'Aperture name') primary_hdu.header['QRY_STRT'] = (self.query_start, 'MAST Query start time (MJD)') primary_hdu.header['QRY_END'] = (self.query_end, 'MAST Query end time (MJD)') + primary_hdu.header['MIN_TIME'] = (min_time, 'Beginning obs time (MJD)') + primary_hdu.header['MAX_TIME'] = (max_time, 'Ending obs time (MJD)') files_string = 'FILES USED: ' for filename in files: @@ -1128,6 +1237,202 @@ def shift_to_full_frame(self, coords): return (x, y) + def split_files_into_sub_lists(self, files, start_times, end_times, integration_list, threshold): + """Given a list of filenames and a list of the number of integrations + within each, split the files into sub-lists, where the files in each + list have a total number of integrations that is just over the given + threshold value. + + General assumption: Keeping files in different epochs separate is probably more + important than rigidly enforcing that the required number of integrations is reached. + + When dividing up the input files into separate lists, we first divide up by + epoch, where the start/end of epochs are defined as times where + DARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME days pass without any new data appearing. + Each epoch is then potentially subdivided further based on the threshold number + of integrations (not exposures). The splitting does not operate within files. + For example, if the threshold is 2 integrations, and a particular file contains 5 + integrations, then the dark monitor will be called once on that file, working on + all 5 integrations. + + At the end of the epoch, if the final group of file(s) do not have enough + integrations to reach the threshold, they are ignored since there is no way + to know if there are more files in the same epoch that have not yet been taken. So + the files are ignored, and the query end time will be adjusted such that these files + will be found in the next run of the monitor. + + Dark calibration plans per instrument: + NIRCam - for full frame, takes only 2 integrations (150 groups) once per ~30-50 days. + for subarrays, takes 5-10 integrations once per 30-50 days + team response - + NIRISS - full frame - 2 exps of 5 ints within each 2 week period. No requirement for + the 2 exps to be taken at the same time though. Could be separated + by almost 2 weeks, and be closer to the darks from the previous or + following 2 week period. + subarrays - 30 ints in each month-long span + MIRI - 2 ints every 2 hours-5 days for a while, then 2 ints every 14-21 days + team response - monitor should run on each exp separately. It should also throw out + the first integration of each exp. 
+ + NIRSpec - full frame 5-6 integrations spread over each month + subarray - 12 ints spread over each 2 month period + FGS - N/A + + Parameters + ---------- + files : list + List of filenames + + start_times : list + List of MJD dates corresponding to the exposure start time of each file in ``files`` + + end_times : list + List of MJD dates corresponding to the exposure end time of each file in ``files`` + + integration_list : list + List of the number of integrations for each file in ``files`` + + threshold : int + Threshold number of integrations needed to trigger a run of the + dark monitor + """ + + logging.info('\t\tSplitting into sub-lists. Inputs at the beginning: (file, start time, end time, nints, threshold)') + for f, st, et, inte in zip(files, start_times, end_times, integration_list): + logging.info(f'\t\t {f}, {st}, {et}, {inte}, {threshold}') + logging.info('\n') + + # Eventual return parameters + self.file_batches = [] + self.start_time_batches = [] + self.end_time_batches = [] + self.integration_batches = [] + + # Convert the list of start times to an array for the delta-time math below + start_times = np.array(start_times) + + # Get the delta t between each pair of files. Insert 0 as the initial + # delta_t, to make the coding easier + delta_t = start_times[1:] - start_times[0:-1] # units are days + delta_t = np.insert(delta_t, 0, 0) + + # Divide up the list such that you don't cross large delta t values. We want to measure + # dark current during each "epoch" within a calibration proposal + dividers = np.where(delta_t >= DARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME[self.instrument])[0] + + # Add dividers at the beginning index to make the coding easier + dividers = np.insert(dividers, 0, 0) + + # If there is no divider at the end of the list of files, then add one + if dividers[-1] < len(delta_t): + dividers = np.insert(dividers, len(dividers), len(delta_t)) + + logging.info(f'\t\t\tThreshold delta time used to divide epochs: {DARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME[self.instrument]} days') + logging.info(f'\t\t\tdelta_t between files: {delta_t} days.') + logging.info(f'\t\t\tFinal dividers (divide data based on time gaps between files): {dividers}') + logging.info('\n') + + # Loop over epochs. + # Within each batch, divide up the exposures into multiple batches if the total + # number of integrations is above 2*threshold. + for i in range(len(dividers) - 1): + batch_ints = integration_list[dividers[i]:dividers[i + 1]] + batch_files = files[dividers[i]:dividers[i + 1]] + batch_start_times = start_times[dividers[i]:dividers[i + 1]] + batch_end_times = end_times[dividers[i]:dividers[i + 1]] + batch_int_sum = np.sum(batch_ints) + + logging.info(f'\t\t\tLoop over time-based batches.
Working on batch {i}') + logging.info(f'\t\t\tBatch Files, Batch integrations') + for bi, bf in zip(batch_ints, batch_files): + logging.info(f'\t\t\t{bf}, {bi}') + + # Calculate the total number of integrations up to each file + batch_int_sums = np.array([np.sum(batch_ints[0:jj]) for jj in range(1, len(batch_ints) + 1)]) + + base = 0 + startidx = 0 + endidx = 0 + complete = False + + # Divide into sublists + while True: + + endidx = np.where(batch_int_sums >= (base + threshold))[0] + + # Check if we reach the end of the file list + if len(endidx) == 0: + endidx = len(batch_int_sums) - 1 + complete = True + else: + endidx = endidx[0] + if endidx == (len(batch_int_sums) - 1): + complete = True + + logging.debug(f'\t\t\tstartidx: {startidx}') + logging.debug(f'\t\t\tendidx: {endidx}') + logging.debug(f'\t\t\tcomplete: {complete}') + + subgroup_ints = batch_ints[startidx: endidx + 1] + subgroup_files = batch_files[startidx: endidx + 1] + subgroup_start_times = batch_start_times[startidx: endidx + 1] + subgroup_end_times = batch_end_times[startidx: endidx + 1] + subgroup_int_sum = np.sum(subgroup_ints) + + logging.debug(f'\t\t\tsubgroup_ints: {subgroup_ints}') + logging.debug(f'\t\t\tsubgroup_files: {subgroup_files}') + logging.debug(f'\t\t\tsubgroup_int_sum: {subgroup_int_sum}') + + # Add to output lists. The exception is if we are in the + # final subgroup of the final epoch. In that case, we don't know + # if more data are coming soon that may be able to be combined. So + # in that case, we ignore the files for this run of the monitor. + if (i == len(dividers) - 2) and endidx == len(batch_files) - 1: + # Here we are in the final subgroup of the final epoch, where we + # do not necessarily know if there will be future data to combine + # with these data + logging.debug(f'\t\t\tShould be final epoch and final subgroup. epoch number: {i}') + + if np.sum(subgroup_ints) >= threshold: + logging.debug('\t\t\tADDED - final subgroup of final epoch') + self.file_batches.append(subgroup_files) + self.start_time_batches.append(subgroup_start_times) + self.end_time_batches.append(subgroup_end_times) + self.integration_batches.append(subgroup_ints) + else: + # Here the final subgroup does not have enough integrations to reach the threshold + # and we're not sure if the epoch is complete, so we skip these files and save them + # for a future dark monitor run + logging.info('\t\t\tSkipping final subgroup. Not clear if the epoch is complete') + pass + + else: + self.file_batches.append(subgroup_files) + self.start_time_batches.append(subgroup_start_times) + self.end_time_batches.append(subgroup_end_times) + self.integration_batches.append(subgroup_ints) + + if not complete: + startidx = deepcopy(endidx + 1) + base = batch_int_sums[endidx] + else: + # If we reach the end of the list before the expected number of + # subgroups, then we quit. 
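Note: to make the batching logic above easier to follow, here is a standalone sketch (simplified, not the monitor's exact code) of the two-stage split: files are first divided into epochs wherever the gap between start times reaches the per-instrument threshold, and each epoch is then chunked as soon as the running integration count reaches the integration threshold. One deliberate simplification: this version drops every trailing sub-threshold chunk, whereas the monitor keeps them in all but the final epoch, deferring only the last chunk of the last epoch to a future run. The gap_days and threshold defaults here are illustrative, not the monitor's values.

import numpy as np

def split_into_batches(start_times, n_ints, gap_days=10.0, threshold=4):
    """Return lists of file indexes batched by epoch and integration count."""
    start_times = np.asarray(start_times, dtype=float)  # MJD start times
    delta_t = np.insert(np.diff(start_times), 0, 0.0)   # days between files
    dividers = np.where(delta_t >= gap_days)[0]         # epoch boundaries
    dividers = np.concatenate(([0], dividers, [len(start_times)]))
    batches = []
    for lo, hi in zip(dividers[:-1], dividers[1:]):
        batch, total = [], 0
        for idx in range(lo, hi):
            batch.append(idx)
            total += n_ints[idx]
            if total >= threshold:       # enough integrations: close the batch
                batches.append(batch)
                batch, total = [], 0
        # trailing sub-threshold batches are dropped in this simplified sketch
    return batches

# Two epochs separated by a 30-day gap -> [[0, 1], [3, 4]]
print(split_into_batches([0, 1, 2, 32, 33], [2, 2, 2, 2, 2]))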
+ break + + logging.info(f'\n\t\t\tEpoch number: {i}') + logging.info('\t\t\tFiles, integrations in file batch:') + for bi, bf in zip(batch_ints, batch_files): + logging.info(f'\t\t\t{bf}, {bi}') + logging.info(f'\n\t\t\tSplit into separate subgroups for processing:') + logging.info('\t\t\tFiles and number of integrations in each subgroup:') + for fb, ib in zip(self.file_batches, self.integration_batches): + logging.info(f'\t\t\t{fb}, {ib}') + logging.info(f'\t\t\tDONE WITH SUBGROUPS\n\n\n\n') + def stats_by_amp(self, image, amps): """Calculate statistics in the input image for each amplifier as well as the full image @@ -1203,7 +1508,7 @@ def stats_by_amp(self, image, amps): maxx = copy(mxx) if mxy > maxy: maxy = copy(mxy) - amps['5'] = [(0, maxx, 1), (0, maxy, 1)] + amps['5'] = [(4, maxx, 1), (4, maxy, 1)] logging.info(('\tFull frame exposure detected. Adding the full frame to the list ' 'of amplifiers upon which to calculate statistics.')) diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt b/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt index d423bbbdb..010831446 100644 --- a/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt +++ b/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt @@ -1,624 +1,89 @@ -Instrument Aperture Threshold -nircam NRCA1_FULL_OSS 10 -nircam NRCA2_FULL_OSS 10 -nircam NRCA3_FULL_OSS 10 -nircam NRCA4_FULL_OSS 10 -nircam NRCA5_FULL_OSS 10 -nircam NRCB1_FULL_OSS 10 -nircam NRCB2_FULL_OSS 10 -nircam NRCB3_FULL_OSS 10 -nircam NRCB4_FULL_OSS 10 -nircam NRCB5_FULL_OSS 10 -nircam NRCALL_FULL 10 -nircam NRCAS_FULL 10 -nircam NRCA1_FULL 10 -nircam NRCA2_FULL 10 -nircam NRCA3_FULL 10 -nircam NRCA4_FULL 10 -nircam NRCA5_FULL 10 -nircam NRCBS_FULL 10 -nircam NRCB1_FULL 10 -nircam NRCB2_FULL 10 -nircam NRCB3_FULL 10 -nircam NRCB4_FULL 10 -nircam NRCB5_FULL 10 -nircam NRCB1_FULLP 10 -nircam NRCB5_FULLP 10 -nircam NRCA1_SUB160 30 -nircam NRCA2_SUB160 30 -nircam NRCA3_SUB160 30 -nircam NRCA4_SUB160 30 -nircam NRCA5_SUB160 30 -nircam NRCB1_SUB160 30 -nircam NRCB2_SUB160 30 -nircam NRCB3_SUB160 30 -nircam NRCB4_SUB160 30 -nircam NRCB5_SUB160 30 -nircam NRCA1_SUB320 30 -nircam NRCA2_SUB320 30 -nircam NRCA3_SUB320 30 -nircam NRCA4_SUB320 30 -nircam NRCA5_SUB320 30 -nircam NRCB1_SUB320 30 -nircam NRCB2_SUB320 30 -nircam NRCB3_SUB320 30 -nircam NRCB4_SUB320 30 -nircam NRCB5_SUB320 30 -nircam NRCA1_SUB640 30 -nircam NRCA2_SUB640 30 -nircam NRCA3_SUB640 30 -nircam NRCA4_SUB640 30 -nircam NRCA5_SUB640 30 -nircam NRCB1_SUB640 30 -nircam NRCB2_SUB640 30 -nircam NRCB3_SUB640 30 -nircam NRCB4_SUB640 30 -nircam NRCB5_SUB640 30 -nircam NRCA5_GRISM256_F322W2 30 -nircam NRCA5_GRISM128_F322W2 30 -nircam NRCA5_GRISM64_F322W2 30 -nircam NRCA5_GRISM256_F277W 30 -nircam NRCA5_GRISM128_F277W 30 -nircam NRCA5_GRISM64_F277W 30 -nircam NRCA5_GRISM256_F356W 30 -nircam NRCA5_GRISM128_F356W 30 -nircam NRCA5_GRISM64_F356W 30 -nircam NRCA5_GRISM256_F444W 30 -nircam NRCA5_GRISM128_F444W 30 -nircam NRCA5_GRISM64_F444W 30 -nircam NRCA5_GRISM_F322W2 30 -nircam NRCA5_GRISM_F277W 30 -nircam NRCA5_GRISM_F356W 30 -nircam NRCA5_GRISM_F444W 30 -nircam NRCA1_GRISMTS 30 -nircam NRCA1_GRISMTS256 30 -nircam NRCA1_GRISMTS128 30 -nircam NRCA1_GRISMTS64 30 -nircam NRCA3_GRISMTS 30 -nircam NRCA3_GRISMTS256 30 -nircam NRCA3_GRISMTS128 30 -nircam NRCA3_GRISMTS64 30 -nircam NRCA5_TAGRISMTS32 30 -nircam NRCA5_TAGRISMTS32_F405N 30 -nircam NRCA5_TAGRISMTS_SCI_F322W2 30 -nircam NRCA5_TAGRISMTS_SCI_F444W 30 -nircam NRCA3_DHSPIL 
30 -nircam NRCA3_DHSPIL_SUB96 30 -nircam NRCA3_DHSPIL_WEDGES 30 -nircam NRCB4_DHSPIL 30 -nircam NRCB4_DHSPIL_SUB96 30 -nircam NRCB4_DHSPIL_WEDGES 30 -nircam NRCA3_FP1 30 -nircam NRCA3_FP1_SUB8 30 -nircam NRCA3_FP1_SUB64 30 -nircam NRCA3_FP2MIMF 30 -nircam NRCA1_FP3MIMF 30 -nircam NRCA2_FP4MIMF 30 -nircam NRCA4_FP5MIMF 30 -nircam NRCB4_FP1 30 -nircam NRCB4_FP1_SUB8 30 -nircam NRCB4_FP1_SUB64 30 -nircam NRCB4_FP2MIMF 30 -nircam NRCB2_FP3MIMF 30 -nircam NRCB1_FP4MIMF 30 -nircam NRCB3_FP5MIMF 30 -nircam NRCA3_SUB64P 30 -nircam NRCA3_SUB160P 30 -nircam NRCA3_SUB400P 30 -nircam NRCA5_SUB64P 30 -nircam NRCA5_SUB160P 30 -nircam NRCA5_SUB400P 30 -nircam NRCB1_SUB64P 30 -nircam NRCB1_SUB160P 30 -nircam NRCB1_SUB400P 30 -nircam NRCB5_SUB64P 30 -nircam NRCB5_SUB160P 30 -nircam NRCB5_SUB400P 30 -nircam NRCB5_TAPSIMG32 30 -nircam NRCA5_GRISMC_WFSS 30 -nircam NRCA5_GRISMR_WFSS 30 -nircam NRCALL_GRISMC_WFSS 30 -nircam NRCALL_GRISMR_WFSS 30 -nircam NRCB5_GRISMC_WFSS 30 -nircam NRCB5_GRISMR_WFSS 30 -nircam NRCA2_MASK210R 30 -nircam NRCA5_MASK335R 30 -nircam NRCA5_MASK430R 30 -nircam NRCA4_MASKSWB 30 -nircam NRCA5_MASKLWB 30 -nircam NRCA2_TAMASK210R 30 -nircam NRCA5_TAMASK335R 30 -nircam NRCA5_TAMASK430R 30 -nircam NRCA4_TAMASKSWB 30 -nircam NRCA5_TAMASKLWB 30 -nircam NRCA5_TAMASKLWBL 30 -nircam NRCA4_TAMASKSWBS 30 -nircam NRCB1_MASK210R 30 -nircam NRCB5_MASK335R 30 -nircam NRCB5_MASK430R 30 -nircam NRCB3_MASKSWB 30 -nircam NRCB5_MASKLWB 30 -nircam NRCB1_TAMASK210R 30 -nircam NRCB5_TAMASK335R 30 -nircam NRCB5_TAMASK430R 30 -nircam NRCB3_TAMASKSWB 30 -nircam NRCB5_TAMASKLWB 30 -nircam NRCB5_TAMASKLWBL 30 -nircam NRCB3_TAMASKSWBS 30 -nircam NRCA2_FSTAMASK210R 30 -nircam NRCA4_FSTAMASKSWB 30 -nircam NRCA5_FSTAMASKLWB 30 -nircam NRCA5_FSTAMASK335R 30 -nircam NRCA5_FSTAMASK430R 30 -nircam NRCA4_MASKSWB_F182M 30 -nircam NRCA4_MASKSWB_F187N 30 -nircam NRCA4_MASKSWB_F210M 30 -nircam NRCA4_MASKSWB_F212N 30 -nircam NRCA4_MASKSWB_F200W 30 -nircam NRCA4_MASKSWB_NARROW 30 -nircam NRCA5_MASKLWB_F250M 30 -nircam NRCA5_MASKLWB_F300M 30 -nircam NRCA5_MASKLWB_F277W 30 -nircam NRCA5_MASKLWB_F335M 30 -nircam NRCA5_MASKLWB_F360M 30 -nircam NRCA5_MASKLWB_F356W 30 -nircam NRCA5_MASKLWB_F410M 30 -nircam NRCA5_MASKLWB_F430M 30 -nircam NRCA5_MASKLWB_F460M 30 -nircam NRCA5_MASKLWB_F480M 30 -nircam NRCA5_MASKLWB_F444W 30 -nircam NRCA5_MASKLWB_NARROW 30 -nircam NRCA2_FULL_MASK210R 10 -nircam NRCA5_FULL_MASK335R 10 -nircam NRCA5_FULL_MASK430R 10 -nircam NRCA4_FULL_MASKSWB 10 -nircam NRCA4_FULL_MASKSWB_F182M 10 -nircam NRCA4_FULL_MASKSWB_F187N 10 -nircam NRCA4_FULL_MASKSWB_F210M 10 -nircam NRCA4_FULL_MASKSWB_F212N 10 -nircam NRCA4_FULL_MASKSWB_F200W 10 -nircam NRCA5_FULL_MASKLWB 10 -nircam NRCA5_FULL_MASKLWB_F250M 10 -nircam NRCA5_FULL_MASKLWB_F300M 10 -nircam NRCA5_FULL_MASKLWB_F277W 10 -nircam NRCA5_FULL_MASKLWB_F335M 10 -nircam NRCA5_FULL_MASKLWB_F360M 10 -nircam NRCA5_FULL_MASKLWB_F356W 10 -nircam NRCA5_FULL_MASKLWB_F410M 10 -nircam NRCA5_FULL_MASKLWB_F430M 10 -nircam NRCA5_FULL_MASKLWB_F460M 10 -nircam NRCA5_FULL_MASKLWB_F480M 10 -nircam NRCA5_FULL_MASKLWB_F444W 10 -nircam NRCA2_FULL_WEDGE_RND 10 -nircam NRCA4_FULL_WEDGE_BAR 10 -nircam NRCA5_FULL_WEDGE_RND 10 -nircam NRCA5_FULL_WEDGE_BAR 10 -nircam NRCA2_FULL_TAMASK210R 10 -nircam NRCA5_FULL_TAMASK335R 10 -nircam NRCA5_FULL_TAMASK430R 10 -nircam NRCA4_FULL_TAMASKSWB 10 -nircam NRCA5_FULL_TAMASKLWB 10 -nircam NRCA5_FULL_TAMASKLWBL 10 -nircam NRCA4_FULL_TAMASKSWBS 10 -nircam NRCA2_FULL_FSTAMASK210R 10 -nircam NRCA4_FULL_FSTAMASKSWB 10 -nircam NRCA5_FULL_FSTAMASKLWB 10 -nircam 
NRCA5_FULL_FSTAMASK335R 10 -nircam NRCA5_FULL_FSTAMASK430R 10 -niriss NIS_CEN_OSS 10 -niriss NIS_CEN 10 -niriss NIS_AMI1 30 -niriss NIS_AMI2 30 -niriss NIS_AMI3 30 -niriss NIS_AMI4 30 -niriss NIS_AMITA 30 -niriss NIS_SOSSTA 30 -niriss NIS_WFSS_OFFSET 30 -niriss NIS_WFSS64 30 -niriss NIS_WFSS64R 30 -niriss NIS_WFSS64R3 30 -niriss NIS_WFSS64C 30 -niriss NIS_WFSS64C3 30 -niriss NIS_WFSS128 30 -niriss NIS_WFSS128R 30 -niriss NIS_WFSS128R3 30 -niriss NIS_WFSS128C 30 -niriss NIS_WFSS128C3 30 -niriss NIS_SUB64 30 -niriss NIS_SUB128 30 -niriss NIS_SUB256 30 -niriss NIS_SUBAMPCAL 30 -niriss NIS_SUBSTRIP96 30 -niriss NIS_SUBSTRIP256 30 -niriss NIS_FP1MIMF 30 -niriss NIS_FP2MIMF 30 -niriss NIS_FP3MIMF 30 -niriss NIS_FP4MIMF 30 -niriss NIS_FP5MIMF 30 -niriss NIS_AMIFULL 10 -niriss NIS_SOSSFULL 10 -niriss NIS_WFSS 10 -miri MIRIM_FULL_OSS 10 -miri MIRIM_FULL 10 -miri MIRIM_ILLUM 30 -miri MIRIM_BRIGHTSKY 30 -miri MIRIM_SUB256 30 -miri MIRIM_SUB128 30 -miri MIRIM_SUB64 30 -miri MIRIM_SLITLESSPRISM 30 -miri MIRIM_SLITLESSUPPER 30 -miri MIRIM_SLITLESSLOWER 30 -miri MIRIM_MASK1065 30 -miri MIRIM_MASK1140 30 -miri MIRIM_MASK1550 30 -miri MIRIM_MASKLYOT 30 -miri MIRIM_TAMRS 30 -miri MIRIM_TALRS 30 -miri MIRIM_TABLOCK 30 -miri MIRIM_TALYOT_UL 30 -miri MIRIM_TALYOT_UR 30 -miri MIRIM_TALYOT_LL 30 -miri MIRIM_TALYOT_LR 30 -miri MIRIM_TALYOT_CUL 30 -miri MIRIM_TALYOT_CUR 30 -miri MIRIM_TALYOT_CLL 30 -miri MIRIM_TALYOT_CLR 30 -miri MIRIM_TA1550_UL 30 -miri MIRIM_TA1550_UR 30 -miri MIRIM_TA1550_LL 30 -miri MIRIM_TA1550_LR 30 -miri MIRIM_TA1550_CUL 30 -miri MIRIM_TA1550_CUR 30 -miri MIRIM_TA1550_CLL 30 -miri MIRIM_TA1550_CLR 30 -miri MIRIM_TA1140_UL 30 -miri MIRIM_TA1140_UR 30 -miri MIRIM_TA1140_LL 30 -miri MIRIM_TA1140_LR 30 -miri MIRIM_TA1140_CUL 30 -miri MIRIM_TA1140_CUR 30 -miri MIRIM_TA1140_CLL 30 -miri MIRIM_TA1140_CLR 30 -miri MIRIM_TA1065_UL 30 -miri MIRIM_TA1065_UR 30 -miri MIRIM_TA1065_LL 30 -miri MIRIM_TA1065_LR 30 -miri MIRIM_TA1065_CUL 30 -miri MIRIM_TA1065_CUR 30 -miri MIRIM_TA1065_CLL 30 -miri MIRIM_TA1065_CLR 30 -miri MIRIM_TAFULL 10 -miri MIRIM_TAILLUM 30 -miri MIRIM_TABRIGHTSKY 30 -miri MIRIM_TASUB256 30 -miri MIRIM_TASUB128 30 -miri MIRIM_TASUB64 30 -miri MIRIM_TASLITLESSPRISM 30 -miri MIRIM_CORON1065 30 -miri MIRIM_CORON1140 30 -miri MIRIM_CORON1550 30 -miri MIRIM_CORONLYOT 30 -miri MIRIM_KNIFE 30 -miri MIRIM_FP1MIMF 30 -miri MIRIM_FP2MIMF 30 -miri MIRIM_FP3MIMF 30 -miri MIRIM_FP4MIMF 30 -miri MIRIM_FP5MIMF 30 -miri MIRIM_SLIT 30 -miri MIRIFU_CHANNEL1A 30 -miri MIRIFU_1ASLICE01 30 -miri MIRIFU_1ASLICE02 30 -miri MIRIFU_1ASLICE03 30 -miri MIRIFU_1ASLICE04 30 -miri MIRIFU_1ASLICE05 30 -miri MIRIFU_1ASLICE06 30 -miri MIRIFU_1ASLICE07 30 -miri MIRIFU_1ASLICE08 30 -miri MIRIFU_1ASLICE09 30 -miri MIRIFU_1ASLICE10 30 -miri MIRIFU_1ASLICE11 30 -miri MIRIFU_1ASLICE12 30 -miri MIRIFU_1ASLICE13 30 -miri MIRIFU_1ASLICE14 30 -miri MIRIFU_1ASLICE15 30 -miri MIRIFU_1ASLICE16 30 -miri MIRIFU_1ASLICE17 30 -miri MIRIFU_1ASLICE18 30 -miri MIRIFU_1ASLICE19 30 -miri MIRIFU_1ASLICE20 30 -miri MIRIFU_1ASLICE21 30 -miri MIRIFU_CHANNEL1B 30 -miri MIRIFU_1BSLICE01 30 -miri MIRIFU_1BSLICE02 30 -miri MIRIFU_1BSLICE03 30 -miri MIRIFU_1BSLICE04 30 -miri MIRIFU_1BSLICE05 30 -miri MIRIFU_1BSLICE06 30 -miri MIRIFU_1BSLICE07 30 -miri MIRIFU_1BSLICE08 30 -miri MIRIFU_1BSLICE09 30 -miri MIRIFU_1BSLICE10 30 -miri MIRIFU_1BSLICE11 30 -miri MIRIFU_1BSLICE12 30 -miri MIRIFU_1BSLICE13 30 -miri MIRIFU_1BSLICE14 30 -miri MIRIFU_1BSLICE15 30 -miri MIRIFU_1BSLICE16 30 -miri MIRIFU_1BSLICE17 30 -miri MIRIFU_1BSLICE18 30 -miri MIRIFU_1BSLICE19 
30 -miri MIRIFU_1BSLICE20 30 -miri MIRIFU_1BSLICE21 30 -miri MIRIFU_CHANNEL1C 30 -miri MIRIFU_1CSLICE01 30 -miri MIRIFU_1CSLICE02 30 -miri MIRIFU_1CSLICE03 30 -miri MIRIFU_1CSLICE04 30 -miri MIRIFU_1CSLICE05 30 -miri MIRIFU_1CSLICE06 30 -miri MIRIFU_1CSLICE07 30 -miri MIRIFU_1CSLICE08 30 -miri MIRIFU_1CSLICE09 30 -miri MIRIFU_1CSLICE10 30 -miri MIRIFU_1CSLICE11 30 -miri MIRIFU_1CSLICE12 30 -miri MIRIFU_1CSLICE13 30 -miri MIRIFU_1CSLICE14 30 -miri MIRIFU_1CSLICE15 30 -miri MIRIFU_1CSLICE16 30 -miri MIRIFU_1CSLICE17 30 -miri MIRIFU_1CSLICE18 30 -miri MIRIFU_1CSLICE19 30 -miri MIRIFU_1CSLICE20 30 -miri MIRIFU_1CSLICE21 30 -miri MIRIFU_CHANNEL2A 30 -miri MIRIFU_2ASLICE01 30 -miri MIRIFU_2ASLICE02 30 -miri MIRIFU_2ASLICE03 30 -miri MIRIFU_2ASLICE04 30 -miri MIRIFU_2ASLICE05 30 -miri MIRIFU_2ASLICE06 30 -miri MIRIFU_2ASLICE07 30 -miri MIRIFU_2ASLICE08 30 -miri MIRIFU_2ASLICE09 30 -miri MIRIFU_2ASLICE10 30 -miri MIRIFU_2ASLICE11 30 -miri MIRIFU_2ASLICE12 30 -miri MIRIFU_2ASLICE13 30 -miri MIRIFU_2ASLICE14 30 -miri MIRIFU_2ASLICE15 30 -miri MIRIFU_2ASLICE16 30 -miri MIRIFU_2ASLICE17 30 -miri MIRIFU_CHANNEL2B 30 -miri MIRIFU_2BSLICE01 30 -miri MIRIFU_2BSLICE02 30 -miri MIRIFU_2BSLICE03 30 -miri MIRIFU_2BSLICE04 30 -miri MIRIFU_2BSLICE05 30 -miri MIRIFU_2BSLICE06 30 -miri MIRIFU_2BSLICE07 30 -miri MIRIFU_2BSLICE08 30 -miri MIRIFU_2BSLICE09 30 -miri MIRIFU_2BSLICE10 30 -miri MIRIFU_2BSLICE11 30 -miri MIRIFU_2BSLICE12 30 -miri MIRIFU_2BSLICE13 30 -miri MIRIFU_2BSLICE14 30 -miri MIRIFU_2BSLICE15 30 -miri MIRIFU_2BSLICE16 30 -miri MIRIFU_2BSLICE17 30 -miri MIRIFU_CHANNEL2C 30 -miri MIRIFU_2CSLICE01 30 -miri MIRIFU_2CSLICE02 30 -miri MIRIFU_2CSLICE03 30 -miri MIRIFU_2CSLICE04 30 -miri MIRIFU_2CSLICE05 30 -miri MIRIFU_2CSLICE06 30 -miri MIRIFU_2CSLICE07 30 -miri MIRIFU_2CSLICE08 30 -miri MIRIFU_2CSLICE09 30 -miri MIRIFU_2CSLICE10 30 -miri MIRIFU_2CSLICE11 30 -miri MIRIFU_2CSLICE12 30 -miri MIRIFU_2CSLICE13 30 -miri MIRIFU_2CSLICE14 30 -miri MIRIFU_2CSLICE15 30 -miri MIRIFU_2CSLICE16 30 -miri MIRIFU_2CSLICE17 30 -miri MIRIFU_CHANNEL3A 30 -miri MIRIFU_3ASLICE01 30 -miri MIRIFU_3ASLICE02 30 -miri MIRIFU_3ASLICE03 30 -miri MIRIFU_3ASLICE04 30 -miri MIRIFU_3ASLICE05 30 -miri MIRIFU_3ASLICE06 30 -miri MIRIFU_3ASLICE07 30 -miri MIRIFU_3ASLICE08 30 -miri MIRIFU_3ASLICE09 30 -miri MIRIFU_3ASLICE10 30 -miri MIRIFU_3ASLICE11 30 -miri MIRIFU_3ASLICE12 30 -miri MIRIFU_3ASLICE13 30 -miri MIRIFU_3ASLICE14 30 -miri MIRIFU_3ASLICE15 30 -miri MIRIFU_3ASLICE16 30 -miri MIRIFU_CHANNEL3B 30 -miri MIRIFU_3BSLICE01 30 -miri MIRIFU_3BSLICE02 30 -miri MIRIFU_3BSLICE03 30 -miri MIRIFU_3BSLICE04 30 -miri MIRIFU_3BSLICE05 30 -miri MIRIFU_3BSLICE06 30 -miri MIRIFU_3BSLICE07 30 -miri MIRIFU_3BSLICE08 30 -miri MIRIFU_3BSLICE09 30 -miri MIRIFU_3BSLICE10 30 -miri MIRIFU_3BSLICE11 30 -miri MIRIFU_3BSLICE12 30 -miri MIRIFU_3BSLICE13 30 -miri MIRIFU_3BSLICE14 30 -miri MIRIFU_3BSLICE15 30 -miri MIRIFU_3BSLICE16 30 -miri MIRIFU_CHANNEL3C 30 -miri MIRIFU_3CSLICE01 30 -miri MIRIFU_3CSLICE02 30 -miri MIRIFU_3CSLICE03 30 -miri MIRIFU_3CSLICE04 30 -miri MIRIFU_3CSLICE05 30 -miri MIRIFU_3CSLICE06 30 -miri MIRIFU_3CSLICE07 30 -miri MIRIFU_3CSLICE08 30 -miri MIRIFU_3CSLICE09 30 -miri MIRIFU_3CSLICE10 30 -miri MIRIFU_3CSLICE11 30 -miri MIRIFU_3CSLICE12 30 -miri MIRIFU_3CSLICE13 30 -miri MIRIFU_3CSLICE14 30 -miri MIRIFU_3CSLICE15 30 -miri MIRIFU_3CSLICE16 30 -miri MIRIFU_CHANNEL4A 30 -miri MIRIFU_4ASLICE01 30 -miri MIRIFU_4ASLICE02 30 -miri MIRIFU_4ASLICE03 30 -miri MIRIFU_4ASLICE04 30 -miri MIRIFU_4ASLICE05 30 -miri MIRIFU_4ASLICE06 30 -miri 
MIRIFU_4ASLICE07 30 -miri MIRIFU_4ASLICE08 30 -miri MIRIFU_4ASLICE09 30 -miri MIRIFU_4ASLICE10 30 -miri MIRIFU_4ASLICE11 30 -miri MIRIFU_4ASLICE12 30 -miri MIRIFU_CHANNEL4B 30 -miri MIRIFU_4BSLICE01 30 -miri MIRIFU_4BSLICE02 30 -miri MIRIFU_4BSLICE03 30 -miri MIRIFU_4BSLICE04 30 -miri MIRIFU_4BSLICE05 30 -miri MIRIFU_4BSLICE06 30 -miri MIRIFU_4BSLICE07 30 -miri MIRIFU_4BSLICE08 30 -miri MIRIFU_4BSLICE09 30 -miri MIRIFU_4BSLICE10 30 -miri MIRIFU_4BSLICE11 30 -miri MIRIFU_4BSLICE12 30 -miri MIRIFU_CHANNEL4C 30 -miri MIRIFU_4CSLICE01 30 -miri MIRIFU_4CSLICE02 30 -miri MIRIFU_4CSLICE03 30 -miri MIRIFU_4CSLICE04 30 -miri MIRIFU_4CSLICE05 30 -miri MIRIFU_4CSLICE06 30 -miri MIRIFU_4CSLICE07 30 -miri MIRIFU_4CSLICE08 30 -miri MIRIFU_4CSLICE09 30 -miri MIRIFU_4CSLICE10 30 -miri MIRIFU_4CSLICE11 30 -miri MIRIFU_4CSLICE12 30 -nirspec NRS1_FULL_OSS 10 -nirspec NRS1_FULL 10 -nirspec NRS2_FULL_OSS 10 -nirspec NRS2_FULL 10 -nirspec NRS_S200A1_SLIT 30 -nirspec NRS_S200A2_SLIT 30 -nirspec NRS_S400A1_SLIT 30 -nirspec NRS_S1600A1_SLIT 30 -nirspec NRS_S200B1_SLIT 30 -nirspec NRS_FULL_IFU 10 -nirspec NRS_IFU_SLICE00 30 -nirspec NRS_IFU_SLICE01 30 -nirspec NRS_IFU_SLICE02 30 -nirspec NRS_IFU_SLICE03 30 -nirspec NRS_IFU_SLICE04 30 -nirspec NRS_IFU_SLICE05 30 -nirspec NRS_IFU_SLICE06 30 -nirspec NRS_IFU_SLICE07 30 -nirspec NRS_IFU_SLICE08 30 -nirspec NRS_IFU_SLICE09 30 -nirspec NRS_IFU_SLICE10 30 -nirspec NRS_IFU_SLICE11 30 -nirspec NRS_IFU_SLICE12 30 -nirspec NRS_IFU_SLICE13 30 -nirspec NRS_IFU_SLICE14 30 -nirspec NRS_IFU_SLICE15 30 -nirspec NRS_IFU_SLICE16 30 -nirspec NRS_IFU_SLICE17 30 -nirspec NRS_IFU_SLICE18 30 -nirspec NRS_IFU_SLICE19 30 -nirspec NRS_IFU_SLICE20 30 -nirspec NRS_IFU_SLICE21 30 -nirspec NRS_IFU_SLICE22 30 -nirspec NRS_IFU_SLICE23 30 -nirspec NRS_IFU_SLICE24 30 -nirspec NRS_IFU_SLICE25 30 -nirspec NRS_IFU_SLICE26 30 -nirspec NRS_IFU_SLICE27 30 -nirspec NRS_IFU_SLICE28 30 -nirspec NRS_IFU_SLICE29 30 -nirspec NRS_FULL_MSA 10 -nirspec NRS_FULL_MSA1 10 -nirspec NRS_FULL_MSA2 10 -nirspec NRS_FULL_MSA3 10 -nirspec NRS_FULL_MSA4 10 -nirspec NRS_VIGNETTED_MSA 30 -nirspec NRS_VIGNETTED_MSA1 30 -nirspec NRS_VIGNETTED_MSA2 30 -nirspec NRS_VIGNETTED_MSA3 30 -nirspec NRS_VIGNETTED_MSA4 30 -nirspec NRS_FIELD1_MSA4 30 -nirspec NRS_FIELD2_MSA4 30 -nirspec NRS1_FP1MIMF 30 -nirspec NRS1_FP2MIMF 30 -nirspec NRS1_FP3MIMF 30 -nirspec NRS2_FP4MIMF 30 -nirspec NRS2_FP5MIMF 30 -nirspec CLEAR_GWA_OTE 30 -nirspec F110W_GWA_OTE 30 -nirspec F140X_GWA_OTE 30 -nirspec NRS_SKY_OTEIP 30 -nirspec NRS_CLEAR_OTEIP_MSA_L0 30 -nirspec NRS_CLEAR_OTEIP_MSA_L1 30 -nirspec NRS_F070LP_OTEIP_MSA_L0 30 -nirspec NRS_F070LP_OTEIP_MSA_L1 30 -nirspec NRS_F100LP_OTEIP_MSA_L0 30 -nirspec NRS_F100LP_OTEIP_MSA_L1 30 -nirspec NRS_F170LP_OTEIP_MSA_L0 30 -nirspec NRS_F170LP_OTEIP_MSA_L1 30 -nirspec NRS_F290LP_OTEIP_MSA_L0 30 -nirspec NRS_F290LP_OTEIP_MSA_L1 30 -nirspec NRS_F110W_OTEIP_MSA_L0 30 -nirspec NRS_F110W_OTEIP_MSA_L1 30 -nirspec NRS_F140X_OTEIP_MSA_L0 30 -nirspec NRS_F140X_OTEIP_MSA_L1 30 -fgs FGS1_FULL_OSS 10 -fgs FGS1_FULL 10 -fgs FGS2_FULL_OSS 10 -fgs FGS2_FULL 10 -fgs FGS1_SUB128LL 30 -fgs FGS1_SUB128DIAG 30 -fgs FGS1_SUB128CNTR 30 -fgs FGS1_SUB32LL 30 -fgs FGS1_SUB32DIAG 30 -fgs FGS1_SUB32CNTR 30 -fgs FGS1_SUB8LL 30 -fgs FGS1_SUB8DIAG 30 -fgs FGS1_SUB8CNTR 30 -fgs FGS2_SUB128LL 30 -fgs FGS2_SUB128DIAG 30 -fgs FGS2_SUB128CNTR 30 -fgs FGS2_SUB32LL 30 -fgs FGS2_SUB32DIAG 30 -fgs FGS2_SUB32CNTR 30 -fgs FGS2_SUB8LL 30 -fgs FGS2_SUB8DIAG 30 -fgs FGS2_SUB8CNTR 30 -fgs FGS1_FP1MIMF 30 -fgs FGS1_FP2MIMF 30 -fgs FGS1_FP3MIMF 30 -fgs 
FGS1_FP4MIMF 30 -fgs FGS1_FP5MIMF 30 -fgs FGS2_FP1MIMF 30 -fgs FGS2_FP2MIMF 30 -fgs FGS2_FP3MIMF 30 -fgs FGS2_FP4MIMF 30 -fgs FGS2_FP5MIMF 30 \ No newline at end of file +Instrument Aperture Threshold N_skipped_integs +nircam NRCA1_FULL 1 0 +nircam NRCA2_FULL 1 0 +nircam NRCA3_FULL 1 0 +nircam NRCA4_FULL 1 0 +nircam NRCA5_FULL 1 0 +nircam NRCB1_FULL 1 0 +nircam NRCB2_FULL 1 0 +nircam NRCB3_FULL 1 0 +nircam NRCB4_FULL 1 0 +nircam NRCB5_FULL 1 0 +nircam NRCA1_SUB160 4 0 +nircam NRCA2_SUB160 4 0 +nircam NRCA3_SUB160 4 0 +nircam NRCA4_SUB160 4 0 +nircam NRCA5_SUB160 4 0 +nircam NRCB1_SUB160 4 0 +nircam NRCB2_SUB160 4 0 +nircam NRCB3_SUB160 4 0 +nircam NRCB4_SUB160 4 0 +nircam NRCB5_SUB160 4 0 +nircam NRCA1_SUB320 4 0 +nircam NRCA2_SUB320 4 0 +nircam NRCA3_SUB320 4 0 +nircam NRCA4_SUB320 4 0 +nircam NRCA5_SUB320 4 0 +nircam NRCB1_SUB320 4 0 +nircam NRCB2_SUB320 4 0 +nircam NRCB3_SUB320 4 0 +nircam NRCB4_SUB320 4 0 +nircam NRCB5_SUB320 4 0 +nircam NRCA1_SUB640 4 0 +nircam NRCA2_SUB640 4 0 +nircam NRCA3_SUB640 4 0 +nircam NRCA4_SUB640 4 0 +nircam NRCA5_SUB640 4 0 +nircam NRCB1_SUB640 4 0 +nircam NRCB2_SUB640 4 0 +nircam NRCB3_SUB640 4 0 +nircam NRCB4_SUB640 4 0 +nircam NRCB5_SUB640 4 0 +niriss NIS_CEN 1 0 +niriss NIS_AMI1 1 0 +niriss NIS_AMI2 1 0 +niriss NIS_AMI3 1 0 +niriss NIS_AMI4 1 0 +niriss NIS_SUB64 1 0 +niriss NIS_SUB128 1 0 +niriss NIS_SUB256 1 0 +miri MIRIM_FULL 1 1 +miri MIRIM_BRIGHTSKY 1 0 +miri MIRIM_SUB256 1 0 +miri MIRIM_SUB128 1 0 +miri MIRIM_SUB64 1 0 +miri MIRIM_SLITLESSPRISM 1 0 +miri MIRIM_MASK1065 1 0 +miri MIRIM_MASK1140 1 0 +miri MIRIM_MASK1550 1 0 +miri MIRIM_MASKLYOT 1 0 +miri MIRIM_CORON1065 1 0 +miri MIRIM_CORON1140 1 0 +miri MIRIM_CORON1550 1 0 +miri MIRIM_CORONLYOT 1 0 +miri MIRIM_SLIT 1 0 +miri MIRIFU_CHANNEL1A 1 0 +miri MIRIFU_CHANNEL1B 1 0 +miri MIRIFU_CHANNEL1C 1 0 +miri MIRIFU_CHANNEL2A 1 0 +miri MIRIFU_CHANNEL2B 1 0 +miri MIRIFU_CHANNEL2C 1 0 +miri MIRIFU_CHANNEL3A 1 0 +miri MIRIFU_CHANNEL3B 1 0 +miri MIRIFU_CHANNEL3C 1 0 +miri MIRIFU_CHANNEL4A 1 0 +miri MIRIFU_CHANNEL4B 1 0 +miri MIRIFU_CHANNEL4C 1 0 +nirspec NRS1_FULL 1 0 +nirspec NRS2_FULL 1 0 +nirspec NRS_S200A1_SLIT 1 0 +nirspec NRS_S200A2_SLIT 1 0 +nirspec NRS_S400A1_SLIT 1 0 +nirspec NRS_S1600A1_SLIT 1 0 +nirspec NRS_S200B1_SLIT 1 0 +nirspec NRS_FULL_IFU 1 0 +nirspec NRS_FULL_MSA 1 0 +fgs FGS1_FULL 1 0 +fgs FGS2_FULL 1 0 +fgs FGS1_SUB128CNTR 1 0 +fgs FGS2_SUB128CNTR 1 0 diff --git a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py index 2154ee00a..f07e48875 100755 --- a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py +++ b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py @@ -381,7 +381,7 @@ from bokeh.embed import components, json_item from bokeh.layouts import gridplot from bokeh.models import BoxAnnotation, ColumnDataSource, DatetimeTickFormatter, HoverTool, Range1d -from bokeh.models.widgets import Tabs, Panel +from bokeh.models.layouts import Tabs from bokeh.plotting import figure, output_file, save, show from bokeh.palettes import Turbo256 from jwql.database import database_interface @@ -991,14 +991,14 @@ def get_dependency_data(self, dependency, starttime, endtime): if dependency["name"] in self.query_results: # We need the full time to be covered - if ((self.query_results[dependency["name"]].requested_start_time <= starttime) and - (self.query_results[dependency["name"]].requested_end_time >= endtime)): + if ((self.query_results[dependency["name"]].requested_start_time <= 
starttime) + and (self.query_results[dependency["name"]].requested_end_time >= endtime)): logging.info(f'Dependency {dependency["name"]} is already present in self.query_results.') # Extract data for the requested time range - matching_times = np.where((self.query_results[dependency["name"]].data["dates"] >= starttime) & - (self.query_results[dependency["name"]].data["dates"] <= endtime)) + matching_times = np.where((self.query_results[dependency["name"]].data["dates"] >= starttime) + & (self.query_results[dependency["name"]].data["dates"] <= endtime)) dep_mnemonic = {"dates": self.query_results[dependency["name"]].data["dates"][matching_times], "euvalues": self.query_results[dependency["name"]].data["euvalues"][matching_times]} @@ -1138,16 +1138,16 @@ def get_history_every_change(self, mnemonic, start_date, end_date): devs = [] # Keep only data that fall at least partially within the plot range - if (((np.min(row.time) > self._plot_start) & (np.min(row.time) < self._plot_end)) - | ((np.max(row.time) > self._plot_start) & (np.max(row.time) < self._plot_end))): + if (((np.min(row.time) > self._plot_start) & (np.min(row.time) < self._plot_end)) + | ((np.max(row.time) > self._plot_start) & (np.max(row.time) < self._plot_end))): times.extend(row.time) values.extend(row.mnemonic_value) medians.append(row.median) devs.append(row.stdev) hist[row.dependency_value] = (times, values, medians, devs) else: - if (((np.min(row.time) > self._plot_start) & (np.min(row.time) < self._plot_end)) - | ((np.max(row.time) > self._plot_start) & (np.max(row.time) < self._plot_end))): + if (((np.min(row.time) > self._plot_start) & (np.min(row.time) < self._plot_end)) + | ((np.max(row.time) > self._plot_start) & (np.max(row.time) < self._plot_end))): hist[row.dependency_value] = (row.time, row.mnemonic_value, row.median, row.stdev) return hist @@ -2143,12 +2143,12 @@ def plot_every_change_data(data, mnem_name, units, show_plot=False, savefig=True fig = add_limit_boxes(fig, yellow=yellow_limits, red=red_limits) # Make the x axis tick labels look nice - fig.xaxis.formatter = DatetimeTickFormatter(microseconds=["%d %b %H:%M:%S.%3N"], - seconds=["%d %b %H:%M:%S.%3N"], - hours=["%d %b %H:%M"], - days=["%d %b %H:%M"], - months=["%d %b %Y %H:%M"], - years=["%d %b %Y"] + fig.xaxis.formatter = DatetimeTickFormatter(microseconds="%d %b %H:%M:%S.%3N", + seconds="%d %b %H:%M:%S.%3N", + hours="%d %b %H:%M", + days="%d %b %H:%M", + months="%d %b %Y %H:%M", + years="%d %b %Y" ) fig.xaxis.major_label_orientation = np.pi / 4 diff --git a/jwql/instrument_monitors/common_monitors/readnoise_monitor.py b/jwql/instrument_monitors/common_monitors/readnoise_monitor.py index 10fc82237..36543cc6e 100755 --- a/jwql/instrument_monitors/common_monitors/readnoise_monitor.py +++ b/jwql/instrument_monitors/common_monitors/readnoise_monitor.py @@ -44,25 +44,29 @@ import crds import matplotlib matplotlib.use('Agg') -import matplotlib.pyplot as plt # noqa: E348 (comparison to true) -import numpy as np # noqa: E348 (comparison to true) -from pysiaf import Siaf # noqa: E348 (comparison to true) -from sqlalchemy.sql.expression import and_ # noqa: E348 (comparison to true) - -from jwql.database.database_interface import FGSReadnoiseQueryHistory, FGSReadnoiseStats # noqa: E348 (comparison to true) -from jwql.database.database_interface import MIRIReadnoiseQueryHistory, MIRIReadnoiseStats # noqa: E348 (comparison to true) -from jwql.database.database_interface import NIRCamReadnoiseQueryHistory, NIRCamReadnoiseStats # noqa: E348 (comparison to true) -from 
jwql.database.database_interface import NIRISSReadnoiseQueryHistory, NIRISSReadnoiseStats # noqa: E348 (comparison to true) -from jwql.database.database_interface import NIRSpecReadnoiseQueryHistory, NIRSpecReadnoiseStats # noqa: E348 (comparison to true) -from jwql.database.database_interface import session, engine # noqa: E348 (comparison to true) -from jwql.shared_tasks.shared_tasks import only_one, run_pipeline, run_parallel_pipeline # noqa: E348 (comparison to true) -from jwql.instrument_monitors import pipeline_tools # noqa: E348 (comparison to true) -from jwql.utils import instrument_properties, monitor_utils # noqa: E348 (comparison to true) -from jwql.utils.constants import JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE # noqa: E348 (comparison to true) -from jwql.utils.logging_functions import log_info, log_fail # noqa: E348 (comparison to true) -from jwql.utils.monitor_utils import update_monitor_table # noqa: E348 (comparison to true) -from jwql.utils.permissions import set_permissions # noqa: E348 (comparison to true) -from jwql.utils.utils import ensure_dir_exists, filesystem_path, get_config, copy_files # noqa: E348 (comparison to true) +import matplotlib.pyplot as plt # noqa: E402 (module level import not at top of file) +import numpy as np # noqa: E402 (module level import not at top of file) +from pysiaf import Siaf # noqa: E402 (module level import not at top of file) + +from jwql.shared_tasks.shared_tasks import only_one, run_pipeline, run_parallel_pipeline # noqa: E402 (module level import not at top of file) +from jwql.instrument_monitors import pipeline_tools # noqa: E402 (module level import not at top of file) +from jwql.utils import instrument_properties, monitor_utils # noqa: E402 (module level import not at top of file) +from jwql.utils.constants import JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE # noqa: E402 (module level import not at top of file) +from jwql.utils.constants import ON_GITHUB_ACTIONS, ON_READTHEDOCS # noqa: E402 (module level import not at top of file) +from jwql.utils.logging_functions import log_info, log_fail # noqa: E402 (module level import not at top of file) +from jwql.utils.monitor_utils import update_monitor_table # noqa: E402 (module level import not at top of file) +from jwql.utils.permissions import set_permissions # noqa: E402 (module level import not at top of file) +from jwql.utils.utils import ensure_dir_exists, filesystem_path, get_config, copy_files # noqa: E402 (module level import not at top of file) + +if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS: + # Need to set up django apps before we can access the models + import django # noqa: E402 (module level import not at top of file) + os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql.website.jwql_proj.settings") + django.setup() + + # Import * is okay here because this module specifically only contains database models + # for this monitor + from jwql.website.apps.jwql.monitor_models.readnoise import * # noqa: E402 (module level import not at top of file) class Readnoise(): @@ -149,17 +153,8 @@ def file_exists_in_database(self, filename): file_exists : bool ``True`` if filename exists in the readnoise stats database. 
""" - - query = session.query(self.stats_table) - results = query.filter(self.stats_table.uncal_filename == filename).all() - - if len(results) != 0: - file_exists = True - else: - file_exists = False - - session.close() - return file_exists + results = self.stats_table.objects.filter(uncal_filename__iexact=filename).values() + return (len(results) != 0) def get_amp_stats(self, image, amps): """Calculates the sigma-clipped mean and stddev, as well as the @@ -385,17 +380,18 @@ def most_recent_search(self): Date (in MJD) of the ending range of the previous MAST query where the readnoise monitor was run. """ - - query = session.query(self.query_table).filter(and_(self.query_table.aperture == self.aperture, - self.query_table.run_monitor == True)).order_by(self.query_table.end_time_mjd).all() # noqa: E712 (comparison to True) + filter_kwargs = { + 'aperture__iexact': self.aperture, + 'run_monitor__exact': True + } + query = self.query_table.objects.filter(**filter_kwargs).order_by("-end_time_mjd").all() if len(query) == 0: query_result = 59607.0 # a.k.a. Jan 28, 2022 == First JWST images (MIRI) logging.info(('\tNo query history for {}. Beginning search date will be set to {}.'.format(self.aperture, query_result))) else: - query_result = query[-1].end_time_mjd + query_result = query[0].end_time_mjd - session.close() return query_result def process(self, file_list): @@ -512,24 +508,24 @@ def process(self, file_list): 'readnoise_filename': os.path.basename(readnoise_outfile), 'full_image_mean': float(full_image_mean), 'full_image_stddev': float(full_image_stddev), - 'full_image_n': full_image_n.astype(float), - 'full_image_bin_centers': full_image_bin_centers.astype(float), + 'full_image_n': list(full_image_n.astype(float)), + 'full_image_bin_centers': list(full_image_bin_centers.astype(float)), 'readnoise_diff_image': os.path.basename(readnoise_diff_png), 'diff_image_mean': float(diff_image_mean), 'diff_image_stddev': float(diff_image_stddev), - 'diff_image_n': diff_image_n.astype(float), - 'diff_image_bin_centers': diff_image_bin_centers.astype(float), + 'diff_image_n': list(diff_image_n.astype(float)), + 'diff_image_bin_centers': list(diff_image_bin_centers.astype(float)), 'entry_date': datetime.datetime.now() } for key in amp_stats.keys(): if isinstance(amp_stats[key], (int, float)): readnoise_db_entry[key] = float(amp_stats[key]) else: - readnoise_db_entry[key] = amp_stats[key].astype(float) + readnoise_db_entry[key] = list(amp_stats[key].astype(float)) # Add this new entry to the readnoise database table - with engine.begin() as connection: - connection.execute(self.stats_table.__table__.insert(), readnoise_db_entry) + entry = self.stats_table(**readnoise_db_entry) + entry.save() logging.info('\tNew entry added to readnoise database table') # Remove the raw and calibrated files to save memory space @@ -658,8 +654,8 @@ def run(self): 'files_found': len(new_files), 'run_monitor': monitor_run, 'entry_date': datetime.datetime.now()} - with engine.begin() as connection: - connection.execute(self.query_table.__table__.insert(), new_entry) + stats_entry = self.query_table(**new_entry) + stats_entry.save() logging.info('\tUpdated the query history table') logging.info('Readnoise Monitor completed successfully.') diff --git a/jwql/instrument_monitors/nircam_monitors/claw_monitor.py b/jwql/instrument_monitors/nircam_monitors/claw_monitor.py index 1d250cd29..b73d916c0 100644 --- a/jwql/instrument_monitors/nircam_monitors/claw_monitor.py +++ b/jwql/instrument_monitors/nircam_monitors/claw_monitor.py @@ 
-30,6 +30,7 @@
 from astropy.convolution import Gaussian2DKernel, convolve
 from astropy.io import fits
 from astropy.stats import gaussian_fwhm_to_sigma, sigma_clipped_stats
+from astropy.table import Table
 from astropy.time import Time
 from astroquery.mast import Mast
 import matplotlib
@@ -37,12 +38,23 @@
 import numpy as np
 import pandas as pd
 from photutils.segmentation import detect_sources, detect_threshold
+from scipy.ndimage import binary_dilation

-from jwql.database.database_interface import session, engine
-from jwql.database.database_interface import NIRCamClawQueryHistory, NIRCamClawStats
 from jwql.utils import monitor_utils
+from jwql.utils.constants import ON_GITHUB_ACTIONS, ON_READTHEDOCS
 from jwql.utils.logging_functions import log_info, log_fail
 from jwql.utils.utils import ensure_dir_exists, filesystem_path, get_config
+from jwst_backgrounds import jbt
+
+if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS:
+    # Need to set up django apps before we can access the models
+    import django  # noqa: E402 (module level import not at top of file)
+    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql.website.jwql_proj.settings")
+    django.setup()
+
+    # Import * is okay here because this module specifically only contains database models
+    # for this monitor
+    from jwql.website.apps.jwql.monitor_models.claw import *  # noqa: E402 (module level import not at top of file)

 matplotlib.use('Agg')
 warnings.filterwarnings('ignore', message="nan_treatment='interpolate', however, NaN values detected post convolution*")
@@ -111,41 +123,55 @@ def __init__(self):
         ensure_dir_exists(self.output_dir_bkg)

         # Get the claw monitor database tables
-        self.query_table = eval('NIRCamClawQueryHistory')
-        self.stats_table = eval('NIRCamClawStats')
+        self.query_table = NIRCamClawQueryHistory
+        self.stats_table = NIRCamClawStats

-    def make_background_plots(self):
+    def make_background_plots(self, plot_type='bkg'):
         """Makes plots of the background levels over time in NIRCam data.
+
+        Parameters
+        ----------
+        plot_type : str
+            The type of plot to make: ``bkg`` for background trending,
+            ``bkg_rms`` for background rms trending, or ``model`` for
+            measured vs. model background trending.
         """
+        columns = ['filename', 'filter', 'pupil', 'detector', 'effexptm', 'expstart_mjd', 'entry_date', 'mean', 'median',
+                   'stddev', 'frac_masked', 'total_bkg']
+
+        # Get all of the background data.
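
A minimal sketch of the Django-ORM retrieval pattern this hunk adopts (model and column names are taken from the diff itself; it assumes Django has been configured as in the import block above):

    import pandas as pd
    from jwql.website.apps.jwql.monitor_models.claw import NIRCamClawStats

    columns = ['filename', 'filter', 'detector', 'expstart_mjd', 'median']
    # .values(*columns) yields one dict per row, which from_records turns into a DataFrame
    records = NIRCamClawStats.objects.all().values(*columns)
    df = pd.DataFrame.from_records(records)
    # Keep only the most recent entry per file, as the monitor does
    df = df.drop_duplicates(subset='filename', keep='last')
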
-        query = session.query(NIRCamClawStats.filename, NIRCamClawStats.filter, NIRCamClawStats.pupil, NIRCamClawStats.detector,
-                              NIRCamClawStats.effexptm, NIRCamClawStats.expstart_mjd, NIRCamClawStats.entry_date, NIRCamClawStats.mean,
-                              NIRCamClawStats.median, NIRCamClawStats.frac_masked).all()
-        df_orig = pd.DataFrame(query, columns=['filename', 'filter', 'pupil', 'detector', 'effexptm', 'expstart_mjd',
-                                               'entry_date', 'mean', 'median', 'frac_masked'])
-        df_orig = df_orig.drop_duplicates(subset='filename', keep="last")  # remove any duplicate filename entries, keep the most recent
-
-        # Use the same time xlimits/xticks for all plots
-        start_mjd = 59650  # March 2022, middle of commissioning
-        end_mjd = Time.now().mjd + 0.05 * (Time.now().mjd - start_mjd)
-        time_tick_vals = np.linspace(start_mjd, end_mjd, 5)
-        time_tick_labels = [Time(m, format='mjd').isot.split('T')[0] for m in time_tick_vals]
+        background_data = NIRCamClawStats.objects.all().values(*columns)
+        df_orig = pd.DataFrame.from_records(background_data)
+        # remove any duplicate filename entries, keep the most recent
+        df_orig = df_orig.drop_duplicates(subset='filename', keep="last")
+
+        # Get label info based on plot type
+        if plot_type == 'bkg':
+            plot_title = 'backgrounds'
+            plot_label = 'Background [MJy/sr]'
+        if plot_type == 'bkg_rms':
+            plot_title = 'backgrounds_rms'
+            plot_label = 'Background RMS [MJy/sr]'
+        if plot_type == 'model':
+            plot_title = 'backgrounds_vs_models'
+            plot_label = 'Measured / Predicted'

         # Make background trending plots for all wide filters
         for fltr in ['F070W', 'F090W', 'F115W', 'F150W', 'F200W', 'F277W', 'F356W', 'F444W']:
-            logging.info('Working on background trending plots for {}'.format(fltr))
+            logging.info('Working on {} trending plots for {}'.format(plot_title, fltr))
             found_limits = False
             if int(fltr[1:4]) < 250:  # i.e. SW
-                detectors_to_run = ['NRCA2', 'NRCA4', 'NRCB3', 'NRCB1', 'NRCA1', 'NRCA3', 'NRCB4', 'NRCB2']  # in on-sky order, don't change order
+                # in on-sky order, don't change order
+                detectors_to_run = ['NRCA2', 'NRCA4', 'NRCB3', 'NRCB1', 'NRCA1', 'NRCA3', 'NRCB4', 'NRCB2']
                 grid = plt.GridSpec(2, 4, hspace=.4, wspace=.4, width_ratios=[1, 1, 1, 1])
-                fig = plt.figure(figsize=(40, 20))
+                fig = plt.figure(figsize=(45, 20))
                 fig.suptitle(fltr, fontsize=70)
                 frack_masked_thresh = 0.075
             else:  # i.e. LW
                 detectors_to_run = ['NRCALONG', 'NRCBLONG']
                 grid = plt.GridSpec(1, 2, hspace=.2, wspace=.4, width_ratios=[1, 1])
-                fig = plt.figure(figsize=(20, 10))
+                fig = plt.figure(figsize=(25, 10))
                 fig.suptitle(fltr, fontsize=70, y=1.05)
                 frack_masked_thresh = 0.15
             for i, det in enumerate(detectors_to_run):
@@ -153,23 +179,46 @@ def make_background_plots(self):
                 # Get relevant data for this filter/detector and remove bad datasets, e.g. crowded fields,
                 # extended objects, nebulas, short exposures.
-                df = df_orig[(df_orig['filter'] == fltr) & (df_orig['pupil'] == 'CLEAR') & (df_orig['detector'] == det) &
-                             (df_orig['effexptm'] > 300) & (df_orig['frac_masked'] < frack_masked_thresh) &
-                             (abs(1 - (df_orig['mean'] / df_orig['median'])) < 0.05)]
-
-                # Plot the background levels over time
+                df = df_orig[(df_orig['filter'] == fltr) & (df_orig['pupil'] == 'CLEAR') & (df_orig['detector'] == det)
+                             & (df_orig['effexptm'] > 300) & (df_orig['frac_masked'] < frack_masked_thresh)
+                             & (abs(1 - (df_orig['mean'] / df_orig['median'])) < 0.05)]
+                if len(df) > 0:
+                    df = df.sort_values(by=['expstart_mjd'])
+
+                # Get relevant background stat for plot type
+                if plot_type == 'bkg':
+                    plot_data = df['median'].values
+                if plot_type == 'bkg_rms':
+                    df = df[df['stddev'] != 0]  # older data has no accurate stddev measures
+                    plot_data = df['stddev'].values
+                if plot_type == 'model':
+                    plot_data = df['median'].values / df['total_bkg'].values
+                plot_expstarts = df['expstart_mjd'].values
+
+                # Plot the background data over time
                 ax = fig.add_subplot(grid[i])
-                ax.scatter(df['expstart_mjd'], df['median'])
+                ax.scatter(plot_expstarts, plot_data, alpha=0.3)

-                # Match scaling in all plots to the first detector with data. Shade median+/-10% region.
+                # Match scaling in all plots to the first detector with data.
                 if len(df) > 0:
                     if found_limits is False:
-                        first_med = np.nanmedian(df['median'])
+                        first_mean, first_med, first_stddev = sigma_clipped_stats(plot_data)
+                        start_mjd = plot_expstarts.min()
+                        end_mjd = Time.now().mjd
+                        padding = 0.05 * (end_mjd - start_mjd)
+                        start_mjd = start_mjd - padding
+                        end_mjd = end_mjd + padding
+                        time_tick_vals = np.linspace(start_mjd, end_mjd, 5)
+                        time_tick_labels = [Time(m, format='mjd').isot.split('T')[0] for m in time_tick_vals]
                         found_limits = True
-                    ax.set_ylim(first_med - first_med * 0.5, first_med + first_med * 0.5)
-                    med = np.nanmedian(df['median'])
+                    ax.set_ylim(first_med - 8 * first_stddev, first_med + 8 * first_stddev)
+
+                    # Plot overall median line with shaded stddev
+                    mean, med, stddev = sigma_clipped_stats(plot_data)
                     ax.axhline(med, ls='-', color='black')
-                    ax.axhspan(med - med * 0.1, med + med * 0.1, color='gray', alpha=0.4, lw=0)
+                    ax.axhspan(med - stddev, med + stddev, color='gray', alpha=0.4, lw=0)
+                else:
+                    start_mjd, end_mjd, time_tick_vals, time_tick_labels = 0, 1, [0.5], ['N/A']

                 # Axis formatting
                 ax.set_title(det, fontsize=40)
@@ -177,10 +226,9 @@
                 ax.set_xticks(time_tick_vals)
                 ax.set_xticklabels(time_tick_labels, fontsize=20, rotation=45)
                 ax.yaxis.set_tick_params(labelsize=20)
-                ax.set_ylabel('Background [MJy/sr]', fontsize=30)
-                # ax.set_xlabel('Date [YYYY-MM-DD]')
+                ax.set_ylabel(plot_label, fontsize=30)
                 ax.grid(ls='--', color='gray')
-            fig.savefig(os.path.join(self.output_dir_bkg, '{}_backgrounds.png'.format(fltr)), dpi=180, bbox_inches='tight')
+            fig.savefig(os.path.join(self.output_dir_bkg, '{}_{}.png'.format(fltr, plot_title)), dpi=180, bbox_inches='tight')
             fig.clf()
             plt.close()

@@ -222,7 +270,7 @@ def process(self):
             obs_start = '{}T{}'.format(hdu[0].header['DATE-OBS'], hdu[0].header['TIME-OBS'])
             pa_v3 = hdu[1].header['PA_V3']

-            # Make source segmap, add the masked data to the stack, and get background stats
+            # Make source segmap and add the masked data to the stack
             data = hdu['SCI'].data
             dq = hdu['DQ'].data
             threshold = detect_threshold(data, 1.0)
@@ -230,11 +278,38 @@
             kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
             kernel.normalize()
             data_conv = convolve(data, kernel)
-            segmap = detect_sources(data_conv, threshold, npixels=6)
-            segmap = segmap.data
-            segmap[dq & 1 != 0] = 1  # flag DO_NOT_USE pixels
-            stack[n] = np.ma.masked_array(data, mask=segmap != 0)
-            mean, med, stddev = sigma_clipped_stats(data[segmap == 0])
+            segmap_orig = detect_sources(data_conv, threshold, npixels=6)
+            segmap_orig = segmap_orig.data
+            stack[n] = np.ma.masked_array(data, mask=(segmap_orig != 0) | (dq & 1 != 0))
+
+            # Calculate image stats. Before calculating, expand the segmap of extended objects.
+            # This is done only after adding the data to the claw stack, to avoid masking the claws
+            # themselves out of those stacks, but is needed here since extended wings can impact image
+            # stats, mainly the stddev.
+            objects, object_counts = np.unique(segmap_orig, return_counts=True)
+            large_objects = objects[(object_counts > 200) & (objects != 0)]
+            segmap_extended = np.zeros(segmap_orig.shape).astype(int)
+            for object in large_objects:
+                segmap_extended[segmap_orig == object] = 1
+            image_edge_mask = np.zeros(segmap_orig.shape).astype(int)
+            image_edge_mask[10:2038, 10:2038] = 1
+            segmap_extended[(image_edge_mask == 0) | (dq & 1 != 0)] = 0  # omit edge and other bpix from dilation
+            segmap_extended = binary_dilation(segmap_extended, iterations=30).astype(int)
+            segmap = segmap_extended + segmap_orig
+            mean, med, stddev = sigma_clipped_stats(data[(segmap == 0) & (dq == 0)])
+
+            # Get predicted background level using JWST background tool
+            ra, dec = hdu[1].header['RA_V1'], hdu[1].header['DEC_V1']
+            wv = self.filter_wave[self.fltr.upper()]
+            date = hdu[0].header['DATE-BEG']
+            doy = int(Time(date).yday.split(':')[1])
+            try:
+                jbt.get_background(ra, dec, wv, thisday=doy, plot_background=False, plot_bathtub=False,
+                                   write_bathtub=True, bathtub_file='background_versus_day.txt')
+                bkg_table = Table.read('background_versus_day.txt', names=('day', 'total_bkg'), format='ascii')
+                total_bkg = bkg_table['total_bkg'][bkg_table['day'] == doy][0]
+            except Exception as e:
+                total_bkg = np.nan

             # Add this file's stats to the claw database table. Can't insert values with numpy.float32
             # datatypes into database so need to change the datatypes of these values.
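
For reference, the predicted-background lookup added above can be exercised standalone. A sketch follows; the jbt call and table parsing mirror the hunk, while the pointing, wavelength, and day of year are placeholder values, not values from this PR:

    import numpy as np
    from astropy.table import Table
    from jwst_backgrounds import jbt

    ra, dec = 80.49, -69.50  # hypothetical pointing, degrees
    wavelength = 2.762       # e.g. the F277W pivot wavelength in microns
    day_of_year = 150        # hypothetical observation DOY

    # jbt writes a "bathtub" file of total background versus day of year
    jbt.get_background(ra, dec, wavelength, thisday=day_of_year,
                       plot_background=False, plot_bathtub=False,
                       write_bathtub=True, bathtub_file='background_versus_day.txt')
    bkg_table = Table.read('background_versus_day.txt', names=('day', 'total_bkg'), format='ascii')
    match = bkg_table['total_bkg'][bkg_table['day'] == day_of_year]
    total_bkg = match[0] if len(match) > 0 else np.nan  # MJy/sr
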
@@ -247,18 +322,20 @@ def process(self): 'expstart': '{}T{}'.format(hdu[0].header['DATE-OBS'], hdu[0].header['TIME-OBS']), 'expstart_mjd': hdu[0].header['EXPSTART'], 'effexptm': hdu[0].header['EFFEXPTM'], - 'ra': hdu[1].header['RA_V1'], - 'dec': hdu[1].header['DEC_V1'], + 'ra': ra, + 'dec': dec, 'pa_v3': hdu[1].header['PA_V3'], 'mean': float(mean), 'median': float(med), 'stddev': float(stddev), - 'frac_masked': len(segmap[segmap != 0]) / (segmap.shape[0] * segmap.shape[1]), + 'frac_masked': len(segmap_orig[(segmap_orig != 0) | (dq & 1 != 0)]) / (segmap_orig.shape[0] * segmap_orig.shape[1]), 'skyflat_filename': os.path.basename(self.outfile), + 'doy': float(doy), + 'total_bkg': float(total_bkg), 'entry_date': datetime.datetime.now() } - with engine.begin() as connection: - connection.execute(self.stats_table.__table__.insert(), claw_db_entry) + entry = self.stats_table(**claw_db_entry) + entry.save() hdu.close() # Make the normalized skyflat for this detector @@ -346,6 +423,12 @@ def run(self): mast_table = self.query_mast() logging.info('{} files found between {} and {}.'.format(len(mast_table), self.query_start_mjd, self.query_end_mjd)) + # Define pivot wavelengths + self.filter_wave = {'F070W': 0.704, 'F090W': 0.902, 'F115W': 1.154, 'F150W': 1.501, 'F150W2': 1.659, + 'F200W': 1.989, 'F212N': 2.121, 'F250M': 2.503, 'F277W': 2.762, 'F300M': 2.989, + 'F322W2': 3.232, 'F356W': 3.568, 'F410M': 4.082, 'F430M': 4.281, 'F444W': 4.408, + 'F480M': 4.874} + # Create observation-level median stacks for each filter/pupil combo, in pixel-space combos = np.array(['{}_{}_{}_{}'.format(str(row['program']), row['observtn'], row['filter'], row['pupil']).lower() for row in mast_table]) mast_table['combos'] = combos @@ -363,7 +446,7 @@ def run(self): for row in mast_table_combo: try: existing_files.append(filesystem_path(row['filename'])) - except: + except Exception as e: pass self.files = np.array(existing_files) self.detectors = np.array(mast_table_combo['detector']) @@ -377,7 +460,9 @@ def run(self): # Update the background trending plots, if any new data exists if len(mast_table) > 0: logging.info('Making background trending plots.') - self.make_background_plots() + self.make_background_plots(plot_type='bkg') + self.make_background_plots(plot_type='bkg_rms') + self.make_background_plots(plot_type='model') # Update the query history new_entry = {'instrument': 'nircam', @@ -385,8 +470,8 @@ def run(self): 'end_time_mjd': self.query_end_mjd, 'run_monitor': monitor_run, 'entry_date': datetime.datetime.now()} - with engine.begin() as connection: - connection.execute(self.query_table.__table__.insert(), new_entry) + entry = self.query_table(**new_entry) + entry.save() logging.info('Claw Monitor completed successfully.') diff --git a/jwql/instrument_monitors/pipeline_tools.py b/jwql/instrument_monitors/pipeline_tools.py index c39e3bda4..e3ab88433 100644 --- a/jwql/instrument_monitors/pipeline_tools.py +++ b/jwql/instrument_monitors/pipeline_tools.py @@ -193,8 +193,8 @@ def get_pipeline_steps(instrument): return required_steps -def image_stack(file_list): - """Given a list of fits files containing 2D images, read in all data +def image_stack(file_list, skipped_initial_ints=0): + """Given a list of fits files containing 2D or 3D images, read in all data and place into a 3D stack Parameters @@ -202,6 +202,13 @@ def image_stack(file_list): file_list : list List of fits file names + skipped_initial_ints : int + Number of initial integrations from each file to skip over and + not include in the stack. 
Only works with files containing 3D
+        arrays (e.g. rateints files). This is primarily for MIRI, where
+        we want to skip the first N integrations due to dark current
+        instability.
+
     Returns
     -------
     cube : numpy.ndarray
@@ -219,7 +226,8 @@
         if i == 0:
             ndim_base = image.shape
             if len(ndim_base) == 3:
-                cube = copy.deepcopy(image)
+                cube = copy.deepcopy(image[skipped_initial_ints:, :, :])
+                num_ints -= skipped_initial_ints
             elif len(ndim_base) == 2:
                 cube = np.expand_dims(image, 0)
         else:
@@ -227,9 +235,12 @@
             if ndim_base[-2:] == ndim[-2:]:
                 if len(ndim) == 2:
                     image = np.expand_dims(image, 0)
+                    cube = np.vstack((cube, image))
+                elif len(ndim) == 3:
+                    cube = np.vstack((cube, image[skipped_initial_ints:, :, :]))
+                    num_ints -= skipped_initial_ints
                 elif len(ndim) > 3:
                     raise ValueError("4-dimensional input slope images not supported.")
-                cube = np.vstack((cube, image))
             else:
                 raise ValueError("Input images are of inconsistent size in x/y dimension.")
             exptimes.append([exptime] * num_ints)
diff --git a/jwql/pull_jwql_branch.sh b/jwql/pull_jwql_branch.sh
new file mode 100644
index 000000000..5544124d7
--- /dev/null
+++ b/jwql/pull_jwql_branch.sh
@@ -0,0 +1,97 @@
+#!/bin/bash
+
+function echo_format {
+    echo ""
+    echo "Usage: $0 <branch> [-r|--reset_service] [-n|--notify <email>]"
+    echo ""
+    echo "WARNING! The optional parameters should only be used during a JWQL release in production"
+    echo "branch: the git branch to pull from"
+    echo "[-r|--reset_service]: Reset the jwql service"
+    echo "[-n|--notify <email>]: Notify via provided email"
+    echo ""
+    echo "Local:"
+    echo "$ bash pull_jwql_branch.sh develop"
+    echo ""
+    echo "Test:"
+    echo "$ bash pull_jwql_branch.sh v1.2 -r"
+    echo ""
+    echo "Production:"
+    echo "$ bash pull_jwql_branch.sh v1.2 -r -n group_email_address@stsci.edu"
+}
+
+# Check if the required number of arguments is provided
+if [ "$#" -lt 1 ]; then
+    echo_format
+    exit 1
+fi
+
+# Set default values for optional flags
+reset=false
+notify=false
+recipient=""
+
+# Retrieve the branch_name from the command line argument
+branch_name=$1
+# Parse optional flags
+while [[ $# -gt 1 ]]; do
+    case "$2" in
+        -r|--reset_service)
+            reset=true
+            ;;
+        -n|--notify)
+            notify=true
+            recipient="$3"
+            shift
+            ;;
+        *)
+            echo "Error: Invalid option $2"
+            echo_format
+            exit 1
+            ;;
+    esac
+    shift
+done
+
+if [ "$notify" = true ] && [ -z "$recipient" ]; then
+    echo_format
+    exit 1
+fi
+
+echo "Branch: $branch_name";
+echo "Reset: $reset";
+echo "Notify: $notify $recipient";
+
+# 1. Pull updated code from GitHub deployment branch (keep second checkout in case it's already defined for some weird reason)
+git checkout -b $branch_name --track origin/$branch_name
+git checkout $branch_name
+git fetch origin $branch_name
+git pull origin $branch_name
+git fetch origin --tags
+
+# 2. Bring the service down
+if [ "$reset" = true ]; then
+    sudo /bin/systemctl stop jwql.service
+fi
+
+# 3. Install jwql
+pip install -e ..
+
+# 4. Merge any migrations that exist in either database (router.py will sort where they go)
+python ./website/manage.py migrate jwql
+python ./website/manage.py migrate jwql --database=monitors
+
+# 5. Bring the service back up
+if [ "$reset" = true ]; then
+    sudo /bin/systemctl start jwql.service
+fi
+
+# 6. Initialize any new databases that have been added
+python ./database/database_interface.py
+
+# 7.
Send out notification email +if [ "$notify" = true ] && [ -n "$recipient" ]; then + subject="JWQL $branch_name Released" + message_content="Hello, A new version of JWQL ($branch_name) has just been released. Visit https://github.com/spacetelescope/jwql/releases for more information." + echo "$message_content" | mail -s "$subject" "$recipient" + echo "Notification Email Sent" +fi \ No newline at end of file diff --git a/jwql/tests/test_api_views.py b/jwql/tests/test_api_views.py index e3a2d1ca1..7b0994536 100644 --- a/jwql/tests/test_api_views.py +++ b/jwql/tests/test_api_views.py @@ -29,9 +29,8 @@ from jwql.utils.utils import get_base_url from jwql.utils.constants import JWST_INSTRUMENT_NAMES +from jwql.utils.constants import ON_GITHUB_ACTIONS -# Determine if tests are being run on Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') urls = [] diff --git a/jwql/tests/test_bad_pixel_monitor.py b/jwql/tests/test_bad_pixel_monitor.py index 6446570b6..b3cae5fc3 100644 --- a/jwql/tests/test_bad_pixel_monitor.py +++ b/jwql/tests/test_bad_pixel_monitor.py @@ -32,9 +32,7 @@ from jwql.database.database_interface import FGSBadPixelQueryHistory, FGSBadPixelStats from jwql.instrument_monitors.common_monitors import bad_pixel_monitor from jwql.tests.resources import has_test_db - -# Determine if tests are being run on Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') +from jwql.utils.constants import ON_GITHUB_ACTIONS def test_bad_map_to_list(): @@ -163,6 +161,7 @@ def test_get_possible_apertures(instrument, expected_list): assert ap_list == expected_list +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_identify_tables(): """Be sure the correct database tables are identified """ diff --git a/jwql/tests/test_bias_monitor.py b/jwql/tests/test_bias_monitor.py index 8becc2558..e5335ea34 100644 --- a/jwql/tests/test_bias_monitor.py +++ b/jwql/tests/test_bias_monitor.py @@ -28,8 +28,7 @@ from jwql.instrument_monitors.common_monitors import bias_monitor from jwql.tests.resources import has_test_db from jwql.utils.utils import get_config - -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') +from jwql.utils.constants import ON_GITHUB_ACTIONS def test_collapse_image(): @@ -65,7 +64,7 @@ def test_extract_zeroth_group(): # Extract the zeroth group using the bias monitor # nosec comment added to ignore bandit security check output_filename = monitor.extract_zeroth_group(filename) - os.chmod(output_filename, 508) # nosec + os.chmod(output_filename, 508) # nosec data = fits.getdata(output_filename, 'SCI')[0, 0, :, :] # Remove the copied test file and its zeroth group file so this test can be properly repeated @@ -96,6 +95,7 @@ def test_get_amp_medians(): assert amp_medians == amp_medians_truth +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_identify_tables(): """Be sure the correct database tables are identified""" diff --git a/jwql/tests/test_cosmic_ray_monitor.py b/jwql/tests/test_cosmic_ray_monitor.py index c9d590a3b..dfe27e4e2 100644 --- a/jwql/tests/test_cosmic_ray_monitor.py +++ b/jwql/tests/test_cosmic_ray_monitor.py @@ -26,8 +26,7 @@ from jwql.instrument_monitors.common_monitors.cosmic_ray_monitor import CosmicRay from jwql.database.database_interface import MIRICosmicRayQueryHistory from jwql.utils.utils import 
get_config - -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') +from jwql.utils.constants import ON_GITHUB_ACTIONS def define_test_data(nints): diff --git a/jwql/tests/test_dark_monitor.py b/jwql/tests/test_dark_monitor.py index 9cb11a96f..ab869cc06 100644 --- a/jwql/tests/test_dark_monitor.py +++ b/jwql/tests/test_dark_monitor.py @@ -28,9 +28,148 @@ from jwql.instrument_monitors.common_monitors import dark_monitor from jwql.tests.resources import has_test_db from jwql.utils.monitor_utils import mast_query_darks +from jwql.utils.constants import DARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME from jwql.utils.utils import get_config - -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') +from jwql.utils.constants import ON_GITHUB_ACTIONS + + +def generate_data_for_file_splitting_test(): + # Define data for parameterized test_split_files_into_sub_lists calls + files = [f'file_{idx}.fits' for idx in range(10)] + now = Time.now().mjd + deltat = [26., 25., 24., 23., 22., 4., 3., 2., 1., 0.] + start_times = [now - dt for dt in deltat] + end_times = [s + 0.1 for s in start_times] + threshold = 5. # integrations + integration_list = [3, 3, 2, 2, 2, 1, 1, 1, 1, 1] + expected = [['file_0.fits', 'file_1.fits'], + ['file_2.fits', 'file_3.fits', 'file_4.fits'], + ['file_5.fits', 'file_6.fits', 'file_7.fits', 'file_8.fits', 'file_9.fits'] + ] + test1 = (files, start_times, end_times, integration_list, threshold, expected) + + # Final epoch may not be over. Not enough ints in final epoch + deltat = [26., 25., 24., 23., 22., 4., 3., 2., 1., 0.] + start_times = [now - dt for dt in deltat] + end_times = [s + 0.1 for s in start_times] + threshold = 6. # integrations + integration_list = [3, 3, 2, 2, 2, 1, 1, 1, 1, 1] + expected = [['file_0.fits', 'file_1.fits'], + ['file_2.fits', 'file_3.fits', 'file_4.fits'] + ] + test2 = (files, start_times, end_times, integration_list, threshold, expected) + + # Final epoch may not be over. Not enough ints in final subgroup of final epoch + deltat = [26., 25., 24., 23., 22., 4., 3., 2., 1., 0.] + start_times = [now - dt for dt in deltat] + end_times = [s + 0.1 for s in start_times] + threshold = 6. # integrations + integration_list = [3, 3, 2, 2, 2, 1, 3, 3, 2, 2] + expected = [['file_0.fits', 'file_1.fits'], + ['file_2.fits', 'file_3.fits', 'file_4.fits'], + ['file_5.fits', 'file_6.fits', 'file_7.fits'] + ] + test3 = (files, start_times, end_times, integration_list, threshold, expected) + + deltat = [40., 39., 38., 37., 36., 18., 17., 16., 15., 0.] + start_times = [now - dt for dt in deltat] + end_times = [s + 0.1 for s in start_times] + threshold = 5. # integrations + integration_list = [3, 3, 2, 2, 2, 1, 1, 1, 1, 1] + expected = [['file_0.fits', 'file_1.fits'], + ['file_2.fits', 'file_3.fits', 'file_4.fits'], + ['file_5.fits', 'file_6.fits', 'file_7.fits', 'file_8.fits'] + ] + test4 = (files, start_times, end_times, integration_list, threshold, expected) + + deltat = [40., 39., 38., 37., 36., 18., 17., 16., 15., 0.] + start_times = [now - dt for dt in deltat] + end_times = [s + 0.1 for s in start_times] + threshold = 6. 
# integrations + integration_list = [3, 3, 2, 2, 2, 1, 1, 1, 1, 1] + expected = [['file_0.fits', 'file_1.fits'], + ['file_2.fits', 'file_3.fits', 'file_4.fits'], + ['file_5.fits', 'file_6.fits', 'file_7.fits', 'file_8.fits'] + ] + test5 = (files, start_times, end_times, integration_list, threshold, expected) + + deltat = [9., 8., 7., 6., 5., 4., 3., 2., 1., 0.] + start_times = [now - dt for dt in deltat] + end_times = [s + 0.1 for s in start_times] + integration_list = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + threshold = 6 + expected = [['file_0.fits', 'file_1.fits', 'file_2.fits', 'file_3.fits', 'file_4.fits', 'file_5.fits']] + test6 = (files, start_times, end_times, integration_list, threshold, expected) + + threshold = 9 + expected = [['file_0.fits', 'file_1.fits', 'file_2.fits', 'file_3.fits', 'file_4.fits', 'file_5.fits', + 'file_6.fits', 'file_7.fits', 'file_8.fits']] + test7 = (files, start_times, end_times, integration_list, threshold, expected) + + integration_list = [1] * len(start_times) + threshold = 10 + expected = [['file_0.fits', 'file_1.fits', 'file_2.fits', 'file_3.fits', 'file_4.fits', 'file_5.fits', + 'file_6.fits', 'file_7.fits', 'file_8.fits', 'file_9.fits'] + ] + test8 = (files, start_times, end_times, integration_list, threshold, expected) + + deltat = [23., 22., 21., 20., 19., 18., 17., 16., 15., 0.] + start_times = [now - dt for dt in deltat] + end_times = [s + 0.1 for s in start_times] + integration_list = [1] * len(start_times) + threshold = 10 + expected = [['file_0.fits', 'file_1.fits', 'file_2.fits', 'file_3.fits', 'file_4.fits', 'file_5.fits', + 'file_6.fits', 'file_7.fits', 'file_8.fits'] + ] + test9 = (files, start_times, end_times, integration_list, threshold, expected) + + deltat = [9., 8., 7., 6., 5., 4., 3., 2., 1., 0.] + start_times = [now - dt for dt in deltat] + end_times = [s + 0.1 for s in start_times] + integration_list = [1] * len(start_times) + threshold = 10 + expected = [['file_0.fits', 'file_1.fits', 'file_2.fits', 'file_3.fits', 'file_4.fits', 'file_5.fits', + 'file_6.fits', 'file_7.fits', 'file_8.fits', 'file_9.fits'] + ] + test10 = (files, start_times, end_times, integration_list, threshold, expected) + + deltat = [9., 8., 7., 6., 5., 4., 3., 2., 1., 0.] + start_times = [now - dt for dt in deltat] + end_times = [s + 0.1 for s in start_times] + integration_list = [1] * len(start_times) + threshold = 11 + expected = [] + test11 = (files, start_times, end_times, integration_list, threshold, expected) + + deltat = [40., 39., 38., 37., 24., 23., 22., 21., 1., 0.] + start_times = [now - dt for dt in deltat] + end_times = [s + 0.1 for s in start_times] + threshold = 6 # integrations + integration_list = [3, 3, 2, 2, 2, 1, 1, 1, 1, 1] + expected = [['file_0.fits', 'file_1.fits'], + ['file_2.fits', 'file_3.fits'], + ['file_4.fits', 'file_5.fits', 'file_6.fits', 'file_7.fits'] + ] + test12 = (files, start_times, end_times, integration_list, threshold, expected) + + # In this case, the final 2 files are grouped together due to being taken close + # in time to one another. However, they do not contain enough integrations to + # reach the threshold. Since these are the final two files, we have no way of + # knowing if they are just the first two observations of a larger set that should + # be grouped. Therefore, the dark monitor ignores these final two files, under + # the assumption that they will be used the next time the monitor is run. + deltat = [50., 49., 48., 47., 34., 33., 32., 31., 20., 19.] 
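
The comment above captures the grouping these expectations encode: files are first split into epochs wherever consecutive start times are separated by more than a threshold number of days (the test module imports DARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME for this), and each epoch is then chunked once enough integrations accumulate. A minimal sketch of just the epoch-splitting stage, with a hypothetical 10-day gap; this illustrates the idea, not the monitor's actual implementation or threshold:

    import numpy as np

    def split_into_epochs(start_times, gap_days=10.0):
        """Group file indices into epochs separated by more than gap_days."""
        start_times = np.asarray(start_times)
        # Indices where the gap to the previous file exceeds the threshold
        breaks = np.where(np.diff(start_times) > gap_days)[0] + 1
        return np.split(np.arange(len(start_times)), breaks)

    # For test1 above, the ~18-day gap splits files 0-4 from files 5-9
    epochs = split_into_epochs([0., 1., 2., 3., 4., 22., 23., 24., 25., 26.])
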
+ start_times = [now - dt for dt in deltat] + end_times = [s + 0.1 for s in start_times] + threshold = 6 # integrations + integration_list = [3, 3, 2, 2, 2, 1, 1, 1, 1, 1] + expected = [['file_0.fits', 'file_1.fits'], + ['file_2.fits', 'file_3.fits'], + ['file_4.fits', 'file_5.fits', 'file_6.fits', 'file_7.fits'] + ] + test13 = (files, start_times, end_times, integration_list, threshold, expected) + + return [test1, test2, test3, test4, test5, test6, test7, test8, test9, test10, test11, test12, test13] def test_find_hot_dead_pixels(): @@ -138,6 +277,16 @@ def test_shift_to_full_frame(): assert np.all(new_coords[1] == np.array([518, 515])) +@pytest.mark.parametrize("files,start_times,end_times,integration_list,threshold,expected", generate_data_for_file_splitting_test()) +def test_split_files_into_sub_lists(files, start_times, end_times, integration_list, threshold, expected): + """Test that file lists are appropriately split into subgroups for separate monitor runs""" + d = dark_monitor.Dark() + d.instrument = 'nircam' + d.split_files_into_sub_lists(files, start_times, end_times, integration_list, threshold) + + assert d.file_batches == expected + + @pytest.mark.skipif(not has_test_db(), reason='Modifies test database.') def test_add_bad_pix(): coord = ([1, 2, 3], [4, 5, 6]) diff --git a/jwql/tests/test_data_containers.py b/jwql/tests/test_data_containers.py index 6cb278c5e..7c4f68401 100644 --- a/jwql/tests/test_data_containers.py +++ b/jwql/tests/test_data_containers.py @@ -31,20 +31,21 @@ import pandas as pd import pytest +from jwql.utils.constants import ON_GITHUB_ACTIONS + os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql.website.jwql_proj.settings") # Skip testing this module if on Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') -from jwql.website.apps.jwql import data_containers -from jwql.tests.resources import ( - MockSessionFileAnomaly, MockSessionGroupAnomaly, - MockGetRequest, MockPostRequest) -from jwql.utils import constants +from jwql.website.apps.jwql import data_containers # noqa: E402 (module level import not at top of file) +from jwql.tests.resources import MockSessionFileAnomaly, MockSessionGroupAnomaly # noqa: E402 (module level import not at top of file) +from jwql.tests.resources import MockGetRequest, MockPostRequest # noqa: E402 (module level import not at top of file) +from jwql.utils import constants # noqa: E402 (module level import not at top of file) if not ON_GITHUB_ACTIONS: - from jwql.utils.utils import get_config - + from jwql.utils.utils import get_config # noqa: E402 (module level import not at top of file) + from jwql.website.apps.jwql.models import RootFileInfo + @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to django models.') def test_build_table(): tab = data_containers.build_table('filesystem_general') @@ -127,6 +128,58 @@ def test_get_acknowledgements(): assert len(acknowledgements) > 0 +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to django models.') +def test_get_additional_exposure_info(): + """Tests ``get_additional_exposure_info`` function.""" + # Test an exposure-level case + group_root = 'jw01068002001_02102_00008' + image_info = data_containers.get_image_info(group_root) + root_file_info = RootFileInfo.objects.filter(root_name__startswith=group_root) + basic, additional = data_containers.get_additional_exposure_info(root_file_info, image_info) + expected_basic = {'exp_type': 'NRC_IMAGE', + 'category': 'COM', + 'visit_status': 
'SUCCESSFUL', + 'subarray': 'SUB320', + 'pupil': 'CLEAR'} + # We can only test a subset of the keys in additional, since things like the pipeline version, + # crds context, etc can change over time. + expected_additional = {'READPATT': 'RAPID', + 'TITLE': 'NIRCam Subarray-Mode Commissioning, CAR NIRCam-019', + 'NGROUPS': 10, + 'PI_NAME': 'Hilbert, Bryan', + 'NINTS': 10, + 'TARGNAME': 'GP2-JMAG14-STAR-OFFSET', + 'EXPTIME': 106.904, + 'EXPSTART': 59714.6163261875} + for key in expected_basic: + assert basic[key] == expected_basic[key] + for key in expected_additional: + assert additional[key] == expected_additional[key] + + # Test an image-level case + file_root = 'jw01022016001_03101_00001_nrs1' + image_info = data_containers.get_image_info(file_root) + root_file_info = RootFileInfo.objects.get(root_name=file_root) + basic, additional = data_containers.get_additional_exposure_info(root_file_info, image_info) + expected_basic = {'exp_type': 'NRS_IFU', + 'category': 'COM', + 'visit_status': 'SUCCESSFUL', + 'subarray': 'FULL', + 'filter': 'F100LP', + 'grating': 'G140H'} + expected_additional = {'READPATT': 'NRSRAPID', + 'TITLE': 'CAR FGS-017 Straylight for Moving Targets (All SIs)', + 'NGROUPS': 13, + 'PI_NAME': 'Stansberry, John A.', + 'NINTS': 2, + 'TARGNAME': 'JUPITER', + 'EXPTIME': 279.156, + 'EXPSTART': 59764.77659749352} + assert basic == expected_basic + for key in expected_additional: + assert additional[key] == expected_additional[key] + + @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_get_all_proposals(): """Tests the ``get_all_proposals`` function.""" @@ -142,17 +195,11 @@ def test_get_all_proposals(): (['uncal', 'rate', 'bad'], {'bad'})), (False, ['rate', 'uncal', 'bad'], ['uncal', 'rate', 'bad']), - (True, - ['rate', 'uncal', 'bad', - 'o006_crfints', 'o001_crf'], - (['uncal', 'rate', 'o001_crf', - 'o006_crfints', 'bad'], {'bad'})), - (False, - ['rate', 'uncal', 'bad', - 'o006_crfints', 'o001_crf'], - ['uncal', 'rate', 'o001_crf', - 'o006_crfints', 'bad']), - ]) + (True, ['rate', 'uncal', 'bad', 'o006_crfints', 'o001_crf'], + (['uncal', 'rate', 'o001_crf', 'o006_crfints', 'bad'], {'bad'})), + (False, ['rate', 'uncal', 'bad', 'o006_crfints', 'o001_crf'], + ['uncal', 'rate', 'o001_crf', 'o006_crfints', 'bad'])]) + def test_get_available_suffixes(untracked, input_suffixes, expected): result = data_containers.get_available_suffixes( input_suffixes, return_untracked=untracked) @@ -292,6 +339,7 @@ def test_get_anomaly_form_post_group(mocker): assert update_mock.call_count == 2 """ +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to django models.') def test_get_dashboard_components(): request = MockPostRequest() diff --git a/jwql/tests/test_database_interface.py b/jwql/tests/test_database_interface.py index 08890a4f1..9719d9ab4 100755 --- a/jwql/tests/test_database_interface.py +++ b/jwql/tests/test_database_interface.py @@ -29,9 +29,7 @@ from jwql.database import database_interface as di from jwql.tests.resources import has_test_db from jwql.utils.utils import get_config - -# Determine if tests are being run on Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') +from jwql.utils.constants import ON_GITHUB_ACTIONS @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to development database server.') diff --git a/jwql/tests/test_edb.py b/jwql/tests/test_edb.py index a7da24be5..bdbb4b179 100644 --- a/jwql/tests/test_edb.py +++ 
b/jwql/tests/test_edb.py @@ -28,9 +28,7 @@ import pytest from jwql.edb import engineering_database as ed - -# Determine if tests are being run on Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') +from jwql.utils.constants import ON_GITHUB_ACTIONS def test_add(): @@ -243,8 +241,8 @@ def test_multiplication(): info['description'] = 'Voltage at some place' mnemonic1 = ed.EdbMnemonic('TEST_VOLTAGE', Time('2021-12-18T07:20:00'), Time('2021-12-18T07:30:00'), tab, {}, info, blocks=blocks1) mnemonic1.meta = {'Count': 1, - 'TlmMnemonics': [{'TlmMnemonic': 'TEST_VOLTAGE', - 'AllPoints': 1}]} + 'TlmMnemonics': [{'TlmMnemonic': 'TEST_VOLTAGE', + 'AllPoints': 1}]} dates2 = np.array([datetime(2021, 12, 18, 7, n, 10) for n in range(20, 30)]) data2 = np.array([15, 15, 15, 19, 19, 19, 19, 19, 12, 12]) @@ -258,8 +256,8 @@ def test_multiplication(): info['description'] = 'Current at some place' mnemonic2 = ed.EdbMnemonic('TEST_CURRENT', Time('2021-12-18T07:20:10'), Time('2021-12-18T07:30:10'), tab, {}, info, blocks=blocks2) mnemonic2.meta = {'Count': 1, - 'TlmMnemonics': [{'TlmMnemonic': 'TEST_CURRENT', - 'AllPoints': 1}]} + 'TlmMnemonics': [{'TlmMnemonic': 'TEST_CURRENT', + 'AllPoints': 1}]} prod = mnemonic1 * mnemonic2 assert np.allclose(prod.data["euvalues"].data, diff --git a/jwql/tests/test_edb_telemetry_monitor.py b/jwql/tests/test_edb_telemetry_monitor.py index f7a91e5d3..8ddd21f6e 100644 --- a/jwql/tests/test_edb_telemetry_monitor.py +++ b/jwql/tests/test_edb_telemetry_monitor.py @@ -38,9 +38,6 @@ from jwql.tests.resources import has_test_db from jwql.utils.constants import MIRI_POS_RATIO_VALUES -# Determine if tests are being run on Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - def test_add_every_change_history(): """Test that every_change data is correctly combined with an existing @@ -107,13 +104,13 @@ def test_conditions(): """Test the extraction of data using the ```equal``` class. 
""" # Create data for mnemonic of interest - #start_time = Time('2022-02-02') - #end_time = Time('2022-02-03') + # start_time = Time('2022-02-02') + # end_time = Time('2022-02-03') start_time = datetime.datetime(2022, 2, 2) end_time = datetime.datetime(2022, 2, 3) temp_data = Table() temp_data["euvalues"] = np.array([35., 35.1, 35.2, 36., 36.1, 36.2, 37.1, 37., 36., 36.]) - #temp_data["dates"] = np.array([Time('2022-02-02') + TimeDelta(0.1 * i, format='jd') for i in range(10)]) + # temp_data["dates"] = np.array([Time('2022-02-02') + TimeDelta(0.1 * i, format='jd') for i in range(10)]) temp_data["dates"] = np.array([start_time + datetime.timedelta(days=0.1 * i) for i in range(10)]) meta = {} info = {} @@ -267,7 +264,7 @@ def test_organize_every_change(): """ basetime = datetime.datetime(2021, 4, 6, 14, 0, 0) dates = np.array([basetime + datetime.timedelta(seconds=600 * i) for i in range(20)]) - #dates = np.array([basetime + TimeDelta(600 * i, format='sec') for i in range(20)]) + # dates = np.array([basetime + TimeDelta(600 * i, format='sec') for i in range(20)]) vals = np.array([300.5, 310.3, -250.5, -500.9, 32.2, 300.1, 310.8, -250.2, -500.2, 32.7, 300.2, 310.4, -250.6, -500.8, 32.3, diff --git a/jwql/tests/test_instrument_properties.py b/jwql/tests/test_instrument_properties.py index 4176cd69c..4a072c7d1 100644 --- a/jwql/tests/test_instrument_properties.py +++ b/jwql/tests/test_instrument_properties.py @@ -23,8 +23,7 @@ import numpy as np from jwql.utils import instrument_properties - -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') +from jwql.utils.constants import ON_GITHUB_ACTIONS if not ON_GITHUB_ACTIONS: from jwql.utils.utils import get_config diff --git a/jwql/tests/test_loading_times.py b/jwql/tests/test_loading_times.py index 7c83fed72..02fa6ac24 100644 --- a/jwql/tests/test_loading_times.py +++ b/jwql/tests/test_loading_times.py @@ -25,13 +25,11 @@ import urllib.request from jwql.utils.constants import MONITORS +from jwql.utils.constants import ON_GITHUB_ACTIONS from jwql.utils.utils import get_base_url TIME_CONSTRAINT = 30 # seconds -# Determine if tests are being run on Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - urls = [] # Generic URLs diff --git a/jwql/tests/test_logging_functions.py b/jwql/tests/test_logging_functions.py index e68a65ee8..53913a0d0 100644 --- a/jwql/tests/test_logging_functions.py +++ b/jwql/tests/test_logging_functions.py @@ -25,11 +25,9 @@ from jwql.utils import logging_functions from jwql.utils.logging_functions import configure_logging, log_fail, log_info, make_log_file +from jwql.utils.constants import ON_GITHUB_ACTIONS from jwql.utils.utils import get_config -# Determine if tests are being run on Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - @log_fail @log_info diff --git a/jwql/tests/test_mast_utils.py b/jwql/tests/test_mast_utils.py index 74c3f0328..d32c2c9b2 100755 --- a/jwql/tests/test_mast_utils.py +++ b/jwql/tests/test_mast_utils.py @@ -22,11 +22,10 @@ from astroquery.mast import Mast from jwql.utils.constants import JWST_INSTRUMENT_NAMES +from jwql.utils.constants import ON_GITHUB_ACTIONS from jwql.utils import mast_utils as mu from jwql.utils.utils import get_config -# Temporary until JWST operations: switch to test string for MAST request URL -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' 
in os.path.expanduser('~') if not ON_GITHUB_ACTIONS: Mast._portal_api_connection.MAST_REQUEST_URL = get_config()['mast_request_url'] @@ -34,9 +33,9 @@ def test_astroquery_mast(): """Test if the astroquery.mast service can complete a request""" service = 'Mast.Caom.Filtered' - params = {'columns': 'COUNT_BIG(*)', + params = {'columns': 'COUNT_BIG(*)', 'filters': [{"paramName": "obs_collection", - "values": ["JWST"]},], + "values": ["JWST"]}, ], 'pagesize': 1, 'page': 1} response = Mast.service_request_async(service, params) result = response[0].json() diff --git a/jwql/tests/test_msata_monitor.py b/jwql/tests/test_msata_monitor.py index fc0286d6c..c4e7b41ad 100644 --- a/jwql/tests/test_msata_monitor.py +++ b/jwql/tests/test_msata_monitor.py @@ -31,11 +31,10 @@ from jwql.instrument_monitors.nirspec_monitors.ta_monitors.msata_monitor import MSATA from jwql.database.database_interface import NIRSpecTAQueryHistory +from jwql.utils.constants import ON_GITHUB_ACTIONS from jwql.utils.utils import get_config, ensure_dir_exists from jwql.utils import monitor_utils, permissions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - # define the type of a Bokeh plot type bokeh_plot_type = type(figure()) @@ -53,38 +52,38 @@ def define_testdata(): msata_data : pandas dataframe """ msata_dict = { - # info taken from main_hdr dict - 'filename': ['jw09999001001_02101_00001_nrs1_uncal.fits'], - 'date_obs': ['2022-06-22'], - 'visit_id': ['V09999001001P0000000002101'], - 'tafilter': ['F110W'], - 'detector': ['NRS1'], - 'readout': ['NRSRAPID'], - 'subarray': ['FULL'], - # info taken from ta_hdr dict - 'num_refstars': [12], - 'ta_status': ['SUCCESSFUL'], - 'status_rsn': ['-999'], - 'v2halffacet': [-0.27568], - 'v3halffacet': [0.10975], - 'v2msactr': [378.523987], - 'v3msactr': [-428.374481], - 'lsv2offset': [-999.0], - 'lsv3offset': [-999.0], - 'lsoffsetmag': [-999.0], - 'lsrolloffset': [-999.0], - 'lsv2sigma': [-999.0], - 'lsv3sigma': [-999.0], - 'lsiterations': [-999], - 'guidestarid': ['-999'], - 'guidestarx': [-999.0], - 'guidestary': [-999.0], - 'guidestarroll': [-999.0], - 'samx': [-999.0], - 'samy': [-999.0], - 'samroll': [-999.0], - 'stars_in_fit': [-999] - } + # info taken from main_hdr dict + 'filename': ['jw09999001001_02101_00001_nrs1_uncal.fits'], + 'date_obs': ['2022-06-22'], + 'visit_id': ['V09999001001P0000000002101'], + 'tafilter': ['F110W'], + 'detector': ['NRS1'], + 'readout': ['NRSRAPID'], + 'subarray': ['FULL'], + # info taken from ta_hdr dict + 'num_refstars': [12], + 'ta_status': ['SUCCESSFUL'], + 'status_rsn': ['-999'], + 'v2halffacet': [-0.27568], + 'v3halffacet': [0.10975], + 'v2msactr': [378.523987], + 'v3msactr': [-428.374481], + 'lsv2offset': [-999.0], + 'lsv3offset': [-999.0], + 'lsoffsetmag': [-999.0], + 'lsrolloffset': [-999.0], + 'lsv2sigma': [-999.0], + 'lsv3sigma': [-999.0], + 'lsiterations': [-999], + 'guidestarid': ['-999'], + 'guidestarx': [-999.0], + 'guidestary': [-999.0], + 'guidestarroll': [-999.0], + 'samx': [-999.0], + 'samy': [-999.0], + 'samroll': [-999.0], + 'stars_in_fit': [-999] + } # add info from ta_table num_refstars = msata_dict['num_refstars'][0] msata_dict['box_peak_value'] = [[8000 for _ in range(num_refstars)]] diff --git a/jwql/tests/test_permissions.py b/jwql/tests/test_permissions.py index dd8878294..0fc6480d5 100755 --- a/jwql/tests/test_permissions.py +++ b/jwql/tests/test_permissions.py @@ -23,15 +23,11 @@ import os import pytest -from jwql.utils.permissions import set_permissions, 
has_permissions, \
-    get_owner_string, get_group_string
+from jwql.utils.permissions import set_permissions, has_permissions, get_owner_string, get_group_string

 # directory to be created and populated during tests running
 TEST_DIRECTORY = os.path.join(os.environ['HOME'], 'permission_test')

-# Determine if tests are being run on Github Actions
-ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~')
-

 @pytest.fixture(scope="module")
 def test_directory(test_dir=TEST_DIRECTORY):
diff --git a/jwql/tests/test_pipeline_tools.py b/jwql/tests/test_pipeline_tools.py
index a79d687c8..ff072e0fb 100644
--- a/jwql/tests/test_pipeline_tools.py
+++ b/jwql/tests/test_pipeline_tools.py
@@ -24,13 +24,10 @@
 import numpy as np

 from jwql.instrument_monitors import pipeline_tools
+from jwql.utils.constants import ON_GITHUB_ACTIONS
 from jwql.utils.utils import get_config

-# Determine if tests are being run on github actions
-ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~')
-
-
 @pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.')
 def test_completed_pipeline_steps():
     """Test that the list of completed pipeline steps for a file is
diff --git a/jwql/tests/test_plotting.py b/jwql/tests/test_plotting.py
index 18caafa9f..5432659d5 100755
--- a/jwql/tests/test_plotting.py
+++ b/jwql/tests/test_plotting.py
@@ -43,7 +43,7 @@ def test_bar_chart():

     # And generate a figure
     plt = bar_chart(data, 'index')
-    assert str(type(plt)) == "<class 'bokeh.plotting.figure.Figure'>"
+    assert str(type(plt)) == "<class 'bokeh.plotting._figure.figure'>"


 def test_bokeh_version():
diff --git a/jwql/tests/test_preview_image.py b/jwql/tests/test_preview_image.py
index ce0a7de5a..001371c96 100644
--- a/jwql/tests/test_preview_image.py
+++ b/jwql/tests/test_preview_image.py
@@ -30,16 +30,9 @@
 from jwst.datamodels import dqflags

 from jwql.utils.preview_image import PreviewImage, crop_to_subarray
+from jwql.utils.constants import ON_GITHUB_ACTIONS, ON_READTHEDOCS
 from jwql.utils.utils import get_config, ensure_dir_exists

-# Determine if tests are being run on Github Actions
-ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~')
-
-# Determine if the code is being run as part of a Readthedocs build
-ON_READTHEDOCS = False
-if 'READTHEDOCS' in os.environ:
-    ON_READTHEDOCS = os.environ['READTHEDOCS']
-

 def test_crop_to_subarray():
     """Test that the code correctly crops larger arrays down to
diff --git a/jwql/tests/test_protect_module.py b/jwql/tests/test_protect_module.py
index 3bcafa5c1..28af6cd86 100644
--- a/jwql/tests/test_protect_module.py
+++ b/jwql/tests/test_protect_module.py
@@ -21,11 +21,9 @@
 from jwql.utils import protect_module as pm

 from pytest import fixture, mark
+from jwql.utils.constants import ON_GITHUB_ACTIONS
 from jwql.utils.protect_module import lock_module, _PID_LOCKFILE_KEY

-# Determine if tests are being run on Github Actions
-ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~')
-

 @fixture
 def module_lock():
diff --git a/jwql/tests/test_readnoise_monitor.py b/jwql/tests/test_readnoise_monitor.py
index d00607dd5..77fdce3d8 100644
--- a/jwql/tests/test_readnoise_monitor.py
+++ b/jwql/tests/test_readnoise_monitor.py
@@ -27,10 +27,9 @@
 from jwql.database.database_interface import NIRCamReadnoiseQueryHistory, NIRCamReadnoiseStats, session
 from jwql.instrument_monitors.common_monitors import readnoise_monitor
 from jwql.tests.resources import has_test_db
+from
jwql.utils.constants import ON_GITHUB_ACTIONS from jwql.utils.utils import get_config -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - def test_determine_pipeline_steps(): """Test the correct pipeline steps are called""" @@ -92,6 +91,7 @@ def test_get_metadata(): assert monitor.expstart == '2016-01-18T04:35:14.523' +@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Requires access to central storage.') def test_identify_tables(): """Be sure the correct database tables are identified""" diff --git a/jwql/tests/test_utils.py b/jwql/tests/test_utils.py index cd50c6a01..ecc34790b 100644 --- a/jwql/tests/test_utils.py +++ b/jwql/tests/test_utils.py @@ -26,13 +26,10 @@ from bokeh.plotting import figure import numpy as np +from jwql.utils.constants import ON_GITHUB_ACTIONS from jwql.utils.utils import copy_files, get_config, filename_parser, filesystem_path, save_png, _validate_config -# Determine if tests are being run on Github Actions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - - FILENAME_PARSER_TEST_DATA = [ # Test full path diff --git a/jwql/tests/test_wata_monitor.py b/jwql/tests/test_wata_monitor.py index 0b9099beb..91624147a 100644 --- a/jwql/tests/test_wata_monitor.py +++ b/jwql/tests/test_wata_monitor.py @@ -28,11 +28,10 @@ from jwql.instrument_monitors.nirspec_monitors.ta_monitors.wata_monitor import WATA from jwql.database.database_interface import NIRSpecTAQueryHistory +from jwql.utils.constants import ON_GITHUB_ACTIONS from jwql.utils.utils import get_config, ensure_dir_exists from jwql.utils import monitor_utils, permissions -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - # define the type of a Bokeh plot type bokeh_plot_type = type(figure()) @@ -50,48 +49,48 @@ def define_testdata(): wata_data : pandas dataframe """ wata_dict = { - # info taken from main_hdr dict - 'filename': ['jw09999001001_02101_00001_nrs1_uncal.fits'], - 'date_obs': ['2022-06-22'], - 'visit_id': ['V09999001001P0000000002101'], - 'tafilter': ['F110W'], - 'detector': ['NRS1'], - 'readout': ['NRSRAPID'], - 'subarray': ['FULL'], - # info taken from ta_hdr dict - 'ta_status': ['SUCCESSFUL'], - 'status_reason': ['-999'], - 'star_name': ['-999'], - 'star_ra': [-999.0], - 'star_dec': [-999.0], - 'star_mag': [-999.0], - 'star_catalog': [-999], - 'planned_v2': [-999.0], - 'planned_v3': [-999.0], - 'stamp_start_col': [-999], - 'stamp_start_row': [-999], - 'star_detector': ['-999'], - 'max_val_box': [-999.0], - 'max_val_box_col': [-999.0], - 'max_val_box_row': [-999.0], - 'iterations': [-999], - 'corr_col': [-999.0], - 'corr_row': [-999.0], - 'stamp_final_col': [-999.0], - 'stamp_final_row': [-999.0], - 'detector_final_col': [-999.0], - 'detector_final_row': [-999.0], - 'final_sci_x': [-999.0], - 'final_sci_y': [-999.0], - 'measured_v2': [-999.0], - 'measured_v3': [-999.0], - 'ref_v2': [-999.0], - 'ref_v3': [-999.0], - 'v2_offset': [-999.0], - 'v3_offset': [-999.0], - 'sam_x': [-999.0], - 'sam_y': [-999.0], - } + # info taken from main_hdr dict + 'filename': ['jw09999001001_02101_00001_nrs1_uncal.fits'], + 'date_obs': ['2022-06-22'], + 'visit_id': ['V09999001001P0000000002101'], + 'tafilter': ['F110W'], + 'detector': ['NRS1'], + 'readout': ['NRSRAPID'], + 'subarray': ['FULL'], + # info taken from ta_hdr dict + 'ta_status': ['SUCCESSFUL'], + 'status_reason': ['-999'], + 'star_name': ['-999'], + 'star_ra': [-999.0], + 'star_dec': 
[-999.0], + 'star_mag': [-999.0], + 'star_catalog': [-999], + 'planned_v2': [-999.0], + 'planned_v3': [-999.0], + 'stamp_start_col': [-999], + 'stamp_start_row': [-999], + 'star_detector': ['-999'], + 'max_val_box': [-999.0], + 'max_val_box_col': [-999.0], + 'max_val_box_row': [-999.0], + 'iterations': [-999], + 'corr_col': [-999.0], + 'corr_row': [-999.0], + 'stamp_final_col': [-999.0], + 'stamp_final_row': [-999.0], + 'detector_final_col': [-999.0], + 'detector_final_row': [-999.0], + 'final_sci_x': [-999.0], + 'final_sci_y': [-999.0], + 'measured_v2': [-999.0], + 'measured_v3': [-999.0], + 'ref_v2': [-999.0], + 'ref_v3': [-999.0], + 'v2_offset': [-999.0], + 'v3_offset': [-999.0], + 'sam_x': [-999.0], + 'sam_y': [-999.0], + } # create the additional arrays bool_status, status_colors = [], [] for tas, do_str in zip(wata_dict['ta_status'], wata_dict['date_obs']): diff --git a/jwql/utils/calculations.py b/jwql/utils/calculations.py index 34e866c2e..a2a44ac3c 100644 --- a/jwql/utils/calculations.py +++ b/jwql/utils/calculations.py @@ -17,6 +17,7 @@ """ import numpy as np +import warnings from astropy.modeling import fitting, models from astropy.stats import sigma_clip @@ -169,8 +170,9 @@ def mean_stdev(image, sigma_threshold=3): stdev_value : float Sigma-clipped standard deviation of image """ - - clipped, lower, upper = sigmaclip(image, low=sigma_threshold, high=sigma_threshold) + # Ignore the warning about NaNs being clipped. + warnings.filterwarnings('ignore', message='Input data contains invalid values (NaNs or infs), which were automatically clipped.*') + clipped = sigma_clip(image, sigma=sigma_threshold, masked=False) mean_value = np.mean(clipped) stdev_value = np.std(clipped) diff --git a/jwql/utils/constants.py b/jwql/utils/constants.py index 48af112b4..5a588f439 100644 --- a/jwql/utils/constants.py +++ b/jwql/utils/constants.py @@ -28,7 +28,9 @@ ``utils.py`` """ +import asdf import inflection +import os # Each amplifier is represented by 2 tuples, the first for x coordinates # and the second for y coordinates. Within each tuple are value for @@ -205,6 +207,18 @@ # Types of potential bad pixels identified by the dark current monitor DARK_MONITOR_BADPIX_TYPES = ["hot", "dead", "noisy"] +# Minimum amount of time, in days, between epochs of dark current observations. If the +# dark monitor sees this much time, or longer, between two dark current files, it assumes +# that the two files are part of separate epochs. This means the monitor will run separately +# on these files, rather than bundling them together into a batch, where they would have +# been combined into a mean dark rate +DARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME = {'nircam': 10., + 'niriss': 10., + 'miri': 0.00001, # Treat each MIRI exposure separately + 'nirspec': 10., + 'fgs': 10. + } + # Maximum number of potential new bad pixels to overplot on the dark monitor # mean dark image plot. Too many overplotted points starts to obscure the image # itself, and are most likely not really new bad pixels @@ -614,6 +628,10 @@ # Maximum number of records returned by MAST for a single query MAST_QUERY_LIMIT = 550000 +# Minimum number of groups per integration required to include data +# in the dark current monitor +MINIMUM_DARK_CURRENT_GROUPS = 10 + # Expected position sensor values for MIRI. Used by the EDB monitor # to filter out bad values. 
Tuple values are the expected value and # the standard deviation associated with the value @@ -651,6 +669,52 @@ }, } +# Names of all of the monitor database tables +MONITOR_TABLE_NAMES = [ + "fgs_bad_pixel_query_history", "fgs_bad_pixel_stats", + "miri_bad_pixel_query_history", "miri_bad_pixel_stats", + "nircam_bad_pixel_query_history", "nircam_bad_pixel_stats", + "niriss_bad_pixel_query_history", "niriss_bad_pixel_stats", + "nirspec_bad_pixel_query_history", "nirspec_bad_pixel_stats", + "nircam_bias_query_history", "nircam_bias_stats", + "niriss_bias_query_history", "niriss_bias_stats", + "nirspec_bias_query_history", "nirspec_bias_stats", + "nircam_claw_query_history", "nircam_claw_stats", + "monitor", + "central_storage", + "filesystem_characteristics", + "filesystem_general", + "filesystem_instrument", + "fgs_anomaly", + "miri_anomaly", + "nircam_anomaly", + "niriss_anomaly", + "nirspec_anomaly", + "fgs_cosmic_ray_query_history", "fgs_cosmic_ray_stats", + "miri_cosmic_ray_query_history", "miri_cosmic_ray_stats", + "nircam_cosmic_ray_query_history", "nircam_cosmic_ray_stats", + "niriss_cosmic_ray_query_history", "niriss_cosmic_ray_stats", + "nirspec_cosmic_ray_query_history", "nirspec_cosmic_ray_stats", + "fgs_dark_dark_current", "fgs_dark_pixel_stats", "fgs_dark_query_history", + "miri_dark_dark_current", "miri_dark_pixel_stats", "miri_dark_query_history", + "nircam_dark_dark_current", "nircam_dark_pixel_stats", "nircam_dark_query_history", + "niriss_dark_dark_current", "niriss_dark_pixel_stats", "niriss_dark_query_history", + "nirspec_dark_dark_current", "nirspec_dark_pixel_stats", "nirspec_dark_query_history", + "fgs_edb_blocks_stats", "fgs_edb_daily_stats", "fgs_edb_every_change_stats", "fgs_edb_time_interval_stats", "fgs_edb_time_stats", + "miri_edb_blocks_stats", "miri_edb_daily_stats", "miri_edb_every_change_stats", "miri_edb_time_interval_stats", "miri_edb_time_stats", + "nircam_edb_blocks_stats", "nircam_edb_daily_stats", "nircam_edb_every_change_stats", "nircam_edb_time_interval_stats", "nircam_edb_time_stats", + "niriss_edb_blocks_stats", "niriss_edb_daily_stats", "niriss_edb_every_change_stats", "niriss_edb_time_interval_stats", "niriss_edb_time_stats", + "nirspec_edb_blocks_stats", "nirspec_edb_daily_stats", "nirspec_edb_every_change_stats", "nirspec_edb_time_interval_stats", "nirspec_edb_time_stats", + "nirspec_grating_stats", + "fgs_readnoise_query_history", "fgs_readnoise_stats", + "miri_readnoise_query_history", "miri_readnoise_stats", + "nircam_readnoise_query_history", "nircam_readnoise_stats", + "niriss_readnoise_query_history", "niriss_readnoise_stats", + "nirspec_readnoise_query_history", "nirspec_readnoise_stats", + "miri_ta_query_history", "miri_ta_stats", + "nirspec_ta_query_history", "nirspec_ta_stats" +] + # Suffix for msa files MSA_SUFFIX = ["msa"] @@ -720,6 +784,12 @@ # Possible suffix types for AMI files NIRISS_AMI_SUFFIX_TYPES = ["amiavg", "aminorm", "ami", "psf-amiavg"] +# Determine if the code is being run as part of CI checking on github +ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') + +# Determine if the code is being run as part of a Readthedocs build +ON_READTHEDOCS = os.environ.get('READTHEDOCS', False) + # Base name for the file listing the preview images for a given instrument. # The complete name will have "_{instrument.lower}.txt" added to the end of this. 
PREVIEW_IMAGE_LISTFILE = "preview_image_inventory" @@ -913,57 +983,13 @@ class QueryConfigKeys: "SUBGRISMSTRIPE256", ] +schema = asdf.schema.load_schema("http://stsci.edu/schemas/jwst_datamodel/subarray.schema") SUBARRAYS_PER_INSTRUMENT = { - "nircam": [ - "FULL", - "FULLP", - "SUB640", - "SUB320", - "SUB160", - "SUB400P", - "SUB160P", - "SUB64P", - "SUB32TATS", - "SUB640A210R", - "SUB640ASWB", - "SUB320A335R", - "SUB320A430R", - "SUB320ALWB", - "SUBGRISM256", - "SUBGRISM128", - "SUBGRISM64", - "SUB32TATSGRISM", - ], - "niriss": [ - "FULL", - "SUBSTRIP96", - "SUBSTRIP256", - "SUB80", - "SUB64", - "SUB128", - "SUB256", - "WFSS64R", - "WFSS128R", - "WFSS64C", - "WFSS128C", - "SUBAMPCAL", - "SUBTAAMI", - "SUBTASOSS", - ], - "nirspec": [], - "miri": [ - "BRIGHTSKY", - "FULL", - "MASK1065", - "MASK1140", - "MASK1550", - "MASKLYOT", - "SLITLESSPRISM", - "SUB64", - "SUB128", - "SUB256", - ], - "fgs": [], + "nircam": ['FULL'] + sorted(schema["properties"]["meta"]["properties"]["subarray"]["properties"]["name"]["anyOf"][2]['enum']), + "niriss": ['FULL'] + sorted(schema["properties"]["meta"]["properties"]["subarray"]["properties"]["name"]["anyOf"][4]['enum']), + "nirspec": ['FULL'] + sorted(schema["properties"]["meta"]["properties"]["subarray"]["properties"]["name"]["anyOf"][6]['enum']), + "miri": ['FULL'] + sorted(schema["properties"]["meta"]["properties"]["subarray"]["properties"]["name"]["anyOf"][1]['enum']), + "fgs": ['FULL'] + sorted(schema["properties"]["meta"]["properties"]["subarray"]["properties"]["name"]["anyOf"][0]['enum']) } # Filename suffixes that need to include the association value in the suffix in @@ -984,6 +1010,15 @@ class QueryConfigKeys: # Possible suffix types for time-series exposures TIME_SERIES_SUFFIX_TYPES = ["phot", "whtlt"] +# Instrument Documentation Links +URL_DICT = { + "fgs": "https://jwst-docs.stsci.edu/jwst-observatory-hardware/jwst-fine-guidance-sensor", + "miri": "https://jwst-docs.stsci.edu/jwst-mid-infrared-instrument", + "niriss": "https://jwst-docs.stsci.edu/jwst-near-infrared-imager-and-slitless-spectrograph", + "nirspec": "https://jwst-docs.stsci.edu/jwst-near-infrared-spectrograph", + "nircam": "https://jwst-docs.stsci.edu/jwst-near-infrared-camera", +} + # Possible suffix types for WFS&C files WFSC_SUFFIX_TYPES = ["wfscmb"] @@ -997,12 +1032,3 @@ class QueryConfigKeys: + WFSC_SUFFIX_TYPES + MSA_SUFFIX ) - -# Instrument Documentation Links -URL_DICT = { - "fgs": "https://jwst-docs.stsci.edu/jwst-observatory-hardware/jwst-fine-guidance-sensor", - "miri": "https://jwst-docs.stsci.edu/jwst-mid-infrared-instrument", - "niriss": "https://jwst-docs.stsci.edu/jwst-near-infrared-imager-and-slitless-spectrograph", - "nirspec": "https://jwst-docs.stsci.edu/jwst-near-infrared-spectrograph", - "nircam": "https://jwst-docs.stsci.edu/jwst-near-infrared-camera", -} diff --git a/jwql/utils/instrument_properties.py b/jwql/utils/instrument_properties.py index 33d900b4d..88acb5465 100644 --- a/jwql/utils/instrument_properties.py +++ b/jwql/utils/instrument_properties.py @@ -129,6 +129,11 @@ def amplifier_info(filename, omit_reference_pixels=True): except KeyError: raise KeyError('DQ extension not found.') + # If the file contains multiple frames (e.g. rateints file) + # keep just the first + if len(data_quality.shape) == 3: + data_quality = data_quality[0, :, :] + # Reference pixels should be flagged in the DQ array with the # REFERENCE_PIXEL flag. Find the science pixels by looping for # pixels that don't have that bit set. 
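[Reviewer note] The instrument_properties.py hunk above guards against 3D data-quality arrays (e.g. from rateints files) by keeping only the first frame. A minimal sketch of that behavior, using a hypothetical (nints, ny, nx) array in place of a DQ extension read from a real file:

```python
# Hedged sketch: the cube shape and dtype are stand-ins, not JWQL code.
import numpy as np

data_quality = np.zeros((3, 32, 32), dtype=np.uint32)  # e.g. 3 integrations
if len(data_quality.shape) == 3:
    # Keep just the first integration so downstream code sees a 2D array
    data_quality = data_quality[0, :, :]
assert data_quality.shape == (32, 32)
```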
diff --git a/jwql/utils/interactive_preview_image.py b/jwql/utils/interactive_preview_image.py
index da9563770..1ffd2ab7b 100644
--- a/jwql/utils/interactive_preview_image.py
+++ b/jwql/utils/interactive_preview_image.py
@@ -168,7 +168,7 @@ def create_bokeh_image(self):
             plot_width = min_dim

         fig = figure(tools='pan,reset,save', match_aspect=True,
-                     plot_width=plot_width, plot_height=plot_height)
+                     width=plot_width, height=plot_height)
         fig.add_tools(BoxZoomTool(match_aspect=True))
         fig.add_tools(WheelZoomTool(zoom_on_axis=False))
@@ -256,7 +256,7 @@ def line_plots(self, main_figure):
         for index_direction in directions:
             if index_direction == 'x':
                 # column plots
-                fig = figure(plot_width=200, plot_height=main_figure.height, tools='',
+                fig = figure(width=200, height=main_figure.height, tools='',
                              y_axis_location='right', margin=(0, 0, 0, 30))
                 fig.toolbar.logo = None
@@ -280,7 +280,7 @@ def line_plots(self, main_figure):
             else:
                 # row plots
-                fig = figure(plot_height=200, plot_width=main_figure.width, tools='')
+                fig = figure(height=200, width=main_figure.width, tools='')
                 fig.toolbar.logo = None

                 fig.y_range = Range1d()
@@ -387,7 +387,7 @@ def line_plots(self, main_figure):
                         idx = line[i].data_source.data['x'];
                     }
                     for (let j=0; j < data.length; j++) {
-                        if (idx[j] >= match_range.start
+                        if (idx[j] >= match_range.start
                             && idx[j] <= match_range.end) {
                             if (Number.isFinite(data[j])) {
                                 min_val = Math.min(data[j], min_val);
@@ -444,7 +444,7 @@ def add_hover_tool(self, source, images):
         hover_callback = CustomJS(args={'s': source, 'd': hover_div,
                                         'u': self.signal_units, 'dq': is_dq}, code="""
            const idx = cb_data.index.image_indices;
-           if (idx.length > 0) {
+           if (idx.length > 0) {
               var x = idx[0].dim1;
               var y = idx[0].dim2;
               var flat = idx[0].flat_index;
@@ -471,25 +471,25 @@ def add_hover_tool(self, source, images):
                }
                label = "Value (" + u + ")";
             }
-            d.text = "<b>Pixel Value</b>" +
-                     "<table>" +
-                     "<tr>" +
+            d.text = "<b>Pixel Value</b>" +
+                     "<table>" +
+                     "<tr>" +
                      "<td>(x, y) =</td>" +
                      "<td>(" + x + ", " + y + ")</td>" +
                      "</tr>"
             if ('ra' in s.data && 'dec' in s.data) {
                var ra = s.data['ra'][0][flat].toPrecision(8);
                var dec = s.data['dec'][0][flat].toPrecision(8);
-               d.text += "<tr>" +
+               d.text += "<tr>" +
                          "<td>RA (deg)=</td>" +
                          "<td>" + ra + "</td>" +
                          "</tr>" +
-                         "<tr>" +
+                         "<tr>" +
                          "<td>Dec (deg)=</td>" +
                          "<td>" + dec + "</td>" +
                          "</tr>"
             }
-            d.text += "<tr>" +
+            d.text += "<tr>" +
                       "<td>" + label + "=</td>" +
                       "<td>" + val + "</td>" +
                       "</tr>";
          } else {
diff --git a/jwql/utils/mast_utils.py b/jwql/utils/mast_utils.py
index 6524a8c6a..446645800 100644
--- a/jwql/utils/mast_utils.py
+++ b/jwql/utils/mast_utils.py
@@ -24,12 +24,12 @@
 import pandas as pd

 from jwql.utils.constants import JWST_DATAPRODUCTS, JWST_INSTRUMENT_NAMES, JWST_INSTRUMENT_NAMES_MIXEDCASE, MAST_QUERY_LIMIT
+from jwql.utils.constants import ON_GITHUB_ACTIONS
 from jwql.utils.permissions import set_permissions
 from jwql.utils.utils import ensure_dir_exists, get_config
 from jwql.utils.plotting import bar_chart

-ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~')
 if not ON_GITHUB_ACTIONS:
     Mast._portal_api_connection.MAST_REQUEST_URL = get_config()['mast_request_url']
diff --git a/jwql/utils/monitor_utils.py b/jwql/utils/monitor_utils.py
index f5c673cbd..536ac1ad4 100644
--- a/jwql/utils/monitor_utils.py
+++ b/jwql/utils/monitor_utils.py
@@ -19,11 +19,12 @@
 import datetime
 import os
 from astroquery.mast import Mast, Observations
+import numpy as np
 from django import setup
-
 from jwql.database.database_interface import Monitor, engine
 from jwql.utils.constants import ASIC_TEMPLATES, JWST_DATAPRODUCTS, MAST_QUERY_LIMIT
+from jwql.utils.constants import ON_GITHUB_ACTIONS, ON_READTHEDOCS
 from jwql.utils.logging_functions import configure_logging, get_log_status
 from jwql.utils import mast_utils
 from jwql.utils.utils import filename_parser
@@ -33,12 +34,6 @@
 # a MAST query.
 Mast._portal_api_connection.PAGESIZE = MAST_QUERY_LIMIT

-# Determine if the code is being run as part of a github action or Readthedocs build
-ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~')
-ON_READTHEDOCS = False
-if 'READTHEDOCS' in os.environ:  # pragma: no cover
-    ON_READTHEDOCS = os.environ['READTHEDOCS']
-
 if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS:
     # These lines are needed in order to use the Django models in a standalone
     # script (as opposed to code run as a result of a webpage request). If these
If these @@ -159,6 +154,11 @@ def mast_query_darks(instrument, aperture, start_date, end_date, readpatt=None): if len(query['data']) > 0: query_results.extend(query['data']) + # Put the file entries in chronological order + expstarts = [e['expstart'] for e in query_results] + idx = np.argsort(expstarts) + query_results = list(np.array(query_results)[idx]) + return query_results diff --git a/jwql/utils/plotting.py b/jwql/utils/plotting.py index 0e0dd6d06..4adba0655 100755 --- a/jwql/utils/plotting.py +++ b/jwql/utils/plotting.py @@ -70,7 +70,7 @@ def bar_chart(dataframe, groupcol, datacols=None, **kwargs): # Make the figure hover = HoverTool(tooltips=[('count', '@counts')]) - plt = figure(x_range=FactorRange(*x), plot_height=250, tools=[hover], + plt = figure(x_range=FactorRange(*x), height=250, tools=[hover], **kwargs) plt.vbar(x='x', top='counts', width=0.9, source=source, line_color="white", fill_color=factor_cmap('x', palette=Category20c[colors], diff --git a/jwql/utils/preview_image.py b/jwql/utils/preview_image.py index f86c10272..088996ab9 100755 --- a/jwql/utils/preview_image.py +++ b/jwql/utils/preview_image.py @@ -43,6 +43,7 @@ import numpy as np from jwql.utils import permissions +from jwql.utils.constants import ON_GITHUB_ACTIONS, ON_READTHEDOCS from jwql.utils.utils import get_config # Use the 'Agg' backend to avoid invoking $DISPLAY @@ -52,17 +53,9 @@ import matplotlib.colors as colors # noqa from matplotlib.ticker import AutoMinorLocator # noqa -# Only import jwst if not running from readthedocs -# Determine if the code is being run as part of a Readthedocs build -ON_READTHEDOCS = False -if 'READTHEDOCS' in os.environ: - ON_READTHEDOCS = os.environ['READTHEDOCS'] - if not ON_READTHEDOCS: from jwst.datamodels import dqflags -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS: CONFIGS = get_config() @@ -763,6 +756,7 @@ def expand_for_i2d(array, xdim, ydim): else: return array + def nan_to_zero(image): """Set any pixels with a value of NaN to zero diff --git a/jwql/utils/utils.py b/jwql/utils/utils.py index fd5ecbabd..b5d2e5a46 100644 --- a/jwql/utils/utils.py +++ b/jwql/utils/utils.py @@ -53,12 +53,9 @@ FILE_GUIDESTAR_ATTMPT_LEN_MAX, FILE_OBS_LEN, FILE_PARALLEL_SEQ_ID_LEN, \ FILE_PROG_ID_LEN, FILE_SEG_LEN, FILE_SOURCE_ID_LEN, FILE_SUFFIX_TYPES, \ FILE_TARG_ID_LEN, FILE_VISIT_GRP_LEN, FILE_VISIT_LEN, FILETYPE_WO_STANDARD_SUFFIX, \ - JWST_INSTRUMENT_NAMES_SHORTHAND - + JWST_INSTRUMENT_NAMES_SHORTHAND, ON_GITHUB_ACTIONS __location__ = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~') - def _validate_config(config_file_dict): """Check that the config.json file contains all the needed entries with @@ -81,7 +78,7 @@ def _validate_config(config_file_dict): "admin_account": {"type": "string"}, "auth_mast": {"type": "string"}, "connection_string": {"type": "string"}, - "database": { + "databases": { "type": "object", "properties": { "engine": {"type": "string"}, @@ -93,7 +90,36 @@ def _validate_config(config_file_dict): }, "required": ['engine', 'name', 'user', 'password', 'host', 'port'] }, - + "django_databases": { + "type": "object", + "properties": { + "default": { + "type": "object", + "properties": { + "ENGINE": {"type": "string"}, + "NAME": {"type": "string"}, + "USER": {"type": "string"}, + "PASSWORD": {"type": "string"}, + "HOST": {"type": "string"}, + 
"PORT": {"type": "string"} + }, + "required": ['ENGINE', 'NAME', 'USER', 'PASSWORD', 'HOST', 'PORT'] + }, + "monitors": { + "type": "object", + "properties": { + "ENGINE": {"type": "string"}, + "NAME": {"type": "string"}, + "USER": {"type": "string"}, + "PASSWORD": {"type": "string"}, + "HOST": {"type": "string"}, + "PORT": {"type": "string"} + }, + "required": ['ENGINE', 'NAME', 'USER', 'PASSWORD', 'HOST', 'PORT'] + } + }, + "required": ["default", "monitors"] + }, "jwql_dir": {"type": "string"}, "jwql_version": {"type": "string"}, "server_type": {"type": "string"}, @@ -110,11 +136,11 @@ def _validate_config(config_file_dict): "cores": {"type": "string"} }, # List which entries are needed (all of them) - "required": ["connection_string", "database", "filesystem", - "preview_image_filesystem", "thumbnail_filesystem", - "outputs", "jwql_dir", "admin_account", "log_dir", - "test_dir", "test_data", "setup_file", "auth_mast", - "mast_token", "working"] + "required": ["connection_string", "databases", "django_databases", + "filesystem", "preview_image_filesystem", + "thumbnail_filesystem", "outputs", "jwql_dir", + "admin_account", "log_dir", "test_dir", "test_data", + "setup_file", "auth_mast", "mast_token", "working"] } # Test that the provided config file dict matches the schema diff --git a/jwql/website/apps/jwql/bokeh_containers.py b/jwql/website/apps/jwql/bokeh_containers.py index dad0ad69d..f4569747a 100644 --- a/jwql/website/apps/jwql/bokeh_containers.py +++ b/jwql/website/apps/jwql/bokeh_containers.py @@ -24,7 +24,8 @@ from bokeh.embed import components from bokeh.layouts import layout -from bokeh.models.widgets import Tabs, Panel +from bokeh.models import DatetimeTickFormatter +from bokeh.models.layouts import TabPanel, Tabs from bokeh.plotting import figure, output_file import numpy as np import pysiaf @@ -162,9 +163,9 @@ def cosmic_ray_monitor_tabs(instrument): # Allow figure sizes to scale with window histogram_layout.sizing_mode = "scale_width" # Make sure the sizing is adjustable - histogram_tab = Panel(child=histogram_layout, title="Histogram") + histogram_tab = TabPanel(child=histogram_layout, title="Histogram") line_layout.sizing_mode = "scale_width" # Make sure the sizing is adjustable - line_tab = Panel(child=line_layout, title="Trending") + line_tab = TabPanel(child=line_layout, title="Trending") # Build tabs tabs = Tabs(tabs=[histogram_tab, line_tab]) @@ -199,9 +200,9 @@ def dark_monitor_tabs(instrument): image_layout = standard_monitor_plot_layout(instrument, plots.dark_image_data) # Create a tab for each type of plot - histogram_tab = Panel(child=histogram_layout, title="Dark Rate Histogram") - line_tab = Panel(child=trending_layout, title="Trending") - image_tab = Panel(child=image_layout, title="Mean Dark Image") + histogram_tab = TabPanel(child=histogram_layout, title="Dark Rate Histogram") + line_tab = TabPanel(child=trending_layout, title="Trending") + image_tab = TabPanel(child=image_layout, title="Mean Dark Image") # Build tabs tabs = Tabs(tabs=[histogram_tab, line_tab, image_tab]) @@ -282,10 +283,10 @@ def generic_telemetry_plot(times, values, name, nominal_value=None, yellow_limit if nominal_value is not None: fig.line(times, np.repeat(nominal_value, len(times)), line_dash='dashed') - fig.xaxis.formatter = DatetimeTickFormatter(hours=["%d %b %H:%M"], - days=["%d %b %H:%M"], - months=["%d %b %Y %H:%M"], - years=["%d %b %Y"], + fig.xaxis.formatter = DatetimeTickFormatter(hours="%d %b %H:%M", + days="%d %b %H:%M", + months="%d %b %Y %H:%M", + years="%d %b %Y" ) 
fig.xaxis.major_label_orientation = np.pi / 4 @@ -354,7 +355,7 @@ def readnoise_monitor_tabs(instrument): plots[5:6] ) readnoise_layout.sizing_mode = 'scale_width' - readnoise_tab = Panel(child=readnoise_layout, title=aperture) + readnoise_tab = TabPanel(child=readnoise_layout, title=aperture) tabs.append(readnoise_tab) # Build tabs @@ -401,7 +402,7 @@ def standard_monitor_plot_layout(instrument, plots): elif instrument.lower() == 'niriss': full_frame_lists = [ [plots['NIS_CEN']] - ] + ] elif instrument.lower() == 'miri': full_frame_lists = [ [plots['MIRIM_FULL']] diff --git a/jwql/website/apps/jwql/bokeh_dashboard.py b/jwql/website/apps/jwql/bokeh_dashboard.py index 2c282e274..536ef9710 100644 --- a/jwql/website/apps/jwql/bokeh_dashboard.py +++ b/jwql/website/apps/jwql/bokeh_dashboard.py @@ -37,7 +37,7 @@ from bokeh.layouts import column from bokeh.models import Axis, ColumnDataSource, DatetimeTickFormatter, HoverTool, OpenURL, TapTool -from bokeh.models.widgets import Panel, Tabs +from bokeh.models.layouts import TabPanel, Tabs from bokeh.plotting import figure from bokeh.transform import cumsum import numpy as np @@ -49,6 +49,7 @@ from jwql.utils.constants import ANOMALY_CHOICES_PER_INSTRUMENT, FILTERS_PER_INSTRUMENT, JWST_INSTRUMENT_NAMES_MIXEDCASE from jwql.utils.utils import get_base_url, get_config from jwql.website.apps.jwql.data_containers import build_table +from jwql.website.apps.jwql.models import Anomalies def build_table_latest_entry(tablename): @@ -217,7 +218,7 @@ def dashboard_disk_usage(self): # Initialize plot plots[data['shortname']] = figure(tools='pan,box_zoom,wheel_zoom,reset,save', - plot_width=800, + width=800, x_axis_type='datetime', title=f"Available & Used Storage on {data['shortname']}", x_axis_label='Date', @@ -226,12 +227,12 @@ def dashboard_disk_usage(self): plots[data['shortname']].line(x='date', y='available', source=source, legend_label='Available', line_dash='dashed', line_color='#C85108', line_width=3) plots[data['shortname']].circle(x='date', y='available', source=source,color='#C85108', size=10) plots[data['shortname']].line(x='date', y='used', source=source, legend_label='Used', line_dash='dashed', line_color='#355C7D', line_width=3) - plots[data['shortname']].circle(x='date', y='used', source=source,color='#355C7D', size=10) + plots[data['shortname']].circle(x='date', y='used', source=source, color='#355C7D', size=10) - plots[data['shortname']].xaxis.formatter = DatetimeTickFormatter(hours=["%H:%M %d %B %Y"], - days=["%d %B %Y"], - months=["%d %B %Y"], - years=["%B %Y"], + plots[data['shortname']].xaxis.formatter = DatetimeTickFormatter(hours="%H:%M %d %B %Y", + days="%d %B %Y", + months="%d %B %Y", + years="%B %Y" ) plots[data['shortname']].xaxis.major_label_orientation = pi / 4 plots[data['shortname']].legend.location = 'top_left' @@ -242,16 +243,15 @@ def dashboard_disk_usage(self): ]) hover_tool[data['shortname']].formatters = {'@date': 'datetime'} plots[data['shortname']].tools.append(hover_tool[data['shortname']]) - tabs.append(Panel(child=plots[data['shortname']], title=f"{data['shortname']} Storage")) + tabs.append(TabPanel(child=plots[data['shortname']], title=f"{data['shortname']} Storage")) tabs = Tabs(tabs=tabs) di.session.close() return tabs - def dashboard_central_store_data_volume(self): - """Create trending plot of data volume for various JWQL-related areas on disk. + """ Create trending plot of data volume for various JWQL-related areas on disk. 
These plots show data volumes calculated by walking over subdirectories/files in the JWQL-specific directories. So these plots may not include the total used disk volume, in the cases where JWQL is sharing a disk with other projects. These @@ -264,14 +264,14 @@ def dashboard_central_store_data_volume(self): """ # Initialize plot plot = figure(tools='pan,box_zoom,wheel_zoom,reset,save', - plot_width=800, + width=800, x_axis_type='datetime', title='JWQL directory size', x_axis_label='Date', y_axis_label='Disk Space (TB)') # This part of the plot should cycle through areas and plot area used values vs. date - #arealist = ['logs', 'outputs', 'test', 'preview_images', 'thumbnails', 'all'] + # arealist = ['logs', 'outputs', 'test', 'preview_images', 'thumbnails', 'all'] arealist = ['logs', 'outputs', 'preview_images', 'thumbnails'] colors = ['#F8B195', '#F67280', '#6C5B7B', '#355C7D'] for area, color in zip(arealist, colors): @@ -297,10 +297,10 @@ def dashboard_central_store_data_volume(self): hover_tool.formatters = {'@date': 'datetime'} plot.tools.append(hover_tool) - plot.xaxis.formatter = DatetimeTickFormatter(hours=["%H:%M %d %B %Y"], - days=["%d %B %Y"], - months=["%d %B %Y"], - years=["%B %Y"], + plot.xaxis.formatter = DatetimeTickFormatter(hours="%H:%M %d %B %Y", + days="%d %B %Y", + months="%d %B %Y", + years="%B %Y" ) plot.xaxis.major_label_orientation = pi / 4 plot.legend.location = 'top_left' @@ -308,7 +308,7 @@ def dashboard_central_store_data_volume(self): # Put the "all" plot in a separate figure because it will be larger than all the pieces, which would # throw off the y range if it were in a single plot cen_store_plot = figure(tools='pan,box_zoom,wheel_zoom,reset,save', - plot_width=800, + width=800, x_axis_type='datetime', title='JWQL central store directory, total data volume', x_axis_label='Date', @@ -332,10 +332,10 @@ def dashboard_central_store_data_volume(self): legend_str = 'File volume' cen_store_plot.line(x='date', y='used', source=cen_store_source, legend_label=legend_str, line_dash='dashed', line_color='#355C7D', line_width=3) cen_store_plot.circle(x='date', y='used', source=cen_store_source, color='#355C7D', size=10) - cen_store_plot.xaxis.formatter = DatetimeTickFormatter(hours=["%H:%M %d %B %Y"], - days=["%d %B %Y"], - months=["%d %B %Y"], - years=["%B %Y"], + cen_store_plot.xaxis.formatter = DatetimeTickFormatter(hours="%H:%M %d %B %Y", + days="%d %B %Y", + months="%d %B %Y", + years="%B %Y" ) cen_store_plot.xaxis.major_label_orientation = pi / 4 cen_store_plot.legend.location = 'top_left' @@ -349,7 +349,6 @@ def dashboard_central_store_data_volume(self): di.session.close() return plot, cen_store_plot - def dashboard_filetype_bar_chart(self): """Build bar chart of files based off of type @@ -447,34 +446,34 @@ def dashboard_files_per_day(self): date_times = [pd.to_datetime(datetime).date() for datetime in source['date'].values] source['datestr'] = [date_time.strftime("%Y-%m-%d") for date_time in date_times] - p1 = figure(title="Number of Files in Filesystem (MAST)", tools="reset,hover,box_zoom,wheel_zoom", tooltips="@datestr: @total_file_count", plot_width=800, x_axis_label='Date', y_axis_label='Number of Files Added') + p1 = figure(title="Number of Files in Filesystem (MAST)", tools="reset,hover,box_zoom,wheel_zoom", tooltips="@datestr: @total_file_count", width=800, x_axis_label='Date', y_axis_label='Number of Files Added') p1.line(x='date', y='total_file_count', source=source, color='#6C5B7B', line_dash='dashed', line_width=3) p1.scatter(x='date', 
y='total_file_count', source=source, color='#C85108', size=10) disable_scientific_notation(p1) - tab1 = Panel(child=p1, title='Files Per Day') + tab1 = TabPanel(child=p1, title='Files Per Day') # Create separate tooltip for storage plot. # Show date and used and available storage together - p2 = figure(title="Available & Used Storage in Filesystem (MAST)", tools="reset,hover,box_zoom,wheel_zoom", tooltips="@datestr: @total_file_count", plot_width=800, x_axis_label='Date', y_axis_label='Disk Space (TB)') + p2 = figure(title="Available & Used Storage in Filesystem (MAST)", tools="reset,hover,box_zoom,wheel_zoom", tooltips="@datestr: @total_file_count", width=800, x_axis_label='Date', y_axis_label='Disk Space (TB)') p2.line(x='date', y='available', source=source, color='#C85108', line_dash='dashed', line_width=3, legend_label='Available Storage') p2.line(x='date', y='used', source=source, color='#355C7D', line_dash='dashed', line_width=3, legend_label='Used Storage') p2.scatter(x='date', y='available', source=source, color='#C85108', size=10) p2.scatter(x='date', y='used', source=source, color='#355C7D', size=10) disable_scientific_notation(p2) - tab2 = Panel(child=p2, title='Storage') + tab2 = TabPanel(child=p2, title='Storage') - p1.xaxis.formatter = DatetimeTickFormatter(hours=["%H:%M %d %B %Y"], - days=["%d %B %Y"], - months=["%d %B %Y"], - years=["%B %Y"], + p1.xaxis.formatter = DatetimeTickFormatter(hours="%H:%M %d %B %Y", + days="%d %B %Y", + months="%d %B %Y", + years="%B %Y" ) p1.xaxis.major_label_orientation = pi / 4 - p2.xaxis.formatter = DatetimeTickFormatter(hours=["%H:%M %d %B %Y"], - days=["%d %B %Y"], - months=["%d %B %Y"], - years=["%B %Y"], + p2.xaxis.formatter = DatetimeTickFormatter(hours="%H:%M %d %B %Y", + days="%d %B %Y", + months="%d %B %Y", + years="%B %Y" ) p2.xaxis.major_label_orientation = pi / 4 p2.legend.location = 'top_left' @@ -534,11 +533,11 @@ def make_panel(self, x_value, top, instrument, title, x_axis_label): data = pd.Series(dict(zip(x_value, top))).reset_index(name='top').rename(columns={'index': 'x'}) source = ColumnDataSource(data) - plot = figure(x_range=x_value, title=title, plot_width=850, tools="hover", tooltips="@x: @top", x_axis_label=x_axis_label) + plot = figure(x_range=x_value, title=title, width=850, tools="hover", tooltips="@x: @top", x_axis_label=x_axis_label) plot.vbar(x='x', top='top', source=source, width=0.9, color='#6C5B7B') plot.xaxis.major_label_orientation = pi / 4 disable_scientific_notation(plot) - tab = Panel(child=plot, title=instrument) + tab = TabPanel(child=plot, title=instrument) return tab @@ -603,7 +602,7 @@ def dashboard_exposure_count_by_filter(self): # Place the pie charts in a column/Panel, and append to the figure colplots = column(pie_fig, small_pie_fig) - tab = Panel(child=colplots, title=f'{instrument}') + tab = TabPanel(child=colplots, title=f'{instrument}') figures.append(tab) else: @@ -644,8 +643,8 @@ def dashboard_exposure_count_by_filter(self): lw_data['angle'] = lw_data['value'] / lw_data['value'].sum() * 2 * np.pi # Zoomed in version of the small contributors - sw_small = sw_data.loc[sw_data['value'] <0.5].copy() - lw_small = lw_data.loc[lw_data['value'] <0.5].copy() + sw_small = sw_data.loc[sw_data['value'] < 0.5].copy() + lw_small = lw_data.loc[lw_data['value'] < 0.5].copy() sw_small['angle'] = sw_small['value'] / sw_small['value'].sum() * 2 * np.pi lw_small['angle'] = lw_small['value'] / lw_small['value'].sum() * 2 * np.pi sw_small['colors'] = ['#bec4d4'] * len(sw_small) @@ -735,7 +734,6 @@ def 
dashboard_exposure_count_by_filter(self):
         show(p)
     """
-
     # Create pie charts for SW/LW, the main set of filters, and those that aren't used
     # as much.
     sw_pie_fig = create_filter_based_pie_chart("Percentage of observations using filter/pupil combinations: All Filters", sw_data)
@@ -747,8 +745,8 @@ def dashboard_exposure_count_by_filter(self):
             sw_colplots = column(sw_pie_fig, sw_small_pie_fig)
             lw_colplots = column(lw_pie_fig, lw_small_pie_fig)
-            tab_sw = Panel(child=sw_colplots, title=f'{instrument} SW')
-            tab_lw = Panel(child=lw_colplots, title=f'{instrument} LW')
+            tab_sw = TabPanel(child=sw_colplots, title=f'{instrument} SW')
+            tab_lw = TabPanel(child=lw_colplots, title=f'{instrument} LW')
             figures.append(tab_sw)
             figures.append(tab_lw)
@@ -765,14 +763,13 @@ def dashboard_exposure_count_by_filter(self):
             # Place the pie charts in a column/Panel, and append to the figure
             colplots = column(pie_fig, small_pie_fig)
-            tab = Panel(child=colplots, title=f'{instrument}')
+            tab = TabPanel(child=colplots, title=f'{instrument}')
             figures.append(tab)

         tabs = Tabs(tabs=figures)

         return tabs

-
     def dashboard_anomaly_per_instrument(self):
         """Create figure for number of anomalies for each JWST instrument.

@@ -785,18 +782,29 @@ def dashboard_anomaly_per_instrument(self):
         # Set title and figures list to make panels
         title = 'Anomaly Types per Instrument'
         figures = []
+        filter_kwargs = {}

-        # For unique instrument values, loop through data
-        # Find all entries for instrument/filetype combo
-        # Make figure and append it to list.
+        # Make a tab for each instrument
         for instrument in ANOMALY_CHOICES_PER_INSTRUMENT.keys():
-            data = build_table('{}_anomaly'.format(instrument))
-            data = data.drop(columns=['id', 'rootname', 'user'])
-            if not pd.isnull(self.delta_t) and not data.empty:
-                data = data[(data['flag_date'] >= (self.date - self.delta_t)) & (data['flag_date'] <= self.date)]
+            # only show data for currently marked anomalies and current instrument
+            filter_kwargs['root_file_info__instrument__iexact'] = instrument
+            queryset = Anomalies.objects.filter(**filter_kwargs)
+
+            # Convert the queryset to a Pandas DataFrame using only relevant columns
+            labels = [anomaly_keys for anomaly_keys, values in ANOMALY_CHOICES_PER_INSTRUMENT[instrument]]
+            data = pd.DataFrame.from_records(queryset.values(), columns=labels)
+
+            # Sum columns to generate the bokeh panel
             summed_anomaly_columns = data.sum(axis=0, numeric_only=True).to_frame(name='counts')
-            figures.append(self.make_panel(summed_anomaly_columns.index.values, summed_anomaly_columns['counts'], instrument, title, 'Anomaly Type'))

-        tabs = Tabs(tabs=figures)
+            # Create plot of zeroes if empty (looking at you, FGS)
+            if len(summed_anomaly_columns.index.values):
+                plot_columns = summed_anomaly_columns.index.values
+                summed_values = summed_anomaly_columns['counts']
+            else:
+                plot_columns = list(summed_anomaly_columns.index.values.base)
+                summed_values = np.zeros(len(plot_columns))

+            figures.append(self.make_panel(plot_columns, summed_values, instrument, title, 'Anomaly Type'))
+
+        tabs = Tabs(tabs=figures)
         return tabs
diff --git a/jwql/website/apps/jwql/clean_old_log_files.py b/jwql/website/apps/jwql/clean_old_log_files.py
new file mode 100644
index 000000000..dd0a6b95c
--- /dev/null
+++ b/jwql/website/apps/jwql/clean_old_log_files.py
@@ -0,0 +1,84 @@
+#!
/usr/bin/env python
+
+"""Clean old log files from the collection of log files
+
+Authors
+-------
+
+    - Bryan Hilbert
+
+Use
+---
+
+    To delete log files that are older than some threshold age:
+    ::
+
+        > python clean_old_log_files.py --time_limit 7  # days
+
+"""
+import argparse
+from datetime import datetime, timedelta
+import os
+import socket
+
+from jwql.utils.utils import get_config
+
+HOSTNAME = socket.gethostname()
+configs = get_config()
+LOG_BASE_DIR = configs['log_dir']
+
+
+def define_options():
+    """Create parser to take the time limit.
+
+    Returns
+    -------
+    parser : argparse.ArgumentParser
+        Parser containing time limit
+    """
+    usage = 'clean_old_log_files.py -t 14'
+    parser = argparse.ArgumentParser(usage=usage)
+    parser.add_argument('-t', '--time_limit', type=int, default=14,
+                        help='Time limit in days. Log files older than this will be deleted.')
+    return parser
+
+
+def run(time_limit=timedelta(days=14)):
+    """Look through log directories and delete log files that are older
+    than ``time_limit``, which defaults to 14 days.
+
+    Parameters
+    ----------
+    time_limit : datetime.timedelta
+        Files older than this time limit will be deleted
+    """
+    now = datetime.now()
+
+    if 'pljwql' in HOSTNAME:
+        subdir = 'ops'
+    elif 'tljwql' in HOSTNAME:
+        subdir = 'test'
+    else:
+        # This should cover the dev server as well as local machines
+        subdir = 'dev'
+
+    log_dir = os.path.join(LOG_BASE_DIR, subdir)
+    for logtype in os.scandir(log_dir):
+        if logtype.is_dir():
+            for item in os.scandir(logtype):
+                # We only try to delete log files produced by the machine on which
+                # this script is running. E.g. log files produced by the test server
+                # can only be deleted by running this script on the test server.
+                if HOSTNAME in item.name and item.name[-4:] == '.log':
+                    stat_result = item.stat()
+                    last_modified_time = datetime.fromtimestamp(stat_result.st_mtime)
+                    age = now - last_modified_time
+                    if age > time_limit:
+                        full_path = os.path.join(log_dir, logtype, item)
+                        os.remove(full_path)
+
+
+if __name__ == '__main__':
+    parser = define_options()
+    args = parser.parse_args()
+    run(timedelta(days=args.time_limit))
diff --git a/jwql/website/apps/jwql/data_containers.py b/jwql/website/apps/jwql/data_containers.py
index d2b62121c..4c9a6a742 100644
--- a/jwql/website/apps/jwql/data_containers.py
+++ b/jwql/website/apps/jwql/data_containers.py
@@ -43,6 +43,7 @@
 from django.conf import settings
 from django.contrib import messages
 from django.core.exceptions import ObjectDoesNotExist
+from django.db.models.query import QuerySet
 import numpy as np
 from operator import itemgetter
 import pandas as pd
@@ -59,6 +60,7 @@
 from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE, JWST_INSTRUMENT_NAMES
 from jwql.utils.constants import REPORT_KEYS_PER_INSTRUMENT
 from jwql.utils.constants import SUFFIXES_TO_ADD_ASSOCIATION, SUFFIXES_WITH_AVERAGED_INTS, QueryConfigKeys
+from jwql.utils.constants import ON_GITHUB_ACTIONS, ON_READTHEDOCS
 from jwql.utils.credentials import get_mast_token
 from jwql.utils.permissions import set_permissions
 from jwql.utils.utils import get_rootnames_for_instrument_proposal
@@ -68,15 +70,6 @@
 # a MAST query.
Mast._portal_api_connection.PAGESIZE = MAST_QUERY_LIMIT

-# astroquery.mast import that depends on value of auth_mast
-# this import has to be made before any other import of astroquery.mast
-ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~')
-
-# Determine if the code is being run as part of a Readthedocs build
-ON_READTHEDOCS = False
-if 'READTHEDOCS' in os.environ:  # pragma: no cover
-    ON_READTHEDOCS = os.environ['READTHEDOCS']
-

 if not ON_GITHUB_ACTIONS and not ON_READTHEDOCS:
     # These lines are needed in order to use the Django models in a standalone
@@ -368,6 +361,145 @@ def get_acknowledgements():
     return acknowledgements


+def get_additional_exposure_info(root_file_infos, image_info):
+    """Create dictionaries of basic exposure information from an exposure's
+    RootFileInfo entry, as well as header information. Originally designed to
+    be used in jwql.website.apps.jwql.views.view_image()
+
+    Parameters
+    ----------
+    root_file_infos : jwql.website.apps.jwql.models.RootFileInfo or django.db.models.query.QuerySet
+        RootFileInfo for a particular file base name, or a QuerySet of RootFileInfos for
+        an exposure base name.
+
+    image_info : dict
+        A dictionary containing various information for the given
+        ``file_root``.
+
+    Returns
+    -------
+    basic_info : dict
+        Dictionary of information about the file/exposure
+
+    additional_info : dict
+        Dictionary of extra information about the file/exposure
+    """
+    # Get headers from the file so we can pass along info that is common to all
+    # suffixes. The order of possible_suffixes_to_use is intentional, because the
+    # uncal file will not have info on the pipeline version used, and so we would
+    # rather grab information from the rate or cal files.
+    possible_suffixes_to_use = np.array(['rate', 'rateints', 'cal', 'calints', 'uncal'])
+    existing_suffixes = np.array([suffix in image_info['suffixes'] for suffix in possible_suffixes_to_use])
+
+    if isinstance(root_file_infos, QuerySet):
+        root_file_info = root_file_infos[0]
+        filter_value = '/'.join(set([e.filter for e in root_file_infos]))
+        pupil_value = '/'.join(set([e.pupil for e in root_file_infos]))
+        grating_value = '/'.join(set([e.grating for e in root_file_infos]))
+    elif isinstance(root_file_infos, RootFileInfo):
+        root_file_info = root_file_infos
+        filter_value = root_file_info.filter
+        pupil_value = root_file_info.pupil
+        grating_value = root_file_info.grating
+
+    # Initialize dictionary of file info to show at the top of the page, along
+    # with another for info that will be in the collapsible text box.
+    basic_info = {'exp_type': root_file_info.exp_type,
+                  'category': 'N/A',
+                  'visit_status': 'N/A',
+                  'subarray': root_file_info.subarray,
+                  'filter': filter_value
+                  }
+
+    # The order of the elements is important here: the webpage displays them in
+    # this order, which is chosen to group related keywords together.
+ if isinstance(root_file_infos, QuerySet): + additional_info = {'READPATT': root_file_info.read_patt, + 'TITLE': 'N/A', + 'NGROUPS': 'N/A', + 'PI_NAME': 'N/A', + 'NINTS': 'N/A', + 'TARGNAME': 'N/A', + 'EXPTIME': 'N/A', + 'TARG_RA': 'N/A', + 'CAL_VER': 'N/A', + 'TARG_DEC': 'N/A', + 'CRDS context': 'N/A', + 'PA_V3': 'N/A', + 'EXPSTART': root_file_info.expstart + } + elif isinstance(root_file_infos, RootFileInfo): + additional_info = {'READPATT': root_file_info.read_patt, + 'TITLE': 'N/A', + 'NGROUPS': 'N/A', + 'PI_NAME': 'N/A', + 'NINTS': 'N/A', + 'TARGNAME': 'N/A', + 'EXPTIME': 'N/A', + 'RA_REF': 'N/A', + 'CAL_VER': 'N/A', + 'DEC_REF': 'N/A', + 'CRDS context': 'N/A', + 'ROLL_REF': 'N/A', + 'EXPSTART': root_file_info.expstart + } + + # Deal with instrument-specific parameters + if root_file_info.instrument == 'NIRSpec': + basic_info['grating'] = grating_value + + if root_file_info.instrument in ['NIRCam', 'NIRISS']: + basic_info['pupil'] = pupil_value + + # If any of the desired files are present, get the headers and populate the header + # info dictionary + if any(existing_suffixes): + suffix = possible_suffixes_to_use[existing_suffixes][0] + filename = f'{root_file_info.root_name}_{suffix}.fits' + + # get_image_info() has already globbed over the directory with the files and + # returned the list of existing suffixes, so we shouldn't need to check for + # file existence here. + file_path = filesystem_path(filename, check_existence=True) + + header = fits.getheader(file_path) + header_sci = fits.getheader(file_path, 1) + + basic_info['category'] = header['CATEGORY'] + basic_info['visit_status'] = header['VISITSTA'] + additional_info['NGROUPS'] = header['NGROUPS'] + additional_info['NINTS'] = header['NINTS'] + additional_info['EXPTIME'] = header['EFFEXPTM'] + additional_info['TITLE'] = header['TITLE'] + additional_info['PI_NAME'] = header['PI_NAME'] + additional_info['TARGNAME'] = header['TARGPROP'] + + # For the exposure level (i.e. multiple files) present the target + # RA and Dec. For the image level, give RA_REF, DEC_REF, since those + # are specific to the detector. Similarly, for the exposure level, show + # PA_V3, which applies to all detectors. At the image level, show + # ROLL_REF, which is detector-specific. + if isinstance(root_file_infos, QuerySet): + additional_info['TARG_RA'] = header['TARG_RA'] + additional_info['TARG_DEC'] = header['TARG_DEC'] + additional_info['PA_V3'] = header_sci['PA_V3'] + elif isinstance(root_file_infos, RootFileInfo): + additional_info['RA_REF'] = header_sci['RA_REF'] + additional_info['DEC_REF'] = header_sci['DEC_REF'] + additional_info['ROLL_REF'] = header_sci['ROLL_REF'] + + additional_info['CAL_VER'] = 'N/A' + additional_info['CRDS context'] = 'N/A' + + # Pipeline version and CRDS context info are not in uncal files + if suffix != 'uncal': + additional_info['CAL_VER'] = header['CAL_VER'] + additional_info['CRDS context'] = header['CRDS_CTX'] + + return basic_info, additional_info + + def get_all_proposals(): """Return a list of all proposals that exist in the filesystem. 
@@ -412,8 +544,7 @@ def get_available_suffixes(all_suffixes, return_untracked=True):
             untracked_suffixes = set(all_suffixes)
     for poss_suffix in EXPOSURE_PAGE_SUFFIX_ORDER:
         if 'crf' not in poss_suffix:
-            if (poss_suffix in all_suffixes
-                    and poss_suffix not in suffixes):
+            if (poss_suffix in all_suffixes and poss_suffix not in suffixes):
                 suffixes.append(poss_suffix)
                 untracked_suffixes.remove(poss_suffix)
         else:
@@ -423,8 +554,7 @@
             # So in this case, we strip the e.g. o001 from the
             # suffixes and check which list elements match.
             for image_suffix in all_suffixes:
-                if (image_suffix.endswith(poss_suffix)
-                        and image_suffix not in suffixes):
+                if (image_suffix.endswith(poss_suffix) and image_suffix not in suffixes):
                     suffixes.append(image_suffix)
                     untracked_suffixes.remove(image_suffix)

@@ -1391,7 +1521,7 @@ def get_preview_images_by_rootname(rootname):


 def get_proposals_by_category(instrument):
-    """Return a dictionary of program numbers based on category type
+    """Return a dictionary mapping program numbers to program categories

     Parameters
     ----------
     instrument : str
@@ -1400,24 +1530,16 @@
     Returns
     -------
-    category_sorted_dict : dict
-        Dictionary with category as the key and a list of program id's as the value
+    proposals_by_category : dict
+        Dictionary with program number as the key and program category as the value
     """
+    tap_service = vo.dal.TAPService("https://vao.stsci.edu/caomtap/tapservice.aspx")
+    tap_results = tap_service.search(f"""select distinct prpID,prpProject from CaomObservation where collection='JWST'
+                                     and maxLevel>0 and insName like '{instrument.lower()}%'""")
+    # Put the results into an astropy Table
+    prop_table = tap_results.to_table()

-    service = "Mast.Jwst.Filtered.{}".format(instrument)
-    params = {"columns": "program, category",
-              "filters": [{'paramName': 'instrume', 'values': [instrument]}]}
-    response = Mast.service_request_async(service, params)
-    results = response[0].json()['data']
-
-    if len(results) == MAST_QUERY_LIMIT:
-        logging.error(f"MAST_QUERY_LIMIT of {MAST_QUERY_LIMIT} reached for {instrument} in get_proposals_by_category")
-
-    # Get all unique dictionaries
-    unique_results = list(map(dict, set(tuple(sorted(sub.items())) for sub in results)))
-
-    # Make a dictionary of {program: category} to pull from
-    proposals_by_category = {d['program']: d['category'] for d in unique_results}
-
+    # Convert to a dictionary
+    proposals_by_category = {int(d['prpID']): d['prpProject'] for d in prop_table}
     return proposals_by_category
diff --git a/jwql/website/apps/jwql/migrations/0017_nirspecreadnoisestats_nirspecreadnoisequeryhistory_and_more.py b/jwql/website/apps/jwql/migrations/0017_nirspecreadnoisestats_nirspecreadnoisequeryhistory_and_more.py
new file mode 100644
index 000000000..b963ad7ad
--- /dev/null
+++ b/jwql/website/apps/jwql/migrations/0017_nirspecreadnoisestats_nirspecreadnoisequeryhistory_and_more.py
@@ -0,0 +1,384 @@
+# Generated by Django 4.2.5 on 2024-02-23 16:50
+
+import django.contrib.postgres.fields
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('jwql', '0016_anomalies_bright_object_not_a_short_and_more'),
+    ]
+
+    operations = [
+        migrations.CreateModel(
+            name='NIRSpecReadnoiseStats',
+            fields=[
+                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+                ('uncal_filename', models.CharField(blank=True, null=True)),
+                ('aperture', models.CharField(blank=True, null=True)),
('detector', models.CharField(blank=True, null=True)), + ('subarray', models.CharField(blank=True, null=True)), + ('read_pattern', models.CharField(blank=True, null=True)), + ('nints', models.CharField(blank=True, null=True)), + ('ngroups', models.CharField(blank=True, null=True)), + ('expstart', models.CharField(blank=True, null=True)), + ('readnoise_filename', models.CharField(blank=True, null=True)), + ('full_image_mean', models.FloatField(blank=True, null=True)), + ('full_image_stddev', models.FloatField(blank=True, null=True)), + ('full_image_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('full_image_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('readnoise_diff_image', models.CharField(blank=True, null=True)), + ('diff_image_mean', models.FloatField(blank=True, null=True)), + ('diff_image_stddev', models.FloatField(blank=True, null=True)), + ('diff_image_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('diff_image_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('entry_date', models.DateTimeField(blank=True, null=True)), + ('amp1_mean', models.FloatField(blank=True, null=True)), + ('amp1_stddev', models.FloatField(blank=True, null=True)), + ('amp1_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp1_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp2_mean', models.FloatField(blank=True, null=True)), + ('amp2_stddev', models.FloatField(blank=True, null=True)), + ('amp2_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp2_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp3_mean', models.FloatField(blank=True, null=True)), + ('amp3_stddev', models.FloatField(blank=True, null=True)), + ('amp3_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp3_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp4_mean', models.FloatField(blank=True, null=True)), + ('amp4_stddev', models.FloatField(blank=True, null=True)), + ('amp4_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp4_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ], + options={ + 'db_table': 'nirspec_readnoise_stats', + 'managed': True, + 'unique_together': {('id', 'entry_date')}, + }, + ), + migrations.CreateModel( + name='NIRSpecReadnoiseQueryHistory', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('instrument', models.CharField(blank=True, null=True)), + ('aperture', models.CharField(blank=True, null=True)), + ('start_time_mjd', models.FloatField(blank=True, null=True)), + ('end_time_mjd', models.FloatField(blank=True, null=True)), + ('entries_found', models.IntegerField(blank=True, null=True)), + ('files_found', models.IntegerField(blank=True, null=True)), + ('run_monitor', models.BooleanField(blank=True, null=True)), + ('entry_date', models.DateTimeField(blank=True, null=True)), + ], + options={ + 'db_table': 'nirspec_readnoise_query_history', + 'managed': True, + 'unique_together': {('id', 'entry_date')}, + }, + ), + migrations.CreateModel( + 
name='NIRISSReadnoiseStats', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('uncal_filename', models.CharField(blank=True, null=True)), + ('aperture', models.CharField(blank=True, null=True)), + ('detector', models.CharField(blank=True, null=True)), + ('subarray', models.CharField(blank=True, null=True)), + ('read_pattern', models.CharField(blank=True, null=True)), + ('nints', models.CharField(blank=True, null=True)), + ('ngroups', models.CharField(blank=True, null=True)), + ('expstart', models.CharField(blank=True, null=True)), + ('readnoise_filename', models.CharField(blank=True, null=True)), + ('full_image_mean', models.FloatField(blank=True, null=True)), + ('full_image_stddev', models.FloatField(blank=True, null=True)), + ('full_image_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('full_image_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('readnoise_diff_image', models.CharField(blank=True, null=True)), + ('diff_image_mean', models.FloatField(blank=True, null=True)), + ('diff_image_stddev', models.FloatField(blank=True, null=True)), + ('diff_image_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('diff_image_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('entry_date', models.DateTimeField(blank=True, null=True)), + ('amp1_mean', models.FloatField(blank=True, null=True)), + ('amp1_stddev', models.FloatField(blank=True, null=True)), + ('amp1_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp1_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp2_mean', models.FloatField(blank=True, null=True)), + ('amp2_stddev', models.FloatField(blank=True, null=True)), + ('amp2_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp2_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp3_mean', models.FloatField(blank=True, null=True)), + ('amp3_stddev', models.FloatField(blank=True, null=True)), + ('amp3_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp3_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp4_mean', models.FloatField(blank=True, null=True)), + ('amp4_stddev', models.FloatField(blank=True, null=True)), + ('amp4_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp4_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ], + options={ + 'db_table': 'niriss_readnoise_stats', + 'managed': True, + 'unique_together': {('id', 'entry_date')}, + }, + ), + migrations.CreateModel( + name='NIRISSReadnoiseQueryHistory', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('instrument', models.CharField(blank=True, null=True)), + ('aperture', models.CharField(blank=True, null=True)), + ('start_time_mjd', models.FloatField(blank=True, null=True)), + ('end_time_mjd', models.FloatField(blank=True, null=True)), + ('entries_found', models.IntegerField(blank=True, null=True)), + ('files_found', models.IntegerField(blank=True, null=True)), + ('run_monitor', 
models.BooleanField(blank=True, null=True)), + ('entry_date', models.DateTimeField(blank=True, null=True)), + ], + options={ + 'db_table': 'niriss_readnoise_query_history', + 'managed': True, + 'unique_together': {('id', 'entry_date')}, + }, + ), + migrations.CreateModel( + name='NIRCamReadnoiseStats', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('uncal_filename', models.CharField(blank=True, null=True)), + ('aperture', models.CharField(blank=True, null=True)), + ('detector', models.CharField(blank=True, null=True)), + ('subarray', models.CharField(blank=True, null=True)), + ('read_pattern', models.CharField(blank=True, null=True)), + ('nints', models.CharField(blank=True, null=True)), + ('ngroups', models.CharField(blank=True, null=True)), + ('expstart', models.CharField(blank=True, null=True)), + ('readnoise_filename', models.CharField(blank=True, null=True)), + ('full_image_mean', models.FloatField(blank=True, null=True)), + ('full_image_stddev', models.FloatField(blank=True, null=True)), + ('full_image_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('full_image_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('readnoise_diff_image', models.CharField(blank=True, null=True)), + ('diff_image_mean', models.FloatField(blank=True, null=True)), + ('diff_image_stddev', models.FloatField(blank=True, null=True)), + ('diff_image_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('diff_image_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('entry_date', models.DateTimeField(blank=True, null=True)), + ('amp1_mean', models.FloatField(blank=True, null=True)), + ('amp1_stddev', models.FloatField(blank=True, null=True)), + ('amp1_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp1_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp2_mean', models.FloatField(blank=True, null=True)), + ('amp2_stddev', models.FloatField(blank=True, null=True)), + ('amp2_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp2_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp3_mean', models.FloatField(blank=True, null=True)), + ('amp3_stddev', models.FloatField(blank=True, null=True)), + ('amp3_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp3_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp4_mean', models.FloatField(blank=True, null=True)), + ('amp4_stddev', models.FloatField(blank=True, null=True)), + ('amp4_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp4_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ], + options={ + 'db_table': 'nircam_readnoise_stats', + 'managed': True, + 'unique_together': {('id', 'entry_date')}, + }, + ), + migrations.CreateModel( + name='NIRCamReadnoiseQueryHistory', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('instrument', models.CharField(blank=True, null=True)), + ('aperture', models.CharField(blank=True, null=True)), + ('start_time_mjd', 
models.FloatField(blank=True, null=True)), + ('end_time_mjd', models.FloatField(blank=True, null=True)), + ('entries_found', models.IntegerField(blank=True, null=True)), + ('files_found', models.IntegerField(blank=True, null=True)), + ('run_monitor', models.BooleanField(blank=True, null=True)), + ('entry_date', models.DateTimeField(blank=True, null=True)), + ], + options={ + 'db_table': 'nircam_readnoise_query_history', + 'managed': True, + 'unique_together': {('id', 'entry_date')}, + }, + ), + migrations.CreateModel( + name='NIRCamClawStats', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('entry_date', models.DateTimeField(blank=True, null=True)), + ('filename', models.CharField(blank=True, null=True)), + ('proposal', models.CharField(blank=True, null=True)), + ('obs', models.CharField(blank=True, null=True)), + ('detector', models.CharField(blank=True, null=True)), + ('filter', models.CharField(blank=True, null=True)), + ('pupil', models.CharField(blank=True, null=True)), + ('expstart', models.CharField(blank=True, null=True)), + ('expstart_mjd', models.FloatField(blank=True, null=True)), + ('effexptm', models.FloatField(blank=True, null=True)), + ('ra', models.FloatField(blank=True, null=True)), + ('dec', models.FloatField(blank=True, null=True)), + ('pa_v3', models.FloatField(blank=True, null=True)), + ('mean', models.FloatField(blank=True, null=True)), + ('median', models.FloatField(blank=True, null=True)), + ('stddev', models.FloatField(blank=True, null=True)), + ('frac_masked', models.FloatField(blank=True, null=True)), + ('skyflat_filename', models.CharField(blank=True, null=True)), + ], + options={ + 'db_table': 'nircam_claw_stats', + 'managed': True, + 'unique_together': {('id', 'entry_date')}, + }, + ), + migrations.CreateModel( + name='NIRCamClawQueryHistory', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('entry_date', models.DateTimeField(blank=True, null=True)), + ('instrument', models.CharField(blank=True, null=True)), + ('start_time_mjd', models.FloatField(blank=True, null=True)), + ('end_time_mjd', models.FloatField(blank=True, null=True)), + ('run_monitor', models.BooleanField(blank=True, null=True)), + ], + options={ + 'db_table': 'nircam_claw_query_history', + 'managed': True, + 'unique_together': {('id', 'entry_date')}, + }, + ), + migrations.CreateModel( + name='MIRIReadnoiseStats', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('uncal_filename', models.CharField(blank=True, null=True)), + ('aperture', models.CharField(blank=True, null=True)), + ('detector', models.CharField(blank=True, null=True)), + ('subarray', models.CharField(blank=True, null=True)), + ('read_pattern', models.CharField(blank=True, null=True)), + ('nints', models.CharField(blank=True, null=True)), + ('ngroups', models.CharField(blank=True, null=True)), + ('expstart', models.CharField(blank=True, null=True)), + ('readnoise_filename', models.CharField(blank=True, null=True)), + ('full_image_mean', models.FloatField(blank=True, null=True)), + ('full_image_stddev', models.FloatField(blank=True, null=True)), + ('full_image_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('full_image_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('readnoise_diff_image', models.CharField(blank=True, null=True)), + 
('diff_image_mean', models.FloatField(blank=True, null=True)), + ('diff_image_stddev', models.FloatField(blank=True, null=True)), + ('diff_image_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('diff_image_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('entry_date', models.DateTimeField(blank=True, null=True)), + ('amp1_mean', models.FloatField(blank=True, null=True)), + ('amp1_stddev', models.FloatField(blank=True, null=True)), + ('amp1_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp1_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp2_mean', models.FloatField(blank=True, null=True)), + ('amp2_stddev', models.FloatField(blank=True, null=True)), + ('amp2_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp2_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp3_mean', models.FloatField(blank=True, null=True)), + ('amp3_stddev', models.FloatField(blank=True, null=True)), + ('amp3_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp3_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp4_mean', models.FloatField(blank=True, null=True)), + ('amp4_stddev', models.FloatField(blank=True, null=True)), + ('amp4_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp4_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ], + options={ + 'db_table': 'miri_readnoise_stats', + 'managed': True, + 'unique_together': {('id', 'entry_date')}, + }, + ), + migrations.CreateModel( + name='MIRIReadnoiseQueryHistory', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('instrument', models.CharField(blank=True, null=True)), + ('aperture', models.CharField(blank=True, null=True)), + ('start_time_mjd', models.FloatField(blank=True, null=True)), + ('end_time_mjd', models.FloatField(blank=True, null=True)), + ('entries_found', models.IntegerField(blank=True, null=True)), + ('files_found', models.IntegerField(blank=True, null=True)), + ('run_monitor', models.BooleanField(blank=True, null=True)), + ('entry_date', models.DateTimeField(blank=True, null=True)), + ], + options={ + 'db_table': 'miri_readnoise_query_history', + 'managed': True, + 'unique_together': {('id', 'entry_date')}, + }, + ), + migrations.CreateModel( + name='FGSReadnoiseStats', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('uncal_filename', models.CharField(blank=True, null=True)), + ('aperture', models.CharField(blank=True, null=True)), + ('detector', models.CharField(blank=True, null=True)), + ('subarray', models.CharField(blank=True, null=True)), + ('read_pattern', models.CharField(blank=True, null=True)), + ('nints', models.CharField(blank=True, null=True)), + ('ngroups', models.CharField(blank=True, null=True)), + ('expstart', models.CharField(blank=True, null=True)), + ('readnoise_filename', models.CharField(blank=True, null=True)), + ('full_image_mean', models.FloatField(blank=True, null=True)), + ('full_image_stddev', models.FloatField(blank=True, null=True)), + ('full_image_n', 
django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('full_image_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('readnoise_diff_image', models.CharField(blank=True, null=True)), + ('diff_image_mean', models.FloatField(blank=True, null=True)), + ('diff_image_stddev', models.FloatField(blank=True, null=True)), + ('diff_image_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('diff_image_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('entry_date', models.DateTimeField(blank=True, null=True)), + ('amp1_mean', models.FloatField(blank=True, null=True)), + ('amp1_stddev', models.FloatField(blank=True, null=True)), + ('amp1_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp1_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp2_mean', models.FloatField(blank=True, null=True)), + ('amp2_stddev', models.FloatField(blank=True, null=True)), + ('amp2_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp2_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp3_mean', models.FloatField(blank=True, null=True)), + ('amp3_stddev', models.FloatField(blank=True, null=True)), + ('amp3_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp3_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp4_mean', models.FloatField(blank=True, null=True)), + ('amp4_stddev', models.FloatField(blank=True, null=True)), + ('amp4_n', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ('amp4_bin_centers', django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=None)), + ], + options={ + 'db_table': 'fgs_readnoise_stats', + 'managed': True, + 'unique_together': {('id', 'entry_date')}, + }, + ), + migrations.CreateModel( + name='FGSReadnoiseQueryHistory', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('instrument', models.CharField(blank=True, null=True)), + ('aperture', models.CharField(blank=True, null=True)), + ('start_time_mjd', models.FloatField(blank=True, null=True)), + ('end_time_mjd', models.FloatField(blank=True, null=True)), + ('entries_found', models.IntegerField(blank=True, null=True)), + ('files_found', models.IntegerField(blank=True, null=True)), + ('run_monitor', models.BooleanField(blank=True, null=True)), + ('entry_date', models.DateTimeField(blank=True, null=True)), + ], + options={ + 'db_table': 'fgs_readnoise_query_history', + 'managed': True, + 'unique_together': {('id', 'entry_date')}, + }, + ), + ] diff --git a/jwql/website/apps/jwql/migrations/0018_nircamclawstats_doy_nircamclawstats_total_bkg.py b/jwql/website/apps/jwql/migrations/0018_nircamclawstats_doy_nircamclawstats_total_bkg.py new file mode 100644 index 000000000..c5efd9125 --- /dev/null +++ b/jwql/website/apps/jwql/migrations/0018_nircamclawstats_doy_nircamclawstats_total_bkg.py @@ -0,0 +1,23 @@ +# Generated by Django 4.2.5 on 2024-02-23 16:51 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('jwql', '0017_nirspecreadnoisestats_nirspecreadnoisequeryhistory_and_more'), + ] + 
+ operations = [ + migrations.AddField( + model_name='nircamclawstats', + name='doy', + field=models.FloatField(blank=True, null=True), + ), + migrations.AddField( + model_name='nircamclawstats', + name='total_bkg', + field=models.FloatField(blank=True, null=True), + ), + ] diff --git a/jwql/website/apps/jwql/monitor_models/bad_pixel.py b/jwql/website/apps/jwql/monitor_models/bad_pixel.py new file mode 100644 index 000000000..463331deb --- /dev/null +++ b/jwql/website/apps/jwql/monitor_models/bad_pixel.py @@ -0,0 +1,220 @@ +"""Defines the models for the ``jwql`` bad pixel monitors. + +In Django, "a model is the single, definitive source of information +about your data. It contains the essential fields and behaviors of the +data you’re storing. Generally, each model maps to a single database +table" (from Django documentation). Each model contains fields, such +as character fields or date/time fields, that function like columns in +a data table. This module defines models that are used to store data +related to the JWQL monitors. + +Authors +------- + - Brian York +Use +--- + This module is used as such: + + :: + from monitor_models import MyModel + data = MyModel.objects.filter(name="JWQL") + +References +---------- + For more information please see: + ```https://docs.djangoproject.com/en/2.0/topics/db/models/``` +""" +# This is an auto-generated Django model module. +# Feel free to rename the models, but don't rename db_table values or field names. +from django.db import models +from django.contrib.postgres.fields import ArrayField + + +class FGSBadPixelQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + dark_start_time_mjd = models.FloatField(blank=True, null=True) + dark_end_time_mjd = models.FloatField(blank=True, null=True) + flat_start_time_mjd = models.FloatField(blank=True, null=True) + flat_end_time_mjd = models.FloatField(blank=True, null=True) + dark_files_found = models.IntegerField(blank=True, null=True) + flat_files_found = models.IntegerField(blank=True, null=True) + run_bpix_from_darks = models.BooleanField(blank=True, null=True) + run_bpix_from_flats = models.BooleanField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_bad_pixel_query_history' + unique_together = (('id', 'entry_date'),) + + +class FGSBadPixelStats(models.Model): + entry_date = models.DateTimeField(unique=True) + detector = models.CharField(blank=True, null=True) + x_coord = ArrayField(models.IntegerField()) + y_coord = ArrayField(models.IntegerField()) + type = models.CharField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. 
+ obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + baseline_file = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_bad_pixel_stats' + unique_together = (('id', 'entry_date'),) + + +class MIRIBadPixelQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + dark_start_time_mjd = models.FloatField(blank=True, null=True) + dark_end_time_mjd = models.FloatField(blank=True, null=True) + flat_start_time_mjd = models.FloatField(blank=True, null=True) + flat_end_time_mjd = models.FloatField(blank=True, null=True) + dark_files_found = models.IntegerField(blank=True, null=True) + flat_files_found = models.IntegerField(blank=True, null=True) + run_bpix_from_darks = models.BooleanField(blank=True, null=True) + run_bpix_from_flats = models.BooleanField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_bad_pixel_query_history' + unique_together = (('id', 'entry_date'),) + + +class MIRIBadPixelStats(models.Model): + entry_date = models.DateTimeField(unique=True) + detector = models.CharField(blank=True, null=True) + x_coord = ArrayField(models.IntegerField()) + y_coord = ArrayField(models.IntegerField()) + type = models.CharField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + baseline_file = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_bad_pixel_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRCamBadPixelQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + dark_start_time_mjd = models.FloatField(blank=True, null=True) + dark_end_time_mjd = models.FloatField(blank=True, null=True) + flat_start_time_mjd = models.FloatField(blank=True, null=True) + flat_end_time_mjd = models.FloatField(blank=True, null=True) + dark_files_found = models.IntegerField(blank=True, null=True) + flat_files_found = models.IntegerField(blank=True, null=True) + run_bpix_from_darks = models.BooleanField(blank=True, null=True) + run_bpix_from_flats = models.BooleanField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_bad_pixel_query_history' + unique_together = (('id', 'entry_date'),) + + +class NIRCamBadPixelStats(models.Model): + entry_date = models.DateTimeField(unique=True) + detector = models.CharField(blank=True, null=True) + x_coord = ArrayField(models.IntegerField()) + y_coord = ArrayField(models.IntegerField()) + type = models.CharField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. 
+ obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + baseline_file = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_bad_pixel_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRISSBadPixelQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + dark_start_time_mjd = models.FloatField(blank=True, null=True) + dark_end_time_mjd = models.FloatField(blank=True, null=True) + flat_start_time_mjd = models.FloatField(blank=True, null=True) + flat_end_time_mjd = models.FloatField(blank=True, null=True) + dark_files_found = models.IntegerField(blank=True, null=True) + flat_files_found = models.IntegerField(blank=True, null=True) + run_bpix_from_darks = models.BooleanField(blank=True, null=True) + run_bpix_from_flats = models.BooleanField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_bad_pixel_query_history' + unique_together = (('id', 'entry_date'),) + + +class NIRISSBadPixelStats(models.Model): + entry_date = models.DateTimeField(unique=True) + detector = models.CharField(blank=True, null=True) + x_coord = ArrayField(models.IntegerField()) + y_coord = ArrayField(models.IntegerField()) + type = models.CharField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + baseline_file = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_bad_pixel_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRSpecBadPixelQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + dark_start_time_mjd = models.FloatField(blank=True, null=True) + dark_end_time_mjd = models.FloatField(blank=True, null=True) + flat_start_time_mjd = models.FloatField(blank=True, null=True) + flat_end_time_mjd = models.FloatField(blank=True, null=True) + dark_files_found = models.IntegerField(blank=True, null=True) + flat_files_found = models.IntegerField(blank=True, null=True) + run_bpix_from_darks = models.BooleanField(blank=True, null=True) + run_bpix_from_flats = models.BooleanField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_bad_pixel_query_history' + unique_together = (('id', 'entry_date'),) + + +class NIRSpecBadPixelStats(models.Model): + entry_date = models.DateTimeField(unique=True) + detector = models.CharField(blank=True, null=True) + x_coord = ArrayField(models.IntegerField()) + y_coord = ArrayField(models.IntegerField()) + type = models.CharField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. 
+ obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + baseline_file = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_bad_pixel_stats' + unique_together = (('id', 'entry_date'),) diff --git a/jwql/website/apps/jwql/monitor_models/bias.py b/jwql/website/apps/jwql/monitor_models/bias.py new file mode 100644 index 000000000..c8245b9fe --- /dev/null +++ b/jwql/website/apps/jwql/monitor_models/bias.py @@ -0,0 +1,165 @@ +"""Defines the models for the ``jwql`` monitors. + +In Django, "a model is the single, definitive source of information +about your data. It contains the essential fields and behaviors of the +data you’re storing. Generally, each model maps to a single database +table" (from Django documentation). Each model contains fields, such +as character fields or date/time fields, that function like columns in +a data table. This module defines models that are used to store data +related to the JWQL monitors. + +Authors +------- + - Brian York +Use +--- + This module is used as such: + + :: + from monitor_models import MyModel + data = MyModel.objects.filter(name="JWQL") + +References +---------- + For more information please see: + ```https://docs.djangoproject.com/en/2.0/topics/db/models/``` +""" +# This is an auto-generated Django model module. +# Feel free to rename the models, but don't rename db_table values or field names. +from django.db import models +from django.contrib.postgres.fields import ArrayField + + +class NIRCamBiasQueryHistory(models.Model): + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + entries_found = models.IntegerField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_bias_query_history' + unique_together = (('id', 'entry_date'),) + + +class NIRCamBiasStats(models.Model): + aperture = models.CharField(blank=True, null=True) + uncal_filename = models.CharField(blank=True, null=True) + cal_filename = models.CharField(blank=True, null=True) + cal_image = models.CharField(blank=True, null=True) + expstart = models.CharField(blank=True, null=True) + mean = models.FloatField(blank=True, null=True) + median = models.FloatField(blank=True, null=True) + stddev = models.FloatField(blank=True, null=True) + collapsed_rows = ArrayField(models.FloatField()) + collapsed_columns = ArrayField(models.FloatField()) + counts = ArrayField(models.FloatField()) + bin_centers = ArrayField(models.FloatField()) + amp1_even_med = models.FloatField(blank=True, null=True) + amp1_odd_med = models.FloatField(blank=True, null=True) + amp2_even_med = models.FloatField(blank=True, null=True) + amp2_odd_med = models.FloatField(blank=True, null=True) + amp3_even_med = models.FloatField(blank=True, null=True) + amp3_odd_med = models.FloatField(blank=True, null=True) + amp4_even_med = models.FloatField(blank=True, null=True) + amp4_odd_med = models.FloatField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_bias_stats' + unique_together = (('id', 'entry_date'),) 
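For illustration, a minimal query sketch against the `NIRCamBiasStats` model defined above; the aperture name is an assumed example.

.. code-block:: python

    from jwql.website.apps.jwql.monitor_models.bias import NIRCamBiasStats

    # Most recent bias entry for one aperture (aperture name is an assumed example)
    latest = (NIRCamBiasStats.objects
              .filter(aperture__iexact='NRCA1_FULL')
              .order_by('-entry_date')
              .first())
    if latest is not None:
        print(latest.expstart, latest.mean, latest.median, latest.stddev)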
+ + +class NIRISSBiasQueryHistory(models.Model): + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + entries_found = models.IntegerField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_bias_query_history' + unique_together = (('id', 'entry_date'),) + + +class NIRISSBiasStats(models.Model): + aperture = models.CharField(blank=True, null=True) + uncal_filename = models.CharField(blank=True, null=True) + cal_filename = models.CharField(blank=True, null=True) + cal_image = models.CharField(blank=True, null=True) + expstart = models.CharField(blank=True, null=True) + mean = models.FloatField(blank=True, null=True) + median = models.FloatField(blank=True, null=True) + stddev = models.FloatField(blank=True, null=True) + collapsed_rows = ArrayField(models.FloatField()) + collapsed_columns = ArrayField(models.FloatField()) + counts = ArrayField(models.FloatField()) + bin_centers = ArrayField(models.FloatField()) + amp1_even_med = models.FloatField(blank=True, null=True) + amp1_odd_med = models.FloatField(blank=True, null=True) + amp2_even_med = models.FloatField(blank=True, null=True) + amp2_odd_med = models.FloatField(blank=True, null=True) + amp3_even_med = models.FloatField(blank=True, null=True) + amp3_odd_med = models.FloatField(blank=True, null=True) + amp4_even_med = models.FloatField(blank=True, null=True) + amp4_odd_med = models.FloatField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_bias_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRSpecBiasQueryHistory(models.Model): + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + entries_found = models.IntegerField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_bias_query_history' + unique_together = (('id', 'entry_date'),) + + +class NIRSpecBiasStats(models.Model): + aperture = models.CharField(blank=True, null=True) + uncal_filename = models.CharField(blank=True, null=True) + cal_filename = models.CharField(blank=True, null=True) + cal_image = models.CharField(blank=True, null=True) + expstart = models.CharField(blank=True, null=True) + mean = models.FloatField(blank=True, null=True) + median = models.FloatField(blank=True, null=True) + stddev = models.FloatField(blank=True, null=True) + collapsed_rows = ArrayField(models.FloatField()) + collapsed_columns = ArrayField(models.FloatField()) + counts = ArrayField(models.FloatField()) + bin_centers = ArrayField(models.FloatField()) + amp1_even_med = models.FloatField(blank=True, null=True) + amp1_odd_med = models.FloatField(blank=True, null=True) + amp2_even_med = models.FloatField(blank=True, null=True) + amp2_odd_med = models.FloatField(blank=True, null=True) + amp3_even_med = models.FloatField(blank=True, null=True) + amp3_odd_med = 
models.FloatField(blank=True, null=True) + amp4_even_med = models.FloatField(blank=True, null=True) + amp4_odd_med = models.FloatField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_bias_stats' + unique_together = (('id', 'entry_date'),) diff --git a/jwql/website/apps/jwql/monitor_models/claw.py b/jwql/website/apps/jwql/monitor_models/claw.py new file mode 100644 index 000000000..a1c6c93e3 --- /dev/null +++ b/jwql/website/apps/jwql/monitor_models/claw.py @@ -0,0 +1,70 @@ +"""Defines the models for the ``jwql`` monitors. + +In Django, "a model is the single, definitive source of information +about your data. It contains the essential fields and behaviors of the +data you’re storing. Generally, each model maps to a single database +table" (from Django documentation). Each model contains fields, such +as character fields or date/time fields, that function like columns in +a data table. This module defines models that are used to store data +related to the JWQL monitors. + +Authors +------- + - Brian York +Use +--- + This module is used as such: + + :: + from monitor_models import MyModel + data = MyModel.objects.filter(name="JWQL") + +References +---------- + For more information please see: + ```https://docs.djangoproject.com/en/2.0/topics/db/models/``` +""" +# This is an auto-generated Django model module. +# Feel free to rename the models, but don't rename db_table values or field names. +from django.db import models + + +class NIRCamClawQueryHistory(models.Model): + entry_date = models.DateTimeField(blank=True, null=True) + instrument = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_claw_query_history' + unique_together = (('id', 'entry_date'),) + + +class NIRCamClawStats(models.Model): + entry_date = models.DateTimeField(blank=True, null=True) + filename = models.CharField(blank=True, null=True) + proposal = models.CharField(blank=True, null=True) + obs = models.CharField(blank=True, null=True) + detector = models.CharField(blank=True, null=True) + filter = models.CharField(blank=True, null=True) + pupil = models.CharField(blank=True, null=True) + expstart = models.CharField(blank=True, null=True) + expstart_mjd = models.FloatField(blank=True, null=True) + effexptm = models.FloatField(blank=True, null=True) + ra = models.FloatField(blank=True, null=True) + dec = models.FloatField(blank=True, null=True) + pa_v3 = models.FloatField(blank=True, null=True) + mean = models.FloatField(blank=True, null=True) + median = models.FloatField(blank=True, null=True) + stddev = models.FloatField(blank=True, null=True) + frac_masked = models.FloatField(blank=True, null=True) + skyflat_filename = models.CharField(blank=True, null=True) + doy = models.FloatField(blank=True, null=True) + total_bkg = models.FloatField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_claw_stats' + unique_together = (('id', 'entry_date'),) diff --git a/jwql/website/apps/jwql/monitor_models/common.py b/jwql/website/apps/jwql/monitor_models/common.py new file mode 100644 index 000000000..d0bed2afc --- /dev/null +++ b/jwql/website/apps/jwql/monitor_models/common.py @@ -0,0 +1,354 @@ +"""Defines the models for the ``jwql`` common monitor database tables. 
+
+In Django, "a model is the single, definitive source of information
+about your data. It contains the essential fields and behaviors of the
+data you’re storing. Generally, each model maps to a single database
+table" (from Django documentation). Each model contains fields, such
+as character fields or date/time fields, that function like columns in
+a data table. This module defines models that are used to store data
+related to the JWQL monitors.
+
+    Usage
+    -----
+
+    JWQL uses the django database models for creating tables, updating table fields, adding
+    new data to tables, and retrieving data from tables. For instrument monitors, in particular,
+    there are a number of issues that may be relevant.
+
+    In general, django model documentation can be found
+    `on the django website <https://docs.djangoproject.com/en/stable/topics/db/models/>`_.
+    Unfortunately, finding a particular bit of documentation in django can be a challenge, so
+    a few quick-reference notes are provided below.
+
+    Retrieving Data
+    ---------------
+
+    Django retrieves data directly from its model tables. So, for example, if you want to
+    select data from the `MIRIMyMonitorStats` table, you must first import the relevant
+    object:
+
+    .. code-block:: python
+
+        from jwql.website.apps.jwql.monitor_models.my_monitor import MIRIMyMonitorStats
+
+    Then, you would access the database contents via the `objects` member of the class. For
+    example, to search the `MIRIMyMonitorStats` table for all entries matching a given
+    aperture, and to sort them with the most recent date at the top, you might do a query like
+    the following:
+
+    .. code-block:: python
+
+        aperture = "my_miri_aperture"
+
+        records = MIRIMyMonitorStats.objects.filter(aperture__iexact=aperture).order_by("-mjd_end").all()
+
+    In the above code,
+
+    * The `filter()` function selects matching records from the full table. You can use
+      multiple filter statements, or a single filter function with multiple filters. `filter()`
+      statements are always combined with an implicit AND.
+    * If you have a long filter statement and want to separate it from the query statement,
+      you can create a dictionary and add it in with the `**` prepended. The dictionary
+      equivalent to the above would be `{'aperture__iexact': aperture}`.
+    * The text before the double underscore is a field name, and the text afterwards describes
+      the type of comparison. `iexact` indicates "case-insensitive exact match". You can also
+      use a variety of standard comparison lookups (`contains`, `startswith`, `gte`, etc.).
+    * If you want to get only records that *don't* match a pattern, then you can use the
+      `exclude()` function, which otherwise operates exactly the same as `filter()`.
+    * In the `order_by()` function, the `-` at the start is used to reverse the sort order,
+      and `mjd_end` is the name of the field to be sorted by.
+    * The `all()` statement asks for every matching record, returned as an iterable QuerySet.
+      `get()` returns a single record (and raises an error if the query matches zero or
+      multiple records), `first()` returns only the first value, etc.
+
+    As an example of multiple filters, the code below:
+
+    .. code-block:: python
+
+        records = MIRIMyMonitorStats.objects.filter(aperture__iexact=ap, mjd_end__gte=60000)
+
+        filters = {
+            "aperture__iexact": ap,
+            "mjd_end__gte": 60000
+        }
+        records = MIRIMyMonitorStats.objects.filter(**filters)
+
+    shows two different ways of combining a search for a particular aperture *and* only data
+    taken more recently than MJD=60000.
+
+    Note that django executes queries lazily, meaning that it will only actually *do* the
+    query when it needs the results.
+    The above statement, for example, will not actually
+    run the query. Instead, it will be run when you operate on it, such as
+
+    * Getting the length of the result with e.g. `len(records)`
+    * Printing out any of the results
+    * Asking for the value of one of the fields (e.g. `records[3].aperture`)
+
+    Retrieving Specific Columns
+    ===========================
+
+    Django offers two ways of doing this. The first one is to use the `only()` function, which
+    immediately loads only the relevant columns. For example,
+
+    .. code-block:: python
+
+        records = MIRIMyMonitorStats.objects.only("aperture", "mjd_start", "relevant_item")
+
+    will immediately load only the three columns selected. The remaining columns are deferred:
+    they can still be accessed, but each access triggers an additional database query.
+    The other method is the `defer()` method, which loads every column *except* the ones listed.
+
+    Q Objects
+    =========
+
+    In order to make more complex queries, Django supplies "Q Objects", which are essentially
+    encapsulated filters which can be combined using logical operators. For more on this, see
+    `the django Q object documentation <https://docs.djangoproject.com/en/stable/topics/db/queries/#complex-lookups-with-q-objects>`_.
+
+    Storing Data
+    ------------
+
+    Django also uses the model tables (and objects) directly for storing new data. For example,
+    if you have a monitor table defined as below:
+
+    .. code-block:: python
+
+        from django.db import models
+        from django.contrib.postgres.fields import ArrayField
+
+        class NIRISSMyMonitorStats(models.Model):
+            aperture = models.CharField(blank=True, null=True)
+            mean = models.FloatField(blank=True, null=True)
+            median = models.FloatField(blank=True, null=True)
+            stddev = models.FloatField(blank=True, null=True)
+            counts = ArrayField(models.FloatField())
+            entry_date = models.DateTimeField(blank=True, null=True)
+
+            class Meta:
+                managed = True
+                db_table = 'niriss_my_monitor_stats'
+                unique_together = (('id', 'entry_date'),)
+
+    then you would create a new entry as follows:
+
+    .. code-block:: python
+
+        values = {
+            "aperture": "my_aperture",
+            "mean": float(mean),
+            "median": float(median),
+            "stddev": float(stddev),
+            "counts": list(counts.astype(float)),
+            "entry_date": datetime.datetime.now()
+        }
+
+        entry = NIRISSMyMonitorStats(**values)
+        entry.save()
+
+    There are (as usual) a few things to note above:
+
+    * Django doesn't have a built-in array data type, so you need to import `ArrayField` from
+      the PostgreSQL compatibility layer (`django.contrib.postgres.fields`). The ArrayField
+      takes, as a required argument, the type of data that makes up the array.
+    * In the Meta sub-class of the monitor class, the `db_table = 'niriss_my_monitor_stats'`
+      statement is required so that django knows which database table the model maps to.
+    * The `float()` casts are required because the database interface doesn't understand
+      numpy data types.
+    * The `list()` cast is required because the database interface doesn't understand the
+      numpy `ndarray` data type.
+
+Authors
+-------
+    - Brian York
+Use
+---
+    This module is used as such:
+
+    ::
+        from monitor_models import MyModel
+        data = MyModel.objects.filter(name="JWQL")
+
+References
+----------
+    For more information please see:
+        ```https://docs.djangoproject.com/en/2.0/topics/db/models/```
+"""
+# This is an auto-generated Django model module.
+# Feel free to rename the models, but don't rename db_table values or field names.
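For illustration, a minimal sketch of the Q-object syntax mentioned in the docstring above, reusing its hypothetical `MIRIMyMonitorStats` model (and its hypothetical module path).

.. code-block:: python

    from django.db.models import Q

    from jwql.website.apps.jwql.monitor_models.my_monitor import MIRIMyMonitorStats  # hypothetical module

    # OR of two conditions: recent data, or anything from one aperture
    records = MIRIMyMonitorStats.objects.filter(
        Q(mjd_end__gte=60000) | Q(aperture__iexact="my_miri_aperture")
    )

    # & combines conditions with AND, and ~Q negates a condition
    records = MIRIMyMonitorStats.objects.filter(
        Q(mjd_end__gte=60000) & ~Q(aperture__iexact="my_miri_aperture")
    )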
+from django.db import models +from django.contrib.postgres.fields import ArrayField + + +class Monitor(models.Model): + monitor_name = models.CharField() + start_time = models.DateTimeField() + end_time = models.DateTimeField(blank=True, null=True) + status = models.TextField(blank=True, null=True) # This field type is a guess. + log_file = models.CharField() + + class Meta: + managed = True + db_table = 'monitor' + + +class CentralStorage(models.Model): + date = models.DateTimeField() + area = models.CharField() + size = models.FloatField() + used = models.FloatField() + available = models.FloatField() + + class Meta: + managed = True + db_table = 'central_storage' + + +class FilesystemCharacteristics(models.Model): + date = models.DateTimeField() + instrument = models.TextField() # This field type is a guess. + filter_pupil = models.TextField(blank=True, null=True) # This field type is a guess. + obs_per_filter_pupil = models.TextField(blank=True, null=True) # This field type is a guess. + + class Meta: + managed = True + db_table = 'filesystem_characteristics' + + +class FilesystemGeneral(models.Model): + date = models.DateTimeField(unique=True) + total_file_count = models.IntegerField() + total_file_size = models.FloatField() + fits_file_count = models.IntegerField() + fits_file_size = models.FloatField() + used = models.FloatField() + available = models.FloatField() + + class Meta: + managed = True + db_table = 'filesystem_general' + + +class FilesystemInstrument(models.Model): + date = models.DateTimeField() + instrument = models.TextField() # This field type is a guess. + filetype = models.TextField() # This field type is a guess. + count = models.IntegerField() + size = models.FloatField() + + class Meta: + managed = True + db_table = 'filesystem_instrument' + unique_together = (('date', 'instrument', 'filetype'),) + + +class FgsAnomaly(models.Model): + rootname = models.CharField() + flag_date = models.DateTimeField() + user = models.CharField() + cosmic_ray_shower = models.BooleanField() + diffraction_spike = models.BooleanField() + excessive_saturation = models.BooleanField() + guidestar_failure = models.BooleanField() + persistence = models.BooleanField() + crosstalk = models.BooleanField() + data_transfer_error = models.BooleanField() + ghost = models.BooleanField() + snowball = models.BooleanField() + other = models.BooleanField() + + class Meta: + managed = True + db_table = 'fgs_anomaly' + + +class MiriAnomaly(models.Model): + rootname = models.CharField() + flag_date = models.DateTimeField() + user = models.CharField() + cosmic_ray_shower = models.BooleanField() + diffraction_spike = models.BooleanField() + excessive_saturation = models.BooleanField() + guidestar_failure = models.BooleanField() + persistence = models.BooleanField() + column_pull_up = models.BooleanField() + internal_reflection = models.BooleanField() + row_pull_down = models.BooleanField() + other = models.BooleanField() + column_pull_down = models.BooleanField() + mrs_glow = models.BooleanField(db_column='MRS_Glow') # Field name made lowercase. + mrs_zipper = models.BooleanField(db_column='MRS_Zipper') # Field name made lowercase. + row_pull_up = models.BooleanField() + lrs_contamination = models.BooleanField(db_column='LRS_Contamination') # Field name made lowercase. 
+ tree_rings = models.BooleanField() + + class Meta: + managed = True + db_table = 'miri_anomaly' + + +class NircamAnomaly(models.Model): + rootname = models.CharField() + flag_date = models.DateTimeField() + user = models.CharField() + cosmic_ray_shower = models.BooleanField() + diffraction_spike = models.BooleanField() + excessive_saturation = models.BooleanField() + guidestar_failure = models.BooleanField() + persistence = models.BooleanField() + crosstalk = models.BooleanField() + data_transfer_error = models.BooleanField() + ghost = models.BooleanField() + snowball = models.BooleanField() + dragons_breath = models.BooleanField() + other = models.BooleanField() + scattered_light = models.BooleanField() + claws = models.BooleanField() + wisps = models.BooleanField() + tilt_event = models.BooleanField() + + class Meta: + managed = True + db_table = 'nircam_anomaly' + + +class NirissAnomaly(models.Model): + rootname = models.CharField() + flag_date = models.DateTimeField() + user = models.CharField() + cosmic_ray_shower = models.BooleanField() + diffraction_spike = models.BooleanField() + excessive_saturation = models.BooleanField() + guidestar_failure = models.BooleanField() + persistence = models.BooleanField() + crosstalk = models.BooleanField() + data_transfer_error = models.BooleanField() + ghost = models.BooleanField() + snowball = models.BooleanField() + other = models.BooleanField() + scattered_light = models.TextField() + light_saber = models.TextField() + + class Meta: + managed = True + db_table = 'niriss_anomaly' + + +class NirspecAnomaly(models.Model): + rootname = models.CharField() + flag_date = models.DateTimeField() + user = models.CharField() + cosmic_ray_shower = models.BooleanField() + diffraction_spike = models.BooleanField() + excessive_saturation = models.BooleanField() + guidestar_failure = models.BooleanField() + persistence = models.BooleanField() + crosstalk = models.BooleanField() + data_transfer_error = models.BooleanField() + ghost = models.BooleanField() + snowball = models.BooleanField() + dominant_msa_leakage = models.BooleanField(db_column='Dominant_MSA_Leakage') # Field name made lowercase. + optical_short = models.BooleanField() + other = models.BooleanField() + + class Meta: + managed = True + db_table = 'nirspec_anomaly' diff --git a/jwql/website/apps/jwql/monitor_models/cosmic_ray.py b/jwql/website/apps/jwql/monitor_models/cosmic_ray.py new file mode 100644 index 000000000..cdff2eb22 --- /dev/null +++ b/jwql/website/apps/jwql/monitor_models/cosmic_ray.py @@ -0,0 +1,190 @@ +"""Defines the models for the ``jwql`` cosmic ray monitors. + +In Django, "a model is the single, definitive source of information +about your data. It contains the essential fields and behaviors of the +data you’re storing. Generally, each model maps to a single database +table" (from Django documentation). Each model contains fields, such +as character fields or date/time fields, that function like columns in +a data table. This module defines models that are used to store data +related to the JWQL monitors. + +Authors +------- + - Brian York +Use +--- + This module is used as such: + + :: + from monitor_models import MyModel + data = MyModel.objects.filter(name="JWQL") + +References +---------- + For more information please see: + ```https://docs.djangoproject.com/en/2.0/topics/db/models/``` +""" +# This is an auto-generated Django model module. +# Feel free to rename the models, but don't rename db_table values or field names. 
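For illustration, a minimal aggregation sketch against the `MIRICosmicRayStats` model defined below; the 30-day window is an arbitrary example.

.. code-block:: python

    import datetime

    from django.db.models import Avg, Sum

    from jwql.website.apps.jwql.monitor_models.cosmic_ray import MIRICosmicRayStats

    # Mean jump rate and total jump count over an arbitrary 30-day window
    cutoff = datetime.datetime.now() - datetime.timedelta(days=30)
    totals = (MIRICosmicRayStats.objects
              .filter(obs_start_time__gte=cutoff)
              .aggregate(Avg('jump_rate'), Sum('jump_count')))
    print(totals['jump_rate__avg'], totals['jump_count__sum'])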
+from django.db import models +from django.contrib.postgres.fields import ArrayField + + +class FGSCosmicRayQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_cosmic_ray_query_history' + unique_together = (('id', 'entry_date'),) + + +class FGSCosmicRayStats(models.Model): + entry_date = models.DateTimeField(unique=True) + aperture = models.CharField(blank=True, null=True) + source_file = models.CharField(blank=True, null=True) + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + jump_count = models.IntegerField(blank=True, null=True) + jump_rate = models.FloatField(blank=True, null=True) + magnitude = models.TextField(blank=True, null=True) # This field type is a guess. + outliers = models.TextField(blank=True, null=True) # This field type is a guess. + + class Meta: + managed = True + db_table = 'fgs_cosmic_ray_stats' + unique_together = (('id', 'entry_date'),) + + +class MIRICosmicRayQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_cosmic_ray_query_history' + unique_together = (('id', 'entry_date'),) + + +class MIRICosmicRayStats(models.Model): + entry_date = models.DateTimeField(unique=True) + aperture = models.CharField(blank=True, null=True) + source_file = models.CharField(blank=True, null=True) + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + jump_count = models.IntegerField(blank=True, null=True) + jump_rate = models.FloatField(blank=True, null=True) + magnitude = models.TextField(blank=True, null=True) # This field type is a guess. + outliers = models.TextField(blank=True, null=True) # This field type is a guess. 
+ + class Meta: + managed = True + db_table = 'miri_cosmic_ray_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRCamCosmicRayQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_cosmic_ray_query_history' + unique_together = (('id', 'entry_date'),) + + +class NIRCamCosmicRayStats(models.Model): + entry_date = models.DateTimeField(unique=True) + aperture = models.CharField(blank=True, null=True) + source_file = models.CharField(blank=True, null=True) + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + jump_count = models.IntegerField(blank=True, null=True) + jump_rate = models.FloatField(blank=True, null=True) + magnitude = models.TextField(blank=True, null=True) # This field type is a guess. + outliers = models.TextField(blank=True, null=True) # This field type is a guess. + + class Meta: + managed = True + db_table = 'nircam_cosmic_ray_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRISSCosmicRayQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_cosmic_ray_query_history' + unique_together = (('id', 'entry_date'),) + + +class NIRISSCosmicRayStats(models.Model): + entry_date = models.DateTimeField(unique=True) + aperture = models.CharField(blank=True, null=True) + source_file = models.CharField(blank=True, null=True) + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + jump_count = models.IntegerField(blank=True, null=True) + jump_rate = models.FloatField(blank=True, null=True) + magnitude = models.TextField(blank=True, null=True) # This field type is a guess. + outliers = models.TextField(blank=True, null=True) # This field type is a guess. 
+ + class Meta: + managed = True + db_table = 'niriss_cosmic_ray_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRSpecCosmicRayQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_cosmic_ray_query_history' + unique_together = (('id', 'entry_date'),) + + +class NIRSpecCosmicRayStats(models.Model): + entry_date = models.DateTimeField(unique=True) + aperture = models.CharField(blank=True, null=True) + source_file = models.CharField(blank=True, null=True) + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + jump_count = models.IntegerField(blank=True, null=True) + jump_rate = models.FloatField(blank=True, null=True) + magnitude = models.TextField(blank=True, null=True) # This field type is a guess. + outliers = models.TextField(blank=True, null=True) # This field type is a guess. + + class Meta: + managed = True + db_table = 'nirspec_cosmic_ray_stats' + unique_together = (('id', 'entry_date'),) diff --git a/jwql/website/apps/jwql/monitor_models/dark_current.py b/jwql/website/apps/jwql/monitor_models/dark_current.py new file mode 100644 index 000000000..41ae1ccac --- /dev/null +++ b/jwql/website/apps/jwql/monitor_models/dark_current.py @@ -0,0 +1,365 @@ +"""Defines the models for the ``jwql`` dark current monitors. + +In Django, "a model is the single, definitive source of information +about your data. It contains the essential fields and behaviors of the +data you’re storing. Generally, each model maps to a single database +table" (from Django documentation). Each model contains fields, such +as character fields or date/time fields, that function like columns in +a data table. This module defines models that are used to store data +related to the JWQL monitors. + +Authors +------- + - Brian York +Use +--- + This module is used as such: + + :: + from monitor_models import MyModel + data = MyModel.objects.filter(name="JWQL") + +References +---------- + For more information please see: + ```https://docs.djangoproject.com/en/2.0/topics/db/models/``` +""" +# This is an auto-generated Django model module. +# Feel free to rename the models, but don't rename db_table values or field names. +from django.db import models +from django.contrib.postgres.fields import ArrayField + + +class FGSDarkDarkCurrent(models.Model): + entry_date = models.DateTimeField(unique=True) + aperture = models.CharField(blank=True, null=True) + amplifier = models.CharField(blank=True, null=True) + readpattern = models.CharField(blank=True, null=True) + mean = models.FloatField(blank=True, null=True) + stdev = models.FloatField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. 
+ obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + gauss_amplitude = ArrayField(models.FloatField()) + gauss_peak = ArrayField(models.FloatField()) + gauss_width = ArrayField(models.FloatField()) + gauss_chisq = models.FloatField(blank=True, null=True) + double_gauss_amplitude1 = ArrayField(models.FloatField()) + double_gauss_peak1 = ArrayField(models.FloatField()) + double_gauss_width1 = ArrayField(models.FloatField()) + double_gauss_amplitude2 = ArrayField(models.FloatField()) + double_gauss_peak2 = ArrayField(models.FloatField()) + double_gauss_width2 = ArrayField(models.FloatField()) + double_gauss_chisq = models.FloatField(blank=True, null=True) + mean_dark_image_file = models.CharField(blank=True, null=True) + hist_dark_values = ArrayField(models.FloatField()) + hist_amplitudes = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'fgs_dark_dark_current' + unique_together = (('id', 'entry_date'),) + + +class FGSDarkPixelStats(models.Model): + entry_date = models.DateTimeField(unique=True) + detector = models.CharField(blank=True, null=True) + x_coord = ArrayField(models.IntegerField()) + y_coord = ArrayField(models.IntegerField()) + type = models.CharField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + mean_dark_image_file = models.CharField(blank=True, null=True) + baseline_file = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_dark_pixel_stats' + unique_together = (('id', 'entry_date'),) + + +class FGSDarkQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + readpattern = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_dark_query_history' + unique_together = (('id', 'entry_date'),) + + +class MIRIDarkDarkCurrent(models.Model): + entry_date = models.DateTimeField(unique=True) + aperture = models.CharField(blank=True, null=True) + amplifier = models.CharField(blank=True, null=True) + readpattern = models.CharField(blank=True, null=True) + mean = models.FloatField(blank=True, null=True) + stdev = models.FloatField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. 
+ obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + gauss_amplitude = ArrayField(models.FloatField()) + gauss_peak = ArrayField(models.FloatField()) + gauss_width = ArrayField(models.FloatField()) + gauss_chisq = models.FloatField(blank=True, null=True) + double_gauss_amplitude1 = ArrayField(models.FloatField()) + double_gauss_peak1 = ArrayField(models.FloatField()) + double_gauss_width1 = ArrayField(models.FloatField()) + double_gauss_amplitude2 = ArrayField(models.FloatField()) + double_gauss_peak2 = ArrayField(models.FloatField()) + double_gauss_width2 = ArrayField(models.FloatField()) + double_gauss_chisq = models.FloatField(blank=True, null=True) + mean_dark_image_file = models.CharField(blank=True, null=True) + hist_dark_values = ArrayField(models.FloatField()) + hist_amplitudes = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'miri_dark_dark_current' + unique_together = (('id', 'entry_date'),) + + +class MIRIDarkPixelStats(models.Model): + entry_date = models.DateTimeField(unique=True) + detector = models.CharField(blank=True, null=True) + x_coord = ArrayField(models.IntegerField()) + y_coord = ArrayField(models.IntegerField()) + type = models.CharField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + mean_dark_image_file = models.CharField(blank=True, null=True) + baseline_file = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_dark_pixel_stats' + unique_together = (('id', 'entry_date'),) + + +class MIRIDarkQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + readpattern = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_dark_query_history' + unique_together = (('id', 'entry_date'),) + + +class NIRCamDarkDarkCurrent(models.Model): + entry_date = models.DateTimeField(unique=True) + aperture = models.CharField(blank=True, null=True) + amplifier = models.CharField(blank=True, null=True) + readpattern = models.CharField(blank=True, null=True) + mean = models.FloatField(blank=True, null=True) + stdev = models.FloatField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. 
+ obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + gauss_amplitude = ArrayField(models.FloatField()) + gauss_peak = ArrayField(models.FloatField()) + gauss_width = ArrayField(models.FloatField()) + gauss_chisq = models.FloatField(blank=True, null=True) + double_gauss_amplitude1 = ArrayField(models.FloatField()) + double_gauss_peak1 = ArrayField(models.FloatField()) + double_gauss_width1 = ArrayField(models.FloatField()) + double_gauss_amplitude2 = ArrayField(models.FloatField()) + double_gauss_peak2 = ArrayField(models.FloatField()) + double_gauss_width2 = ArrayField(models.FloatField()) + double_gauss_chisq = models.FloatField(blank=True, null=True) + mean_dark_image_file = models.CharField(blank=True, null=True) + hist_dark_values = ArrayField(models.FloatField()) + hist_amplitudes = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'nircam_dark_dark_current' + unique_together = (('id', 'entry_date'),) + + +class NIRCamDarkPixelStats(models.Model): + entry_date = models.DateTimeField(unique=True) + detector = models.CharField(blank=True, null=True) + x_coord = ArrayField(models.IntegerField()) + y_coord = ArrayField(models.IntegerField()) + type = models.CharField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + mean_dark_image_file = models.CharField(blank=True, null=True) + baseline_file = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_dark_pixel_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRCamDarkQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + readpattern = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_dark_query_history' + unique_together = (('id', 'entry_date'),) + + +class NIRISSDarkDarkCurrent(models.Model): + entry_date = models.DateTimeField(unique=True) + aperture = models.CharField(blank=True, null=True) + amplifier = models.CharField(blank=True, null=True) + readpattern = models.CharField(blank=True, null=True) + mean = models.FloatField(blank=True, null=True) + stdev = models.FloatField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. 
+ obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + gauss_amplitude = ArrayField(models.FloatField()) + gauss_peak = ArrayField(models.FloatField()) + gauss_width = ArrayField(models.FloatField()) + gauss_chisq = models.FloatField(blank=True, null=True) + double_gauss_amplitude1 = ArrayField(models.FloatField()) + double_gauss_peak1 = ArrayField(models.FloatField()) + double_gauss_width1 = ArrayField(models.FloatField()) + double_gauss_amplitude2 = ArrayField(models.FloatField()) + double_gauss_peak2 = ArrayField(models.FloatField()) + double_gauss_width2 = ArrayField(models.FloatField()) + double_gauss_chisq = models.FloatField(blank=True, null=True) + mean_dark_image_file = models.CharField(blank=True, null=True) + hist_dark_values = ArrayField(models.FloatField()) + hist_amplitudes = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'niriss_dark_dark_current' + unique_together = (('id', 'entry_date'),) + + +class NIRISSDarkPixelStats(models.Model): + entry_date = models.DateTimeField(unique=True) + detector = models.CharField(blank=True, null=True) + x_coord = ArrayField(models.IntegerField()) + y_coord = ArrayField(models.IntegerField()) + type = models.CharField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + mean_dark_image_file = models.CharField(blank=True, null=True) + baseline_file = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_dark_pixel_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRISSDarkQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + readpattern = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_dark_query_history' + unique_together = (('id', 'entry_date'),) + + +class NIRSpecDarkDarkCurrent(models.Model): + entry_date = models.DateTimeField(unique=True) + aperture = models.CharField(blank=True, null=True) + amplifier = models.CharField(blank=True, null=True) + readpattern = models.CharField(blank=True, null=True) + mean = models.FloatField(blank=True, null=True) + stdev = models.FloatField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. 
+ obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + gauss_amplitude = ArrayField(models.FloatField()) + gauss_peak = ArrayField(models.FloatField()) + gauss_width = ArrayField(models.FloatField()) + gauss_chisq = models.FloatField(blank=True, null=True) + double_gauss_amplitude1 = ArrayField(models.FloatField()) + double_gauss_peak1 = ArrayField(models.FloatField()) + double_gauss_width1 = ArrayField(models.FloatField()) + double_gauss_amplitude2 = ArrayField(models.FloatField()) + double_gauss_peak2 = ArrayField(models.FloatField()) + double_gauss_width2 = ArrayField(models.FloatField()) + double_gauss_chisq = models.FloatField(blank=True, null=True) + mean_dark_image_file = models.CharField(blank=True, null=True) + hist_dark_values = ArrayField(models.FloatField()) + hist_amplitudes = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'nirspec_dark_dark_current' + unique_together = (('id', 'entry_date'),) + + +class NIRSpecDarkPixelStats(models.Model): + entry_date = models.DateTimeField(unique=True) + detector = models.CharField(blank=True, null=True) + x_coord = ArrayField(models.IntegerField()) + y_coord = ArrayField(models.IntegerField()) + type = models.CharField(blank=True, null=True) + source_files = models.TextField(blank=True, null=True) # This field type is a guess. + obs_start_time = models.DateTimeField(blank=True, null=True) + obs_mid_time = models.DateTimeField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + mean_dark_image_file = models.CharField(blank=True, null=True) + baseline_file = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_dark_pixel_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRSpecDarkQueryHistory(models.Model): + entry_date = models.DateTimeField(unique=True) + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + readpattern = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_dark_query_history' + unique_together = (('id', 'entry_date'),) diff --git a/jwql/website/apps/jwql/monitor_models/edb.py b/jwql/website/apps/jwql/monitor_models/edb.py new file mode 100644 index 000000000..2cad15418 --- /dev/null +++ b/jwql/website/apps/jwql/monitor_models/edb.py @@ -0,0 +1,440 @@ +"""Defines the models for the ``jwql`` EDB monitors. + +In Django, "a model is the single, definitive source of information +about your data. It contains the essential fields and behaviors of the +data you’re storing. Generally, each model maps to a single database +table" (from Django documentation). Each model contains fields, such +as character fields or date/time fields, that function like columns in +a data table. This module defines models that are used to store data +related to the JWQL monitors. 
+ +Authors +------- + - Brian York +Use +--- + This module is used as such: + + :: + from monitor_models import MyModel + data = MyModel.objects.filter(name="JWQL") + +References +---------- + For more information please see: + ```https://docs.djangoproject.com/en/2.0/topics/db/models/``` +""" +# This is an auto-generated Django model module. +# Feel free to rename the models, but don't rename db_table values or field names. +from django.db import models +from django.contrib.postgres.fields import ArrayField + + +class FGSEdbBlocksStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_edb_blocks_stats' + unique_together = (('id', 'entry_date'),) + + +class FGSEdbDailyStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_edb_daily_stats' + unique_together = (('id', 'entry_date'),) + + +class FGSEdbEveryChangeStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + time = ArrayField(models.DateTimeField()) + mnemonic_value = ArrayField(models.FloatField()) + median = models.FloatField(blank=True, null=True) + stdev = models.FloatField(blank=True, null=True) + dependency_mnemonic = models.CharField(blank=True, null=True) + dependency_value = models.CharField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_edb_every_change_stats' + unique_together = (('id', 'entry_date'),) + + +class FGSEdbTimeIntervalStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_edb_time_interval_stats' + unique_together = (('id', 'entry_date'),) + + +class FGSEdbTimeStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_edb_time_stats' + unique_together = (('id', 'entry_date'),) + + +class MIRIEdbBlocksStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = 
ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_edb_blocks_stats' + unique_together = (('id', 'entry_date'),) + + +class MIRIEdbDailyStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_edb_daily_stats' + unique_together = (('id', 'entry_date'),) + + +class MIRIEdbEveryChangeStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + time = ArrayField(models.DateTimeField()) + mnemonic_value = ArrayField(models.FloatField()) + median = models.FloatField(blank=True, null=True) + stdev = models.FloatField(blank=True, null=True) + dependency_mnemonic = models.CharField(blank=True, null=True) + dependency_value = models.CharField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_edb_every_change_stats' + unique_together = (('id', 'entry_date'),) + + +class MIRIEdbTimeIntervalStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_edb_time_interval_stats' + unique_together = (('id', 'entry_date'),) + + +class MIRIEdbTimeStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_edb_time_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRCamEdbBlocksStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_edb_blocks_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRCamEdbDailyStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev 
= ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_edb_daily_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRCamEdbEveryChangeStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + time = ArrayField(models.DateTimeField()) + mnemonic_value = ArrayField(models.FloatField()) + median = models.FloatField(blank=True, null=True) + stdev = models.FloatField(blank=True, null=True) + dependency_mnemonic = models.CharField(blank=True, null=True) + dependency_value = models.CharField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_edb_every_change_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRCamEdbTimeIntervalStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_edb_time_interval_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRCamEdbTimeStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_edb_time_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRISSEdbBlocksStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_edb_blocks_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRISSEdbDailyStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_edb_daily_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRISSEdbEveryChangeStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + time = ArrayField(models.DateTimeField()) + mnemonic_value = ArrayField(models.FloatField()) + median = models.FloatField(blank=True, null=True) + stdev 
= models.FloatField(blank=True, null=True) + dependency_mnemonic = models.CharField(blank=True, null=True) + dependency_value = models.CharField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_edb_every_change_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRISSEdbTimeIntervalStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_edb_time_interval_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRISSEdbTimeStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_edb_time_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRSpecEdbBlocksStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_edb_blocks_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRSpecEdbDailyStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + median = ArrayField(models.FloatField()) + max = ArrayField(models.FloatField()) + min = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_edb_daily_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRSpecEdbEveryChangeStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + time = ArrayField(models.DateTimeField()) + mnemonic_value = ArrayField(models.FloatField()) + median = models.FloatField(blank=True, null=True) + stdev = models.FloatField(blank=True, null=True) + dependency_mnemonic = models.CharField(blank=True, null=True) + dependency_value = models.CharField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_edb_every_change_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRSpecEdbTimeIntervalStats(models.Model): + mnemonic = models.CharField(blank=True, null=True) + latest_query = models.DateTimeField(blank=True, null=True) + times = ArrayField(models.DateTimeField()) + data = ArrayField(models.FloatField()) + stdev = ArrayField(models.FloatField()) + 
median = ArrayField(models.FloatField())
+    max = ArrayField(models.FloatField())
+    min = ArrayField(models.FloatField())
+    entry_date = models.DateTimeField(blank=True, null=True)
+
+    class Meta:
+        managed = True
+        db_table = 'nirspec_edb_time_interval_stats'
+        unique_together = (('id', 'entry_date'),)
+
+
+class NIRSpecEdbTimeStats(models.Model):
+    mnemonic = models.CharField(blank=True, null=True)
+    latest_query = models.DateTimeField(blank=True, null=True)
+    times = ArrayField(models.DateTimeField())
+    data = ArrayField(models.FloatField())
+    stdev = ArrayField(models.FloatField())
+    entry_date = models.DateTimeField(blank=True, null=True)
+
+    class Meta:
+        managed = True
+        db_table = 'nirspec_edb_time_stats'
+        unique_together = (('id', 'entry_date'),)
diff --git a/jwql/website/apps/jwql/monitor_models/grating.py b/jwql/website/apps/jwql/monitor_models/grating.py
new file mode 100644
index 000000000..1c2029049
--- /dev/null
+++ b/jwql/website/apps/jwql/monitor_models/grating.py
@@ -0,0 +1,71 @@
+"""Defines the models for the ``jwql`` NIRSpec grating wheel monitor.
+
+In Django, "a model is the single, definitive source of information
+about your data. It contains the essential fields and behaviors of the
+data you’re storing. Generally, each model maps to a single database
+table" (from Django documentation). Each model contains fields, such
+as character fields or date/time fields, that function like columns in
+a data table. This module defines models that are used to store data
+related to the JWQL monitors.
+
+Authors
+-------
+    - Brian York
+Use
+---
+    This module is used as such:
+
+    ::
+        from monitor_models import MyModel
+        data = MyModel.objects.filter(name="JWQL")
+
+References
+----------
+    For more information please see:
+        ```https://docs.djangoproject.com/en/2.0/topics/db/models/```
+"""
+# This is an auto-generated Django model module.
+# Feel free to rename the models, but don't rename db_table values or field names.
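+# Example (illustrative sketch): assuming Django is configured for the jwql
+# website, the most recent grating-wheel telemetry entry can be retrieved with
+# the standard ORM ``latest()`` shortcut.
+#
+#     from jwql.website.apps.jwql.monitor_models.grating import NIRSpecGratingStats
+#
+#     newest = NIRSpecGratingStats.objects.latest('entry_date')
+#     print(newest.time, newest.g140h_inrsi_c_gwa_x_position)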
+from django.db import models
+from django.contrib.postgres.fields import ArrayField
+
+
+class NIRSpecGratingQueryHistory(models.Model):
+    start_time_mjd = models.FloatField(blank=True, null=True)
+    end_time_mjd = models.FloatField(blank=True, null=True)
+    run_monitor = models.BooleanField(blank=True, null=True)
+    entry_date = models.DateTimeField(blank=True, null=True)
+
+    class Meta:
+        managed = True
+        db_table = 'nirspec_grating_query_history'
+        unique_together = (('id', 'entry_date'),)
+
+
+class NIRSpecGratingStats(models.Model):
+    entry_date = models.DateTimeField(unique=True)
+    time = models.CharField(blank=True, null=True)
+    inrsh_gwa_adcmgain = models.FloatField(blank=True, null=True)
+    inrsh_gwa_adcmoffset = models.FloatField(blank=True, null=True)
+    inrsh_gwa_motor_vref = models.FloatField(blank=True, null=True)
+    prism_inrsi_c_gwa_x_position = models.FloatField(blank=True, null=True)
+    prism_inrsi_c_gwa_y_position = models.FloatField(blank=True, null=True)
+    mirror_inrsi_c_gwa_x_position = models.FloatField(blank=True, null=True)
+    mirror_inrsi_c_gwa_y_position = models.FloatField(blank=True, null=True)
+    g140h_inrsi_c_gwa_x_position = models.FloatField(blank=True, null=True)
+    g140h_inrsi_c_gwa_y_position = models.FloatField(blank=True, null=True)
+    g235h_inrsi_c_gwa_x_position = models.FloatField(blank=True, null=True)
+    g235h_inrsi_c_gwa_y_position = models.FloatField(blank=True, null=True)
+    g395h_inrsi_c_gwa_x_position = models.FloatField(blank=True, null=True)
+    g395h_inrsi_c_gwa_y_position = models.FloatField(blank=True, null=True)
+    g140m_inrsi_c_gwa_x_position = models.FloatField(blank=True, null=True)
+    g140m_inrsi_c_gwa_y_position = models.FloatField(blank=True, null=True)
+    g235m_inrsi_c_gwa_x_position = models.FloatField(blank=True, null=True)
+    g235m_inrsi_c_gwa_y_position = models.FloatField(blank=True, null=True)
+    g395m_inrsi_c_gwa_x_position = models.FloatField(blank=True, null=True)
+    g395m_inrsi_c_gwa_y_position = models.FloatField(blank=True, null=True)
+
+    class Meta:
+        managed = True
+        db_table = 'nirspec_grating_stats'
+        unique_together = (('id', 'entry_date'),)
diff --git a/jwql/website/apps/jwql/monitor_models/readnoise.py b/jwql/website/apps/jwql/monitor_models/readnoise.py
new file mode 100644
index 000000000..5e616aa71
--- /dev/null
+++ b/jwql/website/apps/jwql/monitor_models/readnoise.py
@@ -0,0 +1,325 @@
+"""Defines the models for the ``jwql`` readnoise monitors.
+
+In Django, "a model is the single, definitive source of information
+about your data. It contains the essential fields and behaviors of the
+data you’re storing. Generally, each model maps to a single database
+table" (from Django documentation). Each model contains fields, such
+as character fields or date/time fields, that function like columns in
+a data table. This module defines models that are used to store data
+related to the JWQL monitors.
+
+Authors
+-------
+    - Brian York
+Use
+---
+    This module is used as such:
+
+    ::
+        from monitor_models import MyModel
+        data = MyModel.objects.filter(name="JWQL")
+
+References
+----------
+    For more information please see:
+        ```https://docs.djangoproject.com/en/2.0/topics/db/models/```
+"""
+# This is an auto-generated Django model module.
+# Feel free to rename the models, but don't rename db_table values or field names.
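+# Note that the ArrayField columns below are PostgreSQL-specific, so these
+# models require a Postgres database backend. Illustrative sketch (the
+# aperture value is an assumed example) of rebuilding a stored readnoise
+# histogram from the saved bin centers and counts:
+#
+#     from jwql.website.apps.jwql.monitor_models.readnoise import FGSReadnoiseStats
+#
+#     stats = FGSReadnoiseStats.objects.filter(aperture='FGS1_FULL').latest('entry_date')
+#     for center, count in zip(stats.full_image_bin_centers, stats.full_image_n):
+#         print(center, count)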
+from django.db import models +from django.contrib.postgres.fields import ArrayField + + +class FGSReadnoiseQueryHistory(models.Model): + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + entries_found = models.IntegerField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'fgs_readnoise_query_history' + unique_together = (('id', 'entry_date'),) + + +class FGSReadnoiseStats(models.Model): + uncal_filename = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + detector = models.CharField(blank=True, null=True) + subarray = models.CharField(blank=True, null=True) + read_pattern = models.CharField(blank=True, null=True) + nints = models.CharField(blank=True, null=True) + ngroups = models.CharField(blank=True, null=True) + expstart = models.CharField(blank=True, null=True) + readnoise_filename = models.CharField(blank=True, null=True) + full_image_mean = models.FloatField(blank=True, null=True) + full_image_stddev = models.FloatField(blank=True, null=True) + full_image_n = ArrayField(models.FloatField()) + full_image_bin_centers = ArrayField(models.FloatField()) + readnoise_diff_image = models.CharField(blank=True, null=True) + diff_image_mean = models.FloatField(blank=True, null=True) + diff_image_stddev = models.FloatField(blank=True, null=True) + diff_image_n = ArrayField(models.FloatField()) + diff_image_bin_centers = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + amp1_mean = models.FloatField(blank=True, null=True) + amp1_stddev = models.FloatField(blank=True, null=True) + amp1_n = ArrayField(models.FloatField()) + amp1_bin_centers = ArrayField(models.FloatField()) + amp2_mean = models.FloatField(blank=True, null=True) + amp2_stddev = models.FloatField(blank=True, null=True) + amp2_n = ArrayField(models.FloatField()) + amp2_bin_centers = ArrayField(models.FloatField()) + amp3_mean = models.FloatField(blank=True, null=True) + amp3_stddev = models.FloatField(blank=True, null=True) + amp3_n = ArrayField(models.FloatField()) + amp3_bin_centers = ArrayField(models.FloatField()) + amp4_mean = models.FloatField(blank=True, null=True) + amp4_stddev = models.FloatField(blank=True, null=True) + amp4_n = ArrayField(models.FloatField()) + amp4_bin_centers = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'fgs_readnoise_stats' + unique_together = (('id', 'entry_date'),) + + +class MIRIReadnoiseQueryHistory(models.Model): + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + entries_found = models.IntegerField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_readnoise_query_history' + unique_together = (('id', 'entry_date'),) + + +class MIRIReadnoiseStats(models.Model): + uncal_filename = models.CharField(blank=True, null=True) + aperture = 
models.CharField(blank=True, null=True) + detector = models.CharField(blank=True, null=True) + subarray = models.CharField(blank=True, null=True) + read_pattern = models.CharField(blank=True, null=True) + nints = models.CharField(blank=True, null=True) + ngroups = models.CharField(blank=True, null=True) + expstart = models.CharField(blank=True, null=True) + readnoise_filename = models.CharField(blank=True, null=True) + full_image_mean = models.FloatField(blank=True, null=True) + full_image_stddev = models.FloatField(blank=True, null=True) + full_image_n = ArrayField(models.FloatField()) + full_image_bin_centers = ArrayField(models.FloatField()) + readnoise_diff_image = models.CharField(blank=True, null=True) + diff_image_mean = models.FloatField(blank=True, null=True) + diff_image_stddev = models.FloatField(blank=True, null=True) + diff_image_n = ArrayField(models.FloatField()) + diff_image_bin_centers = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + amp1_mean = models.FloatField(blank=True, null=True) + amp1_stddev = models.FloatField(blank=True, null=True) + amp1_n = ArrayField(models.FloatField()) + amp1_bin_centers = ArrayField(models.FloatField()) + amp2_mean = models.FloatField(blank=True, null=True) + amp2_stddev = models.FloatField(blank=True, null=True) + amp2_n = ArrayField(models.FloatField()) + amp2_bin_centers = ArrayField(models.FloatField()) + amp3_mean = models.FloatField(blank=True, null=True) + amp3_stddev = models.FloatField(blank=True, null=True) + amp3_n = ArrayField(models.FloatField()) + amp3_bin_centers = ArrayField(models.FloatField()) + amp4_mean = models.FloatField(blank=True, null=True) + amp4_stddev = models.FloatField(blank=True, null=True) + amp4_n = ArrayField(models.FloatField()) + amp4_bin_centers = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'miri_readnoise_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRCamReadnoiseQueryHistory(models.Model): + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + entries_found = models.IntegerField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nircam_readnoise_query_history' + unique_together = (('id', 'entry_date'),) + + +class NIRCamReadnoiseStats(models.Model): + uncal_filename = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + detector = models.CharField(blank=True, null=True) + subarray = models.CharField(blank=True, null=True) + read_pattern = models.CharField(blank=True, null=True) + nints = models.CharField(blank=True, null=True) + ngroups = models.CharField(blank=True, null=True) + expstart = models.CharField(blank=True, null=True) + readnoise_filename = models.CharField(blank=True, null=True) + full_image_mean = models.FloatField(blank=True, null=True) + full_image_stddev = models.FloatField(blank=True, null=True) + full_image_n = ArrayField(models.FloatField()) + full_image_bin_centers = ArrayField(models.FloatField()) + readnoise_diff_image = models.CharField(blank=True, null=True) + diff_image_mean = models.FloatField(blank=True, null=True) + diff_image_stddev = models.FloatField(blank=True, 
null=True) + diff_image_n = ArrayField(models.FloatField()) + diff_image_bin_centers = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + amp1_mean = models.FloatField(blank=True, null=True) + amp1_stddev = models.FloatField(blank=True, null=True) + amp1_n = ArrayField(models.FloatField()) + amp1_bin_centers = ArrayField(models.FloatField()) + amp2_mean = models.FloatField(blank=True, null=True) + amp2_stddev = models.FloatField(blank=True, null=True) + amp2_n = ArrayField(models.FloatField()) + amp2_bin_centers = ArrayField(models.FloatField()) + amp3_mean = models.FloatField(blank=True, null=True) + amp3_stddev = models.FloatField(blank=True, null=True) + amp3_n = ArrayField(models.FloatField()) + amp3_bin_centers = ArrayField(models.FloatField()) + amp4_mean = models.FloatField(blank=True, null=True) + amp4_stddev = models.FloatField(blank=True, null=True) + amp4_n = ArrayField(models.FloatField()) + amp4_bin_centers = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'nircam_readnoise_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRISSReadnoiseQueryHistory(models.Model): + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + entries_found = models.IntegerField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'niriss_readnoise_query_history' + unique_together = (('id', 'entry_date'),) + + +class NIRISSReadnoiseStats(models.Model): + uncal_filename = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + detector = models.CharField(blank=True, null=True) + subarray = models.CharField(blank=True, null=True) + read_pattern = models.CharField(blank=True, null=True) + nints = models.CharField(blank=True, null=True) + ngroups = models.CharField(blank=True, null=True) + expstart = models.CharField(blank=True, null=True) + readnoise_filename = models.CharField(blank=True, null=True) + full_image_mean = models.FloatField(blank=True, null=True) + full_image_stddev = models.FloatField(blank=True, null=True) + full_image_n = ArrayField(models.FloatField()) + full_image_bin_centers = ArrayField(models.FloatField()) + readnoise_diff_image = models.CharField(blank=True, null=True) + diff_image_mean = models.FloatField(blank=True, null=True) + diff_image_stddev = models.FloatField(blank=True, null=True) + diff_image_n = ArrayField(models.FloatField()) + diff_image_bin_centers = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + amp1_mean = models.FloatField(blank=True, null=True) + amp1_stddev = models.FloatField(blank=True, null=True) + amp1_n = ArrayField(models.FloatField()) + amp1_bin_centers = ArrayField(models.FloatField()) + amp2_mean = models.FloatField(blank=True, null=True) + amp2_stddev = models.FloatField(blank=True, null=True) + amp2_n = ArrayField(models.FloatField()) + amp2_bin_centers = ArrayField(models.FloatField()) + amp3_mean = models.FloatField(blank=True, null=True) + amp3_stddev = models.FloatField(blank=True, null=True) + amp3_n = ArrayField(models.FloatField()) + amp3_bin_centers = ArrayField(models.FloatField()) + amp4_mean = 
models.FloatField(blank=True, null=True) + amp4_stddev = models.FloatField(blank=True, null=True) + amp4_n = ArrayField(models.FloatField()) + amp4_bin_centers = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'niriss_readnoise_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRSpecReadnoiseQueryHistory(models.Model): + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + entries_found = models.IntegerField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_readnoise_query_history' + unique_together = (('id', 'entry_date'),) + + +class NIRSpecReadnoiseStats(models.Model): + uncal_filename = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + detector = models.CharField(blank=True, null=True) + subarray = models.CharField(blank=True, null=True) + read_pattern = models.CharField(blank=True, null=True) + nints = models.CharField(blank=True, null=True) + ngroups = models.CharField(blank=True, null=True) + expstart = models.CharField(blank=True, null=True) + readnoise_filename = models.CharField(blank=True, null=True) + full_image_mean = models.FloatField(blank=True, null=True) + full_image_stddev = models.FloatField(blank=True, null=True) + full_image_n = ArrayField(models.FloatField()) + full_image_bin_centers = ArrayField(models.FloatField()) + readnoise_diff_image = models.CharField(blank=True, null=True) + diff_image_mean = models.FloatField(blank=True, null=True) + diff_image_stddev = models.FloatField(blank=True, null=True) + diff_image_n = ArrayField(models.FloatField()) + diff_image_bin_centers = ArrayField(models.FloatField()) + entry_date = models.DateTimeField(blank=True, null=True) + amp1_mean = models.FloatField(blank=True, null=True) + amp1_stddev = models.FloatField(blank=True, null=True) + amp1_n = ArrayField(models.FloatField()) + amp1_bin_centers = ArrayField(models.FloatField()) + amp2_mean = models.FloatField(blank=True, null=True) + amp2_stddev = models.FloatField(blank=True, null=True) + amp2_n = ArrayField(models.FloatField()) + amp2_bin_centers = ArrayField(models.FloatField()) + amp3_mean = models.FloatField(blank=True, null=True) + amp3_stddev = models.FloatField(blank=True, null=True) + amp3_n = ArrayField(models.FloatField()) + amp3_bin_centers = ArrayField(models.FloatField()) + amp4_mean = models.FloatField(blank=True, null=True) + amp4_stddev = models.FloatField(blank=True, null=True) + amp4_n = ArrayField(models.FloatField()) + amp4_bin_centers = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'nirspec_readnoise_stats' + unique_together = (('id', 'entry_date'),) diff --git a/jwql/website/apps/jwql/monitor_models/ta.py b/jwql/website/apps/jwql/monitor_models/ta.py new file mode 100644 index 000000000..93a8b269b --- /dev/null +++ b/jwql/website/apps/jwql/monitor_models/ta.py @@ -0,0 +1,121 @@ +"""Defines the models for the ``jwql`` TA monitors. + +In Django, "a model is the single, definitive source of information +about your data. It contains the essential fields and behaviors of the +data you’re storing. 
Generally, each model maps to a single database +table" (from Django documentation). Each model contains fields, such +as character fields or date/time fields, that function like columns in +a data table. This module defines models that are used to store data +related to the JWQL monitors. + +Authors +------- + - Brian York +Use +--- + This module is used as such: + + :: + from monitor_models import MyModel + data = MyModel.objects.filter(name="JWQL") + +References +---------- + For more information please see: + ```https://docs.djangoproject.com/en/2.0/topics/db/models/``` +""" +# This is an auto-generated Django model module. +# Feel free to rename the models, but don't rename db_table values or field names. +from django.db import models +from django.contrib.postgres.fields import ArrayField + + +class MIRITaQueryHistory(models.Model): + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + entries_found = models.IntegerField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_ta_query_history' + unique_together = (('id', 'entry_date'),) + + +class MIRITaStats(models.Model): + entry_date = models.DateTimeField(unique=True) + cal_file_name = models.CharField(blank=True, null=True) + obs_end_time = models.DateTimeField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + detector = models.CharField(blank=True, null=True) + targx = models.FloatField(blank=True, null=True) + targy = models.FloatField(blank=True, null=True) + offset = models.FloatField(blank=True, null=True) + full_im_path = models.CharField(blank=True, null=True) + zoom_im_path = models.CharField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'miri_ta_stats' + unique_together = (('id', 'entry_date'),) + + +class NIRSpecTaQueryHistory(models.Model): + instrument = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + start_time_mjd = models.FloatField(blank=True, null=True) + end_time_mjd = models.FloatField(blank=True, null=True) + entries_found = models.IntegerField(blank=True, null=True) + files_found = models.IntegerField(blank=True, null=True) + run_monitor = models.BooleanField(blank=True, null=True) + entry_date = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = True + db_table = 'nirspec_ta_query_history' + unique_together = (('id', 'entry_date'),) + + +class NIRSpecTaStats(models.Model): + entry_date = models.DateTimeField(blank=True, null=True) + uncal_filename = models.CharField(blank=True, null=True) + aperture = models.CharField(blank=True, null=True) + detector = models.CharField(blank=True, null=True) + subarray = models.CharField(blank=True, null=True) + read_pattern = models.CharField(blank=True, null=True) + nints = models.CharField(blank=True, null=True) + ngroups = models.CharField(blank=True, null=True) + expstart = models.CharField(blank=True, null=True) + full_image_mean = models.FloatField(blank=True, null=True) + full_image_stddev = models.FloatField(blank=True, null=True) + full_image_n = ArrayField(models.FloatField()) + full_image_bin_centers = ArrayField(models.FloatField()) + diff_image_mean = models.FloatField(blank=True, 
null=True) + diff_image_stddev = models.FloatField(blank=True, null=True) + diff_image_n = ArrayField(models.FloatField()) + diff_image_bin_centers = ArrayField(models.FloatField()) + amp1_mean = models.FloatField(blank=True, null=True) + amp1_stddev = models.FloatField(blank=True, null=True) + amp1_n = ArrayField(models.FloatField()) + amp1_bin_centers = ArrayField(models.FloatField()) + amp2_mean = models.FloatField(blank=True, null=True) + amp2_stddev = models.FloatField(blank=True, null=True) + amp2_n = ArrayField(models.FloatField()) + amp2_bin_centers = ArrayField(models.FloatField()) + amp3_mean = models.FloatField(blank=True, null=True) + amp3_stddev = models.FloatField(blank=True, null=True) + amp3_n = ArrayField(models.FloatField()) + amp3_bin_centers = ArrayField(models.FloatField()) + amp4_mean = models.FloatField(blank=True, null=True) + amp4_stddev = models.FloatField(blank=True, null=True) + amp4_n = ArrayField(models.FloatField()) + amp4_bin_centers = ArrayField(models.FloatField()) + + class Meta: + managed = True + db_table = 'nirspec_ta_stats' + unique_together = (('id', 'entry_date'),) diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_bad_pixel_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_bad_pixel_bokeh.py index 5e522f7cd..a3722ce13 100755 --- a/jwql/website/apps/jwql/monitor_pages/monitor_bad_pixel_bokeh.py +++ b/jwql/website/apps/jwql/monitor_pages/monitor_bad_pixel_bokeh.py @@ -24,7 +24,8 @@ from bokeh.embed import components, file_html from bokeh.io import show from bokeh.layouts import layout -from bokeh.models import ColumnDataSource, DatetimeTickFormatter, HoverTool, Legend, LinearColorMapper, Panel, Tabs, Text, Title +from bokeh.models import ColumnDataSource, DatetimeTickFormatter, HoverTool, Legend, LinearColorMapper, Text, Title +from bokeh.models.layouts import Tabs, TabPanel from bokeh.plotting import figure from bokeh.resources import CDN import datetime @@ -149,7 +150,7 @@ def run(self): plot_layout = badpix_monitor_plot_layout(all_plots) # Create a tab for each type of plot - detector_panels.append(Panel(child=plot_layout, title=detector)) + detector_panels.append(TabPanel(child=plot_layout, title=detector)) # Build tabs tabs = Tabs(tabs=detector_panels) @@ -667,12 +668,12 @@ def create_plot(self): self.plot.tools.append(hover_tool) # Make the x axis tick labels look nice - self.plot.xaxis.formatter = DatetimeTickFormatter(microseconds=["%d %b %H:%M:%S.%3N"], - seconds=["%d %b %H:%M:%S.%3N"], - hours=["%d %b %H:%M"], - days=["%d %b %H:%M"], - months=["%d %b %Y %H:%M"], - years=["%d %b %Y"] + self.plot.xaxis.formatter = DatetimeTickFormatter(microseconds="%d %b %H:%M:%S.%3N", + seconds="%d %b %H:%M:%S.%3N", + hours="%d %b %H:%M", + days="%d %b %H:%M", + months="%d %b %Y %H:%M", + years="%d %b %Y" ) self.plot.xaxis.major_label_orientation = np.pi / 4 diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_bias_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_bias_bokeh.py index 4b5a6a6de..0cd7d31e2 100644 --- a/jwql/website/apps/jwql/monitor_pages/monitor_bias_bokeh.py +++ b/jwql/website/apps/jwql/monitor_pages/monitor_bias_bokeh.py @@ -28,7 +28,7 @@ from bokeh.embed import components, file_html from bokeh.layouts import layout from bokeh.models import ColorBar, ColumnDataSource, DatetimeTickFormatter, HoverTool, Legend, LinearAxis -from bokeh.models.widgets import Tabs, Panel +from bokeh.models.layouts import Tabs, TabPanel from bokeh.plotting import figure, output_file, save from bokeh.resources import CDN from datetime 
import datetime, timedelta @@ -132,10 +132,10 @@ def retrieve_latest_data(self, aperture): aperture : str Aperture name (e.g. NRCA1_FULL) """ - subq = (session.query(self.stats_table.aperture, func.max(self.stats_table.expstart).label("max_created")) \ - .group_by(self.stats_table.aperture) - .subquery() - ) + subq = (session.query(self.stats_table.aperture, func.max(self.stats_table.expstart).label("max_created")) + .group_by(self.stats_table.aperture) + .subquery() + ) query = (session.query(self.stats_table.aperture, self.stats_table.uncal_filename, @@ -147,10 +147,10 @@ def retrieve_latest_data(self, aperture): self.stats_table.counts, self.stats_table.bin_centers, self.stats_table.entry_date) - .filter(self.stats_table.aperture == aperture) - .order_by(self.stats_table.entry_date) \ - .join(subq, self.stats_table.expstart == subq.c.max_created) - ) + .filter(self.stats_table.aperture == aperture) + .order_by(self.stats_table.entry_date) \ + .join(subq, self.stats_table.expstart == subq.c.max_created) + ) latest_data = query.all() session.close() @@ -168,7 +168,6 @@ def retrieve_latest_data(self, aperture): self.latest_data['expstart'] = datetimes - class BiasMonitorPlots(): """This is the top-level class, which will call the BiasMonitorData class to get results from the bias monitor, and use the plotting @@ -281,7 +280,7 @@ def create_tabs(self): ] ) bias_layout.sizing_mode = 'scale_width' - bias_tab = Panel(child=bias_layout, title=aperture) + bias_tab = TabPanel(child=bias_layout, title=aperture) tabs.append(bias_tab) # Build tabs @@ -416,7 +415,6 @@ def create_plot(self): self.plot = PlaceholderPlot('Calibrated data: Histogram', x_label, y_label).plot - class MedianRowColPlot(): """Class to create a plot of the median signal across rows or columns @@ -509,7 +507,6 @@ def create_plot(self, colname): return plot - class TrendingPlot(): """Class to create trending plots of bias level over time. There should be 4 plots produced: 1 for each amplifier (with even and odd columns plotted in each). 
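The hunks on either side of this point repeat the two mechanical Bokeh 2.x-to-3.x changes made throughout this patch: DatetimeTickFormatter now takes a single format string per time scale rather than a list, and Panel has been renamed TabPanel (Tabs is unchanged). A minimal sketch of the new API, assuming bokeh>=3 as this patch now requires; the figure itself is illustrative:

    from bokeh.models import DatetimeTickFormatter, TabPanel, Tabs
    from bokeh.plotting import figure

    fig = figure(title='Example trending plot', x_axis_type='datetime',
                 tools='pan,box_zoom,reset,wheel_zoom,save')
    # Bokeh 3.x: one format string per scale (Bokeh 2.x accepted lists here)
    fig.xaxis.formatter = DatetimeTickFormatter(hours="%d %b %H:%M",
                                                days="%d %b %H:%M",
                                                months="%d %b %Y %H:%M",
                                                years="%d %b %Y")
    # Bokeh 3.x: Panel -> TabPanel; Tabs still collects the panels
    tabs = Tabs(tabs=[TabPanel(child=fig, title='NRCA1_FULL')])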
@@ -565,12 +562,12 @@ def create_amp_plot(self, amp_num, amp_data): alpha=0.75, source=source, legend_label='Odd cols') # Make the x axis tick labels look nice - plot.xaxis.formatter = DatetimeTickFormatter(microseconds=["%d %b %H:%M:%S.%3N"], - seconds=["%d %b %H:%M:%S.%3N"], - hours=["%d %b %H:%M"], - days=["%d %b %H:%M"], - months=["%d %b %Y %H:%M"], - years=["%d %b %Y"] + plot.xaxis.formatter = DatetimeTickFormatter(microseconds="%d %b %H:%M:%S.%3N", + seconds="%d %b %H:%M:%S.%3N", + hours="%d %b %H:%M", + days="%d %b %H:%M", + months="%d %b %Y %H:%M", + years="%d %b %Y" ) plot.xaxis.major_label_orientation = np.pi / 4 @@ -586,7 +583,7 @@ def create_amp_plot(self, amp_num, amp_data): ('Date:', '@expstart_str') ] ) - #hover_tool.formatters = {'@expstart': 'datetime'} + # hover_tool.formatters = {'@expstart': 'datetime'} plot.tools.append(hover_tool) plot.xaxis.axis_label = x_label plot.yaxis.axis_label = y_label diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_cosmic_rays_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_cosmic_rays_bokeh.py index 4160e9b87..a4b03eb9d 100644 --- a/jwql/website/apps/jwql/monitor_pages/monitor_cosmic_rays_bokeh.py +++ b/jwql/website/apps/jwql/monitor_pages/monitor_cosmic_rays_bokeh.py @@ -147,12 +147,12 @@ def history_plot(self): data = fig.scatter(x='x', y='y', line_width=5, line_color='blue', source=source) # Make the x axis tick labels look nice - fig.xaxis.formatter = DatetimeTickFormatter(microseconds=["%d %b %H:%M:%S.%3N"], - seconds=["%d %b %H:%M:%S.%3N"], - hours=["%d %b %H:%M"], - days=["%d %b %H:%M"], - months=["%d %b %Y %H:%M"], - years=["%d %b %Y"] + fig.xaxis.formatter = DatetimeTickFormatter(microseconds="%d %b %H:%M:%S.%3N", + seconds="%d %b %H:%M:%S.%3N", + hours="%d %b %H:%M", + days="%d %b %H:%M", + months="%d %b %Y %H:%M", + years="%d %b %Y" ) fig.xaxis.major_label_orientation = np.pi / 4 fig.yaxis[0].formatter = BasicTickFormatter(use_scientific=True, precision=2) diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py index d57a83c4b..1a4e7a670 100755 --- a/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py +++ b/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py @@ -60,7 +60,7 @@ class DarkHistPlot(): plot : bokeh.figure Figure containing the histogram plot """ - def __init__(self, aperture, data): + def __init__(self, aperture, data, obsdate): """Create the plot Parameters @@ -74,6 +74,7 @@ def __init__(self, aperture, data): """ self.data = data self.aperture = aperture + self.obsdate = obsdate self.create_plot() def calc_bin_edges(self, centers): @@ -95,6 +96,9 @@ def calc_bin_edges(self, centers): def create_plot(self): """Place the data in a CoumnDataSource and create the plot """ + title_str = f'{self.aperture}: Dark Rate Histogram. {self.obsdate.strftime("%d %b %Y")}' + x_label = 'Dark Rate (DN/sec)' + y_label = 'Number of Pixels' if len(self.data) > 0: # Specify which key ("amplifier") to show. If there is data for amp='5', # show that, as it will be the data for the entire detector. If not then @@ -109,10 +113,6 @@ def create_plot(self): else: use_amp = '1' - title_str = f'{self.aperture}: Dark Rate Histogram' - x_label = 'Dark Rate (DN/sec)' - y_label = 'Number of Pixels' - # If there are histogram data for multiple amps, then we can plot each histogram. 
if len(self.data) > 1: # Looks like the histogram data for the individual amps is not being saved @@ -174,7 +174,7 @@ def create_plot(self): fill_color=color, line_color="white", alpha=0.25, legend_label=f'Amp {amp}') # Set ranges - self.plot.extra_y_ranges = {"cdf_line": Range1d(0,1)} + self.plot.extra_y_ranges = {"cdf_line": Range1d(0, 1)} self.plot.add_layout(LinearAxis(y_range_name='cdf_line', axis_label="Cumulative Distribution"), "right") # Add cumulative distribution function @@ -193,10 +193,13 @@ def create_plot(self): self.plot.x_range.end = mainx[disp_index[-1]] self.plot.legend.location = "top_left" self.plot.legend.background_fill_color = "#fefefe" - self.plot.grid.grid_line_color="white" + self.plot.grid.grid_line_color = "white" else: # If self.data is empty, then make a placeholder plot - self.plot = PlaceholderPlot(title_str, x_label, y_label).plot + title_str = f'{self.aperture}: Dark Rate Histogram' + x_label = 'Dark Rate (DN/sec)' + y_label = 'Number of Pixels' + self.plot = PlaceholderPlot(title_str, x_label, y_label).create() class DarkImagePlot(): @@ -253,7 +256,7 @@ def create_plot(self): class DarkMonitorData(): - """Retrive dark monitor data from the database tables + """Retrieve dark monitor data from the database tables Attributes ---------- @@ -462,7 +465,7 @@ def __init__(self, instrument): # Retrieve data from database. Since the mean dark image plots are # produced by the dark monitor itself, all we need for that is the - # name of the file. then we need the histogram and trending data. All + # name of the file. Then we need the histogram and trending data. All # of this is in the dark monitor stats table. No need to query the # dark monitor pixel table. self.db.retrieve_data(self.aperture, get_pixtable_for_detector=False) @@ -479,7 +482,7 @@ def __init__(self, instrument): self.get_trending_data() # Now that we have all the data, create the acutal plots - self.hist_plots[aperture] = DarkHistPlot(self.aperture, self.hist_data).plot + self.hist_plots[aperture] = DarkHistPlot(self.aperture, self.hist_data, self.hist_date).plot self.trending_plots[aperture] = DarkTrendPlot(self.aperture, self.mean_dark, self.stdev_dark, self.obstime).plot def ensure_all_full_frame_apertures(self): @@ -538,8 +541,7 @@ def get_latest_histogram_data(self): self.hist_data = {} if len(self._entry_dates) > 0: # Find the index of the most recent entry - #self._aperture_entries = np.where((self._apertures == aperture))[0] - latest_date = np.max(self._entry_dates) #[self._aperture_entries]) + latest_date = np.max(self._entry_dates) # Get indexes of entries for all amps that were added in the # most recent run of the monitor for the aperture. All entries @@ -549,12 +551,15 @@ def get_latest_histogram_data(self): most_recent_idx = np.where(self._entry_dates > (latest_date - delta_time))[0] # Store the histogram data in a dictionary where the keys are the - # amplifier values (note that these are strings e.g. '1''), and the + # amplifier values (note that these are strings e.g. '1'), and the # values are tuples of (x, y) lists for idx in most_recent_idx: self.hist_data[self.db.stats_data[idx].amplifier] = (self.db.stats_data[idx].hist_dark_values, self.db.stats_data[idx].hist_amplitudes) + # Keep track of the observation date of the most recent entry + self.hist_date = self.db.stats_data[most_recent_idx[0]].obs_mid_time + def get_trending_data(self): """Organize data for the trending plot. Here we need all the data for the aperture. Keep amplifier-specific data separated. 
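The histogram-selection logic added above relies on the fact that all amplifier entries written by a single run of the dark monitor share nearly the same entry date, so the newest run can be recovered by windowing on the most recent date. A self-contained illustration with invented dates (the real code reads entry_date values from the dark monitor stats table, and delta_time is the run-grouping tolerance):

    import datetime
    import numpy as np

    entry_dates = np.array([datetime.datetime(2023, 10, 1, 3, 0, 0),   # newest run, amp 1
                            datetime.datetime(2023, 10, 1, 3, 0, 2),   # newest run, amp 2
                            datetime.datetime(2023, 9, 1, 3, 0, 0)])   # previous run

    latest_date = np.max(entry_dates)
    delta_time = datetime.timedelta(seconds=10)

    # Indexes of all amplifier entries added by the most recent monitor run
    most_recent_idx = np.where(entry_dates > (latest_date - delta_time))[0]
    print(most_recent_idx)  # [0 1]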
@@ -576,10 +581,10 @@ def stats_data_to_lists(self): """Create arrays from some of the stats database columns that are used by multiple plot types """ - #apertures = np.array([e.aperture for e in self.db.stats_data]) self._amplifiers = np.array([e.amplifier for e in self.db.stats_data]) self._entry_dates = np.array([e.entry_date for e in self.db.stats_data]) self._mean = np.array([e.mean for e in self.db.stats_data]) + self._readpatt = np.array([e.readpattern for e in self.db.stats_data]) self._stdev = np.array([e.stdev for e in self.db.stats_data]) self._obs_mid_time = np.array([e.obs_mid_time for e in self.db.stats_data]) self._stats_mean_dark_image_files = np.array([e.mean_dark_image_file for e in self.db.stats_data]) @@ -679,7 +684,8 @@ def create_plot(self): error_upper=error_upper, time=self.obstime[use_amp] ) - ) + ) + self.plot = figure(title=f'{self.aperture}: Mean +/- 1-sigma Dark Rate', tools='pan,box_zoom,reset,wheel_zoom,save', background_fill_color="#fafafa") @@ -707,12 +713,12 @@ def create_plot(self): legend_label=f'Amp {amp}') # Make the x axis tick labels look nice - self.plot.xaxis.formatter = DatetimeTickFormatter(microseconds=["%d %b %H:%M:%S.%3N"], - seconds=["%d %b %H:%M:%S.%3N"], - hours=["%d %b %H:%M"], - days=["%d %b %H:%M"], - months=["%d %b %Y %H:%M"], - years=["%d %b %Y"] + self.plot.xaxis.formatter = DatetimeTickFormatter(microseconds="%d %b %H:%M:%S.%3N", + seconds="%d %b %H:%M:%S.%3N", + hours="%d %b %H:%M", + days="%d %b %H:%M", + months="%d %b %Y %H:%M", + years="%d %b %Y" ) self.plot.xaxis.major_label_orientation = np.pi / 4 @@ -737,7 +743,7 @@ def create_plot(self): self.plot.y_range.end = max_val * 1.05 self.plot.legend.location = "top_right" self.plot.legend.background_fill_color = "#fefefe" - self.plot.grid.grid_line_color="white" + self.plot.grid.grid_line_color = "white" else: # If there are no data, make a placeholder plot self.plot = figure(title=f'{self.aperture}: Mean +/- 1-sigma Dark Rate', tools='pan,box_zoom,reset,wheel_zoom,save', @@ -748,7 +754,7 @@ def create_plot(self): self.plot.y_range.end = 1 source = ColumnDataSource(data=dict(x=[0.5], y=[0.5], text=['No data'])) - glyph = Text(x="x", y="y", text="text", angle=0., text_color="navy", text_font_size={'value':'20px'}) + glyph = Text(x="x", y="y", text="text", angle=0., text_color="navy", text_font_size={'value': '20px'}) self.plot.add_glyph(source, glyph) self.plot.xaxis.axis_label = 'Date' diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_readnoise_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_readnoise_bokeh.py index 599be35b0..d78e64de5 100644 --- a/jwql/website/apps/jwql/monitor_pages/monitor_readnoise_bokeh.py +++ b/jwql/website/apps/jwql/monitor_pages/monitor_readnoise_bokeh.py @@ -22,15 +22,16 @@ from bokeh.embed import components from bokeh.layouts import column, row -from bokeh.models import Panel, Tabs # bokeh <= 3.0 from bokeh.models import ColumnDataSource, HoverTool -# from bokeh.models import TabPanel, Tabs # bokeh >= 3.0 +from bokeh.models import TabPanel, Tabs from bokeh.plotting import figure from django.templatetags.static import static import numpy as np -from jwql.database.database_interface import session -from jwql.database.database_interface import FGSReadnoiseStats, MIRIReadnoiseStats, NIRCamReadnoiseStats, NIRISSReadnoiseStats, NIRSpecReadnoiseStats +# PEP8 will undoubtedly complain, but the file is specifically designed so that everything +# importable is a monitor class. 
+from jwql.website.apps.jwql.monitor_models.readnoise import * + from jwql.utils.constants import FULL_FRAME_APERTURES, JWST_INSTRUMENT_NAMES_MIXEDCASE from jwql.utils.utils import get_config @@ -80,14 +81,7 @@ def load_data(self): # Determine which database tables are needed based on instrument self.identify_tables() - # Query database for all data in readnoise stats with a matching aperture, - # and sort the data by exposure start time. - self.query_results = session.query(self.stats_table) \ - .filter(self.stats_table.aperture == self.aperture) \ - .order_by(self.stats_table.expstart) \ - .all() - - session.close() + self.query_results = list(self.stats_table.objects.filter(aperture__iexact=self.aperture).order_by("expstart").all()) class ReadNoiseFigure(): @@ -122,10 +116,10 @@ def __init__(self, instrument, aperture): self.plot_readnoise_difference_image() self.plot_readnoise_histogram() - self.tab = Panel(child=column(row(*self.amp_plots), - self.diff_image_plot, - self.readnoise_histogram), - title=self.aperture) + self.tab = TabPanel(child=column(row(*self.amp_plots), + self.diff_image_plot, + self.readnoise_histogram), + title=self.aperture) def plot_readnoise_amplifers(self): """Class to create readnoise scatter plots per amplifier. diff --git a/jwql/website/apps/jwql/monitor_views.py b/jwql/website/apps/jwql/monitor_views.py index 85e850b23..fa6f48204 100644 --- a/jwql/website/apps/jwql/monitor_views.py +++ b/jwql/website/apps/jwql/monitor_views.py @@ -39,9 +39,8 @@ import pandas as pd from . import bokeh_containers -from jwql.database.database_interface import session -from jwql.database.database_interface import NIRCamClawStats from jwql.website.apps.jwql import bokeh_containers +from jwql.website.apps.jwql.monitor_models.claw import NIRCamClawStats from jwql.website.apps.jwql.monitor_pages.monitor_readnoise_bokeh import ReadNoiseFigure from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE from jwql.utils.utils import get_config, get_base_url @@ -74,10 +73,14 @@ def background_monitor(request): output_dir_bkg = static(os.path.join("outputs", "claw_monitor", "backgrounds")) fltrs = ['F070W', 'F090W', 'F115W', 'F150W', 'F200W', 'F277W', 'F356W', 'F444W'] bkg_plots = [os.path.join(output_dir_bkg, '{}_backgrounds.png'.format(fltr)) for fltr in fltrs] + bkg_rms_plots = [os.path.join(output_dir_bkg, '{}_backgrounds_rms.png'.format(fltr)) for fltr in fltrs] + bkg_model_plots = [os.path.join(output_dir_bkg, '{}_backgrounds_vs_models.png'.format(fltr)) for fltr in fltrs] context = { 'inst': 'NIRCam', - 'bkg_plots': bkg_plots + 'bkg_plots': bkg_plots, + 'bkg_rms_plots': bkg_rms_plots, + 'bkg_model_plots': bkg_model_plots } # Return a HTTP response with the template and dictionary of variables @@ -154,12 +157,10 @@ def claw_monitor(request): template = "claw_monitor.html" # Get all recent claw stack images from the last 10 days - query = session.query(NIRCamClawStats.expstart_mjd, NIRCamClawStats.skyflat_filename).order_by(NIRCamClawStats.expstart_mjd.desc()).all() - df = pd.DataFrame(query, columns=['expstart_mjd', 'skyflat_filename']) - recent_files = list(pd.unique(df['skyflat_filename'][df['expstart_mjd'] > Time.now().mjd - 10])) - + query = NIRCamClawStats.objects.filter(expstart_mjd__gte=(Time.now().mjd - 10)) + query = query.order_by('-expstart_mjd').all().values('skyflat_filename') + recent_files = list(pd.unique(pd.DataFrame.from_records(query)['skyflat_filename'])) output_dir_claws = static(os.path.join("outputs", "claw_monitor", "claw_stacks")) - claw_stacks = 
[os.path.join(output_dir_claws, filename) for filename in recent_files] context = { diff --git a/jwql/website/apps/jwql/router.py b/jwql/website/apps/jwql/router.py new file mode 100644 index 000000000..dbfd9f3a2 --- /dev/null +++ b/jwql/website/apps/jwql/router.py @@ -0,0 +1,69 @@ +"""Defines the query routing for the monitor database tables. + +In Django, database queries are assumed to go to the default database unless either the +`using` field/keyword is defined or a routing table sends it to a different database. In +this case, all monitor tables should be routed to the monitors database, and the router +should otherwise express no opinion (by returning None). + +Authors +------- + - Brian York + +Use +--- + This module is not intended to be used outside of Django asking about it. + +References +---------- + For more information please see: + ```https://docs.djangoproject.com/en/2.0/topics/db/models/``` +""" +from jwql.utils.constants import MONITOR_TABLE_NAMES + + +class MonitorRouter: + """ + A router to control all database operations on models in the + JWQLDB (monitors) database. + """ + + def db_for_read(self, model, **hints): + """ + Attempts to read monitor models go to monitors db. + """ + if model._meta.db_table in MONITOR_TABLE_NAMES: + return "monitors" + return None + + def db_for_write(self, model, **hints): + """ + Attempts to write monitor models go to monitors db. + """ + if model._meta.db_table in MONITOR_TABLE_NAMES: + return "monitors" + return None + + def allow_relation(self, obj1, obj2, **hints): + """ + Allow relations between tables in the monitors DB. + """ + if ( + obj1._meta.db_table in MONITOR_TABLE_NAMES + and obj2._meta.db_table in MONITOR_TABLE_NAMES + ): + return True + return None + + def allow_migrate(self, db, app_label, model_name=None, **hints): + """ + Make sure the monitors apps only appear in the 'monitors' database. 
+ """ + model_names = [name.replace("_", "") for name in MONITOR_TABLE_NAMES] + if app_label == 'jwql': + if model_name in model_names: + if db == "monitors": + return True + return False + elif db == "monitors": + return False + return None diff --git a/jwql/website/apps/jwql/static/css/jwql.css b/jwql/website/apps/jwql/static/css/jwql.css index eb567b25f..26229492a 100644 --- a/jwql/website/apps/jwql/static/css/jwql.css +++ b/jwql/website/apps/jwql/static/css/jwql.css @@ -1,11 +1,36 @@ +.alternate-rows { + width: 100%; + border-collapse: collapse; +} + +.alternate-rows th, .alternate-rows td { + padding: 1px 10px; + border: 1px solid #ddd; +} + +.alternate-rows tr:nth-child(odd) { + background-color:#eee; +} +.alternate-rows tr:nth-child(even) { + background-color:#fff; +} + +.alternate-rows tbody tr:nth-child(even) { + background-color: #f2f2f2; +} + .anomaly_choice { list-style: none; } - + .APT_parameters { width: 20% } - + + .APT_parameters_wide { + width: 50%; + } + .banner { position: absolute; top: 55px; @@ -13,7 +38,7 @@ height: 3rem; overflow: hidden; } - + .banner img{ position: absolute; top: -9999px; @@ -40,7 +65,7 @@ border-radius: 0px !important; text-decoration: none; } - + /*Make outline buttons and highlighted normal buttons white*/ .btn-primary:hover, .btn-primary.active, .btn-outline-primary, .show > .btn-primary.dropdown-toggle, .bk-btn-primary:hover { @@ -50,14 +75,14 @@ border-radius: 0px !important; text-decoration: none; } - + /*Stop them from glowing blue*/ .btn.focus, .btn:active:focus, .btn.active:focus, .btn:active, .btn.active, .show > .btn.dropdown-toggle:focus { box-shadow: none !important; text-decoration: none; } - + [class*="col-"] { padding-top: 1rem; padding-bottom: 1rem; @@ -65,12 +90,42 @@ background-color: rgba(86, 61, 124, .15); border: 1px solid rgba(86, 61, 124, .2);*/ } - + + /* Collapsible text box */ + .collapsible { + width: 100%; + margin: 20px; + } + + .collapsible-btn { + background-color: #c85108 !important; + border-color: #c85108 !important; + color: white !important; + padding: 5px; + cursor: pointer; + border: none; + text-align: left; + outline: none; + border-radius: 0px !important; + text-decoration: none; + } + + .collapsible-content { + display: none; + padding: 10px; + border: 1px solid #3498db; + border-top: none; + } + + .collapsible-content.show { + display: block; + } + .dashboard { margin-left: 2%; margin-right: 2%; } - + /* Show the dropdown menu on hover */ /* DO NOT how the dropdown menu on hover if the navbar is collapsed */ @media only screen and (min-width: 1200px) { @@ -91,52 +146,52 @@ pointer-events: none; cursor: default; } - + /* Make disabled sections opaque and unclickable */ .disabled_section { pointer-events: none; opacity: 0.4; } - + /*Define dropdown menu colors*/ .dropdown-item:hover{ background-color: black; } - + .dropdown-menu { background-color: #2d353c; border-radius: 0px; max-height: 400px; overflow-y: auto; } - + .dropdown-menu .dropdown-item { color: white; } - + .dropdown-menu .dropdown-heading { color: #c85108 !important; text-transform: uppercase; } - + .explorer_options { padding-left: 1rem; padding-right: 1rem; } - + /*Stop the search box from glowing blue*/ .form-control:focus { box-shadow: none; border-color: #cfd4da; } - + /*Make sure the thumbnails are actually vertically centered*/ .helper { display: inline-block; height: 100%; vertical-align: middle; } - + .help-tip { text-align: center; background-color: #D0D7D8; @@ -150,40 +205,40 @@ opacity: 0.5; display: inline-block; } - + /*Stop 
the search box from glowing blue*/ #homepage_filesearch #id_search { width: 500px; height: 100%; padding: 0px; } - + /* START structures for engineering_database page */ - + #mnemonic_name_search { width: 100%; height: 100%; padding: 0px; } - + .mnemonic_name_search_row { display: flex; width: 100%; } - + .mnemonic_name_search_col { padding: 1em; border: 1px solid #F2CE3A; width: 100%; } - + .mnemonic_name_search_col1 { padding: 1em; border: 1px solid #F2CE3A; width: 40%; } - + .mnemonic_query_section { width: 100%; height: 100%; @@ -191,7 +246,7 @@ /*border:solid #000000;*/ border: 1px solid #F2CE3A; } - + .mnemonic_exploration_section { width: 100%; height: 100%; @@ -201,37 +256,37 @@ border: 1px solid #F2CE3A; line-height: 15px } - + .mnemonic_query_field { float:left; width:300px; list-style-type: none; display : inline; } - + /* END structures for engineering_database page */ - - + + #homepage_filesearch #id_search:focus { box-shadow: none; border-color: #cfd4da; } - + /*Make the form fields be inline*/ .homepage_form_fieldWrapper { display: inline; } - + #id_anomaly_choices { list-style: none; padding-left: 0; } - + /*Don't let the search bar be super long*/ .input-group { width: 250px; } - + /*Make the search icon look like a button*/ .input-group-text { background-color: #c85108 !important; @@ -239,7 +294,7 @@ color: white !important; border-radius: 0px; } - + /*Format the color background*/ .instrument-color-fill { display: none; @@ -252,7 +307,7 @@ left: 0%; z-index: 1; } - + /*To make the instrument logos vertically centered*/ .instrument_frame { height: 180px; @@ -262,12 +317,12 @@ position: relative; display: inline-block; } - + /*Make H2 header smaller for select pages*/ #instrument_main h2, .mnemonic_trending_main h2 { font-size: 1.75rem; } - + .instrument-name { font-size: 35px; color: white; @@ -286,72 +341,72 @@ z-index: 2; vertical-align: middle; } - + .instrument_panel { text-align: center; } - + .instrument_panel:hover .instrument-color-fill { display: inline; } - + .instrument_panel:hover .instrument-name { display: inline-block; } - + .instrument_select { padding-top: 1rem; padding-bottom: 2rem; margin-right: 5rem; margin-left: 5rem; } - + .image_preview { display: inline-block; } - + #loading { text-align:center; margin: 0 auto; width: 200px; z-index: 1000; } - + .monitor-name { background-color: #c85108; color: white; width: 100%; height: 100%; } - + /* Change color of dropdown links on hover */ li:hover .nav-link, .navbar-brand:hover { color: #fff !important; } - + /* Define navbar color*/ .navbar { background-color: black; } - + /*Define navbar font color and case*/ .nav-link { color: #bec4d4 !important; text-transform: uppercase; } - + /* Set padding around JWST logo*/ .navbar-left { padding-left:10px; padding-right:10px; } - + /* Get rid of padding around GitHub logo */ #github-link, #github-link-collapsed { padding-bottom: 0px; padding-top: 0px; } - + .plot-container { width: 100%; height: 600px; @@ -362,7 +417,7 @@ border-radius: 0px; border-width: 1px; } - + .plot-header { background-color: #c85108 !important; border-color: #c85108 !important ; @@ -372,7 +427,7 @@ border-width: 1px; width: 100%; } - + /*Define the proposal thumbnails*/ .proposal { display: inline-block; @@ -383,11 +438,11 @@ display: inline-block; margin: 0.1rem; } - + .proposal img { filter: grayscale(100%); } - + .proposal-color-fill { width: 100%; height: 100%; @@ -398,16 +453,16 @@ left: 0%; z-index: 1; } - + .proposal:hover { cursor: pointer; } - + .proposal:hover { background-color: 
#356198; opacity: 0.75; } - + .proposal-info { width: 100%; height: 100%; @@ -423,11 +478,11 @@ z-index: 2; font-size: 0.75rem; } - + .row { margin-bottom: 1rem; } - + .slider{ -webkit-appearance: none; width: 250px; @@ -435,7 +490,7 @@ background: #BEC4D4; outline: none; } - + /* slider style for Chrome/Safari/Opera/Edge */ .slider::-webkit-slider-thumb { -webkit-appearance: none; @@ -445,7 +500,7 @@ background: #C85108; cursor: pointer; } - + /* slider style for Firefox */ .slider::-moz-range-thumb { width: 15px; @@ -453,17 +508,17 @@ background: #C85108; cursor: pointer; } - + /* remove slider outline for Firefox */ .slider::-moz-focus-outer { border: 0; } - + .row .row { margin-top: 1rem; margin-bottom: 0; } - + /*Video for space 404 page*/ #space_404 { position: fixed; @@ -474,7 +529,7 @@ bottom: 0; align: center; } - + #space_404_text { position: fixed; background: rgba(0, 0, 0, 0.5); @@ -484,7 +539,7 @@ padding: 2rem; display: none; } - + .thumbnail { width: 8rem; height: 8rem; @@ -493,7 +548,7 @@ display: inline-block; margin: 0.1rem; } - + /*Format the color background*/ .thumbnail-color-fill { display: none; @@ -506,16 +561,16 @@ left: 0%; z-index: 1; } - + .thumbnail:hover { cursor: pointer; } - + .thumbnail:hover .thumbnail-info, .thumbnail:hover .thumbnail-color-fill { display: inline; } - + .thumbnail img { max-width: 100%; max-height: 100%; @@ -523,7 +578,7 @@ height: auto; vertical-align: middle; } - + /*Format the proposal number and number of files*/ .thumbnail-info { display: none; @@ -540,7 +595,7 @@ color: white; z-index: 2; } - + .thumbnail-staff { width: 15rem; height: 15rem; @@ -564,7 +619,7 @@ position: absolute; font-size: 0.65rem; } - + /*Format the version identifier text in bottom corner*/ #version-div { float: right; @@ -573,25 +628,25 @@ color: white; font-size: 12px } - + /*Add underline for links*/ a { text-decoration: underline; } - + /*Don't add underline for navbar and button links*/ nav a, .btn { text-decoration: none; } - + body { padding-top: 8rem; } - + body { font-family: 'Overpass', sans-serif !important; } - + h1, h2, h3, h4, h5, h6 { font-family: 'Oswald', sans-serif !important; } @@ -601,7 +656,7 @@ h1 { letter-spacing: 0.05em; } - + ul.no-bullets { list-style: none; padding-left:10px; diff --git a/jwql/website/apps/jwql/static/js/jwql.js b/jwql/website/apps/jwql/static/js/jwql.js index 0f4ee63ba..df47c63e7 100644 --- a/jwql/website/apps/jwql/static/js/jwql.js +++ b/jwql/website/apps/jwql/static/js/jwql.js @@ -310,6 +310,40 @@ function determine_page_title_obs(instrument, proposal, observation) { } } +/** + * Construct a 4-column table from an input dictionary. The 4 columns + * correspond to: key, value, key, value. + * @dictionary {dict} jsonified dictionary + */ +function make_table_from_dict(dictionary) { + var tableBody = document.getElementById("table-body"); + // Extract keys and values from the dictionary + var keys = Object.keys(dictionary); + var values = Object.values(dictionary); + + // Determine the maximum length of keys and values + //var maxLength = Math.max(keys.length, values.length); + var maxLength = keys.length + + // Populate the table dynamically + for (var i = 0; i < maxLength; i+=2) { + var row = document.createElement("tr"); + var row = tableBody.insertRow(i/2) + var cell1 = row.insertCell(0) + var cell2 = row.insertCell(1) + var cell3 = row.insertCell(2) + var cell4 = row.insertCell(3) + + cell1.textContent = i < keys.length ? keys[i]+':' : ""; + cell2.textContent = i < keys.length ? 
values[i] : ""; + cell3.textContent = (i+1) < keys.length ? keys[i+1]+':' : ""; + cell4.textContent = (i+1) < keys.length ? values[i+1] : ""; + + tableBody.appendChild(row); + } + return tableBody; +} + /** * adds/removes disabled_section class and clears value * @param {string} element_id @@ -1475,3 +1509,15 @@ function version_url(version_string) { a_line += '">JWQL v' + version_string + ''; return a_line; } + +/** + * Create a collapsible table + */ +document.addEventListener('DOMContentLoaded', function () { + var collapsibleBtn = document.querySelector('.collapsible-btn'); + var collapsibleContent = document.querySelector('.collapsible-content'); + + collapsibleBtn.addEventListener('click', function () { + collapsibleContent.classList.toggle('show'); + }); +}); diff --git a/jwql/website/apps/jwql/templates/background_monitor.html b/jwql/website/apps/jwql/templates/background_monitor.html index ddbf53c6f..5a01a0918 100644 --- a/jwql/website/apps/jwql/templates/background_monitor.html +++ b/jwql/website/apps/jwql/templates/background_monitor.html @@ -8,18 +8,117 @@ {% block content %} -
[background_monitor.html markup lost in extraction. Within {% block content %}, the hunk keeps the '{{ inst }} Background Monitor' heading and replaces the single gallery that looped over bkg_plots (one background trending image per filter; the loop variable is renamed from bkg_plot to plot) with a tabbed layout of three panels looping over bkg_plots, bkg_rms_plots, and bkg_model_plots, matching the three plot lists added to the view context in monitor_views.py above.]
{% endblock %} \ No newline at end of file diff --git a/jwql/website/apps/jwql/templates/jwql_query.html b/jwql/website/apps/jwql/templates/jwql_query.html index 6274c5a83..098bdd4ae 100644 --- a/jwql/website/apps/jwql/templates/jwql_query.html +++ b/jwql/website/apps/jwql/templates/jwql_query.html @@ -211,6 +211,18 @@

[jwql_query.html markup lost in extraction. Two hunks (@@ -211,6 +211,18 @@ above and @@ -546,6 +558,18 @@) each extend the 'Dynamic Query Form' with a subarray filter group: the first adds 'FGS Subarrays', looping over form.fgs_subarray, and the second adds 'NIRSpec Subarrays', looping over form.nirspec_subarray; each loop renders its {{ field }} checkboxes.]
diff --git a/jwql/website/apps/jwql/templates/view_exposure.html b/jwql/website/apps/jwql/templates/view_exposure.html index a96c12153..7b9d71856 100644 --- a/jwql/website/apps/jwql/templates/view_exposure.html +++ b/jwql/website/apps/jwql/templates/view_exposure.html @@ -12,12 +12,48 @@

[view_exposure.html markup lost in extraction. Under the 'Exposure {{ group_root }}' heading, the hunk keeps the Proposal / Observation / Visit links and adds a metadata panel showing Visit Status: {{ basic_info.visit_status }}, Category: {{ basic_info.category }}, Subarray: {{ basic_info.subarray }}, Viewed: {{ marked_viewed }}, Filter: {{ basic_info.filter }}, optional Pupil: {{ basic_info.pupil }} and Grating: {{ basic_info.grating }} (each guarded by an {% if %} check on basic_info), and Exp Start: {{ expstart_str }}, followed by a collapsible section, presumably the additional_info table; its markup was lost.]
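The Exp Start value in this panel comes from expstart_str, which the views.py hunk later in this patch derives from the MJD exposure start stored on RootFileInfo. The conversion in isolation:

    from astropy.time import Time

    expstart = 60123.456  # exposure start in MJD, as stored on RootFileInfo
    expstart_str = Time(expstart, format='mjd').to_datetime().strftime('%d %b %Y %H:%M')
    print(expstart_str)  # 28 Jun 2023 10:56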
diff --git a/jwql/website/apps/jwql/templates/view_image.html b/jwql/website/apps/jwql/templates/view_image.html index 4355eb878..90dd52c87 100644 --- a/jwql/website/apps/jwql/templates/view_image.html +++ b/jwql/website/apps/jwql/templates/view_image.html @@ -13,15 +13,50 @@

[view_image.html markup lost in extraction. The first hunk keeps the Proposal / Observation / Visit / Detector links under the '{{ file_root }}' heading, adds the same basic_info metadata panel and collapsible section as view_exposure.html (Visit Status, Category, Subarray, Viewed, Filter, optional Pupil/Grating, and Exp Start: {{ expstart_str }}), and replaces the separate 'FITS Filename:' / 'JPG Filename:' lines with a single combined 'FITS Filename: ... JPG Filename:' line below the panel, ahead of the 'View File Type:' selector. A second hunk (@@ -44,6 +79,7 @@) adds one line near the second '{{ file_root }}' heading; its markup was also lost.]
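Both templates appear to fill their collapsible box via make_table_from_dict (added to jwql.js earlier in this patch), which lays a dict out as two key/value pairs per four-column table row. The same pairing logic restated as a runnable Python sketch (keyword names and values invented for the example):

    def dict_to_rows(info):
        """Arrange dict items two (key, value) pairs per row, padding the
        final row with blanks when the dict has an odd number of items."""
        items = list(info.items())
        rows = []
        for i in range(0, len(items), 2):
            key1, val1 = items[i]
            key2, val2 = items[i + 1] if i + 1 < len(items) else ('', '')
            rows.append((key1 + ':', val1, key2 + ':' if key2 else '', val2))
        return rows

    print(dict_to_rows({'CATEGORY': 'COM', 'EXP_TYPE': 'NRC_IMAGE', 'NEXPOSUR': 3}))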
diff --git a/jwql/website/apps/jwql/tests/test_context_processors.py b/jwql/website/apps/jwql/tests/test_context_processors.py index 1d4fc5951..b1ed31510 100644 --- a/jwql/website/apps/jwql/tests/test_context_processors.py +++ b/jwql/website/apps/jwql/tests/test_context_processors.py @@ -24,7 +24,7 @@ import os from unittest import skipIf -ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') +from jwql.utils.constants import ON_GITHUB_ACTIONS if not ON_GITHUB_ACTIONS: from jwql.website.apps.jwql import context_processors diff --git a/jwql/website/apps/jwql/views.py b/jwql/website/apps/jwql/views.py index 3fc36eaa7..0ea0281ad 100644 --- a/jwql/website/apps/jwql/views.py +++ b/jwql/website/apps/jwql/views.py @@ -51,21 +51,25 @@ import operator import socket +from astropy.time import Time from bokeh.layouts import layout from bokeh.embed import components from django.core.paginator import Paginator from django.http import HttpResponse, JsonResponse from django.shortcuts import redirect, render +import numpy as np from sqlalchemy import inspect from jwql.database.database_interface import load_connection from jwql.utils import monitor_utils from jwql.utils.interactive_preview_image import InteractivePreviewImg from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE, URL_DICT, QUERY_CONFIG_TEMPLATE, QueryConfigKeys -from jwql.utils.utils import filename_parser, get_base_url, get_config, get_rootnames_for_instrument_proposal, query_unformat +from jwql.utils.utils import filename_parser, filesystem_path, get_base_url, get_config +from jwql.utils.utils import get_rootnames_for_instrument_proposal, query_unformat from .data_containers import build_table from .data_containers import get_acknowledgements +from .data_containers import get_additional_exposure_info from .data_containers import get_available_suffixes from .data_containers import get_anomaly_form from .data_containers import get_dashboard_components @@ -1169,6 +1173,13 @@ def view_exposure(request, inst, group_root): root_file_info = RootFileInfo.objects.filter(root_name__startswith=group_root) viewed = all([rf.viewed for rf in root_file_info]) + # Convert expstart from MJD to a date + expstart_str = Time(root_file_info[0].expstart, format='mjd').to_datetime().strftime('%d %b %Y %H:%M') + + # Create one dict of info to show at the top of the page, and another dict of info + # to show in the collapsible text box. + basic_info, additional_info = get_additional_exposure_info(root_file_info, image_info) + # Build the context context = {'base_url': get_base_url(), 'group_root_list': group_root_list, @@ -1182,7 +1193,10 @@ def view_exposure(request, inst, group_root): 'total_ints': image_info['total_ints'], 'detectors': sorted(image_info['detectors']), 'form': form, - 'marked_viewed': viewed} + 'marked_viewed': viewed, + 'expstart_str': expstart_str, + 'basic_info': basic_info, + 'additional_info': additional_info} return render(request, template, context) @@ -1258,6 +1272,13 @@ def view_image(request, inst, file_root): # Get our current views RootFileInfo model and send our "viewed/new" information root_file_info = RootFileInfo.objects.get(root_name=file_root) + # Convert expstart from MJD to a date + expstart_str = Time(root_file_info.expstart, format='mjd').to_datetime().strftime('%d %b %Y %H:%M') + + # Create one dict of info to show at the top of the page, and another dict of info + # to show in the collapsible text box. 
+ basic_info, additional_info = get_additional_exposure_info(root_file_info, image_info) + # Build the context context = {'base_url': get_base_url(), 'file_root_list': file_root_list, @@ -1270,6 +1291,9 @@ def view_image(request, inst, file_root): 'available_ints': image_info['available_ints'], 'total_ints': image_info['total_ints'], 'form': form, - 'marked_viewed': root_file_info.viewed} + 'marked_viewed': root_file_info.viewed, + 'expstart_str': expstart_str, + 'basic_info': basic_info, + 'additional_info': additional_info} return render(request, template, context) diff --git a/jwql/website/jwql_proj/settings.py b/jwql/website/jwql_proj/settings.py index 6dc070ca7..a8adf9fa3 100644 --- a/jwql/website/jwql_proj/settings.py +++ b/jwql/website/jwql_proj/settings.py @@ -95,12 +95,12 @@ ] MESSAGE_TAGS = { - messages.DEBUG: 'alert-secondary', - messages.INFO: 'alert-info', - messages.SUCCESS: 'alert-success', - messages.WARNING: 'alert-warning', - messages.ERROR: 'alert-danger', - } + messages.DEBUG: 'alert-secondary', + messages.INFO: 'alert-info', + messages.SUCCESS: 'alert-success', + messages.WARNING: 'alert-warning', + messages.ERROR: 'alert-danger', +} WSGI_APPLICATION = 'jwql.website.jwql_proj.wsgi.application' @@ -108,8 +108,10 @@ # Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases DATABASES = { - 'default': get_config()['django_database'] + 'default': get_config()['django_databases']['default'], + 'monitors': get_config()['django_databases']['monitors'] } +DATABASE_ROUTERS = ["jwql.website.apps.jwql.router.MonitorRouter"] # Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators diff --git a/jwql/website/manage.py b/jwql/website/manage.py index 805e86b22..39009f26a 100755 --- a/jwql/website/manage.py +++ b/jwql/website/manage.py @@ -47,7 +47,7 @@ 'outputs': 'outputs', 'preview_image_filesystem': 'preview_images', 'thumbnail_filesystem': 'thumbnails' - } + } for directory in ['filesystem', 'outputs', 'preview_image_filesystem', 'thumbnail_filesystem']: symlink_location = os.path.join(os.path.dirname(__file__), 'apps', 'jwql', 'static', directory_mapping[directory]) diff --git a/pyproject.toml b/pyproject.toml index 24f701717..b17920154 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,7 +17,7 @@ dependencies = [ "asdf", "astropy", "astroquery", - "bokeh<3", + "bokeh>=3", "crds", "cryptography", "django", diff --git a/requirements.txt b/requirements.txt index 5f601fddc..0edf7d9ea 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ astropy==5.3.3 astroquery==0.4.6 bandit==1.7.5 beautifulsoup4==4.12.2 -bokeh==2.4.3 +bokeh==3.3.0 celery==5.3.4 cryptography==41.0.7 django==4.2.5 @@ -11,6 +11,7 @@ ipython==8.16.1 jinja2==3.1.2 jsonschema==4.19.1 jwst==1.12.3 +jwst_backgrounds==1.2.0 matplotlib==3.8.0 nodejs==20.8.0 numpy==1.25.2