diff --git a/jwql/example_config.json b/jwql/example_config.json
index 937a1d63b..b6dfe2806 100644
--- a/jwql/example_config.json
+++ b/jwql/example_config.json
@@ -26,6 +26,7 @@
     "mast_base_url" : "",
     "mast_request_url": "",
     "outputs" : "",
+    "working": "",
     "preview_image_filesystem" : "",
     "filesystem" : "",
     "setup_file" : "",
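
The new "working" entry gives each monitor a scratch area that is separate from the long-lived "outputs" tree. A minimal sketch of the intended usage pattern, with an invented monitor name (import paths assumed):

    # Sketch only: mirrors the working/outputs split adopted by the monitors below.
    import os

    from jwql.utils.utils import ensure_dir_exists, get_config

    config = get_config()
    working_dir = os.path.join(config['working'], 'example_monitor')  # scratch space for copies and pipeline products
    output_dir = os.path.join(config['outputs'], 'example_monitor')   # persistent monitor results
    ensure_dir_exists(working_dir)
    ensure_dir_exists(output_dir)
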
diff --git a/jwql/instrument_monitors/common_monitors/bad_pixel_monitor.py b/jwql/instrument_monitors/common_monitors/bad_pixel_monitor.py
index cc4b7fb68..c0366df14 100755
--- a/jwql/instrument_monitors/common_monitors/bad_pixel_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/bad_pixel_monitor.py
@@ -659,8 +659,8 @@ def map_uncal_and_rate_file_lists(self, uncal_files, rate_files, rate_files_to_c
             the rate file failed)
         """
         # Copy files from filesystem
-        uncal_copied_files, uncal_not_copied = copy_files(uncal_files, self.data_dir)
-        rate_copied_files, rate_not_copied = copy_files(rate_files_to_copy, self.data_dir)
+        uncal_copied_files, uncal_not_copied = copy_files(uncal_files, self.working_data_dir)
+        rate_copied_files, rate_not_copied = copy_files(rate_files_to_copy, self.working_data_dir)

         # Set any rate files that failed to copy to None so
         # that we can regenerate them
@@ -677,7 +677,7 @@ def map_uncal_and_rate_file_lists(self, uncal_files, rate_files, rate_files_to_c
                 del rate_files[bad_index]

         logging.info('\tNew {} observations: '.format(obs_type))
-        logging.info('\tData dir: {}'.format(self.data_dir))
+        logging.info('\tData dir: {}'.format(self.working_data_dir))
         logging.info('\tCopied to data dir: {}'.format(uncal_copied_files))
         logging.info('\tNot copied (failed, or missing from filesystem): {}'.format(uncal_not_copied))
@@ -800,10 +800,10 @@ def process(self, illuminated_raw_files, illuminated_slope_files, flat_file_coun
             self.get_metadata(uncal_file)
             if rate_file == 'None':
                 short_name = os.path.basename(uncal_file).replace('_uncal.fits', '')
-                local_uncal_file = os.path.join(self.data_dir, os.path.basename(uncal_file))
+                local_uncal_file = os.path.join(self.working_data_dir, os.path.basename(uncal_file))
                 logging.info('Calling pipeline for {}'.format(uncal_file))
-                logging.info("Copying raw file to {}".format(self.data_dir))
-                copy_files([uncal_file], self.data_dir)
+                logging.info("Copying raw file to {}".format(self.working_data_dir))
+                copy_files([uncal_file], self.working_data_dir)
                 if hasattr(self, 'nints') and self.nints > 1:
                     out_exts[short_name] = ['jump', '1_ramp_fit']
                 needs_calibration = False
@@ -817,14 +817,14 @@ def process(self, illuminated_raw_files, illuminated_slope_files, flat_file_coun
             else:
                 logging.info("\tRate file found for {}".format(uncal_file))
                 if os.path.isfile(rate_file):
-                    copy_files([rate_file], self.data_dir)
+                    copy_files([rate_file], self.working_data_dir)
                 else:
                     logging.warning("\tRate file {} doesn't actually exist".format(rate_file))
                     short_name = os.path.basename(uncal_file).replace('_uncal.fits', '')
-                    local_uncal_file = os.path.join(self.data_dir, os.path.basename(uncal_file))
+                    local_uncal_file = os.path.join(self.working_data_dir, os.path.basename(uncal_file))
                     logging.info('Calling pipeline for {}'.format(uncal_file))
-                    logging.info("Copying raw file to {}".format(self.data_dir))
-                    copy_files([uncal_file], self.data_dir)
+                    logging.info("Copying raw file to {}".format(self.working_data_dir))
+                    copy_files([uncal_file], self.working_data_dir)
                     if hasattr(self, 'nints') and self.nints > 1:
                         out_exts[short_name] = ['jump', '1_ramp_fit']
                     needs_calibration = False
@@ -845,7 +845,7 @@ def process(self, illuminated_raw_files, illuminated_slope_files, flat_file_coun
             logging.info("Checking files post-calibration")
             for uncal_file, rate_file in zip(illuminated_raw_files, illuminated_slope_files):
                 logging.info("\tChecking files {}, {}".format(os.path.basename(uncal_file), os.path.basename(rate_file)))
-                local_uncal_file = os.path.join(self.data_dir, os.path.basename(uncal_file))
+                local_uncal_file = os.path.join(self.working_data_dir, os.path.basename(uncal_file))
                 if local_uncal_file in outputs:
                     logging.info("\t\tAdding calibrated file.")
                     illuminated_slope_files[index] = deepcopy(outputs[local_uncal_file][1])
@@ -907,10 +907,10 @@ def process(self, illuminated_raw_files, illuminated_slope_files, flat_file_coun
             logging.info("Checking dark file {} with rate file {}".format(uncal_file, rate_file))
             self.get_metadata(uncal_file)
             short_name = os.path.basename(uncal_file).replace('_uncal.fits', '')
-            local_uncal_file = os.path.join(self.data_dir, os.path.basename(uncal_file))
+            local_uncal_file = os.path.join(self.working_data_dir, os.path.basename(uncal_file))
             if not os.path.isfile(local_uncal_file):
-                logging.info("\tCopying raw file to {}".format(self.data_dir))
-                copy_files([uncal_file], self.data_dir)
+                logging.info("\tCopying raw file to {}".format(self.working_data_dir))
+                copy_files([uncal_file], self.working_data_dir)
             if hasattr(self, 'nints') and self.nints > 1:
                 out_exts[short_name] = ['jump', 'fitopt', '1_ramp_fit']
             local_processed_files = [local_uncal_file.replace("uncal", x) for x in out_exts[short_name]]
@@ -938,7 +938,7 @@ def process(self, illuminated_raw_files, illuminated_slope_files, flat_file_coun
             logging.info("Checking files post-calibration")
             for uncal_file, rate_file in zip(dark_raw_files, dark_slope_files):
                 logging.info("\tChecking files {}, {}".format(uncal_file, rate_file))
-                local_uncal_file = os.path.join(self.data_dir, os.path.basename(uncal_file))
+                local_uncal_file = os.path.join(self.working_data_dir, os.path.basename(uncal_file))
                 short_name = os.path.basename(uncal_file).replace('_uncal.fits', '')
                 if local_uncal_file in outputs:
                     logging.info("\t\tAdding calibrated files")
@@ -1090,7 +1090,7 @@ def process(self, illuminated_raw_files, illuminated_slope_files, flat_file_coun
                 raise ValueError("Unrecognized type of bad pixel: {}. Cannot update database table.".format(bad_type))

         # Remove raw files, rate files, and pipeline products in order to save disk space
-        files_to_remove = glob(f'{self.data_dir}/*.fits')
+        files_to_remove = glob(f'{self.working_data_dir}/*.fits')
         for filename in files_to_remove:
             os.remove(filename)

@@ -1110,6 +1110,7 @@ def run(self):
         logging.info('Begin logging for bad_pixel_monitor')

         # Get the output directory
+        self.working_dir = os.path.join(get_config()['working'], 'bad_pixel_monitor')
         self.output_dir = os.path.join(get_config()['outputs'], 'bad_pixel_monitor')

         # Read in config file that defines the thresholds for the number
@@ -1251,9 +1252,12 @@ def run(self):
                     dark_uncal_files, dark_rate_files, dark_rate_files_to_copy = None, None, None

                 # Set up directories for the copied data
+                ensure_dir_exists(os.path.join(self.working_dir, 'data'))
                 ensure_dir_exists(os.path.join(self.output_dir, 'data'))
-                self.data_dir = os.path.join(self.output_dir, 'data/{}_{}'.format(self.instrument.lower(), self.aperture.lower()))
-                ensure_dir_exists(self.data_dir)
+                self.working_data_dir = os.path.join(self.working_dir, 'data/{}_{}'.format(self.instrument.lower(), self.aperture.lower()))
+                self.output_data_dir = os.path.join(self.output_dir, 'data/{}_{}'.format(self.instrument.lower(), self.aperture.lower()))
+                ensure_dir_exists(self.working_data_dir)
+                ensure_dir_exists(self.output_data_dir)

                 # Copy files from filesystem
                 if run_flats:
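
The net effect in this monitor is a split between a per-aperture working tree (copied uncal/rate files and pipeline scratch, cleaned up at the end of process()) and a per-aperture output tree for products that persist. Roughly, as a sketch with an invented instrument/aperture pair:

    import os
    from glob import glob

    from jwql.utils.utils import get_config

    # Hypothetical aperture, for illustration only.
    instrument, aperture = 'nircam', 'NRCA1_FULL'
    working_dir = os.path.join(get_config()['working'], 'bad_pixel_monitor')
    output_dir = os.path.join(get_config()['outputs'], 'bad_pixel_monitor')

    # run() builds these two parallel per-aperture trees:
    working_data_dir = os.path.join(working_dir, 'data/{}_{}'.format(instrument.lower(), aperture.lower()))  # copies and scratch
    output_data_dir = os.path.join(output_dir, 'data/{}_{}'.format(instrument.lower(), aperture.lower()))    # persistent products

    # and process() finishes by clearing the working tree to save disk space:
    files_to_remove = glob(f'{working_data_dir}/*.fits')
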
""" - output_filename = os.path.join(self.data_dir, '{}.png'.format(outname)) + output_filename = os.path.join(self.output_data_dir, '{}.png'.format(outname)) # Get image scale limits z = ZScaleInterval() @@ -455,6 +455,8 @@ def run(self): # Get the output directory and setup a directory to store the data self.output_dir = os.path.join(get_config()['outputs'], 'bias_monitor') ensure_dir_exists(os.path.join(self.output_dir, 'data')) + self.working_dir = os.path.join(get_config()['working'], 'bias_monitor') + ensure_dir_exists(os.path.join(self.working_dir, 'data')) # Use the current time as the end time for MAST query self.query_end = Time.now().mjd @@ -495,14 +497,17 @@ def run(self): logging.info('\tAperture: {}, new entries: {}'.format(self.aperture, len(new_entries))) # Set up a directory to store the data for this aperture - self.data_dir = os.path.join(self.output_dir, 'data/{}_{}'.format(self.instrument.lower(), self.aperture.lower())) + self.working_data_dir = os.path.join(self.working_dir, 'data/{}_{}'.format(self.instrument.lower(), self.aperture.lower())) if len(new_entries) > 0: - ensure_dir_exists(self.data_dir) + ensure_dir_exists(self.working_data_dir) + self.output_data_dir = os.path.join(self.output_dir, 'data/{}_{}'.format(self.instrument.lower(), self.aperture.lower())) + if len(new_entries) > 0: + ensure_dir_exists(self.output_data_dir) # Get any new files to process new_files = [] for file_entry in new_entries: - output_filename = os.path.join(self.data_dir, file_entry['filename']) + output_filename = os.path.join(self.working_data_dir, file_entry['filename']) output_filename = output_filename.replace('_uncal.fits', '_uncal_0thgroup.fits').replace('_dark.fits', '_uncal_0thgroup.fits') # Dont process files that already exist in the bias stats database diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py index c73dd3543..00de6b2bf 100755 --- a/jwql/instrument_monitors/common_monitors/dark_monitor.py +++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py @@ -135,7 +135,7 @@ class Dark(): output_dir : str Path into which outputs will be placed - data_dir : str + working_dir : str Path into which new dark files will be copied to be worked on query_start : float @@ -515,7 +515,7 @@ def get_baseline_filename(self): else: filename = query.all()[0].baseline_file # Specify the full path - filename = os.path.join(self.output_dir, 'mean_slope_images', filename) + filename = os.path.join(get_config()['outputs'], 'dark_monitor', 'mean_slope_images', filename) logging.info('Baseline filename: {}'.format(filename)) session.close() @@ -684,7 +684,7 @@ def process(self, file_list): rate_file = filename.replace("dark", "rate") rate_file_name = os.path.basename(rate_file) - local_rate_file = os.path.join(self.data_dir, rate_file_name) + local_rate_file = os.path.join(self.working_data_dir, rate_file_name) if os.path.isfile(local_rate_file): logging.info("\t\tFile {} exists, skipping pipeline".format(local_rate_file)) @@ -814,7 +814,7 @@ def process(self, file_list): histogram, bins) = self.stats_by_amp(slope_image, amp_bounds) # Remove the input files in order to save disk space - files_to_remove = glob(f'{self.data_dir}/*fits') + files_to_remove = glob(f'{self.working_data_dir}/*fits') for filename in files_to_remove: os.remove(filename) @@ -888,8 +888,8 @@ def run(self): apertures_to_skip = ['NRCALL_FULL', 'NRCAS_FULL', 'NRCBS_FULL'] - # Get the output directory - self.output_dir = 
os.path.join(get_config()['outputs'], 'dark_monitor') + # Get the working directory + self.working_dir = os.path.join(get_config()['working'], 'dark_monitor') # Read in config file that defines the thresholds for the number # of dark files that must be present in order for the monitor to run @@ -1004,11 +1004,11 @@ def run(self): if monitor_run: # Set up directories for the copied data - ensure_dir_exists(os.path.join(self.output_dir, 'data')) - self.data_dir = os.path.join(self.output_dir, - 'data/{}_{}'.format(self.instrument.lower(), - self.aperture.lower())) - ensure_dir_exists(self.data_dir) + ensure_dir_exists(os.path.join(self.working_dir, 'data')) + self.working_data_dir = os.path.join(self.working_dir, + 'data/{}_{}'.format(self.instrument.lower(), + self.aperture.lower())) + ensure_dir_exists(self.working_data_dir) # Copy files from filesystem dark_files, not_copied = copy_files(new_filenames, self.data_dir) diff --git a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py index e87016faf..2154ee00a 100755 --- a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py +++ b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py @@ -21,12 +21,12 @@ After filtering the data, the monitor calcualtes statistics. The monitor supports several different types averaging. These include: -1. **daily\_means** - This is designed for mnemonics whose values do not change much over the course of a +1. **daily_means** - This is designed for mnemonics whose values do not change much over the course of a day. In this case, mnemonic data is retrieved over a small amount of time each day (e.g. 12:00 - 12:15). From these data, a daily mean is calculated. For all other types of telemetry, the EDB queries span the full day. -2. **block\_means** - These are mnemonics where the user wishes to see mean values associated with each +2. **block_means** - These are mnemonics where the user wishes to see mean values associated with each block of entries in the retrieved and filtered data. For example, you want to examine a voltage at times when some other current is less than 0.25A. The script will read in all telemetry data, and filter out data points for times where the current did not meet the criteria. It will then calculate the mean of @@ -34,15 +34,15 @@ 3:00, and good again from 3:00-4:00, then the monitor will calculate a mean value for the 2:00-2:30 period, and a mean from the 3:00-4:00 period. -3. **time\_interval** - Mnemonics in this category have their data retrieved and filtered, and then averaged +3. **time_interval** - Mnemonics in this category have their data retrieved and filtered, and then averaged over the requested time interval. For example, if the user sets a time interval of 5 minutes, then the monitor caculates the mean value within each 5-minute block of the total time range of the data, and plots the average values. -4. **every\_change** - This is the most complex case. Mnemonics in this category have their data filtered -and organized based on the value of a secondary mnemonic. For example, the IMIR\_HK\_GW14\_POS\_RATIO returns +4. **every_change** - This is the most complex case. Mnemonics in this category have their data filtered +and organized based on the value of a secondary mnemonic. For example, the IMIR_HK_GW14_POS_RATIO returns a measure of the position of MIRI's grating wheel. 
diff --git a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py
index e87016faf..2154ee00a 100755
--- a/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/edb_telemetry_monitor.py
@@ -21,12 +21,12 @@
 After filtering the data, the monitor calcualtes statistics. The monitor supports several
 different types averaging. These include:

-1. **daily\_means** - This is designed for mnemonics whose values do not change much over the course of a
+1. **daily_means** - This is designed for mnemonics whose values do not change much over the course of a
 day. In this case, mnemonic data is retrieved over a small amount of time each day (e.g. 12:00 - 12:15).
 From these data, a daily mean is calculated. For all other types of telemetry, the EDB queries span
 the full day.

-2. **block\_means** - These are mnemonics where the user wishes to see mean values associated with each
+2. **block_means** - These are mnemonics where the user wishes to see mean values associated with each
 block of entries in the retrieved and filtered data. For example, you want to examine a voltage at times
 when some other current is less than 0.25A. The script will read in all telemetry data, and filter out
 data points for times where the current did not meet the criteria. It will then calculate the mean of
@@ -34,15 +34,15 @@
 3:00, and good again from 3:00-4:00, then the monitor will calculate a mean value for the 2:00-2:30
 period, and a mean from the 3:00-4:00 period.

-3. **time\_interval** - Mnemonics in this category have their data retrieved and filtered, and then averaged
+3. **time_interval** - Mnemonics in this category have their data retrieved and filtered, and then averaged
 over the requested time interval. For example, if the user sets a time interval of 5 minutes, then the
 monitor caculates the mean value within each 5-minute block of the total time range of the data, and plots
 the average values.

-4. **every\_change** - This is the most complex case. Mnemonics in this category have their data filtered
-and organized based on the value of a secondary mnemonic. For example, the IMIR\_HK\_GW14\_POS\_RATIO returns
+4. **every_change** - This is the most complex case. Mnemonics in this category have their data filtered
+and organized based on the value of a secondary mnemonic. For example, the IMIR_HK_GW14_POS_RATIO returns
 a measure of the position of MIRI's grating wheel. We can plot this position as a function of the commanded
-location of the grating wheel, which is provided by IMIR\_HK\_GW14\_CUR\_POS. In this case, the monitor will
+location of the grating wheel, which is provided by IMIR_HK_GW14_CUR_POS. In this case, the monitor will
 loop over the commanded positions and for each, gather the measured position information. The measured
 positions associated with each commanded position will then be plotted separately. Note that this use of
 "every change" is separate from the idea of every-change telemetry, in which telemetry points are
@@ -52,15 +52,15 @@
 5. **all** - In this case, no averaging is done. (Although filtering is still done) All filtered data are
 kept as they are retrived from the EDB, and plotted without any modification.

-6. **all+daily\_means** - This is a combination of the "all" and "daily\_means" cases above. All data points
+6. **all+daily_means** - This is a combination of the "all" and "daily_means" cases above. All data points
 are retrieved from the EDB and optionally filtered by dependencies. Then daily means are calculated. Both
 the full set of data and the daily means are plotted, along with deviations from the mean.

-7. **all+block\_means** - This is a combination of the "all" and "block\_means" cases above. All data points
+7. **all+block_means** - This is a combination of the "all" and "block_means" cases above. All data points
 are retrieved from the EDB and optionally filtered by dependencies. Then means for each block of good data
 are calculated. Both the full set of data and the means are plotted, along with deviations from the mean.

-8. **all+time\_interval** - This is a combination of the "all" and "time\_interval" cases above. All data points
+8. **all+time_interval** - This is a combination of the "all" and "time_interval" cases above. All data points
 are retrieved from the EDB and optionally filtered by dependencies. Then means are calculated for each block
 of time lasting the duration of the time interval. Both the full set of data and the means are plotted,
 along with deviations from the mean.
@@ -72,24 +72,24 @@
 The entry for each mnemonic has several pieces of information, described below.

 - **name**: Name of the mnemonic as it appears in the EDB.
-- **database\_id** Optional name to use in the plot title for this mnemonic. Any averaged data saved to the JWQL database will be saved under this name if it is present.
+- **database_id** Optional name to use in the plot title for this mnemonic. Any averaged data saved to the JWQL database will be saved under this name if it is present.
 - **description**: Summary describing the data contained in the mnemonic. Placed in the plot title.
 - **dependency**: This is a list of mnemonics and conditions that will be used to filter the data
-- **plot\_data**: Description of how the data are to be plotted. There are two options: "nominal", in which case
+- **plot_data**: Description of how the data are to be plotted. There are two options: "nominal", in which case
 the mnemonic data are plotted as-is, and "*" where is the name of another mnemonic. In this case, the data
 for this second mnemonic are retrieved using the same dependencies as the primary mnemonic. The primary
 mnemonic and this second mnemonic are then multiplied together and plotted. This option was designed around
 plotting power as the product of current and voltage.

-A further option for the **"plot\_data"** field is the addition of a comma-separated list of statistics to be overplotted.
+A further option for the **"plot_data"** field is the addition of a comma-separated list of statistics to be overplotted.
 Options are: "mean", "median", "max", and "min". Note that this is a little confusing, because in many cases
 the menmonic's data will already contain the median value of the data (and the original data as returned from the EDB will not be
-available). The monitor realized this though, so if you specify "mean" for a mnemonic in the "daily\_mean" list, it will simply
+available). The monitor realized this though, so if you specify "mean" for a mnemonic in the "daily_mean" list, it will simply
 plot the same data twice, on top of itself.

-As an example, in order to plot the daily mean and maximum values of the product of SE\_ZIMIRICEA and SE\_ZBUSVLT, the plot\_data
-entry would be: "*SE\_ZBUSVLT,max". If you also wanted to plot the minimum daily value, the entry would be: "*SE\_ZBUSVLT,max,min".
-And similarly, to plot SE\_ZIMIRICEA on its own (not as a product), the plot\_data entries shown above would become: "nominal,max"
+As an example, in order to plot the daily mean and maximum values of the product of SE_ZIMIRICEA and SE_ZBUSVLT, the plot_data
+entry would be: "*SE_ZBUSVLT,max". If you also wanted to plot the minimum daily value, the entry would be: "*SE_ZBUSVLT,max,min".
+And similarly, to plot SE_ZIMIRICEA on its own (not as a product), the plot_data entries shown above would become: "nominal,max"
 and "nominal,max,min".

 * **nominal_value**: Optional. The "expected" value for this mnemonic. If provided, a horizontal dashed line will be added at this value.
@@ -660,8 +660,8 @@ def execute(self, mnem_to_query=None, plot_start=None, plot_end=None):

         # Set up directory structure to hold the saved plots
         config = get_config()
-        base_dir = os.path.join(config["outputs"], "edb_telemetry_monitor")
-        ensure_dir_exists(base_dir)
+        outputs_dir = os.path.join(config["outputs"], "edb_telemetry_monitor")
+        ensure_dir_exists(outputs_dir)

         # Case where the user is requesting the monitor run for some subset of
         # mnemonics for some non-standard time span
@@ -677,7 +677,7 @@ def execute(self, mnem_to_query=None, plot_start=None, plot_end=None):
             monitor_dir = os.path.dirname(os.path.abspath(__file__))

             # Define the output directory in which the html files will be saved
-            self.plot_output_dir = os.path.join(base_dir, instrument_name)
+            self.plot_output_dir = os.path.join(outputs_dir, instrument_name)
             ensure_dir_exists(self.plot_output_dir)

             # File of mnemonics to monitor
@@ -710,7 +710,7 @@ def execute(self, mnem_to_query=None, plot_start=None, plot_end=None):
                 mnemonic_file = os.path.join(monitor_dir, 'edb_monitor_data', f'{instrument_name}_mnemonics_to_monitor.json')

                 # Define the output directory in which the html files will be saved
-                self.plot_output_dir = os.path.join(base_dir, instrument_name)
+                self.plot_output_dir = os.path.join(outputs_dir, instrument_name)
                 ensure_dir_exists(self.plot_output_dir)

                 # Read in file with nominal list of mnemonics
@@ -991,14 +991,14 @@ def get_dependency_data(self, dependency, starttime, endtime):
         if dependency["name"] in self.query_results:

             # We need the full time to be covered
-            if ((self.query_results[dependency["name"]].requested_start_time <= starttime)
-                    and (self.query_results[dependency["name"]].requested_end_time >= endtime)):
+            if ((self.query_results[dependency["name"]].requested_start_time <= starttime) and
+                    (self.query_results[dependency["name"]].requested_end_time >= endtime)):

                 logging.info(f'Dependency {dependency["name"]} is already present in self.query_results.')

                 # Extract data for the requested time range
-                matching_times = np.where((self.query_results[dependency["name"]].data["dates"] >= starttime)
-                                          & (self.query_results[dependency["name"]].data["dates"] <= endtime))
+                matching_times = np.where((self.query_results[dependency["name"]].data["dates"] >= starttime) &
+                                          (self.query_results[dependency["name"]].data["dates"] <= endtime))
                 dep_mnemonic = {"dates": self.query_results[dependency["name"]].data["dates"][matching_times],
                                 "euvalues": self.query_results[dependency["name"]].data["euvalues"][matching_times]}

@@ -1138,16 +1138,16 @@ def get_history_every_change(self, mnemonic, start_date, end_date):
                 devs = []

             # Keep only data that fall at least partially within the plot range
-            if (((np.min(row.time) > self._plot_start) & (np.min(row.time) < self._plot_end)) \
-                    | ((np.max(row.time) > self._plot_start) & (np.max(row.time) < self._plot_end))):
+            if (((np.min(row.time) > self._plot_start) & (np.min(row.time) < self._plot_end))
+                    | ((np.max(row.time) > self._plot_start) & (np.max(row.time) < self._plot_end))):
                 times.extend(row.time)
                 values.extend(row.mnemonic_value)
                 medians.append(row.median)
                 devs.append(row.stdev)
                 hist[row.dependency_value] = (times, values, medians, devs)
         else:
-            if (((np.min(row.time) > self._plot_start) & (np.min(row.time) < self._plot_end)) \
-                    | ((np.max(row.time) > self._plot_start) & (np.max(row.time) < self._plot_end))):
+            if (((np.min(row.time) > self._plot_start) & (np.min(row.time) < self._plot_end))
+                    | ((np.max(row.time) > self._plot_start) & (np.max(row.time) < self._plot_end))):
                 hist[row.dependency_value] = (row.time, row.mnemonic_value, row.median, row.stdev)

     return hist
@@ -1854,7 +1854,7 @@ def calculate_statistics(mnemonic_instance, telemetry_type):
         mnemonic_instance.block_stats()
     elif telemetry_type == "every_change":
         mnemonic_instance.block_stats_filter_positions()
-        #mnemonic_instance.block_stats(ignore_vals=[0.], ignore_edges=True, every_change=True)
+        # mnemonic_instance.block_stats(ignore_vals=[0.], ignore_edges=True, every_change=True)
     elif telemetry_type == "time_interval":
         mnemonic_instance.timed_stats()
     elif telemetry_type == "all":
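
The mnemonic entries described in the module docstring above live in the per-instrument *_mnemonics_to_monitor.json files. As a rough illustration only (the values and the keys used inside "dependency" are invented, not copied from the real MIRI file), one every-change style entry could look like the Python dictionary such a JSON entry would load into:

    # Hypothetical entry, for illustration of the fields described in the docstring.
    example_entry = {
        "name": "IMIR_HK_GW14_POS_RATIO",              # mnemonic as it appears in the EDB
        "database_id": "IMIR_HK_GW14_POS_RATIO_GW14",  # optional alternate name used for plots and database entries
        "description": "MIRI grating wheel 14 measured position ratio",
        "dependency": [{"name": "IMIR_HK_GW14_CUR_POS"}],  # filter/organize by the commanded position (keys assumed)
        "plot_data": "nominal,max,min",                 # plot the data as-is, with max and min overplotted
        "nominal_value": 1.0,                           # optional expected value, drawn as a dashed line
    }
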
""" - output_filename = os.path.join(self.data_dir, '{}.png'.format(outname)) + output_filename = os.path.join(self.output_data_dir, '{}.png'.format(outname)) # Get image scale limits zscale = ZScaleInterval() @@ -445,7 +445,7 @@ def process(self, file_list): cal_data = cal_data[:, 5:-1, :, :] # Make the readnoise image - readnoise_outfile = os.path.join(self.data_dir, os.path.basename(processed_file.replace('.fits', '_readnoise.fits'))) + readnoise_outfile = os.path.join(self.output_data_dir, os.path.basename(processed_file.replace('.fits', '_readnoise.fits'))) readnoise = self.make_readnoise_image(cal_data) # fits.writeto(readnoise_outfile, readnoise, overwrite=True) # logging.info('\tReadnoise image saved to {}'.format(readnoise_outfile)) @@ -549,6 +549,8 @@ def run(self): # Get the output directory and setup a directory to store the data self.output_dir = os.path.join(get_config()['outputs'], 'readnoise_monitor') ensure_dir_exists(os.path.join(self.output_dir, 'data')) + self.working_dir = os.path.join(get_config()['working'], 'readnoise_monitor') + ensure_dir_exists(os.path.join(self.working_dir, 'data')) # Use the current time as the end time for MAST query self.query_end = Time.now().mjd @@ -589,15 +591,18 @@ def run(self): logging.info('\tAperture: {}, new entries: {}'.format(self.aperture, len(new_entries))) # Set up a directory to store the data for this aperture - self.data_dir = os.path.join(self.output_dir, 'data/{}_{}'.format(self.instrument.lower(), self.aperture.lower())) + self.output_data_dir = os.path.join(self.output_dir, 'data/{}_{}'.format(self.instrument.lower(), self.aperture.lower())) if len(new_entries) > 0: - ensure_dir_exists(self.data_dir) + ensure_dir_exists(self.output_data_dir) + self.working_data_dir = os.path.join(self.working_dir, 'data/{}_{}'.format(self.instrument.lower(), self.aperture.lower())) + if len(new_entries) > 0: + ensure_dir_exists(self.working_data_dir) # Get any new files to process new_files = [] checked_files = [] for file_entry in new_entries: - output_filename = os.path.join(self.data_dir, file_entry['filename'].replace('_dark', '_uncal')) + output_filename = os.path.join(self.working_data_dir, file_entry['filename'].replace('_dark', '_uncal')) # Sometimes both the dark and uncal name of a file is picked up in new_entries if output_filename in checked_files: @@ -627,7 +632,7 @@ def run(self): # Skip processing if the file doesnt have enough groups/ints to calculate the readnoise. # MIRI needs extra since they omit the first five and last group before calculating the readnoise. 
if total_cds_frames >= 10: - shutil.copy(uncal_filename, self.data_dir) + shutil.copy(uncal_filename, self.working_data_dir) logging.info('\tCopied {} to {}'.format(uncal_filename, output_filename)) set_permissions(output_filename) new_files.append(output_filename) diff --git a/jwql/shared_tasks/run_pipeline.py b/jwql/shared_tasks/run_pipeline.py index 07191e49a..ffc197310 100755 --- a/jwql/shared_tasks/run_pipeline.py +++ b/jwql/shared_tasks/run_pipeline.py @@ -313,8 +313,8 @@ def run_save_jump(input_file, short_name, work_directory, instrument, ramp_fit=T if __name__ == '__main__': - status_dir = os.path.join(get_config()['outputs'], 'calibrated_data') - general_status_file = os.path.join(status_dir, "general_status.txt") + working_dir = os.path.join(get_config()['working'], 'calibrated_data') + general_status_file = os.path.join(working_dir, "general_status.txt") with open(general_status_file, "w") as status_file: status_file.write("Started at {}\n".format(time.ctime())) diff --git a/jwql/shared_tasks/shared_tasks.py b/jwql/shared_tasks/shared_tasks.py index f076e2035..8e3be0a5c 100644 --- a/jwql/shared_tasks/shared_tasks.py +++ b/jwql/shared_tasks/shared_tasks.py @@ -185,15 +185,15 @@ def _caller(*args, **kwargs): def create_task_log_handler(logger, propagate): log_file_name = configure_logging('shared_tasks') - output_dir = os.path.join(get_config()['outputs'], 'calibrated_data') - ensure_dir_exists(output_dir) + working_dir = os.path.join(get_config()['working'], 'calibrated_data') + ensure_dir_exists(working_dir) celery_log_file_handler = FileHandler(log_file_name) logger.addHandler(celery_log_file_handler) for handler in logger.handlers: handler.setFormatter(TaskFormatter('%(asctime)s - %(task_id)s - %(task_name)s - %(name)s - %(levelname)s - %(message)s')) logger.propagate = propagate - if not os.path.exists(os.path.join(output_dir, "celery_pipeline_log.cfg")): - with open(os.path.join(output_dir, "celery_pipeline_log.cfg"), "w") as cfg_file: + if not os.path.exists(os.path.join(working_dir, "celery_pipeline_log.cfg")): + with open(os.path.join(working_dir, "celery_pipeline_log.cfg"), "w") as cfg_file: cfg_file.write("[*]\n") cfg_file.write("level = WARNING\n") cfg_file.write("handler = append:{}\n".format(log_file_name)) @@ -323,7 +323,7 @@ def run_calwebb_detector1(input_file_name, short_name, ext_or_exts, instrument, ext_or_exts = [ext_or_exts] input_dir = os.path.join(config['transfer_dir'], "incoming") - cal_dir = os.path.join(config['outputs'], "calibrated_data") + cal_dir = os.path.join(config['working'], "calibrated_data") output_dir = os.path.join(config['transfer_dir'], "outgoing") msg = "Input from {}, calibrate in {}, output to {}" logging.info(msg.format(input_dir, cal_dir, output_dir)) @@ -457,7 +457,7 @@ def calwebb_detector1_save_jump(input_file_name, instrument, ramp_fit=True, save config = get_config() input_dir = os.path.join(config["transfer_dir"], "incoming") - cal_dir = os.path.join(config['outputs'], "calibrated_data") + cal_dir = os.path.join(config['working'], "calibrated_data") output_dir = os.path.join(config['transfer_dir'], "outgoing") msg = "Input from {}, calibrate in {}, output to {}" logging.info(msg.format(input_dir, cal_dir, output_dir)) diff --git a/jwql/utils/constants.py b/jwql/utils/constants.py index 1c554a6f8..48af112b4 100644 --- a/jwql/utils/constants.py +++ b/jwql/utils/constants.py @@ -379,7 +379,7 @@ } # output subdirectories to keep track of via the filesytem monitor -FILESYSTEM_MONITOR_SUBDIRS = ["logs", "outputs", "preview_images", 
"thumbnails", "all"] +FILESYSTEM_MONITOR_SUBDIRS = ['logs', 'outputs', 'working', 'preview_images', 'thumbnails', 'all'] FILTERS_PER_INSTRUMENT = { "fgs": [], diff --git a/jwql/utils/monitor_template.py b/jwql/utils/monitor_template.py index 863b26e0d..9e36ccc0f 100644 --- a/jwql/utils/monitor_template.py +++ b/jwql/utils/monitor_template.py @@ -130,6 +130,7 @@ def monitor_template_main(): plt.sizing_mode = 'stretch_both' # Necessary for responsive sizing on web app script, div = components(plt) + working_data_dir = SETTINGS['working'] plot_output_dir = SETTINGS['outputs'] div_outfile = os.path.join(plot_output_dir, 'monitor_name', filename_of_interest + "_component.html") diff --git a/jwql/utils/utils.py b/jwql/utils/utils.py index 113dd66c9..fd5ecbabd 100644 --- a/jwql/utils/utils.py +++ b/jwql/utils/utils.py @@ -99,6 +99,7 @@ def _validate_config(config_file_dict): "server_type": {"type": "string"}, "log_dir": {"type": "string"}, "mast_token": {"type": "string"}, + "working": {"type": "string"}, "outputs": {"type": "string"}, "preview_image_filesystem": {"type": "string"}, "filesystem": {"type": "string"}, @@ -113,7 +114,7 @@ def _validate_config(config_file_dict): "preview_image_filesystem", "thumbnail_filesystem", "outputs", "jwql_dir", "admin_account", "log_dir", "test_dir", "test_data", "setup_file", "auth_mast", - "mast_token"] + "mast_token", "working"] } # Test that the provided config file dict matches the schema