diff --git a/datalab/datalab_session/analysis/get_tif.py b/datalab/datalab_session/analysis/get_tif.py index bac6cbb..211ecac 100644 --- a/datalab/datalab_session/analysis/get_tif.py +++ b/datalab/datalab_session/analysis/get_tif.py @@ -1,5 +1,5 @@ -from datalab.datalab_session.file_utils import create_tif, get_fits -from datalab.datalab_session.s3_utils import key_exists, add_file_to_bucket, get_s3_url +from datalab.datalab_session.file_utils import create_tif +from datalab.datalab_session.s3_utils import key_exists, add_file_to_bucket, get_s3_url, get_fits def get_tif(input: dict): """ diff --git a/datalab/datalab_session/analysis/line_profile.py b/datalab/datalab_session/analysis/line_profile.py index 3479a18..47b2729 100644 --- a/datalab/datalab_session/analysis/line_profile.py +++ b/datalab/datalab_session/analysis/line_profile.py @@ -4,6 +4,7 @@ from astropy import coordinates from datalab.datalab_session.file_utils import scale_points, get_hdu +from datalab.datalab_session.s3_utils import get_fits # For creating an array of brightness along a user drawn line def line_profile(input: dict): @@ -19,7 +20,9 @@ def line_profile(input: dict): y2 (int): The y coordinate of the ending point } """ - sci_hdu = get_hdu(input['basename'], 'SCI') + fits_path = get_fits(input['basename']) + + sci_hdu = get_hdu(fits_path, 'SCI') x_points, y_points = scale_points(input["height"], input["width"], sci_hdu.data.shape[0], sci_hdu.data.shape[1], x_points=[input["x1"], input["x2"]], y_points=[input["y1"], input["y2"]]) diff --git a/datalab/datalab_session/analysis/source_catalog.py b/datalab/datalab_session/analysis/source_catalog.py index c64b767..cb331bf 100644 --- a/datalab/datalab_session/analysis/source_catalog.py +++ b/datalab/datalab_session/analysis/source_catalog.py @@ -1,13 +1,16 @@ import numpy as np from datalab.datalab_session.file_utils import get_hdu, scale_points +from datalab.datalab_session.s3_utils import get_fits def source_catalog(input: dict): """ Returns a dict representing the source catalog data with x,y coordinates and flux values """ - cat_hdu = get_hdu(input['basename'], 'CAT') - sci_hdu = get_hdu(input['basename'], 'SCI') + fits_path = get_fits(input['basename']) + + cat_hdu = get_hdu(fits_path, 'CAT') + sci_hdu = get_hdu(fits_path, 'SCI') # The number of sources to send back to the frontend, default 50 SOURCE_CATALOG_COUNT = min(50, len(cat_hdu.data["x"])) diff --git a/datalab/datalab_session/data_operations/data_operation.py b/datalab/datalab_session/data_operations/data_operation.py index 106bf04..e87d158 100644 --- a/datalab/datalab_session/data_operations/data_operation.py +++ b/datalab/datalab_session/data_operations/data_operation.py @@ -5,6 +5,7 @@ from django.core.cache import cache import numpy as np +from datalab.datalab_session.s3_utils import get_fits from datalab.datalab_session.tasks import execute_data_operation from datalab.datalab_session.file_utils import get_hdu @@ -57,7 +58,7 @@ def perform_operation(self): status = self.get_status() if status == 'PENDING' or status == 'FAILED': self.set_status('IN_PROGRESS') - self.set_percent_completion(0.0) + self.set_operation_progress(0.0) # This asynchronous task will call the operate() method on the proper operation execute_data_operation.send(self.name(), self.input_data) @@ -78,15 +79,16 @@ def set_message(self, message: str): def get_message(self) -> str: return cache.get(f'operation_{self.cache_key}_message', '') - def set_percent_completion(self, percent_completed: float): - 
cache.set(f'operation_{self.cache_key}_percent_completion', percent_completed, CACHE_DURATION)
+    def set_operation_progress(self, percent_completed: float):
+        cache.set(f'operation_{self.cache_key}_progress', percent_completed, CACHE_DURATION)
 
-    def get_percent_completion(self) -> float:
-        return cache.get(f'operation_{self.cache_key}_percent_completion', 0.0)
+    def get_operation_progress(self) -> float:
+        return cache.get(f'operation_{self.cache_key}_progress', 0.0)
 
-    def set_output(self, output_data: dict):
+    def set_output(self, output):
+        output_data = {'output_files': output if isinstance(output, list) else [output]}
         self.set_status('COMPLETED')
-        self.set_percent_completion(1.0)
+        self.set_operation_progress(1.0)
         cache.set(f'operation_{self.cache_key}_output', output_data, CACHE_DURATION)
 
     def get_output(self) -> dict:
@@ -96,8 +98,7 @@ def set_failed(self, message: str):
         self.set_status('FAILED')
         self.set_message(message)
 
-    def get_fits_npdata(self, input_files: list[dict], percent=None, cur_percent=None) -> list[np.memmap]:
-        total_files = len(input_files)
+    def get_fits_npdata(self, input_files: list[dict]) -> list[np.memmap]:
         image_data_list = []
 
         # get the fits urls and extract the image data
@@ -105,10 +106,10 @@
             basename = file_info.get('basename', 'No basename found')
             source = file_info.get('source', 'No source found')
 
-            sci_hdu = get_hdu(basename, 'SCI', source)
+            fits_path = get_fits(basename, source)
+            sci_hdu = get_hdu(fits_path, 'SCI')
             image_data_list.append(sci_hdu.data)
-
-            if percent is not None and cur_percent is not None:
-                self.set_percent_completion(cur_percent + index/total_files * percent)
+
+            self.set_operation_progress((index + 1) / len(input_files) * 0.5)
 
         return image_data_list
diff --git a/datalab/datalab_session/data_operations/long.py b/datalab/datalab_session/data_operations/long.py
index 78c889f..909bbdc 100644
--- a/datalab/datalab_session/data_operations/long.py
+++ b/datalab/datalab_session/data_operations/long.py
@@ -42,9 +42,6 @@ def operate(self):
         for i, file in enumerate(self.input_data.get('input_files', [])):
             print(f"Processing long operation on file {file.get('basename', 'No basename found')}")
             sleep(per_image_timeout)
-            self.set_percent_completion((i+1) / num_files)
+            self.set_operation_progress((i+1) / num_files)
 
         # Done "processing" the files so set the output which sets the final status
-        output = {
-            'output_files': self.input_data.get('input_files', [])
-        }
-        self.set_output(output)
+        self.set_output(self.input_data.get('input_files', []))
diff --git a/datalab/datalab_session/data_operations/median.py b/datalab/datalab_session/data_operations/median.py
index d93a745..a8198f5 100644
--- a/datalab/datalab_session/data_operations/median.py
+++ b/datalab/datalab_session/data_operations/median.py
@@ -4,8 +4,7 @@
 from datalab.datalab_session.data_operations.data_operation import BaseDataOperation
 from datalab.datalab_session.exceptions import ClientAlertException
-from datalab.datalab_session.file_utils import create_fits, crop_arrays, create_jpgs
-from datalab.datalab_session.s3_utils import save_fits_and_thumbnails
+from datalab.datalab_session.file_utils import crop_arrays, create_output
 
 log = logging.getLogger()
 log.setLevel(logging.INFO)
@@ -49,7 +48,7 @@ def operate(self):
 
         log.info(f'Executing median operation on {len(input)} files')
 
-        image_data_list = self.get_fits_npdata(input, percent=0.4, cur_percent=0.0)
+        image_data_list = 
self.get_fits_npdata(input)
 
         cropped_data_list = crop_arrays(image_data_list)
         stacked_data = np.stack(cropped_data_list, axis=2)
@@ -57,13 +56,9 @@
         # using the numpy library's median method
         median = np.median(stacked_data, axis=2)
 
-        fits_file = create_fits(self.cache_key, median)
+        self.set_operation_progress(0.80)
 
-        large_jpg_path, small_jpg_path = create_jpgs(self.cache_key, fits_file)
-
-        output_file = save_fits_and_thumbnails(self.cache_key, fits_file, large_jpg_path, small_jpg_path)
-
-        output = {'output_files': [output_file]}
+        output = create_output(self.cache_key, median, comment=f'Product of Datalab Median on files {", ".join([image["basename"] for image in input])}')
 
         self.set_output(output)
         log.info(f'Median output: {self.get_output()}')
diff --git a/datalab/datalab_session/data_operations/noop.py b/datalab/datalab_session/data_operations/noop.py
index 35931be..a5806cd 100644
--- a/datalab/datalab_session/data_operations/noop.py
+++ b/datalab/datalab_session/data_operations/noop.py
@@ -42,7 +42,4 @@ def wizard_description():
 
     def operate(self):
         print("No-op triggered!")
-        output = {
-            'output_files': self.input_data.get('input_files', [])
-        }
-        self.set_output(output)
+        self.set_output(self.input_data.get('input_files', []))
diff --git a/datalab/datalab_session/data_operations/normalization.py b/datalab/datalab_session/data_operations/normalization.py
index aff5752..e250f6e 100644
--- a/datalab/datalab_session/data_operations/normalization.py
+++ b/datalab/datalab_session/data_operations/normalization.py
@@ -3,8 +3,7 @@
 import numpy as np
 
 from datalab.datalab_session.data_operations.data_operation import BaseDataOperation
-from datalab.datalab_session.file_utils import create_fits, create_jpgs
-from datalab.datalab_session.s3_utils import save_fits_and_thumbnails
+from datalab.datalab_session.file_utils import create_output
 
 log = logging.getLogger()
 log.setLevel(logging.INFO)
@@ -46,21 +45,15 @@ def operate(self):
         log.info(f'Executing normalization operation on {len(input)} file(s)')
 
         image_data_list = self.get_fits_npdata(input)
-        self.set_percent_completion(0.40)
 
         output_files = []
-        for index, image in enumerate(image_data_list):
+        for index, image in enumerate(image_data_list, start=1):
             median = np.median(image)
             normalized_image = image / median
 
-            fits_file = create_fits(self.cache_key, normalized_image)
-            large_jpg_path, small_jpg_path = create_jpgs(self.cache_key, fits_file)
-            output_file = save_fits_and_thumbnails(self.cache_key, fits_file, large_jpg_path, small_jpg_path, index=index)
-            output_files.append(output_file)
+            output = create_output(self.cache_key, normalized_image, index=index, comment=f'Product of Datalab Normalization on file {input[index - 1]["basename"]}')
+            output_files.append(output)
+            self.set_operation_progress(0.5 + index/len(image_data_list) * 0.4)
 
-            self.set_percent_completion(self.get_percent_completion() + .40 * (index + 1) / len(input))
-
-        output = {'output_files': output_files}
-
-        self.set_output(output)
+        self.set_output(output_files)
 
         log.info(f'Normalization output: {self.get_output()}')
diff --git a/datalab/datalab_session/data_operations/rgb_stack.py b/datalab/datalab_session/data_operations/rgb_stack.py
index 58e347f..51b1cf4 100644
--- a/datalab/datalab_session/data_operations/rgb_stack.py
+++ b/datalab/datalab_session/data_operations/rgb_stack.py
@@ -5,8 +5,8 @@
 from datalab.datalab_session.data_operations.data_operation import BaseDataOperation
 from datalab.datalab_session.exceptions import ClientAlertException
-from 
datalab.datalab_session.file_utils import get_fits, crop_arrays, create_fits, create_jpgs -from datalab.datalab_session.s3_utils import save_fits_and_thumbnails +from datalab.datalab_session.file_utils import create_output, crop_arrays, create_jpgs +from datalab.datalab_session.s3_utils import get_fits log = logging.getLogger() log.setLevel(logging.INFO) @@ -59,30 +59,27 @@ def wizard_description(): def operate(self): rgb_input_list = self.input_data['red_input'] + self.input_data['green_input'] + self.input_data['blue_input'] - if len(rgb_input_list) == 3: - log.info(f'Executing RGB Stack operation on files: {rgb_input_list}') + if len(rgb_input_list) != 3: + raise ClientAlertException('RGB stack requires exactly 3 files') + + log.info(f'Executing RGB Stack operation on files: {rgb_input_list}') - fits_paths = [] - for file in rgb_input_list: - fits_paths.append(get_fits(file.get('basename'))) - self.set_percent_completion(self.get_percent_completion() + 0.2) - - large_jpg_path, small_jpg_path = create_jpgs(self.cache_key, fits_paths, color=True) + fits_paths = [] + for index, file in enumerate(rgb_input_list, start=1): + fits_paths.append(get_fits(file.get('basename'))) + self.set_operation_progress(index * 0.2) + + large_jpg_path, small_jpg_path = create_jpgs(self.cache_key, fits_paths, color=True) - # color photos take three files, so we store it as one fits file with a 3d SCI ndarray - arrays = [fits.open(file)['SCI'].data for file in fits_paths] - cropped_data_list = crop_arrays(arrays) - stacked_data = np.stack(cropped_data_list, axis=2) - - fits_file = create_fits(self.cache_key, stacked_data) + # color photos take three files, so we store it as one fits file with a 3d SCI ndarray + arrays = [fits.open(file)['SCI'].data for file in fits_paths] + cropped_data_list = crop_arrays(arrays) + stacked_data = np.stack(cropped_data_list, axis=2) - output_file = save_fits_and_thumbnails(self.cache_key, fits_file, large_jpg_path, small_jpg_path) + self.set_operation_progress(0.8) + + rgb_comment = f'Product of Datalab RGB Stack on files {", ".join([image["basename"] for image in rgb_input_list])}' + output = create_output(self.cache_key, stacked_data, large_jpg=large_jpg_path, small_jpg=small_jpg_path, comment=rgb_comment) - output = {'output_files': [output_file]} - else: - output = {'output_files': []} - raise ClientAlertException('RGB Stack operation requires exactly 3 input files') - - self.set_percent_completion(1.0) self.set_output(output) log.info(f'RGB Stack output: {self.get_output()}') diff --git a/datalab/datalab_session/data_operations/stacking.py b/datalab/datalab_session/data_operations/stacking.py index d965a2e..2d80509 100644 --- a/datalab/datalab_session/data_operations/stacking.py +++ b/datalab/datalab_session/data_operations/stacking.py @@ -4,8 +4,7 @@ from datalab.datalab_session.data_operations.data_operation import BaseDataOperation from datalab.datalab_session.exceptions import ClientAlertException -from datalab.datalab_session.file_utils import create_fits, crop_arrays, create_jpgs -from datalab.datalab_session.s3_utils import save_fits_and_thumbnails +from datalab.datalab_session.file_utils import create_output, crop_arrays log = logging.getLogger() log.setLevel(logging.INFO) @@ -52,25 +51,16 @@ def operate(self): image_data_list = self.get_fits_npdata(input_files) - self.set_percent_completion(0.4) - cropped_data = crop_arrays(image_data_list) stacked_data = np.stack(cropped_data, axis=2) - - self.set_percent_completion(0.6) + self.set_operation_progress(0.6) # using 
the numpy library's sum method stacked_sum = np.sum(stacked_data, axis=2) - - self.set_percent_completion(0.8) - - fits_file = create_fits(self.cache_key, stacked_sum) - - large_jpg_path, small_jpg_path = create_jpgs(self.cache_key, fits_file) - - output_file = save_fits_and_thumbnails(self.cache_key, fits_file, large_jpg_path, small_jpg_path) + self.set_operation_progress(0.8) - output = {'output_files': [output_file]} + stacking_comment = f'Product of Datalab Stacking. Stack of {", ".join([image["basename"] for image in input_files])}' + output = create_output(self.cache_key, stacked_sum, comment=stacking_comment) self.set_output(output) log.info(f'Stacked output: {self.get_output()}') diff --git a/datalab/datalab_session/data_operations/subtraction.py b/datalab/datalab_session/data_operations/subtraction.py index b3bca9f..e171cc1 100644 --- a/datalab/datalab_session/data_operations/subtraction.py +++ b/datalab/datalab_session/data_operations/subtraction.py @@ -4,8 +4,7 @@ from datalab.datalab_session.data_operations.data_operation import BaseDataOperation from datalab.datalab_session.exceptions import ClientAlertException -from datalab.datalab_session.file_utils import create_fits, create_jpgs, crop_arrays -from datalab.datalab_session.s3_utils import save_fits_and_thumbnails +from datalab.datalab_session.file_utils import crop_arrays, create_output log = logging.getLogger() log.setLevel(logging.INFO) @@ -51,9 +50,7 @@ def wizard_description(): def operate(self): input_files = self.input_data.get('input_files', []) - print(f'Input files: {input_files}') subtraction_file_input = self.input_data.get('subtraction_file', []) - print(f'Subtraction file: {subtraction_file_input}') if not subtraction_file_input: raise ClientAlertException('Missing a subtraction file') @@ -64,10 +61,9 @@ def operate(self): log.info(f'Executing subtraction operation on {len(input_files)} files') input_image_data_list = self.get_fits_npdata(input_files) - self.set_percent_completion(.30) subtraction_image = self.get_fits_npdata(subtraction_file_input)[0] - self.set_percent_completion(.40) + self.set_operation_progress(0.70) outputs = [] for index, input_image in enumerate(input_image_data_list): @@ -76,15 +72,10 @@ def operate(self): difference_array = np.subtract(input_image, subtraction_image) - fits_file = create_fits(self.cache_key, difference_array) - large_jpg_path, small_jpg_path = create_jpgs(self.cache_key, fits_file) + subtraction_comment = f'Product of Datalab Subtraction of {subtraction_file_input[0]["basename"]} subtracted from {input_files[index]["basename"]}' + outputs.append(create_output(self.cache_key, difference_array, index=index, comment=subtraction_comment)) + + self.set_operation_progress(0.90) - output_file = save_fits_and_thumbnails(self.cache_key, fits_file, large_jpg_path, small_jpg_path, index) - outputs.append(output_file) - - self.set_percent_completion(self.get_percent_completion() + .50 * (index + 1) / len(input_files)) - - output = {'output_files': outputs} - - self.set_output(output) + self.set_output(outputs) log.info(f'Subtraction output: {self.get_output()}') diff --git a/datalab/datalab_session/file_utils.py b/datalab/datalab_session/file_utils.py index c0a5c75..5957648 100644 --- a/datalab/datalab_session/file_utils.py +++ b/datalab/datalab_session/file_utils.py @@ -6,44 +6,41 @@ from fits2image.conversions import fits_to_jpg, fits_to_tif from datalab.datalab_session.exceptions import ClientAlertException -from datalab.datalab_session.s3_utils import get_fits, 
add_file_to_bucket
+from datalab.datalab_session.s3_utils import save_fits_and_thumbnails
 
 log = logging.getLogger()
 log.setLevel(logging.INFO)
 
-def get_hdu(basename: str, extension: str = 'SCI', source: str = 'archive') -> list[fits.HDUList]:
+def get_hdu(path: str, extension: str = 'SCI') -> list[fits.HDUList]:
     """
-    Returns a HDU for the given basename from the source
-    Will download the file to a tmp directory so future calls can open it directly
+    Returns an HDU for the FITS file at the given path
     Warning: this function returns an opened file that must be closed after use
     """
-
-    basename_file_path = get_fits(basename, source)
-
-    hdu = fits.open(basename_file_path)
+    hdu = fits.open(path)
     try:
         extension = hdu[extension]
     except KeyError:
-        raise ClientAlertException(f"{extension} Header not found in fits file {basename}")
+        raise ClientAlertException(f"{extension} extension not found in fits file {path.split('/')[-1]}")
     return extension
 
 def get_fits_dimensions(fits_file, extension: str = 'SCI') -> tuple:
     return fits.open(fits_file)[extension].shape
 
-def create_fits(key: str, image_arr: np.ndarray) -> str:
+def create_fits(key: str, image_arr: np.ndarray, comment=None) -> str:
     """
     Creates a fits file with the given key and image array
     Returns the the path to the fits_file
     """
     header = fits.Header([('KEY', key)])
+    if comment: header.add_comment(comment)
     primary_hdu = fits.PrimaryHDU(header=header)
     image_hdu = fits.ImageHDU(data=image_arr, name='SCI')
     hdu_list = fits.HDUList([primary_hdu, image_hdu])
 
     fits_path = tempfile.NamedTemporaryFile(suffix=f'{key}.fits').name
-    hdu_list.writeto(fits_path)
+    hdu_list.writeto(fits_path, overwrite=True)
 
     return fits_path
@@ -112,3 +109,16 @@ def scale_points(height_1: int, width_1: int, height_2: int, width_2: int, x_poi
         x_points = width_2 - x_points
 
     return x_points, y_points
+
+def create_output(cache_key, np_array, large_jpg=None, small_jpg=None, index=None, comment=None):
+    """
+    Convenience wrapper that builds the output file for an operation
+    Given just a cache_key and numpy array it creates the fits file and jpg previews,
+    or the caller can pass in pre-rendered large/small jpgs to be saved as-is
+    """
+    fits_file = create_fits(cache_key, np_array, comment)
+
+    if not large_jpg or not small_jpg:
+        large_jpg, small_jpg = create_jpgs(cache_key, fits_file)
+
+    return save_fits_and_thumbnails(cache_key, fits_file, large_jpg, small_jpg, index)
diff --git a/datalab/datalab_session/models.py b/datalab/datalab_session/models.py
index f94720c..0814b94 100644
--- a/datalab/datalab_session/models.py
+++ b/datalab/datalab_session/models.py
@@ -74,8 +74,8 @@ def status(self):
         return cache.get(f'operation_{self.cache_key}_status', 'PENDING')
 
     @property
-    def percent_completion(self):
-        return cache.get(f'operation_{self.cache_key}_percent_completion', 0.0)
+    def operation_progress(self):
+        return cache.get(f'operation_{self.cache_key}_progress', 0.0)
 
     @property
     def output(self):
diff --git a/datalab/datalab_session/s3_utils.py b/datalab/datalab_session/s3_utils.py
index 8ccbd72..2275fcf 100644
--- a/datalab/datalab_session/s3_utils.py
+++ b/datalab/datalab_session/s3_utils.py
@@ -114,6 +114,7 @@ def get_archive_url(basename: str, archive: str = settings.ARCHIVE_API) -> dict:
 def get_fits(basename: str, source: str = 'archive'):
     """
     Returns a Fits File for the given basename from the source bucket
+    Will download the file to a tmp directory so future calls can open it directly
     """
     basename = basename.replace('-large', '').replace('-small', '')
     basename_file_path 
= os.path.join(settings.TEMP_FITS_DIR, basename) diff --git a/datalab/datalab_session/serializers.py b/datalab/datalab_session/serializers.py index f1b1b37..422d37d 100644 --- a/datalab/datalab_session/serializers.py +++ b/datalab/datalab_session/serializers.py @@ -10,14 +10,14 @@ class DataOperationSerializer(serializers.ModelSerializer): cache_key = serializers.CharField(write_only=True, required=False) status = serializers.ReadOnlyField() message = serializers.ReadOnlyField() - percent_completion = serializers.ReadOnlyField() + operation_progress = serializers.ReadOnlyField() output = serializers.ReadOnlyField() class Meta: model = DataOperation exclude = ('session',) read_only_fields = ( - 'id', 'created', 'status', 'percent_completion', 'message', 'output', + 'id', 'created', 'status', 'operation_progress', 'message', 'output', ) class DataSessionSerializer(serializers.ModelSerializer): diff --git a/datalab/datalab_session/tests/test_analysis.py b/datalab/datalab_session/tests/test_analysis.py index be16413..0c30368 100644 --- a/datalab/datalab_session/tests/test_analysis.py +++ b/datalab/datalab_session/tests/test_analysis.py @@ -17,7 +17,7 @@ def setUp(self): with open(f'{self.analysis_test_path}test_source_catalog.json') as f: self.test_source_catalog_data = json.load(f)['test_source_catalog'] - @mock.patch('datalab.datalab_session.file_utils.get_fits') + @mock.patch('datalab.datalab_session.analysis.line_profile.get_fits') def test_line_profile(self, mock_get_fits): mock_get_fits.return_value = self.analysis_fits_1_path @@ -34,7 +34,7 @@ def test_line_profile(self, mock_get_fits): assert_almost_equal(output.get('line_profile').tolist(), self.test_line_profile_data, decimal=3) - @mock.patch('datalab.datalab_session.file_utils.get_fits') + @mock.patch('datalab.datalab_session.analysis.source_catalog.get_fits') def test_source_catalog(self, mock_get_fits): mock_get_fits.return_value = self.analysis_fits_1_path diff --git a/datalab/datalab_session/tests/test_files/median/median_1_2.fits b/datalab/datalab_session/tests/test_files/median/median_1_2.fits index 2a11cda..2a172b7 100644 Binary files a/datalab/datalab_session/tests/test_files/median/median_1_2.fits and b/datalab/datalab_session/tests/test_files/median/median_1_2.fits differ diff --git a/datalab/datalab_session/tests/test_files/rgb_stack/rgb_stack.fits b/datalab/datalab_session/tests/test_files/rgb_stack/rgb_stack.fits index e6ad3fa..3219b2e 100644 Binary files a/datalab/datalab_session/tests/test_files/rgb_stack/rgb_stack.fits and b/datalab/datalab_session/tests/test_files/rgb_stack/rgb_stack.fits differ diff --git a/datalab/datalab_session/tests/test_operations.py b/datalab/datalab_session/tests/test_operations.py index d2c625d..c096fed 100644 --- a/datalab/datalab_session/tests/test_operations.py +++ b/datalab/datalab_session/tests/test_operations.py @@ -56,7 +56,7 @@ def wizard_description(): return wizard_description def operate(self): - self.set_output({'output_files': []}) + self.set_output([]) class TestDataOperation(FileExtendedTestCase): @@ -138,7 +138,7 @@ def test_wizard_description(self): def test_operate(self): self.data_operation.operate() - self.assertEqual(self.data_operation.get_percent_completion(), 1.0) + self.assertEqual(self.data_operation.get_operation_progress(), 1.0) self.assertEqual(self.data_operation.get_status(), 'COMPLETED') self.assertEqual(self.data_operation.get_output(), {'output_files': []}) @@ -147,8 +147,8 @@ def test_generate_cache_key(self): 
self.assertEqual(self.data_operation.generate_cache_key(), pregenerated_cache_key) def test_set_get_output(self): - self.data_operation.set_output({'output_files': []}) - self.assertEqual(self.data_operation.get_percent_completion(), 1.0) + self.data_operation.set_output([]) + self.assertEqual(self.data_operation.get_operation_progress(), 1.0) self.assertEqual(self.data_operation.get_status(), 'COMPLETED') self.assertEqual(self.data_operation.get_output(), {'output_files': []}) @@ -169,9 +169,9 @@ def tearDown(self): return super().tearDown() @mock.patch('datalab.datalab_session.file_utils.tempfile.NamedTemporaryFile') - @mock.patch('datalab.datalab_session.file_utils.get_fits') - @mock.patch('datalab.datalab_session.data_operations.median.save_fits_and_thumbnails') - @mock.patch('datalab.datalab_session.data_operations.median.create_jpgs') + @mock.patch('datalab.datalab_session.data_operations.data_operation.get_fits') + @mock.patch('datalab.datalab_session.file_utils.save_fits_and_thumbnails') + @mock.patch('datalab.datalab_session.file_utils.create_jpgs') def test_operate(self, mock_create_jpgs, mock_save_fits_and_thumbnails, mock_get_fits, mock_named_tempfile): # return the test fits paths in order of the input_files instead of aws fetch @@ -194,7 +194,7 @@ def test_operate(self, mock_create_jpgs, mock_save_fits_and_thumbnails, mock_get median.operate() output = median.get_output().get('output_files') - self.assertEqual(median.get_percent_completion(), 1.0) + self.assertEqual(median.get_operation_progress(), 1.0) self.assertTrue(os.path.exists(output[0])) self.assertFilesEqual(self.test_median_path, output[0]) @@ -221,8 +221,8 @@ def tearDown(self): self.clean_test_dir() return super().tearDown() - @mock.patch('datalab.datalab_session.data_operations.rgb_stack.save_fits_and_thumbnails') - @mock.patch('datalab.datalab_session.data_operations.rgb_stack.create_jpgs') + @mock.patch('datalab.datalab_session.file_utils.save_fits_and_thumbnails') + @mock.patch('datalab.datalab_session.file_utils.create_jpgs') @mock.patch('datalab.datalab_session.file_utils.tempfile.NamedTemporaryFile') @mock.patch('datalab.datalab_session.data_operations.rgb_stack.get_fits') def test_operate(self, mock_get_fits, mock_named_tempfile, mock_create_jpgs, mock_save_fits_and_thumbnails): @@ -246,7 +246,7 @@ def test_operate(self, mock_get_fits, mock_named_tempfile, mock_create_jpgs, moc rgb.operate() output = rgb.get_output().get('output_files') - self.assertEqual(rgb.get_percent_completion(), 1.0) + self.assertEqual(rgb.get_operation_progress(), 1.0) self.assertTrue(os.path.exists(output[0])) self.assertFilesEqual(self.test_rgb_path, output[0]) @@ -265,9 +265,9 @@ def tearDown(self): return super().tearDown() @mock.patch('datalab.datalab_session.file_utils.tempfile.NamedTemporaryFile') - @mock.patch('datalab.datalab_session.file_utils.get_fits') - @mock.patch('datalab.datalab_session.data_operations.stacking.save_fits_and_thumbnails') - @mock.patch('datalab.datalab_session.data_operations.stacking.create_jpgs') + @mock.patch('datalab.datalab_session.data_operations.data_operation.get_fits') + @mock.patch('datalab.datalab_session.file_utils.save_fits_and_thumbnails') + @mock.patch('datalab.datalab_session.file_utils.create_jpgs') def test_operate(self, mock_create_jpgs, mock_save_fits_and_thumbnails, mock_get_fits, mock_named_tempfile): # Create a negative images using numpy @@ -323,7 +323,7 @@ def test_operate(self, mock_create_jpgs, mock_save_fits_and_thumbnails, mock_get output = 
stack.get_output().get('output_files') # 100% completion - self.assertEqual(stack.get_percent_completion(), 1.0) + self.assertEqual(stack.get_operation_progress(), 1.0) # test that file paths are the same self.assertEqual(self.temp_stacked_path, output[0])
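
Reviewer note: taken together, this refactor reduces a new pixel-math operation to three helper calls: get_fits_npdata() loads the inputs while reporting progress up to 0.5, set_operation_progress() marks intermediate steps, and create_output() plus set_output() persist the result (set_output now accepts either a single file or a list and wraps it into {'output_files': [...]} itself). Below is a minimal sketch of a hypothetical Mean operation built on these helpers; it is illustrative only, not part of this diff, and omits the other required staticmethods such as description() and wizard_description():

    import numpy as np

    from datalab.datalab_session.data_operations.data_operation import BaseDataOperation
    from datalab.datalab_session.file_utils import crop_arrays, create_output

    class Mean(BaseDataOperation):
        def operate(self):
            input_files = self.input_data.get('input_files', [])

            # download each input fits and advance progress up to 0.5
            image_data_list = self.get_fits_npdata(input_files)

            # crop to a common shape, stack, and average pixel-wise
            stacked_data = np.stack(crop_arrays(image_data_list), axis=2)
            self.set_operation_progress(0.8)
            mean_data = np.mean(stacked_data, axis=2)

            # create_output writes the fits and jpg thumbnails, uploads them,
            # and returns the saved output file
            comment = f'Product of Datalab Mean on files {", ".join(file["basename"] for file in input_files)}'
            self.set_output(create_output(self.cache_key, mean_data, comment=comment))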