diff --git a/Tutorial.ipynb b/Tutorial.ipynb
deleted file mode 100644
index facffea..0000000
--- a/Tutorial.ipynb
+++ /dev/null
@@ -1,178 +0,0 @@
-{
- "cells": [
-  {
-   "attachments": {},
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "**Tutorial**:\n",
-    " \n",
-    "1. Download project and install dependencies\n",
-    "2. Preprocess data\n",
-    "3. Segment preprocessed data"
-   ]
-  },
-  {
-   "attachments": {},
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "**1. Download the BrainLes Aurora package**\n",
-    "\n",
-    "Install the package with the following command:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "!pip install brainles_aurora"
-   ]
-  },
-  {
-   "attachments": {},
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Install a preprocessing package, e.g. BraTS-Toolkit"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "!pip install BraTS-Toolkit"
-   ]
-  },
-  {
-   "attachments": {},
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Please follow the instructions on the GitHub page for installation and setup advice."
-   ]
-  },
-  {
-   "attachments": {},
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "**3. Segment preprocessed data**\n",
-    "\n",
-    "We provide sample data from the [ASNR-MICCAI BraTS Brain Metastasis Challenge](https://www.synapse.org/#!Synapse:syn51156910/wiki/622553), which is already preprocessed.\n",
-    "\n",
-    "Minimal mode: segmentation without test-time augmentation, with only T1-CE as input."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "__Minimal example__\n",
-    "\n",
-    "Logging will be messed up when used from a Jupyter notebook."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "BasicUNet features: (32, 32, 64, 128, 256, 32).\n"
-     ]
-    }
-   ],
-   "source": [
-    "from brainles_aurora.inferer.inferer import AuroraGPUInferer, AuroraInferer\n",
-    "from brainles_aurora.inferer.dataclasses import AuroraInfererConfig\n",
-    "\n",
-    "config = AuroraInfererConfig(\n",
-    "    tta=False\n",
-    ")  # disable tta for faster inference in this showcase\n",
-    "\n",
-    "# If you don't have a GPU that supports CUDA, use the CPU version: AuroraInferer(config=config)\n",
-    "inferer = AuroraGPUInferer(config=config)\n",
-    "\n",
-    "inferer.infer(\n",
-    "    t1=\"example_data/BraTS-MET-00110-000-t1c.nii.gz\",\n",
-    "    segmentation_file=\"test_output/segmentation.nii.gz\",\n",
-    ")"
-   ]
-  },
-  {
-   "attachments": {},
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "There are many possibilities for customization:\n",
-    "- Any of the following combinations of sequences can be supplied:\n",
-    "    - T1-CE + T1 + T2 + T2-FLAIR\n",
-    "    - T1-CE only\n",
-    "    - T1 only\n",
-    "    - T2-FLAIR only\n",
-    "    - T1-CE + T2-FLAIR\n",
-    "    - T1-CE + T1\n",
-    "    - T1-CE + T1 + T2-FLAIR\n",
-    "- Instead of only saving the final output consisting of one file with 2 labels, additional files with labels for the whole lesion (metastasis + edema) or the metastasis only can also be saved.\n",
-    "- Test-time augmentation can be enabled. Segmentation with TTA will take around 10 times longer than without TTA."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# from lib import single_inference\n",
-    "\n",
-    "# single_inference(\n",
-    "#     t1_file=\"Examples/BraTS-MET-00110-000-t1n.nii.gz\",\n",
-    "#     t1c_file=\"Examples/BraTS-MET-00110-000-t1c.nii.gz\",\n",
-    "#     t2_file=\"Examples/BraTS-MET-00110-000-t2w.nii.gz\",\n",
-    "#     fla_file=\"Examples/BraTS-MET-00110-000-t2f.nii.gz\",\n",
-    "#     segmentation_file=\"Examples/your_segmentation_file.nii.gz\",\n",
-    "#     whole_network_outputs_file=\"Examples/your_whole_lesion_file.nii.gz\",  # optional: whether to save network outputs for the whole lesion (metastasis + edema)\n",
-    "#     metastasis_network_outputs_file=\"Examples/your_metastasis_file.nii.gz\",  # optional: whether to save network outputs for the metastasis\n",
-    "#     cuda_devices=\"0\",  # optional: which CUDA devices to use\n",
-    "#     tta=True,  # optional: whether to use test-time augmentations\n",
-    "#     sliding_window_batch_size=1,  # optional: adjust to fit your GPU memory; each step requires an additional 2 GB of VRAM, increasing is not recommended for single inference\n",
-    "#     workers=8,  # optional: workers for the data loader\n",
-    "#     threshold=0.5,  # optional: where to threshold the network outputs\n",
-    "#     sliding_window_overlap=0.5,  # optional: overlap for the sliding window\n",
-    "#     model_selection=\"best\",  # optional: choose best or last checkpoint, best is recommended\n",
-    "#     verbosity=True,  # optional: verbosity of the output\n",
-    "# )"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.10.13"
-  },
-  "orig_nbformat": 4
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/brainles_aurora/inferer/dataclasses.py b/brainles_aurora/inferer/dataclasses.py
index 09626bc..52eba0d 100644
--- a/brainles_aurora/inferer/dataclasses.py
+++ b/brainles_aurora/inferer/dataclasses.py
@@ -11,10 +11,10 @@ class BaseConfig:
     """Base configuration for the Aurora model inferer.
 
     Attributes:
-        log_level (int | str, optional): Logging level. Defaults to logging.INFO.
+        log_level (int, optional): Logging level. Defaults to logging.INFO.
     """
 
-    log_level: int | str = logging.INFO
+    log_level: int = logging.INFO
 
 
 @dataclass
diff --git a/brainles_aurora/inferer/inferer.py b/brainles_aurora/inferer/inferer.py
index d8259a1..180353a 100644
--- a/brainles_aurora/inferer/inferer.py
+++ b/brainles_aurora/inferer/inferer.py
@@ -1,11 +1,13 @@
 import logging
+import logging.config
 from logging import Logger
 import os
+import json
 from abc import ABC, abstractmethod
 from pathlib import Path
 import sys
 from typing import Dict, List
-
+import signal
 import monai
 import nibabel as nib
 import numpy as np
@@ -23,7 +24,7 @@
     ToTensord,
 )
 from torch.utils.data import DataLoader
-import uuid
+import traceback
 
 from brainles_aurora.inferer import (
     IMGS_TO_MODE_DICT,
@@ -35,11 +36,12 @@
 )
 from brainles_aurora.utils import (
     turbo_path,
-    DualStdErrOutput,
     download_model_weights,
     remove_path_suffixes,
 )
 
+logger = logging.getLogger(__name__)
+
 
 class AbstractInferer(ABC):
     """
@@ -57,11 +59,7 @@ def __init__(self, config: BaseConfig) -> None:
             config (BaseConfig): Configuration for the inferer.
""" self.config = config - - # setup logger - self.dual_stderr_output = DualStdErrOutput(sys.stderr) - sys.stderr = self.dual_stderr_output - self.log = self._setup_logger(log_file=None) + self._setup_logger() # download weights if not present self.lib_path: str = Path(os.path.dirname(os.path.abspath(__file__))) @@ -70,39 +68,64 @@ def __init__(self, config: BaseConfig) -> None: if not self.model_weights_folder.exists(): download_model_weights(target_folder=str(self.lib_path.parent)) - def _setup_logger(self, log_file: str | Path | None = None) -> Logger: - """Setup a logger with an optional log file. + def _set_log_file(self, log_file: str | Path) -> None: + """Set the log file for the inference run and remove the file handler from a potential previous run. Args: - log_file (str | Path | None): Path to the log file. If None, no log file is created. - - Returns: - Logger: Logger instance. + log_file (str | Path): log file path """ - - default_formatter = logging.Formatter( - "%(asctime)s %(levelname)s: %(message)s", "%Y-%m-%d %H:%M:%S" + if self.log_file_handler: + logging.getLogger().removeHandler(self.log_file_handler) + + parent_dir = os.path.dirname(log_file) + # create parent dir if the path is more than just a file name + if parent_dir: + os.makedirs(parent_dir, exist_ok=True) + self.log_file_handler = logging.FileHandler(log_file) + self.log_file_handler.setFormatter( + logging.Formatter( + "[%(levelname)s|%(module)s|L%(lineno)d] %(asctime)s: %(message)s", + "%Y-%m-%dT%H:%M:%S%z", + ) ) - # we create a new log file and therefore logger for each infer call, hence the logger need unique names - logger = logging.getLogger(f"Inferer_{uuid.uuid4()}") - logger.setLevel(self.config.log_level) # Set the desired logging level - stream_handler = logging.StreamHandler() - stream_handler.setFormatter(default_formatter) - logger.addHandler(stream_handler) - if log_file: - # Create a file handler. We dont add it to the logger directly, but to the dual_stderr_output - # This way als console output includign excpetions will be redirceted to the log file - parent_dir = os.path.dirname(log_file) - # create parent dir if the path is more than just a file name - if parent_dir: - os.makedirs(parent_dir, exist_ok=True) - file_handler = logging.FileHandler(log_file) + # Add the file handler to the !root! logger + logging.getLogger().addHandler(self.log_file_handler) + + def _setup_logger(self) -> Logger: + """Setup the logger for the inferer and overwrite system hooks to add logging for exceptions and signals.""" + config_file = Path(__file__).parent / "log_config.json" + with open(config_file) as f_in: + log_config = json.load(f_in) + logging.config.dictConfig(log_config) + logging.basicConfig(level=self.config.log_level) + self.log_file_handler = None - self.dual_stderr_output.set_file_handler_stream(file_handler.stream) - logger.info(f"Logging to: {log_file}") + # overwrite system hooks to log exceptions and signals (SIGINT, SIGTERM) + #! 
+        def exception_handler(exception_type, value, tb):
+            """Log uncaught exceptions before exiting.
+            Args:
+                exception_type (Type[BaseException]): Exception type
+                value (BaseException): Exception instance
+                tb (TracebackType): Traceback
+            """
+            logger.error("".join(traceback.format_exception(exception_type, value, tb)))
+
+            if issubclass(exception_type, SystemExit):
+                # preserve the specific exit code if the exception was a SystemExit
+                sys.exit(value.code)
+
+        def signal_handler(sig, frame):
+            signame = signal.Signals(sig).name
+            logger.error(f"Received signal {sig} ({signame}), exiting...")
+            sys.exit(0)
+
+        sys.excepthook = exception_handler
+
+        signal.signal(signal.SIGINT, signal_handler)
+        signal.signal(signal.SIGTERM, signal_handler)
 
     @abstractmethod
     def infer(self):
@@ -120,9 +143,7 @@ def __init__(self, config: AuroraInfererConfig) -> None:
         """
         super().__init__(config=config)
 
-        self.log.info(
-            f"Initialized {self.__class__.__name__} with config: {self.config}"
-        )
+        logger.info(f"Initialized {self.__class__.__name__} with config: {self.config}")
 
         self.device = self._configure_device()
         self.validated_images = None
@@ -184,7 +205,7 @@ def _validate_image(
             len(unique_types) == 1
         ), f"All passed images must be of the same type! Received {unique_types}. Accepted Input types: {list(DataMode)}"
 
-        self.log.info(
+        logger.info(
             f"Successfully validated input images. Input mode: {self.input_mode}"
         )
         return images
@@ -203,9 +224,7 @@ def _determine_inference_mode(
             InferenceMode: Inference mode based on the combination of input images.
         """
         _t1, _t1c, _t2, _fla = [img is not None for img in images]
-        self.log.info(
-            f"Received files: T1: {_t1}, T1C: {_t1c}, T2: {_t2}, FLAIR: {_fla}"
-        )
+        logger.info(f"Received files: T1: {_t1}, T1C: {_t1c}, T2: {_t2}, FLAIR: {_fla}")
 
         # check if files are given in a valid combination that has an existing model implementation
         mode = IMGS_TO_MODE_DICT.get((_t1, _t1c, _t2, _fla), None)
@@ -215,7 +234,7 @@
                 "No model implemented for this combination of images"
             )
 
-        self.log.info(f"Inference mode: {mode}")
+        logger.info(f"Inference mode: {mode}")
         return mode
 
     def _get_data_loader(self) -> torch.utils.data.DataLoader:
@@ -377,7 +396,7 @@ def _save_as_nifti(self, postproc_data: Dict[str, np.ndarray]) -> None:
             ref = nib.load(reference_file)
             affine, header = ref.affine, ref.header
         else:
-            self.log.warning(
+            logger.warning(
                 f"Writing NIFTI output after NumPy input, using default affine=np.eye(4) and header=None"
             )
             affine, header = np.eye(4), None
@@ -389,7 +408,7 @@
             output_image = nib.Nifti1Image(data, affine, header)
             os.makedirs(os.path.dirname(output_file), exist_ok=True)
             nib.save(output_image, output_file)
-            self.log.info(f"Saved {key} to {output_file}")
+            logger.info(f"Saved {key} to {output_file}")
 
     def _post_process(
         self, onehot_model_outputs_CHWD: torch.Tensor
@@ -452,22 +471,22 @@ def _sliding_window_inference(self) -> Dict[str, np.ndarray]:
                 outputs = inferer(inputs, self.model)
 
                 if self.config.tta:
-                    self.log.info("Applying test time augmentations")
+                    logger.info("Applying test time augmentations")
                     outputs = self._apply_test_time_augmentations(
                         outputs, data, inferer
                     )
 
-                self.log.info("Post-processing data")
+                logger.info("Post-processing data")
                 postprocessed_data = self._post_process(
                     onehot_model_outputs_CHWD=outputs,
                 )
 
                 # save data to file if paths are provided
                 if any(self.output_file_mapping.values()):
-                    self.log.info("Saving post-processed data as NIFTI files")
+                    logger.info("Saving post-processed data as NIFTI files")
                     self._save_as_nifti(postproc_data=postprocessed_data)
 
-                self.log.info("Returning post-processed data as Dict of Numpy arrays")
+                logger.info("Returning post-processed data as Dict of Numpy arrays")
                 return postprocessed_data
 
     def _configure_device(self) -> torch.device:
@@ -477,7 +496,7 @@
             torch.device: Configured device.
         """
         device = torch.device("cpu")
-        self.log.info(f"Using device: {device}")
+        logger.info(f"Using device: {device}")
         return device
 
     def infer(
@@ -490,7 +509,7 @@
         whole_tumor_unbinarized_floats_file: str | Path | None = None,
         metastasis_unbinarized_floats_file: str | Path | None = None,
         log_file: str | Path | None = None,
-    ) -> Dict[str, np.ndarray] | None:
+    ) -> Dict[str, np.ndarray]:
         """Perform inference on the provided images.
 
         Args:
@@ -512,18 +531,19 @@
         Returns:
             Dict[str, np.ndarray]: Post-processed data.
        """
-        # setup logger for inference run
+        # set up log file for inference run
         if log_file:
-            self.log = self._setup_logger(log_file=log_file)
+            self._set_log_file(log_file=log_file)
         else:
             # if no log file is provided: set logfile to segmentation filename if provided, else inferer class name
-            self.log = self._setup_logger(
+            self._set_log_file(
                 log_file=(
                     remove_path_suffixes(segmentation_file).with_suffix(".log")
                    if segmentation_file
                    else os.path.abspath(f"./{self.__class__.__name__}.log")
                ),
            )
+        logger.info(f"Infer with config: {self.config} and device: {self.device}")
 
         # check inputs and get mode; if mode == prev mode, run inference, else load new model
         prev_mode = self.inference_mode
@@ -531,16 +551,15 @@
         self.inference_mode = self._determine_inference_mode(
             images=self.validated_images
         )
-
         if prev_mode != self.inference_mode:
-            self.log.info("No loaded compatible model found. Loading Model and weights")
+            logger.info("No loaded compatible model found. Loading model and weights")
             self.model = self._get_model()
         else:
-            self.log.info(
+            logger.info(
                 f"Same inference mode {self.inference_mode} as previous infer call. Re-using loaded model"
Re-using loaded model" ) # self.model.eval() - self.log.info("Setting up Dataloader") + logger.info("Setting up Dataloader") self.data_loader = self._get_data_loader() # setup output file paths @@ -551,9 +570,9 @@ def infer( } ######## - self.log.info(f"Running inference on device := {self.device}") + logger.info(f"Running inference on device := {self.device}") out = self._sliding_window_inference() - self.log.info(f"Finished inference {os.linesep}") + logger.info(f"Finished inference {os.linesep}") return out @@ -592,7 +611,7 @@ def _configure_device(self) -> torch.device: ), "No cuda device available while using GPUInferer" device = torch.device("cuda") - self.log.info(f"Using device: {device}") + logger.info(f"Set torch device: {device}") # clean memory torch.cuda.empty_cache() diff --git a/brainles_aurora/inferer/log_config.json b/brainles_aurora/inferer/log_config.json new file mode 100644 index 0000000..ad7654d --- /dev/null +++ b/brainles_aurora/inferer/log_config.json @@ -0,0 +1,30 @@ +{ + "version": 1, + "disable_existing_loggers": false, + "formatters": { + "simple": { + "format": "%(asctime)s %(levelname)s: %(message)s", + "datefmt": "%Y-%m-%d %H:%M:%S" + }, + "detailed": { + "format": "[%(levelname)s|%(module)s|L%(lineno)d] %(asctime)s: %(message)s", + "datefmt": "%Y-%m-%dT%H:%M:%S%z" + } + }, + "handlers": { + "stderr": { + "class": "logging.StreamHandler", + "level": "INFO", + "formatter": "simple", + "stream": "ext://sys.stderr" + } + }, + "loggers": { + "root": { + "level": "INFO", + "handlers": [ + "stderr" + ] + } + } +} \ No newline at end of file diff --git a/brainles_aurora/utils/__init__.py b/brainles_aurora/utils/__init__.py index b978871..aa7d72a 100644 --- a/brainles_aurora/utils/__init__.py +++ b/brainles_aurora/utils/__init__.py @@ -1,2 +1,2 @@ -from .utils import turbo_path, remove_path_suffixes, DualStdErrOutput +from .utils import turbo_path, remove_path_suffixes from .download import download_model_weights diff --git a/brainles_aurora/utils/utils.py b/brainles_aurora/utils/utils.py index de21de5..4601660 100644 --- a/brainles_aurora/utils/utils.py +++ b/brainles_aurora/utils/utils.py @@ -1,6 +1,7 @@ from pathlib import Path import os from typing import IO +import sys def turbo_path(path: str | Path) -> Path: @@ -34,24 +35,3 @@ def remove_path_suffixes(path: Path | str) -> Path: while path_stem.suffix: path_stem = path_stem.with_suffix("") return path_stem - - -class DualStdErrOutput: - """Class to write to stderr and a file at the same time""" - - def __init__(self, stderr: IO, file_handler_stream: IO = None): - self.stderr = stderr - self.file_handler_stream = file_handler_stream - - def set_file_handler_stream(self, file_handler_stream: IO): - self.file_handler_stream = file_handler_stream - - def write(self, text: str): - self.stderr.write(text) - if self.file_handler_stream: - self.file_handler_stream.write(text) - - def flush(self): - self.stderr.flush() - if self.file_handler_stream: - self.file_handler_stream.flush() diff --git a/segmentation_test.py b/segmentation_test.py index 2f1395c..9464368 100644 --- a/segmentation_test.py +++ b/segmentation_test.py @@ -82,7 +82,6 @@ def gpu_np(): inferer = AuroraGPUInferer(config=config) t1_np = load_np_from_nifti(t1) - print(t1_np.shape) inferer.infer( t1=t1_np, ) @@ -99,8 +98,7 @@ def gpu_output_np(): config=config, ) data = inferer.infer() - print(data) if __name__ == "__main__": - gpu_np() + gpu_nifti_2() diff --git a/tests/test_inferer.py b/tests/test_inferer.py index 927ec88..a31cc43 100644 --- 
+++ b/tests/test_inferer.py
@@ -79,10 +79,6 @@ def test_determine_inference_mode_not_implemented(self, mock_inferer, t2_path):
         with pytest.raises(NotImplementedError):
             mode = mock_inferer._determine_inference_mode(images=images)
 
-    def test_setup_logger(self, mock_inferer):
-        logger = mock_inferer._setup_logger()
-        assert isinstance(logger, logging.Logger)
-
     def test_infer(self, mock_inferer, t1_path):
         with patch.object(mock_inferer, "_sliding_window_inference", return_value=None):
             mock_inferer.infer(t1=t1_path)
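
Note: the following is a minimal standalone sketch of the logging pattern this patch moves to: a module-level logger, root configuration via dictConfig, and a per-run FileHandler attached to the root logger so records from every module end up in one log file (replacing the removed DualStdErrOutput stream duplication). The config dict and the file name "inference_run.log" are illustrative, not taken from the patch.

import logging
import logging.config  # dictConfig lives in logging.config; "import logging" alone is not enough

LOG_CONFIG = {
    "version": 1,
    "disable_existing_loggers": False,  # keep module loggers created before this call
    "formatters": {
        "simple": {"format": "%(asctime)s %(levelname)s: %(message)s"},
    },
    "handlers": {
        "stderr": {
            "class": "logging.StreamHandler",
            "level": "INFO",
            "formatter": "simple",
            "stream": "ext://sys.stderr",
        },
    },
    "root": {"level": "INFO", "handlers": ["stderr"]},
}

logging.config.dictConfig(LOG_CONFIG)

# Module loggers carry no handlers of their own; their records propagate to the root.
logger = logging.getLogger(__name__)

# Per-run file handler on the *root* logger: output from all library modules
# lands in the file without redirecting sys.stderr.
file_handler = logging.FileHandler("inference_run.log")
logging.getLogger().addHandler(file_handler)

logger.info("goes to stderr and to inference_run.log")

# A subsequent run first removes the stale handler, as _set_log_file() does.
logging.getLogger().removeHandler(file_handler)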
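
And a sketch of how the reworked infer() is called after this patch, assuming only the signature shown in the diff; the input path mirrors the deleted tutorial and the log_file value is a placeholder.

from brainles_aurora.inferer.dataclasses import AuroraInfererConfig
from brainles_aurora.inferer.inferer import AuroraInferer  # or AuroraGPUInferer on a CUDA machine

config = AuroraInfererConfig(tta=False)  # disable test-time augmentation for speed
inferer = AuroraInferer(config=config)

# Each call can log to its own file; without log_file it falls back to
# <segmentation_file>.log, or <class name>.log if no segmentation file is given.
segmented = inferer.infer(
    t1="example_data/BraTS-MET-00110-000-t1c.nii.gz",
    segmentation_file="test_output/segmentation.nii.gz",
    log_file="test_output/run_01.log",
)

# infer() now always returns the post-processed arrays (Dict[str, np.ndarray]).
for name, array in segmented.items():
    print(name, array.shape)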