diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..bb0478d
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,13 @@
+venv/
+*__pycache__/
+resources/
+*.DS_Store
+*.nii
+*.nii.gz
+*.nrrd
+*.obj
+*.zip
+*log.csv
+*.ini
+gradio_cached_examples/
+.idea/
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000..066324e
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,32 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Desktop (please complete the following information):**
+ - OS: [e.g. Windows]
+ - Version: [e.g. 10]
+ - Python: [e.g. 3.8.10]
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000..bbcbbe7
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml
new file mode 100644
index 0000000..487cea4
--- /dev/null
+++ b/.github/workflows/deploy.yml
@@ -0,0 +1,20 @@
+name: Deploy
+on:
+  push:
+    branches: [ main ]
+
+  # to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+jobs:
+  sync-to-hub:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+          lfs: true
+      - name: Push to hub
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+        run: git push https://dbouget:$HF_TOKEN@huggingface.co/spaces/dbouget/Raidionics-HF main
diff --git a/.github/workflows/filesize.yml b/.github/workflows/filesize.yml
new file mode 100644
index 0000000..0922d48
--- /dev/null
+++ b/.github/workflows/filesize.yml
@@ -0,0 +1,16 @@
+name: Check file size
+on:  # or directly `on: [push]` to run the action on every push on any branch
+  pull_request:
+    branches: [ main ]
+
+  # to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+jobs:
+  check-filesize:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check large files
+        uses: ActionsDesk/lfs-warning@v2.0
+        with:
+          filesizelimit: 10485760  # 10 MB, the limit for syncing to HF Spaces
diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml
new file mode 100644
index 0000000..2b6392b
--- /dev/null
+++ b/.github/workflows/linting.yml
@@ -0,0 +1,26 @@
+name: Linting
+
+on:
+  push:
+    branches:
+      - '*'
+  pull_request:
+    branches:
+      - '*'
+  workflow_dispatch:
+
+jobs:
+  build:
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: actions/checkout@v1
+      - name: Set up Python 3.7
+        uses: actions/setup-python@v2
+        with:
+          python-version: 3.7
+
+      - name: Install lint dependencies
+        run: pip install wheel setuptools black==22.3.0 isort==5.10.1 flake8==4.0.1
+
+      - name: Lint the code
+        run: sh shell/lint.sh
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..bb0478d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,13 @@
+venv/
+*__pycache__/
+resources/
+*.DS_Store
+*.nii
+*.nii.gz
+*.nrrd
+*.obj
+*.zip
+*log.csv
+*.ini
+gradio_cached_examples/
+.idea/
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..9890d42
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,74 @@
+# read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
+# you will also find guides on how best to write your Dockerfile
+FROM python:3.8-slim
+
+# set locale and encoding
+ENV LANG=C.UTF-8 LC_ALL=C.UTF-8
+
+WORKDIR /code
+
+RUN apt-get update -y
+#RUN apt-get install -y python3 python3-pip
+RUN apt install git --fix-missing -y
+RUN apt install wget -y
+
+# install other libraries
+RUN apt-get install python3-pip -y && \
+    apt-get -y install sudo
+RUN apt-get install curl -y
+RUN apt-get install nano -y
+RUN apt-get update && apt-get install -y git
+RUN apt-get install libblas-dev -y && apt-get install liblapack-dev -y
+RUN apt-get install gfortran -y
+RUN apt-get install libpng-dev -y
+RUN apt-get install python3-dev -y
+
+WORKDIR /code
+
+# install dependencies
+COPY ./requirements.txt /code/requirements.txt
+RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+# resolve typing_extensions dependency collision between tf==2.4 and gradio
+RUN pip install --force-reinstall typing_extensions==4.7.1
+
+# Install wget and unzip
+RUN apt install wget -y && \
+    apt install unzip -y
+
+# Set up a new user named "user" with user ID 1000
+RUN useradd -m -u 1000 user
+
+# Switch to the "user" user
+USER user
+
+# Set home to the user's home directory
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH
+
+# Set the working directory to the user's home directory
+WORKDIR $HOME
+
+# Copy the current directory contents into the container at $HOME/app, setting the owner to the user
+COPY --chown=user . $HOME
+
+# Download pretrained models
+RUN mkdir -p resources/models/
+RUN wget "https://github.com/raidionics/Raidionics-models/releases/download/1.2.0/Raidionics-MRI_Brain-ONNX-v12.zip" && \
+    unzip "Raidionics-MRI_Brain-ONNX-v12.zip" && mv MRI_Brain/ resources/models/MRI_Brain/
+RUN wget "https://github.com/raidionics/Raidionics-models/releases/download/1.2.0/Raidionics-MRI_GBM-ONNX-v12.zip" && \
+    unzip "Raidionics-MRI_GBM-ONNX-v12.zip" && mv MRI_GBM/ resources/models/MRI_GBM/
+RUN wget "https://github.com/raidionics/Raidionics-models/releases/download/1.2.0/Raidionics-MRI_LGGlioma-ONNX-v12.zip" && \
+    unzip "Raidionics-MRI_LGGlioma-ONNX-v12.zip" && mv MRI_LGGlioma/ resources/models/MRI_LGGlioma/
+RUN wget "https://github.com/raidionics/Raidionics-models/releases/download/1.2.0/Raidionics-MRI_Meningioma-ONNX-v12.zip" && \
+    unzip "Raidionics-MRI_Meningioma-ONNX-v12.zip" && mv MRI_Meningioma/ resources/models/MRI_Meningioma/
+RUN wget "https://github.com/raidionics/Raidionics-models/releases/download/1.2.0/Raidionics-MRI_Metastasis-ONNX-v12.zip" && \
+    unzip "Raidionics-MRI_Metastasis-ONNX-v12.zip" && mv MRI_Metastasis/ resources/models/MRI_Metastasis/
+
+RUN rm -r *.zip
+
+# Download test sample
+RUN wget "https://github.com/raidionics/Raidionics-HF/releases/download/v1.0.0/t1gd.nii.gz"
+
+# CMD ["/bin/bash"]
+CMD ["python3", "app.py"]
\ No newline at end of file
diff --git a/app.py b/app.py
new file mode 100644
index 0000000..02ca2ae
--- /dev/null
+++ b/app.py
@@ -0,0 +1,41 @@
+import os
+from argparse import ArgumentParser
+
+from src.gui import WebUI
+
+
+def main():
+    parser = ArgumentParser()
+    parser.add_argument(
+        "--cwd",
+        type=str,
+        default="/home/user/app/",
+        help="Set current working directory (path to app.py).",
+    )
+    parser.add_argument(
+        "--share",
+        type=int,
+        default=1,
+        help="Whether to enable the app to be accessible online "
+        "-> sets up a public link which requires internet access.",
+    )
+    args = parser.parse_args()
+
+    print("Current working directory:", args.cwd)
+
+    if not os.path.exists(args.cwd):
+        raise ValueError("Chosen 'cwd' is not a valid path!")
+    if args.share not in [0, 1]:
+        raise ValueError(
+            "The 'share' argument can only be set to 0 or 1, but was:",
+            args.share,
+        )
+
+    # initialize and run app
+    print("Launching demo...")
+    app = WebUI(cwd=args.cwd, share=args.share)
+    app.run()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..92a42d8
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,2 @@
+raidionicsrads@git+https://github.com/dbouget/raidionics_rads_lib
+gradio==3.44.4
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..94e9a93
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,14 @@
+[metadata]
+description-file = README.md
+
+[isort]
+force_single_line=True
+known_first_party=src
+line_length=160
+profile=black
+
+[flake8]
+# imported but unused in __init__.py, that's ok.
+per-file-ignores=*__init__.py:F401
+ignore=E203,W503,W605,F632,E266,E731,E712,E741
+max-line-length=160
diff --git a/shell/format.sh b/shell/format.sh
new file mode 100644
index 0000000..df548bc
--- /dev/null
+++ b/shell/format.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+isort --sl src/ app.py
+black --line-length 80 src/ app.py
+flake8 src/ app.py
diff --git a/shell/lint.sh b/shell/lint.sh
new file mode 100644
index 0000000..103ceb3
--- /dev/null
+++ b/shell/lint.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+isort --check --sl -c src/ app.py
+if ! [ $? -eq 0 ]
+then
+    echo "Please run \"sh shell/format.sh\" to format the code."
+    exit 1
+fi
+echo "no issues with isort"
+flake8 src/ app.py
+if ! [ $? -eq 0 ]
+then
+    echo "Please fix the code style issue."
+    exit 1
+fi
+echo "no issues with flake8"
+black --check --line-length 80 src/ app.py
+if ! [ $? -eq 0 ]
+then
+    echo "Please run \"sh shell/format.sh\" to format the code."
+    exit 1
+fi
+echo "no issues with black"
+echo "linting success!"
diff --git a/src/__init__.py b/src/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/src/__init__.py
@@ -0,0 +1 @@
+
diff --git a/src/gui.py b/src/gui.py
new file mode 100644
index 0000000..0d4914d
--- /dev/null
+++ b/src/gui.py
@@ -0,0 +1,184 @@
+import os
+
+import gradio as gr
+
+from .inference import run_model
+from .utils import load_to_numpy
+from .utils import load_pred_volume_to_numpy
+from .utils import nifti_to_glb
+
+
+class WebUI:
+    def __init__(
+        self,
+        model_name: str = None,
+        cwd: str = "/home/user/app/",
+        share: int = 1,
+    ):
+        # global states
+        self.images = []
+        self.pred_images = []
+
+        # @TODO: This should be dynamically set based on chosen volume size
+        self.nb_slider_items = 512
+
+        self.model_name = model_name
+        self.cwd = cwd
+        self.share = share
+
+        self.class_name = "meningioma"  # default
+        self.class_names = {
+            "meningioma": "MRI_Meningioma",
+            "lower-grade-glioma": "MRI_LGGlioma",
+            "metastasis": "MRI_Metastasis",
+            "glioblastoma": "MRI_GBM",
+            "brain": "MRI_Brain",
+        }
+
+        self.result_names = {
+            "meningioma": "Tumor",
+            "lower-grade-glioma": "Tumor",
+            "metastasis": "Tumor",
+            "glioblastoma": "Tumor",
+            "brain": "Brain",
+        }
+
+        # define widgets not to be rendered immediately, but later on
+        self.slider = gr.Slider(
+            minimum=1,
+            maximum=self.nb_slider_items,
+            value=1,
+            step=1,
+            label="Which 2D slice to show",
+            interactive=True,
+        )
+
+        self.volume_renderer = gr.Model3D(
+            clear_color=[0.0, 0.0, 0.0, 0.0],
+            label="3D Model",
+            visible=True,
+            elem_id="model-3d",
+        ).style(height=512)
+
+    def set_class_name(self, value):
+        print("Changed task to:", value)
+        self.class_name = value
+
+    def combine_ct_and_seg(self, img, pred):
+        return (img, [(pred, self.class_name)])
+
+    def upload_file(self, file):
+        return file.name
+
+    def process(self, mesh_file_name):
+        path = mesh_file_name.name
+        run_model(
+            path,
+            model_path=os.path.join(self.cwd, "resources/models/"),
+            task=self.class_names[self.class_name],
+            name=self.result_names[self.class_name],
+        )
+        nifti_to_glb("prediction.nii.gz")
+
+        self.images = load_to_numpy(path)
+        # @TODO. Dynamic update of the slider does not seem to work like this
+        # self.nb_slider_items = len(self.images)
+        # self.slider.update(value=int(self.nb_slider_items/2), maximum=self.nb_slider_items)
+
+        self.pred_images = load_pred_volume_to_numpy("./prediction.nii.gz")
+        return "./prediction.obj"
+
+    def get_img_pred_pair(self, k):
+        k = int(k) - 1
+        # @TODO. Will duplicate the last slice to fill up, since the slider is not adjustable right now
+        if k >= len(self.images):
+            k = len(self.images) - 1
+        out = [gr.AnnotatedImage.update(visible=False)] * self.nb_slider_items
+        out[k] = gr.AnnotatedImage.update(
+            self.combine_ct_and_seg(self.images[k], self.pred_images[k]),
+            visible=True,
+        )
+        return out
+
+    def run(self):
+        css = """
+        #model-3d {
+            height: 512px;
+        }
+        #model-2d {
+            height: 512px;
+            margin: auto;
+        }
+        #upload {
+            height: 120px;
+        }
+        """
+        with gr.Blocks(css=css) as demo:
+            with gr.Row():
+                file_output = gr.File(file_count="single", elem_id="upload")
+                file_output.upload(self.upload_file, file_output, file_output)
+
+                model_selector = gr.Dropdown(
+                    list(self.class_names.keys()),
+                    label="Segmentation task",
+                    info="Select the preoperative segmentation model to run",
+                    multiselect=False,
+                    size="sm",
+                )
+                model_selector.input(
+                    fn=lambda x: self.set_class_name(x),
+                    inputs=model_selector,
+                    outputs=None,
+                )
+
+                run_btn = gr.Button("Run segmentation").style(
+                    full_width=False, size="lg"
+                )
+                run_btn.click(
+                    fn=lambda x: self.process(x),
+                    inputs=file_output,
+                    outputs=self.volume_renderer,
+                )
+
+            with gr.Row():
+                gr.Examples(
+                    examples=[
+                        os.path.join(self.cwd, "t1gd.nii.gz"),
+                    ],
+                    inputs=file_output,
+                    outputs=file_output,
+                    fn=self.upload_file,
+                    cache_examples=True,
+                )
+
+            with gr.Row():
+                with gr.Box():
+                    with gr.Column():
+                        image_boxes = []
+                        for i in range(self.nb_slider_items):
+                            visibility = i == 0  # slider value 1 maps to slice index 0
+                            t = gr.AnnotatedImage(
+                                visible=visibility, elem_id="model-2d"
+                            ).style(
+                                color_map={self.class_name: "#ffae00"},
+                                height=512,
+                                width=512,
+                            )
+                            image_boxes.append(t)
+
+                        self.slider.input(
+                            self.get_img_pred_pair, self.slider, image_boxes
+                        )
+
+                    self.slider.render()
+
+                with gr.Box():
+                    self.volume_renderer.render()
+
+        # sharing app publicly -> share=True:
+        # https://gradio.app/sharing-your-app/
+        # inference times > 60 seconds -> need queue():
+        # https://github.com/tloen/alpaca-lora/issues/60#issuecomment-1510006062
+        demo.queue().launch(
+            server_name="0.0.0.0", server_port=7860, share=self.share
+        )
diff --git a/src/inference.py b/src/inference.py
new file mode 100644
index 0000000..a5df82b
--- /dev/null
+++ b/src/inference.py
@@ -0,0 +1,97 @@
+import configparser
+import logging
+import os
+import shutil
+
+
+def run_model(
+    input_path: str,
+    model_path: str,
+    verbose: str = "info",
+    task: str = "MRI_Meningioma",
+    name: str = "Tumor",
+):
+    logging.basicConfig()
+    logging.getLogger().setLevel(logging.WARNING)
+
+    if verbose == "debug":
+        logging.getLogger().setLevel(logging.DEBUG)
+    elif verbose == "info":
+        logging.getLogger().setLevel(logging.INFO)
+    elif verbose == "error":
+        logging.getLogger().setLevel(logging.ERROR)
+    else:
+        raise ValueError("Unsupported verbose value provided:", verbose)
+
+    # delete patient/result folders if they exist
+    if os.path.exists("./patient/"):
+        shutil.rmtree("./patient/")
+    if os.path.exists("./result/"):
+        shutil.rmtree("./result/")
+
+    try:
+        # setup temporary patient directory
+        filename = input_path.split("/")[-1]
+        splits = filename.split(".")
+        extension = ".".join(splits[1:])
+        patient_directory = "./patient/"
+        os.makedirs(patient_directory + "T0/", exist_ok=True)
+        shutil.copy(
+            input_path,
+            patient_directory + "T0/" + splits[0] + "-t1gd."
+            + extension,
+        )
+
+        # define output directory to save results
+        output_path = "./result/prediction-" + splits[0] + "/"
+        os.makedirs(output_path, exist_ok=True)
+
+        # Setting up the configuration file
+        rads_config = configparser.ConfigParser()
+        rads_config.add_section("Default")
+        rads_config.set("Default", "task", "neuro_diagnosis")
+        rads_config.set("Default", "caller", "")
+        rads_config.add_section("System")
+        rads_config.set("System", "gpu_id", "-1")
+        rads_config.set("System", "input_folder", patient_directory)
+        rads_config.set("System", "output_folder", output_path)
+        rads_config.set("System", "model_folder", model_path)
+        rads_config.set(
+            "System",
+            "pipeline_filename",
+            os.path.join(model_path, task, "pipeline.json"),
+        )
+        rads_config.add_section("Runtime")
+        rads_config.set(
+            "Runtime", "reconstruction_method", "thresholding"
+        )  # thresholding, probabilities
+        rads_config.set("Runtime", "reconstruction_order", "resample_first")
+        rads_config.set("Runtime", "use_preprocessed_data", "False")
+
+        with open("rads_config.ini", "w") as f:
+            rads_config.write(f)
+
+        # finally, run inference
+        from raidionicsrads.compute import run_rads
+        run_rads(config_filename="rads_config.ini")
+
+        # rename and move final result
+        os.rename(
+            "./result/prediction-"
+            + splits[0]
+            + "/T0/"
+            + splits[0]
+            + "-t1gd_annotation-"
+            + name
+            + ".nii.gz",
+            "./prediction.nii.gz",
+        )
+
+    except Exception as e:
+        print(e)
+
+    # Clean-up (literal paths: the locals may be unbound if an exception occurred early)
+    if os.path.exists("./patient/"):
+        shutil.rmtree("./patient/")
+    if os.path.exists("./result/"):
+        shutil.rmtree("./result/")
diff --git a/src/utils.py b/src/utils.py
new file mode 100644
index 0000000..2bb222f
--- /dev/null
+++ b/src/utils.py
@@ -0,0 +1,68 @@
+import nibabel as nib
+import numpy as np
+from nibabel.processing import resample_to_output
+from skimage.measure import marching_cubes
+
+
+def load_to_numpy(data_path):
+    if not isinstance(data_path, str):
+        data_path = data_path.name
+
+    image = nib.load(data_path)
+    resampled = resample_to_output(image, None, order=0)
+    data = resampled.get_fdata()
+
+    data = np.rot90(data, k=1, axes=(0, 1))
+
+    # @TODO. Contrast-operation to do based on MRI/CT and target to segment
+    # data[data < -150] = -150
+    # data[data > 250] = 250
+
+    data = data - np.amin(data)
+    data = data / np.amax(data) * 255
+    data = data.astype("uint8")
+
+    print(data.shape)
+    return [data[..., i] for i in range(data.shape[-1])]
+
+
+def load_pred_volume_to_numpy(data_path):
+    if not isinstance(data_path, str):
+        data_path = data_path.name
+
+    image = nib.load(data_path)
+    resampled = resample_to_output(image, None, order=0)
+    data = resampled.get_fdata()
+
+    data = np.rot90(data, k=1, axes=(0, 1))
+
+    data[data > 0] = 1
+    data = data.astype("uint8")
+
+    print(data.shape)
+    return [data[..., i] for i in range(data.shape[-1])]
+
+
+def nifti_to_glb(path, output="prediction.obj"):  # writes a Wavefront OBJ despite the name
+    # load NIfTI into a numpy array
+    image = nib.load(path)
+    resampled = resample_to_output(image, [1, 1, 1], order=1)
+    data = resampled.get_fdata().astype("uint8")
+
+    # extract surface
+    verts, faces, normals, values = marching_cubes(data, 0)
+    faces += 1  # OBJ indices are 1-based
+
+    with open(output, "w") as thefile:
+        for item in verts:
+            thefile.write("v {0} {1} {2}\n".format(item[0], item[1], item[2]))
+
+        for item in normals:
+            thefile.write("vn {0} {1} {2}\n".format(item[0], item[1], item[2]))
+
+        for item in faces:
+            thefile.write(
+                "f {0}//{0} {1}//{1} {2}//{2}\n".format(
+                    item[0], item[1], item[2]
+                )
+            )
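
Reviewer note: since the pipeline is spread over three modules, a headless usage sketch may help when testing this PR without the Gradio UI. This is a hypothetical snippet, not a file in the PR; `smoke_test` and its defaults are invented for illustration, and it assumes the working directory is laid out as the Dockerfile prepares it (models unpacked under resources/models/, t1gd.nii.gz downloaded next to app.py).

```python
# Hypothetical smoke test: runs the segmentation pipeline headlessly.
import os

from src.inference import run_model
from src.utils import load_pred_volume_to_numpy, nifti_to_glb


def smoke_test(cwd: str = ".") -> None:
    # run_model writes its result to ./prediction.nii.gz (see src/inference.py)
    run_model(
        input_path=os.path.join(cwd, "t1gd.nii.gz"),
        model_path=os.path.join(cwd, "resources/models/"),
        task="MRI_Meningioma",
        name="Tumor",
    )
    slices = load_pred_volume_to_numpy("./prediction.nii.gz")
    print("Predicted volume has", len(slices), "axial slices")
    # convert the binary segmentation to an OBJ mesh, as the 3D viewer expects
    nifti_to_glb("./prediction.nii.gz", output="prediction.obj")


if __name__ == "__main__":
    smoke_test()
```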