From 533027367197f32ab4de4f0423d6cf806374f8e7 Mon Sep 17 00:00:00 2001
From: Daniel Hollas
Date: Tue, 23 Jul 2024 13:15:11 +0100
Subject: [PATCH 1/2] Print container logs if service fails to start (#782)

When a Docker container fails to start properly, the integration tests
fail with a timeout error. In that case, we will print container logs
for debugging.
---
 .github/workflows/docker-build-test-upload.yml |  2 +-
 tests_integration/conftest.py                  | 17 ++++++++++-------
 2 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/.github/workflows/docker-build-test-upload.yml b/.github/workflows/docker-build-test-upload.yml
index c285cf4d0..f8f0e3b55 100644
--- a/.github/workflows/docker-build-test-upload.yml
+++ b/.github/workflows/docker-build-test-upload.yml
@@ -78,7 +78,7 @@ jobs:
         run: echo "JUPYTER_TOKEN=$(openssl rand -hex 32)" >> $GITHUB_ENV

       - name: Run pytest for Chrome
-        run: pytest --driver Chrome tests_integration/
+        run: pytest -sv --driver Chrome tests_integration/
         env:
           QE_IMAGE: ${{ env.IMAGE }}@${{ steps.build-upload.outputs.digest }}

diff --git a/tests_integration/conftest.py b/tests_integration/conftest.py
index cbd576174..39eb0f7f7 100644
--- a/tests_integration/conftest.py
+++ b/tests_integration/conftest.py
@@ -50,19 +50,22 @@ def nb_user(aiidalab_exec):

 @pytest.fixture(scope="session")
-def notebook_service(docker_ip, docker_services):
+def notebook_service(docker_compose, docker_ip, docker_services):
     """Ensure that HTTP service is up and responsive."""
     # `port_for` takes a container port and returns the corresponding host port
     port = docker_services.port_for("aiidalab", 8888)
     url = f"http://{docker_ip}:{port}"
     token = os.environ.get("JUPYTER_TOKEN", "testtoken")
-    docker_services.wait_until_responsive(
-        # The timeout is very high for this test, because the installation of pseudo libraries.
-        timeout=180.0,
-        pause=0.1,
-        check=lambda: is_responsive(url),
-    )
+    try:
+        docker_services.wait_until_responsive(
+            timeout=180.0,
+            pause=1.0,
+            check=lambda: is_responsive(url),
+        )
+    except Exception as e:
+        print(docker_compose.execute("logs").decode().strip())
+        pytest.exit(e)
     return url, token
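As a usage sketch, the tests can also be run locally so that the container logs
printed by the fixture above are visible. The image tag below is a placeholder,
and it is assumed that the test compose setup picks up QE_IMAGE and that
pytest-selenium with a local Chrome driver is installed:

    # Hypothetical image tag; point QE_IMAGE at whatever image you want to test.
    export QE_IMAGE=ghcr.io/aiidalab/qe:edge
    # Any token works locally; CI generates a random one as shown above.
    export JUPYTER_TOKEN=$(openssl rand -hex 32)
    # -s disables pytest output capturing so the printed container logs reach
    # the terminal; -v enables verbose test names.
    pytest -sv --driver Chrome tests_integration/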
From a3b40b0d6d8305a71ee0cbb09628bb1ee960ea47 Mon Sep 17 00:00:00 2001
From: Daniel Hollas
Date: Tue, 23 Jul 2024 13:17:24 +0100
Subject: [PATCH 2/2] Move QE conda install into separate stage (#783)

When building the QEApp Docker container, the Quantum Espresso
installation into a conda environment is a step that's completely
independent of the others. By putting it into a separate Docker build
stage, it can be built in parallel (when using BuildKit), for example:

DOCKER_BUILDKIT=1 docker build .
---
 .dockerignore                                  | 16 +++++++
 .github/workflows/docker-build-test-upload.yml |  2 +-
 Dockerfile                                     | 47 ++++++++++---------
 before-notebook.d/70_prepare-qe-executable.sh  |  2 +-
 4 files changed, 44 insertions(+), 23 deletions(-)
 create mode 100644 .dockerignore

diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 000000000..f45fee4b5
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,16 @@
+**/*.egg-info
+**/__pycache__
+**/.*_cache
+**/*.pyc
+**/*.tar.gz
+*.code-workspace
+**/.*.ipynb
+**/.ipynb*
+.venv/
+build/
+export/
+.do-not-setup-on-localhost
+
+# Sphinx documentation
+docs/html
+screenshots/
diff --git a/.github/workflows/docker-build-test-upload.yml b/.github/workflows/docker-build-test-upload.yml
index f8f0e3b55..521f640ad 100644
--- a/.github/workflows/docker-build-test-upload.yml
+++ b/.github/workflows/docker-build-test-upload.yml
@@ -62,7 +62,7 @@ jobs:
           context: .
           platforms: linux/amd64
           cache-to: |
-            type=gha,scope=${{ github.workflow }},mode=min
+            type=gha,scope=${{ github.workflow }},mode=max
          cache-from: |
            type=gha,scope=${{ github.workflow }}
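The cache-to change above switches the GitHub Actions build cache from
mode=min, which exports only the layers of the final image, to mode=max, which
also exports layers of intermediate stages, so the QE build stage introduced in
the Dockerfile diff below is cached between CI runs. A minimal local sketch of
the parallel build described in the commit message (the image tag is a
placeholder; BuildKit is assumed to be available):

    # Build the full image; with BuildKit, independent stages build in parallel.
    DOCKER_BUILDKIT=1 docker build -t aiidalab-qe:dev .
    # Build only the QE conda-environment stage defined below.
    DOCKER_BUILDKIT=1 docker build --target qe_conda_env .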
diff --git a/Dockerfile b/Dockerfile
index c53456900..48c48d15b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,7 +1,26 @@
 # syntax=docker/dockerfile:1
-FROM ghcr.io/astral-sh/uv:0.2.18 AS uv
-FROM ghcr.io/aiidalab/full-stack:2024.1019
+ARG FULL_STACK_VER=2024.1021
+ARG UV_VER=0.2.27
+ARG QE_VER=7.2
+ARG QE_DIR=/opt/conda/envs/quantum-espresso-${QE_VER}

+FROM ghcr.io/astral-sh/uv:0.2.27 AS uv
+
+# STAGE 1
+# Install QE into conda environment in /opt/conda
+# This step is independent from the others and can be run in parallel.
+FROM ghcr.io/aiidalab/full-stack:${FULL_STACK_VER} AS qe_conda_env
+ARG QE_VER
+ARG QE_DIR
+RUN mamba create -p "${QE_DIR}" --yes qe="${QE_VER}" && \
+    mamba clean --all -f -y && \
+    fix-permissions "${CONDA_DIR}"
+
+
+# STAGE 2
+FROM ghcr.io/aiidalab/full-stack:${FULL_STACK_VER}
+ARG QE_VER
+ARG QE_DIR
 # Copy whole repo and pre-install the dependencies and app to the tmp folder.
 # In the before notebook scripts the app will be re-installed by moving it to the app folder.
 ENV PREINSTALL_APP_FOLDER=${CONDA_DIR}/aiidalab-qe
@@ -17,32 +36,18 @@ RUN --mount=from=uv,source=/uv,target=/bin/uv \
     cd ${PREINSTALL_APP_FOLDER} && \
     # Remove all untracked files and directories. For example the setup lock flag file.
     git clean -fx && \
-    # It is important to install from `aiidalab install` to mimic the exact installation operation as
-    # from the app store.
-    # The command wil first install the dependencies from list by parsing setup config files,
-    # (for `aiidalab/aiidalab<23.03.2` the `setup.py` should be in the root folder of the app https://github.com/aiidalab/aiidalab/pull/382).
-    # and then the app and restart the daemon in the end.
-    # But since the aiida profile not yet exists, the daemon restart will fail but it is not a problem.
-    # Because we only need the dependencies to be installed.
-    # aiidalab install --yes --python ${CONDA_DIR}/bin/python "quantum-espresso@file://${PREINSTALL_APP_FOLDER}" && \
-    # However, have to use `pip install` explicitly because `aiidalab install` call `pip install --user` which will install the app to `/home/${NB_USER}/.local`.
-    # It won't cause issue for docker but for k8s deployment the home folder is not bind mounted to the pod and the dependencies won't be found. (see issue in `jupyter/docker-stacks` https://github.com/jupyter/docker-stacks/issues/815)
     uv pip install --system --no-cache . && \
-    fix-permissions "${CONDA_DIR}" && \
-    fix-permissions "/home/${NB_USER}"
-
-ENV QE_VERSION="7.2"
-RUN mamba create -p /opt/conda/envs/quantum-espresso --yes \
-    qe=${QE_VERSION} \
-    && mamba clean --all -f -y && \
-    fix-permissions "${CONDA_DIR}" && \
-    fix-permissions "/home/${NB_USER}"
+    fix-permissions "${CONDA_DIR}"

 # Download the QE pseudopotentials to the folder for afterware installation.
 ENV PSEUDO_FOLDER=${CONDA_DIR}/pseudo
 RUN mkdir -p ${PSEUDO_FOLDER} && \
     python -m aiidalab_qe download-pseudos --dest ${PSEUDO_FOLDER}

+COPY --from=qe_conda_env "${QE_DIR}" "${QE_DIR}"
+# TODO: Remove this once we get rid of 70_prepare-qe-executable.sh
+ENV QE_VERSION="$QE_VER"
+
 COPY before-notebook.d/* /usr/local/bin/before-notebook.d/

 WORKDIR "/home/${NB_USER}"
diff --git a/before-notebook.d/70_prepare-qe-executable.sh b/before-notebook.d/70_prepare-qe-executable.sh
index 41ba2d391..51d828310 100644
--- a/before-notebook.d/70_prepare-qe-executable.sh
+++ b/before-notebook.d/70_prepare-qe-executable.sh
@@ -6,7 +6,7 @@ set -x
 # Copy quantum espresso env to user space.
 mkdir -p /home/${NB_USER}/.conda/envs
 if [ ! -d /home/${NB_USER}/.conda/envs/quantum-espresso-${QE_VERSION} ]; then
-  ln -s /opt/conda/envs/quantum-espresso /home/${NB_USER}/.conda/envs/quantum-espresso-${QE_VERSION}
+  ln -s /opt/conda/envs/quantum-espresso-${QE_VERSION} /home/${NB_USER}/.conda/envs/quantum-espresso-${QE_VERSION}
   # Install qe so the progress bar not shown in the notebook when first time using app.
   echo "Installing qe."
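Finally, a brief sketch of how the new build arguments can be overridden at
build time; the values shown simply restate the defaults from the Dockerfile
and the image tag is a placeholder. At container startup,
70_prepare-qe-executable.sh then symlinks the version-suffixed environment into
the user's ~/.conda/envs, with QE_VERSION now derived from the QE_VER build
argument:

    # Override the base-image and Quantum ESPRESSO versions explicitly.
    DOCKER_BUILDKIT=1 docker build \
        --build-arg FULL_STACK_VER=2024.1021 \
        --build-arg QE_VER=7.2 \
        -t aiidalab-qe:dev .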