From 594443e6ad3533642af42c1a3c1f9d1a58d01abe Mon Sep 17 00:00:00 2001
From: Rob Ballantyne
Date: Fri, 14 Jun 2024 17:28:19 +0100
Subject: [PATCH] Update base (venv)

---
 .github/workflows/docker-build.yml            | 47 +++++++------------
 .gitignore                                    |  1 -
 .../opt/ai-dock/bin/build/layer0/amd.sh       | 23 ---------
 .../opt/ai-dock/bin/build/layer0/common.sh    | 24 ----------
 .../opt/ai-dock/bin/build/layer0/cpu.sh       | 13 -----
 .../opt/ai-dock/bin/build/layer0/nvidia.sh    | 14 ------
 .../opt/ai-dock/tests/assert-torch-version.py |  9 ----
 build/COPY_ROOT/root/.gitkeep                 |  0
 build/COPY_ROOT/usr/.gitkeep                  |  0
 .../opt/ai-dock/bin/build/layer0/amd.sh       | 16 +++++++
 .../opt/ai-dock/bin/build/layer0/clean.sh     |  1 -
 .../opt/ai-dock/bin/build/layer0/common.sh    | 17 +++++++
 .../opt/ai-dock/bin/build/layer0/cpu.sh       | 16 +++++++
 .../opt/ai-dock/bin/build/layer0/init.sh      |  7 ++-
 .../opt/ai-dock/bin/build/layer0/nvidia.sh    | 27 +++++++++++
 .../opt/ai-dock/bin/preflight.d}/.gitkeep     |  0
 .../opt/ai-dock/bin/build/layer1/init.sh      | 11 +++++
 .../opt/storage}/.gitkeep                     |  0
 build/COPY_ROOT_EXTRA/opt/storage/.gitkeep    |  0
 build/Dockerfile                              |  8 +---
 docker-compose.yaml                           |  8 ++--
 21 files changed, 115 insertions(+), 127 deletions(-)
 delete mode 100755 build/COPY_ROOT/opt/ai-dock/bin/build/layer0/amd.sh
 delete mode 100755 build/COPY_ROOT/opt/ai-dock/bin/build/layer0/common.sh
 delete mode 100755 build/COPY_ROOT/opt/ai-dock/bin/build/layer0/cpu.sh
 delete mode 100755 build/COPY_ROOT/opt/ai-dock/bin/build/layer0/nvidia.sh
 delete mode 100644 build/COPY_ROOT/opt/ai-dock/tests/assert-torch-version.py
 delete mode 100644 build/COPY_ROOT/root/.gitkeep
 delete mode 100644 build/COPY_ROOT/usr/.gitkeep
 create mode 100755 build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/amd.sh
 rename build/{COPY_ROOT => COPY_ROOT_0}/opt/ai-dock/bin/build/layer0/clean.sh (85%)
 create mode 100755 build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/common.sh
 create mode 100755 build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/cpu.sh
 rename build/{COPY_ROOT => COPY_ROOT_0}/opt/ai-dock/bin/build/layer0/init.sh (76%)
 create mode 100755 build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/nvidia.sh
 rename build/{COPY_ROOT/etc/supervisor/supervisord/conf.d => COPY_ROOT_0/opt/ai-dock/bin/preflight.d}/.gitkeep (100%)
 create mode 100755 build/COPY_ROOT_99/opt/ai-dock/bin/build/layer1/init.sh
 rename build/{COPY_ROOT/opt/ai-dock/bin/preflight.d => COPY_ROOT_99/opt/storage}/.gitkeep (100%)
 delete mode 100644 build/COPY_ROOT_EXTRA/opt/storage/.gitkeep

diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml
index ac9e1d1..b5cfd79 100644
--- a/.github/workflows/docker-build.yml
+++ b/.github/workflows/docker-build.yml
@@ -8,9 +8,9 @@ on:
 env:
   UBUNTU_VERSION: 22.04
   BUILDX_NO_DEFAULT_ATTESTATIONS: 1
-  LATEST_CUDA: "2.2.1-py3.10-cuda-11.8.0-runtime-22.04"
-  LATEST_ROCM: "2.2.1-py3.10-rocm-5.7-runtime-22.04"
-  LATEST_CPU: "2.2.1-py3.10-cpu-22.04"
+  LATEST_CUDA: "2.3.1-py3.10-v2-cuda-11.8.0-runtime-22.04"
+  LATEST_ROCM: "2.3.1-py3.10-v2-rocm-5.7-runtime-22.04"
+  LATEST_CPU: "2.3.1-py3.10-v2-cpu-22.04"
 
 jobs:
   cpu-base:
@@ -23,12 +23,8 @@ jobs:
           - "3.11"
           - "3.12"
         pytorch:
-          - "2.1.2"
-          - "2.2.0"
           - "2.2.1"
-        exclude:
-          - python: "3.12"
-            pytorch: "2.1.2"
+          - "2.3.1"
     steps:
       -
         name: Free Space
@@ -65,7 +61,7 @@ jobs:
         name: Set tags
         run: |
           img_path="ghcr.io/${{ env.PACKAGE_NAME }}"
-          ver_tag="${{ matrix.pytorch }}-py${{ matrix.python }}-cpu-${{ env.UBUNTU_VERSION }}"
+          ver_tag="${{ matrix.pytorch }}-py${{ matrix.python }}-v2-cpu-${{ env.UBUNTU_VERSION }}"
 
           if [[ $ver_tag == ${{ env.LATEST_CPU }} ]]; then
               TAGS="${img_path}:latest-cpu, ${img_path}:$ver_tag"
@@ -79,7 +75,7 @@ jobs:
         with:
           context: build
           build-args: |
-            IMAGE_BASE=ghcr.io/ai-dock/python:${{ matrix.python }}-cpu-${{ env.UBUNTU_VERSION }}
+            IMAGE_BASE=ghcr.io/ai-dock/python:${{ matrix.python }}-v2-cpu-${{ env.UBUNTU_VERSION }}
             PYTORCH_VERSION=${{ matrix.pytorch }}
           push: true
           # Avoids unknown/unknown architecture and extra metadata
@@ -96,19 +92,15 @@ jobs:
           - "3.11"
           - "3.12"
         pytorch:
-          - "2.1.2"
-          - "2.2.0"
           - "2.2.1"
+          - "2.3.1"
         cuda:
           - "11.8.0"
-          - "12.1.0"
+          - "12.1.1"
         level:
           - "base"
           - "runtime"
           - "devel"
-        exclude:
-          - python: "3.12"
-            pytorch: "2.1.2"
 
     steps:
       -
@@ -146,7 +138,7 @@ jobs:
         name: Set tags
         run: |
           img_path="ghcr.io/${{ env.PACKAGE_NAME }}"
-          ver_tag="${{ matrix.pytorch }}-py${{ matrix.python }}-cuda-${{ matrix.cuda }}-${{ matrix.level }}-${{ env.UBUNTU_VERSION }}"
+          ver_tag="${{ matrix.pytorch }}-py${{ matrix.python }}-v2-cuda-${{ matrix.cuda }}-${{ matrix.level }}-${{ env.UBUNTU_VERSION }}"
 
           if [[ $ver_tag == ${{ env.LATEST_CUDA }} ]]; then
               TAGS="${img_path}:latest, ${img_path}:latest-cuda, ${img_path}:$ver_tag"
@@ -160,7 +152,7 @@ jobs:
         with:
           context: build
           build-args: |
-            IMAGE_BASE=ghcr.io/ai-dock/python:${{ matrix.python }}-cuda-${{ matrix.cuda }}-${{ matrix.level }}-${{ env.UBUNTU_VERSION }}
+            IMAGE_BASE=ghcr.io/ai-dock/python:${{ matrix.python }}-v2-cuda-${{ matrix.cuda }}-${{ matrix.level }}-${{ env.UBUNTU_VERSION }}
             PYTORCH_VERSION=${{ matrix.pytorch }}
           push: true
           provenance: false
@@ -176,23 +168,18 @@ jobs:
           - "3.11"
           - "3.12"
         pytorch:
-          - "2.1.2"
-          - "2.2.0"
           - "2.2.1"
+          - "2.3.1"
         rocm:
-          - "5.6"
           - "5.7"
+          - "6.0.2"
         level:
           - "runtime"
         exclude:
-          - pytorch: "2.2.1"
-            rocm: "5.6"
-          - pytorch: "2.2.0"
-            rocm: "5.6"
-          - pytorch: "2.1.2"
+          - pytorch: "2.3.1"
             rocm: "5.7"
-          - python: "3.12"
-            rocm: "5.6"
+          - pytorch: "2.2.1"
+            rocm: "6.0.2"
 
     steps:
       -
@@ -230,7 +217,7 @@ jobs:
         name: Set tags
         run: |
           img_path="ghcr.io/${{ env.PACKAGE_NAME }}"
-          ver_tag="${{ matrix.pytorch }}-py${{ matrix.python }}-rocm-${{ matrix.rocm }}-${{ matrix.level }}-${{ env.UBUNTU_VERSION }}"
+          ver_tag="${{ matrix.pytorch }}-py${{ matrix.python }}-v2-rocm-${{ matrix.rocm }}-${{ matrix.level }}-${{ env.UBUNTU_VERSION }}"
 
           if [[ $ver_tag == ${{ env.LATEST_ROCM }} ]]; then
               TAGS="${img_path}:latest-rocm, ${img_path}:$ver_tag"
@@ -244,7 +231,7 @@ jobs:
         with:
           context: build
           build-args: |
-            IMAGE_BASE=ghcr.io/ai-dock/python:${{ matrix.python }}-rocm-${{ matrix.rocm }}-${{ matrix.level }}-${{ env.UBUNTU_VERSION }}
+            IMAGE_BASE=ghcr.io/ai-dock/python:${{ matrix.python }}-v2-rocm-${{ matrix.rocm }}-${{ matrix.level }}-${{ env.UBUNTU_VERSION }}
             PYTORCH_VERSION=${{ matrix.pytorch }}
           push: true
           provenance: false
diff --git a/.gitignore b/.gitignore
index a26d8f7..50f2079 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,4 @@
 workspace
-build/COPY_ROOT_EXTRA/
 config/authorized_keys
 config/rclone
 .env
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/amd.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/amd.sh
deleted file mode 100755
index be64b22..0000000
--- a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/amd.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/false
-
-# For ROCm specific logic
-# Pytorch for ROCm is not available through conda
-# but we can still get the dependencies
-
-# Mamba will downgrade python to satisfy requirements. We don't want that.
-python_lock="$(micromamba -n $MAMBA_DEFAULT_ENV run python -V | tail -n1 | awk '{print $2}' | cut -d '.' -f1,2)"
-f1,2)" - -$MAMBA_INSTALL -n $MAMBA_DEFAULT_ENV \ - pytorch=${PYTORCH_VERSION} torchvision torchaudio \ - python=${python_lock} \ - --only-deps - -$MAMBA_INSTALL -n $MAMBA_DEFAULT_ENV \ - python=${python_lock} - -# Now pip install... -micromamba -n $MAMBA_DEFAULT_ENV run pip install \ - --no-cache-dir \ - --index-url https://download.pytorch.org/whl/rocm${ROCM_VERSION} \ - torch==${PYTORCH_VERSION} torchvision torchaudio - \ No newline at end of file diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/common.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/common.sh deleted file mode 100755 index 3f144b0..0000000 --- a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/common.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/false - -# For logic common to cpu, cuda & rocm - -source /opt/ai-dock/etc/environment.sh - -if [[ $PYTORCH_VERSION == "2.0.1" ]]; then - ffmpeg_version="4.4" -else - ffmpeg_version="6.*" -fi - -export MAMBA_CREATE="micromamba create --always-softlink -y -c pytorch -c conda-forge" -export MAMBA_INSTALL="micromamba install --always-softlink -y -c pytorch -c conda-forge" -printf "export MAMBA_CREATE=\"%s\"\n" "${MAMBA_CREATE}" >> /opt/ai-dock/etc/environment.sh -printf "export MAMBA_INSTALL=\"%s\"\n" "${MAMBA_INSTALL}" >> /opt/ai-dock/etc/environment.sh - -python_version="$(micromamba -n $MAMBA_DEFAULT_ENV run python -V | tail -n1 | awk '{print $2}' | cut -d '.' -f1,2)" -printf "/opt/micromamba/envs/%s/lib/python%s/site-packages/torch/lib/\n" "$MAMBA_DEFAULT_ENV" "$python_version" >> /etc/ld.so.conf.d/x86_64-linux-gnu.micromamba.80-pytorch.conf - -$MAMBA_INSTALL -n $MAMBA_DEFAULT_ENV \ - ffmpeg="$ffmpeg_version" \ - sox=14.4.2 \ - ocl-icd-system \ No newline at end of file diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/cpu.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/cpu.sh deleted file mode 100755 index 249a84b..0000000 --- a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/cpu.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/false - -# For CPU specific processes -# Mamba will downgrade python to satisfy requirements. We don't want that. -python_lock="$(micromamba -n $MAMBA_DEFAULT_ENV run python -V | tail -n1 | awk '{print $2}' | cut -d '.' -f1,2)" - -$MAMBA_INSTALL -n $MAMBA_DEFAULT_ENV \ - python=${python_lock} \ - pytorch=${PYTORCH_VERSION} torchvision \ - torchaudio \ - cpuonly \ - ffmpeg="$ffmpeg_version" - \ No newline at end of file diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/nvidia.sh b/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/nvidia.sh deleted file mode 100755 index 0ed4f60..0000000 --- a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/nvidia.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/false - -export MAMBA_CREATE="micromamba create --always-softlink -y -c pytorch -c nvidia -c conda-forge" -env-store MAMBA_CREATE -export MAMBA_INSTALL="micromamba install --always-softlink -y -c pytorch -c nvidia -c conda-forge" -env-store MAMBA_INSTALL - -# Mamba will downgrade python to satisfy requirements. We don't want that. -python_lock="$(micromamba -n $MAMBA_DEFAULT_ENV run python -V | tail -n1 | awk '{print $2}' | cut -d '.' -f1,2)" - -$MAMBA_INSTALL -n $MAMBA_DEFAULT_ENV \ - pytorch=${PYTORCH_VERSION} torchvision torchaudio \ - python=${python_lock} \ - pytorch-cuda="$(cut -d '.' 
diff --git a/build/COPY_ROOT/opt/ai-dock/tests/assert-torch-version.py b/build/COPY_ROOT/opt/ai-dock/tests/assert-torch-version.py
deleted file mode 100644
index 3ad721e..0000000
--- a/build/COPY_ROOT/opt/ai-dock/tests/assert-torch-version.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-import os
-required_version = os.environ.get("PYTORCH_VERSION")
-actual_version = torch.__version__
-# Vaguely handle version+platform type string
-if not actual_version.startswith(required_version):
-    print(f"Expected pytorch v{required_version} but found v{actual_version}")
-    exit(1)
-
diff --git a/build/COPY_ROOT/root/.gitkeep b/build/COPY_ROOT/root/.gitkeep
deleted file mode 100644
index e69de29..0000000
diff --git a/build/COPY_ROOT/usr/.gitkeep b/build/COPY_ROOT/usr/.gitkeep
deleted file mode 100644
index e69de29..0000000
diff --git a/build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/amd.sh b/build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/amd.sh
new file mode 100755
index 0000000..39b7e54
--- /dev/null
+++ b/build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/amd.sh
@@ -0,0 +1,16 @@
+#!/bin/false
+
+build_amd_main() {
+    build_amd_install_torch
+    build_common_run_tests
+}
+
+build_amd_install_torch() {
+    "$PYTHON_VENV_PIP" install --no-cache-dir \
+        torch==${PYTORCH_VERSION} \
+        torchvision \
+        torchaudio \
+        --extra-index-url=https://download.pytorch.org/whl/rocm${ROCM_VERSION}
+}
+
+build_amd_main "$@"
\ No newline at end of file
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/clean.sh b/build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/clean.sh
similarity index 85%
rename from build/COPY_ROOT/opt/ai-dock/bin/build/layer0/clean.sh
rename to build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/clean.sh
index 4a35056..3a33952 100755
--- a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/clean.sh
+++ b/build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/clean.sh
@@ -2,7 +2,6 @@
 
 # Tidy up and keep image small
 apt-get clean -y
-micromamba clean -ay
 
 fix-permissions.sh -o container
 
diff --git a/build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/common.sh b/build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/common.sh
new file mode 100755
index 0000000..7f658c0
--- /dev/null
+++ b/build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/common.sh
@@ -0,0 +1,17 @@
+#!/bin/false
+
+# For logic common to cpu, cuda & rocm
+
+build_common_main() {
+    :
+}
+
+build_common_run_tests() {
+    installed_pytorch_version=$("$PYTHON_VENV_PYTHON" -c "import torch; print(torch.__version__)")
+    if [[ "$installed_pytorch_version" != "$PYTORCH_VERSION"* ]]; then
+        echo "Expected PyTorch ${PYTORCH_VERSION} but found ${installed_pytorch_version}\n"
+        exit 1
+    fi
+}
+
+build_common_main "$@"
\ No newline at end of file
diff --git a/build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/cpu.sh b/build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/cpu.sh
new file mode 100755
index 0000000..adb93d8
--- /dev/null
+++ b/build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/cpu.sh
@@ -0,0 +1,16 @@
+#!/bin/false
+
+build_cpu_main() {
+    build_cpu_install_torch
+    build_common_run_tests
+}
+
+build_cpu_install_torch() {
+    "$PYTHON_VENV_PIP" install --no-cache-dir \
+        torch==${PYTORCH_VERSION} \
+        torchvision \
+        torchaudio \
+        --extra-index-url=https://download.pytorch.org/whl/cpu
+}
+
+build_cpu_main "$@"
\ No newline at end of file
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/init.sh b/build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/init.sh
similarity index 76%
rename from build/COPY_ROOT/opt/ai-dock/bin/build/layer0/init.sh
rename to build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/init.sh
index 81f25ae..1981407 100755
--- a/build/COPY_ROOT/opt/ai-dock/bin/build/layer0/init.sh
+++ b/build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/init.sh
@@ -4,8 +4,13 @@ set -eo pipefail
 
 umask 002
 
+source /opt/ai-dock/etc/environment.sh
 source /opt/ai-dock/bin/build/layer0/common.sh
 
+PYTHON_VENV="$VENV_DIR/$PYTHON_DEFAULT_VENV"
+PYTHON_VENV_PYTHON="$PYTHON_VENV/bin/python"
+PYTHON_VENV_PIP="$PYTHON_VENV/bin/pip"
+
 if [[ "$XPU_TARGET" == "NVIDIA_GPU" ]]; then
     source /opt/ai-dock/bin/build/layer0/nvidia.sh
 elif [[ "$XPU_TARGET" == "AMD_GPU" ]]; then
@@ -17,6 +22,4 @@ else
     exit 1
 fi
 
-$MAMBA_DEFAULT_RUN python /opt/ai-dock/tests/assert-torch-version.py
-
 source /opt/ai-dock/bin/build/layer0/clean.sh
diff --git a/build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/nvidia.sh b/build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/nvidia.sh
new file mode 100755
index 0000000..3bc030b
--- /dev/null
+++ b/build/COPY_ROOT_0/opt/ai-dock/bin/build/layer0/nvidia.sh
@@ -0,0 +1,27 @@
+#!/bin/false
+
+build_nvidia_main() {
+    build_nvidia_install_torch
+    build_common_run_tests
+    build_nvidia_run_tests
+}
+
+build_nvidia_install_torch() {
+    short_cuda_version="cu$(cut -d '.' -f 1,2 <<< "${CUDA_VERSION}" | tr -d '.')"
+    "$PYTHON_VENV_PIP" install --no-cache-dir \
+        nvidia-ml-py3 \
+        torch==${PYTORCH_VERSION} \
+        torchvision \
+        torchaudio \
+        --extra-index-url=https://download.pytorch.org/whl/$short_cuda_version
+}
+
+build_nvidia_run_tests() {
+    installed_pytorch_cuda_version=$("$PYTHON_VENV_PYTHON" -c "import torch; print(torch.version.cuda)")
+    if [[ "$CUDA_VERSION" != "$installed_pytorch_cuda_version"* ]]; then
+        echo "Expected PyTorch CUDA ${CUDA_VERSION} but found ${installed_pytorch_cuda_version}\n"
+        exit 1
+    fi
+}
+
+build_nvidia_main "$@"
\ No newline at end of file
diff --git a/build/COPY_ROOT/etc/supervisor/supervisord/conf.d/.gitkeep b/build/COPY_ROOT_0/opt/ai-dock/bin/preflight.d/.gitkeep
similarity index 100%
rename from build/COPY_ROOT/etc/supervisor/supervisord/conf.d/.gitkeep
rename to build/COPY_ROOT_0/opt/ai-dock/bin/preflight.d/.gitkeep
diff --git a/build/COPY_ROOT_99/opt/ai-dock/bin/build/layer1/init.sh b/build/COPY_ROOT_99/opt/ai-dock/bin/build/layer1/init.sh
new file mode 100755
index 0000000..36397ab
--- /dev/null
+++ b/build/COPY_ROOT_99/opt/ai-dock/bin/build/layer1/init.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+# Override this file to add extras to your build
+
+## START
+#umask 002
+
+
+
+## END
+#fix-permissions.sh -o container
\ No newline at end of file
diff --git a/build/COPY_ROOT/opt/ai-dock/bin/preflight.d/.gitkeep b/build/COPY_ROOT_99/opt/storage/.gitkeep
similarity index 100%
rename from build/COPY_ROOT/opt/ai-dock/bin/preflight.d/.gitkeep
rename to build/COPY_ROOT_99/opt/storage/.gitkeep
diff --git a/build/COPY_ROOT_EXTRA/opt/storage/.gitkeep b/build/COPY_ROOT_EXTRA/opt/storage/.gitkeep
deleted file mode 100644
index e69de29..0000000
diff --git a/build/Dockerfile b/build/Dockerfile
index cedb4a9..5c06e04 100644
--- a/build/Dockerfile
+++ b/build/Dockerfile
@@ -1,6 +1,6 @@
 # For build automation - Allows building from any ai-dock base image
 # Use a *cuda*base* image as default because pytorch brings the libs
-ARG IMAGE_BASE="ghcr.io/ai-dock/python:3.10-cuda-11.8.0-base-22.04"
+ARG IMAGE_BASE="ghcr.io/ai-dock/python:3.10-v2-cuda-11.8.0-base-22.04"
 FROM ${IMAGE_BASE}
 
 LABEL org.opencontainers.image.source https://github.com/ai-dock/pytorch
@@ -13,7 +13,7 @@ ENV IMAGE_SLUG="pytorch"
 # Note: the default mamba environment is set by ai-dock/python
 
 # Copy early so we can use scripts in the build - Changes to these files will invalidate the cache and cause a rebuild.
-COPY --chown=0:1111 ./COPY_ROOT/ /
+COPY --chown=0:1111 ./COPY_ROOT_0/ /
 
 # Use build scripts to ensure we can build all targets from one Dockerfile in a single layer.
 # Don't put anything heavy in here - We can use multi-stage building above if necessary.
@@ -22,9 +22,5 @@ ENV PYTORCH_VERSION=${PYTORCH_VERSION}
 
 RUN set -eo pipefail && /opt/ai-dock/bin/build/layer0/init.sh | tee /var/log/build.log
 
-# Copy overrides and new files into a final layer for fast rebuilds. Uncomment below
-#COPY --chown=0:1111 ./COPY_ROOT_EXTRA/ /
-#RUN set -eo pipefail && /opt/ai-dock/bin/build/layer1/init.sh | tee -a /var/log/build.log
-
 # Keep init.sh as-is and place additional logic in /opt/ai-dock/bin/preflight.sh
 CMD ["init.sh"]
diff --git a/docker-compose.yaml b/docker-compose.yaml
index 37fa036..0f6beaa 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -6,12 +6,12 @@ services:
     build:
       context: ./build
      args:
-        IMAGE_BASE: ${IMAGE_BASE:-ghcr.io/ai-dock/python:3.10-cuda-11.8.0-runtime-22.04}
-        PYTORCH_VERSION: ${PYTORCH_VERSION:-2.1.1}
+        IMAGE_BASE: ${IMAGE_BASE:-ghcr.io/ai-dock/python:3.10-v2-cuda-11.8.0-runtime-22.04}
+        PYTORCH_VERSION: ${PYTORCH_VERSION:-2.3.0}
       tags:
-        - "ghcr.io/ai-dock/pytorch:${IMAGE_TAG:-2.1.1-py3.10-cuda-11.8.0-runtime-22.04}"
+        - "ghcr.io/ai-dock/pytorch:${IMAGE_TAG:-2.3.0-py3.10-v2-cuda-11.8.0-runtime-22.04}"
 
-    image: ghcr.io/ai-dock/pytorch:${IMAGE_TAG:-2.1.1-py3.10-cuda-11.8.0-runtime-22.04}
+    image: ghcr.io/ai-dock/pytorch:${IMAGE_TAG:-2.3.0-py3.10-v2-cuda-11.8.0-runtime-22.04}
 
     ## For Nvidia GPU's - You probably want to uncomment this
     #deploy: