diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 0000000..9603404 --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,192 @@ +name: qpod-docker-images + +on: + pull_request: + paths-ignore: + - "*.md" + + push: + branches: + - master + paths-ignore: + - "*.md" + +env: + DOCKER_REGISTRY_USER: ${{ secrets.DOCKER_REGISTRY_USER }} + DOCKER_REGISTRY_PASSWORD: ${{ secrets.DOCKER_REGISTRY_PASSWORD }} + +jobs: + build-base: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - run: echo '{"experimental":true}' | sudo tee /etc/docker/daemon.json && sudo service docker restart && docker info + + - name: docker - atom/base + run: | + source ./tool.sh + build_image atom latest docker_atom/Dockerfile && push_image + build_image base latest docker_base/Dockerfile + alias_image base latest py-mini latest + push_image + + build-cuda-base: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - run: echo '{"experimental":true}' | sudo tee /etc/docker/daemon.json && sudo service docker restart + - name: docker - cuda + run: | + source ./tool.sh + + build_image cuda_11.4 latest docker_atom/Dockerfile --build-arg "BASE_IMG=nvidia/cuda:11.4.1-cudnn8-runtime-ubuntu20.04" + build_image cuda_11.4 latest docker_base/Dockerfile --build-arg "BASE_IMG=cuda_11.4" + build_image cuda_11.4 latest docker_cuda/Dockerfile --build-arg "BASE_IMG=cuda_11.4" alias_image cuda_11.4 latest cuda latest && push_image + + # For tensorflow >=2.5 and torch >= 1.9 support + build_image cuda_11.2 latest docker_atom/Dockerfile --build-arg "BASE_IMG=nvidia/cuda:11.2.2-cudnn8-runtime-ubuntu20.04" + build_image cuda_11.2 latest docker_base/Dockerfile --build-arg "BASE_IMG=cuda_11.2" + build_image cuda_11.2 latest docker_cuda/Dockerfile --build-arg "BASE_IMG=cuda_11.2" && push_image + + # For legacy tensorflow 1.15 support + build_image cuda_10.0 latest docker_atom/Dockerfile --build-arg "BASE_IMG=nvidia/cuda:10.0-cudnn7-runtime-ubuntu18.04" + build_image cuda_10.0 latest docker_base/Dockerfile --build-arg "BASE_IMG=cuda_10.0" + build_image cuda_10.0 latest docker_cuda/Dockerfile --build-arg "BASE_IMG=cuda_10.0" && push_image + + build-core: + needs: build-base + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - run: echo '{"experimental":true}' | sudo tee /etc/docker/daemon.json && sudo service docker restart + - name: docker-core + run: | + source ./tool.sh + build_image core latest docker_core/Dockerfile \ + --build-arg "ARG_PROFILE_PYTHON=base,datascience,database,nlp,cv,bio,chem,tf2,torch" \ + --build-arg "ARG_PROFILE_R=base,datascience,rstudio,rshiny" \ + --build-arg "ARG_PROFILE_NODEJS=base" \ + --build-arg "ARG_PROFILE_JAVA=base,maven" \ + --build-arg "ARG_PROFILE_GO=base" \ + --build-arg "ARG_PROFILE_JULIA=base" \ + --build-arg "ARG_PROFILE_OCTAVE=base" \ + --build-arg "ARG_PROFILE_LATEX=base,cjk" + push_image + + build-py: + needs: build-base + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - run: echo '{"experimental":true}' | sudo tee /etc/docker/daemon.json && sudo service docker restart + - name: docker-py + run: | + source ./tool.sh + build_image py-data latest docker_core/Dockerfile --build-arg "ARG_PROFILE_PYTHON=datascience,database" && push_image + build_image py-nlp latest docker_core/Dockerfile --build-arg "ARG_PROFILE_PYTHON=datascience,nlp" && push_image + build_image py-cv latest docker_core/Dockerfile --build-arg "ARG_PROFILE_PYTHON=datascience,cv" && push_image + build_image py-bio latest 
docker_core/Dockerfile --build-arg "ARG_PROFILE_PYTHON=datascience,bio" && push_image + build_image py-chem latest docker_core/Dockerfile --build-arg "ARG_PROFILE_PYTHON=datascience,chem" && push_image + build_image py-std latest docker_core/Dockerfile \ + --build-arg "ARG_PROFILE_PYTHON=datascience,database,nlp,cv,bio,chem" \ + && push_image + build_image py-jdk latest docker_core/Dockerfile \ + --build-arg "ARG_PROFILE_PYTHON=datascience,database,nlp,cv,bio,chem" \ + --build-arg "ARG_PROFILE_JAVA=base" \ + && push_image + + build-r: + needs: build-base + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - run: echo '{"experimental":true}' | sudo tee /etc/docker/daemon.json && sudo service docker restart + - name: docker-R + run: | + source ./tool.sh + build_image r-mini latest docker_core/Dockerfile \ + --build-arg "ARG_PROFILE_R=base" \ + && push_image + build_image r-std latest docker_core/Dockerfile \ + --build-arg "ARG_PROFILE_R=base,datascience" \ + --build-arg "ARG_PROFILE_JAVA=base" \ + && push_image + build_image r-latex latest docker_core/Dockerfile \ + --build-arg "ARG_PROFILE_R=base,datascience" \ + --build-arg "ARG_PROFILE_JAVA=base" \ + --build-arg "ARG_PROFILE_LATEX=base,cjk" \ + && push_image + build_image r-studio latest docker_core/Dockerfile \ + --build-arg "ARG_PROFILE_R=base,datascience,rstudio,rshiny" \ + --build-arg "ARG_PROFILE_JAVA=base" \ + --build-arg "ARG_PROFILE_LATEX=base,cjk" \ + && push_image + + build-other: + needs: build-base + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - run: echo '{"experimental":true}' | sudo tee /etc/docker/daemon.json && sudo service docker restart + - name: docker-other + run: | + source ./tool.sh + build_image node latest docker_core/Dockerfile --build-arg "ARG_PROFILE_NODEJS=base" && push_image + build_image jdk latest docker_core/Dockerfile --build-arg "ARG_PROFILE_JAVA=base" && push_image + build_image julia latest docker_core/Dockerfile --build-arg "ARG_PROFILE_JULIA=base" && push_image + build_image go latest docker_core/Dockerfile --build-arg "ARG_PROFILE_GO=base" && push_image + + build-cuda-core: + needs: build-cuda-base + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - run: echo '{"experimental":true}' | sudo tee /etc/docker/daemon.json && sudo service docker restart + - name: docker-cuda-core + run: | + source ./tool.sh + + build_image py-cuda-10.0 latest docker_core/Dockerfile \ + --build-arg "ARG_PROFILE_PYTHON=tf1,datascience,nlp,cv" \ + --build-arg "BASE_IMG=cuda_10.0" + alias_image py-cuda-10.0 latest tf1 latest && push_image + + build_image full-cuda-11.2 latest docker_core/Dockerfile \ + --build-arg "BASE_IMG=cuda_11.2" \ + --build-arg "ARG_PROFILE_PYTHON=base,datascience,database,nlp,cv,bio,chem,tf2,torch" \ + --build-arg "ARG_PROFILE_R=base,datascience,rstudio,rshiny" \ + --build-arg "ARG_PROFILE_NODEJS=base" \ + --build-arg "ARG_PROFILE_JAVA=base,maven" \ + --build-arg "ARG_PROFILE_GO=base" \ + --build-arg "ARG_PROFILE_JULIA=base" \ + --build-arg "ARG_PROFILE_LATEX=base,cjk" + alias_image full-cuda-11.2 latest core-cuda latest && push_image + + build-dev-core: + needs: build-core + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - run: echo '{"experimental":true}' | sudo tee /etc/docker/daemon.json && sudo service docker restart + - name: docker-dev-core + run: | + source ./tool.sh + build_image core-dev latest docker_dev/Dockerfile \ + --build-arg "ARG_PROFILE_JUPYTER=base,kernels,extensions" \ + --build-arg "ARG_PROFILE_VSCODE=base" + 
alias_image core-dev latest full latest && push_image + + build-dev-cuda: + needs: build-cuda-core + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - run: echo '{"experimental":true}' | sudo tee /etc/docker/daemon.json && sudo service docker restart + - name: docker-dev-cuda + run: | + source ./tool.sh + build_image cuda-dev latest docker_dev/Dockerfile \ + --build-arg "BASE_IMG=core-cuda" \ + --build-arg "ARG_PROFILE_JUPYTER=base,kernels,extensions" \ + --build-arg "ARG_PROFILE_VSCODE=base" + alias_image cuda-dev latest full-cuda latest && push_image diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index a407349..0000000 --- a/.travis.yml +++ /dev/null @@ -1,186 +0,0 @@ -language: minimal - -dist: bionic - -branches: - only: - - master - - devops - -notifications: - slack: q-pod:lrzKf5Ff1Ao1MGclzElR23j4 - -env: - global: - - REGISTRY_URL: "docker.io" # docker.io or other registry URL, DOCKER_REGISTRY_USER/DOCKER_REGISTRY_PASSWORD to be set in CI env. - - DOCKER_BUILDKIT: 0 - -install: - - echo '{"experimental":true}' | sudo tee /etc/docker/daemon.json && sudo service docker restart && docker info - -before_script: - - echo IP=$(curl -s http://ifconfig.me/ip) - - CI_PROJECT_NAMESPACE=$([[ "$TRAVIS_PULL_REQUEST_SLUG" = "" ]] && echo "$(dirname ${TRAVIS_REPO_SLUG})" || echo "$(dirname ${TRAVIS_PULL_REQUEST_SLUG})") - - export NAMESPACE=$(echo "${REGISTRY_URL}/${CI_PROJECT_NAMESPACE}" | awk '{print tolower($0)}') - - export VER=`date +%Y.%m%d` - - build_image() { - IMG=$1; TAG=$2; FILE=$3; shift 3; - [[ "$TRAVIS_PULL_REQUEST_BRANCH" == "" ]] && BASE_NAMESPACE="${NAMESPACE}" || BASE_NAMESPACE="${NAMESPACE}0${TRAVIS_PULL_REQUEST_BRANCH}" ; - docker build --squash --compress --force-rm=true -t "${BASE_NAMESPACE}/${IMG}:${TAG}" -f "$FILE" --build-arg "BASE_NAMESPACE=${BASE_NAMESPACE}" "$@" "$(dirname $FILE)" ; - docker tag "${BASE_NAMESPACE}/${IMG}:${TAG}" "${BASE_NAMESPACE}/${IMG}:${VER}" ; - } - - alias_image() { - IMG_1=$1; TAG_1=$2; IMG_2=$3; TAG_2=$4; shift 4; - [[ "$TRAVIS_PULL_REQUEST_BRANCH" == "" ]] && BASE_NAMESPACE="${NAMESPACE}" || BASE_NAMESPACE="${NAMESPACE}0${TRAVIS_PULL_REQUEST_BRANCH}" ; - docker tag "${BASE_NAMESPACE}/${IMG_1}:${TAG_1}" "${BASE_NAMESPACE}/${IMG_2}:${TAG_2}" ; - docker tag "${BASE_NAMESPACE}/${IMG_2}:${TAG_2}" "${BASE_NAMESPACE}/${IMG_2}:${VER}" ; - } - -after_script: - - docker image prune --force && docker images - - IMGS=$(docker images | grep "second" | awk '{print $1 ":" $2}') - - echo "$DOCKER_REGISTRY_PASSWORD" | docker login "${REGISTRY_URL}" -u "$DOCKER_REGISTRY_USER" --password-stdin ; - for IMG in $(echo $IMGS | tr " " "\n") ; - do - docker push "${IMG}"; - status=$?; - echo "[${status}] Image pushed > ${IMG}"; - done - -jobs: - include: - - stage: atom - name: atom - script: build_image atom latest docker_atom/Dockerfile - - - stage: base - name: base - script: - - build_image base latest docker_base/Dockerfile - - alias_image base latest py-mini latest -#------------------------------------------------------------------------------- -# core: full - start the full job fist as it's time consuming -#------------------------------------------------------------------------------- - - stage: core - name: core - script: build_image core latest docker_core/Dockerfile - --build-arg "ARG_PROFILE_PYTHON=base,datascience,database,nlp,cv,bio,chem,tf2,torch" - --build-arg "ARG_PROFILE_R=base,datascience,rstudio,rshiny" - --build-arg "ARG_PROFILE_NODEJS=base" - --build-arg "ARG_PROFILE_JAVA=base,maven" - --build-arg 
"ARG_PROFILE_GO=base" - --build-arg "ARG_PROFILE_JULIA=base" - --build-arg "ARG_PROFILE_OCTAVE=base" - --build-arg "ARG_PROFILE_LATEX=base,cjk" -#------------------------------------------------------------------------------- -# Python: (mini - same as `base` as aliased above), datascience, bio, chem, nlp, cv, full -#------------------------------------------------------------------------------- - - name: py-data - script: build_image py-data latest docker_core/Dockerfile --build-arg "ARG_PROFILE_PYTHON=datascience,database" - - name: py-nlp - script: build_image py-nlp latest docker_core/Dockerfile --build-arg "ARG_PROFILE_PYTHON=datascience,nlp" - - name: py-cv - script: build_image py-cv latest docker_core/Dockerfile --build-arg "ARG_PROFILE_PYTHON=datascience,cv" - - name: py-bio - script: build_image py-bio latest docker_core/Dockerfile --build-arg "ARG_PROFILE_PYTHON=datascience,bio" - - name: py-chem - script: build_image py-bio latest docker_core/Dockerfile --build-arg "ARG_PROFILE_PYTHON=datascience,chem" - - name: py-std - script: build_image py-std latest docker_core/Dockerfile --build-arg "ARG_PROFILE_PYTHON=datascience,database,nlp,cv,bio,chem" - - name: py-jdk - script: build_image py-jdk latest docker_core/Dockerfile --build-arg "ARG_PROFILE_PYTHON=datascience,database,nlp,cv,bio,chem" --build-arg "ARG_PROFILE_JAVA=base" -#------------------------------------------------------------------------------- -# R: mini, std, latex, studio -#------------------------------------------------------------------------------- - - name: r-mini - script: build_image r-mini latest docker_core/Dockerfile --build-arg "ARG_PROFILE_R=base" - - name: r-std - script: build_image r-std latest docker_core/Dockerfile --build-arg "ARG_PROFILE_R=base,datascience" --build-arg "ARG_PROFILE_JAVA=base" - - name: r-latex - script: build_image r-latex latest docker_core/Dockerfile --build-arg "ARG_PROFILE_R=base,datascience" --build-arg "ARG_PROFILE_JAVA=base" --build-arg "ARG_PROFILE_LATEX=base,cjk" - - name: r-studio - script: build_image r-studio latest docker_core/Dockerfile --build-arg "ARG_PROFILE_R=base,datascience,rstudio,rshiny" --build-arg "ARG_PROFILE_JAVA=base" --build-arg "ARG_PROFILE_LATEX=base,cjk" -#------------------------------------------------------------------------------- -# NodeJS: base -#------------------------------------------------------------------------------- - - name: node - script: build_image node latest docker_core/Dockerfile --build-arg "ARG_PROFILE_NODEJS=base" -#------------------------------------------------------------------------------- -# Java: base -#------------------------------------------------------------------------------- - - name: jdk - script: build_image jdk latest docker_core/Dockerfile --build-arg "ARG_PROFILE_JAVA=base" -#------------------------------------------------------------------------------- -# Julia: base -#------------------------------------------------------------------------------- - - name: julia - script: build_image julia latest docker_core/Dockerfile --build-arg "ARG_PROFILE_JULIA=base" -#------------------------------------------------------------------------------- -# Go: base -#------------------------------------------------------------------------------- - - name: go - script: build_image go latest docker_core/Dockerfile --build-arg "ARG_PROFILE_GO=base" -#------------------------------------------------------------------------------- -# Octave: base 
-#------------------------------------------------------------------------------- - - name: octave - script: build_image octave latest docker_core/Dockerfile --build-arg "ARG_PROFILE_OCTAVE=base" -#=============================================================================== -# GPU: cuda_10.0, cuda_10.1, cuda_10.2 (cuda), cuda_11.0 -#------------------------------------------------------------------------------- - - stage: cuda-base - name: cuda_10.0 - script: build_image cuda_10.0 latest docker_cuda/cuda10.0.Dockerfile - - name: cuda_10.1 - script: build_image cuda_10.1 latest docker_cuda/cuda10.1.Dockerfile - - name: cuda_10.2 - script: build_image cuda_10.2 latest docker_cuda/cuda10.2.Dockerfile - - name: cuda_11.0 - script: build_image cuda_11.0 latest docker_cuda/cuda11.0.Dockerfile -#------------------------------------------------------------------------------- -# Python with DeepLearning package: tf1 tf2 pytorch, -#------------------------------------------------------------------------------- - - stage: cuda-core - name: py-cuda-10.0 - script: - - build_image py-cuda-10.0 latest docker_core/Dockerfile --build-arg "ARG_PROFILE_PYTHON=tf1,datascience,nlp,cv" --build-arg "BASE_IMG=cuda_10.0" - - alias_image py-cuda-10.0 latest tf1 latest - - name: py-cuda-10.1 - script: - - build_image py-cuda-10.1 latest docker_core/Dockerfile --build-arg "ARG_PROFILE_PYTHON=tf2,datascience,nlp,cv" --build-arg "BASE_IMG=cuda_10.1" - - alias_image py-cuda-10.1 latest tf2 latest - - name: py-cuda-10.2 - script: - - build_image py-cuda-10.2 latest docker_core/Dockerfile --build-arg "ARG_PROFILE_PYTHON=torch,datascience,nlp,cv" --build-arg "BASE_IMG=cuda_10.2" - - alias_image py-cuda-10.2 latest torch latest -#------------------------------------------------------------------------------- -# Python with DeepLearning packages and others: core-cuda, full-cuda-10.1 -#------------------------------------------------------------------------------- - - stage: cuda-core - name: full-cuda - script: - - build_image full-cuda-10.1 latest docker_core/Dockerfile - --build-arg "BASE_IMG=cuda_10.1" - --build-arg "ARG_PROFILE_PYTHON=base,datascience,database,nlp,cv,bio,chem,tf2,torch" - --build-arg "ARG_PROFILE_R=base,datascience,rstudio,rshiny" - --build-arg "ARG_PROFILE_NODEJS=base" - --build-arg "ARG_PROFILE_JAVA=base,maven" - --build-arg "ARG_PROFILE_GO=base" - --build-arg "ARG_PROFILE_JULIA=base" - --build-arg "ARG_PROFILE_LATEX=base,cjk" - - alias_image full-cuda-10.1 latest core-cuda latest -#=============================================================================== -# dev: core-dev, full ; cuda-dev, full-cuda -#------------------------------------------------------------------------------- - - stage: dev - name: core-dev - script: - - build_image core-dev latest docker_dev/Dockerfile --build-arg "ARG_PROFILE_JUPYTER=base,kernels,extensions" --build-arg "ARG_PROFILE_VSCODE=base" - - alias_image core-dev latest full latest - - name: cuda-dev - script: - - build_image cuda-dev latest docker_dev/Dockerfile - --build-arg "BASE_IMG=core-cuda" - --build-arg "ARG_PROFILE_JUPYTER=base,kernels,extensions" - --build-arg "ARG_PROFILE_VSCODE=base" - - alias_image cuda-dev latest full-cuda latest diff --git a/README.md b/README.md index e296197..be3c3a4 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # QPod - Docker Image Stack [![License](https://img.shields.io/badge/License-BSD%203--Clause-green.svg)](https://opensource.org/licenses/BSD-3-Clause) -[![TravisCI Pipeline 
Status](https://img.shields.io/travis/com/QPod/docker-images.svg)](https://travis-ci.com/QPod/docker-images) +[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/QPod/docker-images/qpod-docker-images)](https://github.com/QPod/docker-images/actions/workflows/docker.yml) [![Join the Gitter Chat](https://img.shields.io/gitter/room/nwjs/nw.js.svg)](https://gitter.im/QPod/) [![Docker Pulls](https://img.shields.io/docker/pulls/qpod/qpod.svg)](https://hub.docker.com/r/qpod/qpod) [![Docker Starts](https://img.shields.io/docker/stars/qpod/qpod.svg)](https://hub.docker.com/r/qpod/qpod) @@ -44,8 +44,8 @@ With Docker and `QPod`, you ### 0. Have docker installed on your laptop/server - Linux (e.g.: Ubuntu LTS) / Windows (>=10) / macOS -- Install **Docker >= 19.03**: `docker-ce` ( community version & free: [Linux](https://hub.docker.com/search/?offering=community&type=edition&operating_system=linux) | [macOS](https://download.docker.com/mac/stable/Docker.dmg) | [Windows](https://download.docker.com/win/stable/Docker%20Desktop%20Installer.exe) ) or [docker-ee](https://hub.docker.com/search/?offering=enterprise&type=edition) (enterprise version & paid) on your laptop/server. **Docker installed from default Ubuntu/CentOS repository probably won't work for GPU!** -- If you want to use *NVIDIA GPUs* with `QPod`, Linux server is **required**. After installing **Docker >= 19.03**, also install both the [`NVIDIA driver`](https://github.com/NVIDIA/nvidia-docker/wiki/Frequently-Asked-Questions#how-do-i-install-the-nvidia-driver) and the latest version of [`nvidia-container-toolkit`](https://github.com/NVIDIA/nvidia-docker#quickstart) to use the GPUs in containers. +- Install **Docker >= 19.03**: `docker-ce` ( community version & free: [Linux](https://hub.docker.com/search/?offering=community&type=edition&operating_system=linux) | [macOS](https://hub.docker.com/editions/community/docker-ce-desktop-mac) | [Windows](https://desktop.docker.com/win/stable/amd64/Docker%20Desktop%20Installer.exe) ) on your laptop/server. **Docker installed from default Ubuntu/CentOS repository probably won't work for GPU!** +- If you want to use *NVIDIA GPUs* with `QPod`, a Linux server or the latest Windows WSL2 is **required**. After installing **Docker >= 19.03**, also install both the [`NVIDIA driver`](https://github.com/NVIDIA/nvidia-docker/wiki/Frequently-Asked-Questions#how-do-i-install-the-nvidia-driver) and the latest version of [`nvidia-container-toolkit`](https://github.com/NVIDIA/nvidia-docker#quickstart) to use the GPUs in containers. ### 1. Choose the features and choose a folder on your disk @@ -104,7 +104,7 @@ Typically, you can choose `full` / `full-cuda` if you have enough disk space and Change the value of `IMG` and `WORKDIR` to your choices in the script below, and run the script. Shutdown Jupyter or other service/program which are using port 8888 or 9999.
-#### For Linux/macOS, run this in bash/terminal +#### For Linux/macOS/Windows WSL, run this in bash/terminal ```shell IMG="qpod/full:latest" diff --git a/docker_atom/Dockerfile b/docker_atom/Dockerfile index c1461d5..6f629f7 100644 --- a/docker_atom/Dockerfile +++ b/docker_atom/Dockerfile @@ -2,7 +2,8 @@ # default value: Latest LTS version of Ubuntu (https://hub.docker.com/_/ubuntu) -FROM ubuntu:latest +ARG BASE_IMG="ubuntu:latest" +FROM ${BASE_IMG} LABEL maintainer="haobibo@gmail.com" @@ -13,17 +14,17 @@ COPY work /opt/utils/ ENV SHELL=/bin/bash \ DEBIAN_FRONTEND=noninteractive \ LC_ALL="" \ - LC_CTYPE=C.UTF-8 \ - LC_TYPE=en_US.UTF-8 \ - LANG=en_US.UTF-8 \ - LANGUAGE=en_US.UTF-8 \ + LC_CTYPE="C.UTF-8" \ + LC_TYPE="en_US.UTF-8" \ + LANG="en_US.UTF-8" \ + LANGUAGE="en_US.UTF-8" \ HOME_DIR=/root SHELL ["/bin/bash", "-c"] # --> Install OS libraries and setup some configurations RUN cd /tmp \ - && apt-get -y update --fix-missing > /dev/null && apt-get -y -qq upgrade \ + && apt-get -qq update --fix-missing && apt-get -y -qq upgrade \ && apt-get -qq install -y --no-install-recommends \ apt-utils apt-transport-https ca-certificates gnupg2 dirmngr locales sudo lsb-release curl \ && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 \ diff --git a/docker_base/work/install_list_base.apt b/docker_atom/work/install_list_base.apt similarity index 86% rename from docker_base/work/install_list_base.apt rename to docker_atom/work/install_list_base.apt index 33d99da..f4f6d7a 100644 --- a/docker_base/work/install_list_base.apt +++ b/docker_atom/work/install_list_base.apt @@ -8,4 +8,3 @@ unzip p7zip-full % zip/unzip rsync % file transfer openssh-client % Provide ssh client and generate keys openssh-server % Provide ssh server (sshd) -python2.7 % required by some legacy packages like node-gyp diff --git a/docker_base/work/script-setup.sh b/docker_atom/work/script-setup.sh similarity index 83% rename from docker_base/work/script-setup.sh rename to docker_atom/work/script-setup.sh index 2f9dec3..de6f690 100644 --- a/docker_base/work/script-setup.sh +++ b/docker_atom/work/script-setup.sh @@ -11,7 +11,7 @@ setup_conda() { && conda update --all --quiet --yes # These conda pkgs shouldn't be removed (otherwise will cause RemoveError) since they are directly reqiuired by conda: pip setuptools pycosat pyopenssl requests ruamel_yaml - CONDA_PY_PKGS=`conda list | grep "py3" | cut -d " " -f 1 | sed "/#/d;/conda/d;/pip/d;/setuptools/d;/pycosat/d;/pyopenssl/d;/requests/d;/ruamel_yaml/d;"` \ + CONDA_PY_PKGS=`conda list | grep "py3" | cut -d " " -f 1 | sed "/#/d;/conda/d;/pip/d;/setuptools/d;/pycosat/d;/pyopenssl/d;/requests/d;/ruamel_yaml/d;"` \ && conda remove --force -yq $CONDA_PY_PKGS \ && pip install -UIq pip setuptools $CONDA_PY_PKGS @@ -20,17 +20,35 @@ setup_conda() { } +setup_nvtop() { + # Install Utilities `nvtop` + sudo apt-get -qq update --fix-missing && sudo apt-get -qq install -y --no-install-recommends libncurses5-dev + + DIRECTORY=`pwd` + + cd /tmp \ + && git clone https://github.com/Syllo/nvtop.git \ + && mkdir -pv nvtop/build && cd nvtop/build \ + && LIB_PATH=`find / -name "libnvidia-ml*" 2>/dev/null` \ + && cmake .. -DCMAKE_LIBRARY_PATH="`dirname $LIB_PATH`" .. 
\ + && make && sudo make install \ + && nvtop --version + + cd $DIRECTORY && rm -rf /tmp/nvtop + + sudo apt-get -qq remove -y libncurses5-dev +} + + setup_java_base() { - # VERSION_OPENJDK=16 && VERSION_OPENJDK_EA=8 \ - # && URL_OPENJDK="https://download.java.net/java/early_access/jdk${VERSION_OPENJDK}/${VERSION_OPENJDK_EA}/GPL/openjdk-${VERSION_OPENJDK}-ea+${VERSION_OPENJDK_EA}_linux-x64_bin.tar.gz" \ - URL_OPENJDK="https://download.java.net/java/GA/jdk14.0.2/205943a0976c4ed48cb16f1043c5c647/12/GPL/openjdk-14.0.2_linux-x64_bin.tar.gz" \ + URL_OPENJDK=`curl -sL https://jdk.java.net/archive/ | grep 'linux-x64_bin.tar' | sed -n 's/.*href="\([^"]*\).*/\1/p' | head -n 1` \ && install_tar_gz ${URL_OPENJDK} && mv /opt/jdk-* /opt/jdk \ && ln -s /opt/jdk/bin/* /usr/bin/ \ && echo "@ Version of Java (java/javac):" && java -version && javac -version } setup_java_maven() { - MAVEN_VERSION="3.6.3" \ + MAVEN_VERSION="3.8.2" \ && install_zip "http://archive.apache.org/dist/maven/maven-3/${MAVEN_VERSION}/binaries/apache-maven-${MAVEN_VERSION}-bin.zip" \ && mv /opt/apache-maven-${MAVEN_VERSION} /opt/maven \ && ln -s /opt/maven/bin/mvn* /usr/bin/ \ @@ -47,9 +65,9 @@ setup_node() { && mv /opt/node* /opt/node \ && echo "PATH=/opt/node/bin:$PATH" >> /etc/bash.bashrc \ && export PATH=/opt/node/bin:$PATH \ - && npm install -g npm yarn \ + && npm install -g npm \ && ln -s /opt/node/bin/* /usr/bin/ \ - && echo "@ Version of Node, npm, and yarn:" `node -v` `npm -v` `yarn -v` + && echo "@ Version of Node, npm, and yarn:" `node -v` `npm -v` } @@ -65,8 +83,7 @@ setup_R_base() { setup_R_rstudio() { - RSTUDIO_VERSION=`curl -sL https://dailies.rstudio.com/rstudioserver/oss/ubuntu/x86_64/ | grep -Po "(?<=rstudio-server-)[0-9]\.[0-9]\.[0-9]+" | sort | tail -n 1` \ - && wget -qO- "https://s3.amazonaws.com/rstudio-ide-build/server/bionic/amd64/rstudio-server-${RSTUDIO_VERSION}-amd64.deb" -O /tmp/rstudio.deb \ + $(curl -sL https://www.rstudio.com/products/rstudio/download-server/debian-ubuntu/ | grep '.deb' | grep 'bionic') -O /tmp/rstudio.deb \ && dpkg -x /tmp/rstudio.deb /tmp && mv /tmp/usr/lib/rstudio-server/ /opt/ \ && ln -s /opt/rstudio-server /usr/lib/ \ && ln -s /opt/rstudio-server/bin/rs* /usr/bin/ @@ -79,7 +96,7 @@ setup_R_rstudio() { && echo "auth-none=1" >> /etc/rstudio/rserver.conf \ && echo "auth-minimum-user-id=0" >> /etc/rstudio/rserver.conf \ && echo "auth-validate-users=0" >> /etc/rstudio/rserver.conf \ - && printf "#!/bin/bash\nexport USER=root\nrserver --www-port=8888" > /usr/local/bin/start-rstudio.sh \ + && printf "USER=root rserver --www-port=8888" > /usr/local/bin/start-rstudio.sh \ && chmod u+x /usr/local/bin/start-rstudio.sh # Remove RStudio's pandoc and pandoc-proc to reduce size if they are already installed in the jpy-latex step. @@ -95,7 +112,7 @@ setup_R_rshiny() { && dpkg -i /tmp/rshiny.deb \ && sed -i "s/run_as shiny;/run_as root;/g" /etc/shiny-server/shiny-server.conf \ && sed -i "s/3838/8888/g" /etc/shiny-server/shiny-server.conf \ - && printf "#!/bin/bash\nexport USER=root\nshiny-server" > /usr/local/bin/start-shiny-server.sh \ + && printf "USER=root shiny-server" > /usr/local/bin/start-shiny-server.sh \ && chmod u+x /usr/local/bin/start-shiny-server.sh # Remove shiny's pandoc and pandoc-proc to reduce size if they are already installed in the jpy-latex step. 
@@ -112,7 +129,7 @@ setup_R_rshiny() { setup_R_datascience() { # firstly install rgl stub to work around, which has too many deps, but required by some libs - R -e "devtools::install_git(\"git://github.com/sorhawell/rgl.git\",quiet=T,clean=T)" + R -e "devtools::install_git(\"git://github.com/sorhawell/rgl.git\",quiet=T,clean=T)" install_apt /opt/utils/install_list_R_datascience.apt \ && install_R /opt/utils/install_list_R_datascience.R @@ -142,7 +159,7 @@ setup_julia() { setup_octave() { # TEMPFIX: javac version - # && OCTAVE_VERSION="5.2.0" \ + # && OCTAVE_VERSION="6.3.0" \ # && install_tar_xz "https://ftp.gnu.org/gnu/octave/octave-${OCTAVE_VERSION}.tar.xz" \ # && cd /opt/octave-* \ # && sed -i "s/1.6/11/g" ./Makefile.in \ diff --git a/docker_atom/work/script-utils.sh b/docker_atom/work/script-utils.sh index 0c70eda..a4bc24f 100644 --- a/docker_atom/work/script-utils.sh +++ b/docker_atom/work/script-utils.sh @@ -3,13 +3,13 @@ install_echo() { cat $1 | cut -d "%" -f 1 | sed '/^$/d' | xargs -r -n1 printf '%s\n' ; } # function to install apt-get packages from a text file which lists package names (add comments with % char) -install_apt() { apt-get -y update --fix-missing > /dev/null && apt-get install -yq --no-install-recommends `cat $1 | cut -d '%' -f 1` ; } +install_apt() { apt-get -qq update -yq --fix-missing && apt-get -qq install -yq --no-install-recommends `cat $1 | cut -d '%' -f 1` ; } # function to install conda packages from a text file which lists package names (add comments with % char) install_conda() { cat $1 | cut -d "%" -f 1 | sed '/^$/d' | xargs -r -n1 conda install -yq ; } # function to install python packages with pip from a text file which lists package names (add comments with % char) -install_pip() { cat $1 | cut -d "%" -f 1 | sed '/^$/d' | xargs -r -n1 pip install -U --pre --use-feature=2020-resolver ; } +install_pip() { cat $1 | cut -d "%" -f 1 | sed '/^$/d' | xargs -r -n1 pip -qq install -U --pre ; } # function to install R packages from a text file which lists package names (add comments with % char, use quiet=T to be less verbose) install_R() { R -e "options(Ncpus=4);lapply(scan('$1','c',comment.char='%'),function(x){cat(x,system.time(install.packages(x,clean=T,quiet=T)),'\n')})"; } diff --git a/docker_base/Dockerfile b/docker_base/Dockerfile index 295d696..4a2235d 100644 --- a/docker_base/Dockerfile +++ b/docker_base/Dockerfile @@ -6,8 +6,6 @@ FROM ${BASE_NAMESPACE:+$BASE_NAMESPACE/}${BASE_IMG} LABEL maintainer="haobibo@gmail.com" -COPY work /opt/utils/ - RUN source /opt/utils/script-utils.sh \ && install_apt /opt/utils/install_list_base.apt diff --git a/docker_core/Dockerfile b/docker_core/Dockerfile index a022b0d..6f6cd28 100644 --- a/docker_core/Dockerfile +++ b/docker_core/Dockerfile @@ -45,7 +45,7 @@ RUN source /opt/utils/script-setup.sh \ # If on a x86_64 architecture and install data science pkgs, install MKL for acceleration; Installing conda packages if provided. 
RUN ( [[ `arch` == "x86_64" && ${ARG_PROFILE_PYTHON} == *"datascience"* ]] && ( echo "mkl" >> /opt/utils/install_list.conda ) || true ) \ - && source /opt/utils/script-utils.sh && ( install_conda /opt/utils/install_list.conda || true ) + && source /opt/utils/script-utils.sh && ( install_conda /opt/utils/install_list_core.conda || true ) # If installing Python packages RUN source /opt/utils/script-utils.sh \ @@ -63,10 +63,8 @@ RUN source /opt/utils/script-utils.sh \ # Handle pytorch installation 1.x only, cpu/gpu && ( [[ ${ARG_PROFILE_PYTHON} == *"torch"* ]] \ && IDX=$( [ -x "$(command -v nvcc)" ] && echo "cu${CUDA_VER//./}" || echo "cpu" ) \ - && pip install --pre -U torch torchvision -f "https://download.pytorch.org/whl/${IDX}/torch_stable.html" \ - || true ) \ - ## TODO: TEMP Fix - https://github.com/pytoolz/cytoolz/issues/144 - && rm -f $(python -c 'import sys; print("/opt/conda/lib/python3.%s/site-packages/cytoolz/__init__.py" % sys.version_info.minor)') + && pip install --pre -U torch -f "https://download.pytorch.org/whl/${IDX}/torch_stable.html" \ + || true ) RUN [[ ${ARG_PROFILE_GO} == *"base"* ]] && source /opt/utils/script-setup.sh && setup_GO || true diff --git a/docker_core/work/install_list_PY_chem.apt b/docker_core/work/install_list_PY_chem.apt new file mode 100644 index 0000000..dcd70e5 --- /dev/null +++ b/docker_core/work/install_list_PY_chem.apt @@ -0,0 +1,4 @@ +% This file contains apt packages to be installed with apt-get install line by line. +% Use percent char as line comment separator. + +librdkit1 rdkit-data % required by rdkit diff --git a/docker_core/work/install_list_PY_chem.pip b/docker_core/work/install_list_PY_chem.pip index 646612f..5c1cd0d 100644 --- a/docker_core/work/install_list_PY_chem.pip +++ b/docker_core/work/install_list_PY_chem.pip @@ -1,5 +1,4 @@ % This file contains python packages to be installed with pip line by line. % Use percent char as line comment separator. -% rdkit % Computational Chemistry -oddt % Drug Discovery +rdkit-pypi % Computational Chemistry diff --git a/docker_core/work/install_list_PY_database.pip b/docker_core/work/install_list_PY_database.pip index 82f4b96..bb96228 100644 --- a/docker_core/work/install_list_PY_database.pip +++ b/docker_core/work/install_list_PY_database.pip @@ -5,6 +5,3 @@ datashape bottleneck blaze dask distributed % Python BigData/parallel interface openpyxl dill cloudpickle % Data access: external systems; Object Serialization sqlalchemy pandasql pymysql psycopg2-binary % Data access: OpenSource DB -% cx_Oracle % Data access: OracleDB -% ibm_db ibm_db_sa % Data access: IBM DB2 and IBM Informix -% sqlalchemy_pytds % Data access: Microsoft SQLServer diff --git a/docker_core/work/install_list_PY_nlp.pip b/docker_core/work/install_list_PY_nlp.pip index f97d661..9101bdc 100644 --- a/docker_core/work/install_list_PY_nlp.pip +++ b/docker_core/work/install_list_PY_nlp.pip @@ -1,6 +1,6 @@ % This file contains python packages to be installed with pip line by line. % Use percent char as line comment separator. 
-pyparsing astroid % Parsing, AST -nltk spacy PyStemmer orange3-text % NLP -gensim % NLP embeddings +pyparsing astroid % Parsing, AST +nltk spacy PyStemmer % NLP +transformers % NLP: transformer models (Hugging Face) diff --git a/docker_core/work/install_list_latex_base.apt b/docker_core/work/install_list_latex_base.apt index ad7f86b..21a38f8 100644 --- a/docker_core/work/install_list_latex_base.apt +++ b/docker_core/work/install_list_latex_base.apt @@ -3,5 +3,6 @@ lmodern libx11-dev % required libraries inkscape pandoc pandoc-citeproc % required for tex file generation -texlive-xetex texlive-generic-recommended % xetex, to generate PDF from tex +texlive-xetex % xetex, to generate PDF from tex +texlive-latex-recommended texlive-plain-generic % texlive texlive-fonts-extra texlive-fonts-recommended % latex fonts: basic diff --git a/docker_cuda/Dockerfile b/docker_cuda/Dockerfile new file mode 100644 index 0000000..190c4e6 --- /dev/null +++ b/docker_cuda/Dockerfile @@ -0,0 +1,14 @@ +# Distributed under the terms of the Modified BSD License. + +ARG BASE_NAMESPACE +ARG BASE_IMG="cuda:latest" +FROM ${BASE_NAMESPACE:+$BASE_NAMESPACE/}${BASE_IMG} + +LABEL maintainer="haobibo@gmail.com" + +# For CUDA version 10.0, the image solely serves legacy tensorflow 1.15, which requires python 3.7 +# For tensorflow 2.x or torch, python>=3.9 is supported. +RUN echo ${CUDA_VERSION} && nvcc --version \ + && source /opt/utils/script-utils.sh && setup_nvtop \ + && [[ ${CUDA_VERSION} == *"10.0"* ]] && conda install -yq python=3.7 || true \ + && install__clean diff --git a/docker_cuda/cuda10.0.Dockerfile b/docker_cuda/cuda10.0.Dockerfile deleted file mode 100644 index 5aaf640..0000000 --- a/docker_cuda/cuda10.0.Dockerfile +++ /dev/null @@ -1,87 +0,0 @@ -# Distributed under the terms of the Modified BSD License.
- -# CUDA base: https://gitlab.com/nvidia/container-images/cuda/-/tree/master/dist/ubuntu18.04/10.0/base/Dockerfile -# CUDA runtime: https://gitlab.com/nvidia/container-images/cuda/-/tree/master/dist/ubuntu18.04/10.0/runtime/Dockerfile -# CUDNN runtime: https://gitlab.com/nvidia/container-images/cuda/-/tree/master/dist/ubuntu18.04/10.0/runtime/cudnn7/Dockerfile -# CUDA devel: https://gitlab.com/nvidia/container-images/cuda/-/tree/master/dist/ubuntu18.04/10.0/devel/Dockerfile -# CUDNN devel https://gitlab.com/nvidia/container-images/cuda/-/tree/master/dist/ubuntu18.04/10.0/devel/cudnn7/Dockerfile - -ARG BASE_NAMESPACE -ARG BASE_IMG="base" -FROM ${BASE_NAMESPACE:+$BASE_NAMESPACE/}${BASE_IMG} - -LABEL maintainer="haobibo@gmail.com" - -ARG ARG_CUDA_RUNTIME=true -ARG ARG_CUDNN_RUNTIME=true -ARG ARG_CUDA_DEVEL=true -ARG ARG_CUDNN_DEVEL=true - -ENV CUDA_VER 10.0 -ENV CUDA_VERSION ${CUDA_VER}.130 -ENV CUDA_PKG_VERSION 10-0=$CUDA_VERSION-1 -ENV NCCL_VERSION 2.4.8 -ENV CUDNN_VERSION 7.6.5.32 -ENV NVIDIA_REQUIRE_CUDA "cuda>=${CUDA_VER}" - -ENV NVIDIA_VISIBLE_DEVICES=all \ - NVIDIA_DRIVER_CAPABILITIES=compute,utility \ - NVIDIA_REQUIRE_CUDA="cuda>=10.0 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411" \ - PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH} \ - LIBRARY_PATH=/usr/local/cuda/lib64/stubs \ - LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 - -LABEL com.nvidia.cudnn.version="${CUDNN_VERSION}" - -# Installing CUDA base -RUN curl -sL "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/7fa2af80.pub" | apt-key add - \ - && echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/cuda.list \ - && echo "deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list \ - && apt-get update \ - && apt-get install -y --no-install-recommends \ - cuda-cudart-$CUDA_PKG_VERSION cuda-compat-10-0 \ - && ln -s cuda-$CUDA_VER /usr/local/cuda \ - && echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf \ - && echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf - -# If installing CUDA runtime -RUN ${ARG_CUDA_RUNTIME:-false} \ - && apt-get install -y --no-install-recommends \ - cuda-libraries-$CUDA_PKG_VERSION cuda-nvtx-$CUDA_PKG_VERSION libnccl2=$NCCL_VERSION-1+cuda$CUDA_VER \ - && apt-mark hold libnccl2 \ - || true - -# If installing CUDNN runtime -RUN ${ARG_CUDNN_RUNTIME:false} \ - && apt-get install -y --no-install-recommends \ - libcudnn7=$CUDNN_VERSION-1+cuda$CUDA_VER \ - && apt-mark hold libcudnn7 \ - || true - -# If installing CUDA devel -RUN ${ARG_CUDA_DEVEL:false} \ - && apt-get install -y --no-install-recommends \ - cuda-libraries-dev-$CUDA_PKG_VERSION cuda-nvml-dev-$CUDA_PKG_VERSION \ - cuda-minimal-build-$CUDA_PKG_VERSION cuda-command-line-tools-$CUDA_PKG_VERSION \ - libnccl-dev=$NCCL_VERSION-1+cuda$CUDA_VER \ - || true - -# If installing CUDNN devel -RUN ${ARG_CUDNN_DEVEL:false} \ - && apt-get install -y --no-install-recommends \ - libcudnn7=$CUDNN_VERSION-1+cuda$CUDA_VER libcudnn7-dev=$CUDNN_VERSION-1+cuda$CUDA_VER \ - || true - -# Install Utilities `nvtop` -RUN cd /tmp \ - && apt-get -y update --fix-missing && apt-get -qq install -y --no-install-recommends libncurses5-dev \ - && git clone https://github.com/Syllo/nvtop.git \ - && mkdir -p nvtop/build && cd nvtop/build \ - && LIB_PATH=`find / -name "libnvidia-ml*" 2>/dev/null` \ - && cmake .. -DCMAKE_LIBRARY_PATH="`dirname $LIB_PATH`" .. 
\ - && make && make install \ - && apt-get -qq remove -y libncurses5-dev - -# This cuda image is left for legacy tensorflow 1.x, which requires python 3.7 -RUN nvcc --version \ - && source /opt/utils/script-utils.sh && conda install -yq python=3.7 && install__clean diff --git a/docker_cuda/cuda10.1.Dockerfile b/docker_cuda/cuda10.1.Dockerfile deleted file mode 100644 index b4da01d..0000000 --- a/docker_cuda/cuda10.1.Dockerfile +++ /dev/null @@ -1,87 +0,0 @@ -# Distributed under the terms of the Modified BSD License. - -# CUDA base: https://gitlab.com/nvidia/container-images/cuda/-/tree/master/dist/ubuntu18.04/10.1/base/Dockerfile -# CUDA runtime: https://gitlab.com/nvidia/container-images/cuda/-/tree/master/dist/ubuntu18.04/10.1/runtime/Dockerfile -# CUDNN runtime: https://gitlab.com/nvidia/container-images/cuda/-/tree/master/dist/ubuntu18.04/10.1/runtime/cudnn7/Dockerfile -# CUDA devel: https://gitlab.com/nvidia/container-images/cuda/-/tree/master/dist/ubuntu18.04/10.1/devel/Dockerfile -# CUDNN devel https://gitlab.com/nvidia/container-images/cuda/-/tree/master/dist/ubuntu18.04/10.1/devel/cudnn7/Dockerfile - -ARG BASE_NAMESPACE -ARG BASE_IMG="base" -FROM ${BASE_NAMESPACE:+$BASE_NAMESPACE/}${BASE_IMG} - -LABEL maintainer="haobibo@gmail.com" - -ARG ARG_CUDA_RUNTIME=true -ARG ARG_CUDNN_RUNTIME=true -ARG ARG_CUDA_DEVEL=true -ARG ARG_CUDNN_DEVEL=true - -ENV CUDA_VER 10.1 -ENV CUDA_VERSION ${CUDA_VER}.243 -ENV CUDA_PKG_VERSION 10-1=$CUDA_VERSION-1 -ENV NCCL_VERSION 2.4.8 -ENV CUDNN_VERSION 7.6.5.32 -ENV CUBLAS_VERSION 10.2.1.243-1 - -ENV NVIDIA_VISIBLE_DEVICES=all \ - NVIDIA_DRIVER_CAPABILITIES=compute,utility \ - NVIDIA_REQUIRE_CUDA="cuda>=${CUDA_VER} brand=tesla,driver>=384,driver<385 brand=tesla,driver>=396,driver<397 brand=tesla,driver>=410,driver<411" \ - PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH} \ - LIBRARY_PATH=/usr/local/cuda/lib64/stubs \ - LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 - -LABEL com.nvidia.cudnn.version="${CUDNN_VERSION}" - -# Installing CUDA base -RUN curl -sL "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/7fa2af80.pub" | apt-key add - \ - && echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/cuda.list \ - && echo "deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list \ - && apt-get update \ - && apt-get install -y --no-install-recommends \ - cuda-cudart-$CUDA_PKG_VERSION cuda-compat-10-1 \ - && ln -s cuda-$CUDA_VER /usr/local/cuda \ - && echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf \ - && echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf - -# If installing CUDA runtime -RUN ${ARG_CUDA_RUNTIME:-false} \ - && apt-get install -y --no-install-recommends \ - cuda-libraries-$CUDA_PKG_VERSION cuda-nvtx-$CUDA_PKG_VERSION \ - libnccl2=$NCCL_VERSION-1+cuda$CUDA_VER libcublas10=$CUBLAS_VERSION \ - && apt-mark hold libnccl2 \ - || true - -# If installing CUDNN runtime -RUN ${ARG_CUDNN_RUNTIME:false} \ - && apt-get install -y --no-install-recommends \ - libcudnn7=$CUDNN_VERSION-1+cuda$CUDA_VER \ - && apt-mark hold libcudnn7 \ - || true - -# If installing CUDA devel -RUN ${ARG_CUDA_DEVEL:false} \ - && apt-get install -y --no-install-recommends \ - cuda-libraries-dev-$CUDA_PKG_VERSION cuda-nvml-dev-$CUDA_PKG_VERSION \ - cuda-minimal-build-$CUDA_PKG_VERSION cuda-command-line-tools-$CUDA_PKG_VERSION \ - libnccl-dev=$NCCL_VERSION-1+cuda$CUDA_VER 
libcublas-dev=$CUBLAS_VERSION \ - || true - -# If installing CUDNN devel -RUN ${ARG_CUDNN_DEVEL:false} \ - && apt-get install -y --no-install-recommends \ - libcudnn7=$CUDNN_VERSION-1+cuda$CUDA_VER libcudnn7-dev=$CUDNN_VERSION-1+cuda$CUDA_VER \ - || true - -# Install Utilities `nvtop` -RUN cd /tmp \ - && apt-get -y update --fix-missing && apt-get -qq install -y --no-install-recommends libncurses5-dev \ - && git clone https://github.com/Syllo/nvtop.git \ - && mkdir -p nvtop/build && cd nvtop/build \ - && LIB_PATH=`find / -name "libnvidia-ml*" 2>/dev/null` \ - && cmake .. -DCMAKE_LIBRARY_PATH="`dirname $LIB_PATH`" .. \ - && make && make install \ - && apt-get -qq remove -y libncurses5-dev - -RUN nvcc --version \ - && source /opt/utils/script-utils.sh && install__clean diff --git a/docker_cuda/cuda10.2.Dockerfile b/docker_cuda/cuda10.2.Dockerfile deleted file mode 100644 index f98f0e8..0000000 --- a/docker_cuda/cuda10.2.Dockerfile +++ /dev/null @@ -1,85 +0,0 @@ -# Distributed under the terms of the Modified BSD License. - -# CUDA base: https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/10.2/ubuntu18.04-x86_64/base/Dockerfile -# CUDA runtime: https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/10.2/ubuntu18.04-x86_64/runtime/Dockerfile -# CUDNN runtime: https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/10.2/ubuntu18.04-x86_64/runtime/cudnn8/Dockerfile -# CUDA devel: https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/10.2/ubuntu18.04-x86_64/devel/Dockerfile -# CUDNN devel https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/10.2/ubuntu18.04-x86_64/devel/cudnn8/Dockerfile - -ARG BASE_NAMESPACE -ARG BASE_IMG="base" -FROM ${BASE_NAMESPACE:+$BASE_NAMESPACE/}${BASE_IMG} - -LABEL maintainer="haobibo@gmail.com" - -ARG ARG_CUDA_RUNTIME=true -ARG ARG_CUDNN_RUNTIME=true -ARG ARG_CUDA_DEVEL=true -ARG ARG_CUDNN_DEVEL=true - -ENV CUDA_VER 10.2 -ENV CUDA_VERSION ${CUDA_VER}.89 -ENV CUDA_PKG_VERSION 10-2=$CUDA_VERSION-1 -ENV NCCL_VERSION 2.7.8 -ENV CUDNN_VERSION 8.0.2.39 -ENV CUBLAS_VERSION 10.2.2.89-1 - -ENV NVIDIA_VISIBLE_DEVICES=all \ - NVIDIA_DRIVER_CAPABILITIES=compute,utility \ - NVIDIA_REQUIRE_CUDA="cuda>=${CUDA_VER} brand=tesla,driver>=396,driver<397 brand=tesla,driver>=410,driver<411 brand=tesla,driver>=418,driver<419 brand=tesla,driver>=440,driver<441" \ - PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH} \ - LIBRARY_PATH=/usr/local/cuda/lib64/stubs \ - LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 - -LABEL com.nvidia.cudnn.version="${CUDNN_VERSION}" - -# Installing CUDA base -RUN curl -sL "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/7fa2af80.pub" | apt-key add - \ - && echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/cuda.list \ - && echo "deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list \ - && apt-get update \ - && apt-get install -y --no-install-recommends \ - cuda-cudart-$CUDA_PKG_VERSION cuda-compat-10-2 \ - && ln -s cuda-$CUDA_VER /usr/local/cuda \ - && echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf \ - && echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf - -# If installing CUDA runtime -RUN ${ARG_CUDA_RUNTIME:-false} \ - && apt-get install -y --no-install-recommends \ - cuda-libraries-$CUDA_PKG_VERSION cuda-nvtx-$CUDA_PKG_VERSION cuda-npp-$CUDA_PKG_VERSION \ - libcublas10=$CUBLAS_VERSION 
libnccl2=$NCCL_VERSION-1+cuda$CUDA_VER \ - || true - -# If installing CUDNN runtime -RUN ${ARG_CUDNN_RUNTIME:false} \ - && apt-get install -y --no-install-recommends libcudnn8=$CUDNN_VERSION-1+cuda$CUDA_VER \ - || true - -# If installing CUDA devel -RUN ${ARG_CUDA_DEVEL:false} \ - && apt-get install -y --no-install-recommends \ - cuda-nvml-dev-$CUDA_PKG_VERSION cuda-command-line-tools-$CUDA_PKG_VERSION \ - cuda-nvprof-$CUDA_PKG_VERSION cuda-npp-dev-$CUDA_PKG_VERSION \ - cuda-libraries-dev-$CUDA_PKG_VERSION cuda-minimal-build-$CUDA_PKG_VERSION \ - libcublas-dev=$CUBLAS_VERSION libnccl-dev=$NCCL_VERSION-1+cuda$CUDA_VER \ - || true - -# If installing CUDNN devel -RUN ${ARG_CUDNN_DEVEL:false} \ - && apt-get install -y --no-install-recommends \ - libcudnn8=$CUDNN_VERSION-1+cuda$CUDA_VER libcudnn8-dev=$CUDNN_VERSION-1+cuda$CUDA_VER \ - || true - -# Install Utilities `nvtop` -RUN cd /tmp \ - && apt-get -y update --fix-missing && apt-get -qq install -y --no-install-recommends libncurses5-dev \ - && git clone https://github.com/Syllo/nvtop.git \ - && mkdir -p nvtop/build && cd nvtop/build \ - && LIB_PATH=`find / -name "libnvidia-ml*" 2>/dev/null` \ - && cmake .. -DCMAKE_LIBRARY_PATH="`dirname $LIB_PATH`" .. \ - && make && make install \ - && apt-get -qq remove -y libncurses5-dev - -RUN nvcc --version \ - && source /opt/utils/script-utils.sh && install__clean diff --git a/docker_cuda/cuda11.0.Dockerfile b/docker_cuda/cuda11.0.Dockerfile deleted file mode 100644 index 76e1b01..0000000 --- a/docker_cuda/cuda11.0.Dockerfile +++ /dev/null @@ -1,85 +0,0 @@ -# Distributed under the terms of the Modified BSD License. - -# CUDA base: https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/11.0/ubuntu18.04-x86_64/base/Dockerfile -# CUDA runtime: https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/11.0/ubuntu18.04-x86_64/runtime/Dockerfile -# CUDNN runtime: https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/11.0/ubuntu18.04-x86_64/runtime/cudnn8/Dockerfile -# CUDA devel: https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/11.0/ubuntu18.04-x86_64/devel/Dockerfile -# CUDNN devel https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/11.0/ubuntu18.04-x86_64/devel/cudnn8/Dockerfile - -ARG BASE_NAMESPACE -ARG BASE_IMG="base" -FROM ${BASE_NAMESPACE:+$BASE_NAMESPACE/}${BASE_IMG} - -LABEL maintainer="haobibo@gmail.com" - -ARG ARG_CUDA_RUNTIME=true -ARG ARG_CUDNN_RUNTIME=true -ARG ARG_CUDA_DEVEL=true -ARG ARG_CUDNN_DEVEL=true - -ENV CUDA_VER 11.0 -ENV CUDA_VERSION ${CUDA_VER}.221 -ENV CUDA_PKG_VERSION 11-0=$CUDA_VERSION-1 -ENV NCCL_VERSION 2.7.8 -ENV CUDNN_VERSION 8.0.2.39 -ENV CUBLAS_VERSION 11.2.0.252-1 - -ENV NVIDIA_VISIBLE_DEVICES=all \ - NVIDIA_DRIVER_CAPABILITIES=compute,utility \ - NVIDIA_REQUIRE_CUDA="cuda>=${CUDA_VER} brand=tesla,driver>=418,driver<419 brand=tesla,driver>=440,driver<441" \ - PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH} \ - LIBRARY_PATH=/usr/local/cuda/lib64/stubs \ - LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 - -LABEL com.nvidia.cudnn.version="${CUDNN_VERSION}" - -# Installing CUDA base -RUN curl -sL "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/7fa2af80.pub" | apt-key add - \ - && echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/cuda.list \ - && echo "deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list \ - && apt-get 
update \ - && apt-get install -y --no-install-recommends \ - cuda-cudart-$CUDA_PKG_VERSION cuda-compat-11-0 \ - && ln -s cuda-$CUDA_VER /usr/local/cuda \ - && echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf \ - && echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf - -# If installing CUDA runtime -RUN ${ARG_CUDA_RUNTIME:-false} \ - && apt-get install -y --no-install-recommends \ - cuda-libraries-11-0=11.0.3-1 libnpp-11-0=11.1.0.245-1 cuda-nvtx-11-0=11.0.167-1 \ - libcublas-11-0=11.2.0.252-1 libnccl2=$NCCL_VERSION-1+cuda11.0 \ - || true - -# If installing CUDNN runtime -RUN ${ARG_CUDNN_RUNTIME:false} \ - && apt-get install -y --no-install-recommends libcudnn8=$CUDNN_VERSION-1+cuda11.0 \ - || true - -# If installing CUDA devel -RUN ${ARG_CUDA_DEVEL:false} \ - && apt-get install -y --no-install-recommends \ - cuda-minimal-build-11-0=11.0.3-1 cuda-libraries-dev-11-0=11.0.3-1 cuda-command-line-tools-11-0=11.0.3-1 \ - cuda-nvml-dev-11-0=11.0.167-1 libcublas-dev-11-0=${CUBLAS_VERSION} \ - libnccl-dev=2.7.8-1+cuda11.0 cuda-nvprof-11-0=11.0.221-1 \ - libnpp-dev-11-0=11.1.0.245-1 libcusparse-11-0=11.1.1.245-1 libcusparse-dev-11-0=11.1.1.245-1 \ - || true - -# If installing CUDNN devel -RUN ${ARG_CUDNN_DEVEL:false} \ - && apt-get install -y --no-install-recommends \ - libcudnn8=$CUDNN_VERSION-1+cuda11.0 libcudnn8-dev=$CUDNN_VERSION-1+cuda11.0 \ - || true - -# Install Utilities `nvtop` -RUN cd /tmp \ - && apt-get -y update --fix-missing && apt-get -qq install -y --no-install-recommends libncurses5-dev \ - && git clone https://github.com/Syllo/nvtop.git \ - && mkdir -p nvtop/build && cd nvtop/build \ - && LIB_PATH=`find / -name "libnvidia-ml*" 2>/dev/null` \ - && cmake .. -DCMAKE_LIBRARY_PATH="`dirname $LIB_PATH`" .. \ - && make && make install \ - && apt-get -qq remove -y libncurses5-dev - -RUN nvcc --version \ - && source /opt/utils/script-utils.sh && install__clean diff --git a/docker_dev/work/script-extend.sh b/docker_dev/work/script-extend.sh index 97dd491..4a8fe70 100644 --- a/docker_dev/work/script-extend.sh +++ b/docker_dev/work/script-extend.sh @@ -1,8 +1,8 @@ source /opt/utils/script-utils.sh setup_jupyter_base() { - # TEMP fix: nbconvert requires mistune<2,>0.8.1 for now - pip install -Uq jupyterhub jupyterlab notebook ipywidgets qpod_hub "mistune<2,>0.8.1" \ + # TEMP fix: nbconvert requires mistune>=0.8.1,<2 for now + pip install -Uq jupyterhub jupyterlab notebook ipywidgets qpod_hub "mistune>=0.8.1,<2" \ && mkdir -p /opt/conda/etc/jupyter/ \ && mv /opt/utils/jupyter_notebook_config.json /opt/conda/etc/jupyter/ \ && jupyter nbextension enable --py widgetsnbextension \ @@ -19,7 +19,7 @@ setup_jupyter_kernels() { pip install -Uq bash_kernel && python -m bash_kernel.install --sys-prefix which npm \ - && npm install -g --unsafe-perm --python=python2.7 ijavascript \ + && npm install -g --unsafe-perm ijavascript \ && /opt/node/bin/ijsinstall --install=global --spec-path=full \ && mv /usr/local/share/jupyter/kernels/javascript /opt/conda/share/jupyter/kernels/ diff --git a/tool.sh b/tool.sh new file mode 100644 index 0000000..36bf87f --- /dev/null +++ b/tool.sh @@ -0,0 +1,40 @@ +#!/bin/bash +export REGISTRY_URL="docker.io" # docker.io or other registry URL, DOCKER_REGISTRY_USER/DOCKER_REGISTRY_PASSWORD to be set in CI env. +export BUILDKIT_PROGRESS="plain" # Full logs for CI build. +# DOCKER_REGISTRY_USER and DOCKER_REGISTRY_PASSWORD is required for docker image push, they should be set in CI secrets. 
+ +CI_PROJECT_BRANCH=${GITHUB_HEAD_REF:-master} + +if [ "${CI_PROJECT_BRANCH}" == "master" ]; then + export CI_PROJECT_NAMESPACE=$(echo "$(dirname ${GITHUB_REPOSITORY})") ; +else + export CI_PROJECT_NAMESPACE=$(echo "$(dirname ${GITHUB_REPOSITORY})")0${CI_PROJECT_BRANCH} ; +fi + +export NAMESPACE=$(echo "${REGISTRY_URL:-"docker.io"}/${CI_PROJECT_NAMESPACE}" | awk '{print tolower($0)}') +echo '---->' $GITHUB_REPOSITORY $NAMESPACE + +build_image() { + echo $@ ; + IMG=$1; TAG=$2; FILE=$3; shift 3; VER=`date +%Y.%m%d`; + docker build --squash --compress --force-rm=true -t "${NAMESPACE}/${IMG}:${TAG}" -f "$FILE" --build-arg "BASE_NAMESPACE=${NAMESPACE}" "$@" "$(dirname $FILE)" ; + docker tag "${NAMESPACE}/${IMG}:${TAG}" "${NAMESPACE}/${IMG}:${VER}" ; +} + +alias_image() { + IMG_1=$1; TAG_1=$2; IMG_2=$3; TAG_2=$4; shift 4; VER=`date +%Y.%m%d`; + docker tag "${NAMESPACE}/${IMG_1}:${TAG_1}" "${NAMESPACE}/${IMG_2}:${TAG_2}" ; + docker tag "${NAMESPACE}/${IMG_2}:${TAG_2}" "${NAMESPACE}/${IMG_2}:${VER}" ; +} + +push_image() { + docker image prune --force && docker images ; + IMGS=$(docker images | grep "second" | awk '{print $1 ":" $2}') ; + echo "$DOCKER_REGISTRY_PASSWORD" | docker login "${REGISTRY_URL}" -u "$DOCKER_REGISTRY_USER" --password-stdin ; + for IMG in $(echo $IMGS | tr " " "\n") ; + do + docker push "${IMG}"; + status=$?; + echo "[${status}] Image pushed > ${IMG}"; + done +}
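
As a quick illustration of how the new `tool.sh` helpers are meant to be driven (each workflow job follows the same pattern: `source ./tool.sh`, then `build_image` / `alias_image` / `push_image`), here is a minimal local sketch. The repository slug, credentials, and the `atom-test` alias name are placeholders for illustration only; in CI these values come from `GITHUB_REPOSITORY` and the `DOCKER_REGISTRY_*` secrets.

```shell
# Minimal local sketch (not part of this PR): exercise the tool.sh helpers outside GitHub Actions.
# Assumes Docker with experimental features enabled (build_image passes --squash) and a registry account.
export GITHUB_REPOSITORY="QPod/docker-images"    # assumed repo slug; its dirname becomes the image namespace
export DOCKER_REGISTRY_USER="someuser"           # placeholder credentials, only needed by push_image
export DOCKER_REGISTRY_PASSWORD="sometoken"

source ./tool.sh                                 # derives NAMESPACE (e.g. docker.io/qpod) and defines the helpers

build_image atom latest docker_atom/Dockerfile   # builds <namespace>/atom:latest and adds a date-stamped tag
alias_image atom latest atom-test latest         # hypothetical alias, mirroring how base -> py-mini is tagged
push_image                                       # logs in and pushes images whose CREATED age still reads in seconds
```

Note that `push_image` selects images by grepping `docker images` for a CREATED column of "... seconds ago", so it is intended to run immediately after the builds, exactly as the workflow's `run:` blocks do.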