diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 02345e55..d13af853 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -30,6 +30,4 @@ assignees: '' ## Configuration - - diff --git a/.github/ISSUE_TEMPLATE/other_request.md b/.github/ISSUE_TEMPLATE/other_request.md index 9a59be11..1f02cfbd 100644 --- a/.github/ISSUE_TEMPLATE/other_request.md +++ b/.github/ISSUE_TEMPLATE/other_request.md @@ -14,4 +14,3 @@ assignees: '' ## Additional context - diff --git a/.github/ISSUE_TEMPLATE/software_request.md b/.github/ISSUE_TEMPLATE/software_request.md index de90ba80..5930e2f1 100644 --- a/.github/ISSUE_TEMPLATE/software_request.md +++ b/.github/ISSUE_TEMPLATE/software_request.md @@ -11,16 +11,6 @@ assignees: '' -## Ubuntu package - - - - ## Additional context - -## Developer - - - diff --git a/.github/dependabot.yml b/.github/dependabot.yml index c70dd4e0..b61d8ac7 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -21,30 +21,3 @@ updates: applies-to: security-updates patterns: - '*' -- package-ecosystem: "pip" - directory: "/doc" - target-branch: trunk - schedule: - interval: "monthly" - time: "07:00" - timezone: "EST5EDT" - pull-request-branch-name: - separator: "-" - open-pull-requests-limit: 2 - reviewers: - - joaander - groups: - pip-version: - applies-to: version-updates - patterns: - - '*' - update-types: - - minor - - patch - pip-security: - applies-to: security-updates - patterns: - - '*' - update-types: - - minor - - patch diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml deleted file mode 100644 index d20653d6..00000000 --- a/.github/workflows/build.yml +++ /dev/null @@ -1,64 +0,0 @@ -name: Build - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -on: - # Trigger on pull requests. - pull_request: - - # Trigger on pushes to the mainline branches. These triggers push the built images to Docker Hub. - push: - branches: - - "trunk" - - # Trigger on request. 
- workflow_dispatch: - -jobs: - start_action_runners: - name: Start action runners - runs-on: ubuntu-latest - steps: - - uses: glotzerlab/jetstream2-admin/start@v1.2.5 - with: - OS_APPLICATION_CREDENTIAL_ID: ${{ secrets.OS_APPLICATION_CREDENTIAL_ID }} - OS_APPLICATION_CREDENTIAL_SECRET: ${{ secrets.OS_APPLICATION_CREDENTIAL_SECRET }} - - Build: - name: Build ${{ matrix.configuration }} - runs-on: [self-hosted,jetstream2,CPU] - strategy: - fail-fast: false - matrix: - configuration: [nompi, greatlakes, bridges2, delta, expanse, expanse-gpu, anvil] - steps: - - uses: actions/checkout@v4.1.4 - - name: Login to DockerHub - if: ${{ github.event_name != 'pull_request' || (github.event.pull_request.head.repo.full_name == github.repository && github.actor != 'dependabot[bot]') }} - uses: docker/login-action@v3.1.0 - with: - username: ${{ secrets.DOCKER_HUB_USERNAME }} - password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} - - name: Clear existing images - run: docker system prune -a -f - - name: Build image - run: ./build.sh -r glotzerlab ${{ matrix.configuration }} - - name: Push image - run: docker push -a glotzerlab/software - if: github.ref == 'refs/heads/trunk' - - name: Slack notification - if: ${{ (github.ref == 'refs/heads/trunk') && (failure() || cancelled()) }} - uses: 8398a7/action-slack@v3.16.2 - with: - status: ${{ job.status }} - fields: workflow,ref,message,commit - mention: channel - if_mention: failure,cancelled - channel: '#dev-glotzerlab-software' - username: Github Action - author_name: Build ${{ matrix.configuration }} - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - MATRIX_CONTEXT: ${{ toJson(matrix) }} diff --git a/.github/workflows/doc.yaml b/.github/workflows/doc.yaml new file mode 100644 index 00000000..5100fe73 --- /dev/null +++ b/.github/workflows/doc.yaml @@ -0,0 +1,47 @@ +name: doc + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +on: + pull_request: + push: + branches: + - "trunk" + + workflow_dispatch: + +defaults: + run: + shell: bash + +env: + CARGO_TERM_COLOR: always + CLICOLOR: 1 + MDBOOK_VERSION: 0.4.37 + LINKCHECK_VERSION: 0.7.7 + +jobs: + build_documentation: + name: Build documentation + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4.1.3 + - name: Install mdbook + run: | + mkdir -p "$HOME/.cargo/bin" + curl -sSL "https://github.com/rust-lang/mdBook/releases/download/v$MDBOOK_VERSION/mdbook-v$MDBOOK_VERSION-x86_64-unknown-linux-gnu.tar.gz" | tar -xvz --directory "$HOME/.cargo/bin" + - name: Install mdbook-linkcheck + run: | + curl -sSL "https://github.com/Michael-F-Bryan/mdbook-linkcheck/releases/download/v$LINKCHECK_VERSION/mdbook-linkcheck.x86_64-unknown-linux-gnu.zip" -o mdbook-linkcheck.zip + unzip mdbook-linkcheck.zip -d "$HOME/.cargo/bin" + chmod a+x "$HOME/.cargo/bin/mdbook-linkcheck" + - name: Add linkcheck configuration + run: | + echo -e "[output.linkcheck]\nfollow-web-links=true" >> doc/book.toml + cat doc/book.toml + - name: Build documentation + run: mdbook build doc + env: + RUST_LOG: "mdbook=info,linkcheck=warn,reqwest=debug" diff --git a/.github/workflows/requirements.txt b/.github/workflows/requirements.txt deleted file mode 100644 index e343b0e8..00000000 --- a/.github/workflows/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -pur==7.3.1 diff --git a/.github/workflows/update-packages.py b/.github/workflows/update-packages.py deleted file mode 100644 index b5594601..00000000 --- a/.github/workflows/update-packages.py +++ /dev/null @@ -1,40 +0,0 @@ -import yaml -import 
subprocess - - -def get_latest_git_tag(repository, ignore): - ignore_commands = 'grep -v {' - - for s in ignore.split(','): - if s != '': - ignore_commands += f' | grep -v {s}' - - grep_command = f'git ls-remote --tags {repository}'\ - " | sort -t '/' -k 3 -k 4 -V"\ - f' | {ignore_commands}'\ - " | awk -F/ '{ print $3 }' "\ - f' | tail -n1' - - output = subprocess.run(grep_command, shell=True, capture_output=True) - return output.stdout.strip().decode('utf-8') - - -if __name__ == '__main__': - with open('packages.yml', 'r') as f: - packages = yaml.safe_load(f) - - for name, p in packages.items(): - if 'repository' in p and p['repository'] is not None: - new_version = get_latest_git_tag(p['repository'], - p.get('ignore', '')) - - # remove 'v' prefix when requested - if not p.get('include_v_prefix', True) and new_version[0] == 'v': - new_version = new_version[1:] - - if p['version'] != new_version: - print(f"Updating {name} {p['version']} -> {new_version}") - p['version'] = new_version - - with open('packages.yml', 'w') as f: - yaml.dump(packages, f) diff --git a/.github/workflows/update-packages.yml b/.github/workflows/update-packages.yml deleted file mode 100644 index d31bd1ef..00000000 --- a/.github/workflows/update-packages.yml +++ /dev/null @@ -1,61 +0,0 @@ -name: Update packages - -on: - # Trigger on request. - workflow_dispatch: - - # Run weekly. - schedule: - - cron: '0 18 * * 1' - -jobs: - update_packages: - name: Update packages on ${{ matrix.branch }} - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - branch: [trunk] - steps: - - uses: actions/checkout@v4.1.4 - with: - ref: ${{ matrix.branch }} - - uses: tibdex/github-app-token@v2.1 - id: generate-token - with: - app_id: ${{ secrets.PR_SUBMITTER_APP_ID }} - private_key: ${{ secrets.PR_SUBMITTER_PRIVATE_KEY }} - - uses: actions/cache@v4.0.2 - with: - path: ~/.cache/pip - key: ${{ runner.os }}-pip-${{ hashFiles('.github/workflows/requirements.txt') }} - restore-keys: | - ${{ runner.os }}-pip- - - name: Install required packages - run: python3 -m pip install -r .github/workflows/requirements.txt - - name: Bump versions in packages.yml - run: python3 .github/workflows/update-packages.py - - name: Bump versions in .github/workflows/requirements.txt - run: pur -r .github/workflows/requirements.txt - - name: Bump versions in requirements.txt - run: pur -r requirements.txt - - name: Bump versions in requirements-mpi.txt - run: pur -r requirements-mpi.txt - - name: Bump versions in requirements-source.txt - run: pur -r requirements-source.txt - - name: Bump versions in requirements-cupy.txt - run: pur -r requirements-cupy.txt - - name: Bump versions in generated scripts - run: python3 make_dockerfiles.py - - name: Display updates - run: git diff - - name: Create Pull Request - uses: peter-evans/create-pull-request@v6.0.5 - with: - base: ${{ matrix.branch }} - commit-message: "[update-packages] Bump package versions" - branch: update-packages-${{ matrix.branch }} - delete-branch: true - title: Update packages on ${{ matrix.branch }} - body: Automated changes by `update-packages.yml`. 
- token: ${{ steps.generate-token.outputs.token }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..4114b814 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,20 @@ +ci: + autoupdate_schedule: quarterly + autoupdate_branch: 'trunk' + autofix_prs: false + +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: 'v4.5.0' + hooks: + - id: end-of-file-fixer + - id: trailing-whitespace + - id: check-json + - id: check-toml + - id: check-case-conflict + - id: mixed-line-ending +- repo: https://github.com/crate-ci/typos + rev: v1.21.0 + hooks: + - id: typos + exclude_types: [css] diff --git a/.readthedocs.yaml b/.readthedocs.yaml index a0209643..87ca7a4d 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -1,14 +1,9 @@ version: 2 - build: - os: ubuntu-22.04 - tools: - python: "3.12" - -sphinx: - configuration: doc/conf.py - fail_on_warning: true - -python: - install: - - requirements: doc/requirements.txt + os: "ubuntu-22.04" + commands: + - mkdir -p bin + - curl -sSL "https://github.com/rust-lang/mdBook/releases/download/v0.4.37/mdbook-v0.4.37-x86_64-unknown-linux-gnu.tar.gz" | tar -xvz --directory "bin" + - mkdir -p $READTHEDOCS_OUTPUT/html + - echo "site-url = \"/$READTHEDOCS_LANGUAGE/$READTHEDOCS_VERSION/\"" >> doc/book.toml + - bin/mdbook build doc --dest-dir=$READTHEDOCS_OUTPUT/html diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md index 525ba6ed..abbfff91 100644 --- a/ARCHITECTURE.md +++ b/ARCHITECTURE.md @@ -1,126 +1,12 @@ # Glotzerlab software -## Docker file overview +## Conda packages -This repository hosts Dockerfiles for software developed and used by the [Glotzer -lab](http://glotzerlab.engin.umich.edu/home/). Most of these software packages are python libraries -designed to be used together, so we install it all together in one image and version it by date. -Each image contains the latest versions of all installed software available that month. +This repository hosts conda recipes to build HOOMD-blue and related software with MPI +and/or GPU support on HPC resources. -These images are hosted on the Docker Hub in the [glotzerlab -organization](https://hub.docker.com/r/glotzerlab/software/): This README is intended for Dockerfile -image developers and those who wish to modify these images. - -To prevent combinatorial explosion of the number of images (i.e. multiple MPI versions combined with -multiple CUDA versions), we only introduce new images where absolutely necessary to support a wide -number of users. Supporting dozens of images is not only time consuming to build and test, but also -confusing for users. Individual users that require specific versions can clone this repository, make -the modification, and build their own images. - -## Installation scripts - -Not all HPC systems support containers. For these systems glotzerlab-software generates installation -scripts from the same templates used for docker. - -## Layout - -Images are combined from a number of Jinja template files in `template/*.jinja` by the script -`make_dockerfiles.py`. Modifications must be made to the templates, not the generated dockerfiles. - -### Base templates - -* `template/base.jinja` provides the base dependencies necessary to build Glotzer Lab - software along with commonly used tools. This image is based on - `nvidia/cuda-?.?-devel-ubuntu20.04`. -* `frontier.jinja` provides the base dependencies through modules on OLCF Frontier and configures - a software root directory for the build script. 
-* `ib-mlx.jinja` adds high speed IB networking drivers. -* `openmpi.jinja` and `mvapich2.jinja` build the corresponding MPI libraries. -* `glotzer-software.jinja` compiles and installs glotzer group software. -* `finalize.jinja` creates a `glotzerlab-software` user so that tools designed to run as - non-root can run without any user intervention. -* `test.jinja` runs unit tests on some of the installed packages. - -### Software images - -Images are stored in the docker hub repository `glotzerlab/software`. - -The script `make_dockerfiles.py` generates the docker files from the templates and places them under -the `docker/` directory. - -* `docker/Dockerfile` - devel image providing dependencies but no software -* `docker/nompi/Dockerfile` - base image with no MPI support -* `docker/${cluster}/Dockerfile` - Add MPI support for a given cluster - -### Installation scripts - -Build scripts are generated under the `script/` directory for systems that don't support containers. - -* `script/frontier/install.sh` - -## Building images - -### Dependencies - -`glotzerlab/software` requires the following Python dependencies -- `jinja2` -- `pyyaml`. - -We recommend installing these into a virtual environment with `venv`. - -### Building - -`build.sh -r repository [ -t tag ] [ system [ system [ ... ] ] ]` builds the images. - -* `-r repository` sets the docker repository to label the builds -* `-t` set the tag name (the default is the date in `YYYY-MM` format). -* system is one of the directories in `docker` to build - -Here is an example that builds the `nompi` image in a testing repository:: - - ./build.sh -r glotzerlab-testing nompi - -The CI scripts build and upload the docker containers to the `glotzerlab/software` repository on -Docker Hub. - -## Singularity - -Users use `singularity pull` to pull the Docker image from Docker Hub and generate a Singularity -image locally for use on HPC systems. - -## Benchmarking - -The image contains the [OSU microbenchmark suite](http://mvapich.cse.ohio-state.edu/benchmarks/) to -verify proper MPI operation and high speed network performance: - - mpirun -N 1 singularity exec ${IMAGE} /opt/osu-micro-benchmarks/libexec/osu-micro-benchmarks/mpi/pt2pt/osu_bibw - -## Testing - -See: https://glotzerlab-software.readthedocs.io/en/latest/test.html - -## Adding a package to the container - -The Python packages to install in the container in one of the following files: - - * `requirements.txt` - general packages. - * `requirements-source.txt` - packages to build from source. - * `requirements-mpi.txt` - packages that depend on an MPI installation. - -Packages not available on PyPI are listed in `packages.yml`. - -All packages in the above files are listed with explicit versions. - -Steps to add a new package: - -1. Add the package as appropriate to `requirements*.txt` or `packages.yml`. -2. Add build scripts for non-PyPI packages to `glotzerlab-software.jinja`. -3. Add unit tests to `test.jinja` if needed. -4. Build and test the image. - -## Updating packages in the container - -The update process is automatic. Dependabot is configured to update the packages in -`requirements*.txt`. The script `.github/workflows/update-packages.py` updates `packages.yml` and a -GitHub Actions workflow runs this periodically. All changes by CI are submitted as pull requests for -final review before merging. +The `conda/` directory contains the files needed to construct these builds. +Subdirectories under `conda/` contain the recipes for each package. 
`conda/build.sh` +is the main script to build the package. The other `*.sh` scripts are drivers to +make the build on specific platforms. On systems that allow network connectivity from +compute nodes, these are SLURM scripts to be submitted with `sbatch`. diff --git a/CHANGELOG.rst b/CHANGELOG.rst deleted file mode 100644 index 906c25bf..00000000 --- a/CHANGELOG.rst +++ /dev/null @@ -1,567 +0,0 @@ - -Change log -========== - -**glotzerlab-software** releases update regularly. Releases are numbered by the year, month, -and optionally the day separated by periods or hyphens. The change log highlights major changes. -Find the complete archive of releases on `Docker Hub`_. - -.. _Docker Hub: https://hub.docker.com/r/glotzerlab/software/tags - -2024 ----- - -Next release -++++++++++++ - -*Added* - -* Conda package distribution for UMich Great Lakes, Purdue ANVIL, and NCSA Delta. - -*Deprecated* - -* Docker images - -2024-02-02 -++++++++++ - -*Changed* - -* Support Delta slingshot11 hardware update. Remove ``OMPI_MCA_btl=self`` - ``OMPI_MCA_btl`` must - remain unset. - -2024-01-17 -++++++++++ - -*Removed* - -* Support for OLCF Summit. - -2023 ----- - -2023-10-27 -++++++++++ - -*Added* - -* Support Purdue Anvil. - -2023-10-12 -++++++++++ - -*Changed* - -* Require openmpi/4.1.4 on Delta. -* Recommend ``export OMPI_MCA_btl=self`` and ``srun`` when launching MPI jobs on Delta. - -2023-09-22 -++++++++++ - -*Changed* - -* Require openmpi/4.1.6 on Great Lakes. -* Set `PYTHONUBUFFERED=1`. - - -2022 ----- - -2022-07-12 -++++++++++ - -*Fixed* - -* ``expanse-gpu`` execution with more than one MPI rank. - -*Added* - -* ``cupy`` Python package. - -2022-07-11 -++++++++++ - -*Added* - -* ``delta`` image with support for NCSA Delta. - -2021 ----- - -2021-12-01 -++++++++++ - -*Added* - -* ``signac-dashboard`` Python package. - -*Changed* - -* Added ``expanse-gpu`` image with support for XSEDE Expanse's GPU nodes. - - * Expanse CPU jobs should use the ``expanse`` image and - ``module load cpu singularitypro gcc/9.2.0 openmpi/4.1.1``. - * GPU jobs should use the ``expanse-gpu`` image and - ``module load gpu singularitypro openmpi/4.0.4``. - -2021.11.09 -++++++++++ - -*Changed* - -* Use Ubuntu 20.04 base image. -* Use CUDA 11 base image. - -*Removed* - -* Stampede2 image. - -2021.06.08 -++++++++++ - -*Added:* - -* Image for SDSC Expanse. - -*Removed:* - -* Image for SDSC Comet. - -2021.04.21 -++++++++++ - -*Added:* - -* Environment variables that identify source commit and configuration of the image: - ``GLOTZERLAB_SOFTWARE_GIT_SHA``, ``GLOTZERLAB_SOFTWARE_GIT_BRANCH``, - ``GLOTZERLAB_SOFTWARE_CONFIGURATION``, and ``GLOTZERLAB_SOFTWARE_TAG`` - -*Changed:* - -* New images are now pushed to Docker Hub on every commit to the mainline branches. -* Future change log entries will no longer document version bumps. - -2021.03.24 -++++++++++ - -*Changed:* - -* ``glotzerlab-software`` images are no longer hosted at https://glotzerlab.engin.umich.edu. - Use ``singularity pull software.sif docker://glotzerlab/software`` to download the image. 
- -*Updated:* - -* freud v2.5.0 -* fresnel v0.13.1 -* gsd v2.4.1 -* libgetar v1.1.0 - -* HOOMD-blue - - * v2.9.6 - * Beta: v3.0.0-beta.5 - -* signac-flow v0.13.0 - -2021.02.25 -++++++++++ - -*Updated:* - -* Python 3.8 -* HOOMD-blue - - * v2.9.4 - * Beta: v3.0.0-beta.4 - -2021.01.15 -++++++++++ - -*Added:* - -* Support PSC Bridges-2 - -*Updated:* - -* freud v2.4.1 -* rowan v1.3.0.post1 -* signac v1.5.1 -* HOOMD-blue - - * Beta: v3.0.0-beta.3 - -*Removed:* - -* Support for PSC Bridges - -2020 ----- - -2020.12.15 -++++++++++ - -*Updated:* - -* HOOMD-blue - - * Beta: v3.0.0-beta.2 - - -2020.11.18 -++++++++++ - -*Updated:* - -* coxeter v0.4.0 -* freud v2.4.0 -* garnett v0.7.1 -* gsd v2.4.0 -* signac v1.5.0 -* signac-flow v0.11.0 - -2020.10 -+++++++ - -*Updated:* - -* gsd v2.2.0 -* HOOMD-blue - - * Stable: v2.9.3 - * Beta: v3.0.0-beta.1 - -2020.07 -+++++++ - -*Updated:* - -* gsd v2.1.2 -* HOOMD-blue v2.9.2 - -2020.04 -+++++++ - -*Updated:* - -* fresnel v0.12.0 -* freud v2.2.0 -* garnett v0.7.1 -* gsd v2.1.1 - -*Changed:* - -* Temporarily removed garnett: Ubuntu 18.04 does not provide a compatible - ``numpy`` or ``tqdm``. - -*Fixed:* - -* Add python3-filelock - -2020.02 -+++++++ - -*Updated:* - -* freud v2.1.0 -* gsd v2.0.0 -* HOOMD-blue v2.9.0 -* signac v1.3.0 -* signac-flow v0.9.0 - -2019 ----- - -2019.12 -+++++++ - -*Updated:* - -* freud v2.0.1 -* gsd v1.10.0 -* HOOMD-blue v2.8.1 - -*Library/OS updates:* - -* Ubuntu 18.04 and Python 3.6 now on all systems -* Great Lakes now requires ``module load openmpi/4.0.2`` -* Bridges images are broken. Please use the ``nompi`` images on Bridges until - we can resolve MPI support issues on Bridges. - -2019.11 -+++++++ - -*Updated:* - -* fresnel v0.11.0 -* freud v2.0.0 -* garnett v0.6.1 -* HOOMD-blue v2.8.0 - -*Library/OS updates:* - -* Ubuntu 18.04 [not on comet] -* Python 3.6 [not on comet] - -*Supported systems:* - -* Removed UMich Flux - - -2019.10 -+++++++ - -*Updated:* - -* fresnel v0.10.1 -* gsd v1.9.3 -* HOOMD-blue v2.7.0 -* rowan v1.2.2 -* plato v1.7.0 - -2019.09 -+++++++ - -*Updated:* - -* fresnel v0.10.0 -* freud v1.2.2 -* garnett v0.5.0 -* gsd v1.8.1 -* signac-flow v0.8.0 - -*Library/OS updates:* - -* embree v3.6.1 -* [summit] TBB 2019_U8 -* [summit] scipy v1.3.1 -* [greatlakes] Ubuntu 18.04 -* [greatlakes] Python 3.6 - -*Supported systems:* - -* Added UMich Great Lakes - -2019.08 -+++++++ - -*New software:* - -* garnett v0.4.1 - -*Updated:* - -* freud v1.2.1 -* gsd v1.8.0 -* libgetar v1.0.1 -* pythia v0.2.5 -* siganc v1.2.0 - -2019.07 -+++++++ - -*Updated:* - -* freud v1.2.0 -* HOOMD-blue v2.6.0 -* rowan v1.2.1 -* plato v1.6.0 -* siganc v1.1.0 - -2019.05 -+++++++ - -*Updated:* - -* fresnel v0.9.0 -* GSD v1.7.0 -* HOOMD-blue v2.5.2 -* signac-flow v0.7.1 - -2019.03 -+++++++ - -*Updated:* - -* fresnel v0.8.0 -* freud v1.0.0 -* GSD v1.6.1 -* HOOMD-blue v2.5.1 -* rowan v1.2.0 -* signac v1.0.0 - -*Library updates:* - -* embree 3.5.2 - -2019.02 -+++++++ - -*Updated:* - -* fresnel v0.7.1 -* HOOMD-blue v2.5.0 -* rowan v1.1.7 -* signac v0.9.5 -* [summit] scipy v1.2.0 - -*Library updates:* - -* embree 3.4.0 -* TBB 2019_U3 - -*Fixes:* - -* Stampede2 multi-node MPI now works - -2019.01 -+++++++ - -*Updated:* - -* gsd v1.6.0 -* HOOMD-blue v2.4.2 -* plato v1.4.0 -* signac-flow v0.6.4 - -2018 ----- - -2018.12 -+++++++ - -*Updated:* - -* Freud v0.11.4 -* gsd v1.5.5 -* HOOMD-blue v2.4.1 -* plato v1.3.0 -* pythia v0.2.4 - -*Supported systems:* - -* Added Summit build scripts - -2018.11 -+++++++ - -*Updated:* - -* Freud v0.11.3 -* HOOMD-blue v2.4.0 -* libgetar 
v0.7.0 -* plato v1.2.0 -* pythia v0.2.3 -* rowan v1.1.6 -* signac v0.9.4 - -2018.10 -+++++++ - -*Updated:* - -* Embree v3.2.1 -* Freud v0.11.0 -* GSD v1.5.4 -* HOOMD-blue v2.3.5 - -2018.09 -+++++++ - -*Changes:* - -* Documentation now on readthedocs.org -* Latest images are now available directly from singularity-hub - -*Updated:* - -* Freud v0.10.0 -* libgetar v0.6.1 -* Rowan v1.1.0 -* Signac-flow v0.6.3 - -2018.08 -+++++++ - -*Updated:* - -* Fresnel v0.6.0 -* Freud v0.9.0 -* HOOMD-blue v2.3.4 - -2018.07 -+++++++ - -Added: - -* pyhull - -*Updated:* - -* Freud v0.8.2 -* HOOMD-blue v2.3.3 -* Signac v0.9.3 -* Signac-flow v0.6.1 - -2018.06 -+++++++ - -Added: - -* ffmpeg -* h5py -* jupyter -* mpi4py -* pandas -* pillow -* PyQt5 -* pyyaml -* scipy -* scikit-learn -* libgetar v0.5.4 -* Pythia v0.2.2 -* Rowan v0.6.1 -* Plato v1.1.0 - -*Updated:* - -* Freud v0.8.1 -* GSD v1.5.3 -* HOOMD-blue v2.3.1 -* Signac-flow v0.6.0 - -2018.04 -+++++++ - -*Changes:* - -* Tag MPI enabled builds for specific clusters -* Support SDSC Comet, PSC Bridges, TACC Stampede2, and University of Michigan Flux clusters. - -*Updated:* - -* Freud v0.8.0 -* GSD v1.5.2 -* HOOMD-blue v2.3.0 - -2018.03 -+++++++ - -The initial release includes: - -Glotzerlab software versions: - -* Fresnel v0.5.0 -* Freud v0.7.0 -* GSD v1.5.1 -* HOOMD-blue v2.2.4 -* Signac v0.9.2 -* Signac-flow v0.5.6 - -Commonly used tools: - -* LLVM/clang -* python3 -* matplotlib -* numpy -* pytest -* sphinx diff --git a/LICENSE b/LICENSE index 0882aa04..00efa00d 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -glotzerlab-software Open Source Software License Copyright (c) 2018-2022 The Regents of +glotzerlab-software Open Source Software License Copyright (c) 2018-2024 The Regents of the University of Michigan All rights reserved. glotzerlab-software may contain modifications ("Contributions") provided, and to which diff --git a/README.md b/README.md index bdb0941a..2727623a 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,15 @@ # Glotzerlab software -**glotzerlab-software** deploys software developed by the [Glotzer -group]() at the [University of Michigan](https://www.umich.edu/) -to HPC resources via conda packages and container images. Use **glotzerlab-software** to install -a MPI and GPU enabled build of HOOMD-blue on a *HPC resource*. Use the standard conda-forge -provided HOOMD-blue (`mamba install hoomd` with no special configuration) for serial execution -on individual workstations. +**glotzerlab-software** deploys software developed by the [Glotzer group] at the +[University of Michigan] to HPC resources via conda packages. Use +**glotzerlab-software** to install an MPI and GPU enabled build of HOOMD-blue +and related packages on a *HPC resource*. + +Use the standard conda-forge provided HOOMD-blue (`mamba install hoomd` with +no special configuration) for serial execution on individual workstations. + +[Glotzer group]: http://glotzerlab.engin.umich.edu +[University of Michigan]: https://www.umich.edu ## Resources @@ -16,7 +20,7 @@ on individual workstations. * [HOOMD-blue](https://glotzerlab.engin.umich.edu/hoomd-blue/): The general-purpose particle simulation toolkit that **glotzerlab-software** packages. -## Quick start [conda] +## Quick start 1. Install [miniforge](https://github.com/conda-forge/miniforge). 2. Replace `$HOME/miniforge3/.condarc` with: @@ -49,34 +53,11 @@ build will contain the cluster name and a `gpu` or `cpu` suffix. 
For example:

    # Name    Version   Build                         Channel
    hoomd     X.Y.Z     cluster_gpu_py311h1b32822_0   file://path/to/conda/channel

-## Quick start [container]
-
-Download the stable **glotzerlab/software** image using
-[Singularity](https://sylabs.io/singularity/):
-
-    $ singularity pull software.sif docker://glotzerlab/software
-
-**singularity exec** executes software from inside the container. For example, run a Python script
-with:
-
-    $ singularity exec software.sif python3 script.py
-
-Add the ``--nv`` option to **enable NVIDIA GPUs** inside the container:
-
-    $ singularity exec --nv software.sif nvidia-smi
-
-**glotzerlab-software** updates regularly with the latest versions of included software. Download
-the latest image to update:
-
-    $ singularity pull software.sif docker://glotzerlab/software
-
-## Change log
-
-See [CHANGELOG.rst](CHANGELOG.rst).
-
 ## Contributing

-Contributions are welcomed via [pull requests](https://github.com/glotzerlab/software/pulls). Please
-report bugs and suggest feature enhancements via the [issue
-tracker](https://github.com/glotzerlab/software/issues). See [ARCHITECTURE.md](ARCHITECTURE.md) for
-information on how the repository is structured, including how to modify the containers.
+Contributions are welcomed via [pull requests]. Please report bugs and suggest feature
+enhancements via the [issue tracker]. See `ARCHITECTURE.md` for information on how the
+repository is structured, including how to modify the packages.
+
+[pull requests]: https://github.com/glotzerlab/software/pulls
+[issue tracker]: https://github.com/glotzerlab/software/issues
diff --git a/build.sh b/build.sh
deleted file mode 100755
index ac5c43d1..00000000
--- a/build.sh
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/bin/bash
-
-set -u
-set -e
-
-usage()
-    {
-    echo "Usage: $0 -r repository [ -t tag ] [ system [ system [ ...
] ] ]" - exit - } - -tag="$(date +%Y.%m.%d)" -repository="" - -while getopts ":r:t:" o; do - case "${o}" in - r) - repository=${OPTARG} - ;; - t) - tag=${OPTARG} - ;; - *) - usage - ;; - esac -done -shift $((OPTIND-1)) - -if [ -z "${repository}" ]; then - usage -fi - -python3 make_dockerfiles.py - -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -declare -A extra_tags -extra_tags=( ["nompi"]="-t ${repository}/software:latest" - ["greatlakes"]="" - ["delta"]="" - ["expanse"]="" - ["expanse-gpu"]="" - ["bridges2"]="" - ["anvil"]="" -) - -for cluster in "$@" -do - cp -a $DIR/test/*.py $DIR/docker/${cluster}/test - cp -a requirements*.txt $DIR/docker/${cluster} - docker build $DIR/docker/${cluster} \ - -t ${repository}/software:${cluster} \ - -t ${repository}/software:${tag}-${cluster} \ - ${extra_tags[$cluster]} \ - --build-arg GIT_SHA=$(git rev-parse HEAD) \ - --build-arg GIT_BRANCH=$(git branch --show-current) \ - --build-arg CONFIGURATION=${cluster} \ - --build-arg TAG=${tag} -done diff --git a/conda/andes.sh b/conda/andes.sh index 52513827..44aad5d8 100755 --- a/conda/andes.sh +++ b/conda/andes.sh @@ -16,4 +16,3 @@ export SLURM_CPUS_PER_TASK=8 --output-folder $OUTPUT_FOLDER chmod g-w $OUTPUT_FOLDER -R - diff --git a/conda/anvil.sh b/conda/anvil.sh index a6b3b91e..b82d60a7 100644 --- a/conda/anvil.sh +++ b/conda/anvil.sh @@ -23,4 +23,3 @@ export CXX=$GCC_HOME/bin/g++ --output-folder $OUTPUT_FOLDER chmod g-w $OUTPUT_FOLDER -R - diff --git a/conda/frontier.sh b/conda/frontier.sh index 0a0664a7..8cb42107 100755 --- a/conda/frontier.sh +++ b/conda/frontier.sh @@ -15,7 +15,7 @@ export SLURM_CPUS_PER_TASK=16 export MPICC=$CRAY_MPICH_DIR/bin/mpicc export CC=$GCC_PATH/bin/gcc export CXX=$GCC_PATH/bin/g++ -export HCC_AMDGPU_TARGET=gfx90a +export HCC_AMDGPU_TARGET=gfx90a ./build.sh "$@" \ --skip-existing \ @@ -23,4 +23,3 @@ export HCC_AMDGPU_TARGET=gfx90a --output-folder $OUTPUT_FOLDER chmod g-w $OUTPUT_FOLDER -R - diff --git a/conda/hoomd/meta.yaml b/conda/hoomd/meta.yaml index 66f4f0b0..5e692b56 100644 --- a/conda/hoomd/meta.yaml +++ b/conda/hoomd/meta.yaml @@ -46,4 +46,3 @@ requirements: run: - python - numpy - diff --git a/doc/.gitignore b/doc/.gitignore index 378eac25..7585238e 100644 --- a/doc/.gitignore +++ b/doc/.gitignore @@ -1 +1 @@ -build +book diff --git a/doc/Makefile b/doc/Makefile deleted file mode 100644 index 813620c8..00000000 --- a/doc/Makefile +++ /dev/null @@ -1,216 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -a -n -W -T --keep-going -SPHINXBUILD = sphinx-build -PAPER = -BUILDDIR = build - -# User-friendly check for sphinx-build -ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) -$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) -endif - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
- -.PHONY: help -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " applehelp to make an Apple Help Book" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " xml to make Docutils-native XML files" - @echo " pseudoxml to make pseudoxml-XML files for display purposes" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - @echo " coverage to run coverage check of the documentation (if enabled)" - -.PHONY: clean -clean: - rm -rf $(BUILDDIR)/* - -.PHONY: html -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -.PHONY: dirhtml -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -.PHONY: singlehtml -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -.PHONY: pickle -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -.PHONY: json -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -.PHONY: htmlhelp -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -.PHONY: qthelp -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/HOOMD-blue.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/HOOMD-blue.qhc" - -.PHONY: applehelp -applehelp: - $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp - @echo - @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." - @echo "N.B. You won't be able to view it unless you put it in" \ - "~/Library/Documentation/Help or install it in your application" \ - "bundle." - -.PHONY: devhelp -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." 
- @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/HOOMD-blue" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/HOOMD-blue" - @echo "# devhelp" - -.PHONY: epub -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -.PHONY: latex -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -.PHONY: latexpdf -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -.PHONY: latexpdfja -latexpdfja: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through platex and dvipdfmx..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -.PHONY: text -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -.PHONY: man -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -.PHONY: texinfo -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -.PHONY: info -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -.PHONY: gettext -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -.PHONY: changes -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -.PHONY: linkcheck -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -.PHONY: doctest -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." - -.PHONY: coverage -coverage: - $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage - @echo "Testing of coverage in the sources finished, look at the " \ - "results in $(BUILDDIR)/coverage/python.txt." - -.PHONY: xml -xml: - $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml - @echo - @echo "Build finished. The XML files are in $(BUILDDIR)/xml." - -.PHONY: pseudoxml -pseudoxml: - $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml - @echo - @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
diff --git a/doc/_templates/page.html b/doc/_templates/page.html deleted file mode 100644 index e284003a..00000000 --- a/doc/_templates/page.html +++ /dev/null @@ -1,71 +0,0 @@ -{% extends "furo/page.html" %} -{% block footer %} - -
-    Development of {{ project }} is led by the Glotzer Group at the University of Michigan.
-    {%- if show_copyright %}
-    {%- endif %}
-    {% trans %}Made with {% endtrans -%}
-    {%- if show_sphinx -%}
-    {% trans %}Sphinx and {% endtrans -%}
-    @pradyunsg's
-    {% endif -%}
-    {% trans %}
-    Furo
-    {% endtrans %}
-    {%- if last_updated -%}
-    {% trans last_updated=last_updated|e -%}
-    Last updated on {{ last_updated }}
-    {%- endtrans -%}
-    {%- endif %}
-{% endblock footer %} diff --git a/doc/book.toml b/doc/book.toml new file mode 100644 index 00000000..fc142c07 --- /dev/null +++ b/doc/book.toml @@ -0,0 +1,18 @@ +[book] +language = "en" +multilingual = false +src = "src" +title = "Glotzerlab-software documentation" + +[build] +create-missing = false + +# Uncomment to enable link checking in docs. Normally these checks are only performed in CI. +# You also need to install https://github.com/Michael-F-Bryan/mdbook-linkcheck. +# [output.linkcheck] +# follow-web-links = true + +# [output.html] must be the last table so that .readthedocs.yaml can easily add a key to it +[output.html] +git-repository-url = "https://github.com/glotzerlab/software" +additional-css = ["rtd.css"] diff --git a/doc/changes.rst b/doc/changes.rst deleted file mode 100644 index 565b0521..00000000 --- a/doc/changes.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../CHANGELOG.rst diff --git a/doc/cluster.rst b/doc/cluster.rst deleted file mode 100644 index 3b033b19..00000000 --- a/doc/cluster.rst +++ /dev/null @@ -1,19 +0,0 @@ -HPC clusters ------------- - -**glotzerlab-software** provides a number of cluster-specific images, needed to enable high -performance CPU, CUDA, and multi-node MPI support matching the drivers and hardware available on -each cluster. Refer to the appropriate section for documentation specific to the cluster you are -using. If you wish to use **glotzerlab-software** on a different cluster, contact the :doc:`user -community ` for suggestions. - -.. toctree:: - :maxdepth: 1 - - clusters/anvil - clusters/bridges2 - clusters/crusher - clusters/delta - clusters/expanse - clusters/frontier - clusters/greatlakes diff --git a/doc/clusters/anvil.rst b/doc/clusters/anvil.rst deleted file mode 100644 index 7d5a9393..00000000 --- a/doc/clusters/anvil.rst +++ /dev/null @@ -1,47 +0,0 @@ -Anvil (Purdue) -************** - -Anvil_ is an HPC cluster at NCSA with GPU and CPU nodes. Apply for resources on Anvil through -the ACCESS_ program. - -.. _Anvil: https://www.rcac.purdue.edu/knowledge/anvil -.. _ACCESS: https://allocations.access-ci.org - -The **glotzerlab-software** image and the singularity cache are large, store them in your scratch -directory:: - - $ cd $SCRATCH - $ export SINGULARITY_CACHEDIR=$SCRATCH/.singularity - -Download the image with support for Anvil:: - - $ singularity pull software.sif docker://glotzerlab/software:anvil - -Use the following commands in your job scripts or interactively to execute software inside the -container: - -.. note:: - - Replace ``command arguments`` with the command and arguments you wish to run. For example: - ``python3 script.py``. - -Serial (or multithreaded) CPU jobs:: - - mpirun -n 1 singularity exec --bind /anvil $SCRATCH/software.sif command arguments - -MPI parallel CPU jobs:: - - mpirun singularity exec --bind /anvil $SCRATCH/software.sif command arguments - -.. important:: - - You must use ``mpirun`` to launch parallel jobs. ``srun`` is not compatible with the MPI library - installed inside the container. - -.. note:: - - This container is compatible with ``openmpi/4.0.6`` which is loaded by default on Anvil. - -.. attention:: - - The container is built with GPU support but GPU jobs have not been tested on Anvil. diff --git a/doc/clusters/bridges2.rst b/doc/clusters/bridges2.rst deleted file mode 100644 index ebc1caae..00000000 --- a/doc/clusters/bridges2.rst +++ /dev/null @@ -1,48 +0,0 @@ -Bridges-2 (PSC) -*************** - -`Bridges-2 `_ is an HPC cluster at PSC with GPU and CPU -nodes. 
Apply for resources on Bridges through the `ACCESS `_ program. - -The **glotzerlab-software** image and the singularity cache are large, store them in your project -directory:: - - $ cd $PROJECT - $ export SINGULARITY_CACHEDIR=$PROJECT/.singularity - -Download the image with support for Bridges-2:: - - $ singularity pull software.sif docker://glotzerlab/software:bridges2 - -Use the following commands in your job scripts or interactively to execute software inside the -container: - -.. note:: - - Replace ``command arguments`` with the command and arguments you wish to run. For example: - ``python3 script.py``. - -Serial (or multithreaded) CPU jobs (``RM-shared`` partitions):: - - module load openmpi/4.0.5-gcc10.2.0 - mpirun -n 1 singularity exec --bind /ocean $PROJECT/software.sif command arguments - -Single GPU jobs (``GPU-shared`` partition):: - - module load openmpi/4.0.5-gcc10.2.0 - mpirun -n 1 singularity exec --bind /ocean --nv $PROJECT/software.sif command arguments - -MPI parallel CPU jobs (``RM`` partition, ``RM-shared`` partition with more than 1 core):: - - module load openmpi/4.0.5-gcc10.2.0 - mpirun singularity exec --bind /ocean $PROJECT/software.sif command arguments - -MPI parallel GPU jobs (``GPU`` partition):: - - module load openmpi/4.0.5-gcc10.2.0 - mpirun singularity exec --bind /ocean --nv $PROJECT/software.sif command arguments - -.. important:: - - You must use ``mpirun`` to launch parallel jobs. ``srun`` is not compatible with the MPI library - installed inside the container. diff --git a/doc/clusters/crusher.rst b/doc/clusters/crusher.rst deleted file mode 100644 index 61a60a76..00000000 --- a/doc/clusters/crusher.rst +++ /dev/null @@ -1,42 +0,0 @@ -Crusher (OLCF) -************** - -`Crusher `_ is a system -at ORNL with identical hardware and similar software as the upcoming Frontier system. - -Crusher does not support container execution at this time. **glotzerlab-software** instead provides -a build script and a module environment to create an equivalent software stack. - -First, clone the **glotzerlab-software** repository:: - - $ git clone https://github.com/glotzerlab/software - $ cd software - -If you already have a clone, update it:: - - $ cd software - $ git pull origin trunk - -Per OLCF policies, you should install your software in NFS under ``/ccs/proj/``. For example, -set the installation root directory to ``/ccs/proj/{your-project}/software/${USER}``. - -Build the software environment and install it into the root:: - - $ script/crusher/install.sh /ccs/proj/{your-project}/software/${USER} - ... compiling software will take several minutes ... - -Activate the environment with:: - - $ source /ccs/proj/{your-project}/software/${USER}/environment.sh - -The environment is a `python3 venv `_. You may extend -it with additional python packages using ``python3 -m pip install``:: - - $ source /ccs/proj/{your-project}/software/${USER}/environment.sh - $ python3 -m pip install package - -Use the following commands in your job scripts or interactively to execute software inside the -container:: - - source /ccs/proj/{your-project}/software/${USER}/environment.sh - srun {srun options} command arguments diff --git a/doc/clusters/delta.rst b/doc/clusters/delta.rst deleted file mode 100644 index c3840193..00000000 --- a/doc/clusters/delta.rst +++ /dev/null @@ -1,50 +0,0 @@ -Delta (NCSA) -************* - -Delta_ is an HPC cluster at NCSA with GPU and CPU nodes. Apply for resources on Delta through -the ACCESS_ program. - -.. 
_Delta: https://ncsa-delta-doc.readthedocs-hosted.com -.. _ACCESS: https://allocations.access-ci.org - -The **glotzerlab-software** image and the singularity cache are large, store them in your scratch -directory:: - - $ cd /scratch//$USER/ - $ export APPTAINER_CACHEDIR=/scratch//$USER/.apptainer - -.. note:: - - Replace ```` with your NCSA account name. - -Download the image with support for Delta:: - - $ singularity pull software.sif docker://glotzerlab/software:delta - -Use the following commands in your job scripts or interactively to execute software inside the -container: - -.. note:: - - Replace ``command arguments`` with the command and arguments you wish to run. For example: - ``python3 script.py``. - -Serial (or multithreaded) CPU jobs (``cpu`` partition):: - - module load gcc/11.4.0 openmpi/4.1.6 - mpirun -n 1 singularity exec --bind /scratch /scratch//$USER/software.sif command arguments - -Single GPU jobs (``gpuA100x4`` and similar partitions):: - - module load gcc/11.4.0 openmpi/4.1.6 - mpirun -n 1 singularity exec --nv --bind /scratch /scratch//$USER/software.sif command arguments - -MPI parallel CPU jobs (``cpu`` partition with more than 1 core):: - - module load gcc/11.4.0 openmpi/4.1.6 - mpirun singularity exec --bind /scratch /scratch//$USER/software.sif command arguments - -MPI parallel GPU jobs (``gpuA100x4`` and similar partitions with more than 1 GPU):: - - module load gcc/11.4.0 openmpi/4.1.6 - mpirun singularity exec --nv --bind /scratch /scratch//$USER/software.sif command arguments diff --git a/doc/clusters/expanse.rst b/doc/clusters/expanse.rst deleted file mode 100644 index a20c85c0..00000000 --- a/doc/clusters/expanse.rst +++ /dev/null @@ -1,77 +0,0 @@ -Expanse (SDSC) -************** - -Expanse_ is an HPC cluster at SDSC with GPU and CPU nodes. Apply for resources on Expanse through -the ACCESS_ program. - -.. _Expanse: https://www.sdsc.edu/support/user_guides/expanse.html -.. _ACCESS: https://allocations.access-ci.org/ - -For unknown reasons, ``singularity pull`` generates corrupt images when run on Expanse. You need -to use another Linux system to pull the ``expanse`` or ``expanse-gpu`` image, then copy that image -to Expanse. - -.. note:: - - The corrupt image causes ``/usr/.so: file too short`` error messages when you - attempt to use software in the container. - -Download the image with support for Expanse's CPU nodes:: - - $ singularity pull software.sif docker://glotzerlab/software:expanse - -Then copy ``software.sif`` to ``/expanse/lustre/scratch/$USER/temp_project/`` on Expanse. - -Or: download the image with support for Expanse's GPU nodes:: - - $ singularity pull software.sif docker://glotzerlab/software:expanse-gpu - -Then copy ``software.sif`` to ``/expanse/lustre/scratch/$USER/temp_project/`` on Expanse. - -.. important:: - - Use the correct image: - - * ``expanse`` on CPU nodes. - * ``expanse-gpu`` on GPU nodes. - -Use the following commands in your job scripts or interactively to execute software inside the -container: - -.. note:: - - Replace ``command arguments`` with the command and arguments you wish to run. For example: - ``python3 script.py``. 
- -Serial (or multithreaded) CPU jobs (``shared`` partition):: - - module load cpu/0.15.4 singularitypro gcc/9.2.0 openmpi/4.1.1 - singularity exec --bind /expanse /expanse/lustre/scratch/$USER/temp_project/software.sif command arguments - -Single GPU jobs (``gpu-shared`` partition):: - - module load gpu/0.15.4 singularitypro openmpi/4.0.4-nocuda - singularity exec --nv --bind /expanse /expanse/lustre/scratch/$USER/temp_project/software.sif command arguments - -MPI parallel CPU jobs (``compute`` partition, ``shared`` partition with more than 1 core):: - - module load cpu/0.15.4 singularitypro gcc/9.2.0 openmpi/4.1.1 - mpirun singularity exec --bind /expanse /expanse/lustre/scratch/$USER/temp_project/software.sif command arguments - -MPI parallel GPU jobs (``gpu`` partition, ``gpu-shared`` with more than 1 GPU):: - - module load gpu/0.15.4 singularitypro openmpi/4.0.4-nocuda - mpirun singularity exec --nv --bind /expanse /expanse/lustre/scratch/$USER/temp_project/software.sif command arguments - -.. important:: - - Use the correct ``module load`` line for the type of node your job will execute on. - -.. warning:: - - ``mpirun`` will hang when launching jobs one more than one node in the ``gpu`` partition. - -.. important:: - - You must use ``mpirun`` to launch parallel jobs. ``srun`` is not compatible with the MPI library - installed inside the container. diff --git a/doc/clusters/frontier.rst b/doc/clusters/frontier.rst deleted file mode 100644 index f9a9561c..00000000 --- a/doc/clusters/frontier.rst +++ /dev/null @@ -1,80 +0,0 @@ -Frontier (OLCF) -*************** - -`frontier `_ is a system -at ORNL. - -Frontier does not support container execution at this time. **glotzerlab-software** instead provides -a build script and a module environment to create an equivalent software stack. - -First, clone the **glotzerlab-software** repository:: - - $ git clone https://github.com/glotzerlab/software - $ cd software - -If you already have a clone, update it:: - - $ cd software - $ git pull origin trunk - -You should install your software in NFS under ``$HOME``. For example, set the installation -root directory to ``/ccs/home/${USER}/software/frontier/``. - - -Build the software environment and install it into the root:: - - $ script/frontier/install.sh /ccs/home/${USER}/software/frontier/ - ... compiling software will take several minutes ... - -Activate the environment with:: - - $ source /ccs/home/${USER}/software/frontier/environment.sh - -The environment is a `python3 venv `_. You may extend -it with additional python packages using ``python3 -m pip install``:: - - $ source /ccs/home/${USER}/software/frontier/environment.sh - $ python3 -m pip install package - -Importing Python packages from this environment will be *very* slow with large node count jobs. -To improve performance, generate a tar file with the environment and store it on Orion. - -.. important:: - - Repeat this step after you update the environment or install packages with ``pip`` - -.. code-block:: - - $ /ccs/home/${USER}/software/frontier/generate-tar-cache.sh \ - ${MEMBERWORK}/{your-project}/software.tar - -.. tip:: - - Collaborative projects should aim to install and maintain a single copy of the software. - This should be installed to the project home as follows:: - - $ script/frontier/install.sh /ccs/proj/{your-project}/software/frontier/{subproject-name} - ... compiling software will take several minutes ... 
- - This allows for changes to the environment to propagate between users, and cuts down on - storage usage in the project home directory. - - Collaborative projects can also work off of a single cached ``software.tar``:: - - $ /ccs/proj/{your-project}/software/frontier/{subproject-name}/generate-tar-cache.sh \ - ${PROJWORK}/{your-project}/software/{subproject-name}/software.tar - - -Use the following commands in your job scripts (or interactively with ``salloc``) to load the cache -into NVME and execute software from there:: - - #SBATCH -C nvme - - export GLOTZERLAB_SOFTWARE_ROOT=/mnt/bb/${USER}/software - srun --ntasks-per-node 1 mkdir ${GLOTZERLAB_SOFTWARE_ROOT} - srun --ntasks-per-node 1 tar --directory ${GLOTZERLAB_SOFTWARE_ROOT} -xpf \ - ${MEMBERWORK}/{your-project}/software.tar - # ${PROJWORK}/{your-project}/software.tar # For use with shared projects. - source ${GLOTZERLAB_SOFTWARE_ROOT}/variables.sh - - srun {srun options} command arguments diff --git a/doc/clusters/greatlakes.rst b/doc/clusters/greatlakes.rst deleted file mode 100644 index 34775748..00000000 --- a/doc/clusters/greatlakes.rst +++ /dev/null @@ -1,58 +0,0 @@ -Great Lakes (UMich) -******************* - -`Great Lakes `_ is the University of Michigan campus cluster. - -The **glotzerlab-software** image and the singularity cache are large, store them in your scratch -directory:: - - $ cd /scratch///$USER - $ export SINGULARITY_CACHEDIR=/scratch///$USER/.singularity - -.. note:: - - Replace ```` with your Great Lakes account name. - -Download the image with support for Great Lakes:: - - $ module load singularity - $ singularity pull software.sif docker://glotzerlab/software:greatlakes - -Use the following commands in your job scripts or interactively to execute software inside the container: - -.. note:: - - Replace ``command arguments`` with the command and arguments you wish to run. For example: - ``python3 script.py``. - -Serial (or multithreaded) CPU jobs:: - - module load gcc/10.3.0 openmpi/4.1.6 singularity - srun -u --export=ALL -n 1 singularity exec --bind /scratch,/gpfs \ - /scratch/your-account_root/your-account/$USER/software.sif command arguments - -Single GPU jobs:: - - module load gcc/10.3.0 openmpi/4.1.6 singularity - srun -u --export=ALL -n 1 singularity exec --bind /scratch,/gpfs --nv \ - /scratch/your-account_root/your-account/$USER/software.sif command arguments - -MPI parallel CPU jobs:: - - module load gcc/10.3.0 openmpi/4.1.6 singularity - srun -u --export=ALL singularity exec --bind /scratch,/gpfs \ - /scratch/your-account_root/your-account/$USER/software.sif command arguments - -MPI parallel GPU jobs:: - - module load gcc/10.3.0 openmpi/4.1.6 singularity - srun -u --export=ALL singularity exec --bind /scratch,/gpfs --nv \ - /scratch/your-account_root/your-account/$USER/software.sif command arguments - -.. important:: - - Invoke parallel jobs with ``srun -u --export=ALL`` to ensure proper task distribution to the - requested resources (``mpirun`` oversubscribes resources in some cases). The ``--export=ALL`` - should be the default behavior but is not observed in testing. The ``-u`` option ensures that - the stdout and stderr output is written to the file immediately. Without ``-u``, srun on Great - Lakes tends to buffer output until the job completes. 
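The container-based Great Lakes instructions removed above are replaced by the conda workflow in the new README. A rough, hypothetical job-script fragment using a conda-installed build instead of the container; the module names and environment path are assumptions carried over from the old instructions and the README quick start, not documented requirements:

    # Assumption: the cluster build uses the system OpenMPI module, as the old
    # container instructions did.
    module load gcc/10.3.0 openmpi/4.1.6
    # Assumption: miniforge installed per the new README quick start.
    source $HOME/miniforge3/bin/activate
    srun -u --export=ALL python3 script.py

The supported invocation is whatever the updated cluster documentation prescribes; this sketch only illustrates the shape of the change.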
diff --git a/doc/community.rst b/doc/community.rst deleted file mode 100644 index 506d9828..00000000 --- a/doc/community.rst +++ /dev/null @@ -1,9 +0,0 @@ -User community --------------- - -Use the `glotzerlab-software discussion board -`_ to post questions, ask for support, and -discuss potential new features. - -File bug reports and feature requests on `glotzerlab-software's issue tracker -`_. diff --git a/doc/conf.py b/doc/conf.py deleted file mode 100644 index c3ecb092..00000000 --- a/doc/conf.py +++ /dev/null @@ -1,286 +0,0 @@ -#!/usr/bin/env python3 - -import sys -import os - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('..')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] -exclude_patterns = ['build', - '_templates', - '**.ipynb_checkpoints'] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# source_suffix = ['.rst', '.md'] -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = 'glotzerlab-software' -copyright = '2018-2021 The Regents of the University of Michigan' -author = 'The Regents of the University of Michigan' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = '' -# The full version, including alpha/beta/rc tags. -release = '' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = 'en' - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ['build'] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "friendly" -pygments_dark_style = "native" - -# A list of ignored prefixes for module index sorting. 
-#modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'furo' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "dark_css_variables": { - "color-brand-primary": "#5187b2", - "color-brand-content": "#5187b2", - }, - "light_css_variables": { - "color-brand-primary": "#406a8c", - "color-brand-content": "#406a8c", - }, -} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - - - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -#html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. 
-# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr' -#html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -#html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -#html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'glotzerlab-software-doc' - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', - -# Latex figure (float) alignment -#'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, 'glotzerlab-software.tex', 'glotzerlab-software Documentation', - 'The Regents of the University of Michigan', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -# man_pages = [ -# (master_doc, 'fresnel', 'Fresnel Documentation', -# [author], 1) -#] - -# If true, show URL addresses after external links. -#man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -#texinfo_documents = [ -# (master_doc, 'Fresnel', 'Fresnel Documentation', -# author, 'Fresnel', 'One line description of project.', -# 'Miscellaneous'), -#] - -# Documents to append as an appendix to all manuals. -#texinfo_appendices = [] - -# If false, no module index is generated. -#texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -#texinfo_no_detailmenu = False diff --git a/doc/container.rst b/doc/container.rst deleted file mode 100644 index b3ab1c36..00000000 --- a/doc/container.rst +++ /dev/null @@ -1,85 +0,0 @@ -Container images -================ - -**glotzerlab-software** is a set of container images that provide software developed by the `Glotzer -group `_ at the `University of Michigan -`_ along with related software commonly used in simulation and data analysis -workflows. 
An *image* bundles these tools into a single file which can be copied, moved, shared with -others, and published to provide a completely reproducible workflow. A *host* system provides the -resources to execute software inside containers. `Singularity `_ -provides a high performance solution, supporting NVIDIA GPUs and MPI parallel execution. Singularity -is available on most HPC resources. You can install it on your linux system if you have root access. - -To use a container, download the **glotzerlab/software** image:: - - $ singularity pull software.sif docker://glotzerlab/software - -.. note:: - - On HPC clusters, download the cluster specific image. See :doc:`cluster` for details. - -**singularity exec** executes software from inside the container. For example, run a Python script -with:: - - $ singularity exec software.sif python3 script.py - -Add the ``--nv`` option to **enable NVIDIA GPUs** inside the container:: - - $ singularity exec --nv software.sif nvidia-smi - +-----------------------------------------------------------------------------+ - | NVIDIA-SMI 450.57 Driver Version: 450.57 CUDA Version: 11.0 | - |-------------------------------+----------------------+----------------------+ - | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | - | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | - | | | MIG M. | - |===============================+======================+======================| - | 0 Quadro RTX 5000 On | 00000000:02:00.0 Off | Off | - | 33% 29C P8 16W / 230W | 1MiB / 16124MiB | 0% Default | - | | | N/A | - +-------------------------------+----------------------+----------------------+ - -**singularity shell** launches an interactive shell:: - - $ singularity shell --nv software.sif - Singularity: Invoking an interactive shell within container... - - Singularity> python3 - Python 3.6.9 (default, Oct 8 2020, 12:12:24) - [GCC 8.4.0] on linux - Type "help", "copyright", "credits" or "license" for more information. - >>> import hoomd - >>> import freud - >>> import signac - >>> gpu = hoomd.device.GPU() - >>> print(gpu.devices) - ['[0] Quadro RTX 5000 48 SM_7.5 @ 1.82 GHz, 16124 MiB DRAM'] - >>> - -**glotzerlab-software** updates weekly with the latest versions of included -software. Download the latest image to update:: - - $ singularity pull software.sif docker://glotzerlab/software - -Environment variables set in the image identify the build:: - - $ singularity exec software.sif bash -c "set" | grep GLOTZERLAB - GLOTZERLAB_SOFTWARE_CONFIGURATION=nompi - GLOTZERLAB_SOFTWARE_GIT_BRANCH=trunk - GLOTZERLAB_SOFTWARE_GIT_SHA=f2debd75280c98ed0a4e46e3e2d381e953b80b6f - GLOTZERLAB_SOFTWARE_TAG=2021.03.24 - -.. seealso:: - - See the `Singularity documentation `_ for more information on - ``singularity`` commands. - -.. toctree:: - :maxdepth: 1 - - software - cluster - files - profiling - test - docker - community diff --git a/doc/docker.rst b/doc/docker.rst deleted file mode 100644 index d4bdee1e..00000000 --- a/doc/docker.rst +++ /dev/null @@ -1,42 +0,0 @@ -Docker ------- - -**glotzerlab-software** is available on the `Docker Hub -`_ for use on docker based systems (for example: -cloud platforms). - -You can start an interactive session of the glotzerlab/software image with the following command:: - - $ docker run --rm -it glotzerlab/software - Python 3.6.9 (default, Oct 8 2020, 12:12:24) - [GCC 8.4.0] on linux - Type "help", "copyright", "credits" or "license" for more information. 
- >>> import hoomd - >>> import freud - >>> import signac - -You can utilize NVIDIA GPUs on local and cloud systems with the NVIDIA container runtime. See the -`NVIDIA GPU CLOUD documentation `_ -for more information:: - - $ docker run --gpus=device=all --rm -it glotzerlab/software - glotzerlab-software@b0cbab541230:/$ nvidia-smi - +-----------------------------------------------------------------------------+ - | NVIDIA-SMI 450.57 Driver Version: 450.57 CUDA Version: 11.0 | - |-------------------------------+----------------------+----------------------+ - | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | - | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | - | | | MIG M. | - |===============================+======================+======================| - | 0 Quadro RTX 5000 On | 00000000:02:00.0 Off | Off | - | 33% 29C P8 16W / 230W | 1MiB / 16124MiB | 0% Default | - | | | N/A | - +-------------------------------+----------------------+----------------------+ - -Tutorials on accessing files, executing parallel jobs, and extending docker images are beyond the -scope of this documentation. - -.. seealso:: - - The `Docker documentation `_ and the documentation for the docker - based platform you are using. diff --git a/doc/files.rst b/doc/files.rst deleted file mode 100644 index 0ca8d22d..00000000 --- a/doc/files.rst +++ /dev/null @@ -1,56 +0,0 @@ -Accessing files ----------------- - -The container's filesystem -************************** - -A container has its own filesystem baked into the image. The container's software is installed in -it's filesystem. Software executing inside the container will see different files than on the host. -This starts at the root of the filesystem:: - - $ ls / - bin dev home lib64 mnt opt root sbin sys usr - boot etc lib lost+found nfs proc run srv tmp var - $ singularity exec software.sif ls / - bin etc lib mkdir opt run srv var - boot fresnel-examples lib32 mnt proc sbin sys - dev home lib64 nfs projects scratch tmp - environment hoomd-examples media oasis root singularity usr - -.. note:: - - With Singularity, the container's filesystem is **read-only**. - -Bind mounting -************* - -Specific directories may be *bind mounted* from the host into the container so it can access the -contents directly. On most systems, Singularity is configured to bind mount your home directory by -default:: - - $ echo "print('hello world')" > script.py - $ singularity exec software.sif ls - script.py software.sif - $ singularity exec software.sif python3 script.py - hello world - -Only specific directories are bind mounted. The above example was in ``testuser``'s home directory. -Even though there are many users on this host system, singularity only sees ``/glotzerlab-software`` -(from the container image) and ``testuser`` (bind mounted) in ``/home``:: - - $ singularity exec software.sif ls /home - glotzerlab-software testuser - -You can bind mount additional directories with the ``--bind`` command line option. - -.. tip:: - - Bind the scratch filesystems when launching singularity processes on HPC systems. For example:: - - singularity exec --nv --bind /scratch - -.. seealso:: - - If this doesn't fit your workflow, see the `Singularity documentation - `_ to learn how to specify your own bind mounts (if allowed by your - system administrator). 
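For example, a hypothetical project directory on the scratch filesystem can be bound alongside the defaults and used directly from inside the container (the paths here are placeholders)::

    $ singularity exec --bind /scratch/your-project software.sif \
          python3 /scratch/your-project/analysis.py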
diff --git a/doc/index.rst b/doc/index.rst deleted file mode 100644 index 73a84b99..00000000 --- a/doc/index.rst +++ /dev/null @@ -1,22 +0,0 @@ -Glotzerlab software images -++++++++++++++++++++++++++ - -**glotzerlab-software** deploys software developed by the `Glotzer group -`_ at the `University of Michigan `_ to -HPC resources via conda packages and container images. Use **glotzerlab-software** to install a MPI -and GPU enabled build of HOOMD-blue on a *HPC resource*. Use the standard **conda-forge** provided -HOOMD-blue (``mamba install hoomd`` with no special configuration) for serial execution on individual -workstations. - -.. note:: - - Use conda packages for new installations. The container images are deprecated and will no longer - be maintained after 2024-06-01. - -.. toctree:: - :maxdepth: 1 - - conda - container - changes - license diff --git a/doc/license.rst b/doc/license.rst deleted file mode 100644 index 20045037..00000000 --- a/doc/license.rst +++ /dev/null @@ -1,5 +0,0 @@ -License -======= - -.. literalinclude:: ../LICENSE - :language: none diff --git a/doc/profiling.rst b/doc/profiling.rst deleted file mode 100644 index ed488944..00000000 --- a/doc/profiling.rst +++ /dev/null @@ -1,28 +0,0 @@ -Profiling ---------- - -You can run profiling tools to determine what operations take time while running executables within -the container. Some profiling tools must be executed on the host as they use tight integrations with -the Linux kernel. - -GPU -*** - -Use ``nvprof`` with the option ``--profile-child-processes``:: - - $ nvprof --profile-child-processes singularity exec --nv software.sif python3 lj.py - -On platforms where you need to launch HOOMD with MPI, call ``nvprof`` after ``mpirun``:: - - $ mpirun -n 1 nvprof --profile-child-processes \ - singularity exec --nv software.sif.sif python3 lj.py - -.. note:: - - You may need to add the ``nvprof`` option ``--openacc-profiling off`` on some systems. - -CPU -*** - -Build HOOMD from source to profile it on the CPU. The CPU profiler `perf` does not interoperate -with Singularity containers. 
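As a sketch of that from-source workflow, a host-side (non-container) HOOMD build can be profiled with the standard `perf` commands; the script name is a placeholder::

    $ perf record -g -- python3 lj.py   # record a call-graph profile to perf.data
    $ perf report                       # browse the recorded profile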
diff --git a/doc/requirements.in b/doc/requirements.in deleted file mode 100644 index 664b2d7a..00000000 --- a/doc/requirements.in +++ /dev/null @@ -1 +0,0 @@ -furo==2024.4.27 diff --git a/doc/requirements.txt b/doc/requirements.txt deleted file mode 100644 index ba7e0706..00000000 --- a/doc/requirements.txt +++ /dev/null @@ -1,60 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.12 -# by the following command: -# -# pip-compile requirements.in -# -alabaster==0.7.16 - # via sphinx -babel==2.14.0 - # via sphinx -beautifulsoup4==4.12.3 - # via furo -certifi==2024.2.2 - # via requests -charset-normalizer==3.3.2 - # via requests -docutils==0.21.2 - # via sphinx -furo==2024.4.27 - # via -r requirements.in -idna==3.7 - # via requests -imagesize==1.4.1 - # via sphinx -jinja2==3.1.3 - # via sphinx -markupsafe==2.1.5 - # via jinja2 -packaging==24.0 - # via sphinx -pygments==2.17.2 - # via - # furo - # sphinx -requests==2.31.0 - # via sphinx -snowballstemmer==2.2.0 - # via sphinx -soupsieve==2.5 - # via beautifulsoup4 -sphinx==7.3.7 - # via - # furo - # sphinx-basic-ng -sphinx-basic-ng==1.0.0b2 - # via furo -sphinxcontrib-applehelp==1.0.8 - # via sphinx -sphinxcontrib-devhelp==1.0.6 - # via sphinx -sphinxcontrib-htmlhelp==2.0.5 - # via sphinx -sphinxcontrib-jsmath==1.0.1 - # via sphinx -sphinxcontrib-qthelp==1.0.7 - # via sphinx -sphinxcontrib-serializinghtml==1.1.10 - # via sphinx -urllib3==2.2.1 - # via requests diff --git a/doc/rtd.css b/doc/rtd.css new file mode 100644 index 00000000..467c4d5c --- /dev/null +++ b/doc/rtd.css @@ -0,0 +1,187 @@ +/* Read the Docs promotional block, only applicable to RTD.org + +To support sphinx_rtd_theme, a `wy-menu` element is added. Other themes are +targeted using the theme identifier and use custom elements instead of a CSS +framework html structure. 
+ +*/ + +div.ethical-sidebar, +div.ethical-footer { + display: block !important; +} +.ethical-sidebar, +.ethical-footer { + padding: 0.5em; + margin: 1em 0; +} +.ethical-sidebar img, +.ethical-footer img { + width: 120px; + height: 90px; + display: inline-block; +} +.ethical-sidebar .ethical-callout, +.ethical-footer .ethical-callout { + padding-top: 1em; + clear: both; +} +.ethical-sidebar .ethical-pixel, +.ethical-footer .ethical-pixel, +.ethical-fixedfooter .ethical-pixel { + display: none !important; +} +.ethical-sidebar .ethical-text, +.ethical-footer .ethical-text { + margin-top: 1em; +} +.ethical-sidebar .ethical-image-link, +.ethical-footer .ethical-image-link { + border: 0; +} + +.ethical-sidebar, +.ethical-footer { + background-color: #eee; + border: 1px solid #ccc; + border-radius: 5px; + color: #0a0a0a; + font-size: 14px; + line-height: 20px; +} + +/* Techstack badging */ +.ethical-sidebar ul { + margin: 0 !important; + padding-left: 0; + list-style: none; +} +.ethical-sidebar ul li { + display: inline-block; + background-color: lightskyblue; + color: black; + padding: 0.25em 0.4em; + font-size: 75%; + font-weight: 700; + margin: 0.25em; + border-radius: 0.25rem; + text-align: center; + vertical-align: baseline; + white-space: nowrap; + line-height: 1.41; +} +.ethical-sidebar ul li:not(:last-child) { + margin-right: .25rem; +} + +.ethical-sidebar a, +.ethical-sidebar a:visited, +.ethical-sidebar a:hover, +.ethical-sidebar a:active, +.ethical-footer a, +.ethical-footer a:visited, +.ethical-footer a:hover, +.ethical-footer a:active { + color: #0a0a0a; + text-decoration: none !important; + border-bottom: 0 !important; +} + +.ethical-callout a { + color: #707070 !important; + text-decoration: none !important; +} + +/* Sidebar promotions */ +.ethical-sidebar { + text-align: center; + max-width: 300px; + margin-left: auto; + margin-right: auto; +} + +/* Footer promotions */ +.ethical-footer { + text-align: left; + + font-size: 14px; + line-height: 20px; +} +.ethical-footer img { + float: right; + margin-left: 25px; +} +.ethical-footer .ethical-callout { + text-align: center; +} +.ethical-footer small { + font-size: 10px; +} + +/* Fixed footer promotions */ +.ethical-fixedfooter { + box-sizing: border-box; + position: fixed; + bottom: 0; + left: 0; + z-index: 100; + background-color: #eee; + border-top: 1px solid #bfbfbf; + font-size: 12px; + line-height: 1.5; + padding: 0.5em 1.5em; + text-align: center; + color: #404040; + width: 100%; /* Fallback for Opera Mini */ + width: 100vw; +} +@media (min-width: 769px) { + /* Improve viewing on non-mobile */ + .ethical-fixedfooter { + font-size: 13px; + padding: 1em 1.5em; + } +} +.ethical-fixedfooter .ethical-text:before { + margin-right: 4px; + padding: 2px 6px; + border-radius: 3px; + background-color: #4caf50; + color: #fff; + content: "Sponsored"; +} +.ethical-fixedfooter .ethical-callout { + color: #999; + padding-left: 6px; + white-space: nowrap; +} +.ethical-fixedfooter a, +.ethical-fixedfooter a:hover, +.ethical-fixedfooter a:active, +.ethical-fixedfooter a:visited { + color: #404040; + text-decoration: none; +} +.ethical-fixedfooter .ethical-close { + position: absolute; + top: 0; + right: 5px; + font-size: 20px; + line-height: 20px; +} + +/* Use mdbook theme colors. 
*/ + +.ethical-sidebar { + border: none; + box-shadow: none; + background: var(--sidebar-bg); +} + +.ethical-sidebar:hover { + background: var(--sidebar-non-existant); +} + +.ethical-sidebar a { + color: var(--sidebar-fg); +} diff --git a/doc/software.rst b/doc/software.rst deleted file mode 100644 index 1e05cc8d..00000000 --- a/doc/software.rst +++ /dev/null @@ -1,59 +0,0 @@ -Installed software ------------------- - -**glotzerlab-software** includes the following installed inside the container. - -* `Ubuntu `_ -* `python `_ -* `Glotzer lab `_ open-source software: - - * `coxeter `_ - * `fresnel `_ - * `freud `_ - * `garnett `_ - * `GSD `_ - * `HOOMD-blue `_ - * `plato `_ - * `pythia `_ - * `rowan `_ - * `signac `_ - * `signac-dashboard `_ - * `signac-flow `_ - -* Additional tools commonly used in simulation/analysis workflows: - - * `ffmpeg `_ - * `h5py `_ - * `jupyter `_ - * `matplotlib `_ - * `mpi4py `_ - * `numpy `_ - * `pandas `_ - * `pillow `_ - * `PyQt5 `_ - * `pyyaml `_ - * `scipy `_ - * `scikit-learn `_ - * `scikit-image `_ - -* Compilers, tools, and libraries needed to build additional software: - - * `cmake `_ - * `CUDA `_ - * `cython `_ - * `embree `_ - * `git `_ - * `LLVM/clang `_ - * `pybind11 `_ - * `pytest `_ - * `qhull `_ - * `sqlite3 `_ - * `sphinx `_ - * `TBB `_ - * `zlib `_ - -.. note:: - - Launch python with the ``python3`` command inside the container. - -To request additional software in the container, contact the :doc:`user community `. diff --git a/doc/src/SUMMARY.md b/doc/src/SUMMARY.md new file mode 100644 index 00000000..ce966d31 --- /dev/null +++ b/doc/src/SUMMARY.md @@ -0,0 +1,10 @@ +# Summary + +[Introduction](index.md) + +- [Conda packages](conda.md) + - [Glotzer lab members](glotzer.md) + - [Building packages](build.md) + - [Installing packages](install.md) +- [License](license.md) +--- diff --git a/doc/src/build.md b/doc/src/build.md new file mode 100644 index 00000000..ca3cf4bc --- /dev/null +++ b/doc/src/build.md @@ -0,0 +1,35 @@ +# Building packages + +If you are not a Glotzer Lab member, or you would like to build these packages on a new +resource: + +> Note: Replace `{{ package-manager }}` with the name of your preferred conda compatible +> package manager executable. + +1. Install `conda-build` and `boa` into your environment: + + {{ package-manager }} install conda-build boa + +2. Obtain the **glotzerlab-software** source code: + + git clone https://github.com/glotzerlab/software + +3. Change to the `conda` directory: + + cd software/conda + +4. Load any modules needed to provide compilers, *MPI*, and *CUDA* + (optional). For example: + + module load gcc openmpi cuda + +5. Build the packages: + + ./build.sh hoomd mpi4py \ + --skip-existing \ + --variants "{'cluster': ['{{ cluster-name }}'], 'device': ['gpu'], 'gpu_platform': ['CUDA']}" \ + --output-folder {{ channel-path }} + +> Note: The `output-folder` is the directory where `conda build` will write the +> packages. Set the channel path `file:/{{ channel-path }}` in `.condarc` to match (see +> [install](install.md)). diff --git a/doc/src/conda.md b/doc/src/conda.md new file mode 100644 index 00000000..4d04d78c --- /dev/null +++ b/doc/src/conda.md @@ -0,0 +1,32 @@ +# Conda packages + +**glotzerlab-software** provides [conda] formatted packages built with cluster-specific +*MPI* and *CUDA* libraries. 
Use it to add *MPI-* and *GPU-enabled* builds of the +following software packages to your conda compatible environments on HPC resources: + +- hoomd +- mpi4py + +[conda]: https://docs.conda.io + +> Important: These packages are built for ABI compatibility with packages on the +> **conda-forge** channel. Ensure that you have **no** packages installed +> from the **default** channel before proceeding. + +[miniforge] provides a conda compatible environment pre-configured to install packages +only from **conda-forge**. When you install [miniforge] with default options, replace: + +- `{{ package-manager }}` with `mamba` +- `{{ environment-path }}` with `$HOME/miniforge3` + +[miniforge]: https://github.com/conda-forge/miniforge + +If you are using a different conda compatible package manager, use the appropriate +`{{ package-manager }}` and `{{ environment-path }}`. + +
+> Warning: Do not install any MPI or GPU enabled packages from the conda-forge
+> channel. The generic MPI and CUDA libraries provided by conda-forge will take
+> precedence and prevent the cluster-specific libraries from operating correctly.
+
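As a concrete sketch of that setup, the commands below follow the Miniforge defaults referenced above (`mamba` as the package manager and `$HOME/miniforge3` as the environment path); adjust them if you use a different conda compatible package manager:

```shell
# Download and run the Miniforge installer (installs to $HOME/miniforge3 by default).
curl -L -O "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-$(uname)-$(uname -m).sh"
bash "Miniforge3-$(uname)-$(uname -m).sh" -b -p "$HOME/miniforge3"

# Confirm that conda-forge is the only configured channel before adding the
# cluster-specific channel during installation.
"$HOME/miniforge3/bin/conda" config --show channels
```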
diff --git a/doc/src/glotzer.md b/doc/src/glotzer.md new file mode 100644 index 00000000..c5ed9f97 --- /dev/null +++ b/doc/src/glotzer.md @@ -0,0 +1,59 @@ +# Glotzer lab members + +Members of the **Glotzer lab** can install *precompiled* packages. Follow the +instructions in [on the installation page](install.md) to install the latest stable +release using the following channel paths and module versions: + +| HPC resource | Channel location | Module versions | +|--------------------|-----------------------|-----------------------| +| UMich Great Lakes | `file://nfs/turbo/glotzer/software/conda` | `module load gcc/10.3.0 openmpi/4.1.6 cuda/12.3.0` | +| Purdue ANVIL | `file://anvil/projects/x-dmr140129/software/conda` | `module load gcc/11.2.0 openmpi/4.1.6` | +| NCSA Delta | `file://projects/bbgw/software/conda` | `module load openmpi/4.1.6 cuda/12.3.0` | +| OLCF Frontier | `file://ccs/proj/mat110/software/frontier/conda` | `module load PrgEnv-gnu rocm/5.4.3; module unload darshan-runtime` | +| OLCF Andes | `file://ccs/proj/mat110/software/andes/conda` | `module load gcc/10.3.0 openmpi/4.1.2` | + +## Frontier + +Individual users should install conda compatible environments in their **home +directory** on Frontier. Importing Python packages from this environment will be *very* +slow with large node count jobs. To improve performance, generate a **tar** file from +the environment and store it on Orion. + +```shell +$ tar --directory {{ environment-path }} -cf ${MEMBERWORK}/mat110/conda-env.tar . +``` + +> Important: Repeat this step after you install or update packages with +> `{{ package-manager }}`. + + +> Note: Collaborative projects may maintain a single copy of the software in the +> shared project directory: +> `/ccs/proj/mat110/software/frontier/{{ subproject-name }}`. +> +> Collaborative projects may also utilize a single cached `conda-env.tar`: +> +> $ tar --directory /ccs/proj/mat110/software/frontier/{{ subproject-name }} \ +> -cf ${PROJWORK}/mat110/software/{{ subproject-name} }/conda-env.tar . + +Use the following lines in your job scripts (or interactively with `salloc`) to load the +environment into NVME and execute software from there: + + #SBATCH -C nvme + + module load PrgEnv-gnu rocm/5.4.3 + module unload darshan-runtime + + export CONDA_ENV_ROOT=/mnt/bb/${USER}/conda-env + srun --ntasks-per-node 1 mkdir ${CONDA_ENV_ROOT} + srun --ntasks-per-node 1 tar --directory ${CONDA_ENV_ROOT} -xpf \ + ${MEMBERWORK}/mat110/conda-env.tar + # ${PROJWORK}/mat110/software/{{ subproject-name }}/conda-env.tar # For use with shared projects. + + export PATH=${CONDA_ENV_ROOT}/bin:$PATH + + srun {srun options} command arguments + +> Note: The above script has been tested on environments with all packages installed +> into *base*. You may need to set additional environment variables or source activation +> scripts to activate conda environments within this directory. diff --git a/doc/src/images/umich-block-M.svg b/doc/src/images/umich-block-M.svg new file mode 100644 index 00000000..f5fa9abb --- /dev/null +++ b/doc/src/images/umich-block-M.svg @@ -0,0 +1,79 @@ + + + + + + + + + + + diff --git a/doc/src/index.md b/doc/src/index.md new file mode 100644 index 00000000..676d71c1 --- /dev/null +++ b/doc/src/index.md @@ -0,0 +1 @@ +{{#include ../../README.md}} diff --git a/doc/src/install.md b/doc/src/install.md new file mode 100644 index 00000000..99a3105c --- /dev/null +++ b/doc/src/install.md @@ -0,0 +1,39 @@ +# Installing packages + +1. 
**Configure condarc** to use the local HPC resource channel and + **conda-forge**. + + Replace `{{ environment-path }}/.condarc` with: + + ```yaml + channel_priority: strict + channels: + - file:/{{ channel-path }} + - conda-forge + disallow: + - openmpi + - mpich + - cuda-cudart-dev + ``` + and keep any custom configuration options you would like. + + > Note: The `disallow` section prevents you from accidentally installing *MPI* and + > *GPU* packages from conda-forge. + +2. **Install the packages**: + + {{ package-manager }} install hoomd mpi4py + +3. **Load the module versions** that match those used to build the + package. For example: + + module load openmpi/X.Y.Z cuda/I.J.K + +4. **Execute Python scripts**. Use your HPC resource scheduler to + request compute nodes and use the resources\'s MPI launcher. For + example: + + srun -n 8 python3 script.py + +> Tip: After initial installation and setup is complete, you can update packages to +> their latest versions with `{{package-manager}} update --all`. diff --git a/doc/src/license.md b/doc/src/license.md new file mode 100644 index 00000000..ac3d8bb7 --- /dev/null +++ b/doc/src/license.md @@ -0,0 +1,7 @@ +# License + +**Glotzerlab-software** is available under the following license: + +```plaintext +{{#include ../../LICENSE}} +``` diff --git a/doc/test.rst b/doc/test.rst deleted file mode 100644 index 873c39b7..00000000 --- a/doc/test.rst +++ /dev/null @@ -1,88 +0,0 @@ -Tests ------ - -**glotzerlab-software** provides a set of test jobs to determine the installed software functions and that MPI-enabled -software inter-operates with the host system. - -Copy the test scripts to the current directory:: - - $ singularity exec software.sif bash -c "cp /test/* ." - -.. note:: - - The test scripts assume the image is named ``software.sif`` and is the same directory. - -Submit the test jobs:: - - $ sbatch -A job-cpu.sh - $ sbatch -A job-gpu.sh - -.. note:: - - Replace ``sbatch`` with the appropriate queue submission command if necessary. - -After the jobs complete, examine the test output. 
Here example output:: - - + singularity exec software.sif bash -c set - + grep GLOTZERLAB - GLOTZERLAB_SOFTWARE_CONFIGURATION=bridges2 - GLOTZERLAB_SOFTWARE_GIT_BRANCH=trunk - GLOTZERLAB_SOFTWARE_GIT_SHA=2327dce1a5cf37351abca48d44a39b93359e55ad - GLOTZERLAB_SOFTWARE_TAG=2022.08.19 - + mpirun -n 1 singularity exec software.sif python3 serial-cpu.py - ** Starting serial CPU tests ** - Fresnel version : 0.13.4 - Fresnel device : - Freud version : 2.11.0 - garnett version : 0.7.1 - GSD version : 2.6.0 - HOOMD version : 3.4.0 - HOOMD flags : GPU [CUDA] (11.1) DOUBLE HPMC_MIXED MPI TBB SSE SSE2 SSE3 SSE4_1 SSE4_2 AVX AVX2 - pythia version : 0.3.0 - plato version : 1.12.0 - rowan version : 1.3.0 - signac version : 1.7.0 - flow version : 0.21.0 - - h5py version : 3.7.0 - matplotlib version: 3.5.3 - numpy version : 3.5.3 - pandas version : 3.5.3 - pillow version : 9.2.0 - scipy version : 1.9.0 - sklearn version : 1.1.2 - pyyaml version : 6.0 - ** Serial CPU tests PASSED ** - - + mpirun --npernode 1 singularity exec software.sif python3 mpi-cpu.py - ** Starting MPI CPU tests ** - HOOMD version : 3.4.0 - ** MPI CPU tests PASSED ** - + mpirun --npernode 1 singularity exec software.sif /opt/osu-micro-benchmarks/libexec/osu-micro-benchmarks/mpi/pt2pt/osu_bw - # OSU MPI Bandwidth Test v5.4.1 - # Size Bandwidth (MB/s) - 1 2.53 - 2 5.07 - 4 10.13 - 8 20.31 - 16 40.70 - 32 80.90 - 64 159.37 - 128 314.75 - 256 603.60 - 512 1186.74 - 1024 2437.16 - 2048 4385.84 - 4096 6576.44 - 8192 10170.07 - 16384 12811.31 - 32768 15895.04 - 65536 17412.40 - 131072 18098.46 - 262144 21474.57 - 524288 22771.60 - 1048576 22894.82 - 2097152 22945.78 - 4194304 23162.50 - + echo 'Tests complete.' - Tests complete. diff --git a/doc/theme/head.hbs b/doc/theme/head.hbs new file mode 100644 index 00000000..c4dfadeb --- /dev/null +++ b/doc/theme/head.hbs @@ -0,0 +1,9 @@ + + + diff --git a/doc/theme/index.hbs b/doc/theme/index.hbs new file mode 100644 index 00000000..e8e23aa1 --- /dev/null +++ b/doc/theme/index.hbs @@ -0,0 +1,367 @@ + + + + + + {{ title }} + {{#if is_print }} + + {{/if}} + {{#if base_url}} + + {{/if}} + + + + {{> head}} + + + + + + {{#if favicon_svg}} + + {{/if}} + {{#if favicon_png}} + + {{/if}} + + + + {{#if print_enable}} + + {{/if}} + + + + {{#if copy_fonts}} + + {{/if}} + + + + + + + + {{#each additional_css}} + + {{/each}} + + {{#if mathjax_support}} + + + {{/if}} + + +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ {{> header}} + + + + {{#if search_enabled}} + + {{/if}} + + + + +
+
+ {{{ content }}} + + +
+
+

Development of glotzerlab-software is led by the Glotzer Group at the University of Michigan. +

Copyright © 2024 The Regents of the University of Michigan. +

+
+ + University of Michigan logo + +
+
+
+ + +
+
+ + + +
+ + {{#if live_reload_endpoint}} + + + {{/if}} + + {{#if google_analytics}} + + + {{/if}} + + {{#if playground_line_numbers}} + + {{/if}} + + {{#if playground_copyable}} + + {{/if}} + + {{#if playground_js}} + + + + + + {{/if}} + + {{#if search_js}} + + + + {{/if}} + + + + + + + {{#each additional_js}} + + {{/each}} + + {{#if is_print}} + {{#if mathjax_support}} + + {{else}} + + {{/if}} + {{/if}} + +
+ + diff --git a/docker/.gitignore b/docker/.gitignore deleted file mode 100644 index 86cd3f58..00000000 --- a/docker/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.py -*.txt diff --git a/docker/anvil/test/job-cpu.sh b/docker/anvil/test/job-cpu.sh deleted file mode 100644 index e4301736..00000000 --- a/docker/anvil/test/job-cpu.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -#SBATCH --job-name="test-cpu" -#SBATCH --partition=wholenode -#SBATCH --nodes=2 -#SBATCH --ntasks=2 -#SBATCH --cpus-per-task=128 -#SBATCH -t 0:20:00 - -set -x - -singularity exec software.sif bash -c "set" | grep GLOTZERLAB - -mpirun -n 1 singularity exec software.sif python3 serial-cpu.py - -mpirun --npernode 1 singularity exec software.sif python3 mpi-cpu.py - -mpirun --npernode 1 singularity exec software.sif /opt/osu-micro-benchmarks/libexec/osu-micro-benchmarks/mpi/pt2pt/osu_bw - -echo "Tests complete." diff --git a/docker/bridges2/test/job-cpu.sh b/docker/bridges2/test/job-cpu.sh deleted file mode 100644 index 6bdec437..00000000 --- a/docker/bridges2/test/job-cpu.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -#SBATCH --job-name="test-cpu" -#SBATCH --partition=RM -#SBATCH --nodes=2 -#SBATCH --ntasks=2 -#SBATCH --cpus-per-task=128 -#SBATCH -t 0:20:00 - -module load openmpi/4.0.5-gcc10.2.0 - -set -x - -singularity exec software.sif bash -c "set" | grep GLOTZERLAB - -mpirun -n 1 singularity exec software.sif python3 serial-cpu.py - -mpirun --npernode 1 singularity exec software.sif python3 mpi-cpu.py - -mpirun --npernode 1 singularity exec software.sif /opt/osu-micro-benchmarks/libexec/osu-micro-benchmarks/mpi/pt2pt/osu_bw - -echo "Tests complete." diff --git a/docker/bridges2/test/job-gpu.sh b/docker/bridges2/test/job-gpu.sh deleted file mode 100644 index 852a2f58..00000000 --- a/docker/bridges2/test/job-gpu.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -#SBATCH --job-name="test-gpu" -#SBATCH --partition=GPU -#SBATCH --nodes=2 -#SBATCH --gpus-per-node=8 -#SBATCH --ntasks-per-node=1 -#SBATCH -t 0:10:00 - -module load openmpi/4.0.5-gcc10.2.0 - -set -x - -singularity exec software.sif bash -c "set" | grep GLOTZERLAB - -mpirun -v -n 1 singularity exec --nv software.sif python3 serial-gpu.py - -mpirun -v singularity exec --nv software.sif python3 mpi-gpu.py - -mpirun -v singularity exec software.sif /opt/osu-micro-benchmarks/libexec/osu-micro-benchmarks/mpi/pt2pt/osu_bibw - -echo "Tests complete." diff --git a/docker/delta/test/.keep b/docker/delta/test/.keep deleted file mode 100644 index e69de29b..00000000 diff --git a/docker/delta/test/job-cpu.sh b/docker/delta/test/job-cpu.sh deleted file mode 100644 index fe5d0fbb..00000000 --- a/docker/delta/test/job-cpu.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -#SBATCH --job-name="test-cpu" -#SBATCH --partition=cpu -#SBATCH --nodes=2 -#SBATCH --ntasks-per-node=1 -#SBATCH --export=ALL -#SBATCH -t 0:10:00 - -module load gcc/11.4.0 openmpi/4.1.6 - -set -x - -singularity exec software.sif bash -c "set" | grep GLOTZERLAB - -mpirun -n 1 singularity exec software.sif python3 serial-cpu.py - -mpirun singularity exec software.sif python3 mpi-cpu.py - -mpirun singularity exec software.sif /opt/osu-micro-benchmarks/libexec/osu-micro-benchmarks/mpi/pt2pt/osu_bibw - -echo "Tests complete." 
diff --git a/docker/delta/test/job-gpu.sh b/docker/delta/test/job-gpu.sh deleted file mode 100644 index 0b31f2a5..00000000 --- a/docker/delta/test/job-gpu.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -#SBATCH --job-name="test-gpu" -#SBATCH --partition=gpuA100x4 -#SBATCH --nodes=2 -#SBATCH --ntasks-per-node=1 -#SBATCH --gpus-per-node=4 -#SBATCH --export=ALL -#SBATCH -t 0:10:00 - -module load gcc/11.4.0 openmpi/4.1.6 - -set -x - -singularity exec software.sif bash -c "set" | grep GLOTZERLAB - -mpirun -n 1 singularity exec --nv software.sif python3 serial-gpu.py - -mpirun singularity exec --nv software.sif python3 mpi-gpu.py - -mpirun singularity exec software.sif /opt/osu-micro-benchmarks/libexec/osu-micro-benchmarks/mpi/pt2pt/osu_bibw - -echo "Tests complete." diff --git a/docker/expanse-gpu/test/job-gpu.sh b/docker/expanse-gpu/test/job-gpu.sh deleted file mode 100644 index 5dde1f6e..00000000 --- a/docker/expanse-gpu/test/job-gpu.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -#SBATCH --job-name="test-gpu" -#SBATCH --partition=gpu -#SBATCH --nodes=1 -#SBATCH --ntasks-per-node=2 -#SBATCH --gpus=4 -#SBATCH -t 0:10:00 - -module load gpu/0.15.4 singularitypro openmpi/4.0.4-nocuda - -set -x - -singularity exec software.sif bash -c "set" | grep GLOTZERLAB - -singularity exec --nv software.sif python3 serial-gpu.py - -mpirun -v singularity exec --nv software.sif python3 mpi-gpu.py - -mpirun -v singularity exec software.sif /opt/osu-micro-benchmarks/libexec/osu-micro-benchmarks/mpi/pt2pt/osu_bibw - -echo "Tests complete." diff --git a/docker/expanse/test/job-cpu.sh b/docker/expanse/test/job-cpu.sh deleted file mode 100644 index b1569c50..00000000 --- a/docker/expanse/test/job-cpu.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -#SBATCH --job-name="test-cpu" -#SBATCH --partition=compute -#SBATCH --nodes=2 -#SBATCH --ntasks-per-node=1 -#SBATCH -t 0:10:00 - -module load cpu/0.15.4 singularitypro gcc/9.2.0 openmpi/4.1.1 - -set -x - -singularity exec software.sif bash -c "set" | grep GLOTZERLAB - -singularity exec software.sif python3 serial-cpu.py - -mpirun --npernode 1 singularity exec software.sif python3 mpi-cpu.py - -mpirun --npernode 1 singularity exec software.sif /opt/osu-micro-benchmarks/libexec/osu-micro-benchmarks/mpi/pt2pt/osu_bibw - -echo "Tests complete." diff --git a/docker/greatlakes/test/.keep b/docker/greatlakes/test/.keep deleted file mode 100644 index e69de29b..00000000 diff --git a/docker/greatlakes/test/job-cpu.sh b/docker/greatlakes/test/job-cpu.sh deleted file mode 100644 index 63c3dc70..00000000 --- a/docker/greatlakes/test/job-cpu.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -#SBATCH --job-name="test-cpu" -#SBATCH --partition=standard -#SBATCH --nodes=2 -#SBATCH --ntasks=2 -#SBATCH --cpus-per-task=1 -#SBATCH --export=ALL -#SBATCH -t 0:10:00 - -module load gcc/10.3.0 openmpi/4.1.6 singularity - -set -x -cd $SLURM_SUBMIT_DIR - -srun -u --export=ALL -n 1 singularity exec --bind /scratch,/gpfs software.sif python3 serial-cpu.py - -srun -u --export=ALL singularity exec --bind /scratch,/gpfs software.sif python3 mpi-cpu.py - -srun -u --export=ALL singularity exec --bind /scratch,/gpfs software.sif /opt/osu-micro-benchmarks/libexec/osu-micro-benchmarks/mpi/pt2pt/osu_bibw - -echo "Tests complete." 
diff --git a/docker/greatlakes/test/job-gpu.sh b/docker/greatlakes/test/job-gpu.sh deleted file mode 100644 index 4150d3b7..00000000 --- a/docker/greatlakes/test/job-gpu.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -#SBATCH --job-name="test-gpu" -#SBATCH --partition=gpu -#SBATCH --nodes=1 -#SBATCH --ntasks-per-node=1 -#SBATCH --gres=gpu:1 -#SBATCH --export=ALL -#SBATCH -t 0:10:00 - -module load gcc/10.3.0 openmpi/4.1.6 singularity - -set -x -cd $SLURM_SUBMIT_DIR - -srun -u --export=ALL -n 1 singularity exec --bind /scratch,/gpfs --nv software.sif python3 serial-gpu.py - -echo "Tests complete." diff --git a/docker/nompi/test/.keep b/docker/nompi/test/.keep deleted file mode 100644 index e69de29b..00000000 diff --git a/make_dockerfiles.py b/make_dockerfiles.py deleted file mode 100755 index 6868c778..00000000 --- a/make_dockerfiles.py +++ /dev/null @@ -1,178 +0,0 @@ -#!/usr/bin/env python - -import jinja2 -import yaml -import multiprocessing - -# software versions -versions = {} - -# load package versions -with open('packages.yml', 'r') as f: - packages = yaml.safe_load(f) - -for name, p in packages.items(): - versions[f'{name.upper()}_VERSION'] = p['version'] - -if __name__ == '__main__': - - def write(fname, templates, **kwargs): - with open(fname, 'w') as f: - for template in templates: - f.write(template.render(**kwargs)) - - env = jinja2.Environment(loader=jinja2.FileSystemLoader('template')) - base_template = env.get_template('base.jinja') - crusher_template = env.get_template('crusher.jinja') - frontier_template = env.get_template('frontier.jinja') - ib_mlx_template = env.get_template('ib-mlx.jinja') - openmpi_template = env.get_template('openmpi.jinja') - pmix_template = env.get_template('pmix.jinja') - mvapich2_template = env.get_template('mvapich2.jinja') - ucx_template = env.get_template('ucx.jinja') - glotzerlab_software_template = env.get_template('glotzerlab-software.jinja') - finalize_template = env.get_template('finalize.jinja') - test_template = env.get_template('test.jinja') - - write('docker/nompi/Dockerfile', [base_template, glotzerlab_software_template, test_template, finalize_template], - FROM='nvidia/cuda:11.8.0-devel-ubuntu20.04', - ENABLE_MPI='off', - MAKEJOBS=multiprocessing.cpu_count()+2, - CUDA_VERSION='11.8', - **versions) - - # see https://stackoverflow.com/questions/5470257/how-to-see-which-flags-march-native-will-activate - # for information on obtaining CFLAGS settings for specific machines - # gcc -'###' -E - -march=native 2>&1 | sed -r '/cc1/!d;s/(")|(^.* - )|( -mno-[^\ ]+)//g' - - write('docker/greatlakes/Dockerfile', [base_template, - ib_mlx_template, - pmix_template, - ucx_template, - openmpi_template, - glotzerlab_software_template, - finalize_template], - FROM='nvidia/cuda:11.8.0-devel-ubuntu20.04', - system='greatlakes', - CUDA_VERSION='11.8', - OPENMPI_VERSION='4.1', - OPENMPI_PATCHLEVEL='6rc2', - UCX_VERSION='1.15.0', - UCX_RC='-rc5', - PMIX_VERSION='4.2.6', - ENABLE_MPI='on', - MAKEJOBS=multiprocessing.cpu_count()+2, - CFLAGS='-march=skylake-avx512 -mmmx -msse -msse2 -msse3 -mssse3 -mcx16 -msahf -mmovbe -maes -mpclmul -mpopcnt -mabm -mfma -mbmi -mbmi2 -mavx -mavx2 -msse4.2 -msse4.1 -mlzcnt -mrtm -mhle -mrdrnd -mf16c -mfsgsbase -mrdseed -mprfchw -madx -mfxsr -mxsave -mxsaveopt -mavx512f -mavx512cd -mclflushopt -mxsavec -mxsaves -mavx512dq -mavx512bw -mavx512vl -mclwb -mpku --param l1-cache-size=32 --param l1-cache-line-size=64 --param l2-cache-size=25344 -mtune=skylake-avx512', - **versions) - - write('docker/delta/Dockerfile', [base_template, - 
ib_mlx_template, - pmix_template, - openmpi_template, - glotzerlab_software_template, - finalize_template], - FROM='nvidia/cuda:12.3.1-devel-ubuntu20.04', - system='delta', - CUDA_VERSION='12.3', - OPENMPI_VERSION='4.1', - OPENMPI_PATCHLEVEL='6', - PMIX_VERSION='5.0.1', - LIBFABRIC_VERSION='1.15.2', - ENABLE_MPI='on', - MAKEJOBS=multiprocessing.cpu_count()+2, - CFLAGS='-march=znver2 -mmmx -msse -msse2 -msse3 -mssse3 -msse4a -mcx16 -msahf -mmovbe -maes -msha -mpclmul -mpopcnt -mabm -mfma -mbmi -mbmi2 -mwbnoinvd -mavx -mavx2 -msse4.2 -msse4.1 -mlzcnt -mrdrnd -mf16c -mfsgsbase -mrdseed -mprfchw -madx -mfxsr -mxsave -mxsaveopt -mclflushopt -mxsavec -mxsaves -mclwb -mmwaitx -mclzero -mrdpid --param l1-cache-size=32 --param l1-cache-line-size=64 --param l2-cache-size=512 -mtune=znver2', - **versions) - - write('docker/anvil/Dockerfile', [base_template, - ib_mlx_template, - ucx_template, - openmpi_template, - glotzerlab_software_template, - finalize_template], - FROM='nvidia/cuda:11.7.1-devel-ubuntu20.04', - system='anvil', - CUDA_VERSION='11.7', - OPENMPI_VERSION='4.0', - OPENMPI_PATCHLEVEL='6', - UCX_VERSION='1.14.0', - ENABLE_MPI='on', - MAKEJOBS=multiprocessing.cpu_count()+2, - CFLAGS='-march=znver2 -mmmx -msse -msse2 -msse3 -mssse3 -msse4a -mcx16 -msahf -mmovbe -maes -msha -mpclmul -mpopcnt -mabm -mfma -mbmi -mbmi2 -mwbnoinvd -mavx -mavx2 -msse4.2 -msse4.1 -mlzcnt -mrdrnd -mf16c -mfsgsbase -mrdseed -mprfchw -madx -mfxsr -mxsave -mxsaveopt -mclflushopt -mxsavec -mxsaves -mclwb -mmwaitx -mclzero -mrdpid --param l1-cache-size=32 --param l1-cache-line-size=64 --param l2-cache-size=512 -mtune=znver2', - # unsupported with ubuntu 20.04 compiler - try with 22.04? - # CFLAGS='-march=znver3 -mmmx -mpopcnt -msse -msse2 -msse3 -mssse3 -msse4.1 -msse4.2 -mavx -mavx2 -msse4a -mfma -mbmi -mbmi2 -maes -mpclmul -mvpclmulqdq -madx -mabm -mclflushopt -mclwb -mclzero -mcx16 -mf16c -mfsgsbase -mfxsr -msahf -mlzcnt -mmovbe -mmwaitx -mpku -mprfchw -mrdpid -mrdrnd -mrdseed -msha -mshstk -mvaes -mwbnoinvd -mxsave -mxsavec -mxsaveopt -mxsaves --param l1-cache-size=32 --param l1-cache-line-size=64 --param l2-cache-size=512 -mtune=znver3 -dumpbase', - **versions) - - write('script/crusher/install.sh', [crusher_template, glotzerlab_software_template], - MAKEJOBS=32, - CFLAGS='-march=native', - output='script', - system='crusher', - ENABLE_MPI='on', - ENABLE_TBB='off', - ENABLE_LLVM='off', - HOOMD_GPU_PLATFORM='HIP', - **versions) - - write('script/frontier/install.sh', [frontier_template, glotzerlab_software_template], - MAKEJOBS=32, - CFLAGS='-march=native', - output='script', - system='frontier', - ENABLE_MPI='on', - ENABLE_TBB='off', - ENABLE_LLVM='off', - HOOMD_GPU_PLATFORM='HIP', - **versions) - - write('docker/bridges2/Dockerfile', [base_template, - ib_mlx_template, - ucx_template, - openmpi_template, - glotzerlab_software_template, - finalize_template], - FROM='nvidia/cuda:11.1.1-devel-ubuntu20.04', - system='bridges2', - CUDA_VERSION='11.1', - OPENMPI_VERSION='4.0', - OPENMPI_PATCHLEVEL='5', - UCX_VERSION='1.9.0', - ENABLE_MPI='on', - MAKEJOBS=multiprocessing.cpu_count()+2, - CFLAGS='-march=znver2 -mmmx -msse -msse2 -msse3 -mssse3 -msse4a -mcx16 -msahf -mmovbe -maes -msha -mpclmul -mpopcnt -mabm -mfma -mbmi -mbmi2 -mwbnoinvd -mavx -mavx2 -msse4.2 -msse4.1 -mlzcnt -mrdrnd -mf16c -mfsgsbase -mrdseed -mprfchw -madx -mfxsr -mxsave -mxsaveopt -mclflushopt -mxsavec -mxsaves -mclwb -mmwaitx -mclzero -mrdpid --param l1-cache-size=32 --param l1-cache-line-size=64 --param l2-cache-size=512 -mtune=znver2 
-fasynchronous-unwind-tables -fstack-protector-strong -Wformat -Wformat-security -fstack-clash-protection -fcf-protection', - **versions) - - write('docker/expanse/Dockerfile', [base_template, - ib_mlx_template, - pmix_template, - ucx_template, - openmpi_template, - glotzerlab_software_template, - finalize_template], - FROM='nvidia/cuda:11.1.1-devel-ubuntu20.04', - system='expanse', - CUDA_VERSION='11.1', - OPENMPI_VERSION='4.1', - OPENMPI_PATCHLEVEL='1', - PMIX_VERSION='4.1.2', - UCX_VERSION='1.10.1', - ENABLE_MPI='on', - MAKEJOBS=multiprocessing.cpu_count()+2, - CFLAGS='-march=znver2 -mmmx -msse -msse2 -msse3 -mssse3 -msse4a -mcx16 -msahf -mmovbe -maes -msha -mpclmul -mpopcnt -mabm -mfma -mbmi -mbmi2 -mwbnoinvd -mavx -mavx2 -msse4.2 -msse4.1 -mlzcnt -mrdrnd -mf16c -mfsgsbase -mrdseed -mprfchw -madx -mfxsr -mxsave -mxsaveopt -mclflushopt -mxsavec -mxsaves -mclwb -mmwaitx -mclzero -mrdpid --param l1-cache-size=32 --param l1-cache-line-size=64 --param l2-cache-size=512 -mtune=znver2', - **versions) - - write('docker/expanse-gpu/Dockerfile', [base_template, - ib_mlx_template, - ucx_template, - openmpi_template, - glotzerlab_software_template, - finalize_template], - FROM='nvidia/cuda:11.1.1-devel-ubuntu20.04', - system='expanse-gpu', - CUDA_VERSION='11.1', - OPENMPI_VERSION='4.0', - OPENMPI_PATCHLEVEL='4', - UCX_VERSION='1.8.0', - ENABLE_MPI='on', - MAKEJOBS=multiprocessing.cpu_count()+2, - CFLAGS='-march=knl -mmmx -msse -msse2 -msse3 -mssse3 -mcx16 -msahf -mmovbe -maes -mpclmul -mpopcnt -mabm -mfma -mbmi -mbmi2 -mavx -mavx2 -msse4.2 -msse4.1 -mlzcnt -mrtm -mhle -mrdrnd -mf16c -mfsgsbase -mrdseed -mprfchw -madx -mfxsr -mxsave -mxsaveopt -mavx512f -mavx512cd -mclflushopt -mxsavec -mxsaves -mavx512dq -mavx512bw -mavx512vl -mclwb -mpku --param l1-cache-size=32 --param l1-cache-line-size=64 --param l2-cache-size=28160 -mtune=generic', - **versions) diff --git a/packages.yml b/packages.yml deleted file mode 100644 index 410e1229..00000000 --- a/packages.yml +++ /dev/null @@ -1,41 +0,0 @@ -cereal: - ignore: alpha,rc,beta - include_v_prefix: false - repository: https://github.com/USCiLab/cereal - version: 1.3.2 -eigen: - ignore: alpha,rc,beta,before,after,actual,starting - include_v_prefix: false - repository: https://gitlab.com/libeigen/eigen - version: 3.4.0 -embree: - ignore: alpha,rc,beta,ploc - include_v_prefix: false - repository: https://github.com/embree/embree - version: 4.3.1 -fresnel: - ignore: alpha,rc,beta - repository: https://github.com/glotzerlab/fresnel - version: v0.13.5 -hoomd: - ignore: beta,alpha,rc - repository: https://github.com/glotzerlab/hoomd-blue - version: v4.6.0 -osu_microbenchmark: - repository: null - version: 5.4.1 -pybind11: - ignore: alpha,rc,beta - include_v_prefix: false - repository: https://github.com/pybind/pybind11 - version: 2.12.0 -qhull: - ignore: alpha,rc,beta - include_v_prefix: false - repository: https://github.com/qhull/qhull - version: 8.0.2 -tbb: - ignore: alpha,rc,beta,v2021.3.0 - include_v_prefix: false - repository: https://github.com/oneapi-src/oneTBB - version: 2021.11.0 diff --git a/requirements-cupy.txt b/requirements-cupy.txt deleted file mode 100644 index 0c2725dd..00000000 --- a/requirements-cupy.txt +++ /dev/null @@ -1 +0,0 @@ -cupy==13.0.0 diff --git a/requirements-mpi.txt b/requirements-mpi.txt deleted file mode 100644 index 73b18cfb..00000000 --- a/requirements-mpi.txt +++ /dev/null @@ -1 +0,0 @@ -mpi4py==3.1.5 diff --git a/requirements-source.txt b/requirements-source.txt deleted file mode 100644 index e0b13293..00000000 --- 
a/requirements-source.txt +++ /dev/null @@ -1,7 +0,0 @@ -Cython==3.0.10 -numpy==1.26.4 -pkgconfig==1.5.5 -pybind11==2.12.0 -pythran==0.15.0 -scikit-build==0.17.6 -scipy==1.13.0 diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 4dbbc32e..00000000 --- a/requirements.txt +++ /dev/null @@ -1,45 +0,0 @@ -# glotzer tools -freud-analysis==3.0.0 -gsd==3.2.1 -garnett==0.7.1 -pythia-learn==0.3.0 -rowan==1.3.0.post1 -coxeter==0.8.0 -plato-draw==1.12.0 -signac==2.2.0 -signac-flow==0.29.0 -signac-dashboard==0.6.1 - -# transitive dependencies -Werkzeug==3.0.2 -flask-login==0.6.3 -flask==3.0.3 -pyarrow==15.0.2 - -# commonly used packages -h5py==3.10.0 -jupyter==1.0.0 -jupyterlab==4.1.6 -matplotlib==3.8.4 -notebook==7.1.2 -opencv-python==4.9.0.80 -pandas==2.2.1 -Pillow==10.3.0 -pyqt5==5.15.10 -pyyaml==6.0.1 -scikit-learn==1.4.1.post1 -scikit-image==0.22.0 -statsmodels==0.14.1 - - -# test requirements -hypothesis==6.100.0 -pytest==8.1.1 -miniball==1.2.0 -sympy==1.12 -tables==3.9.2 -coverage==7.4.4 -click==8.1.7 -pytest-cov==5.0.0 -ruamel.yaml==0.18.6 -gitpython==3.1.43 diff --git a/script/crusher/install.sh b/script/crusher/install.sh deleted file mode 100755 index f8f9021a..00000000 --- a/script/crusher/install.sh +++ /dev/null @@ -1,188 +0,0 @@ -#!/usr/bin/bash - -set -u -set -e - -usage="$(basename "$0") root -- Build software and install in root." - -if [[ $# -lt 1 || $# -gt 1 || $1 == "-h" ]] -then - echo "$usage" - exit 0 -fi - -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -ROOT=$1 -module reset -module load cray-python/3.9.13.1 -python3 -m venv $ROOT - -cat >$ROOT/environment.sh << EOL -module reset -module load PrgEnv-gnu -module load cmake/3.23.2 -module load git/2.36.1 -module load rocm/5.1.0 -module load cray-python/3.9.13.1 -module load hdf5/1.12.1 -module load ninja/1.10.2 -module load tmux/3.2a - -# The cray-mpich module does not provide this, it is needed to build mpi4py from source. -export MPICC=\$CRAY_MPICH_DIR/bin/mpicc - -export LD_LIBRARY_PATH=$ROOT/lib:\$LD_LIBRARY_PATH -export PATH=$ROOT/bin:\$PATH -export CPATH=$ROOT/include -export LIBRARY_PATH=$ROOT/lib -export VIRTUAL_ENV=$ROOT -export CMAKE_PREFIX_PATH=$ROOT -export CC=\$GCC_PATH/bin/gcc -export CXX=\$GCC_PATH/bin/g++ - -# Settings to build cupy for rocm: https://docs.cupy.dev/en/stable/install.html -export CUPY_INSTALL_USE_HIP=1 -export ROCM_HOME=\$OLCF_ROCM_ROOT -export HCC_AMDGPU_TARGET=gfx90a - -export PYTHONUNBUFFERED=1 -EOL - -source $ROOT/environment.sh - -set -x - -BUILDDIR=`mktemp -d` -mkdir -p $BUILDDIR - -# deletes the temp directory -function cleanup { - rm -rf "$BUILDDIR" - echo "Deleted temp working directory $BUILDDIR" -} - -trap cleanup EXIT -cd $BUILDDIR - -cp -a $DIR/../../*.txt $BUILDDIR -cd $BUILDDIR - -set -x -python3 -m pip install --upgrade pip setuptools wheel -python3 -m pip install -r requirements-mpi.txt - -# TBB -if [ ! -f $ROOT/lib64/libtbb.so ] -then -curl -sSLO https://github.com/oneapi-src/oneTBB/archive/v2021.11.0.tar.gz \ - && tar -xzf v2021.11.0.tar.gz -C . \ - && cd oneTBB-2021.11.0 \ - && cmake -S . -B build -DTBB_TEST=off -DTBB_STRICT=off -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$ROOT \ - && cmake --build build -j 32 \ - && cmake --install build \ - && cd .. \ - && rm -rf oneTBB-2021.11.0 \ - && rm v2021.11.0.tar.gz \ - || exit 1 -fi - -# Embree -if [ ! 
-f $ROOT/lib64/libembree4.so ] -then -curl -sSL https://github.com/embree/embree/archive/v4.3.1/embree-4.3.1.tar.gz | tar -xzC $BUILDDIR \ - && cd $BUILDDIR/embree-4.3.1 \ - && mkdir build && cd build \ - && cmake ../ -DCMAKE_INSTALL_PREFIX=$ROOT -DCMAKE_INSTALL_LIBDIR=lib64/ -DCMAKE_BUILD_TYPE=Release -DEMBREE_TUTORIALS=OFF -DEMBREE_MAX_ISA="AVX2" -DEMBREE_ISPC_SUPPORT=OFF \ - && make install -j 32 \ - && cd $BUILDDIR/ && rm -rf embree-* -fi - -# install pybind11 headers -if [ ! -f $ROOT/include/pybind11/pybind11.h ] -then -curl -SL https://github.com/pybind/pybind11/archive/v2.12.0.tar.gz | tar -xzC $BUILDDIR && \ - cd pybind11-2.12.0 && \ - mkdir build && cd build && \ - cmake ../ -DCMAKE_INSTALL_PREFIX=$ROOT -DPYBIND11_TEST=off && \ - make install && \ - cd $BUILDDIR && rm -rf pybind11-* -fi - -# install cereal headers -if [ ! -f $ROOT/include/cereal/cereal.hpp ] -then -curl -SL https://github.com/USCiLab/cereal/archive/v1.3.2.tar.gz | tar -xzC $BUILDDIR && \ - cd cereal-1.3.2 && \ - mkdir build && cd build && \ - cmake ../ -DCMAKE_INSTALL_PREFIX=$ROOT -DJUST_INSTALL_CEREAL=on && \ - make install && \ - cd $BUILDDIR && rm -rf cereal-* -fi - -# install eigen headers -if [ ! -f $ROOT/include/eigen3/Eigen/Eigen ] -then -curl -SL https://gitlab.com/libeigen/eigen/-/archive/3.4.0/eigen-3.4.0.tar.gz | tar -xzC $BUILDDIR && \ - cd eigen-3.4.0 && \ - mkdir build && cd build && \ - cmake ../ -DCMAKE_INSTALL_PREFIX=$ROOT -DBUILD_TESTING=off, -DEIGEN_TEST_NOQT=on && \ - make install && \ - cd $BUILDDIR && rm -rf eigen-* -fi - - - - - -# Install packages that are build requirements of other packages first. -# Use the pip cache in script builds to reduce time when rerunning the install script. - - export CFLAGS="-march=native" CXXFLAGS="-march=native"\ - && python3 -m pip install -r requirements-source.txt \ - || exit 1 - - export CFLAGS="-march=native" CXXFLAGS="-march=native" \ - && python3 -m pip install --no-build-isolation --no-binary freud-analysis,gsd -r requirements.txt \ - && chmod o+rX `python3 -c "import site; print(site.getsitepackages()[0])"`/flow/templates/* \ - || exit 1 - - - - - - -if [ ! -n "$(ls -d $ROOT/lib/python*/site-packages/fresnel)" ] -then - - git clone --recursive --branch v0.13.5 --depth 1 https://github.com/glotzerlab/fresnel \ - && cd fresnel \ - && mkdir -p build \ - && cd build \ - && export CFLAGS="-march=native" CXXFLAGS="-march=native" \ - && cmake ../ -DENABLE_EMBREE=on -DENABLE_OPTIX=off -Dembree_DIR=/opt/embree-4.3.1.x86_64.linux -DCMAKE_INSTALL_PREFIX=`python3 -c "import site; print(site.getsitepackages()[0])"` \ - && make install -j32 \ - && cd ../../ \ - && rm -rf fresnel \ - || exit 1 - - -fi - - - -if [ ! -n "$(ls -d $ROOT/lib/python*/site-packages/hoomd)" ] -then - - git clone --recursive --branch v4.6.0 --depth 1 https://github.com/glotzerlab/hoomd-blue hoomd \ - && cd hoomd \ - && mkdir -p build \ - && cd build \ - && export CFLAGS="-march=native" CXXFLAGS="-march=native" \ - && cmake ../ -DPYTHON_EXECUTABLE="`which python3`" -DENABLE_GPU=on -DENABLE_MPI=on -DENABLE_TBB=off -DENABLE_LLVM=off -DBUILD_TESTING=off -DENABLE_MPI_CUDA=off -DHOOMD_GPU_PLATFORM=HIP \ - && make install -j32 \ - && cd ../../ \ - && rm -rf hoomd \ - || exit 1 - - -fi diff --git a/script/frontier/install.sh b/script/frontier/install.sh deleted file mode 100755 index 7d9eb6f7..00000000 --- a/script/frontier/install.sh +++ /dev/null @@ -1,216 +0,0 @@ -#!/usr/bin/bash - -set -u -set -e - -usage="$(basename "$0") root -- Build software and install in root." 
- -if [[ $# -lt 1 || $# -gt 1 || $1 == "-h" ]] -then - echo "$usage" - exit 0 -fi - -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -ROOT=$(realpath $1) -echo "Installing glotzerlab-software to $ROOT" -module purge -module load PrgEnv-gnu -module load cray-python/3.10.10 -python3 -m venv $ROOT - -cat >$ROOT/variables.sh << EOL -module purge -module load PrgEnv-gnu -module load cmake/3.23.2 -module load git/2.36.1 -module load rocm/5.4.3 -module load cray-python/3.10.10 -module load hdf5/1.14.0 -module load ninja/1.10.2 -module load tmux/3.2a -module unload darshan-runtime - -# The cray-mpich module does not provide this, it is needed to build mpi4py from source. -export MPICC=\$CRAY_MPICH_DIR/bin/mpicc - -export LD_LIBRARY_PATH=\$GLOTZERLAB_SOFTWARE_ROOT/lib:\$LD_LIBRARY_PATH -export PATH=\$GLOTZERLAB_SOFTWARE_ROOT/bin:\$PATH -export CPATH=\$GLOTZERLAB_SOFTWARE_ROOT/include -export LIBRARY_PATH=\$GLOTZERLAB_SOFTWARE_ROOT/lib -export VIRTUAL_ENV=\$GLOTZERLAB_SOFTWARE_ROOT -export CMAKE_PREFIX_PATH=\$GLOTZERLAB_SOFTWARE_ROOT -export PYTHONPATH=\$(\${GLOTZERLAB_SOFTWARE_ROOT}/bin/python -c 'import site; print(site.getsitepackages()[0])') -export CC=\$GCC_PATH/bin/gcc -export CXX=\$GCC_PATH/bin/g++ - -# Settings to build cupy for rocm: https://docs.cupy.dev/en/stable/install.html -export CUPY_INSTALL_USE_HIP=1 -export ROCM_HOME=\$OLCF_ROCM_ROOT -export HCC_AMDGPU_TARGET=gfx90a - -export PYTHONUNBUFFERED=1 - -# work around PMI_Init mmap sync errors -export PMI_MMAP_SYNC_WAIT_TIME=1800 -EOL - -cat >$ROOT/environment.sh << EOL -export GLOTZERLAB_SOFTWARE_ROOT=$ROOT -source \$GLOTZERLAB_SOFTWARE_ROOT/variables.sh -EOL - -cat >$ROOT/generate-tar-cache.sh << EOL -#! /usr/bin/bash -usage="\$(basename "\$0") output-file -- Generate a tar cache file." - -if [[ \$# -lt 1 || \$# -gt 1 || \$1 == "-h" ]] -then - echo "\$usage" - exit 0 -fi - -DEST=\$(realpath \$1) - -tar --directory $ROOT --exclude software.tar -cf \$DEST . -EOL -chmod ug+x $ROOT/generate-tar-cache.sh - -source $ROOT/environment.sh - -set -x - -BUILDDIR=`mktemp -d` -mkdir -p $BUILDDIR - -# deletes the temp directory -function cleanup { - rm -rf "$BUILDDIR" - echo "Deleted temp working directory $BUILDDIR" -} - -trap cleanup EXIT -cd $BUILDDIR - -cp -a $DIR/../../*.txt $BUILDDIR -cd $BUILDDIR - -set -x -python3 -m pip install --upgrade pip setuptools wheel -python3 -m pip install -r requirements-mpi.txt - -# TBB -if [ ! -f $ROOT/lib64/libtbb.so ] -then -curl -sSLO https://github.com/oneapi-src/oneTBB/archive/v2021.11.0.tar.gz \ - && tar -xzf v2021.11.0.tar.gz -C . \ - && cd oneTBB-2021.11.0 \ - && cmake -S . -B build -DTBB_TEST=off -DTBB_STRICT=off -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$ROOT \ - && cmake --build build -j 32 \ - && cmake --install build \ - && cd .. \ - && rm -rf oneTBB-2021.11.0 \ - && rm v2021.11.0.tar.gz \ - || exit 1 -fi - -# Embree -if [ ! -f $ROOT/lib64/libembree4.so ] -then -curl -sSL https://github.com/embree/embree/archive/v4.3.1/embree-4.3.1.tar.gz | tar -xzC $BUILDDIR \ - && cd $BUILDDIR/embree-4.3.1 \ - && mkdir build && cd build \ - && cmake ../ -DCMAKE_INSTALL_PREFIX=$ROOT -DCMAKE_INSTALL_LIBDIR=lib64/ -DCMAKE_BUILD_TYPE=Release -DEMBREE_TUTORIALS=OFF -DEMBREE_MAX_ISA="AVX2" -DEMBREE_ISPC_SUPPORT=OFF \ - && make install -j 32 \ - && cd $BUILDDIR/ && rm -rf embree-* -fi - -# install pybind11 headers -if [ ! 
-f $ROOT/include/pybind11/pybind11.h ] -then -curl -SL https://github.com/pybind/pybind11/archive/v2.12.0.tar.gz | tar -xzC $BUILDDIR && \ - cd pybind11-2.12.0 && \ - mkdir build && cd build && \ - cmake ../ -DCMAKE_INSTALL_PREFIX=$ROOT -DPYBIND11_TEST=off && \ - make install && \ - cd $BUILDDIR && rm -rf pybind11-* -fi - -# install cereal headers -if [ ! -f $ROOT/include/cereal/cereal.hpp ] -then -curl -SL https://github.com/USCiLab/cereal/archive/v1.3.2.tar.gz | tar -xzC $BUILDDIR && \ - cd cereal-1.3.2 && \ - mkdir build && cd build && \ - cmake ../ -DCMAKE_INSTALL_PREFIX=$ROOT -DJUST_INSTALL_CEREAL=on && \ - make install && \ - cd $BUILDDIR && rm -rf cereal-* -fi - -# install eigen headers -if [ ! -f $ROOT/include/eigen3/Eigen/Eigen ] -then -curl -SL https://gitlab.com/libeigen/eigen/-/archive/3.4.0/eigen-3.4.0.tar.gz | tar -xzC $BUILDDIR && \ - cd eigen-3.4.0 && \ - mkdir build && cd build && \ - cmake ../ -DCMAKE_INSTALL_PREFIX=$ROOT -DBUILD_TESTING=off, -DEIGEN_TEST_NOQT=on && \ - make install && \ - cd $BUILDDIR && rm -rf eigen-* -fi - - - - - -# Install packages that are build requirements of other packages first. -# Use the pip cache in script builds to reduce time when rerunning the install script. - - export CFLAGS="-march=native" CXXFLAGS="-march=native"\ - && python3 -m pip install -r requirements-source.txt \ - || exit 1 - - export CFLAGS="-march=native" CXXFLAGS="-march=native" \ - && python3 -m pip install --no-build-isolation --no-binary freud-analysis,gsd -r requirements.txt \ - && chmod o+rX `python3 -c "import site; print(site.getsitepackages()[0])"`/flow/templates/* \ - || exit 1 - - - - - - -if [ ! -n "$(ls -d $ROOT/lib/python*/site-packages/fresnel)" ] -then - - git clone --recursive --branch v0.13.5 --depth 1 https://github.com/glotzerlab/fresnel \ - && cd fresnel \ - && mkdir -p build \ - && cd build \ - && export CFLAGS="-march=native" CXXFLAGS="-march=native" \ - && cmake ../ -DENABLE_EMBREE=on -DENABLE_OPTIX=off -Dembree_DIR=/opt/embree-4.3.1.x86_64.linux -DCMAKE_INSTALL_PREFIX=`python3 -c "import site; print(site.getsitepackages()[0])"` \ - && make install -j32 \ - && cd ../../ \ - && rm -rf fresnel \ - || exit 1 - - -fi - - - -if [ ! 
-n "$(ls -d $ROOT/lib/python*/site-packages/hoomd)" ] -then - - git clone --recursive --branch v4.6.0 --depth 1 https://github.com/glotzerlab/hoomd-blue hoomd \ - && cd hoomd \ - && mkdir -p build \ - && cd build \ - && export CFLAGS="-march=native" CXXFLAGS="-march=native" \ - && cmake ../ -DPYTHON_EXECUTABLE="`which python3`" -DENABLE_GPU=on -DENABLE_MPI=on -DENABLE_TBB=off -DENABLE_LLVM=off -DBUILD_TESTING=off -DENABLE_MPI_CUDA=off -DHOOMD_GPU_PLATFORM=HIP \ - && make install -j32 \ - && cd ../../ \ - && rm -rf hoomd \ - || exit 1 - - -fi diff --git a/template/base.jinja b/template/base.jinja deleted file mode 100644 index 0a2cb6af..00000000 --- a/template/base.jinja +++ /dev/null @@ -1,101 +0,0 @@ -FROM {{ FROM }} - -ARG GIT_SHA -ARG GIT_BRANCH -ARG CONFIGURATION -ARG TAG - -ENV GLOTZERLAB_SOFTWARE_GIT_SHA=${GIT_SHA} \ - GLOTZERLAB_SOFTWARE_GIT_BRANCH=${GIT_BRANCH} \ - GLOTZERLAB_SOFTWARE_CONFIGURATION=${CONFIGURATION} \ - GLOTZERLAB_SOFTWARE_TAG=${TAG} - -ARG DEBIAN_FRONTEND=noninteractive - -RUN apt-get update && apt-get install -y --no-install-recommends \ - clang-10 \ - cmake \ - curl \ - ffmpeg \ - git \ - hwloc \ - libboost-dev \ - libcereal-dev \ - libclang-10-dev \ - libclang-cpp10-dev \ - libedit-dev \ - libeigen3-dev \ - libevent-dev \ - libhwloc-dev \ - libqhull-dev \ - libtbb-dev \ - libsqlite3-dev \ - llvm-10-dev \ - python3.9 \ - python3.9-dev \ - python3.9-venv \ - python3.9-distutils \ - strace \ - zlib1g-dev \ - ca-certificates \ - && rm -rf /var/lib/apt/lists/* - - -# put clang on the path -ENV PATH=$PATH:/usr/lib/llvm-10/bin - -# Link python3.9 to python3 in user path -RUN rm -f /usr/bin/python3 && ln -s "$(which python3.9)" /usr/bin/python3 - -# prevent python from loading packages from outside the container -# default empty pythonpath -ENV PYTHONPATH=/ignore/pythonpath PYTHONUNBUFFERED=1 -# disable user site directories (https://docs.python.org/3/library/site.html#module-site) -RUN sed -i -e 's/ENABLE_USER_SITE = None/ENABLE_USER_SITE = False/g' `python3 -c 'import site; print(site.__file__)'` - -RUN curl -sSL https://github.com/embree/embree/archive/v{{ EMBREE_VERSION }}/embree-{{ EMBREE_VERSION }}.tar.gz | tar -xzC /root \ - && cd /root/embree-{{ EMBREE_VERSION }} \ - && mkdir build && cd build \ - && cmake ../ -DCMAKE_INSTALL_PREFIX=/usr/local -DCMAKE_INSTALL_LIBDIR=lib/ -DCMAKE_BUILD_TYPE=Release -DEMBREE_TUTORIALS=OFF -DEMBREE_MAX_ISA="AVX2" -DEMBREE_ISPC_SUPPORT=OFF \ - && make install -j {{ MAKEJOBS }} \ - && cd / && rm -rf /root/embree-* - -# mount points for filesystems on clusters -RUN mkdir -p /nfs \ - mkdir -p /oasis \ - mkdir -p /scratch \ - mkdir -p /work \ - mkdir -p /projects \ - mkdir -p /home1 - -# make a python virtual environment to install packages into -# many pip installable packages clobber scripts that ubuntu provides in bin/ and result -# in broken packages -ENV PATH=/opt/glotzerlab/bin:$PATH \ - CMAKE_PREFIX_PATH=/opt/glotzerlab/bin \ - LD_LIBRARY_PATH=/usr/local/lib:${LD_LIBRARY_PATH} - -# setuptools 65 breaks the gsd build: https://github.com/pypa/setuptools/pull/3505 -# pip 22.1 breaks the build. 
- -RUN python3 -m venv /opt/glotzerlab \ - && /opt/glotzerlab/bin/python3 -m pip install --no-cache-dir --upgrade pip==22.2.2 setuptools==64.0.3 wheel - -# Install the requirements files -COPY requirements*.txt / - -# Fix library search paths -RUN echo "/usr/local/cuda-{{ CUDA_VERSION }}/compat" >> /etc/ld.so.conf.d/999-cuda-glotzerlab.conf -RUN ldconfig - -# setup self test -RUN mkdir /test -COPY test/* /test/ - -# install newer pybind11 -RUN curl -SL https://github.com/pybind/pybind11/archive/v2.10.4.tar.gz | tar -xzC /root \ - && cd /root/pybind11-2.10.4 \ - && mkdir build && cd build \ - && cmake ../ -DCMAKE_INSTALL_PREFIX=/usr/local -DPYBIND11_TEST=off -DPYTHON_EXECUTABLE=/usr/bin/python3 \ - && make install \ - && cd / && rm -rf /root/pybind11-* diff --git a/template/crusher.jinja b/template/crusher.jinja deleted file mode 100644 index 4b35612f..00000000 --- a/template/crusher.jinja +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/bash - -set -u -set -e - -usage="$(basename "$0") root -- Build software and install in root." - -if [[ $# -lt 1 || $# -gt 1 || $1 == "-h" ]] -then - echo "$usage" - exit 0 -fi - -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -ROOT=$1 -module reset -module load cray-python/3.9.13.1 -python3 -m venv $ROOT - -cat >$ROOT/environment.sh << EOL -module reset -module load PrgEnv-gnu -module load cmake/3.23.2 -module load git/2.36.1 -module load rocm/5.1.0 -module load cray-python/3.9.13.1 -module load hdf5/1.12.1 -module load ninja/1.10.2 -module load tmux/3.2a - -# The cray-mpich module does not provide this, it is needed to build mpi4py from source. -export MPICC=\$CRAY_MPICH_DIR/bin/mpicc - -export LD_LIBRARY_PATH=$ROOT/lib:\$LD_LIBRARY_PATH -export PATH=$ROOT/bin:\$PATH -export CPATH=$ROOT/include -export LIBRARY_PATH=$ROOT/lib -export VIRTUAL_ENV=$ROOT -export CMAKE_PREFIX_PATH=$ROOT -export CC=\$GCC_PATH/bin/gcc -export CXX=\$GCC_PATH/bin/g++ - -# Settings to build cupy for rocm: https://docs.cupy.dev/en/stable/install.html -export CUPY_INSTALL_USE_HIP=1 -export ROCM_HOME=\$OLCF_ROCM_ROOT -export HCC_AMDGPU_TARGET=gfx90a - -export PYTHONUNBUFFERED=1 -EOL - -source $ROOT/environment.sh - -set -x - -BUILDDIR=`mktemp -d` -mkdir -p $BUILDDIR - -# deletes the temp directory -function cleanup { - rm -rf "$BUILDDIR" - echo "Deleted temp working directory $BUILDDIR" -} - -trap cleanup EXIT -cd $BUILDDIR - -cp -a $DIR/../../*.txt $BUILDDIR -cd $BUILDDIR - -set -x -python3 -m pip install --upgrade pip setuptools wheel -python3 -m pip install -r requirements-mpi.txt - -# TBB -if [ ! -f $ROOT/lib64/libtbb.so ] -then -curl -sSLO https://github.com/oneapi-src/oneTBB/archive/v{{ TBB_VERSION }}.tar.gz \ - && tar -xzf v{{ TBB_VERSION }}.tar.gz -C . \ - && cd oneTBB-{{ TBB_VERSION }} \ - && cmake -S . -B build -DTBB_TEST=off -DTBB_STRICT=off -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$ROOT \ - && cmake --build build -j {{ MAKEJOBS }} \ - && cmake --install build \ - && cd .. \ - && rm -rf oneTBB-{{ TBB_VERSION }} \ - && rm v{{ TBB_VERSION }}.tar.gz \ - || exit 1 -fi - -# Embree -if [ ! 
-f $ROOT/lib64/libembree4.so ] -then -curl -sSL https://github.com/embree/embree/archive/v{{ EMBREE_VERSION }}/embree-{{ EMBREE_VERSION }}.tar.gz | tar -xzC $BUILDDIR \ - && cd $BUILDDIR/embree-{{ EMBREE_VERSION }} \ - && mkdir build && cd build \ - && cmake ../ -DCMAKE_INSTALL_PREFIX=$ROOT -DCMAKE_INSTALL_LIBDIR=lib64/ -DCMAKE_BUILD_TYPE=Release -DEMBREE_TUTORIALS=OFF -DEMBREE_MAX_ISA="AVX2" -DEMBREE_ISPC_SUPPORT=OFF \ - && make install -j {{ MAKEJOBS }} \ - && cd $BUILDDIR/ && rm -rf embree-* -fi - -# install pybind11 headers -if [ ! -f $ROOT/include/pybind11/pybind11.h ] -then -curl -SL https://github.com/pybind/pybind11/archive/v{{PYBIND11_VERSION}}.tar.gz | tar -xzC $BUILDDIR && \ - cd pybind11-{{PYBIND11_VERSION}} && \ - mkdir build && cd build && \ - cmake ../ -DCMAKE_INSTALL_PREFIX=$ROOT -DPYBIND11_TEST=off && \ - make install && \ - cd $BUILDDIR && rm -rf pybind11-* -fi - -# install cereal headers -if [ ! -f $ROOT/include/cereal/cereal.hpp ] -then -curl -SL https://github.com/USCiLab/cereal/archive/v{{CEREAL_VERSION}}.tar.gz | tar -xzC $BUILDDIR && \ - cd cereal-{{CEREAL_VERSION}} && \ - mkdir build && cd build && \ - cmake ../ -DCMAKE_INSTALL_PREFIX=$ROOT -DJUST_INSTALL_CEREAL=on && \ - make install && \ - cd $BUILDDIR && rm -rf cereal-* -fi - -# install eigen headers -if [ ! -f $ROOT/include/eigen3/Eigen/Eigen ] -then -curl -SL https://gitlab.com/libeigen/eigen/-/archive/{{EIGEN_VERSION}}/eigen-{{EIGEN_VERSION}}.tar.gz | tar -xzC $BUILDDIR && \ - cd eigen-{{EIGEN_VERSION}} && \ - mkdir build && cd build && \ - cmake ../ -DCMAKE_INSTALL_PREFIX=$ROOT -DBUILD_TESTING=off, -DEIGEN_TEST_NOQT=on && \ - make install && \ - cd $BUILDDIR && rm -rf eigen-* -fi diff --git a/template/finalize.jinja b/template/finalize.jinja deleted file mode 100644 index 346e0645..00000000 --- a/template/finalize.jinja +++ /dev/null @@ -1,8 +0,0 @@ - - -# configure unprivileged user -RUN useradd --create-home --shell /bin/bash glotzerlab-software \ - && chown glotzerlab-software:glotzerlab-software -R /test \ - && chmod o+rX -R /test - -USER glotzerlab-software:glotzerlab-software diff --git a/template/frontier.jinja b/template/frontier.jinja deleted file mode 100644 index 5f061b2f..00000000 --- a/template/frontier.jinja +++ /dev/null @@ -1,159 +0,0 @@ -#!/usr/bin/bash - -set -u -set -e - -usage="$(basename "$0") root -- Build software and install in root." - -if [[ $# -lt 1 || $# -gt 1 || $1 == "-h" ]] -then - echo "$usage" - exit 0 -fi - -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -ROOT=$(realpath $1) -echo "Installing glotzerlab-software to $ROOT" -module purge -module load PrgEnv-gnu -module load cray-python/3.10.10 -python3 -m venv $ROOT - -cat >$ROOT/variables.sh << EOL -module purge -module load PrgEnv-gnu -module load cmake/3.23.2 -module load git/2.36.1 -module load rocm/5.4.3 -module load cray-python/3.10.10 -module load hdf5/1.14.0 -module load ninja/1.10.2 -module load tmux/3.2a -module unload darshan-runtime - -# The cray-mpich module does not provide this, it is needed to build mpi4py from source. 
-export MPICC=\$CRAY_MPICH_DIR/bin/mpicc - -export LD_LIBRARY_PATH=\$GLOTZERLAB_SOFTWARE_ROOT/lib:\$LD_LIBRARY_PATH -export PATH=\$GLOTZERLAB_SOFTWARE_ROOT/bin:\$PATH -export CPATH=\$GLOTZERLAB_SOFTWARE_ROOT/include -export LIBRARY_PATH=\$GLOTZERLAB_SOFTWARE_ROOT/lib -export VIRTUAL_ENV=\$GLOTZERLAB_SOFTWARE_ROOT -export CMAKE_PREFIX_PATH=\$GLOTZERLAB_SOFTWARE_ROOT -export PYTHONPATH=\$(\${GLOTZERLAB_SOFTWARE_ROOT}/bin/python -c 'import site; print(site.getsitepackages()[0])') -export CC=\$GCC_PATH/bin/gcc -export CXX=\$GCC_PATH/bin/g++ - -# Settings to build cupy for rocm: https://docs.cupy.dev/en/stable/install.html -export CUPY_INSTALL_USE_HIP=1 -export ROCM_HOME=\$OLCF_ROCM_ROOT -export HCC_AMDGPU_TARGET=gfx90a - -export PYTHONUNBUFFERED=1 - -# work around PMI_Init mmap sync errors -export PMI_MMAP_SYNC_WAIT_TIME=1800 -EOL - -cat >$ROOT/environment.sh << EOL -export GLOTZERLAB_SOFTWARE_ROOT=$ROOT -source \$GLOTZERLAB_SOFTWARE_ROOT/variables.sh -EOL - -cat >$ROOT/generate-tar-cache.sh << EOL -#! /usr/bin/bash -usage="\$(basename "\$0") output-file -- Generate a tar cache file." - -if [[ \$# -lt 1 || \$# -gt 1 || \$1 == "-h" ]] -then - echo "\$usage" - exit 0 -fi - -DEST=\$(realpath \$1) - -tar --directory $ROOT --exclude software.tar -cf \$DEST . -EOL -chmod ug+x $ROOT/generate-tar-cache.sh - -source $ROOT/environment.sh - -set -x - -BUILDDIR=`mktemp -d` -mkdir -p $BUILDDIR - -# deletes the temp directory -function cleanup { - rm -rf "$BUILDDIR" - echo "Deleted temp working directory $BUILDDIR" -} - -trap cleanup EXIT -cd $BUILDDIR - -cp -a $DIR/../../*.txt $BUILDDIR -cd $BUILDDIR - -set -x -python3 -m pip install --upgrade pip setuptools wheel -python3 -m pip install -r requirements-mpi.txt - -# TBB -if [ ! -f $ROOT/lib64/libtbb.so ] -then -curl -sSLO https://github.com/oneapi-src/oneTBB/archive/v{{ TBB_VERSION }}.tar.gz \ - && tar -xzf v{{ TBB_VERSION }}.tar.gz -C . \ - && cd oneTBB-{{ TBB_VERSION }} \ - && cmake -S . -B build -DTBB_TEST=off -DTBB_STRICT=off -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$ROOT \ - && cmake --build build -j {{ MAKEJOBS }} \ - && cmake --install build \ - && cd .. \ - && rm -rf oneTBB-{{ TBB_VERSION }} \ - && rm v{{ TBB_VERSION }}.tar.gz \ - || exit 1 -fi - -# Embree -if [ ! -f $ROOT/lib64/libembree4.so ] -then -curl -sSL https://github.com/embree/embree/archive/v{{ EMBREE_VERSION }}/embree-{{ EMBREE_VERSION }}.tar.gz | tar -xzC $BUILDDIR \ - && cd $BUILDDIR/embree-{{ EMBREE_VERSION }} \ - && mkdir build && cd build \ - && cmake ../ -DCMAKE_INSTALL_PREFIX=$ROOT -DCMAKE_INSTALL_LIBDIR=lib64/ -DCMAKE_BUILD_TYPE=Release -DEMBREE_TUTORIALS=OFF -DEMBREE_MAX_ISA="AVX2" -DEMBREE_ISPC_SUPPORT=OFF \ - && make install -j {{ MAKEJOBS }} \ - && cd $BUILDDIR/ && rm -rf embree-* -fi - -# install pybind11 headers -if [ ! -f $ROOT/include/pybind11/pybind11.h ] -then -curl -SL https://github.com/pybind/pybind11/archive/v{{PYBIND11_VERSION}}.tar.gz | tar -xzC $BUILDDIR && \ - cd pybind11-{{PYBIND11_VERSION}} && \ - mkdir build && cd build && \ - cmake ../ -DCMAKE_INSTALL_PREFIX=$ROOT -DPYBIND11_TEST=off && \ - make install && \ - cd $BUILDDIR && rm -rf pybind11-* -fi - -# install cereal headers -if [ ! 
-f $ROOT/include/cereal/cereal.hpp ] -then -curl -SL https://github.com/USCiLab/cereal/archive/v{{CEREAL_VERSION}}.tar.gz | tar -xzC $BUILDDIR && \ - cd cereal-{{CEREAL_VERSION}} && \ - mkdir build && cd build && \ - cmake ../ -DCMAKE_INSTALL_PREFIX=$ROOT -DJUST_INSTALL_CEREAL=on && \ - make install && \ - cd $BUILDDIR && rm -rf cereal-* -fi - -# install eigen headers -if [ ! -f $ROOT/include/eigen3/Eigen/Eigen ] -then -curl -SL https://gitlab.com/libeigen/eigen/-/archive/{{EIGEN_VERSION}}/eigen-{{EIGEN_VERSION}}.tar.gz | tar -xzC $BUILDDIR && \ - cd eigen-{{EIGEN_VERSION}} && \ - mkdir build && cd build && \ - cmake ../ -DCMAKE_INSTALL_PREFIX=$ROOT -DBUILD_TESTING=off, -DEIGEN_TEST_NOQT=on && \ - make install && \ - cd $BUILDDIR && rm -rf eigen-* -fi diff --git a/template/glotzerlab-software.jinja b/template/glotzerlab-software.jinja deleted file mode 100644 index 418bff25..00000000 --- a/template/glotzerlab-software.jinja +++ /dev/null @@ -1,79 +0,0 @@ -{% if output == 'script' %} -{% set RUN = '' %} -{% else %} -{% set RUN = 'RUN' %} -{% endif %} - -{% if output != 'script' %} - -{{ RUN }} export CFLAGS="{{CFLAGS}}" CXXFLAGS="{{CFLAGS}}" \ - && python3 -m pip install -r requirements-source.txt \ - && python3 -m pip cache purge - -# build select packages from source with machine specific flags -{{ RUN }} export CFLAGS="{{CFLAGS}}" CXXFLAGS="{{CFLAGS}}" \ - && python3 -m pip install --no-build-isolation --no-binary freud-analysis,gsd -r requirements.txt \ - && python3 -m pip install cython==0.29.36 \ - && python3 -m pip install --no-build-isolation -r requirements-cupy.txt \ - && python3 -m pip cache purge - -{% else %} - -# Install packages that are build requirements of other packages first. -# Use the pip cache in script builds to reduce time when rerunning the install script. - -{{ RUN }} export CFLAGS="{{CFLAGS}}" CXXFLAGS="{{CFLAGS}}"\ - && python3 -m pip install -r requirements-source.txt \ - || exit 1 - -{{ RUN }} export CFLAGS="{{CFLAGS}}" CXXFLAGS="{{CFLAGS}}" \ - && python3 -m pip install --no-build-isolation --no-binary freud-analysis,gsd -r requirements.txt \ - && chmod o+rX `python3 -c "import site; print(site.getsitepackages()[0])"`/flow/templates/* \ - || exit 1 - -{% if system != 'crusher' and system != 'frontier' %} -{{ RUN }} export CFLAGS="{{CFLAGS}}" CXXFLAGS="{{CFLAGS}}" \ - && python3 -m pip install cython==0.29.36 - && python3 -m pip install --no-build-isolation -r requirements-cupy.txt \ - || exit 1 -{% endif %} - -{% endif %} - -{% if output == 'script' %} -if [ ! -n "$(ls -d $ROOT/lib/python*/site-packages/fresnel)" ] -then -{% endif %} -{{ RUN }} git clone --recursive --branch {{ FRESNEL_VERSION }} --depth 1 https://github.com/glotzerlab/fresnel \ - && cd fresnel \ - && mkdir -p build \ - && cd build \ - && export CFLAGS="{{CFLAGS}}" CXXFLAGS="{{CFLAGS}}" \ - && cmake ../ -DENABLE_EMBREE=on -DENABLE_OPTIX=off -Dembree_DIR=/opt/embree-{{ EMBREE_VERSION }}.x86_64.linux -DCMAKE_INSTALL_PREFIX=`python3 -c "import site; print(site.getsitepackages()[0])"` \ - && make install -j{{ MAKEJOBS }} \ - && cd ../../ \ - && rm -rf fresnel \ - || exit 1 - -{% if output == 'script' %} -fi -{% endif %} - -{% if output == 'script' %} -if [ ! 
-n "$(ls -d $ROOT/lib/python*/site-packages/hoomd)" ] -then -{% endif %} -{{ RUN }} git clone --recursive --branch {{ HOOMD_VERSION }} --depth 1 https://github.com/glotzerlab/hoomd-blue hoomd \ - && cd hoomd \ - && mkdir -p build \ - && cd build \ - && export CFLAGS="{{CFLAGS}}" CXXFLAGS="{{CFLAGS}}" \ - && cmake ../ -DPYTHON_EXECUTABLE="`which python3`" -DENABLE_GPU=on -DENABLE_MPI={{ ENABLE_MPI }} -DENABLE_TBB={{ ENABLE_TBB|default('on') }} -DENABLE_LLVM={{ ENABLE_LLVM|default('on') }} -DBUILD_TESTING=off -DENABLE_MPI_CUDA={{ ENABLE_MPI_CUDA|default('off') }} -DHOOMD_GPU_PLATFORM={{ HOOMD_GPU_PLATFORM|default('CUDA') }} \ - && make install -j{{ MAKEJOBS }} \ - && cd ../../ \ - && rm -rf hoomd \ - || exit 1 - -{% if output == 'script' %} -fi -{% endif %} diff --git a/template/ib-mlx.jinja b/template/ib-mlx.jinja deleted file mode 100644 index efe0bc04..00000000 --- a/template/ib-mlx.jinja +++ /dev/null @@ -1,29 +0,0 @@ - - -RUN apt-get update && apt-get install -y --no-install-recommends \ - dkms \ - infiniband-diags \ - ibacm \ - mstflint \ - libibmad-dev \ - libibumad-dev \ - opensm \ - srptools \ - ibutils \ - ibverbs-utils \ - rdmacm-utils \ - perftest \ - numactl \ - libnuma-dev \ - libnl-3-200 \ - libnl-route-3-200 \ - libnl-route-3-dev \ - libnl-utils \ - && rm -rf /var/lib/apt/lists/* - -# use ubuntu provided ibverbs -RUN apt-get update && apt-get install -y --no-install-recommends \ - libibverbs-dev \ - ibverbs-providers \ - librdmacm-dev \ - && rm -rf /var/lib/apt/lists/* diff --git a/template/mvapich2.jinja b/template/mvapich2.jinja deleted file mode 100644 index 7ddff040..00000000 --- a/template/mvapich2.jinja +++ /dev/null @@ -1,25 +0,0 @@ - - -RUN apt-get update && apt-get install -y --no-install-recommends \ - byacc \ - flex \ - && rm -rf /var/lib/apt/lists/* - -RUN curl -sSLO http://mvapich.cse.ohio-state.edu/download/mvapich/mv2/mvapich2-{{ MVAPICH_VERSION }}{{ MVAPICH_PATCHLEVEL }}.tar.gz \ - && tar -xzf mvapich2-{{ MVAPICH_VERSION }}{{ MVAPICH_PATCHLEVEL }}.tar.gz -C /root \ - && cd /root/mvapich2-{{ MVAPICH_VERSION }}{{ MVAPICH_PATCHLEVEL }} \ - && ./configure --prefix=/usr --disable-fortran {{ MVAPICH_EXTRA_OPTS }}\ - && make install \ - && rm -rf /root/mvapich2-{{ MVAPICH_VERSION }}{{ MVAPICH_PATCHLEVEL }} \ - && rm /mvapich2-{{ MVAPICH_VERSION }}{{ MVAPICH_PATCHLEVEL }}.tar.gz - -RUN curl -sSLO http://mvapich.cse.ohio-state.edu/download/mvapich/osu-micro-benchmarks-{{ OSU_MICROBENCHMARK_VERSION }}.tar.gz \ - && tar -xzf osu-micro-benchmarks-{{ OSU_MICROBENCHMARK_VERSION }}.tar.gz -C /root \ - && cd /root/osu-micro-benchmarks-{{ OSU_MICROBENCHMARK_VERSION }} \ - && ./configure --prefix=/opt/osu-micro-benchmarks CC=`which mpicc` CXX=`which mpicxx` \ - && cd mpi \ - && make install \ - && rm -rf /root/osu-micro-benchmarks-{{ OSU_MICROBENCHMARK_VERSION }} \ - && rm /osu-micro-benchmarks-{{ OSU_MICROBENCHMARK_VERSION }}.tar.gz - -RUN /opt/glotzerlab/bin/python3 -m pip install --no-cache-dir --no-binary mpi4py -r requirements-mpi.txt diff --git a/template/openmpi.jinja b/template/openmpi.jinja deleted file mode 100644 index ec72668a..00000000 --- a/template/openmpi.jinja +++ /dev/null @@ -1,198 +0,0 @@ - - -{% if system == 'greatlakes' %} - -RUN curl -sSLO https://www.open-mpi.org/software/ompi/v{{ OPENMPI_VERSION }}/downloads/openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }}.tar.bz2 \ - && tar -xjf openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }}.tar.bz2 -C /root \ - && cd /root/openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }} \ - && ./configure 
--prefix=/usr --with-pmix=/usr --with-libevent=external --with-hwloc=/usr --without-verbs --enable-shared --with-ucx\ - && make all install \ - && rm -rf /root/openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }} \ - && rm /openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }}.tar.bz2 - -{% elif system == 'anvil' %} - -RUN curl -sSLO https://www.open-mpi.org/software/ompi/v{{ OPENMPI_VERSION }}/downloads/openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }}.tar.bz2 \ - && tar -xjf openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }}.tar.bz2 -C /root \ - && cd /root/openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }} \ - && ./configure --prefix=/usr \ - --enable-shared \ - --disable-silent-rules \ - --disable-builtin-atomics \ - --enable-mpi1-compatibility \ - --without-alps \ - --without-lsf \ - --without-sge \ - --without-tm \ - --without-loadleveler \ - --with-ucx \ - --disable-memchecker \ - --with-libevent=external \ - --with-hwloc=/usr \ - --without-cuda \ - --disable-java \ - --disable-mpi-java \ - --with-gpfs=no \ - --enable-cxx-exceptions \ - && make all install \ - && rm -rf /root/openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }} \ - && rm /openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }}.tar.bz2 - -{% elif system == 'delta' %} - -RUN curl -sSLO https://github.com/ofiwg/libfabric/releases/download/v{{LIBFABRIC_VERSION}}/libfabric-{{LIBFABRIC_VERSION}}.tar.bz2 \ - && tar -xjf libfabric-{{LIBFABRIC_VERSION}}.tar.bz2 -C /root \ - && cd /root/libfabric-{{LIBFABRIC_VERSION}} \ - && mkdir build && cd build \ - && ../configure --prefix=/usr \ - && make install \ - && rm -rf /root/libfabric-{{LIBFABRIC_VERSION}} \ - && rm /libfabric-{{LIBFABRIC_VERSION}}.tar.bz2 - -RUN curl -sSLO https://www.open-mpi.org/software/ompi/v{{ OPENMPI_VERSION }}/downloads/openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }}.tar.bz2 \ - && tar -xjf openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }}.tar.bz2 -C /root \ - && cd /root/openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }} \ - && ./configure --prefix=/usr \ - --enable-shared \ - --disable-silent-rules \ - --disable-builtin-atomics \ - --with-pmix=/usr \ - --enable-mpi1-compatibility \ - --without-verbs \ - --without-ucx \ - --with-ofi \ - --without-cma \ - --without-mxm \ - --without-xpmem \ - --without-psm2 \ - --without-hcoll \ - --without-fca \ - --without-psm \ - --without-cray-xpmem \ - --without-alps \ - --disable-memchecker \ - --with-libevent=external \ - --with-hwloc=/usr \ - --disable-java \ - --disable-mpi-java \ - --with-gpfs=no \ - --enable-dlopen \ - && make all install \ - && rm -rf /root/openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }} \ - && rm /openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }}.tar.bz2 - -{% elif system == 'bridges2' %} - -# bridges2 needs UCX - -RUN curl -sSLO https://www.open-mpi.org/software/ompi/v{{ OPENMPI_VERSION }}/downloads/openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }}.tar.bz2 \ - && tar -xjf openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }}.tar.bz2 -C /root \ - && cd /root/openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }} \ - && ./configure --prefix=/usr \ - --enable-shared \ - --disable-silent-rules \ - --disable-builtin-atomics \ - --enable-static \ - --without-pmi \ - --enable-mpi1-compatibility \ - --without-verbs \ - --without-xpmem \ - --without-ofi \ - --without-mxm \ - --without-cma \ - --with-ucx \ - --without-psm \ - --without-knem \ - --without-psm2 \ - --without-hcoll \ - --without-fca \ - --disable-java \ - --disable-mpi-java \ - 
--without-cuda \ - && make all install \ - && rm -rf /root/openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }} \ - && rm /openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }}.tar.bz2 - -{% elif system == 'expanse' %} - -RUN curl -sSLO https://www.open-mpi.org/software/ompi/v{{ OPENMPI_VERSION }}/downloads/openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }}.tar.bz2 \ - && tar -xjf openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }}.tar.bz2 -C /root \ - && cd /root/openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }} \ - && ./configure --prefix=/usr \ - --enable-shared \ - --disable-silent-rules \ - --disable-builtin-atomics \ - --enable-static \ - --with-pmix=/usr \ - --enable-mpi1-compatibility \ - --without-fca \ - --without-ofi \ - --without-verbs \ - --without-cuda \ - --without-mxm \ - --without-psm \ - --without-knem \ - --without-hcoll \ - --without-cma \ - --without-xpmem \ - --without-psm2 \ - --without-loadleveler \ - --without-lsf \ - --without-alps \ - --without-tm \ - --without-sge \ - --disable-memchecker \ - --disable-java \ - --disable-mpi-java \ - --with-ucx \ - && make all install \ - && rm -rf /root/openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }} \ - && rm /openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }}.tar.bz2 - -{% elif system == 'expanse-gpu' %} - -RUN curl -sSLO https://www.open-mpi.org/software/ompi/v{{ OPENMPI_VERSION }}/downloads/openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }}.tar.bz2 \ - && tar -xjf openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }}.tar.bz2 -C /root \ - && cd /root/openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }} \ - && ./configure --prefix=/usr \ - --enable-shared \ - --disable-silent-rules \ - --disable-builtin-atomics \ - --enable-static \ - --enable-mpi1-compatibility \ - --without-fca \ - --without-ofi \ - --without-verbs \ - --without-cuda \ - --without-mxm \ - --without-psm \ - --without-knem \ - --without-hcoll \ - --without-cma \ - --without-xpmem \ - --without-psm2 \ - --without-loadleveler \ - --without-lsf \ - --without-alps \ - --without-tm \ - --without-sge \ - --disable-memchecker \ - --disable-java \ - --disable-mpi-java \ - --with-ucx \ - && make all install \ - && rm -rf /root/openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }} \ - && rm /openmpi-{{ OPENMPI_VERSION }}.{{ OPENMPI_PATCHLEVEL }}.tar.bz2 - -{% endif %} - -RUN curl -sSLO http://mvapich.cse.ohio-state.edu/download/mvapich/osu-micro-benchmarks-{{ OSU_MICROBENCHMARK_VERSION }}.tar.gz \ - && tar -xzf osu-micro-benchmarks-{{ OSU_MICROBENCHMARK_VERSION }}.tar.gz -C /root \ - && cd /root/osu-micro-benchmarks-{{ OSU_MICROBENCHMARK_VERSION }} \ - && ./configure --prefix=/opt/osu-micro-benchmarks CC=`which mpicc` CXX=`which mpicxx` \ - && cd mpi \ - && make install \ - && rm -rf /root/osu-micro-benchmarks-{{ OSU_MICROBENCHMARK_VERSION }} \ - && rm /osu-micro-benchmarks-{{ OSU_MICROBENCHMARK_VERSION }}.tar.gz - -RUN /opt/glotzerlab/bin/python3 -m pip install --no-cache-dir --no-binary mpi4py -r requirements-mpi.txt diff --git a/template/pmix.jinja b/template/pmix.jinja deleted file mode 100644 index fc5a41d2..00000000 --- a/template/pmix.jinja +++ /dev/null @@ -1,11 +0,0 @@ - -RUN curl -sSLO https://github.com/pmix/pmix/releases/download/v{{ PMIX_VERSION }}/pmix-{{ PMIX_VERSION }}.tar.bz2 \ - && tar -xjf pmix-{{ PMIX_VERSION }}.tar.bz2 -C /root \ - && cd /root/pmix-{{ PMIX_VERSION }} \ - && mkdir build && cd build \ - && ../configure --prefix=/usr --without-munge \ - && make all install \ - && rm -rf /root/pmix-{{ PMIX_VERSION }} \ 
- && rm /pmix-{{ PMIX_VERSION }}.tar.bz2 - -ENV PMIX_MCA_psec=^munge PMIX_MCA_gds=hash diff --git a/template/test.jinja b/template/test.jinja deleted file mode 100644 index 6c9f2a6b..00000000 --- a/template/test.jinja +++ /dev/null @@ -1,50 +0,0 @@ - - -RUN cd /test && \ - python3 serial-cpu.py - -# The latest coxeter release fails tests -# RUN cd \ -# && git clone --recursive --branch v$(python3 -c "import coxeter; print(coxeter.__version__)") --depth 1 https://github.com/glotzerlab/coxeter \ -# && python3 -m pytest -v ./coxeter/tests \ -# && rm -rf coxeter \ -# || exit 1 - -# Run minimal tests on freud - the full suite takes a long time to run. -RUN cd \ - && git clone --recursive --branch v$(python3 -c "import freud; print(freud.__version__)") --depth 1 https://github.com/glotzerlab/freud \ - && cd freud/tests/ \ - && python -m pytest -v test_box_Box.py \ - && cd ../../ \ - && rm -rf freud \ - || exit 1 - -RUN cd && \ - python3 -m pytest -v --pyargs gsd - -RUN cd && \ - python3 -m pytest -v --pyargs fresnel - -RUN cd && \ - python3 -m pytest -v --pyargs hoomd - -RUN cd \ - && git clone --branch v$(python3 -c "import signac; print(signac.__version__)") --depth 1 https://github.com/glotzerlab/signac.git \ - && cd signac \ - && python -m pytest -v tests \ - && cd .. \ - && rm -rf signac - -RUN cd \ - && git clone --branch v$(python3 -c "import flow; print(flow.__version__)") --depth 1 https://github.com/glotzerlab/signac-flow.git \ - && cd signac-flow \ - && python -m pytest -v tests \ - && cd .. \ - && rm -rf signac-flow - -RUN cd \ - && git clone --branch v$(python3 -c "import signac_dashboard; print(signac_dashboard.__version__)") --depth 1 https://github.com/glotzerlab/signac-dashboard.git --recursive \ - && cd signac-dashboard \ - && python -m pytest -v tests \ - && cd .. 
\ - && rm -rf signac-dashboard diff --git a/template/ucx.jinja b/template/ucx.jinja deleted file mode 100644 index c47abe1f..00000000 --- a/template/ucx.jinja +++ /dev/null @@ -1,9 +0,0 @@ - -RUN curl -sSLO https://github.com/openucx/ucx/releases/download/v{{UCX_VERSION}}{{UCX_RC}}/ucx-{{UCX_VERSION}}.tar.gz \ - && tar -xzf ucx-{{UCX_VERSION}}.tar.gz -C /root \ - && cd /root/ucx-{{UCX_VERSION}} \ - && mkdir build && cd build \ - && ../configure --prefix=/usr \ - && make install \ - && rm -rf /root/ucx-{{UCX_VERSION}} \ - && rm /ucx-{{UCX_VERSION}}.tar.gz diff --git a/test/mpi-cpu.py b/test/mpi-cpu.py deleted file mode 100644 index 3ee0b32a..00000000 --- a/test/mpi-cpu.py +++ /dev/null @@ -1,18 +0,0 @@ -import sys -results = sys.stdout - -try: - import hoomd - dev = hoomd.device.CPU() - assert(dev.communicator.num_ranks == 2) - - if (dev.communicator.rank == 0): - results.write('** Starting MPI CPU tests **\n') - results.write('HOOMD version : {}\n'.format(hoomd.version.version)) - results.write('** MPI CPU tests PASSED **\n') -except: - results.write('** Starting MPI CPU tests **\n') - results.write('** MPI CPU tests FAILED **\n') - - raise - diff --git a/test/mpi-gpu.py b/test/mpi-gpu.py deleted file mode 100644 index af20c268..00000000 --- a/test/mpi-gpu.py +++ /dev/null @@ -1,17 +0,0 @@ -import sys -results = sys.stdout - -try: - import hoomd - dev = hoomd.device.GPU() - assert(dev.communicator.num_ranks == 2) - - if (dev.communicator.rank == 0): - results.write('** Starting MPI GPU tests **\n') - results.write('HOOMD version : {}\n'.format(hoomd.version.version)) - results.write('** MPI GPU tests PASSED **\n') -except: - results.write('** Starting MPI GPU tests **\n') - results.write('** MPI GPU tests FAILED **\n') - - raise diff --git a/test/serial-cpu.py b/test/serial-cpu.py deleted file mode 100644 index 86ee7e7f..00000000 --- a/test/serial-cpu.py +++ /dev/null @@ -1,88 +0,0 @@ -import sys -results = sys.stdout -results.write('** Starting serial CPU tests **\n') - -try: - # Fresnel - import fresnel - results.write('Fresnel version : {}\n'.format(fresnel.version.version)) - dev = fresnel.Device(mode='cpu') - results.write('Fresnel device : {}\n'.format(dev)) - - # Freud - import freud - results.write('Freud version : {}\n'.format(freud.__version__)) - - # garnett - import garnett - results.write('garnett version : {}\n'.format(garnett.__version__)) - - # GSD - import gsd - results.write('GSD version : {}\n'.format(gsd.version.version)) - - # HOOMD - import hoomd - device = hoomd.device.CPU() - results.write('HOOMD version : {}\n'.format(hoomd.version.version)) - results.write('HOOMD flags : {}\n'.format(hoomd.version.compile_flags)) - - # pythia - import pythia - results.write('pythia version : {}\n'.format(pythia.__version__)) - - # libgetar - import plato - results.write('plato version : {}\n'.format(plato.__version__)) - - # rowan - import rowan - results.write('rowan version : {}\n'.format(rowan.__version__)) - - # signac - import signac - results.write('signac version : {}\n'.format(signac.__version__)) - - # signac-flow - import flow - results.write('flow version : {}\n'.format(flow.__version__)) - - results.write('\n') - - # h5py - import h5py - results.write('h5py version : {}\n'.format(h5py.__version__)) - - # matplotlib - import matplotlib - results.write('matplotlib version: {}\n'.format(matplotlib.__version__)) - - # numpy - import numpy - results.write('numpy version : {}\n'.format(matplotlib.__version__)) - - # pandas - import pandas - results.write('pandas version : 
{}\n'.format(matplotlib.__version__)) - - # pillow - import PIL - results.write('pillow version : {}\n'.format(PIL.__version__)) - - # scipy - import scipy - results.write('scipy version : {}\n'.format(scipy.__version__)) - - # sklearn - import sklearn - results.write('sklearn version : {}\n'.format(sklearn.__version__)) - - # pyyaml - import yaml - results.write('pyyaml version : {}\n'.format(yaml.__version__)) - - results.write('** Serial CPU tests PASSED **\n\n') -except: - results.write('** Serial CPU tests FAILED **\n\n') - raise - diff --git a/test/serial-gpu.py b/test/serial-gpu.py deleted file mode 100644 index b7463c06..00000000 --- a/test/serial-gpu.py +++ /dev/null @@ -1,22 +0,0 @@ -import sys -results = sys.stdout -results.write('** Starting serial GPU tests **\n') - -try: - # Fresnel - #import fresnel - #results.write('Fresnel version : {}\n'.format(fresnel.__version__)) - #dev = fresnel.Device(mode='gpu', n=1) - #results.write('Fresnel device : {}\n'.format(dev)) - - # HOOMD - import hoomd - device = hoomd.device.GPU() - results.write('HOOMD version : {}\n'.format(hoomd.version.version)) - results.write('HOOMD flags : {}\n'.format(hoomd.version.compile_flags)) - - results.write('** Serial GPU tests PASSED **\n\n') -except: - results.write('** Serial GPU tests FAILED **\n\n') - raise -