2024-08-13 nightly release (9f17b23)
pytorchbot committed Aug 13, 2024
1 parent dfc47f0, commit d7dd14e
Showing 18 changed files with 1,367 additions and 1,037 deletions.
@@ -0,0 +1,215 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# This workflow is used for FBGEMM_GPU-GenAI CI as well as nightly builds of
# FBGEMM_GPU-GenAI against PyTorch-CUDA Nightly.
name: FBGEMM_GPU-GenAI CI

on:
  # PR Trigger (enabled for regression checks and debugging)
  #
  pull_request:
    branches:
      - main

  # Push Trigger (enabled to catch errors coming out of multiple merges)
  #
  push:
    branches:
      - main

  # Cron Trigger (UTC)
  #
  # Based on the Conda page for PyTorch-nightly, the GPU nightly releases appear
  # around 02:30 PST every day (roughly 2 hours after the CPU releases)
  #
  schedule:
    - cron: '45 12 * * *'

  # Manual Trigger
  #
  workflow_dispatch:
    inputs:
      publish_to_pypi:
        description: Publish Artifact to PyPI
        type: boolean
        required: false
        default: false

concurrency:
  # Cancel previous runs in the PR if a new commit is pushed
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  # Build on CPU hosts and upload to GHA
  build_artifact:
    runs-on: ${{ matrix.host-machine.instance }}
    container:
      image: amazonlinux:2023
      options: --user root
    defaults:
      run:
        shell: bash
    env:
      PRELUDE: .github/scripts/setup_env.bash
      BUILD_ENV: build_binary
      BUILD_VARIANT: cuda
    continue-on-error: true
    strategy:
      # Don't fast-fail all the other builds if one of them fails
      fail-fast: false
      matrix:
        host-machine: [
          { arch: x86, instance: "linux.24xlarge" },
        ]
        python-version: [ "3.8", "3.9", "3.10", "3.11", "3.12" ]
        cuda-version: [ "11.8.0", "12.1.1", "12.4.1" ]
        compiler: [ "gcc", "clang" ]

    steps:
      - name: Setup Build Container
        run: yum update -y; yum install -y binutils findutils git pciutils sudo tar wget which

      - name: Checkout the Repository
        uses: actions/checkout@v4
        with:
          submodules: true

      - name: Display System Info
        run: . $PRELUDE; print_system_info

      - name: Display GPU Info
        run: . $PRELUDE; print_gpu_info

      - name: Setup Miniconda
        run: . $PRELUDE; setup_miniconda $HOME/miniconda

      - name: Create Conda Environment
        run: . $PRELUDE; create_conda_environment $BUILD_ENV ${{ matrix.python-version }}

      - name: Install C/C++ Compilers
        run: . $PRELUDE; install_cxx_compiler $BUILD_ENV ${{ matrix.compiler }}

      - name: Install Build Tools
        run: . $PRELUDE; install_build_tools $BUILD_ENV

      - name: Install CUDA
        run: . $PRELUDE; install_cuda $BUILD_ENV ${{ matrix.cuda-version }}

      # Install via PIP to avoid defaulting to the CPU variant if the GPU variant of the day is not ready
      - name: Install PyTorch Nightly
        run: . $PRELUDE; install_pytorch_pip $BUILD_ENV nightly cuda/${{ matrix.cuda-version }}

      - name: Collect PyTorch Environment Info
        if: ${{ success() || failure() }}
        run: if . $PRELUDE && which conda; then collect_pytorch_env_info $BUILD_ENV; fi

      - name: Install cuDNN
        run: . $PRELUDE; install_cudnn $BUILD_ENV "$(pwd)/build_only/cudnn" ${{ matrix.cuda-version }}

      - name: Prepare FBGEMM_GPU Build
        run: . $PRELUDE; cd fbgemm_gpu; prepare_fbgemm_gpu_build $BUILD_ENV

      - name: Build FBGEMM_GPU Wheel
        run: . $PRELUDE; cd fbgemm_gpu; build_fbgemm_gpu_package $BUILD_ENV nightly genai

      - name: Upload Built Wheel as GHA Artifact
        # Cannot upgrade to actions/upload-artifact@v4 yet because GLIBC on the instance is too old
        uses: actions/upload-artifact@v3
        with:
          name: fbgemm_gpu_nightly_genai_${{ matrix.host-machine.arch }}_${{ matrix.compiler }}_py${{ matrix.python-version }}_cu${{ matrix.cuda-version }}.whl
          path: fbgemm_gpu/dist/*.whl
          if-no-files-found: error

  # Download the built artifact from GHA, test on GPU, and push to PyPI
  test_and_publish_artifact:
    # runs-on: linux.4xlarge.nvidia.gpu
    # Use available instance types - https://github.com/pytorch/test-infra/blob/main/.github/scale-config.yml
    runs-on: ${{ matrix.host-machine.instance }}
    defaults:
      run:
        shell: bash
    env:
      PRELUDE: .github/scripts/setup_env.bash
      BUILD_ENV: build_binary
      BUILD_VARIANT: genai
      ENFORCE_CUDA_DEVICE: 1
    strategy:
      fail-fast: false
      matrix:
        host-machine: [
          { arch: x86, instance: "linux.g5.4xlarge.nvidia.gpu" },
          # TODO: Enable when A100 machine queues are reasonably small enough for doing per-PR CI
          # https://hud.pytorch.org/metrics
          # { arch: x86, instance: "linux.gcp.a100" },
        ]
        python-version: [ "3.8", "3.9", "3.10", "3.11", "3.12" ]
        cuda-version: [ "11.8.0", "12.1.1", "12.4.1" ]
        # Specify exactly ONE CUDA version for artifact publish
        cuda-version-publish: [ "12.1.1" ]
        compiler: [ "gcc", "clang" ]
    needs: build_artifact

    steps:
      # Cannot upgrade to actions/checkout@v4 yet because GLIBC on the instance is too old
      - name: Checkout the Repository
        uses: actions/checkout@v3
        with:
          submodules: true

      - name: Download Wheel Artifact from GHA
        # Cannot upgrade to actions/download-artifact@v4 yet because GLIBC on the instance is too old
        uses: actions/download-artifact@v3
        with:
          name: fbgemm_gpu_nightly_genai_${{ matrix.host-machine.arch }}_${{ matrix.compiler }}_py${{ matrix.python-version }}_cu${{ matrix.cuda-version }}.whl

      # Use PyTorch test infrastructure action - https://github.com/pytorch/test-infra/blob/main/.github/actions/setup-nvidia/action.yml
      - name: Install NVIDIA Drivers and NVIDIA-Docker Runtime
        uses: pytorch/test-infra/.github/actions/setup-nvidia@main

      - name: Display System Info
        run: . $PRELUDE; print_system_info; print_ec2_info

      - name: Display GPU Info
        run: . $PRELUDE; print_gpu_info

      - name: Setup Miniconda
        run: . $PRELUDE; setup_miniconda $HOME/miniconda

      - name: Create Conda Environment
        run: . $PRELUDE; create_conda_environment $BUILD_ENV ${{ matrix.python-version }}

      - name: Install C/C++ Compilers for Updated LIBGCC
        # Install clang libraries to enable building and installing Triton
        run: . $PRELUDE; install_cxx_compiler $BUILD_ENV clang

      - name: Install CUDA
        run: . $PRELUDE; install_cuda $BUILD_ENV ${{ matrix.cuda-version }}

      # Install via PIP to avoid defaulting to the CPU variant if the GPU variant of the day is not ready
      - name: Install PyTorch Nightly
        run: . $PRELUDE; install_pytorch_pip $BUILD_ENV nightly cuda/${{ matrix.cuda-version }}

      - name: Collect PyTorch Environment Info
        if: ${{ success() || failure() }}
        run: if . $PRELUDE && which conda; then collect_pytorch_env_info $BUILD_ENV; fi

      - name: Prepare FBGEMM_GPU Build
        run: . $PRELUDE; cd fbgemm_gpu; prepare_fbgemm_gpu_build $BUILD_ENV

      - name: Install FBGEMM_GPU Wheel
        run: . $PRELUDE; install_fbgemm_gpu_wheel $BUILD_ENV *.whl

      - name: Test with PyTest
        timeout-minutes: 30
        run: . $PRELUDE; test_all_fbgemm_gpu_modules $BUILD_ENV

      - name: Push Wheel to PyPI
        if: ${{ (github.event_name == 'schedule' && matrix.cuda-version == matrix.cuda-version-publish) || (github.event_name == 'workflow_dispatch' && github.event.inputs.publish_to_pypi == 'true' && matrix.cuda-version == matrix.cuda-version-publish) }}
        env:
          PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
        run: . $PRELUDE; publish_to_pypi $BUILD_ENV *.whl "$PYPI_TOKEN"
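A few notes on the workflow above. First, the concurrency block: the group key falls back from the pull-request number to the git ref. The sketch below is illustrative only (not part of the commit) and restates that resolution in Python; the PR number used is hypothetical.

# Illustration only: how the concurrency group expression resolves.
# PR runs group by pull-request number, so pushing a new commit to a PR
# cancels that PR's in-flight run; push and scheduled runs fall back to the ref.
from typing import Optional

def concurrency_group(workflow: str, pr_number: Optional[int], ref: str) -> str:
    # Mirrors: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
    return f"{workflow}-{pr_number if pr_number is not None else ref}"

print(concurrency_group("FBGEMM_GPU-GenAI CI", 1234, "refs/pull/1234/merge"))  # hypothetical PR number
print(concurrency_group("FBGEMM_GPU-GenAI CI", None, "refs/heads/main"))       # push or scheduled run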
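Second, the build_artifact matrix: assuming GitHub Actions takes the full cross product of the matrix axes (with the single host-machine entry), the per-run job count works out as sketched below; this is an illustration, not part of the commit.

# Illustration only: rough count of build_artifact jobs per workflow run.
from itertools import product

python_versions = ["3.8", "3.9", "3.10", "3.11", "3.12"]
cuda_versions = ["11.8.0", "12.1.1", "12.4.1"]
compilers = ["gcc", "clang"]

combos = list(product(python_versions, cuda_versions, compilers))
print(len(combos))  # 30 wheel builds, each uploaded as its own GHA artifact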
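Third, the publish gate on the final step is dense; the sketch below (illustrative only, not part of the commit) restates the same condition in Python for readability.

# Illustration only: the "Push Wheel to PyPI" condition restated.
# A wheel is published only for the single CUDA version listed under
# cuda-version-publish, and only on scheduled runs or on manual runs
# where the publish_to_pypi input was set to true.
def should_publish(event_name: str, publish_to_pypi: str,
                   cuda_version: str, cuda_version_publish: str) -> bool:
    if cuda_version != cuda_version_publish:
        return False
    if event_name == "schedule":
        return True
    return event_name == "workflow_dispatch" and publish_to_pypi == "true"

assert should_publish("schedule", "false", "12.1.1", "12.1.1")
assert should_publish("workflow_dispatch", "true", "12.1.1", "12.1.1")
assert not should_publish("workflow_dispatch", "true", "11.8.0", "12.1.1")
assert not should_publish("pull_request", "false", "12.1.1", "12.1.1")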
@@ -11,6 +11,7 @@
# General
.DS_Store
*~
.hypothesis/

# Byte-compiled / optimized / DLL files
__pycache__/
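The .hypothesis/ entry added in this ignore-file hunk matches the local example-database directory that the Hypothesis property-based testing library writes into the working directory when tests run. A minimal illustration (the test itself is hypothetical, not from the repository):

# Hypothetical test: running it (e.g. via pytest) creates a .hypothesis/
# directory next to the tests, which is why the path is now ignored.
from hypothesis import given, strategies as st

@given(st.integers(), st.integers())
def test_addition_commutes(a: int, b: int) -> None:
    assert a + b == b + a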