# v2-nightly.yml
name: TorchBench V2 nightly
on:
  workflow_dispatch:
  schedule:
    - cron: '0 14 * * *' # run at 2 PM UTC
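# The job below runs the TorchBench v2 suite nightly on a self-hosted
# runner, compares the result against the previous run, and dispatches a
# bisection workflow when a performance signal is detected.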
jobs:
  run-benchmark:
    environment: docker-s3-upload
    env:
      TORCHBENCH_VER: "v2"
      CONFIG_VER: "v2"
      CONDA_ENV_NAME: "torchbench-v2-nightly-ci"
      OUTPUT_DIR: ".torchbench/v2-nightly-ci"
      BISECTION_ROOT: ".torchbench/v2-bisection-ci"
      SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
      IS_GHA: 1
      AWS_DEFAULT_REGION: us-east-1
      BUILD_ENVIRONMENT: benchmark-nightly
      SETUP_SCRIPT: "/data/nvme/bin/setup_instance.sh"
    if: ${{ github.repository_owner == 'pytorch' }}
    runs-on: [self-hosted, bm-runner]
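    # Each run step below activates the per-run conda environment and
    # sources SETUP_SCRIPT, which is assumed to be a host-provided script
    # that prepares conda and CUDA on the benchmark machine.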
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          ref: v2.0
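      # python_utils.py creates a fresh conda environment named after
      # CONDA_ENV_NAME; the last step of the job removes it again.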
      - name: Create conda env
        run: |
          python3 ./utils/python_utils.py --create-conda-env "${CONDA_ENV_NAME}"
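      # cuda_utils.py installs the torch dependencies first, then the
      # latest PyTorch nightly build into the conda environment.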
      - name: Install PyTorch nightly
        run: |
          . activate "${CONDA_ENV_NAME}"
          . "${SETUP_SCRIPT}"
          python utils/cuda_utils.py --install-torch-deps
          python utils/cuda_utils.py --install-torch-nightly
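      # install.py installs the benchmark models and their dependencies.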
      - name: Install Torchbench models
        run: |
          . activate "${CONDA_ENV_NAME}"
          . "${SETUP_SCRIPT}"
          python install.py
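      # run.sh is expected to write the benchmark result JSON files under
      # WORKFLOW_HOME; the scoring step below picks up the newest one.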
      - name: Run benchmark
        run: |
          . activate "${CONDA_ENV_NAME}"
          . "${SETUP_SCRIPT}"
          WORKFLOW_HOME="${HOME}/${{ env.OUTPUT_DIR }}/gh${GITHUB_RUN_ID}"
          bash ./.github/scripts/run.sh "${WORKFLOW_HOME}"
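      # generate-abtest-config.py diffs today's numbers against the previous
      # nightly run; on a regression it writes gh-issue.md and a bisection
      # config, and is assumed to export TORCHBENCH_PERF_SIGNAL via
      # $GITHUB_ENV, which gates the dispatch step below.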
      - name: Generate the bisection config
        run: |
          set -x
          . activate "${CONDA_ENV_NAME}"
          . "${SETUP_SCRIPT}"
          WORKFLOW_HOME="${HOME}/${{ env.OUTPUT_DIR }}/gh${GITHUB_RUN_ID}"
          mkdir -p benchmark-output/
          # Refresh the self-hosted pytorch checkout (fetch only; the
          # working tree is not updated)
          pushd "${HOME}/pytorch"
          git fetch origin
          popd
          # gitpython and pyyaml are required by the generator script;
          # dataclasses and argparse ship with Python 3 and need no install
          pip install gitpython pyyaml
          # Compare against yesterday's result and report any perf signals
          python ./.github/scripts/generate-abtest-config.py \
            --pytorch-dir "${HOME}/pytorch" \
            --github-issue "${WORKFLOW_HOME}/gh-issue.md" \
            --benchmark-dir "${WORKFLOW_HOME}" \
            --out "${WORKFLOW_HOME}/bisection.yaml"
          # Include the config and issue draft in the GitHub artifact
          if [ -f "${WORKFLOW_HOME}/gh-issue.md" ]; then
            cp "${WORKFLOW_HOME}/bisection.yaml" ./benchmark-output/
            cp "${WORKFLOW_HOME}/gh-issue.md" ./benchmark-output/
            # Set up the bisection environment
            BISECTION_HOME="${HOME}/${{ env.BISECTION_ROOT }}/bisection-gh${GITHUB_RUN_ID}"
            mkdir -p "${BISECTION_HOME}"
            mv ./benchmark-output/gh-issue.md "${BISECTION_HOME}/gh-issue.md"
            cp ./benchmark-output/bisection.yaml "${BISECTION_HOME}/config.yaml"
          fi
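      # Workflow ID 16176850 identifies the bisection workflow (listed at
      # the workflows API URL below). A sketch of the equivalent dispatch
      # with the GitHub CLI, assuming the workflow file is v2-bisection.yml:
      #   gh workflow run v2-bisection.yml --ref main -f issue_name="bisection-gh${GITHUB_RUN_ID}"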
      - name: Dispatch the bisection workflow
        if: env.TORCHBENCH_PERF_SIGNAL
        run: |
          # Get the workflow ID from
          # https://api.github.com/repos/pytorch/benchmark/actions/workflows
          curl -u xuzhao9:${{ secrets.TORCHBENCH_ACCESS_TOKEN }} \
            -X POST \
            -H "Accept: application/vnd.github.v3+json" \
            https://api.github.com/repos/pytorch/benchmark/actions/workflows/16176850/dispatches \
            -d '{"ref": "main", "inputs": {"issue_name": "bisection-gh'"${GITHUB_RUN_ID}"'" } }'
      - name: Copy artifact and upload to scribe
        run: |
          . activate "${CONDA_ENV_NAME}"
          TODAY=$(date "+%Y%m%d%H%M%S")
          LATEST_RESULT=$(find "${HOME}/${{ env.OUTPUT_DIR }}/gh${GITHUB_RUN_ID}" -name "*.json" | sort -r | head -1)
          echo "Benchmark result file: ${LATEST_RESULT}"
          mkdir -p benchmark-output/
          cp "${LATEST_RESULT}" "./benchmark-output/benchmark-result-${CONFIG_VER}-${TODAY}.json"
          # Locate the score config for this config version
          CONFIG_DIR=torchbenchmark/score/configs/${CONFIG_VER}
          CONFIG_ENV=${CONFIG_DIR}/config-${CONFIG_VER}.env
          # Load its environment variables; `set -a` auto-exports every
          # variable assigned while sourcing, `set +a` turns that off again
          set -a; source "${CONFIG_ENV}"; set +a
          SCORE_FILE="./benchmark-result-${CONFIG_VER}-score-${TODAY}.json"
          # Generate the score file
          python compute_score.py --score_version "${CONFIG_VER}" --benchmark_data_file "${LATEST_RESULT}" --output-json "${SCORE_FILE}"
          # Upload the result to Scribe
          python scripts/upload_scribe_${CONFIG_VER}.py --pytest_bench_json "${LATEST_RESULT}" --torchbench_score_file "${SCORE_FILE}"
      - name: Upload artifact
        uses: actions/upload-artifact@v3
        with:
          name: Benchmark result
          path: benchmark-output/
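      # Note: without `if: always()`, this cleanup is skipped whenever an
      # earlier step fails, leaving the conda environment behind.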
      - name: Destroy conda env
        run: |
          conda env remove --name "${CONDA_ENV_NAME}"