# macosx_windows_ci.yaml
name: Run Tests
on:
  push:
    # This should disable running the workflow on tags, according to the
    # on.<push|pull_request>.<branches|tags> GitHub Actions docs.
    branches:
      - "*"
  pull_request:

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true
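
# The concurrency group above cancels any in-progress run with the same key:
# pull request runs for the same branch cancel each other (github.head_ref),
# while push runs fall back to the unique run_id and so never cancel one another.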

jobs:
  tests:
    name: Run Tests
    env:
      PYTHON: ${{ matrix.python-version }}
    runs-on: ${{ matrix.os }}
    defaults:
      run:
        # Adding -l {0} helps ensure conda can be found properly on Windows.
        shell: bash -l {0}
    strategy:
      fail-fast: false
      matrix:
        os: [macos-latest, windows-latest]
        python-version: ["3.10", "3.11", "3.12"]
        include:
          - env_name: pyuvdata_tests_windows
            os: windows-latest
          - env_name: pyuvdata_tests_mac_arm
            os: macos-latest
          - env_name: pyuvdata_tests
            os: macos-13
            python-version: "3.12"
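        # The include entries above attach a conda environment file to each
        # matrix OS and add one extra macOS job on macos-13 with Python 3.12;
        # macos-latest runners are arm64, which is presumably why they get the
        # separate pyuvdata_tests_mac_arm environment while macos-13 (x86-64)
        # reuses the plain pyuvdata_tests environment.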
    steps:
      - uses: actions/checkout@main
        with:
          fetch-depth: 0

      - name: Setup Miniforge
        uses: conda-incubator/setup-miniconda@v3
        with:
          miniforge-version: latest
          python-version: ${{ matrix.python-version }}
          environment-file: ci/${{ matrix.env_name }}.yml
          activate-environment: ${{ matrix.env_name }}
          run-post: false

      - name: Conda Info
        run: |
          conda info -a
          conda list
          PYVER=`python -c "import sys; print('{:d}.{:d}'.format(sys.version_info.major, sys.version_info.minor))"`
          if [[ $PYVER != $PYTHON ]]; then
            exit 1;
          fi
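      # The version check above is a sanity check: it fails the job early if
      # the Python interpreter in the activated conda environment does not
      # match the matrix python-version.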

      - name: Install
        run: |
          CFLAGS="-DCYTHON_TRACE=1 -DCYTHON_TRACE_NOGIL=1" pip install .
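      # The CYTHON_TRACE / CYTHON_TRACE_NOGIL defines above enable line tracing
      # in the Cython-compiled extensions so coverage can be reported for them
      # (assuming the Cython coverage plugin is enabled in .coveragerc); without
      # them the compiled modules would be invisible to coverage.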

      - name: Run Tests
        run: |
          python -m pytest -n auto --dist=loadfile --cov=pyuvdata --cov-config=.coveragerc --cov-report xml:./coverage.xml
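      # -n auto and --dist=loadfile above are pytest-xdist options (pytest-xdist
      # is expected to be provided by the conda environment): tests run in
      # parallel across the available cores, with all tests from a given file
      # kept on the same worker.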

      - uses: codecov/codecov-action@v4
        if: success()
        with:
          token: ${{ secrets.CODECOV_TOKEN }} # required
          file: ./coverage.xml # optional

  benchmark:
    name: Performance Benchmark
    needs: tests
    env:
      PYTHON: ${{ matrix.python-version }}
    runs-on: ${{ matrix.os }}
    defaults:
      run:
        # Adding -l {0} helps ensure conda can be found properly on Windows.
        shell: bash -l {0}
    strategy:
      fail-fast: false
      matrix:
        os: [macos-latest, windows-latest, ubuntu-latest]
        python-version: ["3.12"]
        include:
          - env_name: pyuvdata_tests_windows
            os: windows-latest
          - env_name: pyuvdata_tests_mac_arm
            os: macos-latest
          - env_name: pyuvdata_tests
            os: macos-13
            python-version: "3.12"
          - env_name: pyuvdata_tests
            os: ubuntu-latest
    steps:
      - uses: actions/checkout@main
        with:
          fetch-depth: 0

      - name: Setup Mambaforge
        uses: conda-incubator/setup-miniconda@v3
        with:
          miniforge-variant: Mambaforge
          miniforge-version: latest
          use-mamba: true
          python-version: ${{ matrix.python-version }}
          environment-file: ci/${{ matrix.env_name }}.yml
          activate-environment: ${{ matrix.env_name }}
          run-post: false

      - name: Conda Info
        run: |
          conda info -a
          conda list
          PYVER=`python -c "import sys; print('{:d}.{:d}'.format(sys.version_info.major, sys.version_info.minor))"`
          if [[ $PYVER != $PYTHON ]]; then
            exit 1;
          fi

      # also install benchmark utility
      - name: Install
        run: |
          pip install pytest-benchmark
          pip install .

      - name: Run benchmark
        run: |
          pytest --benchmark-only --benchmark-json output.json
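      # --benchmark-only and --benchmark-json above are pytest-benchmark
      # options: only the benchmark tests are run, and machine-readable results
      # are written to output.json for the comparison step below.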

      # Download previous benchmark result from cache (if it exists)
      - name: Download previous benchmark data
        uses: actions/cache/restore@v4
        with:
          path: ./cache
          key: ${{ matrix.os }}-benchmark

      # Run the `github-action-benchmark` action.
      # This step also EDITS the ./cache/benchmark-data.json file.
      # We do not need to add output.json to the cache directory.
      - name: Compare benchmarks
        uses: benchmark-action/github-action-benchmark@v1
        with:
          # What benchmark tool the output.json came from
          tool: 'pytest'
          # Where the output from the benchmark tool is stored
          output-file-path: output.json
          # Where the previous data file is stored
          external-data-json-path: ./cache/benchmark-data.json
          # Workflow will fail when an alert happens
          fail-on-alert: true
          # Comment on the PR if the branch is not a fork
          comment-on-alert: true
          # Enable Job Summary for PRs
          summary-always: true
          github-token: ${{ secrets.GITHUB_TOKEN }}

      - name: Store benchmark results
        uses: actions/cache/save@v4
        # only store the cache if being run on main
        if: github.ref == 'refs/heads/main'
        with:
          path: ./cache
          key: ${{ matrix.os }}-benchmark
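
      # Cache flow: every run restores the previous benchmark-data.json (when
      # present) so github-action-benchmark has a baseline to compare against,
      # but only runs on the main branch save the updated file back, so the
      # stored baseline always reflects main.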