-
Notifications
You must be signed in to change notification settings - Fork 48
41 lines (34 loc) · 1.14 KB
/
test_llama_end_to_end.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
---
# CI workflow: runs the stateless-llama end-to-end tests (vmfb comparison
# and chat init) on a self-hosted runner.
name: Test Llama End to End

# Triggers: manual dispatch, any pull request, and pushes to the
# refactor branch below.
on:
  workflow_dispatch:
  pull_request:
  push:
    branches:
      - refactor-stateless_llama.py

jobs:
  test-llama:
    strategy:
      matrix:
        # Quote Python versions: unquoted they parse as YAML floats
        # (e.g. 3.10 would silently become 3.1).
        version: ["3.11"]
        # NOTE(review): matrix.os is declared but never referenced by
        # runs-on below — presumably intended as the runner label;
        # confirm and either use ${{ matrix.os }} or drop this axis.
        os: [xida-cpu-0]
    runs-on: [self-hosted]
    steps:
      - name: "Checkout Code"
        # NOTE(review): actions/checkout@v2 runs on a deprecated
        # Node.js runtime; consider upgrading to actions/checkout@v4.
        uses: actions/checkout@v2
      - name: Sync source deps
        run: |
          python -m pip install --upgrade pip
          # Note: We install in three steps in order to satisfy requirements
          # from non default locations first. Installing the PyTorch CPU
          # wheels saves multiple minutes and a lot of bandwidth on runner setup.
          pip install --index-url https://download.pytorch.org/whl/cpu \
            -r pytorch-cpu-requirements.txt \
            -r torchvision-requirements.txt
          pip install --upgrade -r requirements.txt
          pip install -e .[testing]
      - name: Run vmfb comparison test
        run: |
          pytest tests/custom_models/stateless_llama/vmfb_comparison_test.py
      - name: Run chat init test
        run: |
          pytest tests/custom_models/stateless_llama/chat_init_test.py