Skip to content

Commit 314500e

Browse files
authored
Freeze the triton version in vllm-gaudi image to 3.1.0 (opea-project#1220)
The new triton version 3.2.0 can't work with vllm-gaudi. Freeze the triton version in vllm-gaudi image to 3.1.0. Issue created for vllm-fork: HabanaAI/vllm-fork#732 Signed-off-by: chensuyue <suyue.chen@intel.com>
1 parent 4c2e2d4 commit 314500e

8 files changed

+10
-5
lines changed

.github/workflows/_comps-workflow.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -70,8 +70,8 @@ jobs:
7070
cd ./vllm-openvino && git checkout v0.6.1 && git rev-parse HEAD && cd ../
7171
fi
7272
if [[ $(grep -c "vllm-gaudi:" ${docker_compose_yml}) != 0 ]]; then
73-
git clone https://github.com/HabanaAI/vllm-fork.git vllm-fork
74-
cd vllm-fork && git checkout v0.6.4.post2+Gaudi-1.19.0 && cd ../
73+
git clone --depth 1 --branch v0.6.4.post2+Gaudi-1.19.0 https://github.com/HabanaAI/vllm-fork.git
74+
sed -i 's/triton/triton==3.1.0/g' vllm-fork/requirements-hpu.txt
7575
fi
7676
- name: Get build list
7777
id: get-build-list

.github/workflows/push-image-build.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -87,8 +87,8 @@ jobs:
8787
cd ./vllm-openvino && git checkout v0.6.1 && git rev-parse HEAD && cd ../
8888
fi
8989
if [[ $(grep -c "vllm-gaudi:" ${docker_compose_path}) != 0 ]]; then
90-
git clone https://github.com/HabanaAI/vllm-fork.git vllm-fork
91-
cd vllm-fork && git checkout v0.6.4.post2+Gaudi-1.19.0 && cd ../
90+
git clone --depth 1 --branch v0.6.4.post2+Gaudi-1.19.0 https://github.com/HabanaAI/vllm-fork.git
91+
sed -i 's/triton/triton==3.1.0/g' vllm-fork/requirements-hpu.txt
9292
fi
9393
9494
- name: Build Image

.github/workflows/scripts/get_test_matrix.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,7 @@ function find_test_2() {
102102
test_files=$(printf '%s\n' "${changed_files[@]}" | grep -E "\.sh") || true
103103
for test_file in ${test_files}; do
104104
if [ -f $test_file ]; then
105-
_service=$(echo $test_file | cut -d'/' -f3 | cut -d'.' -f1 | cut -c6-)
105+
_service=$(echo $test_file | cut -d'/' -f3 | grep -E "\.sh" | cut -d'.' -f1 | cut -c6-)
106106
_fill_in_matrix $_service
107107
fi
108108
done

comps/third_parties/vllm/src/build_docker_vllm.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,7 @@ if [ "$hw_mode" = "hpu" ]; then
3838
git clone https://github.com/HabanaAI/vllm-fork.git
3939
cd ./vllm-fork/
4040
git checkout v0.6.4.post2+Gaudi-1.19.0
41+
sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
4142
docker build -f Dockerfile.hpu -t opea/vllm-gaudi:latest --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
4243
cd ..
4344
rm -rf vllm-fork

tests/agent/test_agent_langchain_on_intel_hpu.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -57,6 +57,7 @@ function build_vllm_docker_images() {
5757
fi
5858
cd ./vllm-fork
5959
git checkout v0.6.4.post2+Gaudi-1.19.0
60+
sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
6061
docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
6162
if [ $? -ne 0 ]; then
6263
echo "opea/vllm-gaudi:comps failed"

tests/llms/test_llms_doc-summarization_vllm_on_intel_hpu.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ function build_docker_images() {
2020
git clone https://github.com/HabanaAI/vllm-fork.git
2121
cd vllm-fork/
2222
git checkout v0.6.4.post2+Gaudi-1.19.0
23+
sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
2324
docker build --no-cache -f Dockerfile.hpu -t ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} --shm-size=128g .
2425
if [ $? -ne 0 ]; then
2526
echo "opea/vllm-gaudi built fail"

tests/llms/test_llms_faq-generation_vllm_on_intel_hpu.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ function build_docker_images() {
2020
git clone https://github.com/HabanaAI/vllm-fork.git
2121
cd vllm-fork/
2222
git checkout v0.6.4.post2+Gaudi-1.19.0
23+
sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
2324
docker build --no-cache -f Dockerfile.hpu -t ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} --shm-size=128g .
2425
if [ $? -ne 0 ]; then
2526
echo "opea/vllm-gaudi built fail"

tests/llms/test_llms_text-generation_service_vllm_on_intel_hpu.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ function build_docker_images() {
2020
git clone https://github.com/HabanaAI/vllm-fork.git
2121
cd vllm-fork/
2222
git checkout v0.6.4.post2+Gaudi-1.19.0
23+
sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
2324
docker build --no-cache -f Dockerfile.hpu -t ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} --shm-size=128g .
2425
if [ $? -ne 0 ]; then
2526
echo "opea/vllm-gaudi built fail"

0 commit comments

Comments (0)