From d0217982beef9b76a1bad406659d1e14dac2ffc3 Mon Sep 17 00:00:00 2001 From: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> Date: Tue, 14 May 2024 21:24:20 +0200 Subject: [PATCH] Bump test torch version (#708) --- .github/workflows/test_inc.yml | 6 +++--- optimum/intel/neural_compressor/modeling_base.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test_inc.yml b/.github/workflows/test_inc.yml index 1ede5e1938..6435d0b719 100644 --- a/.github/workflows/test_inc.yml +++ b/.github/workflows/test_inc.yml @@ -32,7 +32,7 @@ jobs: python -m pip install --upgrade pip pip install cmake pip install py-cpuinfo - pip install torch==2.2 torchaudio torchvision --extra-index-url https://download.pytorch.org/whl/cpu + pip install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 --index-url https://download.pytorch.org/whl/cpu pip install .[neural-compressor,diffusers,tests] pip install intel-extension-for-transformers pip install peft @@ -43,7 +43,7 @@ jobs: - name: Test IPEX run: | pip uninstall -y intel-extension-for-transformers - pip install torch==2.1.0 torchaudio==2.1.0 torchvision==0.16 --extra-index-url https://download.pytorch.org/whl/cpu - pip install intel-extension-for-pytorch==2.1.100 + pip install torch==2.3.0 torchaudio==2.3.0 torchvision==0.18 --extra-index-url https://download.pytorch.org/whl/cpu + pip install intel-extension-for-pytorch==2.3.0 pytest tests/neural_compressor/test_ipex.py diff --git a/optimum/intel/neural_compressor/modeling_base.py b/optimum/intel/neural_compressor/modeling_base.py index 2556a6048e..c6d5e7bac0 100644 --- a/optimum/intel/neural_compressor/modeling_base.py +++ b/optimum/intel/neural_compressor/modeling_base.py @@ -147,7 +147,7 @@ def _from_pretrained( try: quantization_config = PretrainedConfig.from_pretrained(model_save_dir / "quantize_config.json") algorithm = getattr(quantization_config, "quant_method", None) - if algorithm in {"rtn", "gptq", "awq", "autoaround"}: + if algorithm in {"rtn", "gptq", "awq", "autoround"}:
from intel_extension_for_transformers.transformers.modeling.modeling_auto import ( _BaseQBitsAutoModelClass, )