From d01af1618c7d50dc7757abc9e4e7033f7280c79c Mon Sep 17 00:00:00 2001 From: Alexander Kozlov Date: Mon, 16 Dec 2024 08:54:47 +0300 Subject: [PATCH 1/9] Revert explicit definition of U8 KV-cache (#1071) --- optimum/exporters/openvino/__main__.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/optimum/exporters/openvino/__main__.py b/optimum/exporters/openvino/__main__.py index 128643eb1..592cd85a4 100644 --- a/optimum/exporters/openvino/__main__.py +++ b/optimum/exporters/openvino/__main__.py @@ -474,9 +474,6 @@ class StoreAttr(object): from optimum.intel.openvino.quantization import _weight_only_quantization _weight_only_quantization(submodel, quantization_config) - if "text-generation" in task: - submodel.set_rt_info("u8", ["runtime_options", "KV_CACHE_PRECISION"]) - compressed_submodel_path = submodel_path.parent / f"{submodel_path.stem}_compressed.xml" save_model(submodel, compressed_submodel_path, compress_to_fp16=False) del submodel From 847adbc75cbd311b75001b508452d0b6e90d1df6 Mon Sep 17 00:00:00 2001 From: kaixuanliu Date: Mon, 16 Dec 2024 17:03:50 +0800 Subject: [PATCH 2/9] refine Dockerfile to support both cpu and xpu platform (#981) * refine Dockerfile to support both cpu and xpu Signed-off-by: Liu, Kaixuan * nice code Signed-off-by: Liu, Kaixuan * fix CI issue Signed-off-by: Liu, Kaixuan * rename Dockerfile.intel to Dockerfile.ipex Signed-off-by: Liu, Kaixuan * upgrade ipex to 2.5 version; replace extra-index-url Signed-off-by: Liu, Kaixuan --------- Signed-off-by: Liu, Kaixuan --- .github/workflows/dockerfile_sanity.yml | 8 +-- Dockerfile.ipex | 73 +++++++++++++++++++++++++ docker/Dockerfile.intel | 53 ------------------ 3 files changed, 77 insertions(+), 57 deletions(-) create mode 100644 Dockerfile.ipex delete mode 100644 docker/Dockerfile.intel diff --git a/.github/workflows/dockerfile_sanity.yml b/.github/workflows/dockerfile_sanity.yml index 060b80ca4..738be8031 100644 --- a/.github/workflows/dockerfile_sanity.yml +++ b/.github/workflows/dockerfile_sanity.yml @@ -5,13 +5,13 @@ on: branches: - main paths: - - "docker/Dockerfile.intel" - + - 'Dockerfile.ipex' + pull_request: branches: - main paths: - - "docker/Dockerfile.intel" + - 'Dockerfile.ipex' jobs: build_and_run: @@ -27,7 +27,7 @@ jobs: - name: Build and Run Docker Image run: | IMAGE_NAME="intel_image:latest" - docker build -f docker/Dockerfile.intel -t $IMAGE_NAME . + docker build -f Dockerfile.ipex -t $IMAGE_NAME . if [ $? -ne 0 ]; then echo "Docker image build failed." 
exit 1 diff --git a/Dockerfile.ipex b/Dockerfile.ipex new file mode 100644 index 000000000..a03b1d26a --- /dev/null +++ b/Dockerfile.ipex @@ -0,0 +1,73 @@ +ARG PLATFORM=cpu + +FROM ubuntu:22.04 as cpu +WORKDIR /usr/src/ +RUN --mount=type=cache,id=apt-dev,target=/var/cache/apt \ + sh -c "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \ + ca-certificates \ + git \ + curl \ + vim \ + build-essential \ + ccache \ + libgoogle-perftools-dev \ + numactl \ + cmake \ + libjpeg-dev \ + pybind11-dev \ + libpng-dev \ + python3 \ + python3-pip \ + && rm -rf /var/lib/apt/lists/*" +RUN /usr/sbin/update-ccache-symlinks +RUN mkdir /opt/ccache && ccache --set-config=cache_dir=/opt/ccache + +ARG IPEX_VERSION=2.5.0 +ARG PYTORCH_VERSION=2.5.1 +ARG TORCHVISION_VERSION=0.20.1+cpu +ARG TORCHAUDIO_VERSION=2.5.1+cpu + +RUN python3 -m pip install --no-cache-dir \ + torch==${PYTORCH_VERSION}+cpu \ + torchvision==${TORCHVISION_VERSION} \ + torchaudio==${TORCHAUDIO_VERSION} \ + --index-url https://download.pytorch.org/whl/cpu && \ + python3 -m pip install intel-openmp -f https://download.pytorch.org/whl/torch_stable.html && \ + python3 -m pip install intel-extension-for-pytorch==$IPEX_VERSION && \ + python3 -m pip install oneccl_bind_pt --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/cpu/cn/ && \ + python3 -m pip install --no-cache-dir py-libnuma + +ARG KMP_BLOCKTIME=1 +ENV KMP_BLOCKTIME=${KMP_BLOCKTIME} +ARG KMP_HW_SUBSET=1T +ENV KMP_HW_SUBSET=${KMP_HW_SUBSET} +ENV LD_PRELOAD="/usr/lib/x86_64-linux-gnu/libtcmalloc.so" + +FROM intel/intel-extension-for-pytorch:2.3.110-xpu as xpu +WORKDIR /usr/src/ + +RUN --mount=type=cache,id=apt-dev,target=/var/cache/apt \ + sh -c "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \ + ca-certificates \ + git \ + curl \ + vim \ + ccache \ + libgoogle-perftools-dev \ + numactl \ + libjpeg-dev \ + pybind11-dev \ + libpng-dev \ + && rm -rf /var/lib/apt/lists/*" +RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --dearmor | tee /usr/share/keyrings/intel-graphics.gpg > /dev/null + +RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB \ +| gpg --dearmor | tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null && echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" | tee /etc/apt/sources.list.d/oneAPI.list + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt install -y intel-basekit xpu-smi cmake ninja-build pciutils + +FROM ${PLATFORM} + +COPY optimum optimum +COPY Makefile setup.cfg setup.py pyproject.toml README.md ./ +RUN pip install . 
diff --git a/docker/Dockerfile.intel b/docker/Dockerfile.intel deleted file mode 100644 index ad4ff63e8..000000000 --- a/docker/Dockerfile.intel +++ /dev/null @@ -1,53 +0,0 @@ -# syntax = docker/dockerfile:1 -# based onhttps://github.com/pytorch/pytorch/blob/master/Dockerfile -# -# NOTE: To build this you will need a docker version >= 19.03 and DOCKER_BUILDKIT=1 -# -# If you do not use buildkit you are not going to have a good time -# -# For reference: -# https://docs.docker.com/develop/develop-images/build_enhancements/ - -ARG BASE_IMAGE=ubuntu:22.04 -FROM ${BASE_IMAGE} - -RUN --mount=type=cache,id=apt-dev,target=/var/cache/apt \ - sh -c "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \ - ca-certificates \ - git \ - curl \ - vim \ - build-essential \ - ccache \ - libgoogle-perftools-dev \ - numactl \ - cmake \ - libjpeg-dev \ - pybind11-dev \ - libpng-dev \ - python3 \ - python3-pip \ - && rm -rf /var/lib/apt/lists/*" -RUN /usr/sbin/update-ccache-symlinks -RUN mkdir /opt/ccache && ccache --set-config=cache_dir=/opt/ccache - -ARG IPEX_VERSION=2.3.100 -ARG PYTORCH_VERSION=2.3.1 -ARG TORCHVISION_VERSION=0.18.1+cpu -ARG TORCHAUDIO_VERSION=2.3.1+cpu - -RUN python3 -m pip install --no-cache-dir \ - intel-openmp \ - torch==${PYTORCH_VERSION}+cpu \ - torchvision==${TORCHVISION_VERSION} \ - torchaudio==${TORCHAUDIO_VERSION} \ - -f https://download.pytorch.org/whl/torch_stable.html && \ - python3 -m pip install intel-extension-for-pytorch==$IPEX_VERSION && \ - python3 -m pip install oneccl_bind_pt --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/cpu/us/ && \ - python3 -m pip install --no-cache-dir py-libnuma - -ARG KMP_BLOCKTIME=1 -ENV KMP_BLOCKTIME=${KMP_BLOCKTIME} -ARG KMP_HW_SUBSET=1T -ENV KMP_HW_SUBSET=${KMP_HW_SUBSET} -ENV LD_PRELOAD="/usr/lib/x86_64-linux-gnu/libtcmalloc.so" From 32ceae1d019f6884db609bd1aa48918820ebca61 Mon Sep 17 00:00:00 2001 From: jiqing-feng Date: Mon, 16 Dec 2024 17:10:41 +0800 Subject: [PATCH 3/9] fix when attention_mask=None (#1067) * fix when attention_mask=None Signed-off-by: jiqing-feng * fix position_ids Signed-off-by: jiqing-feng * add tests for forward only with input_ids Signed-off-by: jiqing-feng * fix input dtype Signed-off-by: jiqing-feng --------- Signed-off-by: jiqing-feng --- optimum/exporters/ipex/modeling_utils.py | 6 ++--- optimum/intel/ipex/modeling_base.py | 2 ++ tests/ipex/test_modeling.py | 33 +++++++++++++++++++++++- 3 files changed, 37 insertions(+), 4 deletions(-) diff --git a/optimum/exporters/ipex/modeling_utils.py b/optimum/exporters/ipex/modeling_utils.py index 8d5f8afa1..ca51c47fb 100755 --- a/optimum/exporters/ipex/modeling_utils.py +++ b/optimum/exporters/ipex/modeling_utils.py @@ -180,7 +180,7 @@ def _llama_model_forward( position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device ) - position_ids = position_ids.unsqueeze(0) + position_ids = position_ids.unsqueeze(0).repeat_interleave(input_ids.shape[0], 0) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) @@ -297,7 +297,7 @@ def _falcon_model_forward( ) if position_ids is None: - position_ids = cache_position.unsqueeze(0) + position_ids = cache_position.unsqueeze(0).repeat_interleave(input_ids.shape[0], 0) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head @@ -419,7 +419,7 @@ def _gpt2_model_forward( past_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if position_ids is None: 
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) - position_ids = position_ids.unsqueeze(0) + position_ids = position_ids.unsqueeze(0).repeat_interleave(input_ids.shape[0], 0) if inputs_embeds is None: inputs_embeds = self.wte(input_ids) diff --git a/optimum/intel/ipex/modeling_base.py b/optimum/intel/ipex/modeling_base.py index 8611bddd2..d8f830e51 100644 --- a/optimum/intel/ipex/modeling_base.py +++ b/optimum/intel/ipex/modeling_base.py @@ -276,6 +276,8 @@ def forward( attention_mask: Optional[torch.FloatTensor] = None, **kwargs, ) -> CausalLMOutputWithPast: + if self.add_patch and input_ids is not None and attention_mask is None: + attention_mask = torch.ones_like(input_ids) return self.model(input_ids=input_ids, attention_mask=attention_mask, **kwargs) def _prepare_generation_config( diff --git a/tests/ipex/test_modeling.py b/tests/ipex/test_modeling.py index 3a6abd9c3..b595f6139 100644 --- a/tests/ipex/test_modeling.py +++ b/tests/ipex/test_modeling.py @@ -241,7 +241,6 @@ def test_compare_to_transformers(self, model_arch): model_id = MODEL_NAMES[model_arch] set_seed(SEED) dtype = torch.float16 if IS_XPU_AVAILABLE else torch.float32 - # Test model forward do not need cache. ipex_model = IPEXModelForCausalLM.from_pretrained(model_id, torch_dtype=dtype, device_map=DEVICE) self.assertIsInstance(ipex_model.config, PretrainedConfig) tokenizer = AutoTokenizer.from_pretrained(model_id) @@ -275,6 +274,38 @@ def test_compare_to_transformers(self, model_arch): self.assertTrue(torch.allclose(outputs.logits, loaded_model_outputs.logits, atol=1e-7)) self.assertTrue(torch.allclose(outputs.logits, init_model_outputs.logits, atol=1e-7)) + @parameterized.expand(SUPPORTED_ARCHITECTURES) + def test_forward(self, model_arch): + model_id = MODEL_NAMES[model_arch] + set_seed(SEED) + dtype = torch.float16 if IS_XPU_AVAILABLE else torch.float32 + ipex_model = IPEXModelForCausalLM.from_pretrained(model_id, torch_dtype=dtype, device_map=DEVICE) + self.assertIsInstance(ipex_model.config, PretrainedConfig) + input_ids = torch.Tensor([[1, 2, 3], [4, 5, 6]]).to(torch.long) + outputs = ipex_model(input_ids) + + self.assertIsInstance(outputs.logits, torch.Tensor) + + transformers_model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=dtype, device_map=DEVICE) + with torch.no_grad(): + transformers_outputs = transformers_model(input_ids) + + # Test re-load model + with tempfile.TemporaryDirectory() as tmpdirname: + ipex_model.save_pretrained(tmpdirname) + loaded_model = self.IPEX_MODEL_CLASS.from_pretrained(tmpdirname, torch_dtype=dtype, device_map=DEVICE) + loaded_model_outputs = loaded_model(input_ids) + + # Test init method + init_model = self.IPEX_MODEL_CLASS(transformers_model) + init_model_outputs = init_model(input_ids) + + # Compare tensor outputs + self.assertTrue(torch.allclose(outputs.logits, transformers_outputs.logits, atol=1e-4)) + # To avoid float pointing error + self.assertTrue(torch.allclose(outputs.logits, loaded_model_outputs.logits, atol=1e-7)) + self.assertTrue(torch.allclose(outputs.logits, init_model_outputs.logits, atol=1e-7)) + @parameterized.expand(SUPPORTED_ARCHITECTURES) def test_pipeline(self, model_arch): dtype = torch.float16 if IS_XPU_AVAILABLE else torch.float32 From 09d080f5d57c5ba72951bb7acd9438f5e7664864 Mon Sep 17 00:00:00 2001 From: Helena Kloosterman Date: Mon, 16 Dec 2024 11:21:36 +0100 Subject: [PATCH 4/9] Update dependencies for older transformers (#1072) --- .github/workflows/test_openvino.yml | 5 +++-- 
.github/workflows/test_openvino_slow.yml | 4 ++-- tests/openvino/test_quantization.py | 4 ++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/.github/workflows/test_openvino.yml b/.github/workflows/test_openvino.yml index 7583c5107..db35324a9 100644 --- a/.github/workflows/test_openvino.yml +++ b/.github/workflows/test_openvino.yml @@ -1,6 +1,7 @@ name: OpenVINO - Test on: + workflow_dispatch: push: branches: - main @@ -46,9 +47,9 @@ jobs: pip install .[openvino,openvino-tokenizers,diffusers,tests] transformers[testing] - if: ${{ matrix.transformers-version != 'latest' }} - name: Downgrade Transformers and Accelerate + name: Install specific dependencies and versions required for older transformers run: | - pip install transformers==${{ matrix.transformers-version }} accelerate==0.* peft==0.13.* + pip install transformers==${{ matrix.transformers-version }} accelerate==0.* peft==0.13.* diffusers==0.30.* transformers_stream_generator - if: ${{ matrix.test-pattern == '*modeling*' }} name: Uninstall NNCF diff --git a/.github/workflows/test_openvino_slow.yml b/.github/workflows/test_openvino_slow.yml index 9ad5ef269..8c3d9b2d3 100644 --- a/.github/workflows/test_openvino_slow.yml +++ b/.github/workflows/test_openvino_slow.yml @@ -46,8 +46,8 @@ jobs: pip uninstall -y nncf - if: ${{ matrix.transformers-version != 'latest' }} - name: Downgrade Transformers and Accelerate - run: pip install transformers==${{ matrix.transformers-version }} accelerate==0.* peft==0.13.* + name: Install specific dependencies and versions required for older transformers + run: pip install transformers==${{ matrix.transformers-version }} accelerate==0.* peft==0.13.*, diffusers==0.30.* transformers_stream_generator - name: Pip freeze run: pip freeze diff --git a/tests/openvino/test_quantization.py b/tests/openvino/test_quantization.py index 1fd58646e..d02dea3f1 100644 --- a/tests/openvino/test_quantization.py +++ b/tests/openvino/test_quantization.py @@ -106,8 +106,8 @@ class OVQuantizerTest(unittest.TestCase): weight_only=False, smooth_quant_alpha=0.95, ), - (14, 22, 21) if is_transformers_version("<=", "4.36.0") else (14, 22, 25), - (14, 21, 17) if is_transformers_version("<=", "4.36.0") else (14, 22, 18), + (14, 22, 21) if is_transformers_version("<=", "4.42.4") else (14, 22, 25), + (14, 21, 17) if is_transformers_version("<=", "4.42.4") else (14, 22, 18), ), ] From 9c4bccc858605b016e40daf04604acf5588a99ea Mon Sep 17 00:00:00 2001 From: jiqing-feng Date: Mon, 16 Dec 2024 18:34:56 +0800 Subject: [PATCH 5/9] Update README.md (#1068) * update readme Signed-off-by: jiqing-feng * update readme Signed-off-by: jiqing-feng * update ipex readme Signed-off-by: jiqing-feng * update ipex notebook Signed-off-by: jiqing-feng * Update README.md * Update notebooks/ipex/text_generation.ipynb --------- Signed-off-by: jiqing-feng Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> --- README.md | 4 ++-- notebooks/ipex/text_generation.ipynb | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 0cd317c78..28c580068 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ 🤗 Optimum Intel is the interface between the 🤗 Transformers and Diffusers libraries and the different tools and libraries provided by Intel to accelerate end-to-end pipelines on Intel architectures. 
-[Intel Extension for PyTorch](https://intel.github.io/intel-extension-for-pytorch/#introduction) is an open-source library which provides optimizations for both eager mode and graph mode, however, compared to eager mode, graph mode in PyTorch* normally yields better performance from optimization techniques, such as operation fusion. +[Intel Extension for PyTorch](https://intel.github.io/intel-extension-for-pytorch/#introduction) is an open-source library which provides optimizations like faster attention and operators fusion. Intel [Neural Compressor](https://www.intel.com/content/www/us/en/developer/tools/oneapi/neural-compressor.html) is an open-source library enabling the usage of the most popular compression techniques such as quantization, pruning and knowledge distillation. It supports automatic accuracy-driven tuning strategies in order for users to easily generate quantized model. The users can easily apply static, dynamic and aware-training quantization approaches while giving an expected accuracy criteria. It also supports different weight pruning techniques enabling the creation of pruned model giving a predefined sparsity target. @@ -159,7 +159,7 @@ optimized_model = OVModelForSequenceClassification.from_pretrained(save_dir) ## IPEX -To load your IPEX model, you can just replace your `AutoModelForXxx` class with the corresponding `IPEXModelForXxx` class. You can set `export=True` to load a PyTorch checkpoint, export your model via TorchScript and apply IPEX optimizations : both operators optimization (replaced with customized IPEX operators) and graph-level optimization (like operators fusion) will be applied on your model. +To load your IPEX model, you can just replace your `AutoModelForXxx` class with the corresponding `IPEXModelForXxx` class. It will load a PyTorch checkpoint, and apply IPEX operators optimization (replaced with customized IPEX operators). ```diff from transformers import AutoTokenizer, pipeline - from transformers import AutoModelForCausalLM diff --git a/notebooks/ipex/text_generation.ipynb b/notebooks/ipex/text_generation.ipynb index d1a62d920..4c97d5b6b 100644 --- a/notebooks/ipex/text_generation.ipynb +++ b/notebooks/ipex/text_generation.ipynb @@ -11,7 +11,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "To load your IPEX model, you can just replace your `AutoModelForXxx` class with the corresponding `IPEXModelForXxx` class. You can set `export=True` to load a PyTorch checkpoint, export your model via TorchScript and apply IPEX optimizations : both operators optimization (replaced with customized IPEX operators) and graph-level optimization (like operators fusion) will be applied on your model." + "To load your IPEX model, you can just replace your `AutoModelForXxx` class with the corresponding `IPEXModelForXxx` class. It could apply IPEX, providing optimizations like faster attention and operators fusion." ] }, { @@ -60,7 +60,7 @@ } ], "source": [ - "model = IPEXModelForCausalLM.from_pretrained(\"gpt2\", torch_dtype=torch.bfloat16, export=True)\n", + "model = IPEXModelForCausalLM.from_pretrained(\"gpt2\", torch_dtype=torch.bfloat16)\n", "tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n", "input_sentence = [\"Answer the following yes/no question by reasoning step-by-step please. 
Can you write a whole Haiku in a single tweet?\"]\n", "model_inputs = tokenizer(input_sentence, return_tensors=\"pt\")\n", From 7601bfd436455bf1a0a83184fe53eb487f265905 Mon Sep 17 00:00:00 2001 From: kaixuanliu Date: Tue, 17 Dec 2024 17:53:36 +0800 Subject: [PATCH 6/9] fix import bug when ipex is available but sentence_transformers is not; add `accelerate` dependency to run with XPU (#1077) Signed-off-by: Liu, Kaixuan --- optimum/intel/__init__.py | 1 + setup.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/optimum/intel/__init__.py b/optimum/intel/__init__.py index 335eed563..b441b76f9 100644 --- a/optimum/intel/__init__.py +++ b/optimum/intel/__init__.py @@ -51,6 +51,7 @@ "IPEXModel", ] else: + _import_structure["utils.dummy_ipex_objects"] = [] _import_structure["ipex"] = [ "IPEXModelForCausalLM", "IPEXModelForSequenceClassification", diff --git a/setup.py b/setup.py index ca415fca3..d9b3b8642 100644 --- a/setup.py +++ b/setup.py @@ -66,7 +66,7 @@ "nncf": ["nncf>=2.14.0"], "openvino": ["nncf>=2.14.0", "openvino>=2024.5.0", "openvino-tokenizers>=2024.5.0"], "neural-compressor": ["neural-compressor[pt]>3.0", "accelerate", "transformers<4.46"], - "ipex": ["intel-extension-for-pytorch>=2.4", "transformers>4.45,<4.47"], + "ipex": ["intel-extension-for-pytorch>=2.4", "transformers>4.45,<4.47", "accelerate"], "diffusers": ["diffusers"], "quality": QUALITY_REQUIRE, "tests": TESTS_REQUIRE, From f030583f6db0f0aaa8985fa8dd63ca8bc485f920 Mon Sep 17 00:00:00 2001 From: Nikita Savelyev Date: Tue, 17 Dec 2024 11:16:19 +0100 Subject: [PATCH 7/9] Add a note about data-aware mixed precision assignment (#1075) * Add a note about data-aware mixed precision assignment * Add a note to dataset parameter * Update docs/source/openvino/export.mdx Co-authored-by: Helena Kloosterman * Add a warning --------- Co-authored-by: Helena Kloosterman --- docs/source/openvino/export.mdx | 7 +++++-- optimum/commands/export/openvino.py | 7 +++++-- optimum/intel/openvino/configuration.py | 16 +++++++++++++++- 3 files changed, 25 insertions(+), 5 deletions(-) diff --git a/docs/source/openvino/export.mdx b/docs/source/openvino/export.mdx index 487688521..3e7e458c0 100644 --- a/docs/source/openvino/export.mdx +++ b/docs/source/openvino/export.mdx @@ -78,7 +78,8 @@ Optional arguments: --ratio RATIO A parameter used when applying 4-bit quantization to control the ratio between 4-bit and 8-bit quantization. If set to 0.8, 80% of the layers will be quantized to int4 while 20% will be quantized to int8. This helps to achieve better accuracy at the sacrifice of the model size - and inference latency. Default value is 1.0. + and inference latency. Default value is 1.0. Note: If dataset is provided, and the ratio is + less than 1.0, then data-aware mixed precision assignment will be applied. --sym Whether to apply symmetric quantization --group-size GROUP_SIZE The group size to use for quantization. Recommended value is 128 and -1 uses per-column @@ -94,7 +95,9 @@ Optional arguments: can use the one from the list ['auto','wikitext2','c4','c4-new']. With 'auto' the dataset will be collected from model's generations. For diffusion models it should be on of ['conceptual_captions','laion/220k-GPT4Vision-captions-from-LIVIS','laion/filtered-wit']. For - visual language models the dataset must be set to 'contextual'. + visual language models the dataset must be set to 'contextual'. 
Note: if none of the data-aware + compression algorithms are selected and ratio parameter is omitted or equals 1.0, the dataset + argument will not have an effect on the resulting model. --all-layers Whether embeddings and last MatMul layers should be compressed to INT4. If not provided an weight compression is applied, they are compressed to INT8. --awq Whether to apply AWQ algorithm. AWQ improves generation quality of INT4-compressed LLMs, but diff --git a/optimum/commands/export/openvino.py b/optimum/commands/export/openvino.py index 61c21c5c7..6965efcb5 100644 --- a/optimum/commands/export/openvino.py +++ b/optimum/commands/export/openvino.py @@ -102,7 +102,8 @@ def parse_args_openvino(parser: "ArgumentParser"): default=None, help=( "A parameter used when applying 4-bit quantization to control the ratio between 4-bit and 8-bit quantization. If set to 0.8, 80%% of the layers will be quantized to int4 " - "while 20%% will be quantized to int8. This helps to achieve better accuracy at the sacrifice of the model size and inference latency. Default value is 1.0." + "while 20%% will be quantized to int8. This helps to achieve better accuracy at the sacrifice of the model size and inference latency. Default value is 1.0. " + "Note: If dataset is provided, and the ratio is less than 1.0, then data-aware mixed precision assignment will be applied." ), ) optional_group.add_argument( @@ -140,7 +141,9 @@ def parse_args_openvino(parser: "ArgumentParser"): "dataset will be collected from model's generations. " "For diffusion models it should be on of ['conceptual_captions'," "'laion/220k-GPT4Vision-captions-from-LIVIS','laion/filtered-wit']. " - "For visual language models the dataset must be set to 'contextual'." + "For visual language models the dataset must be set to 'contextual'. " + "Note: if none of the data-aware compression algorithms are selected and ratio parameter is omitted or " + "equals 1.0, the dataset argument will not have an effect on the resulting model." ), ) optional_group.add_argument( diff --git a/optimum/intel/openvino/configuration.py b/optimum/intel/openvino/configuration.py index a0fc68361..4fdfe368a 100644 --- a/optimum/intel/openvino/configuration.py +++ b/optimum/intel/openvino/configuration.py @@ -344,6 +344,8 @@ class OVWeightQuantizationConfig(OVQuantizationConfigBase): ratio (`float`, defaults to 1.0): The ratio between baseline and backup precisions (e.g. 0.9 means 90% of layers quantized to INT4_ASYM and the rest to INT8_ASYM). + Note: If dataset is provided, and the ratio is less than 1.0, then data-aware mixed precision assignment + will be applied. all_layers (`bool`, *optional*): Defines how many layers are compressed to 4-bits while the rest are kept in 8-bit precision. 
sensitivity_metric (`str`, *optional*): @@ -441,7 +443,7 @@ def post_init(self): Safety checker that arguments are correct """ super().post_init() - if self.ratio is not None and not (0 <= self.ratio <= 1): + if not (0 <= self.ratio <= 1): raise ValueError("`ratio` must between 0 and 1.") if self.group_size is not None and self.group_size != -1 and self.group_size <= 0: raise ValueError("`group_size` must be greater than 0 or equal to -1") @@ -461,6 +463,18 @@ def post_init(self): or {stable_diffusion_datasets} for diffusion models, but we found {self.dataset}""" ) + if self.dataset is not None and not ( + self.quant_method == OVQuantizationMethod.AWQ + or self.scale_estimation + or self.gptq + or self.lora_correction + or (self.ratio < 1.0 and self.sensitivity_metric != nncf.SensitivityMetric.WEIGHT_QUANTIZATION_ERROR) + ): + logger.warning( + "The provided dataset won't have any effect on the resulting compressed model because no data-aware " + "quantization algorithm is selected and compression ratio is 1.0." + ) + if self.bits not in [4, 8]: raise ValueError(f"Only support quantization to [4,8] bits but found {self.bits}") From 3c229fc344b02f65cfbd8e1e23a0c7329cb034ee Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Tue, 17 Dec 2024 11:33:10 +0100 Subject: [PATCH 8/9] Reduce img size in dummy_inputs for FLUX (#1070) * Reduce img size in dummy_inputs for FLUX * change dims in function signature * Update optimum/exporters/openvino/model_configs.py --------- Co-authored-by: Ilyas Moutawwakil <57442720+IlyasMoutawwakil@users.noreply.github.com> --- optimum/exporters/openvino/model_configs.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/optimum/exporters/openvino/model_configs.py b/optimum/exporters/openvino/model_configs.py index d9c0165d9..1ffcabb48 100644 --- a/optimum/exporters/openvino/model_configs.py +++ b/optimum/exporters/openvino/model_configs.py @@ -1804,8 +1804,9 @@ def __init__( normalized_config: NormalizedVisionConfig, batch_size: int = DEFAULT_DUMMY_SHAPES["batch_size"], num_channels: int = DEFAULT_DUMMY_SHAPES["num_channels"], - width: int = DEFAULT_DUMMY_SHAPES["width"], - height: int = DEFAULT_DUMMY_SHAPES["height"], + width: int = DEFAULT_DUMMY_SHAPES["width"] // 4, + height: int = DEFAULT_DUMMY_SHAPES["height"] // 4, + # Reduce img shape by 4 for FLUX to reduce memory usage on conversion **kwargs, ): super().__init__(task, normalized_config, batch_size, num_channels, width, height, **kwargs) From a76be08b5b287d8972244451d14050fbd9d3a921 Mon Sep 17 00:00:00 2001 From: jiqing-feng Date: Tue, 17 Dec 2024 20:09:14 +0800 Subject: [PATCH 9/9] Enable Text2text task on ipex (#1054) * enable IPEXModelForSeq2SeqLM Signed-off-by: jiqing-feng * set static cache Signed-off-by: jiqing-feng * add tests for IPEXModelForSeq2SeqLM Signed-off-by: jiqing-feng * add docs Signed-off-by: jiqing-feng * fix readme Signed-off-by: jiqing-feng * refactor compile Signed-off-by: jiqing-feng * fix check Signed-off-by: jiqing-feng * fix ruff check Signed-off-by: jiqing-feng * fix check Signed-off-by: jiqing-feng * fix tests Signed-off-by: jiqing-feng * fix opt tests Signed-off-by: jiqing-feng --------- Signed-off-by: jiqing-feng --- docs/source/ipex/inference.mdx | 3 +- docs/source/ipex/models.mdx | 1 + optimum/intel/__init__.py | 2 + optimum/intel/ipex/__init__.py | 1 + optimum/intel/ipex/modeling_base.py | 152 +++++++++++++++++----- optimum/intel/ipex/utils.py | 1 + optimum/intel/pipelines/pipeline_base.py | 19 +++ optimum/intel/utils/dummy_ipex_objects.py | 11 ++ 
tests/ipex/test_modeling.py | 127 +++++++++++++++++- tests/ipex/test_pipelines.py | 44 +++++++ 10 files changed, 330 insertions(+), 31 deletions(-) diff --git a/docs/source/ipex/inference.mdx b/docs/source/ipex/inference.mdx index 54b586924..72826da59 100644 --- a/docs/source/ipex/inference.mdx +++ b/docs/source/ipex/inference.mdx @@ -14,7 +14,7 @@ Optimum Intel can be used to load models from the [Hub](https://huggingface.co/m ## Loading -You can load your model and apply IPEX optimizations (apply torch.compile for non-generation tasks). For supported architectures like LLaMA, BERT and ViT, further optimizations will be applied by patching the model to use custom operators. +You can load your model and apply IPEX optimizations (apply torch.compile except text-generation tasks). For supported architectures like LLaMA, BERT and ViT, further optimizations will be applied by patching the model to use custom operators. For now, support is enabled for Intel CPU/GPU. Previous models converted to TorchScript will be deprecated in v1.22. ```diff @@ -43,3 +43,4 @@ As shown in the table below, each task is associated with a class enabling to au | `IPEXModelForMaskedLM` | `fill-mask` | | `IPEXModelForAudioClassification` | `audio-classification` | | `IPEXModelForCausalLM` | `text-generation` | +| `IPEXModelForSeq2SeqLM` | `text2text-generation` | diff --git a/docs/source/ipex/models.mdx b/docs/source/ipex/models.mdx index 346ca2659..b8cd6c482 100644 --- a/docs/source/ipex/models.mdx +++ b/docs/source/ipex/models.mdx @@ -40,6 +40,7 @@ Here is the list of the supported architectures : - Roberta - Roformer - SqueezeBert +- T5 - UniSpeech - Vit - Wav2Vec2 diff --git a/optimum/intel/__init__.py b/optimum/intel/__init__.py index b441b76f9..ad9fdca07 100644 --- a/optimum/intel/__init__.py +++ b/optimum/intel/__init__.py @@ -54,6 +54,7 @@ _import_structure["utils.dummy_ipex_objects"] = [] _import_structure["ipex"] = [ "IPEXModelForCausalLM", + "IPEXModelForSeq2SeqLM", "IPEXModelForSequenceClassification", "IPEXModelForMaskedLM", "IPEXModelForTokenClassification", @@ -248,6 +249,7 @@ IPEXModelForImageClassification, IPEXModelForMaskedLM, IPEXModelForQuestionAnswering, + IPEXModelForSeq2SeqLM, IPEXModelForSequenceClassification, IPEXModelForTokenClassification, ) diff --git a/optimum/intel/ipex/__init__.py b/optimum/intel/ipex/__init__.py index 62e6afcf6..9aae96b08 100644 --- a/optimum/intel/ipex/__init__.py +++ b/optimum/intel/ipex/__init__.py @@ -20,6 +20,7 @@ IPEXModelForImageClassification, IPEXModelForMaskedLM, IPEXModelForQuestionAnswering, + IPEXModelForSeq2SeqLM, IPEXModelForSequenceClassification, IPEXModelForTokenClassification, ) diff --git a/optimum/intel/ipex/modeling_base.py b/optimum/intel/ipex/modeling_base.py index d8f830e51..af36d06f4 100644 --- a/optimum/intel/ipex/modeling_base.py +++ b/optimum/intel/ipex/modeling_base.py @@ -30,6 +30,7 @@ AutoModelForImageClassification, AutoModelForMaskedLM, AutoModelForQuestionAnswering, + AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, GenerationConfig, @@ -60,8 +61,8 @@ _IPEX_SUPPORT_MODEL_TYPES = ("llama", "bert", "vit", "falcon", "gpt2") _IPEX_EXPORTED_GENERATION_METHODS = ("sample", "greedy_search", "beam_sample", "beam_search", "assisted_generation") _IPEX_MINIMUM_VERSION_FOR_COMPILE = "2.5.0" -# TODO: Already fixed in torch 2.6, will enable when torch upgrading to 2.6 -_COMPILE_NOT_READY_MODEL_TYPES = ("electra", "roformer", "beit") +# TODO: Some models are already fixed in torch 2.6, will enable them 
when torch upgrading to 2.6 +_COMPILE_NOT_READY_MODEL_TYPES = ("electra", "roformer", "gpt_neox", "beit", "llama", "falcon", "gpt2") def _is_patched_with_ipex(model, task, use_cache: bool = True): @@ -84,15 +85,21 @@ def __init__( model, config: PretrainedConfig = None, model_save_dir: Optional[Union[str, Path, TemporaryDirectory]] = None, + warmup: Optional[bool] = True, **kwargs, ): config = config or model.config OptimizedModel.__init__(self, model=model, config=config) + self._supports_cache_class = getattr(model, "_supports_cache_class", None) + self._supports_sdpa = getattr(model, "_supports_sdpa", None) + self._supports_quantized_cache = getattr(model, "_supports_quantized_cache", None) + self._supports_static_cache = getattr(model, "_supports_static_cache", None) self._dtype = self.model.dtype if self.model.dtype is not None else torch.float32 self.use_cache = kwargs.get("use_cache", False) self.model_save_dir = model_save_dir self._add_patch = _is_patched_with_ipex(model, self.export_feature, self.use_cache) + self.compiled = False self.input_names = set(inspect.signature(model.forward).parameters) @@ -104,25 +111,10 @@ def __init__( if hasattr(self.auto_model_class, "register"): self.auto_model_class.register(AutoConfig, self.__class__) - # Non-generation tasks can use torch.compile to get acceleration. - if ( - model.device.type == "cpu" - and self.export_feature not in _IPEX_EXPORTED_GENERATION_TASKS - and config.model_type not in _COMPILE_NOT_READY_MODEL_TYPES - and is_ipex_version(">=", _IPEX_MINIMUM_VERSION_FOR_COMPILE) - ): - from torch._inductor import config - - # System level optimization - torch._inductor.config.cpp_wrapper = True - os.environ["TORCHINDUCTOR_FREEZING"] = "1" - logger.info("Enable torch.compile optimization, start warm up") - self.model.forward = torch.compile(self.model.forward) - inputs = prepare_jit_inputs(model, self.export_feature, False) - with torch.no_grad(): - self.model(**inputs) - self.model(**inputs) - logger.info("Warm up end") + self.maybe_apply_torch_compile() + + if warmup: + self._init_warmup() @classmethod def _from_transformers(cls, *args, **kwargs): @@ -192,6 +184,31 @@ def to(self, device: Union[torch.device, str]): def can_generate(self): return isinstance(self, GenerationMixin) + def maybe_apply_torch_compile(self): + if ( + self.model.device.type != "cpu" + or self.config.model_type in _COMPILE_NOT_READY_MODEL_TYPES + or is_ipex_version("<", _IPEX_MINIMUM_VERSION_FOR_COMPILE) + ): + return + if self.use_cache and not self._supports_static_cache: + return + from torch._inductor import config as inductor_config + + # System level optimization + inductor_config.cpp_wrapper = True + os.environ["TORCHINDUCTOR_FREEZING"] = "1" + logger.info("Enable torch.compile optimization") + self.model.forward = torch.compile(self.model.forward) + self.compiled = True + + def _init_warmup(self): + inputs = prepare_jit_inputs(self.model, self.export_feature, False) + with torch.no_grad(): + self.model(**inputs) + self.model(**inputs) + logger.info("Warm up end") + class IPEXModelForSequenceClassification(IPEXModel): auto_model_class = AutoModelForSequenceClassification @@ -236,16 +253,10 @@ def __init__( config: PretrainedConfig = None, model_save_dir: Optional[Union[str, Path, TemporaryDirectory]] = None, use_cache: bool = True, + warmup: Optional[bool] = True, **kwargs, ): - super().__init__(model, config, model_save_dir=model_save_dir, use_cache=use_cache) - - self._supports_cache_class = getattr(model, "_supports_cache_class", None) - 
self._supports_sdpa = getattr(model, "_supports_sdpa", None) - self._supports_cache_class = getattr(model, "_supports_cache_class", None) - self._supports_quantized_cache = getattr(model, "_supports_quantized_cache", None) - self._supports_static_cache = getattr(model, "_supports_static_cache", None) - + super().__init__(model, config, model_save_dir=model_save_dir, warmup=False, use_cache=use_cache) if self._add_patch: self._supports_cache_class = True GenerationMixin.__init__(self) @@ -269,6 +280,9 @@ def __init__( if hasattr(self.model_cls, "_convert_to_bloom_cache"): self._convert_to_bloom_cache = self.model_cls._convert_to_bloom_cache + if warmup: + self._init_warmup() + @torch.no_grad() def forward( self, @@ -285,6 +299,9 @@ def _prepare_generation_config( ) -> Tuple[GenerationConfig, Dict]: generation_config, model_kwargs = super()._prepare_generation_config(generation_config, **kwargs) generation_method = generation_config.get_generation_mode().value + if self.compiled and generation_config.cache_implementation != "ipex_paged" and self._supports_static_cache: + # Use static cache for torch compile + generation_config.cache_implementation = "static" if generation_method not in _IPEX_EXPORTED_GENERATION_METHODS: raise ValueError( f"The generation method {generation_method} is not supported for IPEXModelForCausalLM for now, support methods are {_IPEX_EXPORTED_GENERATION_METHODS}" @@ -337,6 +354,83 @@ def generate(self, *args, **kwargs): return result + def _init_warmup(self): + inputs = prepare_jit_inputs(self.model, self.export_feature, False) + self.generate(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=4) + self.generate(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=4) + logger.info("Warm up end") + + +class IPEXModelForSeq2SeqLM(IPEXModel, GenerationMixin): + auto_model_class = AutoModelForSeq2SeqLM + export_feature = "text2text-generation" + + def __init__( + self, + model, + config: PretrainedConfig = None, + model_save_dir: Optional[Union[str, Path, TemporaryDirectory]] = None, + use_cache: bool = True, + warmup: Optional[bool] = True, + **kwargs, + ): + super().__init__(model, config, model_save_dir=model_save_dir, warmup=False, use_cache=use_cache) + GenerationMixin.__init__(self) + + model_type = self.config.model_type.replace("_", "-") + self.normalized_config = NormalizedConfigManager.get_normalized_config_class(model_type)(self.config) + + self.config.is_decoder = False + self.config.is_encoder_decoder = True + + self.generation_config = GenerationConfig.from_model_config(self.config) + try: + self.model_cls = get_class_from_dynamic_module( + self.config.auto_map["AutoModelForSeq2SeqLM"], model_save_dir + ) + except AttributeError: + self.model_cls = get_model_class(self.config, AutoModelForSeq2SeqLM._model_mapping) + + if hasattr(self.model_cls, "_convert_to_standard_cache"): + self._convert_to_standard_cache = self.model_cls._convert_to_standard_cache + + if warmup: + self._init_warmup() + + @torch.no_grad() + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.FloatTensor] = None, + **kwargs, + ) -> CausalLMOutputWithPast: + return self.model(input_ids=input_ids, attention_mask=attention_mask, **kwargs) + + def _prepare_generation_config( + self, generation_config: Optional[GenerationConfig], **kwargs: Dict + ) -> Tuple[GenerationConfig, Dict]: + generation_config, model_kwargs = super()._prepare_generation_config(generation_config, **kwargs) + # 
Use static cache for torch.compile + if self.compiled: + generation_config.cache_implementation = "static" + + return generation_config, model_kwargs + + def _reorder_cache(self, *args, **kwargs): + return self.model._reorder_cache(*args, **kwargs) + + def prepare_inputs_for_generation(self, *args, **kwargs): + return self.model.prepare_inputs_for_generation(*args, **kwargs) + + def get_encoder(self, *args, **kwargs): + return self.model.get_encoder(*args, **kwargs) + + def _init_warmup(self): + inputs = prepare_jit_inputs(self.model, self.export_feature, False) + self.generate(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=4) + self.generate(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=4) + logger.info("Warm up end") + def _ipex_crop_past_key_values(model, past_key_values, max_length): if isinstance(model, IPEXModel) and _is_patched_with_ipex(model, "text-generation"): diff --git a/optimum/intel/ipex/utils.py b/optimum/intel/ipex/utils.py index 3d3feb3db..23126bcd4 100644 --- a/optimum/intel/ipex/utils.py +++ b/optimum/intel/ipex/utils.py @@ -16,6 +16,7 @@ _HEAD_TO_AUTOMODELS = { "feature-extraction": "IPEXModel", "text-generation": "IPEXModelForCausalLM", + "text2text-generation": "IPEXModelForSeq2SeqLM", "text-classification": "IPEXModelForSequenceClassification", "token-classification": "IPEXModelForTokenClassification", "question-answering": "IPEXModelForQuestionAnswering", diff --git a/optimum/intel/pipelines/pipeline_base.py b/optimum/intel/pipelines/pipeline_base.py index 5b8531c67..04390ba3b 100644 --- a/optimum/intel/pipelines/pipeline_base.py +++ b/optimum/intel/pipelines/pipeline_base.py @@ -58,6 +58,7 @@ IPEXModelForImageClassification, IPEXModelForMaskedLM, IPEXModelForQuestionAnswering, + IPEXModelForSeq2SeqLM, IPEXModelForSequenceClassification, IPEXModelForTokenClassification, ) @@ -69,6 +70,24 @@ "default": "gpt2", "type": "text", }, + "summarization": { + "impl": SummarizationPipeline, + "class": (IPEXModelForSeq2SeqLM,), + "default": "t5-base", + "type": "text", + }, + "translation": { + "impl": TranslationPipeline, + "class": (IPEXModelForSeq2SeqLM,), + "default": "t5-small", + "type": "text", + }, + "text2text-generation": { + "impl": Text2TextGenerationPipeline, + "class": (IPEXModelForSeq2SeqLM,), + "default": "t5-small", + "type": "text", + }, "fill-mask": { "impl": FillMaskPipeline, "class": (IPEXModelForMaskedLM,), diff --git a/optimum/intel/utils/dummy_ipex_objects.py b/optimum/intel/utils/dummy_ipex_objects.py index de68e4002..7c1922305 100644 --- a/optimum/intel/utils/dummy_ipex_objects.py +++ b/optimum/intel/utils/dummy_ipex_objects.py @@ -70,6 +70,17 @@ def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["ipex"]) +class IPEXModelForSeq2SeqLM(metaclass=DummyObject): + _backends = ["ipex"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["ipex"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["ipex"]) + + class IPEXModelForQuestionAnswering(metaclass=DummyObject): _backends = ["ipex"] diff --git a/tests/ipex/test_modeling.py b/tests/ipex/test_modeling.py index b595f6139..419e1bb42 100644 --- a/tests/ipex/test_modeling.py +++ b/tests/ipex/test_modeling.py @@ -26,6 +26,7 @@ from transformers import ( AutoFeatureExtractor, AutoModelForCausalLM, + AutoModelForSeq2SeqLM, AutoModelForQuestionAnswering, AutoTokenizer, GenerationConfig, @@ -37,6 +38,7 @@ IPEXModel, IPEXModelForAudioClassification, 
IPEXModelForCausalLM, + IPEXModelForSeq2SeqLM, IPEXModelForImageClassification, IPEXModelForMaskedLM, IPEXModelForQuestionAnswering, @@ -45,7 +47,7 @@ IPEXSentenceTransformer, ) from optimum.utils.testing_utils import grid_parameters, require_sentence_transformers -from optimum.intel.utils.import_utils import is_sentence_transformers_available +from optimum.intel.utils.import_utils import is_sentence_transformers_available, is_torch_version if is_sentence_transformers_available(): from sentence_transformers import SentenceTransformer @@ -360,6 +362,9 @@ def test_ipex_beam_search(self, test_name, model_arch, use_cache): model = IPEXModelForCausalLM.from_pretrained( model_id, use_cache=use_cache, torch_dtype=dtype, device_map=DEVICE ) + # It will be removed when torch 2.6 released + if model_arch == "opt" and not use_cache and model.compiled and is_torch_version("<", "2.6.0"): + return if use_cache and model_arch in self.IPEX_PATCHED_SUPPORTED_ARCHITECTURES: self.assertTrue(model.add_patch) transformers_model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=dtype, device_map=DEVICE) @@ -554,6 +559,126 @@ def test_patched_model(self): self.assertTrue(torch.allclose(outputs.logits, transformers_outputs.logits, atol=1e-4)) +class IPEXModelForSeq2SeqLMTest(unittest.TestCase): + IPEX_MODEL_CLASS = IPEXModelForSeq2SeqLM + SUPPORTED_ARCHITECTURES = ("t5",) + GENERATION_LENGTH = 2 + SPEEDUP_CACHE = 1.0 + + @parameterized.expand(SUPPORTED_ARCHITECTURES) + def test_compare_to_transformers(self, model_arch): + model_id = MODEL_NAMES[model_arch] + set_seed(SEED) + dtype = torch.float16 if IS_XPU_AVAILABLE else torch.float32 + # Test model forward do not need cache. + ipex_model = self.IPEX_MODEL_CLASS.from_pretrained(model_id, torch_dtype=dtype) + transformers_model = AutoModelForSeq2SeqLM.from_pretrained(model_id, torch_dtype=dtype) + self.assertIsInstance(ipex_model.config, PretrainedConfig) + tokenizer = AutoTokenizer.from_pretrained(model_id) + tokens = tokenizer( + "This is a sample", + return_tensors="pt", + return_token_type_ids=False if model_arch in ("llama", "llama2") else None, + ) + decoder_start_token_id = transformers_model.config.decoder_start_token_id if model_arch != "mbart" else 2 + decoder_inputs = {"decoder_input_ids": torch.ones((1, 1), dtype=torch.long) * decoder_start_token_id} + outputs = ipex_model(**tokens, **decoder_inputs) + + self.assertIsInstance(outputs.logits, torch.Tensor) + + with torch.no_grad(): + transformers_outputs = transformers_model(**tokens, **decoder_inputs) + + # Test re-load model + with tempfile.TemporaryDirectory() as tmpdirname: + ipex_model.save_pretrained(tmpdirname) + loaded_model = self.IPEX_MODEL_CLASS.from_pretrained(tmpdirname, torch_dtype=dtype) + loaded_model_outputs = loaded_model(**tokens, **decoder_inputs) + + # Test init method + init_model = self.IPEX_MODEL_CLASS(transformers_model) + init_model_outputs = init_model(**tokens, **decoder_inputs) + + # Compare tensor outputs + self.assertTrue(torch.allclose(outputs.logits, transformers_outputs.logits, atol=1e-4)) + # To avoid float pointing error + self.assertTrue(torch.allclose(outputs.logits, loaded_model_outputs.logits, atol=1e-7)) + self.assertTrue(torch.allclose(outputs.logits, init_model_outputs.logits, atol=1e-7)) + + @parameterized.expand(SUPPORTED_ARCHITECTURES) + def test_pipeline(self, model_arch): + dtype = torch.float16 if IS_XPU_AVAILABLE else torch.float32 + model_id = MODEL_NAMES[model_arch] + tokenizer = AutoTokenizer.from_pretrained(model_id) + model = 
self.IPEX_MODEL_CLASS.from_pretrained(model_id, torch_dtype=dtype) + model.config.encoder_no_repeat_ngram_size = 0 + # model.to("cpu") + pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer) + outputs = pipe("This is a sample", max_new_tokens=10, do_sample=False) + self.assertEqual(pipe.device, model.device) + + def test_compare_with_and_without_past_key_values(self): + model_id = "hf-internal-testing/tiny-random-t5" + dtype = torch.float16 if IS_XPU_AVAILABLE else torch.float32 + model_with_pkv = self.IPEX_MODEL_CLASS.from_pretrained(model_id, use_cache=True, torch_dtype=dtype) + device = model_with_pkv.device + tokenizer = AutoTokenizer.from_pretrained(model_id) + tokens = tokenizer("This is a sample input", return_tensors="pt").to(device) + # Warmup + model_with_pkv.generate(**tokens) + with Timer() as with_pkv_timer: + outputs_model_with_pkv = model_with_pkv.generate( + **tokens, min_new_tokens=self.GENERATION_LENGTH, max_new_tokens=self.GENERATION_LENGTH, num_beams=1 + ) + model_without_pkv = self.IPEX_MODEL_CLASS.from_pretrained(model_id, use_cache=False, torch_dtype=dtype) + # Warmup + model_without_pkv.generate(**tokens) + with Timer() as without_pkv_timer: + outputs_model_without_pkv = model_without_pkv.generate( + **tokens, min_new_tokens=self.GENERATION_LENGTH, max_new_tokens=self.GENERATION_LENGTH, num_beams=1 + ) + self.assertTrue(torch.equal(outputs_model_with_pkv, outputs_model_without_pkv)) + self.assertEqual(outputs_model_with_pkv.shape[1], self.GENERATION_LENGTH + 1) + self.assertEqual(outputs_model_without_pkv.shape[1], self.GENERATION_LENGTH + 1) + + @parameterized.expand( + grid_parameters( + { + "model_arch": SUPPORTED_ARCHITECTURES, + "use_cache": [True, False], + } + ) + ) + def test_ipex_beam_search(self, test_name, model_arch, use_cache): + model_id = MODEL_NAMES[model_arch] + set_seed(SEED) + dtype = torch.float16 if IS_XPU_AVAILABLE else torch.float32 + model = self.IPEX_MODEL_CLASS.from_pretrained(model_id, use_cache=use_cache, torch_dtype=dtype) + device = model.device + transformers_model = AutoModelForSeq2SeqLM.from_pretrained(model_id, torch_dtype=dtype).to(device) + self.assertEqual(model.use_cache, use_cache) + tokenizer = AutoTokenizer.from_pretrained(model_id) + tokenizer.pad_token = tokenizer.eos_token + # Test with batch_size is 1 and 2. 
+ texts = ["This is a sample", ["This is the first input", "This is the second input"]] + generation_configs = ( + GenerationConfig(max_new_tokens=4, num_beams=2, do_sample=False), + GenerationConfig(max_new_tokens=4, num_beams=4, do_sample=False), + GenerationConfig(max_new_tokens=4, num_beams=8, do_sample=False), + GenerationConfig(max_new_tokens=4, num_beams=32, do_sample=False), + GenerationConfig( + max_new_tokens=4, do_sample=False, top_p=0.9, top_k=0, pad_token_id=tokenizer.eos_token_id + ), + ) + for text in texts: + tokens = tokenizer(text, padding=True, return_tensors="pt").to(device) + for generation_config in generation_configs: + outputs = model.generate(**tokens, generation_config=generation_config) + transformers_outputs = transformers_model.generate(**tokens, generation_config=generation_config) + self.assertIsInstance(outputs, torch.Tensor) + self.assertTrue(torch.equal(outputs, transformers_outputs)) + + class IPEXSTModel(unittest.TestCase): SUPPORTED_ARCHITECTURES = ( "st-bert", diff --git a/tests/ipex/test_pipelines.py b/tests/ipex/test_pipelines.py index d9ddaf258..f376c6050 100644 --- a/tests/ipex/test_pipelines.py +++ b/tests/ipex/test_pipelines.py @@ -28,6 +28,7 @@ IPEXModelForImageClassification, IPEXModelForMaskedLM, IPEXModelForQuestionAnswering, + IPEXModelForSeq2SeqLM, IPEXModelForSequenceClassification, IPEXModelForTokenClassification, ) @@ -83,6 +84,7 @@ class PipelinesIntegrationTest(unittest.TestCase): "resnet", "vit", ) + TEXT2TEXT_GENERATION_SUPPORTED_ARCHITECTURES = ("t5",) @parameterized.expand(COMMON_SUPPORTED_ARCHITECTURES) def test_token_classification_pipeline_inference(self, model_arch): @@ -224,3 +226,45 @@ def test_pipeline_load_from_jit_model(self, model_arch): ipex_output = ipex_generator(inputs) self.assertTrue(isinstance(ipex_generator.model, IPEXModelForSequenceClassification)) self.assertGreaterEqual(ipex_output[0]["score"], 0.0) + + @parameterized.expand(TEXT2TEXT_GENERATION_SUPPORTED_ARCHITECTURES) + def test_text2text_generation_pipeline_inference(self, model_arch): + model_id = MODEL_NAMES[model_arch] + dtype = torch.float16 if IS_XPU_AVAILABLE else torch.float32 + transformers_generator = transformers_pipeline("text2text-generation", model_id, torch_dtype=dtype) + ipex_generator = ipex_pipeline("text2text-generation", model_id, accelerator="ipex", torch_dtype=dtype) + inputs = "Describe a real-world application of AI." + with torch.inference_mode(): + transformers_output = transformers_generator(inputs, do_sample=False, max_new_tokens=10) + with torch.inference_mode(): + ipex_output = ipex_generator(inputs, do_sample=False, max_new_tokens=10) + self.assertTrue(isinstance(ipex_generator.model, IPEXModelForSeq2SeqLM)) + self.assertEqual(transformers_output[0]["generated_text"], ipex_output[0]["generated_text"]) + + @parameterized.expand(TEXT2TEXT_GENERATION_SUPPORTED_ARCHITECTURES) + def test_summarization_generation_pipeline_inference(self, model_arch): + model_id = MODEL_NAMES[model_arch] + dtype = torch.float16 if IS_XPU_AVAILABLE else torch.float32 + transformers_generator = transformers_pipeline("summarization", model_id, torch_dtype=dtype) + ipex_generator = ipex_pipeline("summarization", model_id, accelerator="ipex", torch_dtype=dtype) + inputs = "Describe a real-world application of AI." 
+ with torch.inference_mode(): + transformers_output = transformers_generator(inputs, do_sample=False, max_new_tokens=10) + with torch.inference_mode(): + ipex_output = ipex_generator(inputs, do_sample=False, max_new_tokens=10) + self.assertTrue(isinstance(ipex_generator.model, IPEXModelForSeq2SeqLM)) + self.assertEqual(transformers_output[0]["summary_text"], ipex_output[0]["summary_text"]) + + @parameterized.expand(TEXT2TEXT_GENERATION_SUPPORTED_ARCHITECTURES) + def test_translation_generation_pipeline_inference(self, model_arch): + model_id = MODEL_NAMES[model_arch] + dtype = torch.float16 if IS_XPU_AVAILABLE else torch.float32 + transformers_generator = transformers_pipeline("translation", model_id, torch_dtype=dtype) + ipex_generator = ipex_pipeline("translation", model_id, accelerator="ipex", torch_dtype=dtype) + inputs = "Describe a real-world application of AI." + with torch.inference_mode(): + transformers_output = transformers_generator(inputs, do_sample=False, max_new_tokens=10) + with torch.inference_mode(): + ipex_output = ipex_generator(inputs, do_sample=False, max_new_tokens=10) + self.assertTrue(isinstance(ipex_generator.model, IPEXModelForSeq2SeqLM)) + self.assertEqual(transformers_output[0]["translation_text"], ipex_output[0]["translation_text"])
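
Taken together, the last patch registers `IPEXModelForSeq2SeqLM` for the `text2text-generation`, `summarization` and `translation` pipeline tasks. A minimal usage sketch modeled on the pipeline tests above follows; the `t5-small` checkpoint and the generation arguments are illustrative assumptions rather than values mandated by the patch.

```python
# Usage sketch modeled on test_text2text_generation_pipeline_inference above;
# "t5-small" and the generation arguments are illustrative choices.
from optimum.intel.pipelines import pipeline

# accelerator="ipex" routes the task to IPEXModelForSeq2SeqLM
# (see the pipeline_base.py registration above).
pipe = pipeline("text2text-generation", "t5-small", accelerator="ipex")
result = pipe("Describe a real-world application of AI.", do_sample=False, max_new_tokens=10)
print(result[0]["generated_text"])
```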