From 4693cd2c62862dd0b9b4508a4bd204e121ea5374 Mon Sep 17 00:00:00 2001
From: Ella Charlaix
Date: Mon, 11 Mar 2024 16:39:35 +0100
Subject: [PATCH] Fix default ov config

---
 optimum/intel/openvino/modeling.py              | 4 ++--
 optimum/intel/openvino/modeling_base.py         | 4 ++--
 optimum/intel/openvino/modeling_base_seq2seq.py | 4 ++--
 optimum/intel/openvino/modeling_decoder.py      | 2 +-
 optimum/intel/openvino/modeling_diffusion.py    | 4 ++--
 5 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/optimum/intel/openvino/modeling.py b/optimum/intel/openvino/modeling.py
index 7831305d5f..357ca94c07 100644
--- a/optimum/intel/openvino/modeling.py
+++ b/optimum/intel/openvino/modeling.py
@@ -434,8 +434,8 @@ def _from_transformers(
         save_dir = TemporaryDirectory()
         save_dir_path = Path(save_dir.name)
 
-        # If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
-        if load_in_8bit is None or not quantization_config:
+        # If load_in_8bit and quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
+        if load_in_8bit is None and not quantization_config:
             ov_config = None
         else:
             ov_config = OVConfig(dtype="fp32")
diff --git a/optimum/intel/openvino/modeling_base.py b/optimum/intel/openvino/modeling_base.py
index af00f7a06e..7ab99aab42 100644
--- a/optimum/intel/openvino/modeling_base.py
+++ b/optimum/intel/openvino/modeling_base.py
@@ -314,8 +314,8 @@ def _from_transformers(
         save_dir = TemporaryDirectory()
         save_dir_path = Path(save_dir.name)
 
-        # If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
-        if load_in_8bit is None or not quantization_config:
+        # If load_in_8bit and quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
+        if load_in_8bit is None and not quantization_config:
             ov_config = None
         else:
             ov_config = OVConfig(dtype="fp32")
diff --git a/optimum/intel/openvino/modeling_base_seq2seq.py b/optimum/intel/openvino/modeling_base_seq2seq.py
index 3cb43e61b8..28e112c4d9 100644
--- a/optimum/intel/openvino/modeling_base_seq2seq.py
+++ b/optimum/intel/openvino/modeling_base_seq2seq.py
@@ -258,8 +258,8 @@ def _from_transformers(
         if use_cache:
             task = task + "-with-past"
 
-        # If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
-        if load_in_8bit is None or not quantization_config:
+        # If load_in_8bit and quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
+        if load_in_8bit is None and not quantization_config:
             ov_config = None
         else:
             ov_config = OVConfig(dtype="fp32")
diff --git a/optimum/intel/openvino/modeling_decoder.py b/optimum/intel/openvino/modeling_decoder.py
index 3d9671caf1..edc88d02cb 100644
--- a/optimum/intel/openvino/modeling_decoder.py
+++ b/optimum/intel/openvino/modeling_decoder.py
@@ -260,7 +260,7 @@ def _from_transformers(
         if use_cache:
             task = task + "-with-past"
 
-        # If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
+        # If load_in_8bit and quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
         if load_in_8bit is None and not quantization_config:
             ov_export_config = None
         else:
diff --git a/optimum/intel/openvino/modeling_diffusion.py b/optimum/intel/openvino/modeling_diffusion.py
index 5e8a0cdc59..a985f43d7c 100644
--- a/optimum/intel/openvino/modeling_diffusion.py
+++ b/optimum/intel/openvino/modeling_diffusion.py
@@ -321,8 +321,8 @@ def _from_transformers(
         save_dir = TemporaryDirectory()
         save_dir_path = Path(save_dir.name)
 
-        # If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
-        if load_in_8bit is None or not quantization_config:
+        # If load_in_8bit and quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
+        if load_in_8bit is None and not quantization_config:
             ov_config = None
         else:
             ov_config = OVConfig(dtype="fp32")
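Note (not part of the patch itself): the change from "or" to "and" means ov_config is left as None only when neither load_in_8bit nor quantization_config was supplied by the user; if either one is given, an explicit OVConfig(dtype="fp32") is created instead. The following standalone Python sketch mirrors that corrected condition with a placeholder in place of OVConfig, purely to illustrate the truth table; select_ov_config is a hypothetical helper, not a function from optimum-intel.

from itertools import product

def select_ov_config(load_in_8bit, quantization_config):
    # Mirrors the patched condition: only when BOTH arguments are unspecified
    # do we return None and let the conversion step choose a default
    # (which depends on the model size).
    if load_in_8bit is None and not quantization_config:
        return None
    # Placeholder standing in for OVConfig(dtype="fp32").
    return {"dtype": "fp32"}

# Enumerate the input combinations to show when the default (None) applies.
for load_in_8bit, quantization_config in product([None, True, False], [None, {"bits": 8}]):
    print(load_in_8bit, quantization_config, "->", select_ov_config(load_in_8bit, quantization_config))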