From de190fd650e043b4cc0c8a2048b136bac43f34e0 Mon Sep 17 00:00:00 2001
From: "Cheng, Penghui"
Date: Wed, 17 Jan 2024 08:41:41 +0800
Subject: [PATCH] Update example

Signed-off-by: Cheng, Penghui
---
 examples/neural_compressor/language-modeling/run_clm.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/examples/neural_compressor/language-modeling/run_clm.py b/examples/neural_compressor/language-modeling/run_clm.py
index 36422bb024..f3c1b44e57 100644
--- a/examples/neural_compressor/language-modeling/run_clm.py
+++ b/examples/neural_compressor/language-modeling/run_clm.py
@@ -717,11 +717,10 @@ def compute_metrics(eval_preds):
     if optim_args.apply_quantization and optim_args.quantization_approach in {"static", "dynamic", "weight_only"}:
         model = trainer.model if isinstance(trainer.model, PreTrainedModel) else trainer.model._model
         quantizer = INCQuantizer.from_pretrained(model)
-        if optim_args.quantization_approach != "dynamic":
+        if optim_args.quantization_approach in ["static", "weight_only"]:
             num_calibration_samples = min(len(train_dataset), optim_args.num_calibration_samples)
             train_dataset = train_dataset.select(range(num_calibration_samples))
-        if optim_args.quantization_approach == "static":
-            quantization_config.calibration_sampling_size = num_calibration_samples
+            quantization_config.calibration_sampling_size = num_calibration_samples
         quantizer.quantize(
             quantization_config=quantization_config,