From adb320dfeee254dd2fdfad27a0439809d96b8cfa Mon Sep 17 00:00:00 2001
From: Ofir Gordon
Date: Mon, 26 Aug 2024 10:07:55 +0300
Subject: [PATCH 1/3] Fix MP tutorial arguments after release

---
 ...ample_pytorch_mobilenetv2_cifar100_mixed_precision.ipynb | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mobilenetv2_cifar100_mixed_precision.ipynb b/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mobilenetv2_cifar100_mixed_precision.ipynb
index d23c6aa70..5f2f61124 100644
--- a/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mobilenetv2_cifar100_mixed_precision.ipynb
+++ b/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mobilenetv2_cifar100_mixed_precision.ipynb
@@ -17,7 +17,7 @@
     "id": "59ed8f02"
    },
    "source": [
-    "[Run this tutorial in Google Colab](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mobilenetv2_cifar100_mixed_precision.ipynb)"
+    "ResourceUtilization_data[Run this tutorial in Google Colab](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mobilenetv2_cifar100_mixed_precision.ipynb)"
    ]
   },
   {
@@ -481,7 +481,7 @@
     "# The candidates bit-width for quantization should be defined in the target platform model:\n",
     "configuration = mct.core.CoreConfig(mixed_precision_config=mct.core.MixedPrecisionQuantizationConfig(\n",
     "    num_of_images=32,\n",
-    "    use_grad_based_weights=False))"
+    "    use_hessian_based_scores=False))"
    ]
   },
   {
@@ -515,7 +515,7 @@
     "# while the bias will not)\n",
     "# examples:\n",
     "# weights_compression_ratio = 0.75 - About 0.75 of the model's weights memory size when quantized with 8 bits.\n",
-    "ResourceUtilization = mct.core.ResourceUtilization(ResourceUtilization_data.weights_memory * 0.75)"
+    "ResourceUtilization = mct.core.ResourceUtilization(resource_utilization_data.weights_memory * 0.75)"
    ]
   },
   {

From 650c63b086d209e6d78fe0fe7c39ed49e2b87779 Mon Sep 17 00:00:00 2001
From: Ofir Gordon
Date: Mon, 26 Aug 2024 12:56:46 +0300
Subject: [PATCH 2/3] Fixes

---
 ...example_pytorch_mobilenetv2_cifar100_mixed_precision.ipynb | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mobilenetv2_cifar100_mixed_precision.ipynb b/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mobilenetv2_cifar100_mixed_precision.ipynb
index 5f2f61124..07e9f84a9 100644
--- a/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mobilenetv2_cifar100_mixed_precision.ipynb
+++ b/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mobilenetv2_cifar100_mixed_precision.ipynb
@@ -17,7 +17,7 @@
     "id": "59ed8f02"
    },
    "source": [
-    "ResourceUtilization_data[Run this tutorial in Google Colab](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mobilenetv2_cifar100_mixed_precision.ipynb)"
+    "[Run this tutorial in Google Colab](https://colab.research.google.com/github/sony/model_optimization/blob/main/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mobilenetv2_cifar100_mixed_precision.ipynb)"
    ]
   },
   {
@@ -537,7 +537,7 @@
    "source": [
     "quantized_model, quantization_info = mct.ptq.pytorch_post_training_quantization(model,\n",
     "                                                                           representative_data_gen,\n",
-    "                                                                           target_ResourceUtilization=ResourceUtilization,\n",
+    "                                                                           target_resource_utilization=ResourceUtilization,\n",
     "                                                                           core_config=configuration,\n",
     "                                                                           target_platform_capabilities=target_platform_cap)\n",
     "                                                                           "

From 46ed4a405539dcbd78a1daf16dc382edf64b2510 Mon Sep 17 00:00:00 2001
From: Ofir Gordon
Date: Mon, 26 Aug 2024 12:59:41 +0300
Subject: [PATCH 3/3] Fixes

---
 ..._pytorch_mobilenetv2_cifar100_mixed_precision.ipynb | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mobilenetv2_cifar100_mixed_precision.ipynb b/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mobilenetv2_cifar100_mixed_precision.ipynb
index 07e9f84a9..42efd0a19 100644
--- a/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mobilenetv2_cifar100_mixed_precision.ipynb
+++ b/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mobilenetv2_cifar100_mixed_precision.ipynb
@@ -515,7 +515,7 @@
     "# while the bias will not)\n",
     "# examples:\n",
     "# weights_compression_ratio = 0.75 - About 0.75 of the model's weights memory size when quantized with 8 bits.\n",
-    "ResourceUtilization = mct.core.ResourceUtilization(resource_utilization_data.weights_memory * 0.75)"
+    "resource_utilization = mct.core.ResourceUtilization(resource_utilization_data.weights_memory * 0.75)"
    ]
   },
   {
@@ -536,10 +536,10 @@
    "outputs": [],
    "source": [
     "quantized_model, quantization_info = mct.ptq.pytorch_post_training_quantization(model,\n",
-    "                                                                           representative_data_gen,\n",
-    "                                                                           target_resource_utilization=ResourceUtilization,\n",
-    "                                                                           core_config=configuration,\n",
-    "                                                                           target_platform_capabilities=target_platform_cap)\n",
+    "                                                                                 representative_data_gen,\n",
+    "                                                                                 target_resource_utilization=resource_utilization,\n",
+    "                                                                                 core_config=configuration,\n",
+    "                                                                                 target_platform_capabilities=target_platform_cap)\n",
     "                                                                           "
    ]
   },
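
For reference, the notebook cells touched by these three patches converge on roughly the sketch below. This is not part of the patches themselves; it assumes model, representative_data_gen, target_platform_cap, and resource_utilization_data are defined by earlier cells of the tutorial (those definitions are not shown in these hunks).

    import model_compression_toolkit as mct

    # Mixed-precision search settings: 32 representative images, with
    # Hessian-based sensitivity scoring disabled (the option renamed in PATCH 1/3).
    configuration = mct.core.CoreConfig(
        mixed_precision_config=mct.core.MixedPrecisionQuantizationConfig(
            num_of_images=32,
            use_hessian_based_scores=False))

    # Constrain weights memory to ~75% of the model's 8-bit weights memory size.
    # resource_utilization_data is assumed to come from an earlier notebook cell.
    resource_utilization = mct.core.ResourceUtilization(
        resource_utilization_data.weights_memory * 0.75)

    # Run mixed-precision post-training quantization under that constraint.
    quantized_model, quantization_info = mct.ptq.pytorch_post_training_quantization(
        model,
        representative_data_gen,
        target_resource_utilization=resource_utilization,
        core_config=configuration,
        target_platform_capabilities=target_platform_cap)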