Edit keras export notebook
Add usage example of keras_load_quantized_model.
Change paths to local paths.
Remove compression ratio calculation in int8.
reuvenp committed Mar 19, 2024
1 parent 78f2f4d commit 794aa09
Showing 1 changed file with 33 additions and 15 deletions.

tutorials/notebooks/keras/export/example_keras_export.ipynb
@@ -47,11 +47,11 @@
"cell_type": "code",
"source": [
"import numpy as np\n",
"from keras.applications import ResNet50\n",
"from keras.applications import MobileNetV2\n",
"import model_compression_toolkit as mct\n",
"\n",
"# Create a model\n",
"float_model = ResNet50()\n",
"float_model = MobileNetV2()\n",
"# Quantize the model.\n",
"# Notice that here the representative dataset is random for demonstration only.\n",
"quantized_exportable_model, _ = mct.ptq.keras_post_training_quantization(float_model,\n",
@@ -87,10 +87,8 @@
{
"cell_type": "code",
"source": [
"import tempfile\n",
"\n",
"# Path of exported model\n",
"_, keras_file_path = tempfile.mkstemp('.keras')\n",
"keras_file_path = 'exported_model_mctq.keras'\n",
"\n",
"# Export a keras model with mctq custom quantizers.\n",
"mct.exporter.keras_export_model(model=quantized_exportable_model,\n",
@@ -107,17 +105,40 @@
"source": [
"Notice that the model has the same size as the quantized exportable model as weights data types are float.\n",
"\n",
"#### Fakely-Quantized"
"#### MCTQ - Loading Exported Model\n",
"\n",
"To load the exported model with MCTQ quantizers, use `mct.keras_load_quantized_model`:"
],
"metadata": {
"id": "Bwx5rxXDF_gb"
}
},
+ {
+ "cell_type": "code",
+ "source": [
+ "loaded_model = mct.keras_load_quantized_model(keras_file_path)"
+ ],
+ "metadata": {
+ "id": "q235XNJQmTdd"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
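A quick sanity check after loading, not shown in the notebook: the reloaded model should reproduce the in-memory exportable model's outputs. A minimal sketch, assuming MobileNetV2's default 224x224x3 input:

```python
import numpy as np

images = np.random.randn(1, 224, 224, 3).astype(np.float32)

# The reloaded model should match the exportable model on the same input.
assert np.allclose(loaded_model.predict(images),
                   quantized_exportable_model.predict(images),
                   atol=1e-5)
```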
+ {
+ "cell_type": "markdown",
+ "source": [
+ "\n",
+ "#### Fakely-Quantized"
+ ],
+ "metadata": {
+ "id": "sOmDjSehlQba"
+ }
+ },
{
"cell_type": "code",
"source": [
"# Path of exported model\n",
"_, keras_file_path = tempfile.mkstemp('.keras')\n",
"keras_file_path = 'exported_model_fakequant.keras'\n",
"\n",
"# Use mode KerasExportSerializationFormat.KERAS for a .keras model\n",
"# and QuantizationFormat.FAKELY_QUANT for fakely-quantized weights\n",
@@ -154,10 +175,7 @@
{
"cell_type": "code",
"source": [
"import tempfile\n",
"\n",
"# Path of exported model\n",
"_, tflite_file_path = tempfile.mkstemp('.tflite')\n",
"tflite_file_path = 'exported_model_int8.tflite'\n",
"\n",
"# Use mode KerasExportSerializationFormat.TFLITE for tflite model and quantization_format.INT8.\n",
"mct.exporter.keras_export_model(model=quantized_exportable_model,\n",
@@ -186,12 +204,11 @@
"import os\n",
"\n",
"# Save float model to measure its size\n",
"_, float_file_path = tempfile.mkstemp('.keras')\n",
"float_file_path = 'exported_model_float.keras'\n",
"float_model.save(float_file_path)\n",
"\n",
"print(\"Float model in Mb:\", os.path.getsize(float_file_path) / float(2 ** 20))\n",
"print(\"Quantized model in Mb:\", os.path.getsize(tflite_file_path) / float(2 ** 20))\n",
"print(f'Compression ratio: {os.path.getsize(float_file_path) / os.path.getsize(tflite_file_path)}')"
"print(\"Quantized model in Mb:\", os.path.getsize(tflite_file_path) / float(2 ** 20))"
],
"metadata": {
"id": "LInM16OMGUtF"
@@ -217,7 +234,8 @@
"cell_type": "code",
"source": [
"# Path of exported model\n",
"_, tflite_file_path = tempfile.mkstemp('.tflite')\n",
"tflite_file_path = 'exported_model_fakequant.tflite'\n",
"\n",
"\n",
"# Use mode KerasExportSerializationFormat.TFLITE for tflite model and QuantizationFormat.FAKELY_QUANT for fakely-quantized weights\n",
"# and activations.\n",
