From 4d686c4b73b99e3913f306094a302d5234a132a5 Mon Sep 17 00:00:00 2001
From: Julian Buechel
Date: Thu, 6 Nov 2025 10:11:08 +0100
Subject: [PATCH 1/3] [fix] for https://github.com/IBM/aihwkit/issues/627

Signed-off-by: Julian Buechel
---
 src/aihwkit/nn/modules/conv.py | 8 ++++++--
 tests/test_calibration.py      | 2 +-
 tests/test_quantized_tile.py   | 6 +++---
 tests/test_torch_tiles.py      | 2 +-
 4 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/src/aihwkit/nn/modules/conv.py b/src/aihwkit/nn/modules/conv.py
index ae9a7d81..6859e953 100644
--- a/src/aihwkit/nn/modules/conv.py
+++ b/src/aihwkit/nn/modules/conv.py
@@ -73,10 +73,14 @@ def __init__(
             rpu_config = SingleRPUConfig()
 
-        if tile_module_class is None:
-            tile_module_class = rpu_config.get_default_tile_module_class()
         self.in_features = self.get_tile_size(in_channels, groups, kernel_size)
         self.out_features = out_channels
+
+        if tile_module_class is None:
+            tile_module_class = rpu_config.get_default_tile_module_class(
+                out_size=self.out_features, in_size=self.in_features
+            )
+
         self.analog_module = tile_module_class(
             self.out_features, self.in_features, rpu_config, bias
         )
 
diff --git a/tests/test_calibration.py b/tests/test_calibration.py
index c5d4ac26..72525b35 100644
--- a/tests/test_calibration.py
+++ b/tests/test_calibration.py
@@ -56,7 +56,7 @@ def create_analog_network(rpu_config):
 
 
 def get_rpu(
-    rpu: Union[TorchInferenceRPUConfig, InferenceRPUConfig, QuantizedTorchInferenceRPUConfig],
+    rpu: Union[TorchInferenceRPUConfig, InferenceRPUConfig, QuantizedTorchInferenceRPUConfig]
 ):
     """Create test rpu config."""
     rpu.forward.out_noise = 0.01
diff --git a/tests/test_quantized_tile.py b/tests/test_quantized_tile.py
index 0dfe0ce0..20b4ae3e 100644
--- a/tests/test_quantized_tile.py
+++ b/tests/test_quantized_tile.py
@@ -25,7 +25,7 @@ def test_output_quantization(n_bits, symmetric, range_estimator):
     """Test that output quantization works, returning the appropriate number of states"""
 
     def set_perfect_rpuconfig(
-        rpu_config: Union[TorchInferenceRPUConfig, QuantizedTorchInferenceRPUConfig],
+        rpu_config: Union[TorchInferenceRPUConfig, QuantizedTorchInferenceRPUConfig]
     ):
         rpu_config.forward.is_perfect = True
         if isinstance(rpu_config, QuantizedTorchInferenceRPUConfig):
@@ -70,7 +70,7 @@ def test_array_module_output_quantization(
     """Test that when an array is used, output quantization is properly applied"""
 
     def set_perfect_rpuconfig(
-        rpu_config: Union[TorchInferenceRPUConfig, QuantizedTorchInferenceRPUConfig],
+        rpu_config: Union[TorchInferenceRPUConfig, QuantizedTorchInferenceRPUConfig]
     ):
         rpu_config.forward.is_perfect = True
         if isinstance(rpu_config, QuantizedTorchInferenceRPUConfig):
@@ -107,7 +107,7 @@ def test_quantized_periphery(n_bits, symmetric, arr_rows, arr_columns):
     """Test that quantized periphery is properly applied"""
 
     def set_perfect_rpuconfig_with_periphery(
-        rpu_config: Union[TorchInferenceRPUConfig, QuantizedTorchInferenceRPUConfig],
+        rpu_config: Union[TorchInferenceRPUConfig, QuantizedTorchInferenceRPUConfig]
     ):
         rpu_config.forward.is_perfect = True
         rpu_config.mapping.weight_scaling_omega = 1.0
diff --git a/tests/test_torch_tiles.py b/tests/test_torch_tiles.py
index 765ec2b6..d913adeb 100644
--- a/tests/test_torch_tiles.py
+++ b/tests/test_torch_tiles.py
@@ -459,7 +459,7 @@ def test_noise_and_bound_management(
     """
 
     def set_bm_nm(
-        rpu: Union[TorchInferenceRPUConfig, InferenceRPUConfig],
+        rpu: Union[TorchInferenceRPUConfig, InferenceRPUConfig]
    ) -> Union[TorchInferenceRPUConfig, InferenceRPUConfig]:
        """Set the rpu config."""
        rpu.forward.out_noise = 0.0
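Why patch 1 is needed: the default tile module class used to be resolved before the convolution's unrolled tile dimensions (`in_features`/`out_features`) were computed, so `get_default_tile_module_class()` could not take the tile size into account. Below is a minimal sketch of what the size-aware lookup enables, assuming the default class selection consults `rpu_config.mapping`; the limit values, layer sizes, and printed class are illustrative assumptions, not part of the patch:

    # Sketch only: assumes get_default_tile_module_class(out_size, in_size)
    # weighs the sizes against rpu_config.mapping when choosing between a
    # single tile and an array of tiles.
    from aihwkit.nn import AnalogConv2d
    from aihwkit.simulator.configs import TorchInferenceRPUConfig

    rpu_config = TorchInferenceRPUConfig()
    rpu_config.mapping.max_input_size = 256   # example per-tile limits
    rpu_config.mapping.max_output_size = 256

    # in_features = get_tile_size(64, 1, (3, 3)) = 64 * 3 * 3 = 576, which
    # exceeds max_input_size, so the default class can now account for the
    # required splitting instead of building one oversized tile.
    conv = AnalogConv2d(64, 128, kernel_size=3, rpu_config=rpu_config)
    print(type(conv.analog_module))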
From 1461b073ce218607e6954a2a557ac088ef264bdb Mon Sep 17 00:00:00 2001
From: Julian Buechel
Date: Thu, 6 Nov 2025 10:33:48 +0100
Subject: [PATCH 2/3] [fix] mypy python version should be 3.10

Signed-off-by: Julian Buechel
---
 setup.cfg | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.cfg b/setup.cfg
index 74cf3650..5b3c539f 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -10,7 +10,7 @@ add_select = D204,D215,D401,D404
 match-dir = ^(?!helpers|definitions).*
 
 [mypy]
-python_version = 3.8
+python_version = 3.10
 namespace_packages = True
 ignore_missing_imports = True
 warn_redundant_casts = True
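The mypy bump in patch 2 matters because `python_version` fixes the syntax and stdlib semantics mypy assumes. For example, PEP 604 union annotations written without `from __future__ import annotations` only type-check from 3.10 onwards. A hypothetical snippet (the function is made up for illustration) that mypy rejects under `python_version = 3.8` but accepts under `3.10`:

    # PEP 604 `X | Y` union syntax in annotations requires Python 3.10
    # (or the `from __future__ import annotations` escape hatch on 3.8).
    def scaled(value: float, scale: float | None = None) -> float:
        """Scale the value, treating None as a scale of 1.0."""
        return value if scale is None else value * scale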
From 7f4f4be2c099885d4568656cd1d3fd5037274b7e Mon Sep 17 00:00:00 2001
From: Julian Buechel
Date: Fri, 14 Nov 2025 13:53:54 +0100
Subject: [PATCH 3/3] [deprecation] deprecate convert_to_analog_mapped

Signed-off-by: Julian Buechel
---
 ..._resnet34_imagenet_conversion_to_analog.py |  5 --
 examples/19_analog_summary_lenet.py           |  4 +-
 notebooks/tutorial/analog_training.ipynb      |  8 +--
 .../tutorial/extending_functionality.ipynb    |  4 +-
 src/aihwkit/nn/conversion.py                  | 71 ------------------
 5 files changed, 8 insertions(+), 84 deletions(-)

diff --git a/examples/17_resnet34_imagenet_conversion_to_analog.py b/examples/17_resnet34_imagenet_conversion_to_analog.py
index ada972d1..5391ca35 100644
--- a/examples/17_resnet34_imagenet_conversion_to_analog.py
+++ b/examples/17_resnet34_imagenet_conversion_to_analog.py
@@ -41,9 +41,4 @@
 # convolutions)
 model = convert_to_analog(model, rpu_config)
 
-# Note: One can also use ``convert_to_analog_mapped`` instead to
-# convert e.g. ``Conv2d`` to ``AnalogConv2dMapped`` (using a special way to
-# unfold over multiple tiles in a more memory efficient way
-# for some analog tiles on GPU)
-
 print(model)
diff --git a/examples/19_analog_summary_lenet.py b/examples/19_analog_summary_lenet.py
index aaf1ba92..70017b49 100644
--- a/examples/19_analog_summary_lenet.py
+++ b/examples/19_analog_summary_lenet.py
@@ -10,7 +10,7 @@
 from torch import nn
 
 # Imports from aihwkit.
-from aihwkit.nn.conversion import convert_to_analog_mapped
+from aihwkit.nn.conversion import convert_to_analog
 from aihwkit.simulator.configs import SingleRPUConfig, ConstantStepDevice
 from aihwkit.utils.analog_info import analog_summary
 
@@ -36,6 +36,6 @@
     nn.LogSoftmax(dim=1),
 )
 
-analog_model = convert_to_analog_mapped(model, rpu_config=rpu_config)
+analog_model = convert_to_analog(model, rpu_config=rpu_config)
 
 analog_summary(analog_model, (1, 1, 28, 28))
diff --git a/notebooks/tutorial/analog_training.ipynb b/notebooks/tutorial/analog_training.ipynb
index 698abeb3..417e2ae8 100644
--- a/notebooks/tutorial/analog_training.ipynb
+++ b/notebooks/tutorial/analog_training.ipynb
@@ -191,9 +191,9 @@
    "outputs": [],
    "source": [
     "from torchvision.models import resnet18\n",
-    "from aihwkit.nn.conversion import convert_to_analog_mapped\n",
+    "from aihwkit.nn.conversion import convert_to_analog\n",
     "\n",
-    "analog_model = convert_to_analog_mapped(resnet18(), rpu_config=rpu_config)\n",
+    "analog_model = convert_to_analog(resnet18(), rpu_config=rpu_config)\n",
     "\n",
     "print(analog_model)"
   ]
@@ -575,7 +575,7 @@
     "from torchmetrics.functional import accuracy\n",
     "\n",
     "from aihwkit.optim import AnalogSGD\n",
-    "from aihwkit.nn.conversion import convert_to_analog_mapped\n",
+    "from aihwkit.nn.conversion import convert_to_analog\n",
     "\n",
     "\n",
     "class LitAnalogModel(pl.LightningModule):\n",
@@ -583,7 +583,7 @@
     "        super().__init__()\n",
     "\n",
     "        # We simply convert the given model to analog on-the-fly\n",
-    "        self.analog_model = convert_to_analog_mapped(model, rpu_config)\n",
+    "        self.analog_model = convert_to_analog(model, rpu_config)\n",
     "        self.lr = lr\n",
     "\n",
     "    def forward(self, x):\n",
diff --git a/notebooks/tutorial/extending_functionality.ipynb b/notebooks/tutorial/extending_functionality.ipynb
index 29477950..0a06d2c3 100644
--- a/notebooks/tutorial/extending_functionality.ipynb
+++ b/notebooks/tutorial/extending_functionality.ipynb
@@ -128,7 +128,7 @@
     "from torchmetrics.functional import accuracy\n",
     "\n",
     "from aihwkit.optim import AnalogSGD\n",
-    "from aihwkit.nn.conversion import convert_to_analog_mapped\n",
+    "from aihwkit.nn.conversion import convert_to_analog\n",
     "\n",
     "PATH_DATASET = os.path.join('data', 'DATASET')\n",
     "os.makedirs(PATH_DATASET, exist_ok=True)\n",
@@ -163,7 +163,7 @@
     "        super().__init__()\n",
     "\n",
     "        # We simply convert the given model to analog on-the-fly\n",
-    "        self.analog_model = convert_to_analog_mapped(model, rpu_config)\n",
+    "        self.analog_model = convert_to_analog(model, rpu_config)\n",
     "        self.lr = lr\n",
     "\n",
     "    def forward(self, x):\n",
diff --git a/src/aihwkit/nn/conversion.py b/src/aihwkit/nn/conversion.py
index 41b92f99..b751fdb5 100644
--- a/src/aihwkit/nn/conversion.py
+++ b/src/aihwkit/nn/conversion.py
@@ -214,77 +214,6 @@ def convert_to_analog(
     return module
 
 
-def convert_to_analog_mapped(
-    module: Module,
-    rpu_config: RPUConfigGeneric,
-    tile_module_class: Optional[TileModule] = None,
-    specific_rpu_config_fun: Optional[Callable] = None,
-    module_name: str = "",
-    ensure_analog_root: bool = True,
-    exclude_modules: Optional[List[str]] = None,
-    inplace: bool = False,
-    verbose: bool = False,
-) -> Module:
-    """Convert a given digital model to its analog counterpart with tile
-    mapping support.
-
-    Note:
-        The torch device (cuda/cpu) is inferred from the original
-        models parameters, however, if multiple torch
-        devices are used in a given module, the corresponding analog
-        module is not moved to any device.
-
-    Args:
-        module: The torch module to convert. All layers that are
-            defined in the ``conversion_map``.
-        rpu_config: RPU config to apply to all converted tiles.
-        tile_module_class: Custom tile module class
-        specific_rpu_config_fun: Function that modifies the generic
-            RPUConfig for specific modules. See
-            :func:`~specific_rpu_config_id` as an example how to
-            specify it.
-
-        module_name: Explicitly given name of the base (root) module,
-            given to ``specific_rpu_config_fun``.
-
-        ensure_analog_root: Whether to ensure that the root module is
-            of layer type `AnalogLayerBase` so that custom analog are
-            methods such as `drift_analog_weigths` are available. If
-            set, it will wrap the model if `AnalogWrapper` if necessary.
-
-            Note:
-
-                Since the module structure changes when wrapped, the
-                checkpoint names will also change if this is
-                enabled (for legacy load this might need to be disabled).
-
-        exclude_modules: List of modules names that are in the
-            conversion map but should be excluded from the conversion
-
-        inplace: Whether to for in place conversion (without deepcopy)
-
-        verbose: Increase verbosity. Will print converted layers.
-
-
-    Returns:
-        Module where all the digital layers are replaced with analog
-        mapped layers.
-
-    """
-    return convert_to_analog(
-        module,
-        rpu_config,
-        tile_module_class,
-        _DEFAULT_MAPPED_CONVERSION_MAP,
-        specific_rpu_config_fun,
-        module_name,
-        ensure_analog_root,
-        exclude_modules,
-        inplace,
-        verbose,
-    )
-
-
 def convert_to_digital(
     module: Module,
     conversion_set: Optional[Set] = None,
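For downstream users, migration from the removed function is essentially a rename: `convert_to_analog_mapped` was a thin wrapper that forwarded to `convert_to_analog` with `_DEFAULT_MAPPED_CONVERSION_MAP`, and the example and notebook updates above switch call sites one-for-one. A migration sketch follows, assuming (as the size-aware default from patch 1 suggests) that splitting over multiple tiles is steered by the mapping parameters of the RPUConfig; the limit values below are examples, not taken from the patch:

    # Before: analog_model = convert_to_analog_mapped(model, rpu_config)
    # After:  plain convert_to_analog with mapping limits on the config.
    from torchvision.models import resnet34
    from aihwkit.nn.conversion import convert_to_analog
    from aihwkit.simulator.configs import TorchInferenceRPUConfig

    rpu_config = TorchInferenceRPUConfig()
    rpu_config.mapping.max_input_size = 512    # assumed per-tile limits
    rpu_config.mapping.max_output_size = 512

    analog_model = convert_to_analog(resnet34(), rpu_config)
    print(analog_model)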