From 0f5c951bc78bc236d9152f0485004eb10b03fdf7 Mon Sep 17 00:00:00 2001
From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com>
Date: Sat, 2 Dec 2023 04:29:42 -0500
Subject: [PATCH 1/2] redundant code

---
 CHANGELOG.md                     | 4 ++++
 analogvnn/fn/reduce_precision.py | 9 +++------
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 724fcd0..1e056b1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,9 @@
 # Changelog
 
+## ---
+
+* Removed redundant code from `reduce_precision`.
+
 ## 1.0.7
 
 * Fixed `GeLU` backward function equation.
diff --git a/analogvnn/fn/reduce_precision.py b/analogvnn/fn/reduce_precision.py
index 1c8f394..fb7216e 100644
--- a/analogvnn/fn/reduce_precision.py
+++ b/analogvnn/fn/reduce_precision.py
@@ -12,7 +12,8 @@ def reduce_precision(x: TENSOR_OPERABLE, precision: TENSOR_OPERABLE, divide: TEN
     Args:
         x (TENSOR_OPERABLE): Tensor
         precision (TENSOR_OPERABLE): the precision of the quantization.
-        divide (TENSOR_OPERABLE): the number of bits to be reduced
+        divide (TENSOR_OPERABLE): the rounding threshold; e.g. if divide is 0.5,
+            then 0.6 is rounded to 1.0 and 0.4 is rounded to 0.0.
 
     Returns:
         TENSOR_OPERABLE: TENSOR_OPERABLE with the same shape as x, but with values rounded to the nearest
@@ -20,11 +21,7 @@ def reduce_precision(x: TENSOR_OPERABLE, precision: TENSOR_OPERABLE, divide: TEN
     """
     x = x if isinstance(x, Tensor) else torch.tensor(x, requires_grad=False)
 
-    g: Tensor = x * precision
-    f = torch.sign(g) * torch.maximum(
-        torch.floor(torch.abs(g)),
-        torch.ceil(torch.abs(g) - divide)
-    ) * (1 / precision)
+    f = torch.sign(x) * torch.ceil(torch.abs(x * precision) - divide) * (1 / precision)
 
     return f
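For reference, the simplified `reduce_precision` above acts as a step quantizer: it scales `x` into units of `1 / precision`, rounds each magnitude up once it passes the `divide` threshold, and scales back. A minimal sketch of its behavior (the sample tensor and the values `precision=10`, `divide=0.5` are illustrative, not from the patch):

```python
import torch

def reduce_precision(x, precision, divide):
    # Simplified form from this patch: scale by `precision`, round magnitudes
    # up once they cross the `divide` threshold, then rescale.
    x = x if isinstance(x, torch.Tensor) else torch.tensor(x, requires_grad=False)
    return torch.sign(x) * torch.ceil(torch.abs(x * precision) - divide) * (1 / precision)

x = torch.tensor([0.06, 0.14, -0.26])
print(reduce_precision(x, precision=10, divide=0.5))
# tensor([ 0.1000,  0.1000, -0.3000]) -- each value snaps to a 1/10 step
```

For the usual `0 <= divide < 1`, `ceil(|x * precision| - divide)` is never smaller than `floor(|x * precision|)`, so the old `torch.maximum(torch.floor(...), torch.ceil(...))` branch could never change the result; that is the redundancy this patch removes.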
From 4be24fd97638cdd3ced35df0bb054b1d4579e18b Mon Sep 17 00:00:00 2001
From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com>
Date: Thu, 2 May 2024 19:28:44 -0400
Subject: [PATCH 2/2] v1.0.8

Added `types` argument to `PseudoParameter.parametrize_module`
---
 CHANGELOG.md                           | 11 ++---------
 CITATION.cff                           |  8 ++++----
 README.md                              | 15 ++++++++++-----
 analogvnn/parameter/PseudoParameter.py | 14 ++++++++++++--
 pyproject.toml                         |  2 +-
 requirements.txt                       |  1 +
 6 files changed, 30 insertions(+), 21 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1e056b1..3c6b319 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,45 +1,38 @@
 # Changelog
 
-## ---
-
+## 1.0.8
 * Removed redundant code from `reduce_precision`.
+* Added `types` argument to `PseudoParameter.parametrize_module` for finer selection of which parameters to parametrize.
 
 ## 1.0.7
 
 * Fixed `GeLU` backward function equation.
 
 ## 1.0.6
-
 * `Model` is subclass of `BackwardModule` for additional functionality.
 * Using `inspect.isclass` to check if `backward_class` is a class in `Linear.set_backward_function`.
 * Repr using `self.__class__.__name__` in all classes.
 
 ## 1.0.5 (Patches for Pytorch 2.0.1)
-
 * Removed unnecessary `PseudoParameter.grad` property.
 * Patch for Pytorch 2.0.1, add filtering inputs in `BackwardGraph._calculate_gradients`.
 
 ## 1.0.4
-
 * Combined `PseudoParameter` and `PseudoParameterModule` for better visibility.
 * BugFix: fixed save and load of state_dict of `PseudoParameter` and transformation module.
 * Removed redundant class `analogvnn.parameter.Parameter`.
 
 ## 1.0.3
-
 * Added support for no loss function in `Model` class.
 * If no loss function is provided, the `Model` object will use outputs for gradient computation.
 * Added support for multiple loss outputs from loss function.
 
 ## 1.0.2
-
 * Bugfix: removed `graph` from `Layer` class.
 * `graph` was causing issues with nested `Model` objects.
 * Now `_use_autograd_graph` is directly set while compiling the `Model` object.
 
 ## 1.0.1 (Patches for Pytorch 2.0.0)
-
 * added `grad.setter` to `PseudoParameterModule` class.
 
 ## 1.0.0
-
 * Public release.
diff --git a/CITATION.cff b/CITATION.cff
index e4be1f6..fd28351 100644
--- a/CITATION.cff
+++ b/CITATION.cff
@@ -11,10 +11,10 @@ preferred-citation:
   - family-names: Youngblood
     given-names: Nathan
     affiliation: University of Pittsburgh
-  doi: "10.48550/arXiv.2210.10048"
-  journal: "arXiv preprint arXiv:2210.10048"
+  doi: "10.1063/5.0134156"
+  journal: "APL Machine Learning"
   title: 'AnalogVNN: A fully modular framework for modeling and optimizing photonic neural networks'
-  year: 2022
+  year: 2023
 authors:
   - given-names: Vivswan
     family-names: Shah
@@ -25,7 +25,7 @@ authors:
     affiliation: University of Pittsburgh
 identifiers:
   - type: doi
-    value: 10.48550/arXiv.2210.10048
+    value: 10.1063/5.0134156
     description: >-
       The concept DOI for the collection containing all versions of the Citation
       File Format.
diff --git a/README.md b/README.md
index 35f459f..594b5e8 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,7 @@
 # AnalogVNN
 
 [![arXiv](https://img.shields.io/badge/arXiv-2210.10048-orange.svg)](https://arxiv.org/abs/2210.10048)
+[![AML](https://img.shields.io/badge/AML-10.1063/5.0134156-orange.svg)](https://doi.org/10.1063/5.0134156)
 [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Vivswan/AnalogVNN/blob/release/docs/_static/AnalogVNN_Demo.ipynb)
 [![PyPI version](https://badge.fury.io/py/analogvnn.svg)](https://badge.fury.io/py/analogvnn)
@@ -52,18 +53,21 @@ digital neural network models to their analog counterparts with just a few lines
 taking full advantage of the open-source optimization, deep learning, and GPU acceleration
 libraries available through PyTorch.
 
-AnalogVNN Paper: [https://arxiv.org/abs/2210.10048](https://arxiv.org/abs/2210.10048)
+AnalogVNN Paper: [https://doi.org/10.1063/5.0134156](https://doi.org/10.1063/5.0134156)
 
 ## Citing AnalogVNN
 
 We would appreciate it if you cite the following paper in your publications for which you used AnalogVNN:
 
 ```bibtex
-@article{shah2022analogvnn,
+@article{shah2023analogvnn,
   title={AnalogVNN: A fully modular framework for modeling and optimizing photonic neural networks},
   author={Shah, Vivswan and Youngblood, Nathan},
-  journal={arXiv preprint arXiv:2210.10048},
-  year={2022}
+  journal={APL Machine Learning},
+  volume={1},
+  number={2},
+  year={2023},
+  publisher={AIP Publishing}
 }
 ```
 
 Or in textual form:
 
 ```text
 Vivswan Shah, and Nathan Youngblood. "AnalogVNN: A fully modular framework for modeling
-and optimizing photonic neural networks." arXiv preprint arXiv:2210.10048 (2022).
+and optimizing photonic neural networks." APL Machine Learning 1.2 (2023).
+DOI: 10.1063/5.0134156
 ```
\ No newline at end of file
diff --git a/analogvnn/parameter/PseudoParameter.py b/analogvnn/parameter/PseudoParameter.py
index 7c3a71c..5706668 100644
--- a/analogvnn/parameter/PseudoParameter.py
+++ b/analogvnn/parameter/PseudoParameter.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from typing import Callable, Any
+from typing import Callable, Any, Optional, Union, Tuple
 
 import torch
 import torch.nn as nn
@@ -219,13 +219,20 @@ def parameterize(cls, module: nn.Module, param_name: str, transformation: Callab
         return new_param
 
     @classmethod
-    def parametrize_module(cls, module: nn.Module, transformation: Callable, requires_grad: bool = True):
+    def parametrize_module(
+            cls,
+            module: nn.Module,
+            transformation: Callable,
+            requires_grad: bool = True,
+            types: Optional[Union[type, Tuple[type]]] = None,
+    ):
         """Parametrize all parameters of a module.
 
         Args:
             module (nn.Module): the module parameters to parametrize.
             transformation (Callable): the transformation.
             requires_grad (bool): if True, only parametrize parameters that require gradients.
+            types (Optional[Union[type, Tuple[type]]]): the parameter type or tuple of types to parametrize.
 
         """
         with torch.no_grad():
@@ -236,6 +243,9 @@ def parametrize_module(cls, module: nn.Module, transformation: Callable, require
             if requires_grad and not parameter.requires_grad:
                 continue
 
+            if types is not None and not isinstance(parameter, types):
+                continue
+
             cls.parameterize(module=module, param_name=name, transformation=transformation)
 
         for sub_module in module.children():
diff --git a/pyproject.toml b/pyproject.toml
index e04526a..316e325 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -19,7 +19,7 @@ where = ["analogvnn"]
 [project]
 # $ pip install analogvnn
 name = "analogvnn"
-version = "1.0.7"
+version = "1.0.8"
 description = "A fully modular framework for modeling and optimizing analog/photonic neural networks"
 readme = "README.md"
 requires-python = ">=3.7"
diff --git a/requirements.txt b/requirements.txt
index 3a91fbe..5dc9f92 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,6 +2,7 @@
 torch
 torchvision
 torchaudio
+dataclasses
 numpy>=1.22.2
 scipy
 networkx
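A usage sketch for the new `types` filter in `PseudoParameter.parametrize_module` (the `AnalogParameter` marker class and the `clamp` transformation are hypothetical, for illustration; only the `isinstance(parameter, types)` filtering comes from the patch):

```python
import torch
import torch.nn as nn

from analogvnn.parameter.PseudoParameter import PseudoParameter


class AnalogParameter(nn.Parameter):
    """Hypothetical marker subclass used to tag parameters for parametrization."""


def clamp(w):
    # Hypothetical transformation; a real one would model analog noise or precision.
    return torch.clamp(w, -1.0, 1.0)


model = nn.Linear(4, 4)
model.weight = AnalogParameter(model.weight.data)  # tag the weight, leave the bias alone

# With `types` set, parametrize_module skips the plain nn.Parameter bias and
# parametrizes only the tagged AnalogParameter weight.
PseudoParameter.parametrize_module(model, transformation=clamp, types=AnalogParameter)
```

A tuple of types works as well, since `isinstance` accepts one.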