diff --git a/CHANGELOG.md b/CHANGELOG.md
index 724fcd0..3c6b319 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,41 +1,38 @@
 # Changelog
 
+## 1.0.8
+* Removed redundant code from `reduce_precision`.
+* Added `types` argument to `PseudoParameter.parametrize_module` to select which parameter types are parametrized.
+
 ## 1.0.7
 * Fixed `GeLU` backward function equation.
 
 ## 1.0.6
-
 * `Model` is subclass of `BackwardModule` for additional functionality.
 * Using `inspect.isclass` to check if `backward_class` is a class in `Linear.set_backward_function`.
 * Repr using `self.__class__.__name__` in all classes.
 
 ## 1.0.5 (Patches for Pytorch 2.0.1)
-
 * Removed unnecessary `PseudoParameter.grad` property.
 * Patch for Pytorch 2.0.1, add filtering inputs in `BackwardGraph._calculate_gradients`.
 
 ## 1.0.4
-
 * Combined `PseudoParameter` and `PseudoParameterModule` for better visibility.
 * BugFix: fixed save and load of state_dict of `PseudoParameter` and transformation module.
 * Removed redundant class `analogvnn.parameter.Parameter`.
 
 ## 1.0.3
-
 * Added support for no loss function in `Model` class.
 * If no loss function is provided, the `Model` object will use outputs for gradient computation.
 * Added support for multiple loss outputs from loss function.
 
 ## 1.0.2
-
 * Bugfix: removed `graph` from `Layer` class.
 * `graph` was causing issues with nested `Model` objects.
 * Now `_use_autograd_graph` is directly set while compiling the `Model` object.
 
 ## 1.0.1 (Patches for Pytorch 2.0.0)
-
 * added `grad.setter` to `PseudoParameterModule` class.
 
 ## 1.0.0
-
 * Public release.
diff --git a/CITATION.cff b/CITATION.cff
index e4be1f6..fd28351 100644
--- a/CITATION.cff
+++ b/CITATION.cff
@@ -11,10 +11,10 @@ preferred-citation:
   - family-names: Youngblood
     given-names: Nathan
     affiliation: University of Pittsburgh
-  doi: "10.48550/arXiv.2210.10048"
-  journal: "arXiv preprint arXiv:2210.10048"
+  doi: "10.1063/5.0134156"
+  journal: "APL Machine Learning"
   title: 'AnalogVNN: A fully modular framework for modeling and optimizing photonic neural networks'
-  year: 2022
+  year: 2023
 authors:
   - given-names: Vivswan
     family-names: Shah
@@ -25,7 +25,7 @@ authors:
     affiliation: University of Pittsburgh
 identifiers:
   - type: doi
-    value: 10.48550/arXiv.2210.10048
+    value: 10.1063/5.0134156
     description: >-
       The concept DOI for the collection containing all
       versions of the Citation File Format.
diff --git a/README.md b/README.md
index 35f459f..594b5e8 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,7 @@
 # AnalogVNN
 
 [![arXiv](https://img.shields.io/badge/arXiv-2210.10048-orange.svg)](https://arxiv.org/abs/2210.10048)
+[![AML](https://img.shields.io/badge/AML-10.1063/5.0134156-orange.svg)](https://doi.org/10.1063/5.0134156)
 [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Vivswan/AnalogVNN/blob/release/docs/_static/AnalogVNN_Demo.ipynb)
 [![PyPI version](https://badge.fury.io/py/analogvnn.svg)](https://badge.fury.io/py/analogvnn)
 
@@ -52,18 +53,21 @@ digital neural network models to their analog counterparts with just a few lines
 taking full advantage of the open-source optimization, deep learning, and GPU acceleration
 libraries available through PyTorch.
 
-AnalogVNN Paper: [https://arxiv.org/abs/2210.10048](https://arxiv.org/abs/2210.10048)
+AnalogVNN Paper: [https://doi.org/10.1063/5.0134156](https://doi.org/10.1063/5.0134156)
 
 ## Citing AnalogVNN
 
 We would appreciate if you cite the following paper in your publications for which you used AnalogVNN:
 
 ```bibtex
-@article{shah2022analogvnn,
+@article{shah2023analogvnn,
   title={AnalogVNN: A fully modular framework for modeling and optimizing photonic neural networks},
   author={Shah, Vivswan and Youngblood, Nathan},
-  journal={arXiv preprint arXiv:2210.10048},
-  year={2022}
+  journal={APL Machine Learning},
+  volume={1},
+  number={2},
+  year={2023},
+  publisher={AIP Publishing}
 }
 ```
 
@@ -71,5 +75,6 @@ Or in textual form:
 
 ```text
 Vivswan Shah, and Nathan Youngblood. "AnalogVNN: A fully modular framework for modeling
-and optimizing photonic neural networks." arXiv preprint arXiv:2210.10048 (2022).
+and optimizing photonic neural networks." APL Machine Learning 1.2 (2023).
+DOI: 10.1063/5.0134156
 ```
\ No newline at end of file
diff --git a/analogvnn/fn/reduce_precision.py b/analogvnn/fn/reduce_precision.py
index 1c8f394..fb7216e 100644
--- a/analogvnn/fn/reduce_precision.py
+++ b/analogvnn/fn/reduce_precision.py
@@ -12,7 +12,8 @@ def reduce_precision(x: TENSOR_OPERABLE, precision: TENSOR_OPERABLE, divide: TEN
     Args:
         x (TENSOR_OPERABLE): Tensor
         precision (TENSOR_OPERABLE): the precision of the quantization.
-        divide (TENSOR_OPERABLE): the number of bits to be reduced
+        divide (TENSOR_OPERABLE): the rounding threshold: if divide is 0.5,
+            then 0.6 rounds to 1.0 and 0.4 rounds to 0.0.
 
     Returns:
         TENSOR_OPERABLE: TENSOR_OPERABLE with the same shape as x, but with values rounded to the nearest
@@ -20,11 +21,7 @@ def reduce_precision(x: TENSOR_OPERABLE, precision: TENSOR_OPERABLE, divide: TEN
     """
     x = x if isinstance(x, Tensor) else torch.tensor(x, requires_grad=False)
-    g: Tensor = x * precision
-    f = torch.sign(g) * torch.maximum(
-        torch.floor(torch.abs(g)),
-        torch.ceil(torch.abs(g) - divide)
-    ) * (1 / precision)
+    f = torch.sign(x) * torch.ceil(torch.abs(x * precision) - divide) * (1 / precision)
     return f
diff --git a/analogvnn/parameter/PseudoParameter.py b/analogvnn/parameter/PseudoParameter.py
index 7c3a71c..5706668 100644
--- a/analogvnn/parameter/PseudoParameter.py
+++ b/analogvnn/parameter/PseudoParameter.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from typing import Callable, Any
+from typing import Callable, Any, Optional, Union, Tuple
 
 import torch
 import torch.nn as nn
@@ -219,13 +219,20 @@ def parameterize(cls, module: nn.Module, param_name: str, transformation: Callab
         return new_param
 
     @classmethod
-    def parametrize_module(cls, module: nn.Module, transformation: Callable, requires_grad: bool = True):
+    def parametrize_module(
+            cls,
+            module: nn.Module,
+            transformation: Callable,
+            requires_grad: bool = True,
+            types: Optional[Union[type, Tuple[type, ...]]] = None,
+    ):
         """Parametrize all parameters of a module.
 
         Args:
             module (nn.Module): the module parameters to parametrize.
             transformation (Callable): the transformation.
             requires_grad (bool): if True, only parametrized parameters that require gradients.
+            types (Union[type, Tuple[type, ...]]): parametrize only parameters that are instances of these types; if None, all parameters are parametrized.
""" with torch.no_grad(): @@ -236,6 +243,9 @@ def parametrize_module(cls, module: nn.Module, transformation: Callable, require if requires_grad and not parameter.requires_grad: continue + if types is not None and not isinstance(parameter, types): + continue + cls.parameterize(module=module, param_name=name, transformation=transformation) for sub_module in module.children(): diff --git a/pyproject.toml b/pyproject.toml index e04526a..316e325 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,7 +19,7 @@ where = ["analogvnn"] [project] # $ pip install analogvnn name = "analogvnn" -version = "1.0.7" +version = "1.0.8" description = "A fully modular framework for modeling and optimizing analog/photonic neural networks" readme = "README.md" requires-python = ">=3.7" diff --git a/requirements.txt b/requirements.txt index 82526c1..5dc9f92 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,6 +2,7 @@ torch torchvision torchaudio +dataclasses numpy>=1.22.2 scipy networkx @@ -9,7 +10,7 @@ importlib-metadata; python_version < '3.8' # Full wheel>=0.38.0 -tensorflow>=2.0.0 +tensorflow>=2.11.1 tensorboard>=2.0.0 torchinfo # conda install graphviz python-graphviz pydot pydotplus python-dotenv @@ -17,3 +18,4 @@ torchinfo graphviz pillow>=10.0.1 # not directly required, pinned by Snyk to avoid a vulnerability werkzeug>=3.0.1 # not directly required, pinned by Snyk to avoid a vulnerability +setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability diff --git a/requirements/requirements-dev.txt b/requirements/requirements-dev.txt index 04d60e8..b07682b 100644 --- a/requirements/requirements-dev.txt +++ b/requirements/requirements-dev.txt @@ -4,3 +4,4 @@ setuptools>=65.5.1 build # building the package {pyproject-build} twine # to publish on pypi {twine upload --repository-url=https://test.pypi.org/legacy/ dist/*} {twine upload dist/*} johnnydep # to see dependencies {johnnydep } +wheel>=0.38.0 # not directly required, pinned by Snyk to avoid a vulnerability diff --git a/requirements/requirements-docs.txt b/requirements/requirements-docs.txt index a4a7d56..16133ec 100644 --- a/requirements/requirements-docs.txt +++ b/requirements/requirements-docs.txt @@ -12,4 +12,5 @@ sphinx-inline-tabs sphinxext-opengraph sphinxcontrib-katex # math tornado>=6.3.3 # not directly required, pinned by Snyk to avoid a vulnerability -pillow>=10.0.1 # not directly required, pinned by Snyk to avoid a vulnerability +fonttools>=4.43.0 # not directly required, pinned by Snyk to avoid a vulnerabily +pillow>=10.3.0 # not directly required, pinned by Snyk to avoid a vulnerability diff --git a/requirements/requirements-test.txt b/requirements/requirements-test.txt index 54c409a..a20ce8c 100644 --- a/requirements/requirements-test.txt +++ b/requirements/requirements-test.txt @@ -9,3 +9,4 @@ flake8-coding flake8-return # flake8-noreturn>=1.0.1; python_version >= '3.8' flake8-deprecated +setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability