From 46bf072a8019c89002c33669bccd02507cffc29c Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Wed, 17 May 2023 19:48:22 -0400 Subject: [PATCH 01/11] flake8 Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- .flake8 | 35 +++++++++++++++++++------ analogvnn/graph/AcyclicDirectedGraph.py | 4 +-- analogvnn/parameter/PseudoParameter.py | 2 +- analogvnn/utils/TensorboardModelLog.py | 2 +- analogvnn/utils/get_model_summaries.py | 2 +- pyproject.toml | 10 +++---- 6 files changed, 37 insertions(+), 18 deletions(-) diff --git a/.flake8 b/.flake8 index 0078e18..3da9a0b 100644 --- a/.flake8 +++ b/.flake8 @@ -2,17 +2,36 @@ max-line-length = 120 extend-ignore = - C101, # Coding magic comment - D100, # Missing docstring in public module - D104, # Missing docstring in public package - D202, # No blank lines allowed after function docstring - D210, # No whitespaces allowed surrounding docstring text - D401, # First line should be in imperative mood - R504, # unnecessary variable assignment before return statement - R505, # unnecessary else after return statement + # No explicit stacklevel argument found + B028, + + # Coding magic comment + C101, + + # Missing docstring in public module + D100, + + # Missing docstring in public package + D104, + + # No blank lines allowed after function docstring + D202, + + # No whitespaces allowed surrounding docstring text + D210, + + # First line should be in imperative mood + D401, + + # unnecessary variable assignment before return statement + R504, + + # unnecessary else after return statement + R505, per-file-ignores = sample_code.py: D100, D101, D102, D103, D104 + sample_code_non_analog.py: D100, D101, D102, D103, D104 sample_code_with_logs.py: D100, D101, D102, D103, D104 exclude = diff --git a/analogvnn/graph/AcyclicDirectedGraph.py b/analogvnn/graph/AcyclicDirectedGraph.py index d13d705..1f0b83d 100644 --- a/analogvnn/graph/AcyclicDirectedGraph.py +++ b/analogvnn/graph/AcyclicDirectedGraph.py @@ -131,8 +131,8 @@ def add_edge( self.graph.nodes[v_of_edge]['fillcolor'] = 'lightblue' return self - @staticmethod # noqa: C901 - def check_edge_parameters( + @staticmethod + def check_edge_parameters( # noqa: C901 in_arg: Union[None, int, bool], in_kwarg: Union[None, str, bool], out_arg: Union[None, int, bool], diff --git a/analogvnn/parameter/PseudoParameter.py b/analogvnn/parameter/PseudoParameter.py index 41c9576..295a25a 100644 --- a/analogvnn/parameter/PseudoParameter.py +++ b/analogvnn/parameter/PseudoParameter.py @@ -66,7 +66,7 @@ def __init__(self, data=None, requires_grad=True, transformation=None): self._transformed.original = self self._transformation = self.identity self.set_transformation(transformation) - self.substitute_member(self.original, self._transformed, "grad") + self.substitute_member(self.original, self._transformed, 'grad') def __call__(self, *args, **kwargs): """Transforms the parameter. 
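The `# noqa: C901` placement in this patch follows how flake8 scopes suppressions: a `# noqa` comment silences only violations reported on that same physical line, and mccabe reports C901 ("function is too complex") against the `def` statement rather than against a decorator above it, which is why the marker moves off the `@staticmethod` line. A minimal sketch of the rule, with an illustrative class name and a simplified signature (not the real AnalogVNN code):

    class GraphChecks:
        # mccabe reports C901 against the `def` line below, and flake8 `# noqa`
        # comments apply only to the physical line they sit on, so the marker
        # belongs on the `def` line rather than on the decorator.
        @staticmethod
        def check_edge_parameters(value):  # noqa: C901
            # body kept trivial here; a real C901 report needs many branches
            return bool(value)
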
diff --git a/analogvnn/utils/TensorboardModelLog.py b/analogvnn/utils/TensorboardModelLog.py index 283b7f0..3dc366b 100644 --- a/analogvnn/utils/TensorboardModelLog.py +++ b/analogvnn/utils/TensorboardModelLog.py @@ -179,7 +179,7 @@ def add_summary( model=model, input_size=input_size, train_loader=train_loader, - *args, + *args, # noqa: B026 **kwargs ) diff --git a/analogvnn/utils/get_model_summaries.py b/analogvnn/utils/get_model_summaries.py index 1fc72fa..1128b75 100644 --- a/analogvnn/utils/get_model_summaries.py +++ b/analogvnn/utils/get_model_summaries.py @@ -6,7 +6,7 @@ from analogvnn.nn.module.Layer import Layer -def get_model_summaries( +def get_model_summaries( # noqa: C901 model: Optional[nn.Module], input_size: Optional[Sequence[int]] = None, train_loader: DataLoader = None, diff --git a/pyproject.toml b/pyproject.toml index 4d06222..e6b8980 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -82,9 +82,9 @@ full = [ "graphviz", #"python-graphviz", ] -doc = [ +doc = [# https://www.youtube.com/watch?v=qRSb299awB0&t=2418s "sphinx>=4.2.0", - "sphinx-autobuild", + "sphinx-autobuild", # for live reloading {sphinx-autobuild .\docs .\docs\_build\html} "rst-to-myst[sphinx]", "furo", "myst_parser", @@ -97,7 +97,7 @@ doc = [ "sphinxcontrib-katex", # for math ] flake8 = [ - "flake8", + "flake8", # for style checks {flake8 .\analogvnn\} "flake8-docstrings", "flake8-quotes", "flake8-bugbear", @@ -105,12 +105,12 @@ flake8 = [ "flake8-executable", "flake8-coding", "flake8-return", -# "flake8-noreturn; python_version >= '3.8'", + # "flake8-noreturn; python_version >= '3.8'", "flake8-deprecated", ] dev = [ - "flit", # for building {flit build} "setuptools>=61.0.0", + "flit", # for building {flit build} "build", # building the package {pyproject-build} "twine", # to publish on pypi {twine upload --repository-url=https://test.pypi.org/legacy/ dist/*} {twine upload dist/*} "johnnydep", # to see dependencies {johnnydep } From 141f0eea7ef67d3bc6d681bedefa6c75234e9b57 Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Sat, 27 May 2023 04:07:46 -0400 Subject: [PATCH 02/11] license update Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index e6b8980..24b4c5c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,7 +40,7 @@ classifiers = [# Optional "Topic :: Scientific/Engineering :: Artificial Intelligence", "Topic :: Software Development :: Libraries :: Python Modules", # Pick your license as you wish - "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", + "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)", # Specify the Python versions you support here. In particular, ensure # that you indicate you support Python 3. These classifiers are *not* # checked by "pip install". See instead "python_requires" below. 
From 47932eafac5b0f1a81e55ce259f184a3bec62395 Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Sat, 27 May 2023 20:48:18 -0400 Subject: [PATCH 03/11] unit_test to tests Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- unit_tests/__init__.py => analogvnn/py.typed | 0 pyproject.toml | 6 ++++++ tests/__init__.py | 0 {unit_tests => tests}/test_acyclic_directed_graphs.py | 0 {unit_tests => tests}/test_model_graphs.py | 0 {unit_tests => tests}/test_pseudo_parameter.py | 0 6 files changed, 6 insertions(+) rename unit_tests/__init__.py => analogvnn/py.typed (100%) create mode 100644 tests/__init__.py rename {unit_tests => tests}/test_acyclic_directed_graphs.py (100%) rename {unit_tests => tests}/test_model_graphs.py (100%) rename {unit_tests => tests}/test_pseudo_parameter.py (100%) diff --git a/unit_tests/__init__.py b/analogvnn/py.typed similarity index 100% rename from unit_tests/__init__.py rename to analogvnn/py.typed diff --git a/pyproject.toml b/pyproject.toml index 24b4c5c..a25a811 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,6 +11,12 @@ name = "analogvnn" [tool.setuptools] py-modules = ['analogvnn'] +[tool.setuptools.package-data] +"analogvnn" = ["py.typed"] + +[tool.setuptools.packages.find] +where = ["analogvnn"] + [project] # $ pip install analogvnn name = "analogvnn" diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/unit_tests/test_acyclic_directed_graphs.py b/tests/test_acyclic_directed_graphs.py similarity index 100% rename from unit_tests/test_acyclic_directed_graphs.py rename to tests/test_acyclic_directed_graphs.py diff --git a/unit_tests/test_model_graphs.py b/tests/test_model_graphs.py similarity index 100% rename from unit_tests/test_model_graphs.py rename to tests/test_model_graphs.py diff --git a/unit_tests/test_pseudo_parameter.py b/tests/test_pseudo_parameter.py similarity index 100% rename from unit_tests/test_pseudo_parameter.py rename to tests/test_pseudo_parameter.py From bbfd252497aee04af0785f46fa956eced314a1bc Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Wed, 31 May 2023 00:38:41 -0400 Subject: [PATCH 04/11] model subclass of BackwardModule Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- CHANGELOG.md | 4 ++++ analogvnn/nn/module/Model.py | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 40fba0b..0f43e6e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # Changelog +## 1.0.6 + +* `Model` is subclass of `BackwardModule` for additional functionality + ## 1.0.5 (Patches for Pytorch 2.0.1) * Removed unnecessary `PseudoParameter.grad` property. diff --git a/analogvnn/nn/module/Model.py b/analogvnn/nn/module/Model.py index 4d5a99e..e4436dc 100644 --- a/analogvnn/nn/module/Model.py +++ b/analogvnn/nn/module/Model.py @@ -7,6 +7,7 @@ from torch import optim, Tensor, nn from torch.utils.data import DataLoader +from analogvnn.backward.BackwardModule import BackwardModule from analogvnn.fn.test import test from analogvnn.fn.train import train from analogvnn.graph.BackwardGraph import BackwardGraph @@ -22,7 +23,7 @@ __all__ = ['Model'] -class Model(Layer): +class Model(Layer, BackwardModule): """Base class for analog neural network models. 
Attributes: From 005b6d2089b24a38e1d9b240ed66466dfcf2865a Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Wed, 31 May 2023 01:27:52 -0400 Subject: [PATCH 05/11] using inspect.isclass Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- CHANGELOG.md | 9 +++++---- analogvnn/nn/module/Layer.py | 3 ++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0f43e6e..225d9ad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,8 @@ ## 1.0.6 -* `Model` is subclass of `BackwardModule` for additional functionality +* `Model` is subclass of `BackwardModule` for additional functionality. +* Using `inspect.isclass` to check if `backward_class` is a class in `Linear.set_backward_function`. ## 1.0.5 (Patches for Pytorch 2.0.1) @@ -11,9 +12,9 @@ ## 1.0.4 -* Combined `PseudoParameter` and `PseudoParameterModule` for better visibility - * BugFix: fixed save and load of state_dict of `PseudoParameter` and transformation module -* Removed redundant class `analogvnn.parameter.Parameter` +* Combined `PseudoParameter` and `PseudoParameterModule` for better visibility. + * BugFix: fixed save and load of state_dict of `PseudoParameter` and transformation module. +* Removed redundant class `analogvnn.parameter.Parameter`. ## 1.0.3 diff --git a/analogvnn/nn/module/Layer.py b/analogvnn/nn/module/Layer.py index f31a34b..e9fd85e 100644 --- a/analogvnn/nn/module/Layer.py +++ b/analogvnn/nn/module/Layer.py @@ -1,6 +1,7 @@ from __future__ import annotations import functools +import inspect from typing import Union, Type, Callable, Sequence, Optional, Set, Iterator, Tuple from torch import nn, Tensor @@ -178,7 +179,7 @@ def set_backward_function(self, backward_class: Union[Callable, BackwardModule, if backward_class == self: return self - if issubclass(backward_class, BackwardModule): + if inspect.isclass(backward_class) and issubclass(backward_class, BackwardModule): self._backward_module = backward_class(self) elif isinstance(backward_class, BackwardModule): backward_class.set_layer(self) From a9e9d3be15da5e3bb1d42b72da5b94c60b538a8c Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Sun, 4 Jun 2023 18:52:46 -0400 Subject: [PATCH 06/11] removed old files Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- tests/test_acyclic_directed_graphs.py | 10 -- tests/test_pseudo_parameter.py | 131 -------------------------- 2 files changed, 141 deletions(-) delete mode 100644 tests/test_acyclic_directed_graphs.py delete mode 100644 tests/test_pseudo_parameter.py diff --git a/tests/test_acyclic_directed_graphs.py b/tests/test_acyclic_directed_graphs.py deleted file mode 100644 index 85cb219..0000000 --- a/tests/test_acyclic_directed_graphs.py +++ /dev/null @@ -1,10 +0,0 @@ -# @staticmethod -# def print_inputs_outputs(input_output_graph, module): -# if len(input_output_graph[module].inputs.args) > 0: -# print(f"{module} :i: {input_output_graph[module].inputs.args}") -# if len(input_output_graph[module].inputs.kwargs.keys()) > 0: -# print(f"{module} :i: {input_output_graph[module].inputs.kwargs}") -# if len(input_output_graph[module].outputs.args) > 0: -# print(f"{module} :o: {input_output_graph[module].outputs.args}") -# if len(input_output_graph[module].outputs.kwargs.keys()) > 0: -# print(f"{module} :o: {input_output_graph[module].outputs.kwargs}") diff --git a/tests/test_pseudo_parameter.py b/tests/test_pseudo_parameter.py deleted file mode 100644 index 
3b50d89..0000000 --- a/tests/test_pseudo_parameter.py +++ /dev/null @@ -1,131 +0,0 @@ -import torch -import torch.nn as nn -from torch import Tensor -from torch.optim import Adam - -from analogvnn.backward.BackwardIdentity import BackwardIdentity -from analogvnn.nn.module.Model import Model -from analogvnn.utils.render_autograd_graph import save_autograd_graph_from_module - -# def __getattribute__(self, item): -# print(f"__getattribute__:: {item!r}") -# return super().__getattribute__(item) -# -# def __setattr__(self, key, value): -# print(f"__setattr__:: {key!r} -> {value!r}") -# super().__setattr__(key, value) -# -# # def __set__(self, instance, value): -# # print(f"__set__:: {instance!r} -> {value!r}") -# # super().__set__(instance, value) -# -# def __get__(self, instance, owner): -# print(f"__get__:: {instance!r} -> {owner!r}") -# return super().__get__(instance, owner) -# -# @classmethod -# def __torch_function__(cls, func, types, args=(), kwargs=None): -# pargs = [x for x in args if not isinstance(x, PseudoParameter)] -# print(f"__torch_function__:: {func}, types: {types!r}, args: {pargs!r}, kwargs:{kwargs!r}") -# return super().__torch_function__(func, types, args, {} if kwargs is None else kwargs) - - -if __name__ == '__main__': - class Layer(nn.Module): - def __init__(self): - super().__init__() - - self.weight = nn.Parameter( - data=torch.ones((1, 1)) * 2, - requires_grad=True - ) - - def forward(self, x): - return x + (torch.ones_like(x) * self.weight) - - - class Symmetric(BackwardIdentity, Model): - def forward(self, x): - return torch.rand((1, x.size()[0])) @ x @ torch.rand((x.size()[1], 1)) - - - def pstr(s): - return str(s).replace(" ", "").replace("\n", "") - - - model = Layer() - parametrization = Symmetric() - # parametrization.eval() - - # # Set the parametrization mechanism - # # Fetch the original buffer or parameter - # # We create this early to check for possible errors - # parametrizations = parametrize.ParametrizationList([parametrization], model.weight) - # # Delete the previous parameter or buffer - # delattr(model, "weight") - # # If this is the first parametrization registered on the module, - # # we prepare the module to inject the property - # if not parametrize.is_parametrized(model): - # # Change the class - # _inject_new_class(model) - # # Inject a ``ModuleDict`` into the instance under module.parametrizations - # model.parametrizations = ModuleDict() - # # Add a property into the class - # _inject_property(model, "weight") - # # Add a ParametrizationList - # model.parametrizations["weight"] = parametrizations - - # parametrize.register_parametrization(model, "weight", parametrization) - - PseudoParameter.parameterize(model, "weight", parametrization) - print(f"module.weight = {pstr(model.weight)}") - print(f"module.weight = {pstr(model.weight)}") - model.weight = torch.ones((1, 1)) * 3 - model.weight.requires_grad = False - print(f"module.weight = {pstr(model.weight)}") - model.weight.requires_grad = True - print(f"module.weight.original = {pstr(model.weight.original)}") - print(f"type(module.weight) = {type(model.weight)}") - print(f"module.parameters() = {pstr(list(model.parameters()))}") - print(f"module.named_parameters() = {pstr(list(model.named_parameters(recurse=False)))}") - print(f"module.named_parameters(recurse=True) = {pstr(list(model.named_parameters(recurse=True)))}") - inputs = torch.ones((2, 2), dtype=torch.float, requires_grad=True) - output: Tensor = model(inputs) - print(f"inputs = {pstr(inputs)}") - print(f"output = {pstr(output)}") - 
- save_autograd_graph_from_module(output, params={ - "inputs": inputs, - "output": output, - "model.weight": model.weight, - # "model.parametrizations.weight.original": model.parametrizations.weight.original, - }).render("C:/X/_data/model_graph", format="svg", cleanup=True) - - print() - print("Forward::") - output: Tensor = model(inputs) - print("Backward::") - output.backward(gradient=torch.ones_like(output)) - print("Accessing::") - print(f"module.weight = {pstr(model.weight)}") - print(f"module.weight.original = {pstr(model.weight.original)}") - print(f"module.weight.grad = {pstr(model.weight.grad)}") - print(f"module.weight.original.grad = {pstr(model.weight.original.grad)}") - print("Update::") - opt = Adam(params=model.parameters()) - print(f"module.weight = {pstr(model.weight)}") - print(f"module.weight.original = {pstr(model.weight.original)}") - print(f"module.weight.grad = {pstr(model.weight.grad)}") - print(f"module.weight.original.grad = {pstr(model.weight.original.grad)}") - print("Step::") - opt.step() - print(f"module.weight = {pstr(model.weight)}") - print(f"module.weight.original = {pstr(model.weight.original)}") - print(f"module.weight.grad = {pstr(model.weight.grad)}") - print(f"module.weight.original.grad = {pstr(model.weight.original.grad)}") - print("zero_grad::") - opt.zero_grad() - print(f"module.weight = {pstr(model.weight)}") - print(f"module.weight.original = {pstr(model.weight.original)}") - print(f"module.weight.grad = {pstr(model.weight.grad)}") - print(f"module.weight.original.grad = {pstr(model.weight.original.grad)}") From 4aeb7184fda6b4293b7feb4bab2cb1ccce9f5327 Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Mon, 5 Jun 2023 21:05:48 -0400 Subject: [PATCH 07/11] using self.class.name Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- CHANGELOG.md | 1 + analogvnn/graph/AccumulateGrad.py | 2 +- analogvnn/graph/ArgsKwargs.py | 2 +- analogvnn/parameter/PseudoParameter.py | 2 +- 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 225d9ad..3058eb4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ * `Model` is subclass of `BackwardModule` for additional functionality. * Using `inspect.isclass` to check if `backward_class` is a class in `Linear.set_backward_function`. +* Repr using `self.__class__.__name__` in all classes. ## 1.0.5 (Patches for Pytorch 2.0.1) diff --git a/analogvnn/graph/AccumulateGrad.py b/analogvnn/graph/AccumulateGrad.py index 2c7a0b3..2b656e6 100644 --- a/analogvnn/graph/AccumulateGrad.py +++ b/analogvnn/graph/AccumulateGrad.py @@ -41,7 +41,7 @@ def __repr__(self): str: String representation of the module. 
""" - return f'AccumulateGrad({self.module})' + return f'{self.__class__.__name__}({self.module})' def __call__( # noqa: C901 self, diff --git a/analogvnn/graph/ArgsKwargs.py b/analogvnn/graph/ArgsKwargs.py index 8616e99..de6ea33 100644 --- a/analogvnn/graph/ArgsKwargs.py +++ b/analogvnn/graph/ArgsKwargs.py @@ -62,7 +62,7 @@ def is_empty(self): def __repr__(self): """Returns a string representation of the parameter.""" - return f'ArgsKwargs(args={self.args}, kwargs={self.kwargs})' + return f'{self.__class__.__name__}(args={self.args}, kwargs={self.kwargs})' @classmethod def to_args_kwargs_object(cls, outputs: ArgsKwargsInput) -> ArgsKwargs: diff --git a/analogvnn/parameter/PseudoParameter.py b/analogvnn/parameter/PseudoParameter.py index 295a25a..7c3a71c 100644 --- a/analogvnn/parameter/PseudoParameter.py +++ b/analogvnn/parameter/PseudoParameter.py @@ -117,7 +117,7 @@ def __repr__(self): str: the string representation. """ - return f'{PseudoParameter.__name__}(' \ + return f'{self.__class__.__name__}(' \ f'transform={self.transformation}' \ f', original={self.original}' \ f')' From 0fb31ac42cd7ca7447ff40ea7602dfa8b7a97aca Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Mon, 5 Jun 2023 21:28:26 -0400 Subject: [PATCH 08/11] v1.0.0 to release branch Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- README.md | 2 +- docs/_static/AnalogVNN_Demo.ipynb | 28 ++++++++++++++-------------- docs/conf.py | 3 +-- docs/sample_code.md | 6 +++--- docs/tutorial.md | 2 +- 5 files changed, 20 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index 1fedc3f..26239bf 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # AnalogVNN [![arXiv](https://img.shields.io/badge/arXiv-2210.10048-orange.svg)](https://arxiv.org/abs/2210.10048) -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Vivswan/AnalogVNN/blob/v1.0.0/docs/_static/AnalogVNN_Demo.ipynb) +[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Vivswan/AnalogVNN/blob/release/docs/_static/AnalogVNN_Demo.ipynb) [![PyPI version](https://badge.fury.io/py/analogvnn.svg)](https://badge.fury.io/py/analogvnn) [![Documentation Status](https://readthedocs.org/projects/analogvnn/badge/?version=stable)](https://analogvnn.readthedocs.io/en/stable/?badge=stable) diff --git a/docs/_static/AnalogVNN_Demo.ipynb b/docs/_static/AnalogVNN_Demo.ipynb index 0111b2b..5262e1f 100644 --- a/docs/_static/AnalogVNN_Demo.ipynb +++ b/docs/_static/AnalogVNN_Demo.ipynb @@ -18,7 +18,7 @@ "\n", "\n", " \n", " \n", " \n", "
\n", - " \n", + " \n", "
\n", " \n", "
\n", @@ -26,7 +26,7 @@ "
\n", "
\n", - " \n", + " \n", "
\n", " \n", "
\n", @@ -34,7 +34,7 @@ "
\n", "
\n", - " \n", + " \n", "
\n", " \n", "
\n", @@ -42,7 +42,7 @@ "
\n", "
\n", - " \n", + " \n", "
\n", " \n", "
\n", @@ -55,14 +55,14 @@ { "cell_type": "markdown", "source": [ - "#### To create 3 layered linear photonic analog neural network with 4-bit [precision](https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#reduceprecision), 0.5 [leakage](https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#leakage-or-error-probability) and [clamp](https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#clamp) normalization:\n", + "#### To create 3 layered linear photonic analog neural network with 4-bit [precision](https://analogvnn.readthedocs.io/en/release/extra_classes.html#reduceprecision), 0.5 [leakage](https://analogvnn.readthedocs.io/en/release/extra_classes.html#leakage-or-error-probability) and [clamp](https://analogvnn.readthedocs.io/en/release/extra_classes.html#clamp) normalization:\n", "\n", "![3 Layered Linear Photonic Analog Neural Network](analogvnn_model.png)\n", "\n", "Python file:\n", - "[Sample code](https://github.com/Vivswan/AnalogVNN/blob/v1.0.0/sample_code.py)\n", + "[Sample code](https://github.com/Vivswan/AnalogVNN/blob/release/sample_code.py)\n", "and\n", - "[Sample code with logs](https://github.com/Vivswan/AnalogVNN/blob/v1.0.0/sample_code_with_logs.py)" + "[Sample code with logs](https://github.com/Vivswan/AnalogVNN/blob/release/sample_code_with_logs.py)" ], "metadata": { "collapsed": false @@ -192,11 +192,11 @@ "source": [ "## Build a 3 layered linear photonic analog neural network\n", "\n", - "[`FullSequential`](https://analogvnn.readthedocs.io/en/v1.0.0/autoapi/analogvnn/nn/module/FullSequential/index.html#analogvnn.nn.module.FullSequential.FullSequential) is sequential model where backward graph is the reverse of forward graph.\n", + "[`FullSequential`](https://analogvnn.readthedocs.io/en/release/autoapi/analogvnn/nn/module/FullSequential/index.html#analogvnn.nn.module.FullSequential.FullSequential) is sequential model where backward graph is the reverse of forward graph.\n", "\n", - "To add the [Reduce Precision](https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#reduce-precision), [Normalization](https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#normalization), and [Noise](https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#noise) before and after the main Linear layer, `add_layer` function is used.\n", + "To add the [Reduce Precision](https://analogvnn.readthedocs.io/en/release/extra_classes.html#reduce-precision), [Normalization](https://analogvnn.readthedocs.io/en/release/extra_classes.html#normalization), and [Noise](https://analogvnn.readthedocs.io/en/release/extra_classes.html#noise) before and after the main Linear layer, `add_layer` function is used.\n", "\n", - "Leakage definition: [https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#leakage-or-error-probability](https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#leakage-or-error-probability)" + "Leakage definition: [https://analogvnn.readthedocs.io/en/release/extra_classes.html#leakage-or-error-probability](https://analogvnn.readthedocs.io/en/release/extra_classes.html#leakage-or-error-probability)" ] }, { @@ -244,7 +244,7 @@ "id": "iOkIKXWoZbmn" }, "source": [ - "Note: [`analogvnn.nn.module.Sequential.Sequential.add_sequence()`](https://analogvnn.readthedocs.io/en/v1.0.0/autoapi/analogvnn/nn/module/Sequential/index.html#analogvnn.nn.module.Sequential.Sequential.add_sequence) is used to create and set forward and backward graphs in AnalogVNN, more information in Inner Workings" + "Note: 
[`analogvnn.nn.module.Sequential.Sequential.add_sequence()`](https://analogvnn.readthedocs.io/en/release/autoapi/analogvnn/nn/module/Sequential/index.html#analogvnn.nn.module.Sequential.Sequential.add_sequence) is used to create and set forward and backward graphs in AnalogVNN, more information in Inner Workings" ] }, { @@ -276,7 +276,7 @@ "\n", "WeightModel is used to parametrize the parameter of LinearModel to simulate photonic weights\n", "\n", - "[`FullSequential`](https://analogvnn.readthedocs.io/en/v1.0.0/autoapi/analogvnn/nn/module/FullSequential/index.html#analogvnn.nn.module.FullSequential.FullSequential) is sequential model where backward graph is the reverse of forward graph." + "[`FullSequential`](https://analogvnn.readthedocs.io/en/release/autoapi/analogvnn/nn/module/FullSequential/index.html#analogvnn.nn.module.FullSequential.FullSequential) is sequential model where backward graph is the reverse of forward graph." ] }, { @@ -333,7 +333,7 @@ "id": "Dtg27Y80WwR0" }, "source": [ - "Using [`PseudoParameter`](https://analogvnn.readthedocs.io/en/v1.0.0/inner_workings.html#pseudoparameters) to parametrize the parameter" + "Using [`PseudoParameter`](https://analogvnn.readthedocs.io/en/release/inner_workings.html#pseudoparameters) to parametrize the parameter" ] }, { @@ -443,7 +443,7 @@ "source": [ "## Conclusion\n", "\n", - "Congratulations! You have trained a 3 layered linear photonic analog neural network with 4-bit [precision](https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#reduceprecision), 0.5 [leakage](https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#leakage-or-error-probability) and [clamp](https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#clamp) normalization" + "Congratulations! You have trained a 3 layered linear photonic analog neural network with 4-bit [precision](https://analogvnn.readthedocs.io/en/release/extra_classes.html#reduceprecision), 0.5 [leakage](https://analogvnn.readthedocs.io/en/release/extra_classes.html#leakage-or-error-probability) and [clamp](https://analogvnn.readthedocs.io/en/release/extra_classes.html#clamp) normalization" ] }, { diff --git a/docs/conf.py b/docs/conf.py index c838169..4d62740 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -107,8 +107,7 @@ 'light_logo': 'analogvnn-logo-wide-white.svg', 'dark_logo': 'analogvnn-logo-wide-black.svg', 'source_repository': 'https://github.com/Vivswan/AnalogVNN', - # 'source_branch': 'master', - 'source_branch': 'v1.0.0', + 'source_branch': 'release', 'source_directory': 'docs/', } # html_logo = '_static/analogvnn-logo-wide-black.svg' diff --git a/docs/sample_code.md b/docs/sample_code.md index bb827db..63e26a1 100644 --- a/docs/sample_code.md +++ b/docs/sample_code.md @@ -1,15 +1,15 @@ # Sample code -
+ Run in Google Colab: Google Colab ![3 Layered Linear Photonic Analog Neural Network](_static/analogvnn_model.png) -[Sample code](https://github.com/Vivswan/AnalogVNN/blob/v1.0.0/sample_code.py) +[Sample code](https://github.com/Vivswan/AnalogVNN/blob/release/sample_code.py) and -[Sample code with logs](https://github.com/Vivswan/AnalogVNN/blob/v1.0.0/sample_code_with_logs.py) +[Sample code with logs](https://github.com/Vivswan/AnalogVNN/blob/release/sample_code_with_logs.py) for 3 layered linear photonic analog neural network with 4-bit precision, 0.5 {ref}`extra_classes:leakage` and {ref}`extra_classes:clamp` normalization: diff --git a/docs/tutorial.md b/docs/tutorial.md index f8017c1..68fc04d 100644 --- a/docs/tutorial.md +++ b/docs/tutorial.md @@ -1,6 +1,6 @@ # Tutorial - + Run in Google Colab: Google Colab From 4b4dabd1ec9397f2c8cb3539c2c242adabc70665 Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Mon, 5 Jun 2023 21:32:27 -0400 Subject: [PATCH 09/11] updated commands Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index a25a811..2f7b27c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -118,7 +118,7 @@ dev = [ "setuptools>=61.0.0", "flit", # for building {flit build} "build", # building the package {pyproject-build} - "twine", # to publish on pypi {twine upload --repository-url=https://test.pypi.org/legacy/ dist/*} {twine upload dist/*} + "twine", # to publish on pypi {twine upload -r testpypi dist/*} {twine upload -r pypi dist/*} "johnnydep", # to see dependencies {johnnydep } ] test = ["analogvnn[flake8]"] From 7e84c7f8b692924c12a563063941d49cfa4df2b7 Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Mon, 5 Jun 2023 22:39:01 -0400 Subject: [PATCH 10/11] added usage to README.md --- .gitignore | 1 + README.md | 14 ++++++++++++-- pyproject.toml | 37 ++++--------------------------------- 3 files changed, 17 insertions(+), 35 deletions(-) diff --git a/.gitignore b/.gitignore index 480233d..9d58103 100644 --- a/.gitignore +++ b/.gitignore @@ -226,3 +226,4 @@ fabric.properties # Android studio 3.1+ serialized cache file .idea/caches/build_file_checksums.ser +.pdm-python diff --git a/README.md b/README.md index 26239bf..215f019 100644 --- a/README.md +++ b/README.md @@ -19,12 +19,22 @@ Documentation: [https://analogvnn.readthedocs.io/](https://analogvnn.readthedocs pip install analogvnn ``` -![3 Layered Linear Photonic Analog Neural Network](docs/_static/analogvnn_model.png) +## Usage: -[//]: # (![3 Layered Linear Photonic Analog Neural Network](https://github.com/Vivswan/AnalogVNN/raw/release/docs/_static/analogvnn_model.png)) +[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Vivswan/AnalogVNN/blob/release/docs/_static/AnalogVNN_Demo.ipynb) + +- Sample code with AnalogVNN: [sample_code.py](https://github.com/Vivswan/AnalogVNN/blob/release/sample_code.py) +- Sample code without + AnalogVNN: [sample_code_non_analog.py](https://github.com/Vivswan/AnalogVNN/blob/release/sample_code_non_analog.py) +- Sample code with AnalogVNN and + Logs: [sample_code_with_logs.py](https://github.com/Vivswan/AnalogVNN/blob/release/sample_code_with_logs.py) +- Jupyter + Notebook: [AnalogVNN_Demo.ipynb](https://github.com/Vivswan/AnalogVNN/blob/release/docs/_static/AnalogVNN_Demo.ipynb) ## Abstract +![3 Layered 
Linear Photonic Analog Neural Network](https://github.com/Vivswan/AnalogVNN/raw/release/docs/_static/analogvnn_model.png) + **AnalogVNN** is a simulation framework built on PyTorch which can simulate the effects of optoelectronic noise, limited precision, and signal normalization present in photonic neural network accelerators. We use this framework to train and optimize linear and diff --git a/pyproject.toml b/pyproject.toml index 2f7b27c..1777ad0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,6 @@ [build-system] requires = ["wheel", "setuptools>=61.0.0", "flit_core >=3.2,<4"] -#build-backend = "setuptools.build_meta" build-backend = "flit_core.buildapi" [tool.flit.module] @@ -19,9 +18,9 @@ where = ["analogvnn"] [project] # $ pip install analogvnn -name = "analogvnn" -version = "1.0.5" -description = "A fully modular framework for modeling and optimizing analog/photonic neural networks" # Optional +name = "AnalogVNN" +version = "1.0.6rc9" +description = "A fully modular framework for modeling and optimizing analog/photonic neural networks" readme = "README.md" requires-python = ">=3.7" license = { file = "LICENSE" } @@ -33,23 +32,14 @@ maintainers = [ { name = "Vivswan Shah", email = "vivswanshah@pitt.edu" } ] # For a list of valid classifiers, see https://pypi.org/classifiers/ -classifiers = [# Optional - # How mature is this project? Common values are - # 3 - Alpha - # 4 - Beta - # 5 - Production/Stable +classifiers = [ "Development Status :: 5 - Production/Stable", - # Indicate who your project is intended for "Intended Audience :: Developers", "Intended Audience :: Science/Research", "Topic :: Software Development :: Build Tools", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Topic :: Software Development :: Libraries :: Python Modules", - # Pick your license as you wish "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)", - # Specify the Python versions you support here. In particular, ensure - # that you indicate you support Python 3. These classifiers are *not* - # checked by "pip install". See instead "python_requires" below. "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", @@ -58,12 +48,6 @@ classifiers = [# Optional "Programming Language :: Python :: 3 :: Only", ] -# This field lists other packages that your project depends on to run. -# Any package you put here will be installed by pip when your project is -# installed, so they must be valid existing projects. -# -# For an analysis of this field vs pip's requirements files see: -# https://packaging.python.org/discussions/install-requires-vs-requirements/ dependencies = [ "dataclasses", "scipy", @@ -72,14 +56,6 @@ dependencies = [ "importlib-metadata<5.0.0,>=2.0.0; python_version < '3.8'", ] -# List additional groups of dependencies here (e.g. development -# dependencies). Users will be able to install these using the "extras" -# syntax, for example: -# -# $ pip install analogvnn[dev] -# -# Similar to `dependencies` above, these must be valid existing -# projects. [project.optional-dependencies] full = [ "tensorflow", @@ -131,8 +107,3 @@ all = ["analogvnn[full,dev,doc,test]"] "Homepage" = "https://github.com/Vivswan/AnalogVNN" "Say Thanks!" = "https://vivswan.github.io/" "Source" = "https://github.com/Vivswan/AnalogVNN" - -# The following would provide a command line executable called `sample` -# which executes the function `main` from this package when invoked. 
-#[project.scripts] # Optional -#sample = "sample:main" \ No newline at end of file From 4b7b1df02d47772c890a6c8770ac42551ed371e5 Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Mon, 5 Jun 2023 22:43:51 -0400 Subject: [PATCH 11/11] v1.0.6 --- README.md | 8 ++++++-- docs/install.md | 2 +- pyproject.toml | 4 ++-- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 215f019..7916090 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ [![PyPI version](https://badge.fury.io/py/analogvnn.svg)](https://badge.fury.io/py/analogvnn) [![Documentation Status](https://readthedocs.org/projects/analogvnn/badge/?version=stable)](https://analogvnn.readthedocs.io/en/stable/?badge=stable) -[![Python](https://img.shields.io/badge/python-3.7--3.10-blue)](https://badge.fury.io/py/analogvnn) +[![Python](https://img.shields.io/badge/python-3.7--3.11-blue)](https://badge.fury.io/py/analogvnn) [![License: MPL 2.0](https://img.shields.io/badge/License-MPL_2.0-blue.svg)](https://opensource.org/licenses/MPL-2.0) Documentation: [https://analogvnn.readthedocs.io/](https://analogvnn.readthedocs.io/) @@ -16,7 +16,11 @@ Documentation: [https://analogvnn.readthedocs.io/](https://analogvnn.readthedocs - Install AnalogVNN using [pip](https://pypi.org/project/analogvnn/) ```bash -pip install analogvnn + # Current stable release for CPU and GPU + pip install analogvnn + + # For additional optional features + pip install analogvnn[full] ``` ## Usage: diff --git a/docs/install.md b/docs/install.md index 3d8e707..a7190c3 100644 --- a/docs/install.md +++ b/docs/install.md @@ -2,7 +2,7 @@ AnalogVNN is tested and supported on the following 64-bit systems: -- Python 3.7, 3.8, 3.9, 3.10 +- Python 3.7, 3.8, 3.9, 3.10, 3.11 - Windows 7 and later - Ubuntu 16.04 and later, including WSL - Red Hat Enterprise Linux 7 and later diff --git a/pyproject.toml b/pyproject.toml index 1777ad0..95496fc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,8 +18,8 @@ where = ["analogvnn"] [project] # $ pip install analogvnn -name = "AnalogVNN" -version = "1.0.6rc9" +name = "analogvnn" +version = "1.0.6" description = "A fully modular framework for modeling and optimizing analog/photonic neural networks" readme = "README.md" requires-python = ">=3.7"
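
The closing patches pin down the 1.0.6 release: pyproject.toml drops the `1.0.6rc9` pre-release suffix, restores the lowercase `analogvnn` distribution name, and the README and install docs now advertise Python 3.7-3.11 plus the optional `analogvnn[full]` extra. A small, hedged way to confirm the published metadata once `pip install analogvnn==1.0.6` has run (purely illustrative, not part of the patch series; on Python 3.7 the `importlib-metadata` backport already listed in the dependencies provides the same API):

    from importlib.metadata import metadata, version  # stdlib from Python 3.8 onward

    dist = metadata("analogvnn")
    print(version("analogvnn"))      # expected: 1.0.6
    print(dist["Name"])              # distribution name as published
    print(dist["Requires-Python"])   # expected: >=3.7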