diff --git a/.flake8 b/.flake8
index 0078e18..3da9a0b 100644
--- a/.flake8
+++ b/.flake8
@@ -2,17 +2,36 @@
max-line-length = 120
extend-ignore =
- C101, # Coding magic comment
- D100, # Missing docstring in public module
- D104, # Missing docstring in public package
- D202, # No blank lines allowed after function docstring
- D210, # No whitespaces allowed surrounding docstring text
- D401, # First line should be in imperative mood
- R504, # unnecessary variable assignment before return statement
- R505, # unnecessary else after return statement
+ # No explicit stacklevel argument found
+ B028,
+
+ # Coding magic comment
+ C101,
+
+ # Missing docstring in public module
+ D100,
+
+ # Missing docstring in public package
+ D104,
+
+ # No blank lines allowed after function docstring
+ D202,
+
+ # No whitespaces allowed surrounding docstring text
+ D210,
+
+ # First line should be in imperative mood
+ D401,
+
+ # unnecessary variable assignment before return statement
+ R504,
+
+ # unnecessary else after return statement
+ R505,
per-file-ignores =
sample_code.py: D100, D101, D102, D103, D104
+ sample_code_non_analog.py: D100, D101, D102, D103, D104
sample_code_with_logs.py: D100, D101, D102, D103, D104
exclude =
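For context on the newly ignored B028 ("No explicit stacklevel argument found"): flake8-bugbear raises it when `warnings.warn` is called without a `stacklevel`, because the default of 1 attributes the warning to the library instead of the caller. A minimal sketch, with hypothetical function names:

```python
import warnings

def old_api():
    # Without stacklevel, B028 fires and the warning points here;
    # stacklevel=2 attributes it to whoever called old_api().
    warnings.warn('old_api() is deprecated, use new_api()', DeprecationWarning, stacklevel=2)

def new_api():
    return 42

warnings.simplefilter('always')
old_api()
```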
diff --git a/.gitignore b/.gitignore
index 480233d..9d58103 100644
--- a/.gitignore
+++ b/.gitignore
@@ -226,3 +226,4 @@ fabric.properties
# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser
+.pdm-python
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 40fba0b..3058eb4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,11 @@
# Changelog
+## 1.0.6
+
+* `Model` is now a subclass of `BackwardModule` for additional functionality.
+* Using `inspect.isclass` to check whether `backward_class` is a class in `Layer.set_backward_function`.
+* `__repr__` now uses `self.__class__.__name__` in all classes.
+
## 1.0.5 (Patches for Pytorch 2.0.1)
* Removed unnecessary `PseudoParameter.grad` property.
@@ -7,9 +13,9 @@
## 1.0.4
-* Combined `PseudoParameter` and `PseudoParameterModule` for better visibility
- * BugFix: fixed save and load of state_dict of `PseudoParameter` and transformation module
-* Removed redundant class `analogvnn.parameter.Parameter`
+* Combined `PseudoParameter` and `PseudoParameterModule` for better visibility.
+ * BugFix: fixed save and load of state_dict of `PseudoParameter` and transformation module.
+* Removed redundant class `analogvnn.parameter.Parameter`.
## 1.0.3
diff --git a/README.md b/README.md
index 1fedc3f..7916090 100644
--- a/README.md
+++ b/README.md
@@ -1,11 +1,11 @@
# AnalogVNN
[![arXiv](https://img.shields.io/badge/arXiv-2210.10048-orange.svg)](https://arxiv.org/abs/2210.10048)
-[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Vivswan/AnalogVNN/blob/v1.0.0/docs/_static/AnalogVNN_Demo.ipynb)
+[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Vivswan/AnalogVNN/blob/release/docs/_static/AnalogVNN_Demo.ipynb)
[![PyPI version](https://badge.fury.io/py/analogvnn.svg)](https://badge.fury.io/py/analogvnn)
[![Documentation Status](https://readthedocs.org/projects/analogvnn/badge/?version=stable)](https://analogvnn.readthedocs.io/en/stable/?badge=stable)
-[![Python](https://img.shields.io/badge/python-3.7--3.10-blue)](https://badge.fury.io/py/analogvnn)
+[![Python](https://img.shields.io/badge/python-3.7--3.11-blue)](https://badge.fury.io/py/analogvnn)
[![License: MPL 2.0](https://img.shields.io/badge/License-MPL_2.0-blue.svg)](https://opensource.org/licenses/MPL-2.0)
Documentation: [https://analogvnn.readthedocs.io/](https://analogvnn.readthedocs.io/)
@@ -16,15 +16,29 @@ Documentation: [https://analogvnn.readthedocs.io/](https://analogvnn.readthedocs
- Install AnalogVNN using [pip](https://pypi.org/project/analogvnn/)
```bash
-pip install analogvnn
+ # Current stable release for CPU and GPU
+ pip install analogvnn
+
+ # For additional optional features
+ pip install analogvnn[full]
```
-![3 Layered Linear Photonic Analog Neural Network](docs/_static/analogvnn_model.png)
+## Usage
-[//]: # (![3 Layered Linear Photonic Analog Neural Network](https://github.com/Vivswan/AnalogVNN/raw/release/docs/_static/analogvnn_model.png))
+[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Vivswan/AnalogVNN/blob/release/docs/_static/AnalogVNN_Demo.ipynb)
+
+- Sample code with AnalogVNN: [sample_code.py](https://github.com/Vivswan/AnalogVNN/blob/release/sample_code.py)
+- Sample code without
+ AnalogVNN: [sample_code_non_analog.py](https://github.com/Vivswan/AnalogVNN/blob/release/sample_code_non_analog.py)
+- Sample code with AnalogVNN and
+ Logs: [sample_code_with_logs.py](https://github.com/Vivswan/AnalogVNN/blob/release/sample_code_with_logs.py)
+- Jupyter
+ Notebook: [AnalogVNN_Demo.ipynb](https://github.com/Vivswan/AnalogVNN/blob/release/docs/_static/AnalogVNN_Demo.ipynb)
## Abstract
+![3 Layered Linear Photonic Analog Neural Network](https://github.com/Vivswan/AnalogVNN/raw/release/docs/_static/analogvnn_model.png)
+
**AnalogVNN** is a simulation framework built on PyTorch which can simulate the effects of
optoelectronic noise, limited precision, and signal normalization present in photonic
neural network accelerators. We use this framework to train and optimize linear and
diff --git a/analogvnn/graph/AccumulateGrad.py b/analogvnn/graph/AccumulateGrad.py
index 2c7a0b3..2b656e6 100644
--- a/analogvnn/graph/AccumulateGrad.py
+++ b/analogvnn/graph/AccumulateGrad.py
@@ -41,7 +41,7 @@ def __repr__(self):
str: String representation of the module.
"""
- return f'AccumulateGrad({self.module})'
+ return f'{self.__class__.__name__}({self.module})'
def __call__( # noqa: C901
self,
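This repr change, and the matching ones in `ArgsKwargs` and `PseudoParameter` below, swaps a hard-coded class name for `self.__class__.__name__`, so subclasses automatically report their own names. A minimal sketch with stand-in classes:

```python
class AccumulateGradLike:  # stand-in, not the analogvnn class
    def __repr__(self):
        # self.__class__ resolves at runtime, so subclasses inherit a correct repr
        return f'{self.__class__.__name__}(...)'

class CustomAccumulate(AccumulateGradLike):
    pass

print(repr(AccumulateGradLike()))  # AccumulateGradLike(...)
print(repr(CustomAccumulate()))    # CustomAccumulate(...)
```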
diff --git a/analogvnn/graph/AcyclicDirectedGraph.py b/analogvnn/graph/AcyclicDirectedGraph.py
index d13d705..1f0b83d 100644
--- a/analogvnn/graph/AcyclicDirectedGraph.py
+++ b/analogvnn/graph/AcyclicDirectedGraph.py
@@ -131,8 +131,8 @@ def add_edge(
self.graph.nodes[v_of_edge]['fillcolor'] = 'lightblue'
return self
- @staticmethod # noqa: C901
- def check_edge_parameters(
+ @staticmethod
+ def check_edge_parameters( # noqa: C901
in_arg: Union[None, int, bool],
in_kwarg: Union[None, str, bool],
out_arg: Union[None, int, bool],
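Moving `# noqa: C901` from the decorator line to the `def` line matters because flake8 suppressions apply per line, and C901 (function too complex) is reported on the `def` line. A minimal sketch:

```python
class Graph:  # stand-in class for illustration
    @staticmethod            # a noqa on this line would not reach the C901 report
    def check(x):  # noqa: C901  (sits on the line flake8 actually flags)
        return x
```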
diff --git a/analogvnn/graph/ArgsKwargs.py b/analogvnn/graph/ArgsKwargs.py
index 8616e99..de6ea33 100644
--- a/analogvnn/graph/ArgsKwargs.py
+++ b/analogvnn/graph/ArgsKwargs.py
@@ -62,7 +62,7 @@ def is_empty(self):
def __repr__(self):
"""Returns a string representation of the parameter."""
- return f'ArgsKwargs(args={self.args}, kwargs={self.kwargs})'
+ return f'{self.__class__.__name__}(args={self.args}, kwargs={self.kwargs})'
@classmethod
def to_args_kwargs_object(cls, outputs: ArgsKwargsInput) -> ArgsKwargs:
diff --git a/analogvnn/nn/module/Layer.py b/analogvnn/nn/module/Layer.py
index f31a34b..e9fd85e 100644
--- a/analogvnn/nn/module/Layer.py
+++ b/analogvnn/nn/module/Layer.py
@@ -1,6 +1,7 @@
from __future__ import annotations
import functools
+import inspect
from typing import Union, Type, Callable, Sequence, Optional, Set, Iterator, Tuple
from torch import nn, Tensor
@@ -178,7 +179,7 @@ def set_backward_function(self, backward_class: Union[Callable, BackwardModule,
if backward_class == self:
return self
- if issubclass(backward_class, BackwardModule):
+ if inspect.isclass(backward_class) and issubclass(backward_class, BackwardModule):
self._backward_module = backward_class(self)
elif isinstance(backward_class, BackwardModule):
backward_class.set_layer(self)
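The added `inspect.isclass` guard avoids a `TypeError`: `issubclass` requires a class as its first argument, so an instance or plain callable would crash before reaching the `isinstance` branch. A minimal sketch with a stand-in class:

```python
import inspect

class BackwardModule:  # stand-in, not the analogvnn class
    pass

for candidate in (BackwardModule, BackwardModule(), lambda grad: grad):
    if inspect.isclass(candidate) and issubclass(candidate, BackwardModule):
        print(candidate, '-> a subclass: instantiate it')
    elif isinstance(candidate, BackwardModule):
        print(candidate, '-> an instance: use as-is')
    else:
        print(candidate, '-> a plain callable')
```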
diff --git a/analogvnn/nn/module/Model.py b/analogvnn/nn/module/Model.py
index 4d5a99e..e4436dc 100644
--- a/analogvnn/nn/module/Model.py
+++ b/analogvnn/nn/module/Model.py
@@ -7,6 +7,7 @@
from torch import optim, Tensor, nn
from torch.utils.data import DataLoader
+from analogvnn.backward.BackwardModule import BackwardModule
from analogvnn.fn.test import test
from analogvnn.fn.train import train
from analogvnn.graph.BackwardGraph import BackwardGraph
@@ -22,7 +23,7 @@
__all__ = ['Model']
-class Model(Layer):
+class Model(Layer, BackwardModule):
"""Base class for analog neural network models.
Attributes:
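Sketched with stand-in classes (not the AnalogVNN API), the new `Model(Layer, BackwardModule)` inheritance follows the mixin pattern below, letting a model carry its own backward behavior alongside its forward pass:

```python
class Layer:                       # stand-in
    def __call__(self, x):
        return self.forward(x)

class BackwardModule:              # stand-in
    def backward(self, grad):
        raise NotImplementedError

class Model(Layer, BackwardModule):
    def forward(self, x):
        return 2 * x

    def backward(self, grad):      # backward logic lives on the model itself
        return 2 * grad

m = Model()
print(m(3.0), m.backward(1.0))     # 6.0 2.0
```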
diff --git a/analogvnn/parameter/PseudoParameter.py b/analogvnn/parameter/PseudoParameter.py
index 41c9576..7c3a71c 100644
--- a/analogvnn/parameter/PseudoParameter.py
+++ b/analogvnn/parameter/PseudoParameter.py
@@ -66,7 +66,7 @@ def __init__(self, data=None, requires_grad=True, transformation=None):
self._transformed.original = self
self._transformation = self.identity
self.set_transformation(transformation)
- self.substitute_member(self.original, self._transformed, "grad")
+ self.substitute_member(self.original, self._transformed, 'grad')
def __call__(self, *args, **kwargs):
"""Transforms the parameter.
@@ -117,7 +117,7 @@ def __repr__(self):
str: the string representation.
"""
- return f'{PseudoParameter.__name__}(' \
+ return f'{self.__class__.__name__}(' \
f'transform={self.transformation}' \
f', original={self.original}' \
f')'
diff --git a/unit_tests/__init__.py b/analogvnn/py.typed
similarity index 100%
rename from unit_tests/__init__.py
rename to analogvnn/py.typed
diff --git a/analogvnn/utils/TensorboardModelLog.py b/analogvnn/utils/TensorboardModelLog.py
index 283b7f0..3dc366b 100644
--- a/analogvnn/utils/TensorboardModelLog.py
+++ b/analogvnn/utils/TensorboardModelLog.py
@@ -179,7 +179,7 @@ def add_summary(
model=model,
input_size=input_size,
train_loader=train_loader,
- *args,
+ *args, # noqa: B026
**kwargs
)
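The suppressed B026 warns about star-arg unpacking after keyword arguments, as in the call above. It is legal Python, but easy to misread because the unpacked values still bind to the earlier positional slots. A minimal sketch:

```python
def f(a, b, c):
    return (a, b, c)

print(f(1, *(2, 3)))    # (1, 2, 3): conventional order
print(f(c=3, *(1, 2)))  # (1, 2, 3): legal, but B026 flags it, since *(1, 2)
                        # fills a and b even though it appears after c=3
```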
diff --git a/analogvnn/utils/get_model_summaries.py b/analogvnn/utils/get_model_summaries.py
index 1fc72fa..1128b75 100644
--- a/analogvnn/utils/get_model_summaries.py
+++ b/analogvnn/utils/get_model_summaries.py
@@ -6,7 +6,7 @@
from analogvnn.nn.module.Layer import Layer
-def get_model_summaries(
+def get_model_summaries( # noqa: C901
model: Optional[nn.Module],
input_size: Optional[Sequence[int]] = None,
train_loader: DataLoader = None,
diff --git a/docs/_static/AnalogVNN_Demo.ipynb b/docs/_static/AnalogVNN_Demo.ipynb
index 0111b2b..5262e1f 100644
--- a/docs/_static/AnalogVNN_Demo.ipynb
+++ b/docs/_static/AnalogVNN_Demo.ipynb
@@ -18,7 +18,7 @@ through @@ -42,7 +42,7 @@
[Notebook header HTML badge table: the cell markup was lost in extraction. The four surviving -/+ pairs appear to update the badge links from the v1.0.0 tag to the release branch, consistent with the rest of this diff.]
@@ -55,14 +55,14 @@
{
"cell_type": "markdown",
"source": [
- "#### To create 3 layered linear photonic analog neural network with 4-bit [precision](https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#reduceprecision), 0.5 [leakage](https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#leakage-or-error-probability) and [clamp](https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#clamp) normalization:\n",
+ "#### To create 3 layered linear photonic analog neural network with 4-bit [precision](https://analogvnn.readthedocs.io/en/release/extra_classes.html#reduceprecision), 0.5 [leakage](https://analogvnn.readthedocs.io/en/release/extra_classes.html#leakage-or-error-probability) and [clamp](https://analogvnn.readthedocs.io/en/release/extra_classes.html#clamp) normalization:\n",
"\n",
"![3 Layered Linear Photonic Analog Neural Network](analogvnn_model.png)\n",
"\n",
"Python file:\n",
- "[Sample code](https://github.com/Vivswan/AnalogVNN/blob/v1.0.0/sample_code.py)\n",
+ "[Sample code](https://github.com/Vivswan/AnalogVNN/blob/release/sample_code.py)\n",
"and\n",
- "[Sample code with logs](https://github.com/Vivswan/AnalogVNN/blob/v1.0.0/sample_code_with_logs.py)"
+ "[Sample code with logs](https://github.com/Vivswan/AnalogVNN/blob/release/sample_code_with_logs.py)"
],
"metadata": {
"collapsed": false
@@ -192,11 +192,11 @@
"source": [
"## Build a 3 layered linear photonic analog neural network\n",
"\n",
- "[`FullSequential`](https://analogvnn.readthedocs.io/en/v1.0.0/autoapi/analogvnn/nn/module/FullSequential/index.html#analogvnn.nn.module.FullSequential.FullSequential) is sequential model where backward graph is the reverse of forward graph.\n",
+ "[`FullSequential`](https://analogvnn.readthedocs.io/en/release/autoapi/analogvnn/nn/module/FullSequential/index.html#analogvnn.nn.module.FullSequential.FullSequential) is sequential model where backward graph is the reverse of forward graph.\n",
"\n",
- "To add the [Reduce Precision](https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#reduce-precision), [Normalization](https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#normalization), and [Noise](https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#noise) before and after the main Linear layer, `add_layer` function is used.\n",
+ "To add the [Reduce Precision](https://analogvnn.readthedocs.io/en/release/extra_classes.html#reduce-precision), [Normalization](https://analogvnn.readthedocs.io/en/release/extra_classes.html#normalization), and [Noise](https://analogvnn.readthedocs.io/en/release/extra_classes.html#noise) before and after the main Linear layer, `add_layer` function is used.\n",
"\n",
- "Leakage definition: [https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#leakage-or-error-probability](https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#leakage-or-error-probability)"
+ "Leakage definition: [https://analogvnn.readthedocs.io/en/release/extra_classes.html#leakage-or-error-probability](https://analogvnn.readthedocs.io/en/release/extra_classes.html#leakage-or-error-probability)"
]
},
{
@@ -244,7 +244,7 @@
"id": "iOkIKXWoZbmn"
},
"source": [
- "Note: [`analogvnn.nn.module.Sequential.Sequential.add_sequence()`](https://analogvnn.readthedocs.io/en/v1.0.0/autoapi/analogvnn/nn/module/Sequential/index.html#analogvnn.nn.module.Sequential.Sequential.add_sequence) is used to create and set forward and backward graphs in AnalogVNN, more information in Inner Workings"
+ "Note: [`analogvnn.nn.module.Sequential.Sequential.add_sequence()`](https://analogvnn.readthedocs.io/en/release/autoapi/analogvnn/nn/module/Sequential/index.html#analogvnn.nn.module.Sequential.Sequential.add_sequence) is used to create and set forward and backward graphs in AnalogVNN, more information in Inner Workings"
]
},
{
@@ -276,7 +276,7 @@
"\n",
"WeightModel is used to parametrize the parameter of LinearModel to simulate photonic weights\n",
"\n",
- "[`FullSequential`](https://analogvnn.readthedocs.io/en/v1.0.0/autoapi/analogvnn/nn/module/FullSequential/index.html#analogvnn.nn.module.FullSequential.FullSequential) is sequential model where backward graph is the reverse of forward graph."
+ "[`FullSequential`](https://analogvnn.readthedocs.io/en/release/autoapi/analogvnn/nn/module/FullSequential/index.html#analogvnn.nn.module.FullSequential.FullSequential) is sequential model where backward graph is the reverse of forward graph."
]
},
{
@@ -333,7 +333,7 @@
"id": "Dtg27Y80WwR0"
},
"source": [
- "Using [`PseudoParameter`](https://analogvnn.readthedocs.io/en/v1.0.0/inner_workings.html#pseudoparameters) to parametrize the parameter"
+ "Using [`PseudoParameter`](https://analogvnn.readthedocs.io/en/release/inner_workings.html#pseudoparameters) to parametrize the parameter"
]
},
{
@@ -443,7 +443,7 @@
"source": [
"## Conclusion\n",
"\n",
- "Congratulations! You have trained a 3 layered linear photonic analog neural network with 4-bit [precision](https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#reduceprecision), 0.5 [leakage](https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#leakage-or-error-probability) and [clamp](https://analogvnn.readthedocs.io/en/v1.0.0/extra_classes.html#clamp) normalization"
+ "Congratulations! You have trained a 3 layered linear photonic analog neural network with 4-bit [precision](https://analogvnn.readthedocs.io/en/release/extra_classes.html#reduceprecision), 0.5 [leakage](https://analogvnn.readthedocs.io/en/release/extra_classes.html#leakage-or-error-probability) and [clamp](https://analogvnn.readthedocs.io/en/release/extra_classes.html#clamp) normalization"
]
},
{
diff --git a/docs/conf.py b/docs/conf.py
index c838169..4d62740 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -107,8 +107,7 @@
'light_logo': 'analogvnn-logo-wide-white.svg',
'dark_logo': 'analogvnn-logo-wide-black.svg',
'source_repository': 'https://github.com/Vivswan/AnalogVNN',
- # 'source_branch': 'master',
- 'source_branch': 'v1.0.0',
+ 'source_branch': 'release',
'source_directory': 'docs/',
}
# html_logo = '_static/analogvnn-logo-wide-black.svg'
diff --git a/docs/install.md b/docs/install.md
index 3d8e707..a7190c3 100644
--- a/docs/install.md
+++ b/docs/install.md
@@ -2,7 +2,7 @@
AnalogVNN is tested and supported on the following 64-bit systems:
-- Python 3.7, 3.8, 3.9, 3.10
+- Python 3.7, 3.8, 3.9, 3.10, 3.11
- Windows 7 and later
- Ubuntu 16.04 and later, including WSL
- Red Hat Enterprise Linux 7 and later
diff --git a/docs/sample_code.md b/docs/sample_code.md
index bb827db..63e26a1 100644
--- a/docs/sample_code.md
+++ b/docs/sample_code.md
@@ -1,15 +1,15 @@
# Sample code
[-/+ pair: HTML Colab badge lost in extraction; its link presumably moves from the v1.0.0 tag to the release branch]
Run in Google Colab:
![3 Layered Linear Photonic Analog Neural Network](_static/analogvnn_model.png)
-[Sample code](https://github.com/Vivswan/AnalogVNN/blob/v1.0.0/sample_code.py)
+[Sample code](https://github.com/Vivswan/AnalogVNN/blob/release/sample_code.py)
and
-[Sample code with logs](https://github.com/Vivswan/AnalogVNN/blob/v1.0.0/sample_code_with_logs.py)
+[Sample code with logs](https://github.com/Vivswan/AnalogVNN/blob/release/sample_code_with_logs.py)
for 3 layered linear photonic analog neural network with 4-bit precision,
0.5 {ref}`extra_classes:leakage` and {ref}`extra_classes:clamp`
normalization:
diff --git a/docs/tutorial.md b/docs/tutorial.md
index f8017c1..68fc04d 100644
--- a/docs/tutorial.md
+++ b/docs/tutorial.md
@@ -1,6 +1,6 @@
# Tutorial
[-/+ pair: HTML Colab badge lost in extraction; its link presumably moves from the v1.0.0 tag to the release branch]
Run in Google Colab:
diff --git a/pyproject.toml b/pyproject.toml
index 4d06222..95496fc 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -2,7 +2,6 @@
[build-system]
requires = ["wheel", "setuptools>=61.0.0", "flit_core >=3.2,<4"]
-#build-backend = "setuptools.build_meta"
build-backend = "flit_core.buildapi"
[tool.flit.module]
@@ -11,11 +10,17 @@ name = "analogvnn"
[tool.setuptools]
py-modules = ['analogvnn']
+[tool.setuptools.package-data]
+"analogvnn" = ["py.typed"]
+
+[tool.setuptools.packages.find]
+where = ["analogvnn"]
+
[project]
# $ pip install analogvnn
name = "analogvnn"
-version = "1.0.5"
-description = "A fully modular framework for modeling and optimizing analog/photonic neural networks" # Optional
+version = "1.0.6"
+description = "A fully modular framework for modeling and optimizing analog/photonic neural networks"
readme = "README.md"
requires-python = ">=3.7"
license = { file = "LICENSE" }
@@ -27,23 +32,14 @@ maintainers = [
{ name = "Vivswan Shah", email = "vivswanshah@pitt.edu" }
]
# For a list of valid classifiers, see https://pypi.org/classifiers/
-classifiers = [# Optional
- # How mature is this project? Common values are
- # 3 - Alpha
- # 4 - Beta
- # 5 - Production/Stable
+classifiers = [
"Development Status :: 5 - Production/Stable",
- # Indicate who your project is intended for
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Topic :: Software Development :: Build Tools",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Libraries :: Python Modules",
- # Pick your license as you wish
- "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
- # Specify the Python versions you support here. In particular, ensure
- # that you indicate you support Python 3. These classifiers are *not*
- # checked by "pip install". See instead "python_requires" below.
+ "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
@@ -52,12 +48,6 @@ classifiers = [# Optional
"Programming Language :: Python :: 3 :: Only",
]
-# This field lists other packages that your project depends on to run.
-# Any package you put here will be installed by pip when your project is
-# installed, so they must be valid existing projects.
-#
-# For an analysis of this field vs pip's requirements files see:
-# https://packaging.python.org/discussions/install-requires-vs-requirements/
dependencies = [
"dataclasses",
"scipy",
@@ -66,14 +56,6 @@ dependencies = [
"importlib-metadata<5.0.0,>=2.0.0; python_version < '3.8'",
]
-# List additional groups of dependencies here (e.g. development
-# dependencies). Users will be able to install these using the "extras"
-# syntax, for example:
-#
-# $ pip install analogvnn[dev]
-#
-# Similar to `dependencies` above, these must be valid existing
-# projects.
[project.optional-dependencies]
full = [
"tensorflow",
@@ -82,9 +64,9 @@ full = [
"graphviz",
#"python-graphviz",
]
-doc = [
+doc = [# https://www.youtube.com/watch?v=qRSb299awB0&t=2418s
"sphinx>=4.2.0",
- "sphinx-autobuild",
+ "sphinx-autobuild", # for live reloading {sphinx-autobuild .\docs .\docs\_build\html}
"rst-to-myst[sphinx]",
"furo",
"myst_parser",
@@ -97,7 +79,7 @@ doc = [
"sphinxcontrib-katex", # for math
]
flake8 = [
- "flake8",
+ "flake8", # for style checks {flake8 .\analogvnn\}
"flake8-docstrings",
"flake8-quotes",
"flake8-bugbear",
@@ -105,14 +87,14 @@ flake8 = [
"flake8-executable",
"flake8-coding",
"flake8-return",
-# "flake8-noreturn; python_version >= '3.8'",
+ # "flake8-noreturn; python_version >= '3.8'",
"flake8-deprecated",
]
dev = [
- "flit", # for building {flit build}
"setuptools>=61.0.0",
+ "flit", # for building {flit build}
"build", # building the package {pyproject-build}
- "twine", # to publish on pypi {twine upload --repository-url=https://test.pypi.org/legacy/ dist/*} {twine upload dist/*}
+ "twine", # to publish on pypi {twine upload -r testpypi dist/*} {twine upload -r pypi dist/*}
"johnnydep", # to see dependencies {johnnydep }
]
test = ["analogvnn[flake8]"]
@@ -125,8 +107,3 @@ all = ["analogvnn[full,dev,doc,test]"]
"Homepage" = "https://github.com/Vivswan/AnalogVNN"
"Say Thanks!" = "https://vivswan.github.io/"
"Source" = "https://github.com/Vivswan/AnalogVNN"
-
-# The following would provide a command line executable called `sample`
-# which executes the function `main` from this package when invoked.
-#[project.scripts] # Optional
-#sample = "sample:main"
\ No newline at end of file
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/unit_tests/test_model_graphs.py b/tests/test_model_graphs.py
similarity index 100%
rename from unit_tests/test_model_graphs.py
rename to tests/test_model_graphs.py
diff --git a/unit_tests/test_acyclic_directed_graphs.py b/unit_tests/test_acyclic_directed_graphs.py
deleted file mode 100644
index 85cb219..0000000
--- a/unit_tests/test_acyclic_directed_graphs.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# @staticmethod
-# def print_inputs_outputs(input_output_graph, module):
-# if len(input_output_graph[module].inputs.args) > 0:
-# print(f"{module} :i: {input_output_graph[module].inputs.args}")
-# if len(input_output_graph[module].inputs.kwargs.keys()) > 0:
-# print(f"{module} :i: {input_output_graph[module].inputs.kwargs}")
-# if len(input_output_graph[module].outputs.args) > 0:
-# print(f"{module} :o: {input_output_graph[module].outputs.args}")
-# if len(input_output_graph[module].outputs.kwargs.keys()) > 0:
-# print(f"{module} :o: {input_output_graph[module].outputs.kwargs}")
diff --git a/unit_tests/test_pseudo_parameter.py b/unit_tests/test_pseudo_parameter.py
deleted file mode 100644
index 3b50d89..0000000
--- a/unit_tests/test_pseudo_parameter.py
+++ /dev/null
@@ -1,131 +0,0 @@
-import torch
-import torch.nn as nn
-from torch import Tensor
-from torch.optim import Adam
-
-from analogvnn.backward.BackwardIdentity import BackwardIdentity
-from analogvnn.nn.module.Model import Model
-from analogvnn.utils.render_autograd_graph import save_autograd_graph_from_module
-
-# def __getattribute__(self, item):
-# print(f"__getattribute__:: {item!r}")
-# return super().__getattribute__(item)
-#
-# def __setattr__(self, key, value):
-# print(f"__setattr__:: {key!r} -> {value!r}")
-# super().__setattr__(key, value)
-#
-# # def __set__(self, instance, value):
-# # print(f"__set__:: {instance!r} -> {value!r}")
-# # super().__set__(instance, value)
-#
-# def __get__(self, instance, owner):
-# print(f"__get__:: {instance!r} -> {owner!r}")
-# return super().__get__(instance, owner)
-#
-# @classmethod
-# def __torch_function__(cls, func, types, args=(), kwargs=None):
-# pargs = [x for x in args if not isinstance(x, PseudoParameter)]
-# print(f"__torch_function__:: {func}, types: {types!r}, args: {pargs!r}, kwargs:{kwargs!r}")
-# return super().__torch_function__(func, types, args, {} if kwargs is None else kwargs)
-
-
-if __name__ == '__main__':
- class Layer(nn.Module):
- def __init__(self):
- super().__init__()
-
- self.weight = nn.Parameter(
- data=torch.ones((1, 1)) * 2,
- requires_grad=True
- )
-
- def forward(self, x):
- return x + (torch.ones_like(x) * self.weight)
-
-
- class Symmetric(BackwardIdentity, Model):
- def forward(self, x):
- return torch.rand((1, x.size()[0])) @ x @ torch.rand((x.size()[1], 1))
-
-
- def pstr(s):
- return str(s).replace(" ", "").replace("\n", "")
-
-
- model = Layer()
- parametrization = Symmetric()
- # parametrization.eval()
-
- # # Set the parametrization mechanism
- # # Fetch the original buffer or parameter
- # # We create this early to check for possible errors
- # parametrizations = parametrize.ParametrizationList([parametrization], model.weight)
- # # Delete the previous parameter or buffer
- # delattr(model, "weight")
- # # If this is the first parametrization registered on the module,
- # # we prepare the module to inject the property
- # if not parametrize.is_parametrized(model):
- # # Change the class
- # _inject_new_class(model)
- # # Inject a ``ModuleDict`` into the instance under module.parametrizations
- # model.parametrizations = ModuleDict()
- # # Add a property into the class
- # _inject_property(model, "weight")
- # # Add a ParametrizationList
- # model.parametrizations["weight"] = parametrizations
-
- # parametrize.register_parametrization(model, "weight", parametrization)
-
- PseudoParameter.parameterize(model, "weight", parametrization)
- print(f"module.weight = {pstr(model.weight)}")
- print(f"module.weight = {pstr(model.weight)}")
- model.weight = torch.ones((1, 1)) * 3
- model.weight.requires_grad = False
- print(f"module.weight = {pstr(model.weight)}")
- model.weight.requires_grad = True
- print(f"module.weight.original = {pstr(model.weight.original)}")
- print(f"type(module.weight) = {type(model.weight)}")
- print(f"module.parameters() = {pstr(list(model.parameters()))}")
- print(f"module.named_parameters() = {pstr(list(model.named_parameters(recurse=False)))}")
- print(f"module.named_parameters(recurse=True) = {pstr(list(model.named_parameters(recurse=True)))}")
- inputs = torch.ones((2, 2), dtype=torch.float, requires_grad=True)
- output: Tensor = model(inputs)
- print(f"inputs = {pstr(inputs)}")
- print(f"output = {pstr(output)}")
-
- save_autograd_graph_from_module(output, params={
- "inputs": inputs,
- "output": output,
- "model.weight": model.weight,
- # "model.parametrizations.weight.original": model.parametrizations.weight.original,
- }).render("C:/X/_data/model_graph", format="svg", cleanup=True)
-
- print()
- print("Forward::")
- output: Tensor = model(inputs)
- print("Backward::")
- output.backward(gradient=torch.ones_like(output))
- print("Accessing::")
- print(f"module.weight = {pstr(model.weight)}")
- print(f"module.weight.original = {pstr(model.weight.original)}")
- print(f"module.weight.grad = {pstr(model.weight.grad)}")
- print(f"module.weight.original.grad = {pstr(model.weight.original.grad)}")
- print("Update::")
- opt = Adam(params=model.parameters())
- print(f"module.weight = {pstr(model.weight)}")
- print(f"module.weight.original = {pstr(model.weight.original)}")
- print(f"module.weight.grad = {pstr(model.weight.grad)}")
- print(f"module.weight.original.grad = {pstr(model.weight.original.grad)}")
- print("Step::")
- opt.step()
- print(f"module.weight = {pstr(model.weight)}")
- print(f"module.weight.original = {pstr(model.weight.original)}")
- print(f"module.weight.grad = {pstr(model.weight.grad)}")
- print(f"module.weight.original.grad = {pstr(model.weight.original.grad)}")
- print("zero_grad::")
- opt.zero_grad()
- print(f"module.weight = {pstr(model.weight)}")
- print(f"module.weight.original = {pstr(model.weight.original)}")
- print(f"module.weight.grad = {pstr(model.weight.grad)}")
- print(f"module.weight.original.grad = {pstr(model.weight.original.grad)}")
|