From dfa43b4456506ed6946ab73b7f5be489e147fd82 Mon Sep 17 00:00:00 2001
From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com>
Date: Sat, 14 Jan 2023 18:00:39 -0500
Subject: [PATCH 01/24] removed pillow

Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com>
---
 pyproject.toml   | 2 +-
 requirements.txt | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 51a3cb0..96c188f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -11,7 +11,7 @@ name = "analogvnn"
 [project] # $ pip install analogvnn
 name = "analogvnn"
-version = "1.0.0rc6"
+version = "1.0.0rc7"
 description = "A fully modular framework for modeling and optimizing analog/photonic neural networks" # Optional
 readme = "README.md"
 requires-python = ">=3.7"
diff --git a/requirements.txt b/requirements.txt
index 464c65c..49cef03 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,6 +11,5 @@ importlib-metadata<5.0.0,>=2.0.0; python_version < '3.8'
 tensorflow>=2.0.0
 tensorboard>=2.0.0
 torchinfo
-pillow
 # conda install graphviz
 graphviz

From a4be11c665e4fb01ae4181ff9df8500f142773be Mon Sep 17 00:00:00 2001
From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com>
Date: Sat, 14 Jan 2023 18:02:30 -0500
Subject: [PATCH 02/24] version fix

Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com>
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 49cef03..e87479b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -5,7 +5,7 @@ torchaudio
 numpy
 scipy
 networkx
-importlib-metadata<5.0.0,>=2.0.0; python_version < '3.8'
+importlib-metadata; python_version < '3.8'

 # Full
 tensorflow>=2.0.0

From 3f7cf26aaf2811507bb96086a4734a642307e34b Mon Sep 17 00:00:00 2001
From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com>
Date: Sat, 14 Jan 2023 19:45:05 -0500
Subject: [PATCH 03/24] name change gaussian_dirac_delta

Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com>
---
 analogvnn/fn/dirac_delta.py        | 10 +++++-----
 analogvnn/nn/noise/PoissonNoise.py |  4 ++--
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/analogvnn/fn/dirac_delta.py b/analogvnn/fn/dirac_delta.py
index 8e9f021..85a0317 100644
--- a/analogvnn/fn/dirac_delta.py
+++ b/analogvnn/fn/dirac_delta.py
@@ -2,19 +2,19 @@

 from analogvnn.utils.common_types import TENSOR_OPERABLE

-__all__ = ['dirac_delta']
+__all__ = ['gaussian_dirac_delta']


-def dirac_delta(x: TENSOR_OPERABLE, a: TENSOR_OPERABLE = 0.001) -> TENSOR_OPERABLE:
-    """`dirac_delta` takes `x` and returns the Dirac delta function of `x` with standard deviation of `a`.
+def gaussian_dirac_delta(x: TENSOR_OPERABLE, std: TENSOR_OPERABLE = 0.001) -> TENSOR_OPERABLE:
+    """Gaussian Dirac Delta function with standard deviation `std`

     Args:
         x (TENSOR_OPERABLE): Tensor
-        a (TENSOR_OPERABLE): standard deviation.
+        std (TENSOR_OPERABLE): standard deviation.

     Returns:
         TENSOR_OPERABLE: TENSOR_OPERABLE with the same shape as x, but with values equal to the Dirac delta function
            of x.
     """
-    return 1 / (np.abs(a) * np.sqrt(np.pi)) * np.exp(-((x / a) ** 2))
+    return 1 / (np.abs(std) * np.sqrt(np.pi)) * np.exp(-((x / std) ** 2))
diff --git a/analogvnn/nn/noise/PoissonNoise.py b/analogvnn/nn/noise/PoissonNoise.py
index 39a14ad..8f5a42c 100644
--- a/analogvnn/nn/noise/PoissonNoise.py
+++ b/analogvnn/nn/noise/PoissonNoise.py
@@ -7,7 +7,7 @@ from torch import Tensor, nn

 from analogvnn.backward.BackwardIdentity import BackwardIdentity
-from analogvnn.fn.dirac_delta import dirac_delta
+from analogvnn.fn.dirac_delta import gaussian_dirac_delta
 from analogvnn.nn.module.Layer import Layer
 from analogvnn.utils.common_types import TENSOR_OPERABLE
 from analogvnn.utils.to_tensor_parameter import to_float_tensor, to_nongrad_parameter
@@ -219,7 +219,7 @@ def pdf(self, x: Tensor, rate: Tensor) -> Tensor:
         rate = rate if isinstance(rate, Tensor) else torch.tensor(rate, requires_grad=False)

         if torch.isclose(rate, torch.zeros_like(rate)):
-            return dirac_delta(x)
+            return gaussian_dirac_delta(x)

         return torch.exp(self.log_prob(x=x, rate=rate))

From 9cd16643d4a4de1cf2148d3e5c0b861d2872ca07 Mon Sep 17 00:00:00 2001
From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com>
Date: Sat, 14 Jan 2023 19:46:27 -0500
Subject: [PATCH 04/24] update docs

Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com>
---
 analogvnn/fn/dirac_delta.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/analogvnn/fn/dirac_delta.py b/analogvnn/fn/dirac_delta.py
index 85a0317..1b9fe3d 100644
--- a/analogvnn/fn/dirac_delta.py
+++ b/analogvnn/fn/dirac_delta.py
@@ -13,8 +13,7 @@ def gaussian_dirac_delta(x: TENSOR_OPERABLE, std: TENSOR_OPERABLE = 0.001) -> TE
         std (TENSOR_OPERABLE): standard deviation.

     Returns:
-        TENSOR_OPERABLE: TENSOR_OPERABLE with the same shape as x, but with values equal to the Dirac delta function
-            of x.
+        TENSOR_OPERABLE: TENSOR_OPERABLE with the same shape as x, with values of the Gaussian Dirac Delta function.
     """
     return 1 / (np.abs(std) * np.sqrt(np.pi)) * np.exp(-((x / std) ** 2))

From e843f76820215d7f675625b6a47d19eb1bc9b241 Mon Sep 17 00:00:00 2001
From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com>
Date: Tue, 17 Jan 2023 19:10:43 -0500
Subject: [PATCH 05/24] type hinting update

Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com>
---
 analogvnn/utils/is_cpu_cuda.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/analogvnn/utils/is_cpu_cuda.py b/analogvnn/utils/is_cpu_cuda.py
index 8d7d7c6..582b242 100644
--- a/analogvnn/utils/is_cpu_cuda.py
+++ b/analogvnn/utils/is_cpu_cuda.py
@@ -1,6 +1,6 @@
 from __future__ import annotations

-from typing import Tuple
+from typing import Tuple, Union

 import torch

@@ -47,11 +47,11 @@ def use_cuda_if_available(self) -> CPUCuda:
         self.set_device(f'cuda:{torch.cuda.current_device()}')
         return self

-    def set_device(self, device_name: str) -> CPUCuda:
+    def set_device(self, device_name: Union[str, torch.device]) -> CPUCuda:
         """Set the device to the given device name.

         Args:
-            device_name (str): the device name.
+            device_name (Union[str, torch.device]): the device name.

         Returns:
             CPUCuda: self

From 13fc6a81f93f906faf14beddde3a9cda876135ce Mon Sep 17 00:00:00 2001
From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com>
Date: Tue, 17 Jan 2023 19:21:04 -0500
Subject: [PATCH 06/24] updated docs

Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com>
---
 .flake8                     |  1 +
 analogvnn/__init__.py       |  3 ++-
 analogvnn/fn/dirac_delta.py |  2 +-
 pyproject.toml              | 12 ++++++------
 4 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/.flake8 b/.flake8
index 4f02f5c..0078e18 100644
--- a/.flake8
+++ b/.flake8
@@ -6,6 +6,7 @@ extend-ignore =
     D100, # Missing docstring in public module
     D104, # Missing docstring in public package
     D202, # No blank lines allowed after function docstring
+    D210, # No whitespaces allowed surrounding docstring text
    D401, # First line should be in imperative mood
    R504, # unnecessary variable assignment before return statement
    R505, # unnecessary else after return statement
diff --git a/analogvnn/__init__.py b/analogvnn/__init__.py
index fb54294..2b82be5 100644
--- a/analogvnn/__init__.py
+++ b/analogvnn/__init__.py
@@ -1,3 +1,5 @@
+""" AnalogVNN: A fully modular framework for modeling and optimizing analog/photonic neural networks."""
+
 import sys

 if sys.version_info[:2] >= (3, 8):
@@ -5,7 +7,6 @@
 else:
     import importlib_metadata as metadata  # pragma: no cover

-
 __package__ = 'analogvnn'
 __author__ = 'Vivswan Shah (vivswanshah@pitt.edu)'
diff --git a/analogvnn/fn/dirac_delta.py b/analogvnn/fn/dirac_delta.py
index 1b9fe3d..31f493e 100644
--- a/analogvnn/fn/dirac_delta.py
+++ b/analogvnn/fn/dirac_delta.py
@@ -6,7 +6,7 @@


 def gaussian_dirac_delta(x: TENSOR_OPERABLE, std: TENSOR_OPERABLE = 0.001) -> TENSOR_OPERABLE:
-    """Gaussian Dirac Delta function with standard deviation `std`
+    """Gaussian Dirac Delta function with standard deviation `std`.

     Args:
         x (TENSOR_OPERABLE): Tensor
diff --git a/pyproject.toml b/pyproject.toml
index 96c188f..70225e2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -8,6 +8,9 @@ build-backend = "flit_core.buildapi"
 [tool.flit.module]
 name = "analogvnn"

+[tool.setuptools]
+py-modules = ['analogvnn']
+
 [project] # $ pip install analogvnn
 name = "analogvnn"
@@ -106,6 +109,7 @@ flake8 = [
     "flake8-deprecated",
 ]
 dev = [
+    "flit",  # for building {flit build}
     "setuptools>=61.0.0",
     "build",  # building the package {pyproject-build}
     "twine",  # to publish on pypi {twine upload --repository-url=https://test.pypi.org/legacy/ dist/*} {twine upload dist/*}
@@ -113,6 +117,7 @@
 ]
 test = ["analogvnn[flake8]"]
 all = ["analogvnn[full,dev,doc,test]"]
+
 [project.urls]
 "Author" = "https://vivswan.github.io/"
 "Bug Reports" = "https://github.com/Vivswan/AnalogVNN/issues"
@@ -124,9 +129,4 @@
 # The following would provide a command line executable called `sample`
 # which executes the function `main` from this package when invoked.
 #[project.scripts]  # Optional
-#sample = "sample:main"
-
-# This is configuration specific to the `setuptools` build backend.
-# If you are using a different build backend, you will need to change this.
-[tool.setuptools]
-py-modules = ['analogvnn']
\ No newline at end of file
+#sample = "sample:main"
\ No newline at end of file

From a16cefad35f48487bb59227d1f35e9aa1ef0475d Mon Sep 17 00:00:00 2001
From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com>
Date: Tue, 17 Jan 2023 19:21:34 -0500
Subject: [PATCH 07/24] added flit

Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com>
---
 requirements/requirements-dev.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/requirements/requirements-dev.txt b/requirements/requirements-dev.txt
index 2a15cd3..30603a0 100644
--- a/requirements/requirements-dev.txt
+++ b/requirements/requirements-dev.txt
@@ -1,4 +1,5 @@
 # Development
+flit
 setuptools>=61.0.0
 build  # building the package {pyproject-build}
 twine  # to publish on pypi {twine upload --repository-url=https://test.pypi.org/legacy/ dist/*} {twine upload dist/*}

From 516cc595448e4ffdfec4e6ba43f7e2fb193ba28b Mon Sep 17 00:00:00 2001
From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com>
Date: Wed, 18 Jan 2023 23:43:12 -0500
Subject: [PATCH 08/24] added get_model_summaries

Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com>
---
 analogvnn/__init__.py                  |  2 +-
 analogvnn/fn/__init__.py               |  1 +
 analogvnn/utils/TensorboardModelLog.py | 34 +++++-----------------------------
 analogvnn/utils/get_model_summaries.py | 56 ++++++++++++++++++++++++++
 4 files changed, 63 insertions(+), 30 deletions(-)
 create mode 100644 analogvnn/utils/get_model_summaries.py

diff --git a/analogvnn/__init__.py b/analogvnn/__init__.py
index 2b82be5..667e237 100644
--- a/analogvnn/__init__.py
+++ b/analogvnn/__init__.py
@@ -1,4 +1,4 @@
-""" AnalogVNN: A fully modular framework for modeling and optimizing analog/photonic neural networks."""
+"""AnalogVNN: A fully modular framework for modeling and optimizing analog/photonic neural networks."""

 import sys
diff --git a/analogvnn/fn/__init__.py b/analogvnn/fn/__init__.py
index e69de29..4722389 100644
--- a/analogvnn/fn/__init__.py
+++ b/analogvnn/fn/__init__.py
@@ -0,0 +1 @@
+"""Additional functions for analogvnn."""
diff --git a/analogvnn/utils/TensorboardModelLog.py b/analogvnn/utils/TensorboardModelLog.py
index 5ee65aa..db25434 100644
--- a/analogvnn/utils/TensorboardModelLog.py
+++ b/analogvnn/utils/TensorboardModelLog.py
@@ -15,6 +15,8 @@

 __all__ = ['TensorboardModelLog']

+from analogvnn.utils.get_model_summaries import get_model_summaries
+

 class TensorboardModelLog:
     """Tensorboard model log.
@@ -162,45 +164,19 @@ def add_summary(

         Returns:
             Tuple[str, str]: the model __repr__ and the model summary.
-
-        Raises:
-            ImportError: if torchinfo (https://github.com/tyleryep/torchinfo) is not installed.
         """
-        try:
-            import torchinfo
-        except ImportError as e:
-            raise ImportError('requires torchinfo: https://github.com/tyleryep/torchinfo') from e
-
         if model is None:
             model = self.model

         log_id = f'{self.tensorboard.log_dir}_{TensorboardModelLog.add_summary.__name__}_{id(model)}'

-        if input_size is None:
-            data_shape = next(iter(train_loader))[0].shape
-            input_size = tuple(list(data_shape)[1:])
-
-        use_autograd_graph = False
-        if isinstance(model, Layer):
-            use_autograd_graph = model.use_autograd_graph
-            model.use_autograd_graph = True
-
-        nn_model_summary = torchinfo.summary(
-            model,
+        model_str, nn_model_summary = get_model_summaries(
+            model=model,
             input_size=input_size,
-            verbose=torchinfo.Verbosity.QUIET,
-            col_names=[e.value for e in torchinfo.ColumnSettings],
-            depth=10,
+            train_loader=train_loader
         )

-        if isinstance(model, Layer):
-            model.use_autograd_graph = use_autograd_graph
-
-        nn_model_summary.formatting.verbose = torchinfo.Verbosity.VERBOSE
-        model_str = str(model)
-        nn_model_summary = f'{nn_model_summary}'
-
         if log_id in self._log_record:
             return model_str, nn_model_summary
diff --git a/analogvnn/utils/get_model_summaries.py b/analogvnn/utils/get_model_summaries.py
new file mode 100644
index 0000000..c159840
--- /dev/null
+++ b/analogvnn/utils/get_model_summaries.py
@@ -0,0 +1,56 @@
+from typing import Optional, Sequence, Tuple
+
+from torch import nn
+from torch.utils.data import DataLoader
+
+from analogvnn.nn.module.Layer import Layer
+
+
+def get_model_summaries(
+        train_loader: DataLoader,
+        model: Optional[nn.Module] = None,
+        input_size: Optional[Sequence[int]] = None,
+) -> Tuple[str, str]:
+    """Creates the model summaries.
+
+    Args:
+        train_loader (DataLoader): the train loader.
+        model (nn.Module): the model to log.
+        input_size (Optional[Sequence[int]]): the input size.
+
+    Returns:
+        Tuple[str, str]: the model __repr__ and the model summary.
+
+    Raises:
+        ImportError: if torchinfo (https://github.com/tyleryep/torchinfo) is not installed.
+ """ + + try: + import torchinfo + except ImportError as e: + raise ImportError('requires torchinfo: https://github.com/tyleryep/torchinfo') from e + + if input_size is None: + data_shape = next(iter(train_loader))[0].shape + input_size = tuple(list(data_shape)[1:]) + + use_autograd_graph = False + if isinstance(model, Layer): + use_autograd_graph = model.use_autograd_graph + model.use_autograd_graph = True + + model_summary = torchinfo.summary( + model, + input_size=input_size, + verbose=torchinfo.Verbosity.QUIET, + col_names=[e.value for e in torchinfo.ColumnSettings], + depth=10, + ) + + if isinstance(model, Layer): + model.use_autograd_graph = use_autograd_graph + + model_summary.formatting.verbose = torchinfo.Verbosity.VERBOSE + model_str = str(model) + model_summary = f'{model_summary}' + return model_str, model_summary From bfff5a61d5c85ef081e9a9e69029c2ce54fd897b Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Wed, 18 Jan 2023 23:48:29 -0500 Subject: [PATCH 09/24] correction Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- analogvnn/utils/TensorboardModelLog.py | 6 +++--- analogvnn/utils/get_model_summaries.py | 8 ++++++-- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/analogvnn/utils/TensorboardModelLog.py b/analogvnn/utils/TensorboardModelLog.py index db25434..d2a032e 100644 --- a/analogvnn/utils/TensorboardModelLog.py +++ b/analogvnn/utils/TensorboardModelLog.py @@ -151,16 +151,16 @@ def add_graph( def add_summary( self, - train_loader: DataLoader, - model: Optional[nn.Module] = None, + model: Optional[nn.Module], input_size: Optional[Sequence[int]] = None, + train_loader: Optional[DataLoader] = None, ) -> Tuple[str, str]: """Add the model summary to the tensorboard. Args: - train_loader (DataLoader): the train loader. model (nn.Module): the model to log. input_size (Optional[Sequence[int]]): the input size. + train_loader (Optional[DataLoader]): the train loader. Returns: Tuple[str, str]: the model __repr__ and the model summary. diff --git a/analogvnn/utils/get_model_summaries.py b/analogvnn/utils/get_model_summaries.py index c159840..3c411ff 100644 --- a/analogvnn/utils/get_model_summaries.py +++ b/analogvnn/utils/get_model_summaries.py @@ -7,9 +7,9 @@ def get_model_summaries( - train_loader: DataLoader, - model: Optional[nn.Module] = None, + model: Optional[nn.Module], input_size: Optional[Sequence[int]] = None, + train_loader: DataLoader = None, ) -> Tuple[str, str]: """Creates the model summaries. @@ -23,6 +23,7 @@ def get_model_summaries( Raises: ImportError: if torchinfo (https://github.com/tyleryep/torchinfo) is not installed. + ValueError: if the input_size and train_loader are None. 
""" try: @@ -30,6 +31,9 @@ def get_model_summaries( except ImportError as e: raise ImportError('requires torchinfo: https://github.com/tyleryep/torchinfo') from e + if input_size is None and train_loader is None: + raise ValueError('input_size or train_loader must be provided') + if input_size is None: data_shape = next(iter(train_loader))[0].shape input_size = tuple(list(data_shape)[1:]) From 76ac9bcf379d3655fdced52e01471be7cb5ab44a Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Thu, 19 Jan 2023 00:04:45 -0500 Subject: [PATCH 10/24] added args and kwargs Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- analogvnn/utils/TensorboardModelLog.py | 8 +++++++- analogvnn/utils/get_model_summaries.py | 16 +++++++++++++--- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/analogvnn/utils/TensorboardModelLog.py b/analogvnn/utils/TensorboardModelLog.py index d2a032e..c1b9826 100644 --- a/analogvnn/utils/TensorboardModelLog.py +++ b/analogvnn/utils/TensorboardModelLog.py @@ -154,6 +154,8 @@ def add_summary( model: Optional[nn.Module], input_size: Optional[Sequence[int]] = None, train_loader: Optional[DataLoader] = None, + *args, + **kwargs ) -> Tuple[str, str]: """Add the model summary to the tensorboard. @@ -161,6 +163,8 @@ def add_summary( model (nn.Module): the model to log. input_size (Optional[Sequence[int]]): the input size. train_loader (Optional[DataLoader]): the train loader. + *args: the arguments to torchinfo.summary. + **kwargs: the keyword arguments to torchinfo.summary. Returns: Tuple[str, str]: the model __repr__ and the model summary. @@ -174,7 +178,9 @@ def add_summary( model_str, nn_model_summary = get_model_summaries( model=model, input_size=input_size, - train_loader=train_loader + train_loader=train_loader, + *args, + **kwargs ) if log_id in self._log_record: diff --git a/analogvnn/utils/get_model_summaries.py b/analogvnn/utils/get_model_summaries.py index 3c411ff..a247c62 100644 --- a/analogvnn/utils/get_model_summaries.py +++ b/analogvnn/utils/get_model_summaries.py @@ -10,6 +10,8 @@ def get_model_summaries( model: Optional[nn.Module], input_size: Optional[Sequence[int]] = None, train_loader: DataLoader = None, + *args, + **kwargs ) -> Tuple[str, str]: """Creates the model summaries. @@ -17,6 +19,8 @@ def get_model_summaries( train_loader (DataLoader): the train loader. model (nn.Module): the model to log. input_size (Optional[Sequence[int]]): the input size. + *args: the arguments to torchinfo.summary. + **kwargs: the keyword arguments to torchinfo.summary. Returns: Tuple[str, str]: the model __repr__ and the model summary. 
@@ -43,12 +47,18 @@ def get_model_summaries( use_autograd_graph = model.use_autograd_graph model.use_autograd_graph = True + if 'depth' not in kwargs: + kwargs['depth'] = 10 + if 'col_names' not in kwargs: + kwargs['col_names'] = (e.value for e in torchinfo.ColumnSettings) + if 'verbose' not in kwargs: + kwargs['verbose'] = torchinfo.Verbosity.QUIET + model_summary = torchinfo.summary( model, input_size=input_size, - verbose=torchinfo.Verbosity.QUIET, - col_names=[e.value for e in torchinfo.ColumnSettings], - depth=10, + *args, + **kwargs, ) if isinstance(model, Layer): From 8534618f4c51c254f911511c4f4ef4515068ad83 Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+vivswan@users.noreply.github.com> Date: Thu, 19 Jan 2023 04:21:58 -0500 Subject: [PATCH 11/24] updated type hinting Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- analogvnn/backward/BackwardFunction.py | 23 +++++++++++------------ analogvnn/nn/module/Model.py | 12 ++++++------ analogvnn/utils/common_types.py | 10 +++++----- 3 files changed, 22 insertions(+), 23 deletions(-) diff --git a/analogvnn/backward/BackwardFunction.py b/analogvnn/backward/BackwardFunction.py index 73fc7b5..f653d29 100644 --- a/analogvnn/backward/BackwardFunction.py +++ b/analogvnn/backward/BackwardFunction.py @@ -1,12 +1,11 @@ from __future__ import annotations from abc import ABC -from typing import Callable from torch import nn, Tensor from analogvnn.backward.BackwardModule import BackwardModule -from analogvnn.utils.common_types import TENSORS +from analogvnn.utils.common_types import TENSORS, TENSOR_CALLABLE __all__ = ['BackwardFunction'] @@ -15,16 +14,16 @@ class BackwardFunction(BackwardModule, ABC): """The backward module that uses a function to compute the backward gradient. Attributes: - _backward_function (Callable): The function used to compute the backward gradient. + _backward_function (TENSOR_CALLABLE): The function used to compute the backward gradient. """ - _backward_function: Callable + _backward_function: TENSOR_CALLABLE - def __init__(self, backward_function: Callable, layer: nn.Module = None): + def __init__(self, backward_function: TENSOR_CALLABLE, layer: nn.Module = None): """Initializes the backward module. Args: - backward_function (Callable): The function used to compute the backward gradient. + backward_function (TENSOR_CALLABLE): The function used to compute the backward gradient. layer (nn.Module): The layer that this backward module is associated with. """ @@ -32,30 +31,30 @@ def __init__(self, backward_function: Callable, layer: nn.Module = None): self._backward_function = backward_function @property - def backward_function(self) -> Callable: + def backward_function(self) -> TENSOR_CALLABLE: """The function used to compute the backward gradient. Returns: - Callable: The function used to compute the backward gradient. + TENSOR_CALLABLE: The function used to compute the backward gradient. """ return self._backward_function @backward_function.setter - def backward_function(self, backward_function: Callable): + def backward_function(self, backward_function: TENSOR_CALLABLE): """Sets the function used to compute the backward gradient with. Args: - backward_function (Callable): The function used to compute the backward gradient with. + backward_function (TENSOR_CALLABLE): The function used to compute the backward gradient with. 
""" self.set_backward_function(backward_function) - def set_backward_function(self, backward_function: Callable) -> BackwardFunction: + def set_backward_function(self, backward_function: TENSOR_CALLABLE) -> BackwardFunction: """Sets the function used to compute the backward gradient with. Args: - backward_function (Callable): The function used to compute the backward gradient with. + backward_function (TENSOR_CALLABLE): The function used to compute the backward gradient with. Returns: BackwardFunction: self. diff --git a/analogvnn/nn/module/Model.py b/analogvnn/nn/module/Model.py index ca93f1b..5c85f81 100644 --- a/analogvnn/nn/module/Model.py +++ b/analogvnn/nn/module/Model.py @@ -1,7 +1,7 @@ from __future__ import annotations import typing -from typing import Callable, Optional, Tuple, Set, Iterator +from typing import Optional, Tuple, Set, Iterator import torch from torch import optim, Tensor, nn @@ -13,7 +13,7 @@ from analogvnn.graph.ForwardGraph import ForwardGraph from analogvnn.graph.ModelGraph import ModelGraph from analogvnn.nn.module.Layer import Layer -from analogvnn.utils.common_types import TENSORS +from analogvnn.utils.common_types import TENSORS, TENSOR_CALLABLE from analogvnn.utils.is_cpu_cuda import is_cpu_cuda if typing.TYPE_CHECKING: @@ -32,8 +32,8 @@ class Model(Layer): forward_graph (ForwardGraph): The forward graph of the model. backward_graph (BackwardGraph): The backward graph of the model. optimizer (optim.Optimizer): The optimizer of the model. - loss_function (Callable): The loss function of the model. - accuracy_function (Callable): The accuracy function of the model. + loss_function (Optional[TENSOR_CALLABLE]): The loss function of the model. + accuracy_function (Optional[TENSOR_CALLABLE]): The accuracy function of the model. device (torch.device): The device of the model. """ @@ -48,8 +48,8 @@ class Model(Layer): backward_graph: BackwardGraph optimizer: Optional[optim.Optimizer] - loss_function: Optional[Callable] - accuracy_function: Optional[Callable] + loss_function: Optional[TENSOR_CALLABLE] + accuracy_function: Optional[TENSOR_CALLABLE] device: torch.device def __init__(self, tensorboard_log_dir=None, device=is_cpu_cuda.device): diff --git a/analogvnn/utils/common_types.py b/analogvnn/utils/common_types.py index 60028e4..1af7a57 100644 --- a/analogvnn/utils/common_types.py +++ b/analogvnn/utils/common_types.py @@ -4,11 +4,11 @@ __all__ = ['TENSOR_OPERABLE', 'TENSOR_CALLABLE', 'TENSORS'] -TENSOR_OPERABLE = Union[Sequence[Tensor], Tensor, int, float, bool] +TENSORS = Union[None, Tensor, Sequence[Tensor]] +"""`TENSORS` is a type alias for a tensor or a sequence of tensors. """ + +TENSOR_OPERABLE = Union[TENSORS, int, float, bool] """`TENSOR_OPERABLE` is a type alias for types that can be operated on by a tensor. """ -TENSOR_CALLABLE = Callable[[TENSOR_OPERABLE], TENSOR_OPERABLE] +TENSOR_CALLABLE = Callable[[TENSOR_OPERABLE, ...], TENSOR_OPERABLE] """`TENSOR_CALLABLE` is a type alias for a function that takes a `TENSOR_OPERABLE` and returns a `TENSOR_OPERABLE`. """ - -TENSORS = Union[None, Tensor, Sequence[Tensor]] -"""`TENSORS` is a type alias for a tensor or a sequence of tensors. 
""" From 201ea9b341c73cf6ba9e50d8b59a9d5a4b31d7c0 Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Thu, 19 Jan 2023 04:22:24 -0500 Subject: [PATCH 12/24] bug fix Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- analogvnn/utils/TensorboardModelLog.py | 4 ++-- analogvnn/utils/get_model_summaries.py | 17 +++++++++++------ sample_code_with_logs.py | 4 ++-- 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/analogvnn/utils/TensorboardModelLog.py b/analogvnn/utils/TensorboardModelLog.py index c1b9826..80b18c2 100644 --- a/analogvnn/utils/TensorboardModelLog.py +++ b/analogvnn/utils/TensorboardModelLog.py @@ -151,18 +151,18 @@ def add_graph( def add_summary( self, - model: Optional[nn.Module], input_size: Optional[Sequence[int]] = None, train_loader: Optional[DataLoader] = None, + model: Optional[nn.Module] = None, *args, **kwargs ) -> Tuple[str, str]: """Add the model summary to the tensorboard. Args: - model (nn.Module): the model to log. input_size (Optional[Sequence[int]]): the input size. train_loader (Optional[DataLoader]): the train loader. + model (nn.Module): the model to log. *args: the arguments to torchinfo.summary. **kwargs: the keyword arguments to torchinfo.summary. diff --git a/analogvnn/utils/get_model_summaries.py b/analogvnn/utils/get_model_summaries.py index a247c62..1fc72fa 100644 --- a/analogvnn/utils/get_model_summaries.py +++ b/analogvnn/utils/get_model_summaries.py @@ -35,12 +35,18 @@ def get_model_summaries( except ImportError as e: raise ImportError('requires torchinfo: https://github.com/tyleryep/torchinfo') from e - if input_size is None and train_loader is None: + if input_size is None and train_loader is None and 'input_size' not in kwargs: raise ValueError('input_size or train_loader must be provided') - if input_size is None: - data_shape = next(iter(train_loader))[0].shape - input_size = tuple(list(data_shape)[1:]) + if 'input_size' not in kwargs: + if input_size is None: + data_shape = list(next(iter(train_loader))[0].shape) + if train_loader.batch_size > 0: + data_shape[0] = 1 + + input_size = data_shape + + kwargs['input_size'] = input_size use_autograd_graph = False if isinstance(model, Layer): @@ -50,13 +56,12 @@ def get_model_summaries( if 'depth' not in kwargs: kwargs['depth'] = 10 if 'col_names' not in kwargs: - kwargs['col_names'] = (e.value for e in torchinfo.ColumnSettings) + kwargs['col_names'] = tuple(e.value for e in torchinfo.ColumnSettings) if 'verbose' not in kwargs: kwargs['verbose'] = torchinfo.Verbosity.QUIET model_summary = torchinfo.summary( model, - input_size=input_size, *args, **kwargs, ) diff --git a/sample_code_with_logs.py b/sample_code_with_logs.py index acafa9c..61dce45 100644 --- a/sample_code_with_logs.py +++ b/sample_code_with_logs.py @@ -214,8 +214,8 @@ def run_linear3_model(): nn_model.create_tensorboard(str(data_path.joinpath('tensorboard'))) print('Saving Summary and Graphs...') - _, nn_model_summary = nn_model.tensorboard.add_summary(train_loader) - _, weight_model_summary = nn_model.tensorboard.add_summary(train_loader, model=weight_model) + _, nn_model_summary = nn_model.tensorboard.add_summary(train_loader=train_loader) + _, weight_model_summary = nn_model.tensorboard.add_summary(train_loader=train_loader, model=weight_model) save_autograd_graph_from_module(data_path.joinpath('nn_model'), nn_model, next(iter(train_loader))[0]) save_autograd_graph_from_module(data_path.joinpath('weight_model'), weight_model, torch.ones((1, 1))) 
save_autograd_graph_from_module( From b62ce3e8af6e92f04fa061ade7e752e98e08b204 Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+vivswan@users.noreply.github.com> Date: Thu, 19 Jan 2023 04:57:11 -0500 Subject: [PATCH 13/24] better type hinting Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- analogvnn/nn/noise/GaussianNoise.py | 4 ++-- analogvnn/nn/noise/LaplacianNoise.py | 4 ++-- analogvnn/nn/noise/Noise.py | 7 +++++++ analogvnn/nn/noise/PoissonNoise.py | 4 ++-- analogvnn/nn/noise/UniformNoise.py | 4 ++-- analogvnn/nn/normalize/Clamp.py | 5 +++-- analogvnn/nn/normalize/LPNorm.py | 3 ++- analogvnn/nn/normalize/Normalize.py | 3 +-- analogvnn/nn/precision/Precision.py | 7 +++++++ analogvnn/nn/precision/ReducePrecision.py | 4 ++-- analogvnn/nn/precision/StochasticReducePrecision.py | 4 ++-- 11 files changed, 32 insertions(+), 17 deletions(-) create mode 100644 analogvnn/nn/noise/Noise.py create mode 100644 analogvnn/nn/precision/Precision.py diff --git a/analogvnn/nn/noise/GaussianNoise.py b/analogvnn/nn/noise/GaussianNoise.py index 4fe31bd..1041dab 100644 --- a/analogvnn/nn/noise/GaussianNoise.py +++ b/analogvnn/nn/noise/GaussianNoise.py @@ -7,14 +7,14 @@ from torch import Tensor, nn from analogvnn.backward.BackwardIdentity import BackwardIdentity -from analogvnn.nn.module.Layer import Layer +from analogvnn.nn.noise.Noise import Noise from analogvnn.utils.common_types import TENSOR_OPERABLE from analogvnn.utils.to_tensor_parameter import to_nongrad_parameter, to_float_tensor __all__ = ['GaussianNoise'] -class GaussianNoise(Layer, BackwardIdentity): +class GaussianNoise(Noise, BackwardIdentity): """Implements the Gaussian noise function. Attributes: diff --git a/analogvnn/nn/noise/LaplacianNoise.py b/analogvnn/nn/noise/LaplacianNoise.py index ab2b00e..be6e98d 100644 --- a/analogvnn/nn/noise/LaplacianNoise.py +++ b/analogvnn/nn/noise/LaplacianNoise.py @@ -6,14 +6,14 @@ from torch import Tensor, nn from analogvnn.backward.BackwardIdentity import BackwardIdentity -from analogvnn.nn.module.Layer import Layer +from analogvnn.nn.noise.Noise import Noise from analogvnn.utils.common_types import TENSOR_OPERABLE from analogvnn.utils.to_tensor_parameter import to_float_tensor, to_nongrad_parameter __all__ = ['LaplacianNoise'] -class LaplacianNoise(Layer, BackwardIdentity): +class LaplacianNoise(Noise, BackwardIdentity): """Implements the Laplacian noise function. Attributes: diff --git a/analogvnn/nn/noise/Noise.py b/analogvnn/nn/noise/Noise.py new file mode 100644 index 0000000..18b9095 --- /dev/null +++ b/analogvnn/nn/noise/Noise.py @@ -0,0 +1,7 @@ +from analogvnn.nn.module.Layer import Layer + +__all__ = ['Noise'] + + +class Noise(Layer): + """This class is base class for all noise functions.""" diff --git a/analogvnn/nn/noise/PoissonNoise.py b/analogvnn/nn/noise/PoissonNoise.py index 8f5a42c..c42fe78 100644 --- a/analogvnn/nn/noise/PoissonNoise.py +++ b/analogvnn/nn/noise/PoissonNoise.py @@ -8,14 +8,14 @@ from analogvnn.backward.BackwardIdentity import BackwardIdentity from analogvnn.fn.dirac_delta import gaussian_dirac_delta -from analogvnn.nn.module.Layer import Layer +from analogvnn.nn.noise.Noise import Noise from analogvnn.utils.common_types import TENSOR_OPERABLE from analogvnn.utils.to_tensor_parameter import to_float_tensor, to_nongrad_parameter __all__ = ['PoissonNoise'] -class PoissonNoise(Layer, BackwardIdentity): +class PoissonNoise(Noise, BackwardIdentity): """Implements the Poisson noise function. 
Attributes: diff --git a/analogvnn/nn/noise/UniformNoise.py b/analogvnn/nn/noise/UniformNoise.py index 7e73652..34445d2 100644 --- a/analogvnn/nn/noise/UniformNoise.py +++ b/analogvnn/nn/noise/UniformNoise.py @@ -4,14 +4,14 @@ from torch import Tensor, nn from analogvnn.backward.BackwardIdentity import BackwardIdentity -from analogvnn.nn.module.Layer import Layer +from analogvnn.nn.noise.Noise import Noise from analogvnn.utils.common_types import TENSOR_OPERABLE from analogvnn.utils.to_tensor_parameter import to_float_tensor, to_nongrad_parameter __all__ = ['UniformNoise'] -class UniformNoise(Layer, BackwardIdentity): +class UniformNoise(Noise, BackwardIdentity): """Implements the uniform noise function. Attributes: diff --git a/analogvnn/nn/normalize/Clamp.py b/analogvnn/nn/normalize/Clamp.py index 7db26fc..ef64a10 100644 --- a/analogvnn/nn/normalize/Clamp.py +++ b/analogvnn/nn/normalize/Clamp.py @@ -3,12 +3,13 @@ import torch from torch import Tensor +from analogvnn.backward.BackwardIdentity import BackwardIdentity from analogvnn.nn.normalize.Normalize import Normalize __all__ = ['Clamp', 'Clamp01'] -class Clamp(Normalize): +class Clamp(Normalize, BackwardIdentity): """Implements the clamp normalization function with range [-1, 1].""" @staticmethod @@ -39,7 +40,7 @@ def backward(self, grad_output: Optional[Tensor]) -> Optional[Tensor]: return grad_output * grad -class Clamp01(Normalize): +class Clamp01(Normalize, BackwardIdentity): """Implements the clamp normalization function with range [0, 1].""" @staticmethod diff --git a/analogvnn/nn/normalize/LPNorm.py b/analogvnn/nn/normalize/LPNorm.py index 68cc2d5..d8b2d78 100644 --- a/analogvnn/nn/normalize/LPNorm.py +++ b/analogvnn/nn/normalize/LPNorm.py @@ -1,12 +1,13 @@ import torch from torch import nn, Tensor +from analogvnn.backward.BackwardIdentity import BackwardIdentity from analogvnn.nn.normalize.Normalize import Normalize __all__ = ['LPNorm', 'LPNormW', 'L1Norm', 'L2Norm', 'L1NormW', 'L2NormW', 'L1NormM', 'L2NormM', 'L1NormWM', 'L2NormWM'] -class LPNorm(Normalize): +class LPNorm(Normalize, BackwardIdentity): """Implements the row-wise Lp normalization function. 
Attributes: diff --git a/analogvnn/nn/normalize/Normalize.py b/analogvnn/nn/normalize/Normalize.py index ea547cf..6d9bb7b 100644 --- a/analogvnn/nn/normalize/Normalize.py +++ b/analogvnn/nn/normalize/Normalize.py @@ -1,8 +1,7 @@ -from analogvnn.backward.BackwardIdentity import BackwardIdentity from analogvnn.nn.module.Layer import Layer __all__ = ['Normalize'] -class Normalize(Layer, BackwardIdentity): +class Normalize(Layer): """This class is base class for all normalization functions.""" diff --git a/analogvnn/nn/precision/Precision.py b/analogvnn/nn/precision/Precision.py new file mode 100644 index 0000000..eb95cff --- /dev/null +++ b/analogvnn/nn/precision/Precision.py @@ -0,0 +1,7 @@ +from analogvnn.nn.module.Layer import Layer + +__all__ = ['Precision'] + + +class Precision(Layer): + """This class is base class for all precision functions.""" diff --git a/analogvnn/nn/precision/ReducePrecision.py b/analogvnn/nn/precision/ReducePrecision.py index 207889a..bdf122e 100644 --- a/analogvnn/nn/precision/ReducePrecision.py +++ b/analogvnn/nn/precision/ReducePrecision.py @@ -3,13 +3,13 @@ from analogvnn.backward.BackwardIdentity import BackwardIdentity from analogvnn.fn.reduce_precision import reduce_precision -from analogvnn.nn.module.Layer import Layer +from analogvnn.nn.precision.Precision import Precision from analogvnn.utils.common_types import TENSOR_OPERABLE __all__ = ['ReducePrecision'] -class ReducePrecision(Layer, BackwardIdentity): +class ReducePrecision(Precision, BackwardIdentity): """Implements the reduce precision function. Attributes: diff --git a/analogvnn/nn/precision/StochasticReducePrecision.py b/analogvnn/nn/precision/StochasticReducePrecision.py index dad532a..566021d 100644 --- a/analogvnn/nn/precision/StochasticReducePrecision.py +++ b/analogvnn/nn/precision/StochasticReducePrecision.py @@ -3,13 +3,13 @@ from analogvnn.backward.BackwardIdentity import BackwardIdentity from analogvnn.fn.reduce_precision import stochastic_reduce_precision -from analogvnn.nn.module.Layer import Layer +from analogvnn.nn.precision.Precision import Precision from analogvnn.utils.common_types import TENSOR_OPERABLE __all__ = ['StochasticReducePrecision'] -class StochasticReducePrecision(Layer, BackwardIdentity): +class StochasticReducePrecision(Precision, BackwardIdentity): """Implements the stochastic reduce precision function. Attributes: From 17130f84c54ed797b5032d1c06b275d9f9d58fcb Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Thu, 19 Jan 2023 05:16:35 -0500 Subject: [PATCH 14/24] better repr Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- analogvnn/nn/precision/StochasticReducePrecision.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/analogvnn/nn/precision/StochasticReducePrecision.py b/analogvnn/nn/precision/StochasticReducePrecision.py index 566021d..448006e 100644 --- a/analogvnn/nn/precision/StochasticReducePrecision.py +++ b/analogvnn/nn/precision/StochasticReducePrecision.py @@ -75,7 +75,7 @@ def extra_repr(self) -> str: str: string """ - return f'precision={self.precision}' + return f'precision={int(self.precision)}' def forward(self, x: Tensor) -> Tensor: """Forward function of the StochasticReducePrecision module. From 1b1755c9353ab93f58eea0d46e3518046bba41ce Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Thu, 19 Jan 2023 05:16:55 -0500 Subject: [PATCH 15/24] save alias for render. 
Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- analogvnn/graph/AcyclicDirectedGraph.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/analogvnn/graph/AcyclicDirectedGraph.py b/analogvnn/graph/AcyclicDirectedGraph.py index f333918..ada2d39 100644 --- a/analogvnn/graph/AcyclicDirectedGraph.py +++ b/analogvnn/graph/AcyclicDirectedGraph.py @@ -448,3 +448,6 @@ def render(self, *args, real_label: bool = False, **kwargs) -> str: """ return to_graphviz_digraph(self.graph, real_label=real_label).render(*args, **kwargs) + + save = render + """Alias for render.""" From 4e13826b419fc96c301ba4ab9b49913bf30ee1ff Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Thu, 19 Jan 2023 05:20:48 -0500 Subject: [PATCH 16/24] fix bug Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- analogvnn/graph/to_graph_viz_digraph.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/analogvnn/graph/to_graph_viz_digraph.py b/analogvnn/graph/to_graph_viz_digraph.py index 9f9ffd3..5c75318 100644 --- a/analogvnn/graph/to_graph_viz_digraph.py +++ b/analogvnn/graph/to_graph_viz_digraph.py @@ -23,15 +23,9 @@ def to_graphviz_digraph(from_graph: networkx.DiGraph, real_label: bool = False) graphviz.Digraph: the converted graph. Raises: - ImportError: if pygraphviz (https://pygraphviz.github.io/) is not available. ImportError: if graphviz (https://pygraphviz.github.io/) is not available. """ - try: - # noinspection PyPackageRequirements - import pygraphviz # noqa: F401 - except ImportError as e: - raise ImportError('requires pygraphviz: https://pygraphviz.github.io/') from e try: from graphviz import Digraph except ImportError as e: From a6c3639a22e47c3ad158f576a0d4a31e6a37b555 Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Tue, 24 Jan 2023 12:44:59 -0500 Subject: [PATCH 17/24] type hinting fix Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- analogvnn/graph/to_graph_viz_digraph.py | 2 +- analogvnn/utils/common_types.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/analogvnn/graph/to_graph_viz_digraph.py b/analogvnn/graph/to_graph_viz_digraph.py index 5c75318..60ae94a 100644 --- a/analogvnn/graph/to_graph_viz_digraph.py +++ b/analogvnn/graph/to_graph_viz_digraph.py @@ -30,6 +30,7 @@ def to_graphviz_digraph(from_graph: networkx.DiGraph, real_label: bool = False) from graphviz import Digraph except ImportError as e: raise ImportError('requires graphviz: https://pygraphviz.github.io/') from e + strict = networkx.number_of_selfloops(from_graph) == 0 and not from_graph.is_multigraph() node_attr = { 'style': 'filled', @@ -44,7 +45,6 @@ def to_graphviz_digraph(from_graph: networkx.DiGraph, real_label: bool = False) name=from_graph.name, strict=strict, node_attr=node_attr, - graph_attr={'size': '12,12'}, format='svg' ) diff --git a/analogvnn/utils/common_types.py b/analogvnn/utils/common_types.py index 1af7a57..a0f0028 100644 --- a/analogvnn/utils/common_types.py +++ b/analogvnn/utils/common_types.py @@ -10,5 +10,5 @@ TENSOR_OPERABLE = Union[TENSORS, int, float, bool] """`TENSOR_OPERABLE` is a type alias for types that can be operated on by a tensor. """ -TENSOR_CALLABLE = Callable[[TENSOR_OPERABLE, ...], TENSOR_OPERABLE] +TENSOR_CALLABLE = Callable[[TENSOR_OPERABLE], TENSOR_OPERABLE] """`TENSOR_CALLABLE` is a type alias for a function that takes a `TENSOR_OPERABLE` and returns a `TENSOR_OPERABLE`. 
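 For example (editor's illustrative note, not part of the original commit),
 ``lambda x: torch.clamp(x, -1.0, 1.0)`` is a valid ``TENSOR_CALLABLE``.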
""" From 46c9f480d57f0cc935bc602f62f98b83b2b50051 Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Tue, 24 Jan 2023 12:53:37 -0500 Subject: [PATCH 18/24] remove redundant parameter Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- analogvnn/nn/Linear.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/analogvnn/nn/Linear.py b/analogvnn/nn/Linear.py index 445493e..c3bf95a 100644 --- a/analogvnn/nn/Linear.py +++ b/analogvnn/nn/Linear.py @@ -31,12 +31,11 @@ def forward(self, x: Tensor): return y - def backward(self, grad_output: Optional[Tensor], weight: Optional[Tensor] = None) -> Optional[Tensor]: + def backward(self, grad_output: Optional[Tensor]) -> Optional[Tensor]: """Backward pass of the linear layer. Args: grad_output (Optional[Tensor]): The gradient of the output. - weight (Optional[Tensor]): The weight of the layer. Returns: Optional[Tensor]: The gradient of the input. @@ -44,7 +43,7 @@ def backward(self, grad_output: Optional[Tensor], weight: Optional[Tensor] = Non grad_output = to_matrix(grad_output) - weight = to_matrix(self.weight if weight is None else weight) + weight = to_matrix(self.weight) grad_input = grad_output @ weight self.set_grad_of(self.weight, torch.mm(grad_output.t(), self.inputs)) From 9143c809285f04970f101f2443faeb46635f255f Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Sun, 29 Jan 2023 04:46:10 -0500 Subject: [PATCH 19/24] Sequence to tuple or list Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- analogvnn/backward/BackwardModule.py | 4 ++-- analogvnn/graph/ArgsKwargs.py | 4 ++-- analogvnn/graph/ForwardGraph.py | 4 ++-- analogvnn/utils/render_autograd_graph.py | 8 ++++---- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/analogvnn/backward/BackwardModule.py b/analogvnn/backward/BackwardModule.py index 6193a12..7f12bff 100644 --- a/analogvnn/backward/BackwardModule.py +++ b/analogvnn/backward/BackwardModule.py @@ -1,7 +1,7 @@ from __future__ import annotations import abc -from typing import Callable, Any, Optional, Sequence, Tuple, Type +from typing import Callable, Any, Optional, Tuple, Type import torch from torch import nn, Tensor, autograd @@ -69,7 +69,7 @@ def backward(ctx: Any, *grad_outputs: Tensor) -> Tuple[None, None, TENSORS]: backward_module: BackwardModule = ctx.backward_module results = backward_module._call_impl_backward(*grad_outputs) - if isinstance(results, Sequence): + if isinstance(results, (tuple, list)): return (None, None, *results) return None, None, results diff --git a/analogvnn/graph/ArgsKwargs.py b/analogvnn/graph/ArgsKwargs.py index 0899ea4..56a58bb 100644 --- a/analogvnn/graph/ArgsKwargs.py +++ b/analogvnn/graph/ArgsKwargs.py @@ -48,7 +48,7 @@ def __init__(self, args=None, kwargs=None): if isinstance(args, tuple): args = list(args) - if not isinstance(args, List): + if not isinstance(args, list): args = [args] self.args = args @@ -79,7 +79,7 @@ def to_args_kwargs_object(cls, outputs: ArgsKwargsInput) -> ArgsKwargs: pass elif isinstance(outputs, dict): outputs = cls(kwargs=outputs) - elif isinstance(outputs, tuple) and len(outputs) == 2 and isinstance(outputs[1], dict): + elif isinstance(outputs, (tuple, list)) and len(outputs) == 2 and isinstance(outputs[1], dict): outputs = cls(args=outputs[0], kwargs=outputs[1]) else: outputs = cls(args=outputs) diff --git a/analogvnn/graph/ForwardGraph.py b/analogvnn/graph/ForwardGraph.py index f66882c..d394f58 100644 
--- a/analogvnn/graph/ForwardGraph.py +++ b/analogvnn/graph/ForwardGraph.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Dict, Sequence +from typing import Dict import torch from torch import Tensor @@ -69,7 +69,7 @@ def calculate( ArgsKwargsOutput: Output of the graph """ - if not isinstance(inputs, Sequence): + if not isinstance(inputs, (tuple, list)): inputs = (inputs,) if not self.graph_state.use_autograd_graph and is_training: diff --git a/analogvnn/utils/render_autograd_graph.py b/analogvnn/utils/render_autograd_graph.py index d2edf16..ca36e04 100644 --- a/analogvnn/utils/render_autograd_graph.py +++ b/analogvnn/utils/render_autograd_graph.py @@ -102,7 +102,7 @@ def get_fn_name(fn: Callable, show_attrs: bool, max_attr_chars: int) -> str: attr = attr[len(SAVED_PREFIX):] if torch.is_tensor(val): attrs[attr] = '[saved tensor]' - elif isinstance(val, Sequence) and any(torch.is_tensor(t) for t in val): + elif isinstance(val, (tuple, list)) and any(torch.is_tensor(t) for t in val): attrs[attr] = '[saved tensors]' else: attrs[attr] = str(val) @@ -205,7 +205,7 @@ def inputs(self, inputs: Union[Tensor, Sequence[Tensor]]): if not inputs: return - if not isinstance(inputs, Sequence): + if not isinstance(inputs, (tuple, list)): inputs = (inputs,) for i, v in enumerate(inputs): @@ -253,7 +253,7 @@ def outputs(self) -> Optional[Sequence[Tensor]]: @outputs.setter def outputs(self, outputs): self._called = True - if outputs is not None and not isinstance(outputs, Sequence): + if outputs is not None and not isinstance(outputs, (tuple, list)): outputs = (outputs,) self._outputs = outputs @@ -504,7 +504,7 @@ def _add_grad_fn(link: Union[Tensor, Callable], autograd_dot: AutoGradDot) -> Op autograd_dot.add_tensor(val, name=attr, fillcolor='orange') continue - if isinstance(val, Sequence): + if isinstance(val, (tuple, list)): for i, t in enumerate(val): if not torch.is_tensor(t): continue From 9a38144abc8a7318b78eeeafa0806e5a245e7a83 Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Tue, 31 Jan 2023 01:00:31 -0500 Subject: [PATCH 20/24] added license notice Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- analogvnn/utils/render_autograd_graph.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/analogvnn/utils/render_autograd_graph.py b/analogvnn/utils/render_autograd_graph.py index ca36e04..504d5a5 100644 --- a/analogvnn/utils/render_autograd_graph.py +++ b/analogvnn/utils/render_autograd_graph.py @@ -1,3 +1,7 @@ +# The original snippet is licensed under the MIT License. +# The following code is modified snippet from https://github.com/szagoruyko/pytorchviz/blob/master/torchviz/dot.py +# to render the autograd graph of a module or a tensor for analogvnn. 
+ from __future__ import annotations import dataclasses From 274b1d87f7ff058ec3bd7fc1b92572b7988bda8c Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Mon, 27 Feb 2023 05:12:32 -0500 Subject: [PATCH 21/24] Rewrite super() calls Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- .gitignore | 3 --- analogvnn/backward/BackwardFunction.py | 2 +- analogvnn/backward/BackwardModule.py | 4 ++-- analogvnn/graph/AccumulateGrad.py | 2 +- analogvnn/graph/AcyclicDirectedGraph.py | 2 +- analogvnn/graph/ArgsKwargs.py | 2 +- analogvnn/graph/ModelGraph.py | 2 +- analogvnn/graph/ModelGraphState.py | 2 +- analogvnn/nn/Linear.py | 2 +- analogvnn/nn/activation/ELU.py | 4 ++-- analogvnn/nn/activation/Identity.py | 2 +- analogvnn/nn/activation/ReLU.py | 6 +++--- analogvnn/nn/module/Layer.py | 4 ++-- analogvnn/nn/module/Model.py | 6 +++--- analogvnn/nn/noise/GaussianNoise.py | 2 +- analogvnn/nn/noise/LaplacianNoise.py | 2 +- analogvnn/nn/noise/PoissonNoise.py | 2 +- analogvnn/nn/noise/UniformNoise.py | 2 +- analogvnn/nn/normalize/LPNorm.py | 18 +++++++++--------- analogvnn/nn/precision/ReducePrecision.py | 2 +- .../nn/precision/StochasticReducePrecision.py | 2 +- analogvnn/parameter/Parameter.py | 6 +++--- analogvnn/parameter/PseudoParameter.py | 4 ++-- analogvnn/utils/TensorboardModelLog.py | 2 +- analogvnn/utils/is_cpu_cuda.py | 2 +- docs/_static/AnalogVNN_Demo.ipynb | 4 ++-- docs/tutorial.md | 4 ++-- sample_code.py | 4 ++-- sample_code_with_logs.py | 4 ++-- unit_tests/test_pseudo_parameter.py | 8 ++++---- 30 files changed, 54 insertions(+), 57 deletions(-) diff --git a/.gitignore b/.gitignore index 169fee3..480233d 100644 --- a/.gitignore +++ b/.gitignore @@ -3,9 +3,6 @@ __pycache__/ *.py[cod] *$py.class -_run_files/ -_crc_slurm/ -_results/ _data/ .idea .idea/** diff --git a/analogvnn/backward/BackwardFunction.py b/analogvnn/backward/BackwardFunction.py index f653d29..41ee035 100644 --- a/analogvnn/backward/BackwardFunction.py +++ b/analogvnn/backward/BackwardFunction.py @@ -27,7 +27,7 @@ def __init__(self, backward_function: TENSOR_CALLABLE, layer: nn.Module = None): layer (nn.Module): The layer that this backward module is associated with. """ - super(BackwardFunction, self).__init__(layer) + super().__init__(layer) self._backward_function = backward_function @property diff --git a/analogvnn/backward/BackwardModule.py b/analogvnn/backward/BackwardModule.py index 7f12bff..ec6c4a1 100644 --- a/analogvnn/backward/BackwardModule.py +++ b/analogvnn/backward/BackwardModule.py @@ -81,7 +81,7 @@ def __init__(self, layer: nn.Module = None): layer (nn.Module): The layer for which the backward gradient is computed. 
""" - super(BackwardModule, self).__init__() + super().__init__() self._layer = None self._set_autograd_backward() if not isinstance(self, nn.Module): @@ -282,7 +282,7 @@ def __getattr__(self, name: str) -> Any: """ if isinstance(self, nn.Module) or self == self._layer: - return super(BackwardModule, self).__getattr__(name) + return super().__getattr__(name) if not str(name).startswith('__') and self._layer is not None and hasattr(self._layer, name): return getattr(self._layer, name) raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, name)) diff --git a/analogvnn/graph/AccumulateGrad.py b/analogvnn/graph/AccumulateGrad.py index 4451250..2c7a0b3 100644 --- a/analogvnn/graph/AccumulateGrad.py +++ b/analogvnn/graph/AccumulateGrad.py @@ -30,7 +30,7 @@ def __init__(self, module: Union[nn.Module, Callable]): module (Union[nn.Module, Callable]): Module from which to accumulate gradients. """ - super(AccumulateGrad, self).__init__() + super().__init__() self.input_output_connections = {} self.module = module diff --git a/analogvnn/graph/AcyclicDirectedGraph.py b/analogvnn/graph/AcyclicDirectedGraph.py index ada2d39..d13d705 100644 --- a/analogvnn/graph/AcyclicDirectedGraph.py +++ b/analogvnn/graph/AcyclicDirectedGraph.py @@ -50,7 +50,7 @@ def __init__(self, graph_state: ModelGraphState = None): NotImplementedError: If allow_loops is True, since this is not implemented yet. """ - super(AcyclicDirectedGraph, self).__init__() + super().__init__() self.graph = nx.MultiDiGraph() self.graph_state = graph_state self._is_static = False diff --git a/analogvnn/graph/ArgsKwargs.py b/analogvnn/graph/ArgsKwargs.py index 56a58bb..7164006 100644 --- a/analogvnn/graph/ArgsKwargs.py +++ b/analogvnn/graph/ArgsKwargs.py @@ -39,7 +39,7 @@ def __init__(self, args=None, kwargs=None): kwargs: The keyword arguments. """ - super(ArgsKwargs, self).__init__() + super().__init__() if args is None: args = [] if kwargs is None: diff --git a/analogvnn/graph/ModelGraph.py b/analogvnn/graph/ModelGraph.py index a14b5fd..b04e3f0 100644 --- a/analogvnn/graph/ModelGraph.py +++ b/analogvnn/graph/ModelGraph.py @@ -26,7 +26,7 @@ def __init__(self, use_autograd_graph: bool = False, allow_loops: bool = False): allow_loops: If True, the graph is allowed to contain loops. """ - super(ModelGraph, self).__init__(use_autograd_graph, allow_loops) + super().__init__(use_autograd_graph, allow_loops) self.forward_graph = ForwardGraph(self) self.backward_graph = BackwardGraph(self) diff --git a/analogvnn/graph/ModelGraphState.py b/analogvnn/graph/ModelGraphState.py index 9021cab..c33b44a 100644 --- a/analogvnn/graph/ModelGraphState.py +++ b/analogvnn/graph/ModelGraphState.py @@ -46,7 +46,7 @@ def __init__(self, use_autograd_graph: bool = False, allow_loops=False): allow_loops: If True, the graph is allowed to contain loops. """ - super(ModelGraphState, self).__init__() + super().__init__() self.allow_loops = allow_loops self.use_autograd_graph = use_autograd_graph diff --git a/analogvnn/nn/Linear.py b/analogvnn/nn/Linear.py index c3bf95a..f8338b1 100644 --- a/analogvnn/nn/Linear.py +++ b/analogvnn/nn/Linear.py @@ -76,7 +76,7 @@ def __init__(self, in_features: int, out_features: int, bias: bool = True): bias (bool): True if the layer has a bias. 
""" - super(Linear, self).__init__() + super().__init__() self.in_features = in_features self.out_features = out_features diff --git a/analogvnn/nn/activation/ELU.py b/analogvnn/nn/activation/ELU.py index 03cabce..4c28265 100644 --- a/analogvnn/nn/activation/ELU.py +++ b/analogvnn/nn/activation/ELU.py @@ -28,7 +28,7 @@ def __init__(self, alpha: float = 1.0507, scale_factor: float = 1.): scale_factor (float): the scale factor parameter. """ - super(SELU, self).__init__() + super().__init__() self.alpha = nn.Parameter(torch.tensor(alpha), requires_grad=False) self.scale_factor = nn.Parameter(torch.tensor(scale_factor), requires_grad=False) @@ -103,4 +103,4 @@ def __init__(self, alpha: float = 1.0507): alpha (float): the alpha parameter. """ - super(ELU, self).__init__(alpha=alpha, scale_factor=1.) + super().__init__(alpha=alpha, scale_factor=1.) diff --git a/analogvnn/nn/activation/Identity.py b/analogvnn/nn/activation/Identity.py index 136741d..39853b9 100644 --- a/analogvnn/nn/activation/Identity.py +++ b/analogvnn/nn/activation/Identity.py @@ -23,7 +23,7 @@ def __init__(self, name=None): name (str): the name of the activation function. """ - super(Identity, self).__init__() + super().__init__() self.name = name def extra_repr(self) -> str: diff --git a/analogvnn/nn/activation/ReLU.py b/analogvnn/nn/activation/ReLU.py index c7358f9..f735e7c 100644 --- a/analogvnn/nn/activation/ReLU.py +++ b/analogvnn/nn/activation/ReLU.py @@ -28,7 +28,7 @@ def __init__(self, alpha: float): alpha (float): the slope of the negative part of the activation function. """ - super(PReLU, self).__init__() + super().__init__() self.alpha = nn.Parameter(torch.tensor(alpha), requires_grad=False) self._zero = nn.Parameter(torch.tensor(0), requires_grad=False) @@ -95,7 +95,7 @@ class ReLU(PReLU): def __init__(self): """Initialize the rectified linear unit (ReLU) activation function.""" - super(ReLU, self).__init__(alpha=0) + super().__init__(alpha=0) @staticmethod def initialise(tensor: Tensor) -> Tensor: @@ -134,4 +134,4 @@ class LeakyReLU(PReLU): def __init__(self): """Initialize the leaky rectified linear unit (LeakyReLU) activation function.""" - super(LeakyReLU, self).__init__(alpha=0.01) + super().__init__(alpha=0.01) diff --git a/analogvnn/nn/module/Layer.py b/analogvnn/nn/module/Layer.py index f73630e..97e20ce 100644 --- a/analogvnn/nn/module/Layer.py +++ b/analogvnn/nn/module/Layer.py @@ -79,7 +79,7 @@ class Layer(nn.Module): def __init__(self): """Initializes the layer.""" - super(Layer, self).__init__() + super().__init__() self._inputs = None self._outputs = None self._backward_module = None @@ -95,7 +95,7 @@ def __call__(self, *inputs, **kwargs): """ self._forward_wrapper(self.forward) - outputs = super(Layer, self).__call__(*inputs, **kwargs) + outputs = super().__call__(*inputs, **kwargs) if self.training: self._inputs = ArgsKwargs(args=inputs, kwargs=kwargs) self._outputs = outputs diff --git a/analogvnn/nn/module/Model.py b/analogvnn/nn/module/Model.py index 5c85f81..20b919c 100644 --- a/analogvnn/nn/module/Model.py +++ b/analogvnn/nn/module/Model.py @@ -60,7 +60,7 @@ def __init__(self, tensorboard_log_dir=None, device=is_cpu_cuda.device): device (torch.device): The device to run the model on. 
""" - super(Model, self).__init__() + super().__init__() self._compiled = False @@ -94,7 +94,7 @@ def __call__(self, *args, **kwargs): if not self._compiled: raise RuntimeError('Model is not compiled yet.') - return super(Model, self).__call__(*args, **kwargs) + return super().__call__(*args, **kwargs) @property def use_autograd_graph(self): @@ -139,7 +139,7 @@ def named_registered_children( memo.add(self.optimizer) memo.add(self.loss_function) memo.add(self.accuracy_function) - return super(Model, self).named_registered_children(memo=memo) + return super().named_registered_children(memo=memo) def compile(self, device: Optional[torch.device] = None, layer_data: bool = True): """Compile the model. diff --git a/analogvnn/nn/noise/GaussianNoise.py b/analogvnn/nn/noise/GaussianNoise.py index 1041dab..370ab9c 100644 --- a/analogvnn/nn/noise/GaussianNoise.py +++ b/analogvnn/nn/noise/GaussianNoise.py @@ -42,7 +42,7 @@ def __init__( precision (int): the precision of the Gaussian noise. """ - super(GaussianNoise, self).__init__() + super().__init__() if (std is None) + (leakage is None) + (precision is None) != 1: raise ValueError('only 2 out of 3 arguments are needed (std, leakage, precision)') diff --git a/analogvnn/nn/noise/LaplacianNoise.py b/analogvnn/nn/noise/LaplacianNoise.py index be6e98d..98b8b5d 100644 --- a/analogvnn/nn/noise/LaplacianNoise.py +++ b/analogvnn/nn/noise/LaplacianNoise.py @@ -41,7 +41,7 @@ def __init__( precision (int): the precision of the Laplacian noise. """ - super(LaplacianNoise, self).__init__() + super().__init__() if (scale is None) + (leakage is None) + (precision is None) != 1: raise ValueError('only 2 out of 3 arguments are needed (scale, leakage, precision)') diff --git a/analogvnn/nn/noise/PoissonNoise.py b/analogvnn/nn/noise/PoissonNoise.py index c42fe78..35e8f42 100644 --- a/analogvnn/nn/noise/PoissonNoise.py +++ b/analogvnn/nn/noise/PoissonNoise.py @@ -43,7 +43,7 @@ def __init__( precision (Optional[int]): the precision of the Poisson noise. """ - super(PoissonNoise, self).__init__() + super().__init__() if (scale is None) + (max_leakage is None) + (precision is None) != 1: raise ValueError('only 2 out of 3 arguments are needed (scale, max_leakage, precision)') diff --git a/analogvnn/nn/noise/UniformNoise.py b/analogvnn/nn/noise/UniformNoise.py index 34445d2..e7d0a9b 100644 --- a/analogvnn/nn/noise/UniformNoise.py +++ b/analogvnn/nn/noise/UniformNoise.py @@ -43,7 +43,7 @@ def __init__( precision (int): the precision of the uniform noise. """ - super(UniformNoise, self).__init__() + super().__init__() if (low is None or high is None) + (leakage is None) + (precision is None) != 1: raise ValueError('only 2 out of 3 arguments are needed (scale, leakage, precision)') diff --git a/analogvnn/nn/normalize/LPNorm.py b/analogvnn/nn/normalize/LPNorm.py index d8b2d78..1f6ec44 100644 --- a/analogvnn/nn/normalize/LPNorm.py +++ b/analogvnn/nn/normalize/LPNorm.py @@ -27,7 +27,7 @@ def __init__(self, p: int, make_max_1=False): make_max_1 (bool): if True, the maximum absolute value of the output tensor will be 1. 
""" - super(LPNorm, self).__init__() + super().__init__() self.p = nn.Parameter(torch.tensor(p), requires_grad=False) self.make_max_1 = nn.Parameter(torch.tensor(make_max_1), requires_grad=False) @@ -84,7 +84,7 @@ class L1Norm(LPNorm): def __init__(self): """Initializes the row-wise L1 normalization function.""" - super(L1Norm, self).__init__(p=1, make_max_1=False) + super().__init__(p=1, make_max_1=False) class L2Norm(LPNorm): @@ -93,7 +93,7 @@ class L2Norm(LPNorm): def __init__(self): """Initializes the row-wise L2 normalization function.""" - super(L2Norm, self).__init__(p=2, make_max_1=False) + super().__init__(p=2, make_max_1=False) class L1NormW(LPNormW): @@ -102,7 +102,7 @@ class L1NormW(LPNormW): def __init__(self): """Initializes the whole matrix L1 normalization function.""" - super(L1NormW, self).__init__(p=1, make_max_1=False) + super().__init__(p=1, make_max_1=False) class L2NormW(LPNormW): @@ -111,7 +111,7 @@ class L2NormW(LPNormW): def __init__(self): """Initializes the whole matrix L2 normalization function.""" - super(L2NormW, self).__init__(p=2, make_max_1=False) + super().__init__(p=2, make_max_1=False) class L1NormM(LPNorm): @@ -120,7 +120,7 @@ class L1NormM(LPNorm): def __init__(self): """Initializes the row-wise L1 normalization function with maximum absolute value of 1.""" - super(L1NormM, self).__init__(p=1, make_max_1=True) + super().__init__(p=1, make_max_1=True) class L2NormM(LPNorm): @@ -129,7 +129,7 @@ class L2NormM(LPNorm): def __init__(self): """Initializes the row-wise L2 normalization function with maximum absolute value of 1.""" - super(L2NormM, self).__init__(p=2, make_max_1=True) + super().__init__(p=2, make_max_1=True) class L1NormWM(LPNormW): @@ -138,7 +138,7 @@ class L1NormWM(LPNormW): def __init__(self): """Initializes the whole matrix L1 normalization function with maximum absolute value of 1.""" - super(L1NormWM, self).__init__(p=1, make_max_1=True) + super().__init__(p=1, make_max_1=True) class L2NormWM(LPNormW): @@ -147,4 +147,4 @@ class L2NormWM(LPNormW): def __init__(self): """Initializes the whole matrix L2 normalization function with maximum absolute value of 1.""" - super(L2NormWM, self).__init__(p=2, make_max_1=True) + super().__init__(p=2, make_max_1=True) diff --git a/analogvnn/nn/precision/ReducePrecision.py b/analogvnn/nn/precision/ReducePrecision.py index bdf122e..16476cb 100644 --- a/analogvnn/nn/precision/ReducePrecision.py +++ b/analogvnn/nn/precision/ReducePrecision.py @@ -31,7 +31,7 @@ def __init__(self, precision: int = None, divide: float = 0.5): then 0.6 will be rounded to 1.0 and 0.4 will be rounded to 0.0. """ - super(ReducePrecision, self).__init__() + super().__init__() if precision < 1: raise ValueError(f'precision has to be more than 0, but got {precision}') diff --git a/analogvnn/nn/precision/StochasticReducePrecision.py b/analogvnn/nn/precision/StochasticReducePrecision.py index 448006e..5bc6e27 100644 --- a/analogvnn/nn/precision/StochasticReducePrecision.py +++ b/analogvnn/nn/precision/StochasticReducePrecision.py @@ -26,7 +26,7 @@ def __init__(self, precision: int = 8): precision (int): the precision of the output tensor. 
""" - super(StochasticReducePrecision, self).__init__() + super().__init__() if precision < 1: raise ValueError('precision has to be more than 0, but got {}'.format(precision)) diff --git a/analogvnn/parameter/Parameter.py b/analogvnn/parameter/Parameter.py index 2999efe..be337e5 100644 --- a/analogvnn/parameter/Parameter.py +++ b/analogvnn/parameter/Parameter.py @@ -22,7 +22,7 @@ def __new__(cls, data=None, requires_grad=True, *args, **kwargs): Parameter: the created parameter. """ - return super(Parameter, cls).__new__(cls, data, requires_grad) + return super().__new__(cls, data, requires_grad) # noinspection PyUnusedLocal def __init__(self, data=None, requires_grad=True, *args, **kwargs): @@ -35,7 +35,7 @@ def __init__(self, data=None, requires_grad=True, *args, **kwargs): **kwargs: additional keyword arguments. """ - super(Parameter, self).__init__() + super().__init__() def __repr__(self, *args, **kwargs): """Returns a string representation of the parameter. @@ -48,4 +48,4 @@ def __repr__(self, *args, **kwargs): str: the string representation. """ - return super(Parameter, self).__repr__(*args, **kwargs) + return super().__repr__(*args, **kwargs) diff --git a/analogvnn/parameter/PseudoParameter.py b/analogvnn/parameter/PseudoParameter.py index 5b14945..2118e6f 100644 --- a/analogvnn/parameter/PseudoParameter.py +++ b/analogvnn/parameter/PseudoParameter.py @@ -32,7 +32,7 @@ def __init__(self, original, transformed): transformed (nn.Parameter): the transformed parameters. """ - super(PseudoParameterModule, self).__init__() + super().__init__() self.original = original self._transformed = transformed @@ -125,7 +125,7 @@ def __init__(self, data=None, requires_grad=True, transformation=None, *args, ** **kwargs: additional keyword arguments. """ - super(PseudoParameter, self).__init__(data, requires_grad, *args, **kwargs) + super().__init__(data, requires_grad, *args, **kwargs) self._transformed = nn.Parameter(data=data, requires_grad=requires_grad) self._transformed.original = self self._transformation = self.identity diff --git a/analogvnn/utils/TensorboardModelLog.py b/analogvnn/utils/TensorboardModelLog.py index 80b18c2..283b7f0 100644 --- a/analogvnn/utils/TensorboardModelLog.py +++ b/analogvnn/utils/TensorboardModelLog.py @@ -41,7 +41,7 @@ def __init__(self, model: Model, log_dir: str): log_dir (str): the directory to log to. 
""" - super(TensorboardModelLog, self).__init__() + super().__init__() self.model = model self.tensorboard = None self.layer_data = True diff --git a/analogvnn/utils/is_cpu_cuda.py b/analogvnn/utils/is_cpu_cuda.py index 582b242..e56fb16 100644 --- a/analogvnn/utils/is_cpu_cuda.py +++ b/analogvnn/utils/is_cpu_cuda.py @@ -21,7 +21,7 @@ class CPUCuda: def __init__(self): """Initialize the CPUCuda class.""" - super(CPUCuda, self).__init__() + super().__init__() self._device = None self.device_name = None self.use_cpu() diff --git a/docs/_static/AnalogVNN_Demo.ipynb b/docs/_static/AnalogVNN_Demo.ipynb index 09adbe0..0111b2b 100644 --- a/docs/_static/AnalogVNN_Demo.ipynb +++ b/docs/_static/AnalogVNN_Demo.ipynb @@ -209,7 +209,7 @@ "source": [ "class LinearModel(FullSequential):\n", " def __init__(self, activation_class, norm_class, precision_class, precision, noise_class, leakage):\n", - " super(LinearModel, self).__init__()\n", + " super().__init__()\n", "\n", " self.activation_class = activation_class\n", " self.norm_class = norm_class\n", @@ -289,7 +289,7 @@ "source": [ "class WeightModel(FullSequential):\n", " def __init__(self, norm_class, precision_class, precision, noise_class, leakage):\n", - " super(WeightModel, self).__init__()\n", + " super().__init__()\n", " self.all_layers = []\n", "\n", " self.all_layers.append(norm_class())\n", diff --git a/docs/tutorial.md b/docs/tutorial.md index 5c4d7da..f8017c1 100644 --- a/docs/tutorial.md +++ b/docs/tutorial.md @@ -16,7 +16,7 @@ To convert a digital model to its analog counterpart the following steps needs t ```python class LinearModel(FullSequential): def __init__(self, activation_class, norm_class, precision_class, precision, noise_class, leakage): - super(LinearModel, self).__init__() + super().__init__() self.activation_class = activation_class self.norm_class = norm_class @@ -55,7 +55,7 @@ To convert a digital model to its analog counterpart the following steps needs t ```python class WeightModel(FullSequential): def __init__(self, norm_class, precision_class, precision, noise_class, leakage): - super(WeightModel, self).__init__() + super().__init__() self.all_layers = [] self.all_layers.append(norm_class()) diff --git a/sample_code.py b/sample_code.py index b4bc9c1..652f7f4 100644 --- a/sample_code.py +++ b/sample_code.py @@ -93,7 +93,7 @@ def __init__(self, activation_class, norm_class, precision_class, precision, noi the analog channel). """ - super(LinearModel, self).__init__() + super().__init__() self.activation_class = activation_class self.norm_class = norm_class @@ -143,7 +143,7 @@ def __init__(self, norm_class, precision_class, precision, noise_class, leakage) the analog channel). """ - super(WeightModel, self).__init__() + super().__init__() self.all_layers = [] self.all_layers.append(norm_class()) diff --git a/sample_code_with_logs.py b/sample_code_with_logs.py index 61dce45..b1356cc 100644 --- a/sample_code_with_logs.py +++ b/sample_code_with_logs.py @@ -96,7 +96,7 @@ def __init__(self, activation_class, norm_class, precision_class, precision, noi the analog channel). """ - super(LinearModel, self).__init__() + super().__init__() self.activation_class = activation_class self.norm_class = norm_class @@ -146,7 +146,7 @@ def __init__(self, norm_class, precision_class, precision, noise_class, leakage) the analog channel). 
""" - super(WeightModel, self).__init__() + super().__init__() self.all_layers = [] self.all_layers.append(norm_class()) diff --git a/unit_tests/test_pseudo_parameter.py b/unit_tests/test_pseudo_parameter.py index 726d5ef..3b50d89 100644 --- a/unit_tests/test_pseudo_parameter.py +++ b/unit_tests/test_pseudo_parameter.py @@ -9,19 +9,19 @@ # def __getattribute__(self, item): # print(f"__getattribute__:: {item!r}") -# return super(PseudoParameter, self).__getattribute__(item) +# return super().__getattribute__(item) # # def __setattr__(self, key, value): # print(f"__setattr__:: {key!r} -> {value!r}") -# super(PseudoParameter, self).__setattr__(key, value) +# super().__setattr__(key, value) # # # def __set__(self, instance, value): # # print(f"__set__:: {instance!r} -> {value!r}") -# # super(PseudoParameter, self).__set__(instance, value) +# # super().__set__(instance, value) # # def __get__(self, instance, owner): # print(f"__get__:: {instance!r} -> {owner!r}") -# return super(PseudoParameter, self).__get__(instance, owner) +# return super().__get__(instance, owner) # # @classmethod # def __torch_function__(cls, func, types, args=(), kwargs=None): From f866e884cdab45b176b59786388acab139ca1ab9 Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Mon, 27 Feb 2023 05:14:42 -0500 Subject: [PATCH 22/24] using f-strings Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- analogvnn/backward/BackwardModule.py | 2 +- analogvnn/nn/precision/StochasticReducePrecision.py | 4 ++-- analogvnn/utils/render_autograd_graph.py | 2 +- docs/conf.py | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/analogvnn/backward/BackwardModule.py b/analogvnn/backward/BackwardModule.py index ec6c4a1..9de8dd1 100644 --- a/analogvnn/backward/BackwardModule.py +++ b/analogvnn/backward/BackwardModule.py @@ -285,4 +285,4 @@ def __getattr__(self, name: str) -> Any: return super().__getattr__(name) if not str(name).startswith('__') and self._layer is not None and hasattr(self._layer, name): return getattr(self._layer, name) - raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, name)) + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'") diff --git a/analogvnn/nn/precision/StochasticReducePrecision.py b/analogvnn/nn/precision/StochasticReducePrecision.py index 5bc6e27..981587e 100644 --- a/analogvnn/nn/precision/StochasticReducePrecision.py +++ b/analogvnn/nn/precision/StochasticReducePrecision.py @@ -28,10 +28,10 @@ def __init__(self, precision: int = 8): super().__init__() if precision < 1: - raise ValueError('precision has to be more than 0, but got {}'.format(precision)) + raise ValueError(f'precision has to be more than 0, but got {precision}') if precision != int(precision): - raise ValueError('precision must be int, but got {}'.format(precision)) + raise ValueError(f'precision must be int, but got {precision}') self.precision = nn.Parameter(torch.tensor(precision), requires_grad=False) diff --git a/analogvnn/utils/render_autograd_graph.py b/analogvnn/utils/render_autograd_graph.py index 504d5a5..a8a1ef3 100644 --- a/analogvnn/utils/render_autograd_graph.py +++ b/analogvnn/utils/render_autograd_graph.py @@ -755,7 +755,7 @@ def parse_trace_graph(graph) -> List[Node]: scope[inputs[i]] = n.scopeName() uname = next(n.outputs()).uniqueName() - assert n.scopeName() != '', '{} has empty scope name'.format(n) + assert n.scopeName() != '', f'{n} has empty scope name' scope[uname] = 
n.scopeName() scope['0'] = 'input' diff --git a/docs/conf.py b/docs/conf.py index 2b192a0..c838169 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -4,14 +4,14 @@ try: from analogvnn import __version__ - print('Version from module: {}'.format(__version__)) + print(f'Version from module: {__version__}') except Exception: with open('../pyproject.toml', 'r') as f: for i in f.readlines(): if 'version' in i: __version__ = i.split('=')[1].strip().strip('"') break - print('Version from toml: {}'.format(__version__)) + print(f'Version from toml: {__version__}') # Configuration file for the Sphinx documentation builder. # From 70c5afb81e366ba7f64b13f44b614b354938b762 Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Mon, 27 Feb 2023 05:16:54 -0500 Subject: [PATCH 23/24] small correction to rewrite super() calls Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- analogvnn/backward/BackwardModule.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/analogvnn/backward/BackwardModule.py b/analogvnn/backward/BackwardModule.py index 9de8dd1..f771002 100644 --- a/analogvnn/backward/BackwardModule.py +++ b/analogvnn/backward/BackwardModule.py @@ -282,7 +282,7 @@ def __getattr__(self, name: str) -> Any: """ if isinstance(self, nn.Module) or self == self._layer: - return super().__getattr__(name) + return super(BackwardModule, self).__getattr__(name) if not str(name).startswith('__') and self._layer is not None and hasattr(self._layer, name): return getattr(self._layer, name) raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'") From 1919e391ea3fa9090ff9c8f04d97d3861cf4b960 Mon Sep 17 00:00:00 2001 From: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> Date: Tue, 7 Mar 2023 05:55:05 -0500 Subject: [PATCH 24/24] completed v1.0.0 Signed-off-by: Vivswan Shah <58091053+Vivswan@users.noreply.github.com> --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 70225e2..9929739 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,7 +14,7 @@ py-modules = ['analogvnn'] [project] # $ pip install analogvnn name = "analogvnn" -version = "1.0.0rc7" +version = "1.0.0" description = "A fully modular framework for modeling and optimizing analog/photonic neural networks" # Optional readme = "README.md" requires-python = ">=3.7"
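
A few notes on the patterns these patches touch, with small standalone sketches in plain Python; none of the code below is AnalogVNN code.

The bulk `super()` modernization and its follow-up in [PATCH 23/24] rest on a subtlety worth recording. For a method defined inside a class body, the zero-argument form compiles to exactly the same call as the explicit two-argument form, because the compiler stores the defining class in a hidden `__class__` closure cell. The explicit `super(BackwardModule, self)` restored by patch 23 behaves identically in the normal case but does not depend on that cell, which matters if a function is defined outside a class body and attached afterwards. The commit message does not say what broke; the sketch below shows the one semantic difference between the two forms.

```python
class Base:
    def greet(self):
        return 'base'


class Child(Base):
    def greet(self):
        # Inside a class body these two calls are equivalent: the compiler
        # injects a __class__ cell (= Child) that zero-argument super() reads.
        assert super().greet() == super(Child, self).greet()
        return 'child'


def detached_greet(self):
    # Defined at module level there is no __class__ cell, so calling this
    # raises "RuntimeError: super(): __class__ cell not found", while the
    # explicit super(Child, self).greet() would still work here.
    return super().greet()


Child.detached_greet = detached_greet
print(Child().greet())      # 'child'
# Child().detached_greet()  # RuntimeError: super(): __class__ cell not found
```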
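Several of the noise layers touched above (`GaussianNoise`, `LaplacianNoise`, `PoissonNoise`, `UniformNoise`) validate their constructor arguments with the same boolean-arithmetic idiom: `(a is None) + (b is None) + (c is None) != 1`. Since `bool` is an `int` subclass, the sum counts the omitted arguments, and the guard insists that exactly one of the three be left out; the layers appear to treat the third quantity as derivable from the other two. A standalone sketch of the idiom (the function name is illustrative, not part of the library):

```python
from typing import Optional


def require_exactly_two(std: Optional[float] = None,
                        leakage: Optional[float] = None,
                        precision: Optional[int] = None) -> None:
    # Each "is None" test contributes 0 or 1, so the sum is the number of
    # omitted arguments, which must be exactly 1.
    if (std is None) + (leakage is None) + (precision is None) != 1:
        raise ValueError('only 2 out of 3 arguments are needed (std, leakage, precision)')


require_exactly_two(std=0.1, precision=8)   # ok: leakage left to be derived
# require_exactly_two(std=0.1)                            # ValueError: two omitted
# require_exactly_two(std=0.1, leakage=0.5, precision=8)  # ValueError: none omitted
```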
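[PATCH 22/24] is a pure readability change: every `str.format` call is replaced by an f-string that renders identical text, including conversion flags such as `!r` in the commented-out `test_pseudo_parameter.py` code. A quick equivalence check:

```python
precision = 0
# The f-string produces the same message the old str.format call did:
assert ('precision has to be more than 0, but got {}'.format(precision)
        == f'precision has to be more than 0, but got {precision}')

# Conversion flags carry over unchanged; !r applies repr():
item = 'weight'
assert '{!r}'.format(item) == f'{item!r}' == "'weight'"
```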
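Finally, the `BackwardModule.__getattr__` hunks in patches 22 and 23 implement a common proxy pattern: attributes not found on the backward module fall through to the wrapped forward layer. A stripped-down sketch of that pattern, with illustrative class names rather than the library's API:

```python
class LayerProxy:
    def __init__(self, layer=None):
        # __getattr__ below only fires when ordinary attribute lookup fails,
        # so once _layer is set here, reading self._layer never recurses.
        self._layer = layer

    def __getattr__(self, name):
        # Fallback path: delegate unknown, non-dunder attributes to the
        # wrapped layer, mirroring BackwardModule.__getattr__.
        if not name.startswith('__') and self._layer is not None and hasattr(self._layer, name):
            return getattr(self._layer, name)
        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")


class FakeLayer:
    weight = 3.14


proxy = LayerProxy(FakeLayer())
print(proxy.weight)  # 3.14, resolved on the wrapped layer
# proxy.missing      # AttributeError, using the f-string message from patch 22
```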