Merge pull request #9 from Vivswan/in_progess
v1.0.0rc5
Vivswan authored Jan 12, 2023
2 parents e684d8f + 650e9e5 commit e120f7d
Showing 90 changed files with 934 additions and 5,445 deletions.
22 changes: 22 additions & 0 deletions .flake8
@@ -0,0 +1,22 @@
[flake8]
max-line-length = 120

extend-ignore =
C101, # Coding magic comment
D100, # Missing docstring in public module
D104, # Missing docstring in public package
D401, # First line should be in imperative mood

per-file-ignores =
sample_code.py: D100, D101, D102, D103, D104
sample_code_with_logs.py: D100, D101, D102, D103, D104

exclude =
./.git,
./venv,
./_data,
./dist,
./unit_tests,

max-complexity = 10
optional-ascii-coding = True
4 changes: 3 additions & 1 deletion README.md
@@ -21,6 +21,8 @@ pip install analogvnn

![3 Layered Linear Photonic Analog Neural Network](docs/_static/analogvnn_model.png)

[//]: # (![3 Layered Linear Photonic Analog Neural Network](https://github.com/Vivswan/AnalogVNN/raw/v1.0.0/docs/_static/analogvnn_model.png))

## Abstract

**AnalogVNN** is a simulation framework built on PyTorch which can simulate the effects of
@@ -53,5 +55,5 @@ Or in textual form:

```text
Vivswan Shah, and Nathan Youngblood. "AnalogVNN: A fully modular framework for modeling
and optimizing photonic neural networks." *arXiv preprint arXiv:2210.10048 (2022)*.
and optimizing photonic neural networks." arXiv preprint arXiv:2210.10048 (2022).
```
7 changes: 4 additions & 3 deletions analogvnn/__init__.py
@@ -2,13 +2,14 @@

import importlib_metadata

__package__ = "analogvnn"
__package__ = 'analogvnn'
__version__ = importlib_metadata.version(__package__)
__author__ = "Vivswan Shah (vivswanshah@pitt.edu)"
__author__ = 'Vivswan Shah (vivswanshah@pitt.edu)'

if sys.version_info < (3, 7, 0):
import warnings

warnings.warn(
'The installed Python version reached its end-of-life. Please upgrade to a newer Python version for receiving '
'The installed Python version reached its end-of-life. '
'Please upgrade to a newer Python version for receiving '
'further gdshelpers updates.', Warning)
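
A minimal usage sketch of the metadata exposed by this `__init__.py`; it assumes the `analogvnn` package is installed so that `importlib_metadata` can resolve the version.

```python
# Minimal sketch: reading the package metadata defined in analogvnn/__init__.py.
# Assumes analogvnn is installed in the current environment.
import analogvnn

print(analogvnn.__version__)  # resolved via importlib_metadata.version('analogvnn')
print(analogvnn.__author__)
```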
8 changes: 4 additions & 4 deletions analogvnn/backward/BackwardFunction.py
@@ -17,6 +17,7 @@ class BackwardFunction(BackwardModule, ABC):
Attributes:
_backward_function (Callable): The function used to compute the backward gradient.
"""

_backward_function: Callable

def __init__(self, backward_function: Callable, layer: nn.Module = None):
@@ -26,7 +27,7 @@ def __init__(self, backward_function: Callable, layer: nn.Module = None):
backward_function (Callable): The function used to compute the backward gradient.
layer (nn.Module): The layer that this backward module is associated with.
"""
super().__init__(layer)
super(BackwardFunction, self).__init__(layer)
self._backward_function = backward_function

@property
@@ -60,8 +61,7 @@ def set_backward_function(self, backward_function: Callable) -> BackwardFunction
return self

def backward(self, *grad_output: Tensor, **grad_output_kwarg: Tensor) -> TENSORS:
"""Computes the backward gradient of the input of the layer with respect to the output of the layer
using the backward function.
"""Computes the backward gradient of inputs with respect to outputs using the backward function.
Args:
*grad_output (Tensor): The gradients of the output of the layer.
@@ -74,6 +74,6 @@ def backward(self, *grad_output: Tensor, **grad_output_kwarg: Tensor) -> TENSORS
NotImplementedError: If the backward function is not set.
"""
if self._backward_function is None:
raise ValueError("set backward_function first before invoking backward")
raise ValueError('set backward_function first before invoking backward')

return self._backward_function(*grad_output, **grad_output_kwarg)
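
For context, a hedged sketch of how a custom gradient callable might be wrapped, based only on the constructor and `backward` signatures visible in this diff; the import path and the scaling function are assumptions, not code from this commit.

```python
# Hypothetical sketch: wrapping a custom gradient function in BackwardFunction,
# following the signatures shown above.
import torch
from torch import Tensor

from analogvnn.backward.BackwardFunction import BackwardFunction


def halve_gradient(grad_output: Tensor) -> Tensor:
    # Custom backward rule: pass gradients through scaled by 0.5.
    return grad_output * 0.5


backward_module = BackwardFunction(backward_function=halve_gradient)
# backward() delegates to the wrapped callable (and raises ValueError
# if no backward_function has been set).
grad_in = backward_module.backward(torch.ones(3))
print(grad_in)  # gradients scaled by 0.5
```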
3 changes: 1 addition & 2 deletions analogvnn/backward/BackwardIdentity.py
@@ -9,8 +9,7 @@


class BackwardIdentity(BackwardModule, ABC):
"""The backward module that returns the output gradients as the input gradients.
"""
"""The backward module that returns the output gradients as the input gradients."""

def backward(self, *grad_output: Tensor, **grad_output_kwarg: Tensor) -> TENSORS:
"""Returns the output gradients as the input gradients.
37 changes: 15 additions & 22 deletions analogvnn/backward/BackwardModule.py
@@ -25,16 +25,15 @@ class BackwardModule(abc.ABC):
_autograd_backward (Type[AutogradBackward]): The autograd backward function.
_disable_autograd_backward (bool): If True the autograd backward function is disabled.
"""

_layer: Optional[nn.Module]
_empty_holder_tensor: Tensor
_autograd_backward: Type[AutogradBackward]
_disable_autograd_backward: bool
_empty_holder_tensor: Tensor = torch.ones((1,), requires_grad=True)
_autograd_backward: Type[AutogradBackward] = None
_disable_autograd_backward: bool = False

# noinspection PyAbstractClass
class AutogradBackward(autograd.Function):
"""Autograd function is used as an optimization and proper calculation of gradients
when using the autograd engine.
"""
"""Optimization and proper calculation of gradients when using the autograd engine."""

# noinspection PyMethodOverriding
@staticmethod
@@ -65,7 +64,6 @@ def backward(ctx: Any, *grad_outputs: Tensor) -> Tuple[None, None, TENSORS]:
Returns:
TENSORS: The gradients of the input of the function.
"""

backward_module: BackwardModule = ctx.backward_module
results = backward_module._call_impl_backward(*grad_outputs)

@@ -82,12 +80,7 @@ def __init__(self, layer: nn.Module = None):
"""
super(BackwardModule, self).__init__()
self._layer = None
self._empty_holder_tensor = torch.ones((1,), requires_grad=True)
self._empty_holder_tensor.names = ('_empty_holder_tensor',)
# noinspection PyTypeChecker
self._autograd_backward = None
self._autograd_backward = self._set_autograd_backward()
self._disable_autograd_backward = False
self._set_autograd_backward()
if not isinstance(self, nn.Module):
self.set_layer(layer)

@@ -104,7 +97,7 @@ def forward(self, *inputs: Tensor, **inputs_kwarg: Tensor) -> TENSORS:
Raises:
NotImplementedError: If the forward pass is not implemented.
"""
raise NotImplementedError(f"Module [{type(self).__name__}] is missing the required \"forward\" function")
raise NotImplementedError(f'Module [{type(self).__name__}] is missing the required "forward" function')

forward._implemented = False

@@ -122,7 +115,7 @@ def backward(self, *grad_outputs: Tensor, **grad_output_kwarg: Tensor) -> TENSOR
Raises:
NotImplementedError: If the backward pass is not implemented.
"""
raise NotImplementedError(f"Module [{type(self).__name__}] is missing the required \"backward\" function")
raise NotImplementedError(f'Module [{type(self).__name__}] is missing the required "backward" function')

def _call_impl_forward(self, *args: Tensor, **kwarg: Tensor) -> TENSORS:
"""Calls Forward pass of the layer.
@@ -173,7 +166,7 @@ def has_forward(self) -> bool:
Returns:
bool: True if the forward pass is implemented, False otherwise.
"""
return not hasattr(self.forward, "_implemented")
return not hasattr(self.forward, '_implemented')

@property
def layer(self) -> Optional[nn.Module]:
@@ -210,11 +203,11 @@ def set_layer(self, layer: Optional[nn.Module]) -> BackwardModule:
ValueError: If the layer is not an instance of nn.Module.
"""
if isinstance(self, nn.Module):
raise ValueError(f"layer of Backward Module is set to itself")
raise ValueError('layer of Backward Module is set to itself')
if self._layer is not None:
raise ValueError(f"changing the layer of Backward Module is not allowed")
raise ValueError('changing the layer of Backward Module is not allowed')
if layer is not None and not isinstance(layer, nn.Module):
raise ValueError(f'layer not instance of Layer class')
raise ValueError('layer not instance of Layer class')

self._layer = layer
self._set_autograd_backward()
@@ -227,7 +220,7 @@ def _set_autograd_backward(self):
else:
# noinspection PyTypeChecker
self._autograd_backward = type(
f"{layer.__class__.__name__}AutoGrad",
f'{layer.__class__.__name__}AutoGrad',
(BackwardModule.AutogradBackward,),
{}
)
@@ -252,7 +245,7 @@ def set_grad_of(tensor: Tensor, grad: Tensor) -> Optional[Tensor]:
tensor.backward(gradient=grad, inputs=tensor)
except Exception:
# noinspection PyProtectedMember
for key, value in tensor._backward_hooks.items():
for _, value in tensor._backward_hooks.items():
grad = value(grad)

if tensor.grad is None:
@@ -276,6 +269,6 @@ def __getattr__(self, name: str) -> Any:
"""
if isinstance(self, nn.Module) or self == self._layer:
return super(BackwardModule, self).__getattr__(name)
if not str(name).startswith("__") and self._layer is not None and hasattr(self._layer, name):
if not str(name).startswith('__') and self._layer is not None and hasattr(self._layer, name):
return getattr(self._layer, name)
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, name))
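
As an illustration of the interface above, here is a hedged subclass sketch that follows the `forward`/`backward` signatures documented in this diff; the class name and the gradient-clipping rule are purely illustrative and not part of this commit.

```python
# Illustrative subclass: a BackwardModule whose backward pass clips gradients,
# matching the interface documented above.
import torch
from torch import Tensor

from analogvnn.backward.BackwardModule import BackwardModule


class ClippedBackward(BackwardModule):
    def forward(self, x: Tensor) -> Tensor:
        # Identity forward pass.
        return x

    def backward(self, grad_output: Tensor) -> Tensor:
        # Clip incoming gradients to [-1, 1] before passing them back.
        return torch.clamp(grad_output, -1.0, 1.0)


clipped = ClippedBackward()
print(clipped.backward(torch.tensor([2.0, -3.0, 0.5])))  # values outside [-1, 1] are clipped
```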
6 changes: 2 additions & 4 deletions analogvnn/backward/BackwardUsingForward.py
@@ -9,12 +9,10 @@


class BackwardUsingForward(BackwardModule, ABC):
"""The backward module that uses the forward function to compute the backward gradient.
"""
"""The backward module that uses the forward function to compute the backward gradient."""

def backward(self, *grad_output: Tensor, **grad_output_kwarg: Tensor) -> TENSORS:
"""Computes the backward gradient of the input of the layer with respect to the output of the layer
using the forward function.
"""Computes the backward gradient of inputs with respect to outputs using the forward function.
Args:
*grad_output (Tensor): The gradients of the output of the layer.
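
A brief, standalone sketch of the idea behind this module — the forward computation is reused to propagate gradients. This is a conceptual illustration, not the library's internal code path; the linear layer and weights below are assumptions.

```python
# Standalone illustration of "backward using forward": gradients are pushed
# through the same computation the forward pass uses. For a linear map
# y = W @ v this yields W @ g rather than the exact W.T @ g.
import torch

weight = torch.tensor([[2.0, 1.0],
                       [0.0, 3.0]])


def forward(v: torch.Tensor) -> torch.Tensor:
    return weight @ v


grad_output = torch.tensor([1.0, 1.0])
exact_grad_input = weight.t() @ grad_output   # true backward of the linear map
approx_grad_input = forward(grad_output)      # backward-using-forward approximation
print(exact_grad_input, approx_grad_input)
```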
12 changes: 5 additions & 7 deletions analogvnn/fn/reduce_precision.py
@@ -7,14 +7,13 @@


def reduce_precision(x: TENSOR_OPERABLE, precision: TENSOR_OPERABLE, divide: TENSOR_OPERABLE) -> TENSOR_OPERABLE:
"""`reduce_precision` takes `x` and reduces its precision to `precision` by rounding to the
nearest multiple of `precision`.
"""Takes `x` and reduces its precision to `precision` by rounding to the nearest multiple of `precision`.
Args:
x (TENSOR_OPERABLE): Tensor
precision (TENSOR_OPERABLE): the precision of the quantization.
divide (TENSOR_OPERABLE): the number of bits to be reduced
Returns:
TENSOR_OPERABLE: TENSOR_OPERABLE with the same shape as x, but with values rounded to the nearest
multiple of precision.
@@ -29,9 +28,8 @@ def reduce_precision(x: TENSOR_OPERABLE, precision: TENSOR_OPERABLE, divide: TEN


def stochastic_reduce_precision(x: TENSOR_OPERABLE, precision: TENSOR_OPERABLE) -> TENSOR_OPERABLE:
"""`stochastic_reduce_precision` takes `x` and reduces its precision to `precision` by rounding to the
nearest multiple of `precision` with a stochastic rounding scheme.
"""Takes `x` and reduces its precision by rounding to the nearest multiple of `precision` with stochastic scheme.
Args:
x (TENSOR_OPERABLE): Tensor
precision (TENSOR_OPERABLE): the precision of the quantization.
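
For orientation, a hedged usage sketch of the two quantization helpers documented above; the argument values are illustrative and the exact numeric grid follows the docstrings in this diff rather than this example.

```python
# Usage sketch (illustrative argument values): deterministic and stochastic
# precision reduction as documented above.
import torch

from analogvnn.fn.reduce_precision import reduce_precision, stochastic_reduce_precision

x = torch.rand(5)
quantized = reduce_precision(x, precision=2 ** 4, divide=0.5)     # round-to-nearest quantization
stochastic = stochastic_reduce_precision(x, precision=2 ** 4)     # stochastic rounding on the same grid
print(x)
print(quantized)
print(stochastic)
```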
1 change: 0 additions & 1 deletion analogvnn/fn/test.py
@@ -18,7 +18,6 @@ def test(model: nn.Module, test_loader: DataLoader, test_run: bool = False) -> T
Returns:
tuple: the loss and accuracy of the model on the test set.
"""

model.eval()
total_loss = 0
total_accuracy = 0
15 changes: 9 additions & 6 deletions analogvnn/graph/AccumulateGrad.py
@@ -10,14 +10,16 @@


class AccumulateGrad:
"""AccumulateGrad is a module that accumulates the gradients of the outputs of the module
it is attached to. It has no parameters of its own.
"""AccumulateGrad is a module that accumulates the gradients of the outputs of the module it is attached to.
It has no parameters of its own.
Attributes:
module (nn.Module): Module to accumulate gradients for.
input_output_connections (Dict[str, Dict[str, Union[None, bool, int, str, GRAPH_NODE_TYPE]]]): input/output
connections.
"""

input_output_connections: Dict[str, Dict[str, Union[None, bool, int, str, GRAPH_NODE_TYPE]]]
module: Union[nn.Module, Callable]

@@ -27,6 +29,7 @@ def __init__(self, module: Union[nn.Module, Callable]):
Args:
module (Union[nn.Module, Callable]): Module from which to accumulate gradients.
"""
super(AccumulateGrad, self).__init__()
self.input_output_connections = {}
self.module = module

@@ -36,9 +39,9 @@ def __repr__(self):
Returns:
str: String representation of the module.
"""
return f"AccumulateGrad({self.module})"
return f'AccumulateGrad({self.module})'

def __call__(
def __call__( # noqa: C901
self,
grad_outputs_args_kwargs: ArgsKwargs,
forward_input_output_graph: Dict[GRAPH_NODE_TYPE, InputOutput]
@@ -128,10 +131,10 @@ def __call__(
grad_inputs_args[forward_in_arg] += grad_output
continue

raise NotImplementedError("WTF!Why!")
raise NotImplementedError('WTF!Why!')

return ArgsKwargs(
args=[grad_inputs_args[i] for i in sorted(list(grad_inputs_args.keys()))],
args=[grad_inputs_args[i] for i in sorted(grad_inputs_args.keys())],
kwargs=grad_inputs_kwargs
)

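
Since AccumulateGrad exists to sum the gradients arriving from multiple consumers of the same tensor, here is a small, standalone illustration of that accumulation rule; it uses plain PyTorch autograd, not the AnalogVNN graph classes.

```python
# Standalone illustration of gradient accumulation (plain PyTorch autograd,
# not the AnalogVNN graph machinery): when one tensor feeds two branches,
# the gradients from both branches are summed.
import torch

x = torch.ones(3, requires_grad=True)
y1 = x * 2            # first consumer of x
y2 = x * 3            # second consumer of x
(y1.sum() + y2.sum()).backward()
print(x.grad)         # tensor([5., 5., 5.]) -- 2 + 3 accumulated per element
```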
