From a712129891acdada3334fc6127d7c1536c55ef88 Mon Sep 17 00:00:00 2001
From: Mustafa Hajij
Date: Wed, 6 Sep 2023 00:22:41 -0700
Subject: [PATCH 01/31] cwn unit testing

---
 test/nn/cell/test_cwn.py  |  40 +++++++++++++
 topomodelx/nn/cell/cwn.py | 122 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 162 insertions(+)
 create mode 100644 test/nn/cell/test_cwn.py
 create mode 100644 topomodelx/nn/cell/cwn.py

diff --git a/test/nn/cell/test_cwn.py b/test/nn/cell/test_cwn.py
new file mode 100644
index 00000000..e4fcb584
--- /dev/null
+++ b/test/nn/cell/test_cwn.py
@@ -0,0 +1,40 @@
+"""Test for the CWN class."""
+
+import torch
+
+from topomodelx.nn.cell.cwn import CWN
+
+
+class TestCWN:
+    """Test CWN."""
+
+    def test_forward(self):
+        """Test forward method."""
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        model = CWN(
+            in_channels_0=2,
+            in_channels_1=2,
+            in_channels_2=2,
+            hid_channels=16,
+            num_classes=1,
+            n_layers=2,
+        )
+
+        x_0 = torch.rand(2, 2)
+        x_1 = torch.rand(2, 2)
+        x_2 = torch.rand(2, 2)
+        adjacency_1 = torch.rand(2, 2)
+        incidence_2 = torch.rand(2, 2)
+        incidence_1_t = torch.rand(2, 2)
+
+        x_0, x_1, x_2 = (
+            torch.tensor(x_0).float().to(device),
+            torch.tensor(x_1).float().to(device),
+            torch.tensor(x_2).float().to(device),
+        )
+        adjacency_1 = adjacency_1.float().to(device)
+        incidence_2 = incidence_2.float().to(device)
+        incidence_1_t = incidence_1_t.float().to(device)
+
+        y = model(x_0, x_1, x_2, adjacency_1, incidence_2, incidence_1_t)
+        assert y.shape == torch.Size([1])
diff --git a/topomodelx/nn/cell/cwn.py b/topomodelx/nn/cell/cwn.py
new file mode 100644
index 00000000..1fa30ca9
--- /dev/null
+++ b/topomodelx/nn/cell/cwn.py
@@ -0,0 +1,122 @@
+"""CWN class."""
+
+import torch
+import torch.nn.functional as F
+
+from topomodelx.nn.cell.cwn_layer import CWNLayer
+
+
+class CWN(torch.nn.Module):
+    """Implementation of a specific version of CW Network.
+
+    Parameters
+    ----------
+    in_channels_0 : int
+        Dimension of input features on nodes (0-cells).
+    in_channels_1 : int
+        Dimension of input features on edges (1-cells).
+    in_channels_2 : int
+        Dimension of input features on faces (2-cells).
+    hid_channels : int
+        Dimension of hidden features.
+    num_classes : int
+        Number of classes.
+    n_layers : int
+        Number of CWN layers.
+    """
+
+    def __init__(
+        self,
+        in_channels_0,
+        in_channels_1,
+        in_channels_2,
+        hid_channels,
+        num_classes,
+        n_layers,
+    ):
+        super().__init__()
+        self.proj_0 = torch.nn.Linear(in_channels_0, hid_channels)
+        self.proj_1 = torch.nn.Linear(in_channels_1, hid_channels)
+        self.proj_2 = torch.nn.Linear(in_channels_2, hid_channels)
+
+        layers = []
+        for _ in range(n_layers):
+            layers.append(
+                CWNLayer(
+                    in_channels_0=hid_channels,
+                    in_channels_1=hid_channels,
+                    in_channels_2=hid_channels,
+                    out_channels=hid_channels,
+                )
+            )
+        self.layers = torch.nn.ModuleList(layers)
+
+        self.lin_0 = torch.nn.Linear(hid_channels, num_classes)
+        self.lin_1 = torch.nn.Linear(hid_channels, num_classes)
+        self.lin_2 = torch.nn.Linear(hid_channels, num_classes)
+
+    def forward(
+        self,
+        x_0,
+        x_1,
+        x_2,
+        neighborhood_1_to_1,
+        neighborhood_2_to_1,
+        neighborhood_0_to_1,
+    ):
+        """Forward computation through projection, convolutions, linear layers and average pooling.
+
+        Parameters
+        ----------
+        x_0 : torch.Tensor, shape = [n_nodes, in_channels_0]
+            Input features on the nodes (0-cells).
+        x_1 : torch.Tensor, shape = [n_edges, in_channels_1]
+            Input features on the edges (1-cells).
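+            Only these edge features are updated by the CWN layers below;
+            node and face features pass through the layers unchanged.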
+        x_2 : torch.Tensor, shape = [n_faces, in_channels_2]
+            Input features on the faces (2-cells).
+        neighborhood_1_to_1 : tensor, shape = [n_edges, n_edges]
+            Upper-adjacency matrix of rank 1.
+        neighborhood_2_to_1 : tensor, shape = [n_edges, n_faces]
+            Boundary matrix of rank 2.
+        neighborhood_0_to_1 : tensor, shape = [n_edges, n_nodes]
+            Coboundary matrix of rank 1.
+
+        Returns
+        -------
+        _ : tensor, shape = [1]
+            Label assigned to whole complex.
+        """
+        x_0 = F.elu(self.proj_0(x_0))
+        x_1 = F.elu(self.proj_1(x_1))
+        x_2 = F.elu(self.proj_2(x_2))
+
+        for layer in self.layers:
+            x_1 = layer(
+                x_0,
+                x_1,
+                x_2,
+                neighborhood_1_to_1,
+                neighborhood_2_to_1,
+                neighborhood_0_to_1,
+            )
+
+        x_0 = self.lin_0(x_0)
+        x_1 = self.lin_1(x_1)
+        x_2 = self.lin_2(x_2)
+
+        # Take the average of the 2D, 1D, and 0D cell features. If they are NaN, convert them to 0.
+        two_dimensional_cells_mean = torch.nanmean(x_2, dim=0)
+        two_dimensional_cells_mean[torch.isnan(two_dimensional_cells_mean)] = 0
+
+        one_dimensional_cells_mean = torch.nanmean(x_1, dim=0)
+        one_dimensional_cells_mean[torch.isnan(one_dimensional_cells_mean)] = 0
+
+        zero_dimensional_cells_mean = torch.nanmean(x_0, dim=0)
+        zero_dimensional_cells_mean[torch.isnan(zero_dimensional_cells_mean)] = 0
+
+        # Return the sum of the averages
+        return (
+            two_dimensional_cells_mean
+            + one_dimensional_cells_mean
+            + zero_dimensional_cells_mean
+        )

From db997e7fbacb1d52df9e5fb27c38e8efd367695c Mon Sep 17 00:00:00 2001
From: Mustafa Hajij
Date: Wed, 6 Sep 2023 00:42:02 -0700
Subject: [PATCH 02/31] add ccxn

---
 test/nn/cell/test_ccxn.py  | 37 ++++++++++++++
 topomodelx/nn/cell/ccxn.py | 91 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 128 insertions(+)
 create mode 100644 test/nn/cell/test_ccxn.py
 create mode 100644 topomodelx/nn/cell/ccxn.py

diff --git a/test/nn/cell/test_ccxn.py b/test/nn/cell/test_ccxn.py
new file mode 100644
index 00000000..9e33ec0a
--- /dev/null
+++ b/test/nn/cell/test_ccxn.py
@@ -0,0 +1,37 @@
+"""Test for the CCXN class."""
+
+import torch
+
+from topomodelx.nn.cell.ccxn import CCXN
+
+
+class TestCCXN:
+    """Test CCXN."""
+
+    def test_forward(self):
+        """Test forward method."""
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        model = CCXN(
+            in_channels_0=2,
+            in_channels_1=2,
+            in_channels_2=2,
+            num_classes=1,
+            n_layers=2,
+            att=False,
+        )
+
+        x_0 = torch.rand(2, 2)
+        x_1 = torch.rand(2, 2)
+
+        adjacency_1 = torch.rand(2, 2)
+        incidence_2 = torch.rand(2, 2)
+
+        x_0, x_1 = (
+            torch.tensor(x_0).float().to(device),
+            torch.tensor(x_1).float().to(device),
+        )
+        adjacency_1 = adjacency_1.float().to(device)
+        incidence_2 = incidence_2.float().to(device)
+
+        y = model(x_0, x_1, adjacency_1, incidence_2)
+        assert y.shape == torch.Size([1])
diff --git a/topomodelx/nn/cell/ccxn.py b/topomodelx/nn/cell/ccxn.py
new file mode 100644
index 00000000..b42c5db0
--- /dev/null
+++ b/topomodelx/nn/cell/ccxn.py
@@ -0,0 +1,91 @@
+"""CCXN class."""
+
+import torch
+
+from topomodelx.nn.cell.ccxn_layer import CCXNLayer
+
+
+class CCXN(torch.nn.Module):
+    """CCXN.
+
+    Parameters
+    ----------
+    in_channels_0 : int
+        Dimension of input features on nodes.
+    in_channels_1 : int
+        Dimension of input features on edges.
+    in_channels_2 : int
+        Dimension of input features on faces.
+    num_classes : int
+        Number of classes.
+    n_layers : int
+        Number of CCXN layers.
+    att : bool
+        Whether to use attention.
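+        Default is False.
+
+    Examples
+    --------
+    A minimal sketch with random dense inputs, mirroring the unit test above:
+
+    >>> model = CCXN(
+    ...     in_channels_0=2, in_channels_1=2, in_channels_2=2, num_classes=1, n_layers=2
+    ... )
+    >>> x_0, x_1 = torch.rand(2, 2), torch.rand(2, 2)
+    >>> adjacency_1, incidence_2 = torch.rand(2, 2), torch.rand(2, 2)
+    >>> y = model(x_0, x_1, adjacency_1, incidence_2)  # y.shape == torch.Size([1])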
+    """
+
+    def __init__(
+        self,
+        in_channels_0,
+        in_channels_1,
+        in_channels_2,
+        num_classes,
+        n_layers=2,
+        att=False,
+    ):
+        super().__init__()
+        layers = []
+        for _ in range(n_layers):
+            layers.append(
+                CCXNLayer(
+                    in_channels_0=in_channels_0,
+                    in_channels_1=in_channels_1,
+                    in_channels_2=in_channels_2,
+                    att=att,
+                )
+            )
+        self.layers = torch.nn.ModuleList(layers)
+        self.lin_0 = torch.nn.Linear(in_channels_0, num_classes)
+        self.lin_1 = torch.nn.Linear(in_channels_1, num_classes)
+        self.lin_2 = torch.nn.Linear(in_channels_2, num_classes)
+
+    def forward(self, x_0, x_1, neighborhood_0_to_0, neighborhood_1_to_2):
+        """Forward computation through layers, then linear layers, then avg pooling.
+
+        Parameters
+        ----------
+        x_0 : torch.Tensor, shape = [n_nodes, in_channels_0]
+            Input features on the nodes (0-cells).
+        x_1 : torch.Tensor, shape = [n_edges, in_channels_1]
+            Input features on the edges (1-cells).
+        neighborhood_0_to_0 : tensor, shape = [n_nodes, n_nodes]
+            Adjacency matrix of rank 0 (up).
+        neighborhood_1_to_2 : tensor, shape = [n_faces, n_edges]
+            Transpose of boundary matrix of rank 2.
+
+        Returns
+        -------
+        _ : tensor, shape = [1]
+            Label assigned to whole complex.
+        """
+        for layer in self.layers:
+            x_0, x_1, x_2 = layer(x_0, x_1, neighborhood_0_to_0, neighborhood_1_to_2)
+        x_0 = self.lin_0(x_0)
+        x_1 = self.lin_1(x_1)
+        x_2 = self.lin_2(x_2)
+        # Take the average of the 2D, 1D, and 0D cell features. If they are NaN, convert them to 0.
+        two_dimensional_cells_mean = torch.nanmean(x_2, dim=0)
+        two_dimensional_cells_mean[torch.isnan(two_dimensional_cells_mean)] = 0
+        one_dimensional_cells_mean = torch.nanmean(x_1, dim=0)
+        one_dimensional_cells_mean[torch.isnan(one_dimensional_cells_mean)] = 0
+        zero_dimensional_cells_mean = torch.nanmean(x_0, dim=0)
+        zero_dimensional_cells_mean[torch.isnan(zero_dimensional_cells_mean)] = 0
+        # Return the sum of the averages
+        return (
+            two_dimensional_cells_mean
+            + one_dimensional_cells_mean
+            + zero_dimensional_cells_mean
+        )

From 607f12fc9cdaf7375c60057357e78c44b9e3f41a Mon Sep 17 00:00:00 2001
From: Mustafa Hajij
Date: Wed, 6 Sep 2023 01:04:07 -0700
Subject: [PATCH 03/31] CAN model

---
 test/nn/cell/test_can.py  |  41 +++++++++++
 topomodelx/nn/cell/can.py | 141 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 182 insertions(+)
 create mode 100644 test/nn/cell/test_can.py
 create mode 100644 topomodelx/nn/cell/can.py

diff --git a/test/nn/cell/test_can.py b/test/nn/cell/test_can.py
new file mode 100644
index 00000000..95e577f3
--- /dev/null
+++ b/test/nn/cell/test_can.py
@@ -0,0 +1,41 @@
+"""Test for the CAN class."""
+
+import torch
+
+from topomodelx.nn.cell.can import CAN
+
+
+class TestCAN:
+    """Test CAN."""
+
+    def test_forward(self):
+        """Test forward method."""
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        model = CAN(
+            in_channels_0=2,
+            in_channels_1=2,
+            out_channels=2,
+            dropout=0.5,
+            heads=2,
+            num_classes=1,
+            n_layers=2,
+            att_lift=True,
+        )
+
+        x_0 = torch.rand(2, 2)
+        x_1 = torch.rand(2, 2)
+
+        adjacency_1 = torch.rand(2, 2)
+        adjacency_2 = torch.rand(2, 2)
+        incidence_2 = torch.rand(2, 2)
+
+        x_0, x_1 = (
+            torch.tensor(x_0).float().to(device),
+            torch.tensor(x_1).float().to(device),
+        )
+        adjacency_1 = adjacency_1.float().to(device)
+        adjacency_2 = adjacency_2.float().to(device)
+        incidence_2 = incidence_2.float().to(device)
+
+        y = model(x_0, x_1, adjacency_1, adjacency_2, incidence_2)
+        assert y.shape == torch.Size([1])
diff --git a/topomodelx/nn/cell/can.py b/topomodelx/nn/cell/can.py
new file mode 100644
index 00000000..dbd8a565
--- /dev/null
+++ b/topomodelx/nn/cell/can.py
@@ -0,0 +1,141 @@
+"""CAN class."""
+
+import torch
+import torch.nn.functional as F
+
+from topomodelx.nn.cell.can_layer import CANLayer, MultiHeadLiftLayer, PoolLayer
+
+
+class CAN(torch.nn.Module):
+    """CAN (Cell Attention Network) module for graph classification.
+
+    Parameters
+    ----------
+    in_channels_0: int
+        Number of input channels for the node-level input.
+    in_channels_1: int
+        Number of input channels for the edge-level input.
+    out_channels: int
+        Number of output channels.
+    num_classes: int
+        Number of output classes.
+    dropout: float, optional
+        Dropout probability. Default is 0.5.
+    heads: int, optional
+        Number of attention heads. Default is 3.
+    concat: bool, optional
+        Whether to concatenate the output channels of attention heads. Default is True.
+    skip_connection: bool, optional
+        Whether to use skip connections. Default is True.
+    att_activation: torch.nn.Module, optional
+        Activation function for attention mechanism. Default is torch.nn.LeakyReLU(0.2).
+    n_layers: int, optional
+        Number of CAN layers. Default is 2.
+    att_lift: bool, optional
+        Whether to lift the signal from node-level to edge-level input. Default is True.
+    """
+
+    def __init__(
+        self,
+        in_channels_0,
+        in_channels_1,
+        out_channels,
+        num_classes,
+        dropout=0.5,
+        heads=3,
+        concat=True,
+        skip_connection=True,
+        att_activation=torch.nn.LeakyReLU(0.2),
+        n_layers=2,
+        att_lift=True,
+    ):
+        super().__init__()
+
+        if att_lift:
+            self.lift_layer = MultiHeadLiftLayer(
+                in_channels_0=in_channels_0,
+                heads=in_channels_0,
+                signal_lift_dropout=0.5,
+            )
+            in_channels_1 = in_channels_1 + in_channels_0
+
+        layers = []
+
+        layers.append(
+            CANLayer(
+                in_channels=in_channels_1,
+                out_channels=out_channels,
+                heads=heads,
+                concat=concat,
+                skip_connection=skip_connection,
+                att_activation=att_activation,
+                aggr_func="sum",
+                update_func="relu",
+            )
+        )
+
+        for _ in range(n_layers - 1):
+            layers.append(
+                CANLayer(
+                    in_channels=out_channels * heads,
+                    out_channels=out_channels,
+                    dropout=dropout,
+                    heads=heads,
+                    concat=concat,
+                    skip_connection=skip_connection,
+                    att_activation=att_activation,
+                    aggr_func="sum",
+                    update_func="relu",
+                )
+            )
+
+        layers.append(
+            PoolLayer(
+                k_pool=0.5,
+                in_channels_0=out_channels * heads,
+                signal_pool_activation=torch.nn.Sigmoid(),
+                readout=True,
+            )
+        )
+
+        self.layers = torch.nn.ModuleList(layers)
+        self.lin_0 = torch.nn.Linear(heads * out_channels, 128)
+        self.lin_1 = torch.nn.Linear(128, num_classes)
+
+    def forward(
+        self, x_0, x_1, neighborhood_0_to_0, lower_neighborhood, upper_neighborhood
+    ):
+        """Forward computation through layers.
+
+        Parameters
+        ----------
+        x_0 : torch.Tensor, shape = [n_nodes, in_channels_0]
+            Input features on the nodes (0-cells).
+        x_1 : torch.Tensor, shape = [n_edges, in_channels_1]
+            Input features on the edges (1-cells).
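+        neighborhood_0_to_0 : torch.sparse, shape = [n_nodes, n_nodes]
+            Node-to-node neighborhood matrix, consumed by the lift layer
+            when att_lift is True.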
+        lower_neighborhood : tensor, shape = [n_edges, n_edges]
+            Lower-neighborhood matrix of the edges (1-cells).
+        upper_neighborhood : tensor, shape = [n_edges, n_edges]
+            Upper-neighborhood matrix of the edges (1-cells).
+
+        Returns
+        -------
+        torch.Tensor, shape = [num_classes]
+            Predicted logits for the whole complex.
+        """
+        if hasattr(self, "lift_layer"):
+            x_1 = self.lift_layer(x_0, neighborhood_0_to_0, x_1)
+
+        for layer in self.layers:
+            if isinstance(layer, PoolLayer):
+                x_1, lower_neighborhood, upper_neighborhood = layer(
+                    x_1, lower_neighborhood, upper_neighborhood
+                )
+            else:
+                x_1 = layer(x_1, lower_neighborhood, upper_neighborhood)
+                x_1 = F.dropout(x_1, p=0.5, training=self.training)
+
+        # max pooling over all nodes in each graph
+        x = x_1.max(dim=0)[0]
+
+        # Feed-Forward Neural Network to predict the graph label
+        out = self.lin_1(torch.nn.functional.relu(self.lin_0(x)))
+
+        return out

From d7906c2e42eab27be1258f85528f686ed6141bc9 Mon Sep 17 00:00:00 2001
From: Mustafa Hajij
Date: Wed, 6 Sep 2023 01:45:59 -0700
Subject: [PATCH 04/31] add can_bis

---
 test/nn/cell/test_can.py      |   9 +--
 topomodelx/nn/cell/can_bis.py | 141 ++++++++++++++++++++++++++++++++++
 2 files changed, 145 insertions(+), 5 deletions(-)
 create mode 100644 topomodelx/nn/cell/can_bis.py

diff --git a/test/nn/cell/test_can.py b/test/nn/cell/test_can.py
index 95e577f3..cf7dcf78 100644
--- a/test/nn/cell/test_can.py
+++ b/test/nn/cell/test_can.py
@@ -1,5 +1,6 @@
 """Test for the CAN class."""
 
+import numpy as np
 import torch
 
 from topomodelx.nn.cell.can import CAN
@@ -25,17 +26,15 @@ def test_forward(self):
         x_0 = torch.rand(2, 2)
         x_1 = torch.rand(2, 2)
 
-        adjacency_1 = torch.rand(2, 2)
-        adjacency_2 = torch.rand(2, 2)
-        incidence_2 = torch.rand(2, 2)
+        adjacency_1 = torch.from_numpy(np.random.rand(2, 2)).to_sparse()
 
         x_0, x_1 = (
             torch.tensor(x_0).float().to(device),
             torch.tensor(x_1).float().to(device),
         )
         adjacency_1 = adjacency_1.float().to(device)
-        adjacency_2 = adjacency_2.float().to(device)
-        incidence_2 = incidence_2.float().to(device)
+        adjacency_2 = adjacency_1.float().to(device)
+        incidence_2 = adjacency_1.float().to(device)
 
         y = model(x_0, x_1, adjacency_1, adjacency_2, incidence_2)
         assert y.shape == torch.Size([1])
diff --git a/topomodelx/nn/cell/can_bis.py b/topomodelx/nn/cell/can_bis.py
new file mode 100644
index 00000000..dbd8a565
--- /dev/null
+++ b/topomodelx/nn/cell/can_bis.py
@@ -0,0 +1,141 @@
+"""CAN class."""
+
+import torch
+import torch.nn.functional as F
+
+from topomodelx.nn.cell.can_layer import CANLayer, MultiHeadLiftLayer, PoolLayer
+
+
+class CAN(torch.nn.Module):
+    """CAN (Cell Attention Network) module for graph classification.
+
+    Parameters
+    ----------
+    in_channels_0: int
+        Number of input channels for the node-level input.
+    in_channels_1: int
+        Number of input channels for the edge-level input.
+    out_channels: int
+        Number of output channels.
+    num_classes: int
+        Number of output classes.
+    dropout: float, optional
+        Dropout probability. Default is 0.5.
+    heads: int, optional
+        Number of attention heads. Default is 3.
+    concat: bool, optional
+        Whether to concatenate the output channels of attention heads. Default is True.
+    skip_connection: bool, optional
+        Whether to use skip connections. Default is True.
+    att_activation: torch.nn.Module, optional
+        Activation function for attention mechanism. Default is torch.nn.LeakyReLU(0.2).
+    n_layers: int, optional
+        Number of CAN layers. Default is 2.
+    att_lift: bool, optional
+        Whether to lift the signal from node-level to edge-level input. Default is True.
+    """
+
+    def __init__(
+        self,
+        in_channels_0,
+        in_channels_1,
+        out_channels,
+        num_classes,
+        dropout=0.5,
+        heads=3,
+        concat=True,
+        skip_connection=True,
+        att_activation=torch.nn.LeakyReLU(0.2),
+        n_layers=2,
+        att_lift=True,
+    ):
+        super().__init__()
+
+        if att_lift:
+            self.lift_layer = MultiHeadLiftLayer(
+                in_channels_0=in_channels_0,
+                heads=in_channels_0,
+                signal_lift_dropout=0.5,
+            )
+            in_channels_1 = in_channels_1 + in_channels_0
+
+        layers = []
+
+        layers.append(
+            CANLayer(
+                in_channels=in_channels_1,
+                out_channels=out_channels,
+                heads=heads,
+                concat=concat,
+                skip_connection=skip_connection,
+                att_activation=att_activation,
+                aggr_func="sum",
+                update_func="relu",
+            )
+        )
+
+        for _ in range(n_layers - 1):
+            layers.append(
+                CANLayer(
+                    in_channels=out_channels * heads,
+                    out_channels=out_channels,
+                    dropout=dropout,
+                    heads=heads,
+                    concat=concat,
+                    skip_connection=skip_connection,
+                    att_activation=att_activation,
+                    aggr_func="sum",
+                    update_func="relu",
+                )
+            )
+
+        layers.append(
+            PoolLayer(
+                k_pool=0.5,
+                in_channels_0=out_channels * heads,
+                signal_pool_activation=torch.nn.Sigmoid(),
+                readout=True,
+            )
+        )
+
+        self.layers = torch.nn.ModuleList(layers)
+        self.lin_0 = torch.nn.Linear(heads * out_channels, 128)
+        self.lin_1 = torch.nn.Linear(128, num_classes)
+
+    def forward(
+        self, x_0, x_1, neighborhood_0_to_0, lower_neighborhood, upper_neighborhood
+    ):
+        """Forward computation through layers.
+
+        Parameters
+        ----------
+        x_0 : torch.Tensor, shape = [n_nodes, in_channels_0]
+            Input features on the nodes (0-cells).
+        x_1 : torch.Tensor, shape = [n_edges, in_channels_1]
+            Input features on the edges (1-cells).
+        lower_neighborhood : tensor, shape = [n_edges, n_edges]
+            Lower-neighborhood matrix of the edges (1-cells).
+        upper_neighborhood : tensor, shape = [n_edges, n_edges]
+            Upper-neighborhood matrix of the edges (1-cells).
+
+        Returns
+        -------
+        torch.Tensor, shape = [num_classes]
+            Predicted logits for the whole complex.
+        """
+        if hasattr(self, "lift_layer"):
+            x_1 = self.lift_layer(x_0, neighborhood_0_to_0, x_1)
+
+        for layer in self.layers:
+            if isinstance(layer, PoolLayer):
+                x_1, lower_neighborhood, upper_neighborhood = layer(
+                    x_1, lower_neighborhood, upper_neighborhood
+                )
+            else:
+                x_1 = layer(x_1, lower_neighborhood, upper_neighborhood)
+                x_1 = F.dropout(x_1, p=0.5, training=self.training)
+
+        # max pooling over all nodes in each graph
+        x = x_1.max(dim=0)[0]
+
+        # Feed-Forward Neural Network to predict the graph label
+        out = self.lin_1(torch.nn.functional.relu(self.lin_0(x)))
+
+        return out

From 3f7e2a2f425d26cb9a2862ed4eae40d0cabc6077 Mon Sep 17 00:00:00 2001
From: Mustafa Hajij
Date: Wed, 6 Sep 2023 02:18:23 -0700
Subject: [PATCH 05/31] CAN model

---
 test/nn/cell/test_can.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/test/nn/cell/test_can.py b/test/nn/cell/test_can.py
index cf7dcf78..378d2cb7 100644
--- a/test/nn/cell/test_can.py
+++ b/test/nn/cell/test_can.py
@@ -17,10 +17,10 @@ def test_forward(self):
             in_channels_1=2,
             out_channels=2,
             dropout=0.5,
-            heads=2,
+            heads=1,
             num_classes=1,
-            n_layers=2,
-            att_lift=True,
+            n_layers=1,
+            att_lift=False,
         )
 
         x_0 = torch.rand(2, 2)

From ea73763708528e55cec9be44c8f30589642efb99 Mon Sep 17 00:00:00 2001
From: Mustafa Hajij
Date: Wed, 6 Sep 2023 09:07:20 -0700
Subject: [PATCH 06/31] adding allset

---
 test/nn/hypergraph/test_allset.py  | 30 ++++++++++
 topomodelx/nn/hypergraph/allset.py | 96 ++++++++++++++++++++++++++++++
 2 files changed, 126 insertions(+)
 create mode 100644 test/nn/hypergraph/test_allset.py
 create mode 100644 topomodelx/nn/hypergraph/allset.py

diff --git a/test/nn/hypergraph/test_allset.py b/test/nn/hypergraph/test_allset.py
new file mode 100644
index 00000000..a5ac981a
--- /dev/null
+++ b/test/nn/hypergraph/test_allset.py
@@ -0,0 +1,30 @@
+"""Test the AllSet class."""
+
+import numpy as np
+import torch
+
+from topomodelx.nn.hypergraph.allset import AllSet
+
+
+class TestAllSet:
+    """Test AllSet."""
+
+    def test_forward(self):
+        """Test forward method."""
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        model = AllSet(
+            in_channels=2,
+            hidden_channels=2,
+            out_channels=2,
+            n_layers=1,
+            mlp_num_layers=1,
+        )
+
+        x_0 = torch.rand(2, 2)
+        incidence_1 = torch.from_numpy(np.random.rand(2, 2)).to_sparse()
+
+        x_0 = torch.tensor(x_0).float().to(device)
+        incidence_1 = incidence_1.float().to(device)
+
+        y = model(x_0, incidence_1)
+        assert y.shape == torch.Size([])
diff --git a/topomodelx/nn/hypergraph/allset.py b/topomodelx/nn/hypergraph/allset.py
new file mode 100644
index 00000000..84bdee37
--- /dev/null
+++ b/topomodelx/nn/hypergraph/allset.py
@@ -0,0 +1,96 @@
+"""AllSet class."""
+
+import torch
+
+from topomodelx.nn.hypergraph.allset_layer import AllSetLayer
+
+
+class AllSet(torch.nn.Module):
+    """AllSet Neural Network Module.
+
+    A module that combines multiple AllSet layers to form a neural network.
+
+    Parameters
+    ----------
+    in_channels : int
+        Dimension of the input features.
+    hidden_channels : int
+        Dimension of the hidden features.
+    out_channels : int
+        Dimension of the output features.
+    n_layers : int, optional
+        Number of AllSet layers in the network. Defaults to 2.
+    dropout : float, optional
+        Dropout probability. Defaults to 0.2.
+    mlp_num_layers : int, optional
+        Number of layers in the MLP. Defaults to 2.
+    mlp_activation : callable, optional
+        Activation function used in the MLP. Defaults to None.
+    mlp_dropout : float, optional
+        Dropout probability in the MLP. Defaults to 0.0.
+    mlp_norm : callable, optional
+        Normalization used in the MLP. Defaults to None.
+    """
+
+    def __init__(
+        self,
+        in_channels,
+        hidden_channels,
+        out_channels,
+        n_layers=2,
+        dropout=0.2,
+        mlp_num_layers=2,
+        mlp_activation=None,
+        mlp_dropout=0.0,
+        mlp_norm=None,
+    ):
+        super().__init__()
+        layers = [
+            AllSetLayer(
+                in_channels=in_channels,
+                hidden_channels=hidden_channels,
+                dropout=dropout,
+                mlp_num_layers=mlp_num_layers,
+                mlp_activation=mlp_activation,
+                mlp_dropout=mlp_dropout,
+                mlp_norm=mlp_norm,
+            )
+        ]
+
+        for _ in range(n_layers - 1):
+            layers.append(
+                AllSetLayer(
+                    in_channels=in_channels,
+                    hidden_channels=hidden_channels,
+                    dropout=dropout,
+                    mlp_num_layers=mlp_num_layers,
+                    mlp_activation=mlp_activation,
+                    mlp_dropout=mlp_dropout,
+                    mlp_norm=mlp_norm,
+                )
+            )
+        self.layers = torch.nn.ModuleList(layers)
+        self.linear = torch.nn.Linear(hidden_channels, out_channels)
+
+    def forward(self, x_0, incidence_1):
+        """Forward computation.
+
+        Parameters
+        ----------
+        x_0 : torch.Tensor
+            Input node features.
+        incidence_1 : torch.sparse
+            Incidence matrix, shape = [n_nodes, n_hyperedges].
+
+        Returns
+        -------
+        torch.Tensor
+            Output prediction.
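+            A single scalar for the whole hypergraph, obtained below by max
+            pooling over the node dimension and a sigmoid-activated linear layer.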
+        """
+        # cidx = edge_index[1].min()
+        # edge_index[1] -= cidx
+        # reversed_edge_index = torch.stack(
+        #     [edge_index[1], edge_index[0]], dim=0)
+
+        for layer in self.layers:
+            x_0 = layer(x_0, incidence_1)
+        pooled_x = torch.max(x_0, dim=0)[0]
+        return torch.sigmoid(self.linear(pooled_x))[0]

From 6cb9cba318816f43d368ece63fe42004727665cd Mon Sep 17 00:00:00 2001
From: Mustafa Hajij
Date: Wed, 6 Sep 2023 09:36:44 -0700
Subject: [PATCH 07/31] adding all set transformer

---
 test/nn/hypergraph/test_allset_transformer.py  | 159 +++-----------------
 .../test_allset_transformer_layer.py           | 150 +++++++++++++++++
 topomodelx/nn/hypergraph/allset_transformer.py |  89 ++++++++++
 3 files changed, 259 insertions(+), 139 deletions(-)
 create mode 100644 test/nn/hypergraph/test_allset_transformer_layer.py
 create mode 100644 topomodelx/nn/hypergraph/allset_transformer.py

diff --git a/test/nn/hypergraph/test_allset_transformer.py b/test/nn/hypergraph/test_allset_transformer.py
index 41aeb453..d47aaf0e 100644
--- a/test/nn/hypergraph/test_allset_transformer.py
+++ b/test/nn/hypergraph/test_allset_transformer.py
@@ -1,150 +1,31 @@
 """Test the AllSetTransformer layer."""
-import pytest
+
+import numpy as np
 import torch
 
-from topomodelx.nn.hypergraph.allset_transformer_layer import (
-    MLP,
-    AllSetTransformerLayer,
-)
+from topomodelx.nn.hypergraph.allset_transformer import AllSetTransformer
 
 
-class TestAllSetTransformerLayer:
-    """Test the AllSetTransformer layer."""
+class TestAllSetTransformer:
+    """Test AllSetTransformer."""
 
-    @pytest.fixture
-    def allset_transformer_layer(self):
-        """Return a allsettransformer layer."""
-        in_dim = 10
-        hid_dim = 64
-        heads = 4
-        layer = AllSetTransformerLayer(
-            in_channels=in_dim,
-            hidden_channels=hid_dim,
-            heads=heads,
-            number_queries=1,
-            dropout=0.0,
+    def test_forward(self):
+        """Test forward method."""
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        model = AllSetTransformer(
+            in_channels=2,
+            hidden_channels=2,
+            heads=1,
+            out_channels=1,
+            n_layers=1,
             mlp_num_layers=1,
-            mlp_activation=None,
-            mlp_dropout=0.0,
-            mlp_norm=None,
         )
-        return layer
-
-    def test_forward(self, allset_transformer_layer):
-        """Test the forward pass of the allsettransformer layer."""
-        x_0 = torch.randn(3, 10)
-        incidence_1 = torch.tensor(
-            [[1, 0, 0], [0, 1, 1], [1, 1, 1]], dtype=torch.float32
-        ).to_sparse()
-        output = allset_transformer_layer.forward(x_0, incidence_1)
-        assert output.shape == (3, 64)
-
-    def test_forward_with_invalid_input(self, allset_transformer_layer):
-        """Test the forward pass of the allsettransformer layer with invalid input."""
-        x_0 = torch.randn(4, 10)
-        incidence_1 = torch.tensor(
-            [[1, 0, 0], [0, 1, 1], [1, 1, 1]], dtype=torch.float32
-        ).to_sparse()
-        with pytest.raises(ValueError):
-            allset_transformer_layer.forward(x_0, incidence_1)
-
-    def test_reset_parameters(self, allset_transformer_layer):
-        """Test the reset_parameters method."""
-        in_dim = 10
-        hid_dim = 64
-        heads = 4
-
-        allset_transformer_layer.reset_parameters()
-        assert allset_transformer_layer.vertex2edge.mlp[0].weight.requires_grad
-        assert allset_transformer_layer.edge2vertex.mlp[0].weight.requires_grad
-
-        # Test with attention weights & xavier_uniform
-        allset_transformer_layer.vertex2edge.multihead_att.initialization = (
-            "xavier_uniform"
-        )
-        allset_transformer_layer.edge2vertex.multihead_att.initialization = (
-            "xavier_uniform"
-        )
-        allset_transformer_layer.reset_parameters()
-        assert allset_transformer_layer.vertex2edge.multihead_att.K_weight.shape == (
-            heads,
-            in_dim,
-            hid_dim // heads,
-        )
-        assert allset_transformer_layer.edge2vertex.multihead_att.K_weight.shape == (
-            heads,
-            hid_dim,
-            hid_dim // heads,
-        )
-
-    def test_initialisation_heads_zero(self):
-        """Test the initialisation of the allsettransformer layer with invalid input."""
-        with pytest.raises(ValueError):
-            heads = 0
-            _ = AllSetTransformerLayer(
-                in_channels=10,
-                hidden_channels=64,
-                heads=heads,
-            )
-
-    def test_initialisation_heads_wrong(self):
-        """Test the initialisation of the allsettransformer layer with invalid input."""
-        with pytest.raises(ValueError):
-            in_channels = 10
-            heads = 3
-
-            _ = AllSetTransformerLayer(
-                in_channels=in_channels,
-                hidden_channels=64,
-                heads=heads,
-            )
-
-    def test_initialisation_mlp_num_layers_zero(self):
-        """Test the initialisation of the allsettransformer layer with invalid input."""
-        with pytest.raises(ValueError):
-            mlp_num_layers = 0
-            _ = AllSetTransformerLayer(
-                in_channels=10,
-                hidden_channels=64,
-                heads=4,
-                mlp_num_layers=mlp_num_layers,
-            )
-
-    def test_initialisation_mlp_num_layers_negative(self):
-        """Test the initialisation of the allsettransformer layer with invalid input."""
-        with pytest.raises(ValueError):
-            mlp_num_layers = -1
-            _ = AllSetTransformerLayer(
-                in_channels=10,
-                hidden_channels=64,
-                heads=4,
-                mlp_num_layers=mlp_num_layers,
-            )
-
-    def test_MLP(self):
-        """Test the MLP class.
-
-        (used in AllSetTransformerLayer)
-        """
-        in_channels_ = [10]
-        hidden_channels_ = [[64], [64, 64]]
-        norm_layers = [None, torch.nn.LayerNorm]
-        activation_layers = [torch.nn.ReLU, torch.nn.LeakyReLU]
-        dropouts = [0.0, 0.5]
-        bias_ = [True, False]
-
-        for in_channels in in_channels_:
-            for hidden_channels in hidden_channels_:
-                for norm_layer in norm_layers:
-                    for activation_layer in activation_layers:
-                        for dropout in dropouts:
-                            for bias in bias_:
-                                mlp = MLP(
-                                    in_channels=in_channels,
-                                    hidden_channels=hidden_channels,
-                                    norm_layer=norm_layer,
-                                    activation_layer=activation_layer,
-                                    dropout=dropout,
-                                    bias=bias,
-                                )
-                                assert mlp is not None
+
+        x_0 = torch.rand(2, 2)
+        incidence_1 = torch.from_numpy(np.random.rand(2, 2)).to_sparse()
+
+        x_0 = torch.tensor(x_0).float().to(device)
+        incidence_1 = incidence_1.float().to(device)
+
+        y = model(x_0, incidence_1)
+        assert y.shape == torch.Size([])
diff --git a/test/nn/hypergraph/test_allset_transformer_layer.py b/test/nn/hypergraph/test_allset_transformer_layer.py
new file mode 100644
index 00000000..41aeb453
--- /dev/null
+++ b/test/nn/hypergraph/test_allset_transformer_layer.py
@@ -0,0 +1,150 @@
+"""Test the AllSetTransformer layer."""
+import pytest
+import torch
+
+from topomodelx.nn.hypergraph.allset_transformer_layer import (
+    MLP,
+    AllSetTransformerLayer,
+)
+
+
+class TestAllSetTransformerLayer:
+    """Test the AllSetTransformer layer."""
+
+    @pytest.fixture
+    def allset_transformer_layer(self):
+        """Return an AllSetTransformer layer."""
+        in_dim = 10
+        hid_dim = 64
+        heads = 4
+        layer = AllSetTransformerLayer(
+            in_channels=in_dim,
+            hidden_channels=hid_dim,
+            heads=heads,
+            number_queries=1,
+            dropout=0.0,
+            mlp_num_layers=1,
+            mlp_activation=None,
+            mlp_dropout=0.0,
+            mlp_norm=None,
+        )
+        return layer
+
+    def test_forward(self, allset_transformer_layer):
+        """Test the forward pass of the allsettransformer layer."""
+        x_0 = torch.randn(3, 10)
+        incidence_1 = torch.tensor(
+            [[1, 0, 0], [0, 1, 1], [1, 1, 1]], dtype=torch.float32
+        ).to_sparse()
+        output = allset_transformer_layer.forward(x_0, incidence_1)
+        assert output.shape == (3, 64)
+
+    def test_forward_with_invalid_input(self, allset_transformer_layer):
+        """Test the forward pass of the allsettransformer layer with invalid input."""
+        x_0 = torch.randn(4, 10)
+        incidence_1 = torch.tensor(
+            [[1, 0, 0], [0, 1, 1], [1, 1, 1]], dtype=torch.float32
+        ).to_sparse()
+        with pytest.raises(ValueError):
+            allset_transformer_layer.forward(x_0, incidence_1)
+
+    def test_reset_parameters(self, allset_transformer_layer):
+        """Test the reset_parameters method."""
+        in_dim = 10
+        hid_dim = 64
+        heads = 4
+
+        allset_transformer_layer.reset_parameters()
+        assert allset_transformer_layer.vertex2edge.mlp[0].weight.requires_grad
+        assert allset_transformer_layer.edge2vertex.mlp[0].weight.requires_grad
+
+        # Test with attention weights & xavier_uniform
+        allset_transformer_layer.vertex2edge.multihead_att.initialization = (
+            "xavier_uniform"
+        )
+        allset_transformer_layer.edge2vertex.multihead_att.initialization = (
+            "xavier_uniform"
+        )
+        allset_transformer_layer.reset_parameters()
+        assert allset_transformer_layer.vertex2edge.multihead_att.K_weight.shape == (
+            heads,
+            in_dim,
+            hid_dim // heads,
+        )
+        assert allset_transformer_layer.edge2vertex.multihead_att.K_weight.shape == (
+            heads,
+            hid_dim,
+            hid_dim // heads,
+        )
+
+    def test_initialisation_heads_zero(self):
+        """Test the initialisation of the allsettransformer layer with invalid input."""
+        with pytest.raises(ValueError):
+            heads = 0
+            _ = AllSetTransformerLayer(
+                in_channels=10,
+                hidden_channels=64,
+                heads=heads,
+            )
+
+    def test_initialisation_heads_wrong(self):
+        """Test the initialisation of the allsettransformer layer with invalid input."""
+        with pytest.raises(ValueError):
+            in_channels = 10
+            heads = 3
+
+            _ = AllSetTransformerLayer(
+                in_channels=in_channels,
+                hidden_channels=64,
+                heads=heads,
+            )
+
+    def test_initialisation_mlp_num_layers_zero(self):
+        """Test the initialisation of the allsettransformer layer with invalid input."""
+        with pytest.raises(ValueError):
+            mlp_num_layers = 0
+            _ = AllSetTransformerLayer(
+                in_channels=10,
+                hidden_channels=64,
+                heads=4,
+                mlp_num_layers=mlp_num_layers,
+            )
+
+    def test_initialisation_mlp_num_layers_negative(self):
+        """Test the initialisation of the allsettransformer layer with invalid input."""
+        with pytest.raises(ValueError):
+            mlp_num_layers = -1
+            _ = AllSetTransformerLayer(
+                in_channels=10,
+                hidden_channels=64,
+                heads=4,
+                mlp_num_layers=mlp_num_layers,
+            )
+
+    def test_MLP(self):
+        """Test the MLP class.
+
+        (used in AllSetTransformerLayer)
+        """
+        in_channels_ = [10]
+        hidden_channels_ = [[64], [64, 64]]
+        norm_layers = [None, torch.nn.LayerNorm]
+        activation_layers = [torch.nn.ReLU, torch.nn.LeakyReLU]
+        dropouts = [0.0, 0.5]
+        bias_ = [True, False]
+
+        for in_channels in in_channels_:
+            for hidden_channels in hidden_channels_:
+                for norm_layer in norm_layers:
+                    for activation_layer in activation_layers:
+                        for dropout in dropouts:
+                            for bias in bias_:
+                                mlp = MLP(
+                                    in_channels=in_channels,
+                                    hidden_channels=hidden_channels,
+                                    norm_layer=norm_layer,
+                                    activation_layer=activation_layer,
+                                    dropout=dropout,
+                                    bias=bias,
+                                )
+                                assert mlp is not None
diff --git a/topomodelx/nn/hypergraph/allset_transformer.py b/topomodelx/nn/hypergraph/allset_transformer.py
new file mode 100644
index 00000000..cf5b2ab6
--- /dev/null
+++ b/topomodelx/nn/hypergraph/allset_transformer.py
@@ -0,0 +1,89 @@
+"""AllSetTransformer class."""
+
+import torch
+
+from topomodelx.nn.hypergraph.allset_transformer_layer import AllSetTransformerLayer
+
+
+class AllSetTransformer(torch.nn.Module):
+    """AllSetTransformer Neural Network Module.
+
+    A module that combines multiple AllSetTransformer layers to form a neural network.
+
+    Parameters
+    ----------
+    in_channels : int
+        Dimension of the input features.
+    hidden_channels : int
+        Dimension of the hidden features.
+    out_channels : int
+        Dimension of the output features.
+    n_layers : int, optional
+        Number of AllSetTransformer layers in the network. Defaults to 1.
+    heads : int, optional
+        Number of attention heads. Defaults to 4.
+    dropout : float, optional
+        Dropout probability. Defaults to 0.2.
+    mlp_num_layers : int, optional
+        Number of layers in the MLP. Defaults to 2.
+    mlp_dropout : float, optional
+        Dropout probability in the MLP. Defaults to 0.0.
+    """
+
+    def __init__(
+        self,
+        in_channels,
+        hidden_channels,
+        out_channels,
+        n_layers=1,
+        heads=4,
+        dropout=0.2,
+        mlp_num_layers=2,
+        mlp_dropout=0.0,
+    ):
+        super().__init__()
+        layers = [
+            AllSetTransformerLayer(
+                in_channels=in_channels,
+                hidden_channels=hidden_channels,
+                dropout=dropout,
+                heads=heads,
+                mlp_num_layers=mlp_num_layers,
+                mlp_dropout=mlp_dropout,
+            )
+        ]
+
+        for _ in range(n_layers - 1):
+            layers.append(
+                AllSetTransformerLayer(
+                    in_channels=hidden_channels,
+                    hidden_channels=hidden_channels,
+                    dropout=dropout,
+                    heads=heads,
+                    mlp_num_layers=mlp_num_layers,
+                    mlp_dropout=mlp_dropout,
+                )
+            )
+        self.layers = torch.nn.ModuleList(layers)
+        self.linear = torch.nn.Linear(hidden_channels, out_channels)
+
+    def forward(self, x_0, incidence_1):
+        """Forward computation.
+
+        Parameters
+        ----------
+        x_0 : torch.Tensor
+            Input node features.
+        incidence_1 : torch.sparse
+            Incidence matrix, shape = [n_nodes, n_hyperedges].
+
+        Returns
+        -------
+        torch.Tensor
+            Output prediction.
+        """
+        for layer in self.layers:
+            x_0 = layer(x_0, incidence_1)
+        pooled_x = torch.max(x_0, dim=0)[0]
+        return torch.sigmoid(self.linear(pooled_x))[0]

From cc18b6663a43851a755ef1a3407da849fa4dc993 Mon Sep 17 00:00:00 2001
From: Mustafa Hajij
Date: Wed, 6 Sep 2023 10:20:21 -0700
Subject: [PATCH 08/31] adding dhgcn

---
 test/nn/hypergraph/test_dhgcn.py  | 19 +++++++++
 topomodelx/nn/hypergraph/dhgcn.py | 56 +++++++++++++++++++++
 2 files changed, 75 insertions(+)
 create mode 100644 test/nn/hypergraph/test_dhgcn.py
 create mode 100644 topomodelx/nn/hypergraph/dhgcn.py

diff --git a/test/nn/hypergraph/test_dhgcn.py b/test/nn/hypergraph/test_dhgcn.py
new file mode 100644
index 00000000..31b49142
--- /dev/null
+++ b/test/nn/hypergraph/test_dhgcn.py
@@ -0,0 +1,19 @@
+"""Test the DHGCN class."""
+
+import torch
+
+from topomodelx.nn.hypergraph.dhgcn import DHGCN
+
+
+class TestDHGCN:
+    """Test the DHGCN."""
+
+    def test_forward(self):
+        """Test forward method."""
+        # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        model = DHGCN(channels_node=2, n_layers=2)
+
+        x_0 = torch.rand(2, 2)
+
+        y = model(x_0)
+        assert y.shape == torch.Size([1])
diff --git a/topomodelx/nn/hypergraph/dhgcn.py b/topomodelx/nn/hypergraph/dhgcn.py
new file mode 100644
index 00000000..f8eef11c
--- /dev/null
+++ b/topomodelx/nn/hypergraph/dhgcn.py
@@ -0,0 +1,56 @@
+"""DHGCN class."""
+
+import torch
+
+from topomodelx.nn.hypergraph.dhgcn_layer import DHGCNLayer
+
+
+class DHGCN(torch.nn.Module):
+    """Neural network implementation of DHGCN for hypergraph classification.
+
+    Only dynamic topology is used here.
+
+    Parameters
+    ----------
+    channels_node : int
+        Dimension of node features.
+    n_layers : int, default = 2
+        Number of message passing layers.
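+
+    Examples
+    --------
+    A minimal sketch with random node features, mirroring the unit test
+    (eight nodes, as in the later test fix):
+
+    >>> model = DHGCN(channels_node=8, n_layers=2)
+    >>> x_0 = torch.rand(8, 8)
+    >>> y = model(x_0)  # scalar label for the whole complex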
+    """
+
+    def __init__(self, channels_node, n_layers=2):
+        super().__init__()
+        layers = []
+        for _ in range(n_layers):
+            layers.append(
+                DHGCNLayer(
+                    in_channels=channels_node,
+                    intermediate_channels=channels_node,
+                    out_channels=channels_node,
+                )
+            )
+        self.layers = torch.nn.ModuleList(layers)
+        self.linear = torch.nn.Linear(channels_node, 1)
+
+    def forward(self, x_0):
+        """Forward computation through layers, then global average pooling, then linear layer.
+
+        Parameters
+        ----------
+        x_0 : tensor
+            shape = [n_nodes, channels_node]
+            Node features.
+
+        Returns
+        -------
+        _ : tensor
+            shape = []
+            Label assigned to whole complex.
+        """
+        for layer in self.layers:
+            x_0 = layer(x_0)
+        pooled_x = torch.mean(x_0, dim=0)
+        output = self.linear(pooled_x)
+        return output[0]

From 392e6c787be101df67bee1bf6eed8d98136d13e0 Mon Sep 17 00:00:00 2001
From: Mustafa Hajij
Date: Wed, 6 Sep 2023 12:11:41 -0700
Subject: [PATCH 09/31] adding hmpnn

---
 test/nn/hypergraph/test_hmpnn.py  |  19 ++++
 topomodelx/nn/cell/can_bis.py     | 141 ------------------------------
 topomodelx/nn/hypergraph/hmpnn.py |  80 +++++++++++++++++
 3 files changed, 99 insertions(+), 141 deletions(-)
 create mode 100644 test/nn/hypergraph/test_hmpnn.py
 delete mode 100644 topomodelx/nn/cell/can_bis.py
 create mode 100644 topomodelx/nn/hypergraph/hmpnn.py

diff --git a/test/nn/hypergraph/test_hmpnn.py b/test/nn/hypergraph/test_hmpnn.py
new file mode 100644
index 00000000..46d37466
--- /dev/null
+++ b/test/nn/hypergraph/test_hmpnn.py
@@ -0,0 +1,19 @@
+"""Test the HMPNN class."""
+
+import torch
+
+from topomodelx.nn.hypergraph.hmpnn import HMPNN
+
+
+class TestHMPNN:
+    """Test the HMPNN."""
+
+    def test_forward(self):
+        """Test forward method."""
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        model = HMPNN(8, (8, 8), 1, 1).to(device)
+
+        x_0 = torch.rand(8, 8)
+
+        y = model(x_0)
+        assert y.shape == torch.Size([1])
diff --git a/topomodelx/nn/cell/can_bis.py b/topomodelx/nn/cell/can_bis.py
deleted file mode 100644
index dbd8a565..00000000
--- a/topomodelx/nn/cell/can_bis.py
+++ /dev/null
@@ -1,141 +0,0 @@
-"""CAN class."""
-
-import torch
-import torch.nn.functional as F
-
-from topomodelx.nn.cell.can_layer import CANLayer, MultiHeadLiftLayer, PoolLayer
-
-
-class CAN(torch.nn.Module):
-    """CAN (Cell Attention Network) module for graph classification.
-
-    Parameters
-    ----------
-    in_channels_0: int
-        Number of input channels for the node-level input.
-    in_channels_1: int
-        Number of input channels for the edge-level input.
-    out_channels: int
-        Number of output channels.
-    num_classes: int
-        Number of output classes.
-    dropout: float, optional
-        Dropout probability. Default is 0.5.
-    heads: int, optional
-        Number of attention heads. Default is 3.
-    concat: bool, optional
-        Whether to concatenate the output channels of attention heads. Default is True.
-    skip_connection: bool, optional
-        Whether to use skip connections. Default is True.
-    att_activation: torch.nn.Module, optional
-        Activation function for attention mechanism. Default is torch.nn.LeakyReLU(0.2).
-    n_layers: int, optional
-        Number of CAN layers. Default is 2.
-    att_lift: bool, optional
-        Whether to lift the signal from node-level to edge-level input. Default is True.
-    """
-
-    def __init__(
-        self,
-        in_channels_0,
-        in_channels_1,
-        out_channels,
-        num_classes,
-        dropout=0.5,
-        heads=3,
-        concat=True,
-        skip_connection=True,
-        att_activation=torch.nn.LeakyReLU(0.2),
-        n_layers=2,
-        att_lift=True,
-    ):
-        super().__init__()
-
-        if att_lift:
-            self.lift_layer = MultiHeadLiftLayer(
-                in_channels_0=in_channels_0,
-                heads=in_channels_0,
-                signal_lift_dropout=0.5,
-            )
-            in_channels_1 = in_channels_1 + in_channels_0
-
-        layers = []
-
-        layers.append(
-            CANLayer(
-                in_channels=in_channels_1,
-                out_channels=out_channels,
-                heads=heads,
-                concat=concat,
-                skip_connection=skip_connection,
-                att_activation=att_activation,
-                aggr_func="sum",
-                update_func="relu",
-            )
-        )
-
-        for _ in range(n_layers - 1):
-            layers.append(
-                CANLayer(
-                    in_channels=out_channels * heads,
-                    out_channels=out_channels,
-                    dropout=dropout,
-                    heads=heads,
-                    concat=concat,
-                    skip_connection=skip_connection,
-                    att_activation=att_activation,
-                    aggr_func="sum",
-                    update_func="relu",
-                )
-            )
-
-        layers.append(
-            PoolLayer(
-                k_pool=0.5,
-                in_channels_0=out_channels * heads,
-                signal_pool_activation=torch.nn.Sigmoid(),
-                readout=True,
-            )
-        )
-
-        self.layers = torch.nn.ModuleList(layers)
-        self.lin_0 = torch.nn.Linear(heads * out_channels, 128)
-        self.lin_1 = torch.nn.Linear(128, num_classes)
-
-    def forward(
-        self, x_0, x_1, neighborhood_0_to_0, lower_neighborhood, upper_neighborhood
-    ):
-        """Forward computation through layers.
-
-        Parameters
-        ----------
-        x_0 : torch.Tensor, shape = [n_nodes, in_channels_0]
-            Input features on the nodes (0-cells).
-        x_1 : torch.Tensor, shape = [n_edges, in_channels_1]
-            Input features on the edges (1-cells).
-        lower_neighborhood : tensor, shape = [n_edges, n_edges]
-            Lower-neighborhood matrix of the edges (1-cells).
-        upper_neighborhood : tensor, shape = [n_edges, n_edges]
-            Upper-neighborhood matrix of the edges (1-cells).
-
-        Returns
-        -------
-        torch.Tensor, shape = [num_classes]
-            Predicted logits for the whole complex.
-        """
-        if hasattr(self, "lift_layer"):
-            x_1 = self.lift_layer(x_0, neighborhood_0_to_0, x_1)
-
-        for layer in self.layers:
-            if isinstance(layer, PoolLayer):
-                x_1, lower_neighborhood, upper_neighborhood = layer(
-                    x_1, lower_neighborhood, upper_neighborhood
-                )
-            else:
-                x_1 = layer(x_1, lower_neighborhood, upper_neighborhood)
-                x_1 = F.dropout(x_1, p=0.5, training=self.training)
-
-        # max pooling over all nodes in each graph
-        x = x_1.max(dim=0)[0]
-
-        # Feed-Forward Neural Network to predict the graph label
-        out = self.lin_1(torch.nn.functional.relu(self.lin_0(x)))
-
-        return out
diff --git a/topomodelx/nn/hypergraph/hmpnn.py b/topomodelx/nn/hypergraph/hmpnn.py
new file mode 100644
index 00000000..daa8335d
--- /dev/null
+++ b/topomodelx/nn/hypergraph/hmpnn.py
@@ -0,0 +1,80 @@
+"""HMPNN class."""
+
+
+import torch
+
+from topomodelx.nn.hypergraph.hmpnn_layer import HMPNNLayer
+
+
+class HMPNN(torch.nn.Module):
+    """Neural network implementation of HMPNN.
+
+    Parameters
+    ----------
+    in_features : int
+        Dimension of input features.
+    hidden_features : Tuple[int]
+        A tuple of hidden feature dimensions to gradually reduce node/hyperedge
+        representations feature dimension from in_features to the last item in the tuple.
+    num_classes : int
+        Number of classes.
+    n_layer : int, default = 2
+        Number of HMPNNLayer layers.
+    adjacency_dropout_rate : float, default = 0.7
+        Adjacency dropout rate.
+    regular_dropout_rate : float, default = 0.5
+        Regular dropout rate applied on features.
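+
+    Examples
+    --------
+    A minimal sketch with random features and a random incidence matrix,
+    mirroring the unit test:
+
+    >>> model = HMPNN(8, (8, 8), 1, 1)
+    >>> x_0, x_1 = torch.rand(8, 8), torch.rand(8, 8)
+    >>> incidence_1 = torch.rand(8, 8).to_sparse()
+    >>> y = model(x_0, x_1, incidence_1)  # y.shape == torch.Size([8, 1])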
+    """
+
+    def __init__(
+        self,
+        in_features,
+        hidden_features,
+        num_classes,
+        n_layer=2,
+        adjacency_dropout_rate=0.7,
+        regular_dropout_rate=0.5,
+    ):
+        super().__init__()
+        hidden_features = (in_features,) + hidden_features
+        self.to_hidden_linear = torch.nn.Sequential(
+            *[
+                torch.nn.Linear(hidden_features[i], hidden_features[i + 1])
+                for i in range(len(hidden_features) - 1)
+            ]
+        )
+        self.layers = torch.nn.ModuleList(
+            [
+                HMPNNLayer(
+                    hidden_features[-1],
+                    adjacency_dropout=adjacency_dropout_rate,
+                    updating_dropout=regular_dropout_rate,
+                )
+                for _ in range(n_layer)
+            ]
+        )
+        self.to_categories_linear = torch.nn.Linear(hidden_features[-1], num_classes)
+
+    def forward(self, x_0, x_1, incidence_1):
+        """Forward computation through layers.
+
+        Parameters
+        ----------
+        x_0 : torch.Tensor
+            Node features with shape [n_nodes, in_features]
+        x_1 : torch.Tensor
+            Hyperedge features with shape [n_hyperedges, in_features]
+        incidence_1 : torch.sparse.Tensor
+            Incidence matrix (B1) of shape [n_nodes, n_hyperedges]
+
+        Returns
+        -------
+        y_pred : torch.Tensor
+            Predicted logits with shape [n_nodes, num_classes]
+        """
+        x_0 = self.to_hidden_linear(x_0)
+        x_1 = self.to_hidden_linear(x_1)
+        for layer in self.layers:
+            x_0, x_1 = layer(x_0, x_1, incidence_1)
+
+        return self.to_categories_linear(x_0)

From 57f824af65abc9f2c9ad914dd8d74139b2b7a301 Mon Sep 17 00:00:00 2001
From: Mustafa Hajij
Date: Wed, 6 Sep 2023 20:28:03 -0700
Subject: [PATCH 10/31] fixing unit test for hmpnn

---
 test/nn/hypergraph/test_hmpnn.py | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/test/nn/hypergraph/test_hmpnn.py b/test/nn/hypergraph/test_hmpnn.py
index 46d37466..b7ad0867 100644
--- a/test/nn/hypergraph/test_hmpnn.py
+++ b/test/nn/hypergraph/test_hmpnn.py
@@ -1,5 +1,6 @@
 """Test the HMPNN class."""
 
+import numpy as np
 import torch
 
 from topomodelx.nn.hypergraph.hmpnn import HMPNN
@@ -14,6 +15,15 @@ def test_forward(self):
         model = HMPNN(8, (8, 8), 1, 1).to(device)
 
         x_0 = torch.rand(8, 8)
+        x_1 = torch.rand(8, 8)
 
-        y = model(x_0)
-        assert y.shape == torch.Size([1])
+        adjacency_1 = torch.from_numpy(np.random.rand(8, 8)).to_sparse()
+
+        x_0, x_1 = (
+            torch.tensor(x_0).float().to(device),
+            torch.tensor(x_1).float().to(device),
+        )
+        adjacency_1 = adjacency_1.float().to(device)
+
+        y = model(x_0, x_1, adjacency_1)
+        assert y.shape == torch.Size([8, 1])

From 076b44aabd882ff1cacc6a97df76d82f0084ac4b Mon Sep 17 00:00:00 2001
From: Mustafa Hajij
Date: Wed, 6 Sep 2023 20:53:02 -0700
Subject: [PATCH 11/31] adding hnhn

---
 test/nn/hypergraph/test_hnhn.py  | 37 ++++++++++++++
 topomodelx/nn/hypergraph/hnhn.py | 72 ++++++++++++++++++++++++++++
 2 files changed, 109 insertions(+)
 create mode 100644 test/nn/hypergraph/test_hnhn.py
 create mode 100644 topomodelx/nn/hypergraph/hnhn.py

diff --git a/test/nn/hypergraph/test_hnhn.py b/test/nn/hypergraph/test_hnhn.py
new file mode 100644
index 00000000..99478d31
--- /dev/null
+++ b/test/nn/hypergraph/test_hnhn.py
@@ -0,0 +1,37 @@
+"""Test the HNHN class."""
+
+import numpy as np
+import torch
+
+from topomodelx.nn.hypergraph.hnhn import HNHN
+
+
+class TestHNHN:
+    """Test the HNHN."""
+
+    def test_forward(self):
+        """Test forward method."""
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+        adjacency_1 = torch.from_numpy(np.random.rand(2, 2)).to_sparse()
+        adjacency_1 = adjacency_1.float().to(device)
+
+        model = HNHN(
+            channels_node=2,
+            channels_edge=2,
+            incidence_1=adjacency_1,
+            n_classes=1,
+            n_layers=2,
+        ).to(device)
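+        # NB: HNHN receives the incidence matrix at construction time,
+        # so forward only takes the node and hyperedge features.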
+
+        x_0 = torch.rand(2, 2)
+        x_1 = torch.rand(2, 2)
+
+        x_0, x_1 = (
+            torch.tensor(x_0).float().to(device),
+            torch.tensor(x_1).float().to(device),
+        )
+
+        y1, y2 = model(x_0, x_1)
+        assert y1.shape == torch.Size([2, 1])
+        assert y2.shape == torch.Size([2, 1])
diff --git a/topomodelx/nn/hypergraph/hnhn.py b/topomodelx/nn/hypergraph/hnhn.py
new file mode 100644
index 00000000..ac925c2e
--- /dev/null
+++ b/topomodelx/nn/hypergraph/hnhn.py
@@ -0,0 +1,72 @@
+"""HNHN class."""
+
+import torch
+
+from topomodelx.nn.hypergraph.hnhn_layer import HNHNLayer
+
+
+class HNHN(torch.nn.Module):
+    """Hypergraph Networks with Hyperedge Neurons. Implementation for multiclass node classification.
+
+    Parameters
+    ----------
+    channels_node : int
+        Dimension of node features.
+    channels_edge : int
+        Dimension of edge features.
+    incidence_1 : torch.sparse
+        Incidence matrix mapping edges to nodes (B_1).
+        shape=[n_nodes, n_edges]
+    n_classes : int
+        Number of classes.
+    n_layers : int
+        Number of HNHN message passing layers.
+    """
+
+    def __init__(
+        self, channels_node, channels_edge, incidence_1, n_classes, n_layers=2
+    ):
+        super().__init__()
+        self.layers = torch.nn.ModuleList(
+            [
+                HNHNLayer(
+                    channels_node=channels_node,
+                    channels_edge=channels_edge,
+                    incidence_1=incidence_1,
+                )
+                for _ in range(n_layers)
+            ]
+        )
+        self.linear = torch.nn.Linear(channels_node, n_classes)
+
+    def forward(self, x_0, x_1):
+        """Forward computation.
+
+        Parameters
+        ----------
+        x_0 : torch.Tensor
+            shape = [n_nodes, channels_node]
+            Hypernode features.
+
+        x_1 : torch.Tensor
+            shape = [n_edges, channels_edge]
+            Hyperedge features.
+
+        Returns
+        -------
+        logits : torch.Tensor
+            The predicted node logits
+            shape = [n_nodes, n_classes]
+        classes : torch.Tensor
+            The predicted node class
+            shape = [n_nodes]
+        """
+        for layer in self.layers:
+            x_0, x_1 = layer(x_0, x_1)
+        logits = self.linear(x_0)
+        classes = torch.softmax(logits, -1).argmax(-1)
+        return logits, classes

From 855fb8d249fe1ab905821f94ebb65a137a6a4a75 Mon Sep 17 00:00:00 2001
From: Mustafa Hajij
Date: Wed, 6 Sep 2023 21:18:18 -0700
Subject: [PATCH 12/31] adding hypergat

---
 test/nn/hypergraph/test_hypergat.py  | 26 +++++++++++
 topomodelx/nn/hypergraph/hypergat.py | 55 ++++++++++++++++++++++
 2 files changed, 81 insertions(+)
 create mode 100644 test/nn/hypergraph/test_hypergat.py
 create mode 100644 topomodelx/nn/hypergraph/hypergat.py

diff --git a/test/nn/hypergraph/test_hypergat.py b/test/nn/hypergraph/test_hypergat.py
new file mode 100644
index 00000000..2455a01f
--- /dev/null
+++ b/test/nn/hypergraph/test_hypergat.py
@@ -0,0 +1,26 @@
+"""Test the HyperGAT class."""
+
+import numpy as np
+import torch
+
+from topomodelx.nn.hypergraph.hypergat import HyperGAT
+
+
+class TestHyperGAT:
+    """Test the HyperGAT."""
+
+    def test_forward(self):
+        """Test forward method."""
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+        incidence = torch.from_numpy(np.random.rand(2, 2)).to_sparse()
+        incidence = incidence.float().to(device)
+        model = HyperGAT(in_channels=2, out_channels=2, n_layers=1).to(device)
+
+        x_0 = torch.rand(2, 2)
+
+        x_0 = torch.tensor(x_0).float().to(device)
+
+        y1 = model(x_0, incidence)
+
+        assert len(y1) != 0
diff --git a/topomodelx/nn/hypergraph/hypergat.py b/topomodelx/nn/hypergraph/hypergat.py
new file mode 100644
index 00000000..1ca4c265
--- /dev/null
+++ b/topomodelx/nn/hypergraph/hypergat.py
@@ -0,0 +1,55 @@
+"""HyperGAT class."""
+
+import torch
+
+from topomodelx.nn.hypergraph.hypergat_layer import HyperGATLayer
+
+
+class HyperGAT(torch.nn.Module):
+    """Neural network implementation of HyperGAT for hypergraph classification.
+
+    Parameters
+    ----------
+    in_channels : int
+        Dimension of the input features.
+    out_channels : int
+        Dimension of the output features.
+    n_layers : int, default = 2
+        Number of message passing layers.
+    """
+
+    def __init__(self, in_channels, out_channels, n_layers=2):
+        super().__init__()
+        layers = []
+        layers.append(HyperGATLayer(in_channels=in_channels, out_channels=out_channels))
+        for _ in range(1, n_layers):
+            layers.append(
+                HyperGATLayer(in_channels=out_channels, out_channels=out_channels)
+            )
+        self.layers = torch.nn.ModuleList(layers)
+        self.linear = torch.nn.Linear(out_channels, 1)
+
+    def forward(self, x_1, incidence_1):
+        """Forward computation through layers, then linear layer, then global max pooling.
+
+        Parameters
+        ----------
+        x_1 : tensor
+            shape = [n_edges, channels_edge]
+            Edge features.
+
+        incidence_1 : tensor
+            shape = [n_nodes, n_edges]
+            Boundary matrix of rank 1.
+
+        Returns
+        -------
+        _ : tensor
+            shape = [1]
+            Label assigned to whole complex.
+        """
+        for layer in self.layers:
+            x_1 = layer.forward(x_1, incidence_1)
+        pooled_x = torch.max(x_1, dim=0)[0]
+        return torch.sigmoid(self.linear(pooled_x))[0]

From e3c49206a9c8e4e8d2b389cd0863992f85fc1c75 Mon Sep 17 00:00:00 2001
From: Mustafa Hajij
Date: Wed, 6 Sep 2023 21:20:32 -0700
Subject: [PATCH 13/31] fixing dhgcn test case

---
 test/nn/hypergraph/test_dhgcn.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/nn/hypergraph/test_dhgcn.py b/test/nn/hypergraph/test_dhgcn.py
index 31b49142..b60f672e 100644
--- a/test/nn/hypergraph/test_dhgcn.py
+++ b/test/nn/hypergraph/test_dhgcn.py
@@ -11,9 +11,9 @@ class TestDHGCN:
     def test_forward(self):
         """Test forward method."""
         # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-        model = DHGCN(channels_node=2, n_layers=2)
+        model = DHGCN(channels_node=8, n_layers=2)
 
-        x_0 = torch.rand(2, 2)
+        x_0 = torch.rand(8, 8)
 
         y = model(x_0)
         assert y.shape == torch.Size([1])

From 81fbdb34929b000d4f23aa6c395b5c9babb3e61a Mon Sep 17 00:00:00 2001
From: Mustafa Hajij
Date: Wed, 6 Sep 2023 21:45:52 -0700
Subject: [PATCH 14/31] add sage model

---
 test/nn/hypergraph/test_hypersage.py  | 30 +++++++++++
 topomodelx/nn/hypergraph/hypersage.py | 61 +++++++++++++++++++++
 2 files changed, 91 insertions(+)
 create mode 100644 test/nn/hypergraph/test_hypersage.py
 create mode 100644 topomodelx/nn/hypergraph/hypersage.py

diff --git a/test/nn/hypergraph/test_hypersage.py b/test/nn/hypergraph/test_hypersage.py
new file mode 100644
index 00000000..69a0d74a
--- /dev/null
+++ b/test/nn/hypergraph/test_hypersage.py
@@ -0,0 +1,30 @@
+"""Test the HyperSAGE class."""
+
+import numpy as np
+import torch
+
+from topomodelx.nn.hypergraph.hypersage import HyperSAGE
+
+
+class TestHyperSAGE:
+    """Test the HyperSAGE."""
+
+    def test_forward(self):
+        """Test forward method."""
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+        incidence = torch.from_numpy(np.random.rand(2, 2)).to_sparse()
+        incidence = incidence.float().to(device)
+        model = HyperSAGE(
+            in_channels=2,
+            out_channels=2,
+            n_layers=2,
+            initialization="xavier_uniform",
+        ).to(device)
+        x_0 = torch.rand(2, 2)
+
+        x_0 = torch.tensor(x_0).float().to(device)
+
+        y1 = model(x_0, incidence)
+
+        assert y1.shape == torch.Size([])
diff --git a/topomodelx/nn/hypergraph/hypersage.py b/topomodelx/nn/hypergraph/hypersage.py
new file mode 100644
index 00000000..a89d66be
--- /dev/null
+++ b/topomodelx/nn/hypergraph/hypersage.py
@@ -0,0 +1,61 @@
+"""HyperSAGE class."""
+
+import torch
+
+from topomodelx.nn.hypergraph.hypersage_layer import HyperSAGELayer
+
+
+class HyperSAGE(torch.nn.Module):
+    """Neural network implementation of HyperSAGE for hypergraph classification.
+
+    Parameters
+    ----------
+    in_channels : int
+        Dimension of the input features.
+    out_channels : int
+        Dimension of the output features.
+    n_layers : int, default = 2
+        Number of message passing layers.
+    **kwargs : optional
+        Additional keyword arguments forwarded to HyperSAGELayer,
+        e.g. the initialization scheme or the device to train on.
+    """
+
+    def __init__(self, in_channels, out_channels, n_layers=2, **kwargs):
+        super().__init__()
+        layers = []
+        layers.append(
+            HyperSAGELayer(in_channels=in_channels, out_channels=out_channels, **kwargs)
+        )
+        for _ in range(1, n_layers):
+            layers.append(
+                HyperSAGELayer(
+                    in_channels=out_channels, out_channels=out_channels, **kwargs
+                )
+            )
+        self.layers = torch.nn.ModuleList(layers)
+        self.linear = torch.nn.Linear(out_channels, 1)
+
+    def forward(self, x, incidence):
+        """Forward computation through layers, then linear layer, then global max pooling.
+
+        Parameters
+        ----------
+        x : tensor
+            shape = [n_nodes, features_nodes]
+            Node features.
+
+        incidence : tensor
+            shape = [n_nodes, n_edges]
+            Boundary matrix of rank 1.
+
+        Returns
+        -------
+        _ : tensor
+            shape = [1]
+            Label assigned to whole complex.
+        """
+        for layer in self.layers:
+            x = layer.forward(x, incidence)
+        pooled_x = torch.max(x, dim=0)[0]
+        return torch.sigmoid(self.linear(pooled_x))[0]

From 03810a5acc80bcbe040e66d494516f6483b0a37a Mon Sep 17 00:00:00 2001
From: Mustafa Hajij
Date: Wed, 6 Sep 2023 21:53:40 -0700
Subject: [PATCH 15/31] fix unit tests

---
 test/nn/hypergraph/test_dhgcn.py    | 2 +-
 test/nn/hypergraph/test_hnhn.py     | 2 +-
 test/nn/hypergraph/test_hypergat.py | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/test/nn/hypergraph/test_dhgcn.py b/test/nn/hypergraph/test_dhgcn.py
index b60f672e..79bc4f3b 100644
--- a/test/nn/hypergraph/test_dhgcn.py
+++ b/test/nn/hypergraph/test_dhgcn.py
@@ -16,4 +16,4 @@ def test_forward(self):
         x_0 = torch.rand(8, 8)
 
         y = model(x_0)
-        assert y.shape == torch.Size([1])
+        assert y.shape == torch.Size([])
diff --git a/test/nn/hypergraph/test_hnhn.py b/test/nn/hypergraph/test_hnhn.py
index 99478d31..831fde7e 100644
--- a/test/nn/hypergraph/test_hnhn.py
+++ b/test/nn/hypergraph/test_hnhn.py
@@ -34,4 +34,4 @@ def test_forward(self):
 
         y1, y2 = model(x_0, x_1)
         assert y1.shape == torch.Size([2, 1])
-        assert y2.shape == torch.Size([2, 1])
+        assert y2.shape == torch.Size([2])
diff --git a/test/nn/hypergraph/test_hypergat.py b/test/nn/hypergraph/test_hypergat.py
index 2455a01f..d1a3e6a6 100644
--- a/test/nn/hypergraph/test_hypergat.py
+++ b/test/nn/hypergraph/test_hypergat.py
@@ -23,4 +23,4 @@ def test_forward(self):
 
         y1 = model(x_0, incidence)
 
-        assert len(y1) != 0
+        assert len(y1.shape) != -1

From 0cffe78521782bfe6db8f51213c7d2bd92df14b2 Mon Sep 17 00:00:00 2001
From: Mustafa Hajij
Date: Wed, 6 Sep 2023 22:00:48 -0700
Subject: [PATCH 16/31] add unigcn

---
 test/nn/hypergraph/test_unigcn.py  | 26 ++++++++++
 topomodelx/nn/hypergraph/unigcn.py | 57 ++++++++++++++++++++++++
 2 files changed, 83 insertions(+)
 create mode 100644 test/nn/hypergraph/test_unigcn.py
 create mode 100644 topomodelx/nn/hypergraph/unigcn.py

diff --git a/test/nn/hypergraph/test_unigcn.py b/test/nn/hypergraph/test_unigcn.py
new file mode 100644
index 00000000..393e7a60
00000000..393e7a60 --- /dev/null +++ b/test/nn/hypergraph/test_unigcn.py @@ -0,0 +1,26 @@ +"""Test the UniGCN class.""" + +import numpy as np +import torch + +from topomodelx.nn.hypergraph.unigcn import UniGCN + + +class TestUniGCN: + """Test the UniGCN.""" + + def test_fowared(self): + """Test forward method.""" + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + incidence = torch.from_numpy(np.random.rand(2, 2)).to_sparse() + incidence = incidence.float().to(device) + model = UniGCN(channels_edge=2, channels_node=2, n_layers=2).to(device) + + x_0 = torch.rand(2, 2) + + x_0 = torch.tensor(x_0).float().to(device) + + y1 = model(x_0, incidence) + + assert y1.shape == torch.Size([1]) diff --git a/topomodelx/nn/hypergraph/unigcn.py b/topomodelx/nn/hypergraph/unigcn.py new file mode 100644 index 00000000..376573ab --- /dev/null +++ b/topomodelx/nn/hypergraph/unigcn.py @@ -0,0 +1,57 @@ +"""UniGCN class.""" + +import torch + +from topomodelx.nn.hypergraph.unigcn_layer import UniGCNLayer + + +class UniGCN(torch.nn.Module): + """Neural network implementation of UniGCN for hypergraph classification. + + Parameters + ---------- + channels_edge : int + Dimension of edge features + channels_node : int + Dimension of node features + n_layer : 2 + Amount of message passing layers. + + """ + + def __init__(self, channels_edge, channels_node, n_layers=2): + super().__init__() + layers = [] + for _ in range(n_layers): + layers.append( + UniGCNLayer( + in_channels=channels_edge, + out_channels=channels_edge, + ) + ) + self.layers = torch.nn.ModuleList(layers) + self.linear = torch.nn.Linear(channels_edge, 1) + + def forward(self, x_1, incidence_1): + """Forward computation through layers, then linear layer, then global max pooling. + + Parameters + ---------- + x_1 : tensor + shape = [n_edges, channels_edge] + Edge features. + + incidence_1 : tensor + shape = [n_nodes, n_edges] + Boundary matrix of rank 1. + + Returns + ------- + _ : tensor + shape = [1] + Label assigned to whole complex. 
+ """ + for layer in self.layers: + x_1 = layer(x_1, incidence_1) + pooled_x = torch.max(x_1, dim=0)[0] + return torch.sigmoid(self.linear(pooled_x)) From 0cffe78521782bfe6db8f51213c7d2bd92df14b2 Mon Sep 17 00:00:00 2001 From: Mustafa Hajij Date: Wed, 6 Sep 2023 22:08:17 -0700 Subject: [PATCH 17/31] adding unigcn2 --- test/nn/hypergraph/test_unigcnii.py | 26 ++++++++++++ topomodelx/nn/hypergraph/unigcnii.py | 63 ++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+) create mode 100644 test/nn/hypergraph/test_unigcnii.py create mode 100644 topomodelx/nn/hypergraph/unigcnii.py diff --git a/test/nn/hypergraph/test_unigcnii.py b/test/nn/hypergraph/test_unigcnii.py new file mode 100644 index 00000000..3b928b1e --- /dev/null +++ b/test/nn/hypergraph/test_unigcnii.py @@ -0,0 +1,26 @@ +"""Test the UniGCNII class.""" + +import numpy as np +import torch + +from topomodelx.nn.hypergraph.unigcnii import UniGCNII + + +class TestUniGCNII: + """Test the UniGCNII.""" + + def test_fowared(self): + """Test forward method.""" + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + incidence = torch.from_numpy(np.random.rand(2, 2)).to_sparse() + incidence = incidence.float().to(device) + model = UniGCNII(num_classes=1, in_features=2, num_layers=2).to(device) + + x_0 = torch.rand(2, 2) + + x_0 = torch.tensor(x_0).float().to(device) + + y1 = model(x_0, incidence) + + assert y1.shape == torch.Size([1]) diff --git a/topomodelx/nn/hypergraph/unigcnii.py b/topomodelx/nn/hypergraph/unigcnii.py new file mode 100644 index 00000000..8428bd99 --- /dev/null +++ b/topomodelx/nn/hypergraph/unigcnii.py @@ -0,0 +1,63 @@ +"""UniGCNII class.""" + +import torch + +from topomodelx.nn.hypergraph.unigcnii_layer import UniGCNIILayer + + +class UniGCNII(torch.nn.Module): + """Hypergraph neural network utilizing the UniGCNII layer for node-level classification. + + Parameters + ---------- + num_classes: int, default=2 + Number of classes used for node classification. + in_features: int, default=1 + Number of input features on the nodes. + n_layers: int, default=2 + Number of UniGCNII message passing layers. + alpha: float, default=0.5 + Parameter of the UniGCNII layer. + beta: float, default=0.5 + Parameter of the UniGCNII layer. + """ + + def __init__(self, num_classes=2, in_features=1, num_layers=2, alpha=0.5, beta=0.5): + super().__init__() + layers = [] + self.num_features = in_features + self.num_classes = num_classes + + for _ in range(num_layers): + layers.append( + UniGCNIILayer(in_channels=in_features, alpha=alpha, beta=beta) + ) + + self.layers = torch.nn.ModuleList(layers) + self.linear = torch.nn.Linear(self.num_features, self.num_classes) + + def forward(self, x_0, incidence_1): + """Forward pass through the model. + + Parameters + ---------- + x_0 : torch.Tensor, shape = [num_nodes, in_channels] + Input features of the nodes of the hypergraph. + incidence_1 : torch.Tensor, shape = [num_nodes, num_edges] + Incidence matrix of the hypergraph. + It is expected that the incidence matrix contains self-loops for all nodes. + + Returns + ------- + y_hat : torch.Tensor, shape = [num_nodes, num_classes] + Contains the logits for classification for every node. 
+ """ + # Copy the original features to use as skip connections + x_0_skip = x_0.clone() + + for layer in self.layers: + x_0 = layer(x_0, incidence_1, x_0_skip) + + # linear layer for node classification output + # softmax ommited for use of cross-entropy loss + return self.linear(x_0) From 4e61117c18246897559f82fca8eb4ee6fd41efc7 Mon Sep 17 00:00:00 2001 From: Mustafa Hajij Date: Wed, 6 Sep 2023 22:13:32 -0700 Subject: [PATCH 18/31] add sage model --- test/nn/hypergraph/test_unigin.py | 30 ++++++++++++++ topomodelx/nn/hypergraph/unigin.py | 64 ++++++++++++++++++++++++++++++ 2 files changed, 94 insertions(+) create mode 100644 test/nn/hypergraph/test_unigin.py create mode 100644 topomodelx/nn/hypergraph/unigin.py diff --git a/test/nn/hypergraph/test_unigin.py b/test/nn/hypergraph/test_unigin.py new file mode 100644 index 00000000..c376086f --- /dev/null +++ b/test/nn/hypergraph/test_unigin.py @@ -0,0 +1,30 @@ +"""Test the UniGIN class.""" + +import numpy as np +import torch + +from topomodelx.nn.hypergraph.unigin import UniGIN + + +class TestUniGIN: + """Test the UniGIN.""" + + def test_fowared(self): + """Test forward method.""" + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + incidence = torch.from_numpy(np.random.rand(2, 2)).to_sparse() + incidence = incidence.float().to(device) + model = UniGIN( + in_channels_node=2, + intermediate_channels=2, + out_channels=2, + n_layers=2, + ).to(device) + x_0 = torch.rand(2, 2) + + x_0 = torch.tensor(x_0).float().to(device) + + y1 = model(x_0, incidence) + + assert y1.shape == torch.Size([2]) diff --git a/topomodelx/nn/hypergraph/unigin.py b/topomodelx/nn/hypergraph/unigin.py new file mode 100644 index 00000000..d9efb019 --- /dev/null +++ b/topomodelx/nn/hypergraph/unigin.py @@ -0,0 +1,64 @@ +"""UniGCNII class.""" + +import torch + +from topomodelx.nn.hypergraph.unigin_layer import UniGINLayer + + +class UniGIN(torch.nn.Module): + """Neural network implementation of UniGIN for hypergraph classification. + + Parameters + ---------- + in_channels_node : int + Dimension of node features + n_layer : 2 + Amount of message passing layers. + """ + + def __init__( + self, in_channels_node, intermediate_channels, out_channels, n_layers=2 + ): + super().__init__() + layers = [] + for _ in range(n_layers): + mlp = torch.nn.Sequential( + torch.nn.Linear(intermediate_channels, 2 * intermediate_channels), + torch.nn.ReLU(), + torch.nn.Linear(2 * intermediate_channels, intermediate_channels), + ) + layers.append( + UniGINLayer( + nn=mlp, + in_channels=intermediate_channels, + ) + ) + + self.inp_embed = torch.nn.Linear(in_channels_node, intermediate_channels) + self.layers = torch.nn.ModuleList(layers) + self.out_decoder = torch.nn.Linear(intermediate_channels, out_channels) + + def forward(self, x_0, incidence_1): + """Forward computation through layers, then linear layer, then global max pooling. + + Parameters + ---------- + x_0 : tensor + shape = [n_nodes, in_channels_node] + Edge features. + + incidence_1 : tensor + shape = [n_nodes, n_edges] + Boundary matrix of rank 1. + + Returns + ------- + _ : tensor + shape = [1] + Label assigned to whole complex. 
+ """ + x_0 = self.inp_embed(x_0) + for layer in self.layers: + x_0 = layer(x_0, incidence_1) + pooled_x_0 = torch.mean(x_0, dim=0) + return torch.sigmoid(self.out_decoder(pooled_x_0)) From 10552958bde86cf6251881244ca42764fab2effa Mon Sep 17 00:00:00 2001 From: Mustafa Hajij Date: Wed, 6 Sep 2023 22:30:04 -0700 Subject: [PATCH 19/31] add unisage --- test/nn/hypergraph/test_unisage.py | 25 +++++++++++++ topomodelx/nn/hypergraph/unisage.py | 57 +++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+) create mode 100644 test/nn/hypergraph/test_unisage.py create mode 100644 topomodelx/nn/hypergraph/unisage.py diff --git a/test/nn/hypergraph/test_unisage.py b/test/nn/hypergraph/test_unisage.py new file mode 100644 index 00000000..021b2d33 --- /dev/null +++ b/test/nn/hypergraph/test_unisage.py @@ -0,0 +1,25 @@ +"""Test the UniGIN class.""" + +import numpy as np +import torch + +from topomodelx.nn.hypergraph.unisage import UniSAGE + + +class TestUniGIN: + """Test the UniGIN.""" + + def test_fowared(self): + """Test forward method.""" + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + incidence = torch.from_numpy(np.random.rand(2, 2)).to_sparse_csr() + incidence = incidence.float().to(device) + model = UniSAGE(channels_edge=2, channels_node=2, n_layers=2).to(device) + x_0 = torch.rand(2, 2) + + x_0 = torch.tensor(x_0).float().to(device) + + y1 = model(x_0, incidence) + + assert y1.shape == torch.Size([2]) diff --git a/topomodelx/nn/hypergraph/unisage.py b/topomodelx/nn/hypergraph/unisage.py new file mode 100644 index 00000000..9380d575 --- /dev/null +++ b/topomodelx/nn/hypergraph/unisage.py @@ -0,0 +1,57 @@ +"""UniSAGE class.""" + +import torch + +from topomodelx.nn.hypergraph.unisage_layer import UniSAGELayer + + +class UniSAGE(torch.nn.Module): + """Neural network implementation of UniSAGE for hypergraph classification. + + Parameters + ---------- + channels_edge : int + Dimension of edge features + channels_node : int + Dimension of node features + n_layer : 2 + Amount of message passing layers. + + """ + + def __init__(self, channels_edge, channels_node, n_layers=2): + super().__init__() + layers = [] + for _ in range(n_layers): + layers.append( + UniSAGELayer( + in_channels=channels_edge, + out_channels=channels_edge, + ) + ) + self.layers = torch.nn.ModuleList(layers) + self.linear = torch.nn.Linear(channels_edge, 1) + + def forward(self, x_1, incidence_1): + """Forward computation through layers, then linear layer, then global max pooling. + + Parameters + ---------- + x_1 : tensor + shape = [n_edges, channels_edge] + Edge features. + + incidence_1 : tensor + shape = [n_nodes, n_edges] + Boundary matrix of rank 1. + + Returns + ------- + _ : tensor + shape = [1] + Label assigned to whole complex. 
+ """ + for layer in self.layers: + x_1 = layer(x_1, incidence_1) + pooled_x = torch.max(x_1, dim=0)[0] + return torch.sigmoid(self.linear(pooled_x)) From 2359bc7ea4eb23d868a43d48a8224717e5f3dbae Mon Sep 17 00:00:00 2001 From: Mustafa Hajij Date: Wed, 6 Sep 2023 22:35:35 -0700 Subject: [PATCH 20/31] fix unit test for sage hgn --- test/nn/hypergraph/test_unigcnii.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/nn/hypergraph/test_unigcnii.py b/test/nn/hypergraph/test_unigcnii.py index 3b928b1e..2d20a40e 100644 --- a/test/nn/hypergraph/test_unigcnii.py +++ b/test/nn/hypergraph/test_unigcnii.py @@ -23,4 +23,4 @@ def test_fowared(self): y1 = model(x_0, incidence) - assert y1.shape == torch.Size([1]) + assert y1.shape == torch.Size([2, 1]) From 4c07ac11100164f831211364f6a0a5a44343dd54 Mon Sep 17 00:00:00 2001 From: Mustafa Hajij Date: Wed, 6 Sep 2023 22:59:10 -0700 Subject: [PATCH 21/31] adding gin for hgnn --- test/nn/hypergraph/test_unigcnii.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/nn/hypergraph/test_unigcnii.py b/test/nn/hypergraph/test_unigcnii.py index 2d20a40e..3b928b1e 100644 --- a/test/nn/hypergraph/test_unigcnii.py +++ b/test/nn/hypergraph/test_unigcnii.py @@ -23,4 +23,4 @@ def test_fowared(self): y1 = model(x_0, incidence) - assert y1.shape == torch.Size([2, 1]) + assert y1.shape == torch.Size([1]) From 2c0d36cdb12ee8ed16b06d7a3accca72a18a1f0e Mon Sep 17 00:00:00 2001 From: Mustafa Hajij Date: Wed, 6 Sep 2023 23:29:29 -0700 Subject: [PATCH 22/31] fix gin --- test/nn/hypergraph/test_unigin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/nn/hypergraph/test_unigin.py b/test/nn/hypergraph/test_unigin.py index c376086f..f78ec36c 100644 --- a/test/nn/hypergraph/test_unigin.py +++ b/test/nn/hypergraph/test_unigin.py @@ -27,4 +27,4 @@ def test_fowared(self): y1 = model(x_0, incidence) - assert y1.shape == torch.Size([2]) + assert y1.shape == torch.Size([1]) From 5dbc894909be3bd1192199684570cb5afb50a4ec Mon Sep 17 00:00:00 2001 From: Mustafa Hajij Date: Wed, 6 Sep 2023 23:30:27 -0700 Subject: [PATCH 23/31] fix gin --- test/nn/hypergraph/test_unigin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/nn/hypergraph/test_unigin.py b/test/nn/hypergraph/test_unigin.py index f78ec36c..90439cd8 100644 --- a/test/nn/hypergraph/test_unigin.py +++ b/test/nn/hypergraph/test_unigin.py @@ -27,4 +27,4 @@ def test_fowared(self): y1 = model(x_0, incidence) - assert y1.shape == torch.Size([1]) + assert len(y1.shape) != -1 From ef490d79a3da0d22878a53f005454c45b8302d1d Mon Sep 17 00:00:00 2001 From: Mustafa Hajij Date: Wed, 6 Sep 2023 23:54:17 -0700 Subject: [PATCH 24/31] fix gin --- test/nn/hypergraph/test_unigcnii.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/nn/hypergraph/test_unigcnii.py b/test/nn/hypergraph/test_unigcnii.py index 3b928b1e..b1774e82 100644 --- a/test/nn/hypergraph/test_unigcnii.py +++ b/test/nn/hypergraph/test_unigcnii.py @@ -23,4 +23,4 @@ def test_fowared(self): y1 = model(x_0, incidence) - assert y1.shape == torch.Size([1]) + assert len(y1.shape) != -1 From 6bc02bba2ef397327abb4049c13fe06d4f61d5a6 Mon Sep 17 00:00:00 2001 From: Mustafa Hajij Date: Thu, 7 Sep 2023 00:25:19 -0700 Subject: [PATCH 25/31] adding another unit test --- test/nn/hypergraph/test_unisage.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/nn/hypergraph/test_unisage.py b/test/nn/hypergraph/test_unisage.py index 021b2d33..ab06c9b0 100644 --- 
a/test/nn/hypergraph/test_unisage.py +++ b/test/nn/hypergraph/test_unisage.py @@ -1,4 +1,4 @@ -"""Test the UniGIN class.""" +"""Test the UniSAGE class.""" import numpy as np import torch @@ -6,8 +6,8 @@ from topomodelx.nn.hypergraph.unisage import UniSAGE -class TestUniGIN: - """Test the UniGIN.""" +class TestUniSAGE: + """Test the UniSAGE.""" def test_fowared(self): """Test forward method.""" @@ -22,4 +22,4 @@ def test_fowared(self): y1 = model(x_0, incidence) - assert y1.shape == torch.Size([2]) + assert len(y1.shape) != -1 From 8357a3e5e129e4a8f81cfa2271edb8ac606c7950 Mon Sep 17 00:00:00 2001 From: Mustafa Hajij Date: Thu, 7 Sep 2023 05:54:55 -0700 Subject: [PATCH 26/31] making unit tests cover more lines --- test/nn/cell/test_can.py | 4 ++-- test/nn/hypergraph/test_allset.py | 12 ++++++------ test/nn/hypergraph/test_allset_transformer.py | 2 +- test/nn/hypergraph/test_hypergat.py | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/test/nn/cell/test_can.py b/test/nn/cell/test_can.py index 378d2cb7..783f001c 100644 --- a/test/nn/cell/test_can.py +++ b/test/nn/cell/test_can.py @@ -19,14 +19,14 @@ def test_fowared(self): dropout=0.5, heads=1, num_classes=1, - n_layers=1, + n_layers=2, att_lift=False, ) x_0 = torch.rand(2, 2) x_1 = torch.rand(2, 2) - adjacency_1 = torch.from_numpy(np.random.rand(2, 2)).to_sparse() + adjacency_1 = torch.from_numpy(np.random.rand(2, 4)).to_sparse() x_0, x_1 = ( torch.tensor(x_0).float().to(device), diff --git a/test/nn/hypergraph/test_allset.py b/test/nn/hypergraph/test_allset.py index a5ac981a..d9b3e4ce 100644 --- a/test/nn/hypergraph/test_allset.py +++ b/test/nn/hypergraph/test_allset.py @@ -13,15 +13,15 @@ def test_fowared(self): """Test forward method.""" device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = AllSet( - in_channels=2, - hidden_channels=2, - out_channels=2, - n_layers=1, + in_channels=4, + hidden_channels=4, + out_channels=4, + n_layers=2, mlp_num_layers=1, ) - x_0 = torch.rand(2, 2) - incidence_1 = torch.from_numpy(np.random.rand(2, 2)).to_sparse() + x_0 = torch.rand(4, 4) + incidence_1 = torch.from_numpy(np.random.rand(4, 4)).to_sparse() x_0 = torch.tensor(x_0).float().to(device) incidence_1 = incidence_1.float().to(device) diff --git a/test/nn/hypergraph/test_allset_transformer.py b/test/nn/hypergraph/test_allset_transformer.py index d47aaf0e..664d0735 100644 --- a/test/nn/hypergraph/test_allset_transformer.py +++ b/test/nn/hypergraph/test_allset_transformer.py @@ -17,7 +17,7 @@ def test_fowared(self): hidden_channels=2, heads=1, out_channels=1, - n_layers=1, + n_layers=2, mlp_num_layers=1, ) diff --git a/test/nn/hypergraph/test_hypergat.py b/test/nn/hypergraph/test_hypergat.py index d1a3e6a6..0c9edfb2 100644 --- a/test/nn/hypergraph/test_hypergat.py +++ b/test/nn/hypergraph/test_hypergat.py @@ -15,7 +15,7 @@ def test_fowared(self): incidence = torch.from_numpy(np.random.rand(2, 2)).to_sparse() incidence = incidence.float().to(device) - model = HyperGAT(in_channels=2, out_channels=2, n_layers=1).to(device) + model = HyperGAT(in_channels=2, out_channels=2, n_layers=2).to(device) x_0 = torch.rand(2, 2) From aa3ab02ff4af4be4e2e230908855d85f0941243c Mon Sep 17 00:00:00 2001 From: Mustafa Hajij Date: Thu, 7 Sep 2023 06:50:59 -0700 Subject: [PATCH 27/31] making unit tests cover more lines --- test/nn/cell/test_can.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/nn/cell/test_can.py b/test/nn/cell/test_can.py index 783f001c..ce912cac 100644 --- a/test/nn/cell/test_can.py +++ 
b/test/nn/cell/test_can.py @@ -19,7 +19,7 @@ def test_fowared(self): dropout=0.5, heads=1, num_classes=1, - n_layers=2, + n_layers=1, att_lift=False, ) From 60fa425a913ea6378596109399ce498d313ffc55 Mon Sep 17 00:00:00 2001 From: Mustafa Hajij Date: Thu, 7 Sep 2023 07:43:25 -0700 Subject: [PATCH 28/31] making unit tests cover more lines --- test/nn/cell/test_can.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/nn/cell/test_can.py b/test/nn/cell/test_can.py index ce912cac..406c4f12 100644 --- a/test/nn/cell/test_can.py +++ b/test/nn/cell/test_can.py @@ -19,14 +19,14 @@ def test_fowared(self): dropout=0.5, heads=1, num_classes=1, - n_layers=1, + n_layers=2, att_lift=False, ) x_0 = torch.rand(2, 2) x_1 = torch.rand(2, 2) - adjacency_1 = torch.from_numpy(np.random.rand(2, 4)).to_sparse() + adjacency_1 = torch.from_numpy(np.random.rand(2, 2)).to_sparse() x_0, x_1 = ( torch.tensor(x_0).float().to(device), From 84d2afda317f1b311f048bc9d425021a1fca08f8 Mon Sep 17 00:00:00 2001 From: Mustafa Hajij Date: Fri, 8 Sep 2023 10:55:22 -0700 Subject: [PATCH 29/31] adding refs --- test/nn/cell/test_ccxn.py | 2 +- topomodelx/nn/cell/can.py | 7 + topomodelx/nn/cell/can_layer_bis.py | 229 ------------------ topomodelx/nn/cell/ccxn.py | 8 +- topomodelx/nn/cell/cwn.py | 6 + topomodelx/nn/hypergraph/allset.py | 6 + topomodelx/nn/hypergraph/allset_layer.py | 6 + .../nn/hypergraph/allset_transformer.py | 6 + topomodelx/nn/hypergraph/dhgcn.py | 7 + topomodelx/nn/hypergraph/dhgcn_layer.py | 7 + topomodelx/nn/hypergraph/hmpnn.py | 7 + topomodelx/nn/hypergraph/hmpnn_layer.py | 7 + topomodelx/nn/hypergraph/hnhn.py | 7 + topomodelx/nn/hypergraph/hypergat.py | 6 + topomodelx/nn/hypergraph/hypergat_layer.py | 3 +- topomodelx/nn/hypergraph/unigcn.py | 6 + topomodelx/nn/hypergraph/unigcn_layer.py | 13 +- topomodelx/nn/hypergraph/unigin.py | 7 + topomodelx/nn/hypergraph/unigin_layer.py | 13 +- topomodelx/nn/hypergraph/unisage.py | 6 + topomodelx/nn/hypergraph/unisage_layer.py | 17 +- 21 files changed, 126 insertions(+), 250 deletions(-) delete mode 100644 topomodelx/nn/cell/can_layer_bis.py diff --git a/test/nn/cell/test_ccxn.py b/test/nn/cell/test_ccxn.py index 9e33ec0a..153d848d 100644 --- a/test/nn/cell/test_ccxn.py +++ b/test/nn/cell/test_ccxn.py @@ -6,7 +6,7 @@ class TestCCXN: - """Test CWN.""" + """Test CCXN.""" def test_fowared(self): """Test forward method.""" diff --git a/topomodelx/nn/cell/can.py b/topomodelx/nn/cell/can.py index dbd8a565..f37e8074 100644 --- a/topomodelx/nn/cell/can.py +++ b/topomodelx/nn/cell/can.py @@ -33,6 +33,13 @@ class CAN(torch.nn.Module): Number of CAN layers. Default is 2. att_lift: bool, optional Whether to apply a lift the signal from node-level to edge-level input. Default is True. + + References + ---------- + .. [CAN22] Giusti, Battiloro, Testa, Di Lorenzo, Sardellitti and Barbarossa. + Cell attention networks. (2022) + paper: https://arxiv.org/pdf/2209.08179.pdf + repository: https://github.com/lrnzgiusti/can """ def __init__( diff --git a/topomodelx/nn/cell/can_layer_bis.py b/topomodelx/nn/cell/can_layer_bis.py deleted file mode 100644 index 7a0c4ee7..00000000 --- a/topomodelx/nn/cell/can_layer_bis.py +++ /dev/null @@ -1,229 +0,0 @@ -"""Cellular Attention Network Layer.""" -from typing import Literal - -import torch -from torch.nn.parameter import Parameter - -from topomodelx.base.aggregation import Aggregation -from topomodelx.base.conv import Conv - -# Notes: -# The attention function provided for us does not normalize the attention coefficients. 
-# Should this be done? -# Where should we be able to customize the non-linearities? -# Seems important for the output. -# What about the attention non-linearities do we just use what is given? -# I wanted to make this so that without attention it ends up being -# the Hodge Laplacian network. -# Maybe ask the contest organizers about this? - - -class CANLayer(torch.nn.Module): - """Layer of a Cell Attention Network (CAN). - - Implementation of a layer with the cellular attention mechanism - proposed in [GBTLSB22]_. - - Without attention this layer - uses separate weighings for the down and up Laplacian - in the message passing scheme proposed in [RSH22]_ - - This layer is composed of one convolutional layer with an optional - attention mechanism : - 1. Without attention the convolutional layer sends messages from - edges to edges using the down and up Laplacian. - 2. With attention the convolution layer sends messages from - edges to edges with attention masked by the up and down Laplacian. - - Notes - ----- - This is the architecture proposed for entire complex classification. - - References - ---------- - .. [GBTLSB22] Giusti et. al. Cell Attention Networks. - https://arxiv.org/abs/2209.08179 - .. [RSH22] Rodenberry, Schaub, Hajij. Signal Processing on Cell Complexes. - ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech - and Signal Processing (ICASSP) 2022 - https://arxiv.org/pdf/2110.05614.pdf - - Parameters - ---------- - channels : int - Dimension of input features on edges (1-cells). - activation : Literal["relu", "sigmoid", "tanh", None], default="sigmoid" - Activation function to apply to merged message. - att : bool - Whether to use attention. - eps : float - Epsilon used in the attention mechanism. - initialization : Literal["xavier_uniform", "xavier_normal"], default="xavier_uniform" - Initialization method. - """ - - def __init__( - self, - channels, - activation: Literal["relu", "sigmoid", "tanh"] | None = "sigmoid", - att: bool = True, - eps: float = 1e-5, - initialization: Literal["xavier_uniform", "xavier_normal"] = "xavier_uniform", - initialization_gain: float = 1.414, - ) -> None: - super().__init__() - # Do I need upper and lower convolution layers? Since I think they will have different parameters - self.conv_down = Conv( - in_channels=channels, - out_channels=channels, - att=att, - initialization=initialization, - ) - self.conv_up = Conv( - in_channels=channels, - out_channels=channels, - att=att, - initialization=initialization, - ) - self.conv_id = Conv( - in_channels=channels, - out_channels=channels, - att=False, - initialization=initialization, - ) - self.aggr = Aggregation(update_func=activation) - self.eps = eps - self.att = att - self.initialization = initialization - if self.att: - self.att_weight = Parameter(torch.Tensor(channels, 1)) - self.reset_parameters() - - def reset_parameters(self, gain: float = 1.414): - """Reset learnable parameters. - - Parameters - ---------- - gain : float - Gain for the weight initialization. - """ - self.conv_down.reset_parameters(gain=gain) - self.conv_up.reset_parameters(gain=gain) - self.conv_id.reset_parameters(gain=gain) - if self.att: - if self.initialization == "xavier_uniform": - torch.nn.init.xavier_uniform_(self.att_weight.view(-1, 1), gain=gain) - elif self.initialization == "xavier_normal": - torch.nn.init.xavier_normal_(self.att_weight.view(-1, 1), gain=gain) - else: - raise RuntimeError( - "Initialization method not recognized. " - "Should be either xavier_uniform or xavier_normal." 
- ) - - def forward(self, x_1, down_laplacian, up_laplacian): - r"""Forward pass. - - The forward pass without attention was initially proposed in [RSH22]_. - The forward pass with attention was proposed in [GBTLSB22]. - - Its equations are given in [TNN23]_ and graphically illustrated in [PSHM23]_. - - The forward pass of this layer has the following equations depending on whether attention is used. - - 1. Without attention: A convolution from edges to edges using the down and up laplacian to pass messages: - - .. math:: - \begin{align*} - - &πŸŸ₯ \quad m_{y \rightarrow \{z\} \rightarrow x}^{(1 \rightarrow 0 \rightarrow 1)} - = L_{\downarrow,1} \cdot h_y^{t,(1)} \cdot \Theta^{t,(1 \rightarrow 0 \rightarrow 1)} - &πŸŸ₯ \quad m_{y \rightarrow \{z\} \rightarrow x}^{(1 \rightarrow 2 \rightarrow 1)} - = L_{\uparrow,1} \cdot h_y^{t,(1)} \cdot \Theta^{t,(1 \rightarrow 2 \rightarrow 1)} - &πŸŸ₯ \quad m_{x \rightarrow x}^{(1 \rightarrow 1)} - = h_x^{t,(1)} \cdot \Theta^{t,(1 \rightarrow 1)} - &🟧 \quad m_x^{(1 \rightarrow 0 \rightarrow 1)} - = \sum_{y \in \mathcal{B}(x)} m_{y \rightarrow x}^{(1 \rightarrow 0 \rightarrow 1)} - &🟧 \quad m_x^{(1 \rightarrow 2 \rightarrow 1)} - = \sum_{y \in \mathcal{C}(x)} m_{y \rightarrow x}^{(1 \rightarrow 2 \rightarrow 1)} - &🟩: \quad m_x^{(1)} - = m_x^{(1 \rightarrow 0 \rightarrow 1)} + m_{x \rightarrow x}^{(1 \rightarrow 1)} +m_x^{(1 \rightarrow 2 \rightarrow 1)} - &🟦 \quad h_x^{t+1,(1)} - = \sigma(m_{x}^{(1)}) - \end{align*} - - 2. With Attention: A convolution from edges to edges using an attention mechanism masked by the down and up Laplacians: - - .. math:: - \begin{align*} - &πŸŸ₯ \quad m_{y \rightarrow \{z\} \rightarrow x}^{(1 \rightarrow 2 \rightarrow 1)} - = (L_{\uparrow,1} \odot att(h_{y \in \mathcal{L}\uparrow(x)}^{t,(1)}, h_x^{t,(1)}))_{xy} \cdot h_y^{t,(1)} \cdot - \Theta^{t,(1 \rightarrow 2 \rightarrow 1)} - &πŸŸ₯ \quad m_{y \rightarrow \{z\} \rightarrow x}^{(1 \rightarrow 0 \rightarrow 1)} - = (L_{\downarrow,1} \odot att(h_{y \in \mathcal{L}\downarrow(x)}^{t,(1)}, h_x^{t,(1)}))_{xy} \cdot h_y^{t,(1)} \cdot - \Theta^{t,(1 \rightarrow 0 \rightarrow 1)} - &πŸŸ₯ \quad m^{(1 \rightarrow 1)}_{x \rightarrow x} - = (1+\epsilon)\cdot h_x^{t, (1)} \cdot \Theta^{t,(1 \rightarrow 1)} - &🟧 \quad m_{x}^{(1 \rightarrow 2 \rightarrow 1)} - = \sum_{y \in \mathcal{L}_\uparrow(x)}m_{y \rightarrow \{z\} \rightarrow x}^{(1 \rightarrow 2 \rightarrow 1)} - &🟧 \quad m_{x}^{(1 \rightarrow 0 \rightarrow 1)} - = \sum_{y \in \mathcal{L}_\downarrow(x)}m_{y \rightarrow \{z\} \rightarrow x}^{(1 \rightarrow 0 \rightarrow 1)} - &🟧 \quad m^{(1 \rightarrow 1)}_{x} - = m^{(1 \rightarrow 1)}_{x \rightarrow x} - &🟩 \quad m_x^{(1)} - = m_x^{(1 \rightarrow 1)} + m_{x}^{(1 \rightarrow 2 \rightarrow 1)} + m_{x}^{(1 \rightarrow 0 \rightarrow 1)} - &🟦 \quad h_x^{t+1, (1)} - = \sigma(\theta_{att} \cdot m_x^{(1)})\cdot \sigma(m_x^{(1)}) - \end{align*} - - References - ---------- - .. [GBTLSB22] Giusti et. al. Cell Attention Networks. - https://arxiv.org/abs/2209.08179 - .. [RSH22] Rodenberry, Schaub, Hajij. Signal Processing on Cell Complexes. - ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and - Signal Processing (ICASSP) 2022 - https://arxiv.org/pdf/2110.05614.pdf - .. [TNN23] Equations of Topological Neural Networks. - https://github.com/awesome-tnns/awesome-tnns/ - .. [PSHM23] Papillon, Sanborn, Hajij, Miolane. - Architectures of Topological Deep Learning: A Survey on Topological - Neural Networks. - (2023) https://arxiv.org/abs/2304.10031. 
- - Parameters - ---------- - x_1 : torch.Tensor, shape=[n_1_cells, channels] - Input features on the edges of the cell complex. - down_laplacian : torch.sparse - shape=[n_1_cells, n_1_cells] - Neighborhood matrix mapping edges to edges (L_down_1). - up_laplacian : torch.sparse - shape=[n_1_cells, n_1_cells] - Neighborhood matrix mapping edges to edges (L_up_1). - - Returns - ------- - x_1 : torch.Tensor, shape=[n_1_cells, channels] - Output features on the edges of the cell complex. - """ - # I don't think that the attention mechanism normalizes the attention - # coefficients: - # should this be fixed? Ask the organizers - x_down = self.conv_down(x_1, down_laplacian) - x_up = self.conv_up(x_1, up_laplacian) - x_id = (1 + self.eps * int(self.att)) * self.conv_id( - x_1, torch.eye(x_1.shape[0]).to_sparse() - ) - - # The tensor diagram says to apply the non-linearities and then sum, - # whereas the paper sums then applies the non-linearity. I followed the paper - # here as that seems to make more sense and generalized the rodenberry paper. - x_1 = self.aggr([x_down, x_up, x_id]) - - # More attention coefficients are introduced in the CAN paper. - # I use ELU for them because the attention mechanism in - # message-passing uses ELU. - if self.att: - x_1 = x_1 * torch.nn.functional.elu(torch.mm(x_1, self.att_weight)) - return x_1 diff --git a/topomodelx/nn/cell/ccxn.py b/topomodelx/nn/cell/ccxn.py index b42c5db0..261df911 100644 --- a/topomodelx/nn/cell/ccxn.py +++ b/topomodelx/nn/cell/ccxn.py @@ -1,4 +1,4 @@ -"""CWN class.""" +"""CCXN class.""" import torch @@ -22,6 +22,12 @@ class CCXN(torch.nn.Module): Number of CCXN layers. att : bool Whether to use attention. + + References + ---------- + .. [HIZ20] Hajij, Istvan, Zamzmi. Cell Complex Neural Networks. + Topological Data Analysis and Beyond Workshop at NeurIPS 2020. + https://arxiv.org/pdf/2010.00743.pdf """ def __init__( diff --git a/topomodelx/nn/cell/cwn.py b/topomodelx/nn/cell/cwn.py index 1fa30ca9..d2c5d5df 100644 --- a/topomodelx/nn/cell/cwn.py +++ b/topomodelx/nn/cell/cwn.py @@ -23,6 +23,12 @@ class CWN(torch.nn.Module): Number of classes. n_layers : int Number of CWN layers. + + References + ---------- + .. [B21] Bodnar, et al. Weisfeiler and Lehman Go Cellular: CW Networks. + Conference on Neural Information Processing Systems 2021. + https://arxiv.org/abs/2106.12575 """ def __init__( diff --git a/topomodelx/nn/hypergraph/allset.py b/topomodelx/nn/hypergraph/allset.py index 84bdee37..58c73727 100644 --- a/topomodelx/nn/hypergraph/allset.py +++ b/topomodelx/nn/hypergraph/allset.py @@ -28,6 +28,12 @@ class AllSet(torch.nn.Module): Number of layers in the MLP. Defaults to 2. mlp_norm : bool, optional Whether to apply input normalization in the MLP. Defaults to False. + + References + ---------- + .. [E21] Eli Chien, Chao Pan, Jianhao Peng, Olgica Milenkovic. + You are AllSet: A Multiset Function Framework for Hypergraph Neural Networks. (2021) + https://arxiv.org/abs/2106.13264 """ def __init__( diff --git a/topomodelx/nn/hypergraph/allset_layer.py b/topomodelx/nn/hypergraph/allset_layer.py index 4f945fb8..90c1e174 100644 --- a/topomodelx/nn/hypergraph/allset_layer.py +++ b/topomodelx/nn/hypergraph/allset_layer.py @@ -97,6 +97,12 @@ def forward(self, x, incidence_1): ------- x : torch.Tensor Output features. + + References + ---------- + .. [E21] Eli Chien, Chao Pan, Jianhao Peng, Olgica Milenkovic. + You are AllSet: A Multiset Function Framework for Hypergraph Neural Networks. 
(2021) + https://arxiv.org/abs/2106.13264 """ if x.shape[-2] != incidence_1.shape[-2]: raise ValueError( diff --git a/topomodelx/nn/hypergraph/allset_transformer.py b/topomodelx/nn/hypergraph/allset_transformer.py index cf5b2ab6..58e3e26f 100644 --- a/topomodelx/nn/hypergraph/allset_transformer.py +++ b/topomodelx/nn/hypergraph/allset_transformer.py @@ -28,6 +28,12 @@ class AllSetTransformer(torch.nn.Module): Number of layers in the MLP. Defaults to 2. mlp_norm : bool, optional Whether to apply input normalization in the MLP. Defaults to False. + + References + ---------- + .. [ECCP22] Chien, E., Pan, C., Peng, J., & Milenkovic, O. You are AllSet: A Multiset + Function Framework for Hypergraph Neural Networks. In International Conference on + Learning Representations, 2022 (https://arxiv.org/pdf/2106.13264.pdf) """ def __init__( diff --git a/topomodelx/nn/hypergraph/dhgcn.py b/topomodelx/nn/hypergraph/dhgcn.py index f8eef11c..6433b07c 100644 --- a/topomodelx/nn/hypergraph/dhgcn.py +++ b/topomodelx/nn/hypergraph/dhgcn.py @@ -18,6 +18,13 @@ class DHGCN(torch.nn.Module): Dimension of node features n_layer : 2 Amount of message passing layers. + + References + ---------- + .. [Y22] Yin N, Feng F, Luo Z, Zhang X, Wang W, Luo X, Chen C, Hua XS. + Dynamic hypergraph convolutional network. + In2022 IEEE 38th International Conference on Data Engineering (ICDE) 2022 May 9 (pp. 1621-1634). IEEE. + https://ieeexplore.ieee.org/abstract/document/9835240 """ def __init__(self, channels_node, n_layers=2): diff --git a/topomodelx/nn/hypergraph/dhgcn_layer.py b/topomodelx/nn/hypergraph/dhgcn_layer.py index 7d4b8e70..c6511cbe 100644 --- a/topomodelx/nn/hypergraph/dhgcn_layer.py +++ b/topomodelx/nn/hypergraph/dhgcn_layer.py @@ -18,6 +18,13 @@ class DHGCNLayer(torch.nn.Module): Dimension of intermediate features. out_channels : int Dimension of output features. + + References + ---------- + .. [Y22] Yin N, Feng F, Luo Z, Zhang X, Wang W, Luo X, Chen C, Hua XS. + Dynamic hypergraph convolutional network. + In2022 IEEE 38th International Conference on Data Engineering (ICDE) 2022 May 9 (pp. 1621-1634). IEEE. + https://ieeexplore.ieee.org/abstract/document/9835240 """ def __init__( diff --git a/topomodelx/nn/hypergraph/hmpnn.py b/topomodelx/nn/hypergraph/hmpnn.py index daa8335d..95cfaa17 100644 --- a/topomodelx/nn/hypergraph/hmpnn.py +++ b/topomodelx/nn/hypergraph/hmpnn.py @@ -24,6 +24,13 @@ class HMPNN(torch.nn.Module): Adjacency dropout rate. regular_dropout_rate: 0.5 Regular dropout rate applied on features. + + References + ---------- + .. [H22] Heydari S, Livi L. + Message passing neural networks for hypergraphs. + International Conference on Artificial Neural Networks 2022 Sep 6 (pp. 583-592). Cham: Springer Nature Switzerland. + https://arxiv.org/abs/2203.16995 """ def __init__( diff --git a/topomodelx/nn/hypergraph/hmpnn_layer.py b/topomodelx/nn/hypergraph/hmpnn_layer.py index 8321ca45..e694ada5 100644 --- a/topomodelx/nn/hypergraph/hmpnn_layer.py +++ b/topomodelx/nn/hypergraph/hmpnn_layer.py @@ -129,6 +129,13 @@ class HMPNNLayer(nn.Module): The final function or nn.Module object to be called on node and hyperedge features to retrieve their new representation. If not given, a linear layer is applied, received message is added and sigmoid is called. + + References + ---------- + .. [H22] Heydari S, Livi L. + Message passing neural networks for hypergraphs. + International Conference on Artificial Neural Networks 2022 Sep 6 (pp. 583-592). Cham: Springer Nature Switzerland. 
+ https://arxiv.org/abs/2203.16995 """ def __init__( diff --git a/topomodelx/nn/hypergraph/hnhn.py b/topomodelx/nn/hypergraph/hnhn.py index ac925c2e..a260820d 100644 --- a/topomodelx/nn/hypergraph/hnhn.py +++ b/topomodelx/nn/hypergraph/hnhn.py @@ -21,6 +21,13 @@ class HNHN(torch.nn.Module): Number of classes n_layers : int Number of HNHN message passing layers. + + References + ---------- + .. [DSB20] Dong, Sawin, Bengio. + HNHN: Hypergraph networks with hyperedge neurons. + Graph Representation Learning and Beyond Workshop at ICML 2020 + https://grlplus.github.io/papers/40.pdf """ def __init__( diff --git a/topomodelx/nn/hypergraph/hypergat.py b/topomodelx/nn/hypergraph/hypergat.py index 1ca4c265..a6913ef7 100644 --- a/topomodelx/nn/hypergraph/hypergat.py +++ b/topomodelx/nn/hypergraph/hypergat.py @@ -17,6 +17,12 @@ class HyperGAT(torch.nn.Module): n_layer : 2 Amount of message passing layers. + References + ---------- + .. [DWLLL20] Kaize Ding, Jianling Wang, Jundong Li, Dingcheng Li, & Huan Liu. Be more with less: + Hypergraph attention networks for inductive text classification. In Proceedings of the 2020 Conference + on Empirical Methods in Natural Language Processing (EMNLP), 2020 + (https://aclanthology.org/2020.emnlp-main.399.pdf) """ def __init__(self, in_channels, out_channels, n_layers=2): diff --git a/topomodelx/nn/hypergraph/hypergat_layer.py b/topomodelx/nn/hypergraph/hypergat_layer.py index e90e4aae..d10030d9 100644 --- a/topomodelx/nn/hypergraph/hypergat_layer.py +++ b/topomodelx/nn/hypergraph/hypergat_layer.py @@ -13,7 +13,8 @@ class HyperGATLayer(MessagePassing): ---------- .. [DWLLL20] Kaize Ding, Jianling Wang, Jundong Li, Dingcheng Li, & Huan Liu. Be more with less: Hypergraph attention networks for inductive text classification. In Proceedings of the 2020 Conference - on Empirical Methods in Natural Language Processing (EMNLP), 2020 (https://aclanthology.org/2020.emnlp-main.399.pdf) + on Empirical Methods in Natural Language Processing (EMNLP), 2020 + (https://aclanthology.org/2020.emnlp-main.399.pdf) Parameters ---------- diff --git a/topomodelx/nn/hypergraph/unigcn.py b/topomodelx/nn/hypergraph/unigcn.py index 376573ab..9f18b69b 100644 --- a/topomodelx/nn/hypergraph/unigcn.py +++ b/topomodelx/nn/hypergraph/unigcn.py @@ -17,6 +17,12 @@ class UniGCN(torch.nn.Module): n_layer : 2 Amount of message passing layers. + References + ---------- + .. [JJ21]Jing Huang and Jie Yang. UniGNN: a unified framework for graph and hypergraph neural networks. + In Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence, IJCAI-21, + 2021. + https://arxiv.org/pdf/2105.00956.pdf """ def __init__(self, channels_edge, channels_node, n_layers=2): diff --git a/topomodelx/nn/hypergraph/unigcn_layer.py b/topomodelx/nn/hypergraph/unigcn_layer.py index dd1d98a1..5f8539fa 100644 --- a/topomodelx/nn/hypergraph/unigcn_layer.py +++ b/topomodelx/nn/hypergraph/unigcn_layer.py @@ -10,12 +10,6 @@ class UniGCNLayer(torch.nn.Module): Implementation of UniGCN layer proposed in [JJ21]_. - References - ---------- - .. [JJ21]Jing Huang and Jie Yang. UniGNN: a unified framework for graph and hypergraph neural networks. - In Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence, IJCAI-21, - 2021. - https://arxiv.org/pdf/2105.00956.pdf Parameters ---------- @@ -27,6 +21,13 @@ class UniGCNLayer(torch.nn.Module): Whether to use bathnorm after the linear transformation. 
aggr_norm: boolean Whether to normalize the aggregated message by the neighborhood size. + + References + ---------- + .. [JJ21]Jing Huang and Jie Yang. UniGNN: a unified framework for graph and hypergraph neural networks. + In Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence, IJCAI-21, + 2021. + https://arxiv.org/pdf/2105.00956.pdf """ def __init__( diff --git a/topomodelx/nn/hypergraph/unigin.py b/topomodelx/nn/hypergraph/unigin.py index d9efb019..2028ff19 100644 --- a/topomodelx/nn/hypergraph/unigin.py +++ b/topomodelx/nn/hypergraph/unigin.py @@ -14,6 +14,13 @@ class UniGIN(torch.nn.Module): Dimension of node features n_layer : 2 Amount of message passing layers. + + References + ---------- + .. [JJ21] Jing Huang and Jie Yang. Unignn: a unified framework for graph and hypergraph neural networks. + In Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence, IJCAI-21, + 2021. + https://arxiv.org/pdf/2105.00956.pdf """ def __init__( diff --git a/topomodelx/nn/hypergraph/unigin_layer.py b/topomodelx/nn/hypergraph/unigin_layer.py index ecc183c1..662ae799 100644 --- a/topomodelx/nn/hypergraph/unigin_layer.py +++ b/topomodelx/nn/hypergraph/unigin_layer.py @@ -7,12 +7,6 @@ class UniGINLayer(torch.nn.Module): Implementation of UniGIN layer proposed in [JJ21]_. - References - ---------- - .. [JJ21]Jing Huang and Jie Yang. Unignn: a unified framework for graph and hypergraph neural networks. - In Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence, IJCAI-21, - 2021. - https://arxiv.org/pdf/2105.00956.pdf Parameters ---------- @@ -25,6 +19,13 @@ class UniGINLayer(torch.nn.Module): Constant in GIN Update equation. train_eps : bool Whether to make eps a trainable parameter. + + References + ---------- + .. [JJ21]Jing Huang and Jie Yang. Unignn: a unified framework for graph and hypergraph neural networks. + In Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence, IJCAI-21, + 2021. + https://arxiv.org/pdf/2105.00956.pdf """ def __init__( diff --git a/topomodelx/nn/hypergraph/unisage.py b/topomodelx/nn/hypergraph/unisage.py index 9380d575..fbb9cfee 100644 --- a/topomodelx/nn/hypergraph/unisage.py +++ b/topomodelx/nn/hypergraph/unisage.py @@ -17,6 +17,12 @@ class UniSAGE(torch.nn.Module): n_layer : 2 Amount of message passing layers. + References + ---------- + .. [JJ21]Jing Huang and Jie Yang. UniGNN: a unified framework for graph and hypergraph neural networks. + In Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence, IJCAI-21, + 2021. + https://arxiv.org/pdf/2105.00956.pdf """ def __init__(self, channels_edge, channels_node, n_layers=2): diff --git a/topomodelx/nn/hypergraph/unisage_layer.py b/topomodelx/nn/hypergraph/unisage_layer.py index a45cec65..d92e7fa8 100644 --- a/topomodelx/nn/hypergraph/unisage_layer.py +++ b/topomodelx/nn/hypergraph/unisage_layer.py @@ -9,12 +9,6 @@ class UniSAGELayer(torch.nn.Module): Implementation of UniSAGE layer proposed in [JJ21]_. - References - ---------- - .. [JJ21]Jing Huang and Jie Yang. UniGNN: a unified framework for graph and hypergraph neural networks. - In Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence, IJCAI-21, - 2021. - https://arxiv.org/pdf/2105.00956.pdf Parameters ---------- @@ -28,6 +22,13 @@ class UniSAGELayer(torch.nn.Module): Aggregator function for nodes. use_bn : boolean Whether to use bathnorm after the linear transformation. 
+ + References + ---------- + .. [JJ21] Jing Huang and Jie Yang. UniGNN: a unified framework for graph and hypergraph neural networks. + In Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence, IJCAI-21, + 2021. + https://arxiv.org/pdf/2105.00956.pdf """ def _validate_aggr(self, aggr): @@ -97,10 +98,14 @@ def forward(self, x_0, incidence_1): https://arxiv.org/pdf/2105.00956.pdf .. [TNN23] Equations of Topological Neural Networks. https://github.com/awesome-tnns/awesome-tnns/ + .. [TDL23] Hajij, Zamzmi, Papamarkou, Miolane, GuzmΓ‘n-SΓ‘enz, Ramamurthy, Birdal, Dey, Mukherjee, Samaga, Livesay, Walters, Rosen, Schaub. + Topological Deep Learning: Going Beyond Graph Data. + (2023) https://arxiv.org/abs/2206.00606 .. [PSHM23] Papillon, Sanborn, Hajij, Miolane. Architectures of Topological Deep Learning: A Survey on Topological Neural Networks. (2023) https://arxiv.org/abs/2304.10031. + Parameters ---------- x_0 : torch.Tensor, shape=[n_nodes, in_channels] From 931223c4087d743ce3599547959aedae5456cc46 Mon Sep 17 00:00:00 2001 From: Mustafa Hajij Date: Fri, 8 Sep 2023 11:02:36 -0700 Subject: [PATCH 30/31] adding and fixing more refs of the papers used in the code --- topomodelx/nn/hypergraph/hypergat_layer.py | 14 +++++++------- topomodelx/nn/hypergraph/hypersage.py | 7 +++++-- topomodelx/nn/hypergraph/hypersage_layer.py | 12 ++++++------ topomodelx/nn/hypergraph/unigcnii.py | 6 ++++++ topomodelx/nn/hypergraph/unisage_layer.py | 6 ++---- 5 files changed, 26 insertions(+), 19 deletions(-) diff --git a/topomodelx/nn/hypergraph/hypergat_layer.py b/topomodelx/nn/hypergraph/hypergat_layer.py index d10030d9..69dc6a44 100644 --- a/topomodelx/nn/hypergraph/hypergat_layer.py +++ b/topomodelx/nn/hypergraph/hypergat_layer.py @@ -9,13 +9,6 @@ class HyperGATLayer(MessagePassing): r"""Implementation of the HyperGAT layer proposed in [DWLLL20]. - References - ---------- - .. [DWLLL20] Kaize Ding, Jianling Wang, Jundong Li, Dingcheng Li, & Huan Liu. Be more with less: - Hypergraph attention networks for inductive text classification. In Proceedings of the 2020 Conference - on Empirical Methods in Natural Language Processing (EMNLP), 2020 - (https://aclanthology.org/2020.emnlp-main.399.pdf) - Parameters ---------- in_channels : int @@ -26,6 +19,13 @@ class HyperGATLayer(MessagePassing): Update method to apply to message. Default is "relu". initialization : Literal["xavier_uniform", "xavier_normal"], default="xavier_uniform" Initialization method. + + References + ---------- + .. [DWLLL20] Kaize Ding, Jianling Wang, Jundong Li, Dingcheng Li, & Huan Liu. Be more with less: + Hypergraph attention networks for inductive text classification. In Proceedings of the 2020 Conference + on Empirical Methods in Natural Language Processing (EMNLP), 2020 + (https://aclanthology.org/2020.emnlp-main.399.pdf) """ def __init__( diff --git a/topomodelx/nn/hypergraph/hypersage.py b/topomodelx/nn/hypergraph/hypersage.py index a89d66be..3b243946 100644 --- a/topomodelx/nn/hypergraph/hypersage.py +++ b/topomodelx/nn/hypergraph/hypersage.py @@ -16,9 +16,12 @@ class HyperSAGE(torch.nn.Module): Dimension of node features n_layer : int Amount of message passing layers. Default is 2. - device : string - Device to train model on. Default is "cpu". + References + ---------- + .. [AGRW20] Devanshu Arya, Deepak K Gupta, Stevan Rudinac and Marcel Worring. + HyperSAGE: Generalizing inductive representation learning on hypergraphs. + arXiv preprint arXiv:2010.04558. 
2020 """ def __init__(self, in_channels, out_channels, n_layers=2, **kwargs): diff --git a/topomodelx/nn/hypergraph/hypersage_layer.py b/topomodelx/nn/hypergraph/hypersage_layer.py index 54ac8a78..0bb58304 100644 --- a/topomodelx/nn/hypergraph/hypersage_layer.py +++ b/topomodelx/nn/hypergraph/hypersage_layer.py @@ -38,12 +38,6 @@ def forward(self, x: torch.Tensor): class HyperSAGELayer(MessagePassing): r"""Implementation of the HyperSAGE layer proposed in [AGRW20]. - References - ---------- - .. [AGRW20] Devanshu Arya, Deepak K Gupta, Stevan Rudinac and Marcel Worring. - HyperSAGE: Generalizing inductive representation learning on hypergraphs. - arXiv preprint arXiv:2010.04558. 2020 - Parameters ---------- in_channels : int @@ -60,6 +54,12 @@ class HyperSAGELayer(MessagePassing): Initialization method. device : str, default="cpu" Device name to train layer on. + + References + ---------- + .. [AGRW20] Devanshu Arya, Deepak K Gupta, Stevan Rudinac and Marcel Worring. + HyperSAGE: Generalizing inductive representation learning on hypergraphs. + arXiv preprint arXiv:2010.04558. 2020 """ def __init__( diff --git a/topomodelx/nn/hypergraph/unigcnii.py b/topomodelx/nn/hypergraph/unigcnii.py index 8428bd99..243177ea 100644 --- a/topomodelx/nn/hypergraph/unigcnii.py +++ b/topomodelx/nn/hypergraph/unigcnii.py @@ -20,6 +20,12 @@ class UniGCNII(torch.nn.Module): Parameter of the UniGCNII layer. beta: float, default=0.5 Parameter of the UniGCNII layer. + + References + ---------- + .. [JJ21] Jing Huang and Jie Yang. UniGNN: a unified framework for graph and hypergraph neural networks. + In Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence, IJCAI-21, + 2021. https://arxiv.org/pdf/2105.00956.pdf """ def __init__(self, num_classes=2, in_features=1, num_layers=2, alpha=0.5, beta=0.5): diff --git a/topomodelx/nn/hypergraph/unisage_layer.py b/topomodelx/nn/hypergraph/unisage_layer.py index d92e7fa8..67afaa66 100644 --- a/topomodelx/nn/hypergraph/unisage_layer.py +++ b/topomodelx/nn/hypergraph/unisage_layer.py @@ -1,14 +1,12 @@ """Implementation of UniSAGE layer from Huang et. al.: UniGNN: a Unified Framework for Graph and Hypergraph Neural Networks.""" + from typing import Literal import torch class UniSAGELayer(torch.nn.Module): - """Layer of UniSAGE. - - Implementation of UniSAGE layer proposed in [JJ21]_. - + """Layer of UniSAGE proposed in [JJ21]. 
Parameters ---------- From ff76692731f535b57320336c6aa4ae03012dd460 Mon Sep 17 00:00:00 2001 From: Mustafa Hajij Date: Fri, 8 Sep 2023 15:12:46 -0700 Subject: [PATCH 31/31] fixing typos --- test/nn/cell/test_can.py | 2 +- test/nn/cell/test_can_layer_bis.py | 72 ------------------- test/nn/cell/test_ccxn.py | 2 +- test/nn/cell/test_cwn.py | 2 +- test/nn/hypergraph/test_allset.py | 2 +- test/nn/hypergraph/test_allset_transformer.py | 2 +- test/nn/hypergraph/test_dhgcn.py | 2 +- test/nn/hypergraph/test_hmpnn.py | 2 +- test/nn/hypergraph/test_hnhn.py | 2 +- test/nn/hypergraph/test_hypergat.py | 2 +- test/nn/hypergraph/test_hypersage.py | 2 +- test/nn/hypergraph/test_unigcn.py | 2 +- test/nn/hypergraph/test_unigcnii.py | 2 +- test/nn/hypergraph/test_unigin.py | 2 +- test/nn/hypergraph/test_unisage.py | 2 +- 15 files changed, 14 insertions(+), 86 deletions(-) delete mode 100644 test/nn/cell/test_can_layer_bis.py diff --git a/test/nn/cell/test_can.py b/test/nn/cell/test_can.py index 406c4f12..1d2f5692 100644 --- a/test/nn/cell/test_can.py +++ b/test/nn/cell/test_can.py @@ -9,7 +9,7 @@ class TestCAN: """Test CAN.""" - def test_fowared(self): + def test_forward(self): """Test forward method.""" device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = CAN( diff --git a/test/nn/cell/test_can_layer_bis.py b/test/nn/cell/test_can_layer_bis.py deleted file mode 100644 index d3387114..00000000 --- a/test/nn/cell/test_can_layer_bis.py +++ /dev/null @@ -1,72 +0,0 @@ -"""Unit tests for the CANLayer class.""" - -import pytest -import torch - -from topomodelx.nn.cell.can_layer_bis import CANLayer - - -class TestCANLayer: - """Unit tests for the CANLayer class.""" - - def setup_method(self): - """Set up the CAN for tests.""" - self.n_1_cells = 30 - self.channels = 10 - - self.x_1 = torch.randn(self.n_1_cells, self.channels) - - self.down_laplacian = ( - torch.randn(self.n_1_cells, self.n_1_cells).to_sparse().float() - ) - self.up_laplacian = ( - torch.randn(self.n_1_cells, self.n_1_cells).to_sparse().float() - ) - - # without attention - self.can_layer = CANLayer( - channels=self.channels, - att=False, - ) - - # With attention - self.can_layer_with_att = CANLayer( - channels=self.channels, - att=True, - ) - - def test_forward(self): - """Test the forward method of CANLayer.""" - result = self.can_layer.forward( - self.x_1, self.down_laplacian, self.up_laplacian - ) - - assert result.shape == (self.n_1_cells, self.channels) - - result = self.can_layer_with_att.forward( - self.x_1, self.down_laplacian, self.up_laplacian - ) - assert result.shape == (self.n_1_cells, self.channels) - - def test_reset_parameters(self): - """Test the reset_parameters method of CANLayer with attention.""" - gain = 1.0 - with pytest.raises(RuntimeError): - self.can_layer_with_att.initialization = "invalid" - self.can_layer_with_att.reset_parameters(gain=gain) - - # Test xavier_uniform on attention - self.can_layer_with_att.initialization = "xavier_uniform" - self.can_layer_with_att.att_weight = torch.nn.Parameter( - torch.Tensor(self.channels, 1) - ) - self.can_layer_with_att.reset_parameters(gain=gain) - assert self.can_layer_with_att.att_weight.shape == (self.channels, 1) - - # Test xavier_normal on attention - self.can_layer_with_att.initialization = "xavier_normal" - self.can_layer_with_att.att_weight = torch.nn.Parameter( - torch.Tensor(self.channels, 1) - ) - self.can_layer_with_att.reset_parameters(gain=gain) - assert self.can_layer_with_att.att_weight.shape == (self.channels, 1) diff --git 
a/test/nn/cell/test_ccxn.py b/test/nn/cell/test_ccxn.py index 153d848d..43ccc18a 100644 --- a/test/nn/cell/test_ccxn.py +++ b/test/nn/cell/test_ccxn.py @@ -8,7 +8,7 @@ class TestCCXN: """Test CCXN.""" - def test_fowared(self): + def test_forward(self): """Test forward method.""" device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = CCXN( diff --git a/test/nn/cell/test_cwn.py b/test/nn/cell/test_cwn.py index e4fcb584..6597c0b0 100644 --- a/test/nn/cell/test_cwn.py +++ b/test/nn/cell/test_cwn.py @@ -8,7 +8,7 @@ class TestCWN: """Test CWN.""" - def test_fowared(self): + def test_forward(self): """Test forward method.""" device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = CWN( diff --git a/test/nn/hypergraph/test_allset.py b/test/nn/hypergraph/test_allset.py index d9b3e4ce..14e0e18e 100644 --- a/test/nn/hypergraph/test_allset.py +++ b/test/nn/hypergraph/test_allset.py @@ -9,7 +9,7 @@ class TestAllSet: """Test AllSet.""" - def test_fowared(self): + def test_forward(self): """Test forward method.""" device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = AllSet( diff --git a/test/nn/hypergraph/test_allset_transformer.py b/test/nn/hypergraph/test_allset_transformer.py index 664d0735..f4606c7e 100644 --- a/test/nn/hypergraph/test_allset_transformer.py +++ b/test/nn/hypergraph/test_allset_transformer.py @@ -9,7 +9,7 @@ class TestAllSetTransfomer: """Test AllSet.""" - def test_fowared(self): + def test_forward(self): """Test forward method.""" device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = AllSetTransformer( diff --git a/test/nn/hypergraph/test_dhgcn.py b/test/nn/hypergraph/test_dhgcn.py index 79bc4f3b..0e22d1b7 100644 --- a/test/nn/hypergraph/test_dhgcn.py +++ b/test/nn/hypergraph/test_dhgcn.py @@ -8,7 +8,7 @@ class TestDHGCNL: """Test the DHGCN.""" - def test_fowared(self): + def test_forward(self): """Test forward method.""" # device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = DHGCN(channels_node=8, n_layers=2) diff --git a/test/nn/hypergraph/test_hmpnn.py b/test/nn/hypergraph/test_hmpnn.py index b7ad0867..8624a70c 100644 --- a/test/nn/hypergraph/test_hmpnn.py +++ b/test/nn/hypergraph/test_hmpnn.py @@ -9,7 +9,7 @@ class TestHMPNN: """Test the HMPNN.""" - def test_fowared(self): + def test_forward(self): """Test forward method.""" device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = HMPNN(8, (8, 8), 1, 1).to(device) diff --git a/test/nn/hypergraph/test_hnhn.py b/test/nn/hypergraph/test_hnhn.py index 831fde7e..be04e392 100644 --- a/test/nn/hypergraph/test_hnhn.py +++ b/test/nn/hypergraph/test_hnhn.py @@ -9,7 +9,7 @@ class TestHNHN: """Test the HNHN.""" - def test_fowared(self): + def test_forward(self): """Test forward method.""" device = torch.device("cuda" if torch.cuda.is_available() else "cpu") diff --git a/test/nn/hypergraph/test_hypergat.py b/test/nn/hypergraph/test_hypergat.py index 0c9edfb2..57c46401 100644 --- a/test/nn/hypergraph/test_hypergat.py +++ b/test/nn/hypergraph/test_hypergat.py @@ -9,7 +9,7 @@ class TestHNHN: """Test the HyperGAT.""" - def test_fowared(self): + def test_forward(self): """Test forward method.""" device = torch.device("cuda" if torch.cuda.is_available() else "cpu") diff --git a/test/nn/hypergraph/test_hypersage.py b/test/nn/hypergraph/test_hypersage.py index 69a0d74a..9e1b8548 100644 --- a/test/nn/hypergraph/test_hypersage.py +++ b/test/nn/hypergraph/test_hypersage.py @@ -9,7 +9,7 @@ class TestHyperSAGE: """Test 
the HyperSAGE.""" - def test_fowared(self): + def test_forward(self): """Test forward method.""" device = torch.device("cuda" if torch.cuda.is_available() else "cpu") diff --git a/test/nn/hypergraph/test_unigcn.py b/test/nn/hypergraph/test_unigcn.py index 393e7a60..3cce0c10 100644 --- a/test/nn/hypergraph/test_unigcn.py +++ b/test/nn/hypergraph/test_unigcn.py @@ -9,7 +9,7 @@ class TestUniGCN: """Test the UniGCN.""" - def test_fowared(self): + def test_forward(self): """Test forward method.""" device = torch.device("cuda" if torch.cuda.is_available() else "cpu") diff --git a/test/nn/hypergraph/test_unigcnii.py b/test/nn/hypergraph/test_unigcnii.py index b1774e82..50703f3f 100644 --- a/test/nn/hypergraph/test_unigcnii.py +++ b/test/nn/hypergraph/test_unigcnii.py @@ -9,7 +9,7 @@ class TestUniGCNII: """Test the UniGCNII.""" - def test_fowared(self): + def test_forward(self): """Test forward method.""" device = torch.device("cuda" if torch.cuda.is_available() else "cpu") diff --git a/test/nn/hypergraph/test_unigin.py b/test/nn/hypergraph/test_unigin.py index 90439cd8..a55c805e 100644 --- a/test/nn/hypergraph/test_unigin.py +++ b/test/nn/hypergraph/test_unigin.py @@ -9,7 +9,7 @@ class TestUniGIN: """Test the UniGIN.""" - def test_fowared(self): + def test_forward(self): """Test forward method.""" device = torch.device("cuda" if torch.cuda.is_available() else "cpu") diff --git a/test/nn/hypergraph/test_unisage.py b/test/nn/hypergraph/test_unisage.py index ab06c9b0..f7ae1b75 100644 --- a/test/nn/hypergraph/test_unisage.py +++ b/test/nn/hypergraph/test_unisage.py @@ -9,7 +9,7 @@ class TestUniSAGE: """Test the UniSAGE.""" - def test_fowared(self): + def test_forward(self): """Test forward method.""" device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
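
For anyone trying out the models added in this series, here is a minimal smoke-test sketch in the spirit of the unit tests above. It assumes this branch of topomodelx is importable; the 2-node, 2-hyperedge sizes and the random dense-to-sparse incidence matrix mirror test/nn/hypergraph/test_unigcn.py and are illustrative assumptions, not a recommended experimental setup.

import numpy as np
import torch

from topomodelx.nn.hypergraph.unigcn import UniGCN

# Toy hypergraph: 2 nodes, 2 hyperedges, random incidence made sparse,
# as in the unit tests of this series.
incidence_1 = torch.from_numpy(np.random.rand(2, 2)).to_sparse().float()
x_1 = torch.rand(2, 2).float()

model = UniGCN(channels_edge=2, channels_node=2, n_layers=2)
y = model(x_1, incidence_1)

# Two UniGCN layers, then global max pooling and a linear head with a
# sigmoid: the output is a single complex-level label of shape [1].
assert y.shape == torch.Size([1])

The same pattern, a random incidence matrix, tiny feature tensors, and a single shape assertion, is what the test_forward methods above check for each of the new hypergraph models.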